def evaluate():
    with tf.Graph().as_default():
        #images, labels = mnist.load_test_data(FLAGS.test_data)
        mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
        x_ = tf.placeholder(tf.float32, shape=[None, 784])  # data is loaded as a flat 784-dim vector
        x = tf.reshape(x_, [-1, 28, 28, 1], name='x')  # MNIST images have shape (28, 28, 1)
        y = tf.placeholder(shape=[None, 10], dtype=tf.float32, name='y')  # 10 labels

        model = Model()
        logits = model.inference(x, keep_prob=1.0)  # standard dropout: no dropout at test time
        accuracy = model.accuracy(logits, y)
        saver = tf.train.Saver()

        with tf.Session() as sess:
            tf.global_variables_initializer().run()
            saver.restore(sess, FLAGS.checkpoint_file_path)
            total_accuracy = sess.run([accuracy],
                                      feed_dict={x_: mnist.test.images,
                                                 y: mnist.test.labels})
            print('Test accuracy: {}'.format(total_accuracy))
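# The snippets in this file all assume a shared preamble that is not shown.
# A minimal sketch of what it would contain, inferred from the code itself
# (flag names appear in the snippets; the default values here are placeholders,
# not the originals). The CSV-loading main() further assumes DATA_PATH,
# LABL_PATH, Loader, and the EPOCH/LOAD/TEST/SAVE constants, also not shown.
import math
import os

import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

from model import Model  # assumed: the LeNet-style model class used throughout

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('checkpoint_file_path', 'checkpoints/model.ckpt', 'checkpoint to restore or save')
tf.app.flags.DEFINE_string('summary_dir', 'summaries/', 'directory for TensorBoard summaries')
tf.app.flags.DEFINE_integer('num_iter', 100000, 'number of training iterations')
tf.app.flags.DEFINE_integer('batch_size', 100, 'mini-batch size')
tf.app.flags.DEFINE_integer('T', 50, 'number of stochastic forward passes')
tf.app.flags.DEFINE_float('prob', 0.5, 'dropout keep probability')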
def evaluate():
    with tf.Graph().as_default():
        mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
        x_ = tf.placeholder(tf.float32, shape=[None, 784])  # data is loaded as a flat 784-dim vector
        x = tf.reshape(x_, [-1, 28, 28, 1], name='x')  # MNIST images have shape (28, 28, 1)
        y = tf.placeholder(shape=[None, 10], dtype=tf.float32, name='y')  # 10 labels
        softmax_tensors = tf.placeholder(tf.float32, shape=[None, 10000, 10])

        model = Model()
        logits = model.inference(x, keep_prob=FLAGS.prob)  # MC dropout: dropout stays active at test time
        softmax = tf.nn.softmax(logits)
        shape = tf.shape(softmax)
        saver = tf.train.Saver(max_to_keep=None)
        mean = tf.reduce_mean(softmax_tensors, 0)  # predictive mean over forward passes
        accuracy = model.accuracy(mean, y)

        softmax_list = []
        with tf.Session() as sess:
            tf.global_variables_initializer().run()
            saver.restore(sess, FLAGS.checkpoint_file_path)
            # T stochastic forward passes over the test set
            for i in range(FLAGS.T):
                softmaxi = sess.run([softmax],
                                    feed_dict={x_: mnist.test.images,
                                               y: mnist.test.labels})
                softmax_list.append(softmaxi)
            # log accuracy as a function of the number of averaged passes
            all_softmax = np.squeeze(np.array(softmax_list), axis=1)  # (T, 10000, 10)
            for i in range(FLAGS.T):
                arr = all_softmax[:i + 1]  # first i+1 passes (fixes an off-by-one in the original slicing)
                total_accuracy, soft = sess.run([accuracy, mean],
                                                feed_dict={softmax_tensors: arr,
                                                           y: mnist.test.labels})
                f = open('bias_var.log', 'a')
                f.write(str(FLAGS.prob) + "," + str(i + 1) + "," + str(total_accuracy) + '\n')
                f.close()
def evaluate():
    with tf.Graph().as_default():
        #images, labels = mnist.load_test_data(FLAGS.test_data)
        mnist = input_data.read_data_sets(
            "/home/rns38/Documents/MLSALT4/lenet-all-standard-dropout/MNIST_data/",
            one_hot=True)
        x_ = tf.placeholder(tf.float32, shape=[None, 784])  # data is loaded as a flat 784-dim vector
        x = tf.reshape(x_, [-1, 28, 28, 1], name='x')  # MNIST images have shape (28, 28, 1)
        y = tf.placeholder(shape=[None, 10], dtype=tf.float32, name='y')  # 10 labels

        model = Model()
        logits = model.inference(x, keep_prob=1.0)
        accuracy = model.accuracy(logits, y)
        saver = tf.train.Saver()

        with tf.Session() as sess:
            tf.global_variables_initializer().run()
            saver.restore(sess, FLAGS.checkpoint_file_path)
            total_accuracy = sess.run([accuracy],
                                      feed_dict={x_: mnist.test.images,
                                                 y: mnist.test.labels})
            print('Test accuracy: {}'.format(total_accuracy))
            # append the accuracy to a log file (added by ms)
            output = total_accuracy[0]
            f = open('std_drop_eval.log', 'a')
            f.write(str(output) + '\n')
            f.close()
def evaluate():
    with tf.Graph().as_default():
        mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
        x_ = tf.placeholder(tf.float32, shape=[None, 784])  # data is loaded as a flat 784-dim vector
        x = tf.reshape(x_, [-1, 28, 28, 1], name='x')  # MNIST images have shape (28, 28, 1)
        y = tf.placeholder(shape=[None, 10], dtype=tf.float32, name='y')  # 10 labels
        softmax_tensors = tf.placeholder(tf.float32, shape=[FLAGS.T, 10000, 10])

        model = Model()
        logits = model.inference(x, keep_prob=FLAGS.prob)  # MC dropout: dropout stays active at test time
        softmax = tf.nn.softmax(logits)
        shape = tf.shape(softmax)
        saver = tf.train.Saver(max_to_keep=None)
        mean = tf.reduce_mean(softmax_tensors, 0)  # predictive mean over forward passes
        accuracy = model.accuracy(mean, y)

        softmax_list = []
        with tf.Session() as sess:
            tf.global_variables_initializer().run()
            saver.restore(sess, FLAGS.checkpoint_file_path)
            for i in range(FLAGS.T):
                softmaxi = sess.run([softmax],
                                    feed_dict={x_: mnist.test.images,
                                               y: mnist.test.labels})
                softmax_list.append(softmaxi)
            total_accuracy, soft = sess.run([accuracy, mean],
                                            feed_dict={softmax_tensors: np.squeeze(np.array(softmax_list), axis=1),
                                                       y: mnist.test.labels})
            print('Test accuracy: {}'.format(total_accuracy))
            # save (mean softmax, one-hot label) pairs for uncertainty analysis
            L = np.array([soft.flatten(), mnist.test.labels.flatten()])
            L = np.transpose(L)
            #L = L[L[:, 0].argsort()]
            np.savetxt('lenet-ip_mcdrop_uncertainty.log', L, delimiter=',')
def evaluate():
    with tf.Graph().as_default():
        mnist = input_data.read_data_sets(
            "/home/rns38/Documents/MLSALT4/lenet-all-standard-dropout/MNIST_data/",
            one_hot=True)
        x_ = tf.placeholder(tf.float32, shape=[None, 784])  # data is loaded as a flat 784-dim vector
        x = tf.reshape(x_, [-1, 28, 28, 1], name='x')  # MNIST images have shape (28, 28, 1)
        y = tf.placeholder(shape=[None, 10], dtype=tf.float32, name='y')  # 10 labels
        softmax_tensors = tf.placeholder(tf.float32, shape=[None, 10000, 10])

        model = Model()
        logits = model.inference(x, keep_prob=FLAGS.prob)  # MC dropout: dropout stays active at test time
        softmax = tf.nn.softmax(logits)
        shape = tf.shape(softmax)
        saver = tf.train.Saver(max_to_keep=None)
        mean = tf.reduce_mean(softmax_tensors, 0)  # predictive mean over forward passes
        accuracy = model.accuracy(mean, y)

        softmax_list = []
        with tf.Session() as sess:
            tf.global_variables_initializer().run()
            saver.restore(sess, FLAGS.checkpoint_file_path)
            for i in range(FLAGS.T):
                softmaxi = sess.run([softmax],
                                    feed_dict={x_: mnist.test.images,
                                               y: mnist.test.labels})
                softmax_list.append(softmaxi)
            # accuracy as a function of the number of averaged passes; only the
            # final (all-T) accuracy is written to the log
            all_softmax = np.squeeze(np.array(softmax_list), axis=1)  # (T, 10000, 10)
            for i in range(FLAGS.T):
                arr = all_softmax[:i + 1]  # first i+1 passes (fixes an off-by-one in the original slicing)
                total_accuracy, soft = sess.run([accuracy, mean],
                                                feed_dict={softmax_tensors: arr,
                                                           y: mnist.test.labels})
                if i == FLAGS.T - 1:
                    f = open('mc_drop_eval.log', 'a')
                    split_path = FLAGS.checkpoint_file_path.split()
                    iteration_number = split_path[0][-6:]  # unused; intended to extract the iteration suffix
                    f.write(str(total_accuracy) + '\n')
                    f.close()
def train():
    model = Model()
    with tf.Graph().as_default():
        # Load training data and use the test set for evaluation, in one-hot format
        mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
        # images = mnist.train.images  # Returns np.array
        # labels = np.asarray(mnist.train.labels, dtype=np.int32)
        # val_images = mnist.test.images  # Returns np.array
        # val_labels = np.asarray(mnist.test.labels, dtype=np.int32)
        x_ = tf.placeholder(tf.float32, shape=[None, 784])  # data is loaded as a flat 784-dim vector
        x = tf.reshape(x_, [-1, 28, 28, 1], name='x')  # MNIST images have shape (28, 28, 1)
        y = tf.placeholder(shape=[None, 10], dtype=tf.float32, name='y')  # 10 labels
        keep_prob = tf.placeholder(tf.float32, name='dropout_prob')
        global_step = tf.contrib.framework.get_or_create_global_step()

        logits = model.inference(x, keep_prob=keep_prob)
        loss = model.loss(logits=logits, labels=y)
        accuracy = model.accuracy(logits, y)
        summary_op = tf.summary.merge_all()
        train_op = model.train(loss, global_step=global_step)
        init = tf.global_variables_initializer()
        saver = tf.train.Saver(max_to_keep=None)

        with tf.Session(config=tf.ConfigProto(log_device_placement=False)) as sess:
            writer = tf.summary.FileWriter(FLAGS.summary_dir, sess.graph)
            sess.run(init)
            for i in range(FLAGS.num_iter):
                batch = mnist.train.next_batch(FLAGS.batch_size)
                _, cur_loss, summary = sess.run([train_op, loss, summary_op],
                                                feed_dict={x_: batch[0],
                                                           y: batch[1],
                                                           keep_prob: 0.5})
                writer.add_summary(summary, i)
                if i % 10000 == 0:
                    validation_accuracy = accuracy.eval(
                        feed_dict={x_: mnist.test.images,
                                   y: mnist.test.labels,
                                   keep_prob: 1.0})
                    f = open('trainingStdDrop.log', 'a+')
                    f.write('{}, {}, {} \n'.format(i, cur_loss, validation_accuracy))
                    f.close()
                    saver.save(sess, FLAGS.checkpoint_file_path + "-" + str(i))
def train():
    model = Model()
    with tf.Graph().as_default():
        # Load training data and use the test set for evaluation, in one-hot format.
        # Train on a random 13750-image subset of the training set.
        mnist_ = input_data.read_data_sets("MNIST_data/", one_hot=True)
        perm = np.random.permutation(13750)
        images = mnist_.train.images[perm]
        labels = mnist_.train.labels[perm]
        print(images.shape)  # (13750, 784) np array of flattened images

        x_ = tf.placeholder(tf.float32, shape=[None, 784])  # data is loaded as a flat 784-dim vector
        x = tf.reshape(x_, [-1, 28, 28, 1], name='x')  # MNIST images have shape (28, 28, 1)
        y = tf.placeholder(shape=[None, 10], dtype=tf.float32, name='y')  # 10 labels
        keep_prob = tf.placeholder(tf.float32, name='dropout_prob')
        global_step = tf.contrib.framework.get_or_create_global_step()

        logits = model.inference(x, keep_prob=keep_prob)
        loss = model.loss(logits=logits, labels=y)
        accuracy = model.accuracy(logits, y)
        summary_op = tf.summary.merge_all()
        train_op = model.train(loss, global_step=global_step)

        batch_size = FLAGS.batch_size
        input_size = 13750
        porp = int(math.ceil(input_size / batch_size))  # batches per epoch
        init = tf.global_variables_initializer()
        saver = tf.train.Saver(max_to_keep=100000)

        with tf.Session(config=tf.ConfigProto(log_device_placement=False)) as sess:
            writer = tf.summary.FileWriter(FLAGS.summary_dir, sess.graph)
            sess.run(init)
            for i in range(FLAGS.num_iter):
                if i % porp == 0:
                    permutation = np.random.permutation(input_size)  # reshuffle at the start of each epoch
                    increment = 0  # restart the batch counter
                batch_idx = permutation[increment * batch_size:(increment + 1) * batch_size]
                increment += 1
                image_batch = images[batch_idx]  # (batch_size, 784) array of flattened images
                label_batch = labels[batch_idx]  # (batch_size, 10) array of one-hot labels
                _, cur_loss, summary = sess.run([train_op, loss, summary_op],
                                                feed_dict={x_: image_batch,
                                                           y: label_batch,
                                                           keep_prob: 0.5})
                writer.add_summary(summary, i)
                if i % 5000 == 0:
                    validation_accuracy = accuracy.eval(
                        feed_dict={x_: mnist_.test.images,
                                   y: mnist_.test.labels,
                                   keep_prob: 1.0})
                    f = open('trainingStdDrop.log', 'a+')
                    f.write('{}, {}, {} \n'.format(i, cur_loss, validation_accuracy))
                    f.close()
                    saver.save(sess, FLAGS.checkpoint_file_path + "-" + str(i))
def evaluate():
    with tf.Graph().as_default():
        #images, labels = mnist.load_test_data(FLAGS.test_data)
        mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
        x_ = tf.placeholder(tf.float32, shape=[None, 784])  # data is loaded as a flat 784-dim vector
        x = tf.reshape(x_, [-1, 28, 28, 1], name='x')  # MNIST images have shape (28, 28, 1)
        y = tf.placeholder(shape=[None, 10], dtype=tf.float32, name='y')  # 10 labels

        model = Model()
        softmax_prob_layer = []
        # logits with dropout kept active at test time
        logits = model.inference(x, keep_prob=0.5)
        # normalized class probabilities
        softmax_prob = tf.nn.softmax(logits)
        saver = tf.train.Saver()

        with tf.Session() as sess:
            tf.global_variables_initializer().run()
            saver.restore(sess, FLAGS.checkpoint_file_path)
            # Monte Carlo stochastic forward passes:
            for i in range(50):
                softmax_prob_layer.append(
                    sess.run([softmax_prob],
                             feed_dict={x_: mnist.test.images,
                                        y: mnist.test.labels}))
            # average the 50 softmax outputs to get the predictive mean
            softmax_prob_sum = tf.convert_to_tensor(np.array(softmax_prob_layer))
            softmax_prob_average = tf.reduce_mean(softmax_prob_sum, 0)
            softmax_prob_average = tf.squeeze(softmax_prob_average)
            # obtain the test accuracy:
            accuracy = model.accuracy(softmax_prob_average, y)
            total_accuracy = sess.run([accuracy],
                                      feed_dict={x_: mnist.test.images,
                                                 y: mnist.test.labels})
            print('Test accuracy: {}'.format(total_accuracy))
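# The evaluate() above builds new graph ops (convert_to_tensor / reduce_mean /
# accuracy) inside the session, which grows the graph on every call. For
# reference, a minimal NumPy sketch of the same predictive-mean computation,
# assuming softmax_prob_layer holds the 50 outputs of sess.run([softmax_prob], ...):
probs = np.squeeze(np.array(softmax_prob_layer), axis=1)  # (50, 10000, 10)
mean_prob = probs.mean(axis=0)                            # predictive mean, (10000, 10)
predictions = mean_prob.argmax(axis=1)                    # hard labels from the averaged softmax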
def main():
    # Data
    print("Getting dir infos...")
    files = [f for f in os.listdir(DATA_PATH) if f.rfind('.csv') != -1]
    files = files[:5000]

    print("Loading labels...")
    y_csv = np.loadtxt(LABL_PATH,
                       dtype={'names': ('Id', 'Class'),
                              'formats': ('|S20', np.int)},
                       delimiter=',',
                       skiprows=1)
    y_tmp = [d[1] for d in y_csv if d[0].decode('utf-8') + '.csv' in files]
    y_data = []
    for t in y_tmp:
        tmp = [0] * Model.SIZE_Y  # one-hot encode the class index
        tmp[t] = 1
        y_data.append(tmp)
    y_data = np.array(y_data)

    print("Loading data...")
    loader = Loader()
    x_data = np.empty([len(files), loader.SIZE**2])
    for i in range(len(files)):
        x_data[i] = loader.load(os.path.join(DATA_PATH, files[i]))

    # split off the last 30% as a test set
    x_test = x_data[-int(0.3 * len(x_data)):]
    x_data = x_data[:int(0.7 * len(x_data))]
    y_test = y_data[-int(0.3 * len(y_data)):]
    y_data = y_data[:int(0.7 * len(y_data))]

    # Tensorflow
    m1 = Model("m1")
    m1.set(16, 10, 0.01)
    if LOAD or TEST:
        m1.load("./model")
    if not TEST:
        print("Learning start...")
        for epoch in range(EPOCH):
            cost, _, _ = m1.train(x_data, y_data)  # returns cost plus optimizer ops
            if epoch % 100 == 0:
                print("In", str(epoch) + "... cost:", cost)
        print("Learning finished")
    print("Accuracy :", m1.accuracy(x_test, y_test))
    if SAVE:
        m1.save("./model/ckpt")
def evaluate():
    with tf.Graph().as_default():
        mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
        labels = mnist.test.labels
        noiseLevel = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
        # plt.gray()
        # plt.imshow(dataset5[53])
        # plt.show()
        x = tf.placeholder(shape=[10000, 28, 28, 1], dtype=tf.float32, name='x')
        y = tf.placeholder(shape=[10000, 10], dtype=tf.float32, name='y')  # 10 labels
        softmax_tensors = tf.placeholder(tf.float32, shape=[FLAGS.T, 10000, 10])

        model = Model()
        logits = model.inference(x, keep_prob=0.5)  # MC dropout: dropout stays active at test time
        softmax = tf.nn.softmax(logits)
        shape = tf.shape(softmax)
        saver = tf.train.Saver(max_to_keep=None)
        mean = tf.reduce_mean(softmax_tensors, 0)
        accuracy = model.accuracy(mean, y)

        for l in range(len(noiseLevel)):
            # each noise level has a pre-generated noisy copy of the test set
            data = np.load(str(noiseLevel[l]) + '.npy')
            data = data.reshape([10000, 28, 28, 1])
            with tf.Session(config=tf.ConfigProto(log_device_placement=False)) as sess:
                tf.global_variables_initializer().run()
                saver.restore(sess, FLAGS.checkpoint_file_path)
                softmax_list = []
                for i in range(FLAGS.T):
                    softmaxi = sess.run([softmax], feed_dict={x: data, y: labels})
                    softmax_list.append(softmaxi)
                mean_softmax = sess.run([mean],
                                        feed_dict={softmax_tensors: np.squeeze(np.array(softmax_list), axis=1)})
                print(np.squeeze(mean_softmax).shape)
                # mean of the max softmax probability: a simple confidence measure
                softmax_max = np.max(np.squeeze(mean_softmax), axis=1)
                print(softmax_max.shape)
                summ = np.sum(softmax_max)
                print(np.divide(summ, 10000.0))
def evaluate():
    with tf.Graph().as_default():
        mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
        x_ = tf.placeholder(tf.float32, shape=[None, 784])  # data is loaded as a flat 784-dim vector
        x = tf.reshape(x_, [-1, 28, 28, 1], name='x')  # MNIST images have shape (28, 28, 1)
        y = tf.placeholder(shape=[None, 10], dtype=tf.float32, name='y')  # 10 labels
        softmax_tensors = tf.placeholder(tf.float32, shape=[FLAGS.T, 10000, 10])

        model = Model()
        logits = model.inference(x, keep_prob=0.5)  # MC dropout: dropout stays active at test time
        softmax = tf.nn.softmax(logits)
        shape = tf.shape(softmax)
        saver = tf.train.Saver(max_to_keep=None)
        mean = tf.reduce_mean(softmax_tensors, 0)
        accuracy = model.accuracy(mean, y)

        # for the accuracy of each individual forward pass
        softmax_each = tf.placeholder(tf.float32, shape=[10000, 10])
        accuracyi = model.accuracy(softmax_each, y)

        softmax_list = []
        accuracy_list = []
        with tf.Session() as sess:
            tf.global_variables_initializer().run()
            saver.restore(sess, FLAGS.checkpoint_file_path)
            for i in range(FLAGS.T):
                softmaxi = sess.run([softmax],
                                    feed_dict={x_: mnist.test.images,
                                               y: mnist.test.labels})
                softmax_list.append(softmaxi)
                # accuracy of this individual forward pass:
                #print(softmaxi[0])  # debug
                accuracyi1 = sess.run([accuracyi],
                                      feed_dict={softmax_each: np.squeeze(np.array(softmaxi[0])),
                                                 y: mnist.test.labels})
                accuracy_list.append(accuracyi1)
            total_accuracy = sess.run([accuracy],
                                      feed_dict={softmax_tensors: np.squeeze(np.array(softmax_list), axis=1),
                                                 y: mnist.test.labels})
            standard_deviation = np.std(np.array(accuracy_list))
            print('Test accuracy: {}'.format(total_accuracy))
            print('Standard deviation of {} forward passes: {}'.format(FLAGS.T, standard_deviation))
            print('Accuracy list is', accuracy_list)
def train():
    model = Model()
    with tf.Graph().as_default():
        CIFAR10.data_path = 'data/CIFAR-10/'
        CIFAR10.maybe_download_and_extract()
        class_names = CIFAR10.load_class_names()
        print(class_names)
        # load 50,000 training images (np arrays); cls_train holds class indices,
        # labels_train the one-hot encodings
        images_train, cls_train, labels_train = CIFAR10.load_training_data()
        # load 10,000 test images (np arrays)
        images_test, cls_test, labels_test = CIFAR10.load_test_data()

        x = tf.placeholder(shape=[None, 32, 32, 3], dtype=tf.float32, name='x')  # CIFAR-10 images have shape (32, 32, 3)
        y = tf.placeholder(shape=[None, 10], dtype=tf.float32, name='y')  # 10 labels
        keep_prob = tf.placeholder(tf.float32, name='dropout_prob')
        global_step = tf.contrib.framework.get_or_create_global_step()

        logits = model.inference(x, keep_prob=keep_prob)
        loss = model.loss(logits=logits, labels=y)
        accuracy = model.accuracy(logits, y)
        summary_op = tf.summary.merge_all()
        train_op = model.train(loss, global_step=global_step)
        init = tf.global_variables_initializer()
        saver = tf.train.Saver(max_to_keep=None)

        batch_size = FLAGS.batch_size
        input_size = 50000  # 50,000 training images
        porp = int(math.ceil(input_size / batch_size))  # batches per epoch

        with tf.Session(config=tf.ConfigProto(log_device_placement=False)) as sess:
            writer = tf.summary.FileWriter(FLAGS.summary_dir, sess.graph)
            sess.run(init)
            for i in range(FLAGS.num_iter):
                if i % porp == 0:
                    permutation = np.random.permutation(input_size)  # reshuffle at the start of each epoch
                    increment = 0  # restart the batch counter
                batch_idx = permutation[increment * batch_size:(increment + 1) * batch_size]
                increment += 1
                image_batch = images_train[batch_idx]  # (batch_size, 32, 32, 3) array of images
                label_batch = labels_train[batch_idx]  # (batch_size, 10) array of one-hot labels
                _, cur_loss, summary = sess.run([train_op, loss, summary_op],
                                                feed_dict={x: image_batch,
                                                           y: label_batch,
                                                           keep_prob: 0.5})
                writer.add_summary(summary, i)
                if i % 10 == 0:
                    # evaluate in batches to stay within memory limits
                    test_batch_acc_total = 0
                    test_batch_count = 0
                    for test_feature_batch, test_label_batch in batch_features_labels(
                            images_test, labels_test, batch_size=100):
                        test_batch_acc_total += accuracy.eval(
                            feed_dict={x: test_feature_batch,
                                       y: test_label_batch,
                                       keep_prob: 1.0})
                        test_batch_count += 1
                    validation_accuracy = test_batch_acc_total / test_batch_count
                    print("Iteration: {}\tLoss: {}\tValidation Accuracy: {}".format(
                        i, cur_loss, validation_accuracy))
                    f = open('trainingStdDrop.log', 'a+')
                    f.write('{}, {}, {} \n'.format(i, cur_loss, validation_accuracy))
                    f.close()
                if i % 1000 == 0:
                    saver.save(sess, FLAGS.checkpoint_file_path + "-" + str(i))
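# The CIFAR-10 train() above calls batch_features_labels(), which is not
# defined in this section. A minimal sketch under the assumption that it
# simply yields consecutive mini-batches of the test set:
def batch_features_labels(features, labels, batch_size):
    """Yield consecutive (features, labels) mini-batches (hypothetical helper)."""
    for start in range(0, len(features), batch_size):
        end = start + batch_size
        yield features[start:end], labels[start:end]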
def evaluate():
    with tf.Graph().as_default():
        mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
        labels = mnist.test.labels
        noiselevel = [0, 0.2, 0.4, 0.6, 0.8, 1]
        x = tf.placeholder(shape=[10000, 28, 28, 1], dtype=tf.float32, name='x')
        y = tf.placeholder(shape=[10000, 10], dtype=tf.float32, name='y')  # 10 labels
        softmax_tensors = tf.placeholder(tf.float32, shape=[FLAGS.T, 10000, 10])

        model = Model()
        logits = model.inference(x, keep_prob=1.0)  # standard dropout: deterministic at test time
        softmax = tf.nn.softmax(logits)
        shape = tf.shape(softmax)
        saver = tf.train.Saver(max_to_keep=None)
        mean = tf.reduce_mean(softmax_tensors, 0)
        accuracy = model.accuracy(mean, y)

        # for the accuracy of each individual forward pass
        softmax_each = tf.placeholder(tf.float32, shape=[10000, 10])
        accuracyi = model.accuracy(softmax_each, y)

        for j in range(len(noiselevel)):
            # each noise level has a pre-generated noisy copy of the test set
            data = np.load(str(noiselevel[j]) + '.npy')
            data = data.reshape([10000, 28, 28, 1])
            with tf.Session() as sess:
                tf.global_variables_initializer().run()
                saver.restore(sess, FLAGS.checkpoint_file_path)
                softmax_list = []
                accuracy_list = []
                for i in range(FLAGS.T):
                    softmaxi = sess.run([softmax], feed_dict={x: data, y: labels})
                    softmax_list.append(softmaxi)
                    # accuracy of this individual forward pass:
                    #print(softmaxi[0])
                    accuracyi1 = sess.run([accuracyi],
                                          feed_dict={softmax_each: np.squeeze(np.array(softmaxi[0])),
                                                     y: labels})
                    accuracy_list.append(accuracyi1)
                total_accuracy = sess.run([accuracy],
                                          feed_dict={softmax_tensors: np.squeeze(np.array(softmax_list), axis=1),
                                                     y: labels})
                standard_deviation = np.std(np.array(accuracy_list))
                f = open('out_stddrop.log', 'a+')
                f.write('noise intensity: {}\n'.format(noiselevel[j]))
                f.write('test accuracy: {}\n'.format(total_accuracy))
                f.close()
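# The two noise-level evaluations above load pre-generated files named
# '<level>.npy'. How those files were created is not shown anywhere in this
# section; one plausible sketch, assuming additive Gaussian noise scaled by
# the level and clipped back to the [0, 1] pixel range:
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
for level in [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]:
    noisy = mnist.test.images + level * np.random.randn(*mnist.test.images.shape)
    np.save(str(level) + '.npy', np.clip(noisy, 0.0, 1.0))  # filenames match str(noiseLevel[l]) + '.npy' above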