def run_test(suppress_out=False):
    if suppress_out:
        sys.stdout = open(os.devnull, "w")
        sys.stderr = open(os.devnull, "w")

    # Create 2 variables for keeping track of weights
    W = tf.Variable([.3], dtype=tf.float32)
    b = tf.Variable([-.3], dtype=tf.float32)

    # Create placeholders for inputs, and a linear model
    x = tf.placeholder(tf.float32)
    y = tf.placeholder(tf.float32)
    linear_model = W * x + b
    squared_deltas = tf.square(linear_model - y)
    loss = tf.reduce_sum(squared_deltas)
    init = tf.global_variables_initializer()

    # Create a session, initialize variables and run the linear model
    s = tf.compat.v1.Session()
    init_nofi = s.run(init)
    print "Initial : " + str(init_nofi)
    model_nofi = s.run(linear_model, {x: [1, 2, 3, 4]})
    print "Linear Model : " + str(model_nofi)
    loss_nofi = s.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]})
    print "Loss Function : " + str(loss_nofi)

    # Instrument the session
    fi = ti.TensorFI(s, logDir=logDir)

    # Create a log for visualizing in TensorBoard
    logs_path = "./logs"
    logWriter = tf.summary.FileWriter(logs_path, s.graph)

    # Initialize variables and run the linear model again (instrumented)
    init_fi = s.run(init)
    print "Initial : " + str(init_fi)
    model_fi = s.run(linear_model, {x: [1, 2, 3, 4]})
    print "Linear Model : " + str(model_fi)
    loss_fi = s.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]})
    print "Loss Function : " + str(loss_fi)

    if loss_nofi == loss_fi:
        passed_bool = True
    else:
        passed_bool = False

    if suppress_out:
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__

    return passed_bool  # set this based on the test requirements (True if the test passed, False otherwise)
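# A minimal sketch (my addition, not part of the original test): the pass/fail check above
# uses exact equality between the baseline loss and the instrumented (injection-free) loss.
# If an instrumented op were ever not bit-exact, a tolerance-based comparison such as this
# hypothetical helper could be used instead; numpy is assumed to be available.
import numpy as np

def outputs_match(baseline, instrumented, rel_tol=1e-6):
    """True if the instrumented (no-injection) output matches the baseline within rel_tol."""
    return np.allclose(baseline, instrumented, rtol=rel_tol)

# Example usage with the values computed in run_test():
#   passed_bool = outputs_match(loss_nofi, loss_fi)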
print("Step " + str(step*batch_size) + ", Minibatch Loss= " + \ "{:.6f}".format(loss) + ", Training Accuracy= " + \ "{:.5f}".format(acc)) print("Optimization Finished!") # Calculate accuracy test_data = testset.data test_label = testset.labels test_seqlen = testset.seqlen print("Testing Accuracy:", \ sess.run(accuracy, feed_dict={x: test_data, y: test_label, seqlen: test_seqlen})) # Add the fault injection code here to instrument the graph fi = ti.TensorFI(sess, name="Dynamic", logLevel=10, disableInjections=True) # Print the graph to the console for debugging # fi.printGraph() # Make the log files in TensorBoard logs_path = "./logs" logWriter = tf.summary.FileWriter(logs_path, sess.graph) # Calculate accuracy for 128 mnist test images (again after instrumenting) correctResult = sess.run(accuracy, feed_dict={ x: test_data, y: test_label, seqlen: test_seqlen })
# Run training loop
with sess.as_default():
    for i in range(100):
        batch = mnist_data.train.next_batch(50)
        train_step.run(feed_dict={img: batch[0], labels: batch[1]})

# Run the model and print the accuracy
from keras.metrics import categorical_accuracy as accuracy

acc_value = accuracy(labels, preds)
with sess.as_default():
    print("Accuracy = ",
          acc_value.eval(feed_dict={
              img: mnist_data.test.images,
              labels: mnist_data.test.labels
          }))
print("Done running model")

# Instrument the graph with TensorFI
fi = ti.TensorFI(sess, logLevel=100)
fi.turnOnInjections()
with sess.as_default():
    print("Accuracy = ",
          acc_value.eval(feed_dict={
              img: mnist_data.test.images,
              labels: mnist_data.test.labels
          }))
print("Done running instrumented model")
def run_test(suppress_out=False): if suppress_out: sys.stdout = open(os.devnull, "w") sys.stderr = open(os.devnull, "w") # Import MINST data from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) # Store layers weight & bias weights = { 'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64])), 'wc2': tf.Variable(tf.random_normal([3, 3, 64, 128])), 'wc3': tf.Variable(tf.random_normal([3, 3, 128, 256])), 'wd1': tf.Variable(tf.random_normal([4 * 4 * 256, 1024])), 'wd2': tf.Variable(tf.random_normal([1024, 1024])), 'out': tf.Variable(tf.random_normal([1024, 10])) } biases = { 'bc1': tf.Variable(tf.random_normal([64])), 'bc2': tf.Variable(tf.random_normal([128])), 'bc3': tf.Variable(tf.random_normal([256])), 'bd1': tf.Variable(tf.random_normal([1024])), 'bd2': tf.Variable(tf.random_normal([1024])), 'out': tf.Variable(tf.random_normal([n_classes])) } # Construct model pred = alex_net(x, weights, biases, keep_prob) # Define loss and optimizer cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y)) optimizer = tf.train.AdamOptimizer( learning_rate=learning_rate).minimize(cost) # Evaluate model correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) # Initializing the variables init = tf.initialize_all_variables() passed_bool = None # Launch the graph with tf.Session() as sess: sess.run(init) step = 1 # Keep training until reach max iterations while step * batch_size < training_iters: batch_xs, batch_ys = mnist.train.next_batch(batch_size) # Fit training using batch data sess.run(optimizer, feed_dict={ x: batch_xs, y: batch_ys, keep_prob: dropout }) if step % display_step == 0: # Calculate batch accuracy acc = sess.run(accuracy, feed_dict={ x: batch_xs, y: batch_ys, keep_prob: 1. }) # Calculate batch loss loss = sess.run(cost, feed_dict={ x: batch_xs, y: batch_ys, keep_prob: 1. }) print "Iter " + str( step * batch_size) + ", Minibatch Loss= " + "{:.6f}".format( loss) + ", Training Accuracy= " + "{:.5f}".format(acc) step += 1 print "Optimization Finished!" # Calculate accuracy for 256 mnist test images acc1 = sess.run(accuracy, feed_dict={ x: mnist.test.images[:256], y: mnist.test.labels[:256], keep_prob: 1. }) print "Testing Accuracy:", acc1 fi = ti.TensorFI(sess, name="lenet", disableInjections=False, logDir=logDir) acc2 = sess.run(accuracy, feed_dict={ x: mnist.test.images[:256], y: mnist.test.labels[:256], keep_prob: 1. }) print "Testing Accuracy:", acc2 if acc1 == acc2: passed_bool = True else: passed_bool = False if suppress_out: sys.stdout = sys.__stdout__ sys.stderr = sys.__stderr__ return passed_bool
pred = tf.arg_min(distance, 0)

accuracy = 0.

# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()

# Start training
with tf.Session() as sess:

    # Run the initializer
    sess.run(init)

    # Add the fault injection code here to instrument the graph
    fi = ti.TensorFI(sess,
                     name="NearestNeighbor",
                     logLevel=50,
                     disableInjections=True)

    # loop over test data
    for i in range(len(Xte)):
        # Get nearest neighbor
        nn_index = sess.run(pred, feed_dict={xtr: Xtr, xte: Xte[i, :]})
        # Get nearest neighbor class label and compare it to its true label
        print("Test", i, "Prediction:", np.argmax(Ytr[nn_index]), \
              "True Class:", np.argmax(Yte[i]))
        # Calculate accuracy
        if np.argmax(Ytr[nn_index]) == np.argmax(Yte[i]):
            accuracy += 1. / len(Xte)

    orgAccuracy = accuracy
    print("Accuracy (Without FI):", accuracy)
# Create 2 variables for keeping track of weights
W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)

# Create placeholders for inputs, and a linear model
x = tf.placeholder(tf.float32)
y = tf.placeholder(tf.float32)
linear_model = W * x + b
squared_deltas = tf.square(linear_model - y)
loss = tf.reduce_sum(squared_deltas)
init = tf.global_variables_initializer()

# Create a session, initialize variables and run the linear model
s = tf.compat.v1.Session()
print("Initial : ", s.run(init))
print("Linear Model : ", s.run(linear_model, {x: [1, 2, 3, 4]}))
print("Loss Function : ", s.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))

# Instrument the session
fi = ti.TensorFI(s)

# Create a log for visualizing in TensorBoard
logs_path = "./logs"
logWriter = tf.summary.FileWriter(logs_path, s.graph)

# Initialize variables and run the linear model again (instrumented)
print("Initial : ", s.run(init))
print("Linear Model : ", s.run(linear_model, {x: [1, 2, 3, 4]}))
print("Loss Function : ", s.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))
def run_test(suppress_out=False):
    if suppress_out:
        sys.stdout = open(os.devnull, "w")
        sys.stderr = open(os.devnull, "w")

    # Import MNIST data
    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

    # In this example, we limit mnist data
    Xtr, Ytr = mnist.train.next_batch(5000)  # 5000 for training (nn candidates)
    Xte, Yte = mnist.test.next_batch(200)  # 200 for testing

    # tf Graph Input
    xtr = tf.placeholder("float", [None, 784])
    xte = tf.placeholder("float", [784])

    # Nearest Neighbor calculation using L1 Distance
    # Calculate L1 Distance
    distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))),
                             reduction_indices=1)
    # Prediction: Get min distance index (Nearest neighbor)
    pred = tf.arg_min(distance, 0)

    accuracy = 0.

    # Initialize the variables (i.e. assign their default value)
    init = tf.global_variables_initializer()

    # Start training
    with tf.Session() as sess:

        # Run the initializer
        sess.run(init)

        # Add the fault injection code here to instrument the graph
        # We start injecting the fault right away here unlike earlier
        fi = ti.TensorFI(sess, name="NearestNeighbor", logLevel=50, logDir=logDir)

        # loop over test data
        for i in range(len(Xte)):
            # Get nearest neighbor
            nn_index = sess.run(pred, feed_dict={xtr: Xtr, xte: Xte[i, :]})
            # Get nearest neighbor class label and compare it to its true label
            print "Test " + str(i) + ", Prediction: " + str(np.argmax(Ytr[nn_index])) + \
                  ", True Class: " + str(np.argmax(Yte[i]))
            # Calculate accuracy
            if np.argmax(Ytr[nn_index]) == np.argmax(Yte[i]):
                accuracy += 1. / len(Xte)

        print "Accuracy:" + str(accuracy)

        # Make the log files in TensorBoard
        logs_path = "./logs"
        logWriter = tf.summary.FileWriter(logs_path, sess.graph)

    passed_bool = True

    if suppress_out:
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__

    return passed_bool  # set this based on the test requirements (True if the test passed, False otherwise)
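# Hedged sketch (my addition): the TensorFlow graph above computes a 1-nearest-neighbour
# prediction under the L1 distance. The same computation in plain numpy, for reference,
# looks like this (train_images is (N, 784), labels are one-hot, test_image is (784,)):
import numpy as np

def nn_predict_l1(train_images, train_labels_onehot, test_image):
    """Predicted class of test_image, taken from its L1-nearest training image."""
    distances = np.sum(np.abs(train_images - test_image), axis=1)
    nn_index = np.argmin(distances)
    return np.argmax(train_labels_onehot[nn_index])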
pred = tf.arg_min(distance, 0)

accuracy = 0.

# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()

# Start training
with tf.Session() as sess:

    # Run the initializer
    sess.run(init)

    # Add the fault injection code here to instrument the graph
    # We start injecting the fault right away here unlike earlier
    fi = ti.TensorFI(sess, name="NearestNeighbor", logLevel=50)

    # loop over test data
    for i in range(len(Xte)):
        # Get nearest neighbor
        nn_index = sess.run(pred, feed_dict={xtr: Xtr, xte: Xte[i, :]})
        # Get nearest neighbor class label and compare it to its true label
        print("Test", i, "Prediction:", np.argmax(Ytr[nn_index]), \
              "True Class:", np.argmax(Yte[i]))
        # Calculate accuracy
        if np.argmax(Ytr[nn_index]) == np.argmax(Yte[i]):
            accuracy += 1. / len(Xte)

    print("Accuracy:", accuracy)

    # Make the log files in TensorBoard
    logs_path = "./logs"
def run_test(suppress_out=False):
    if suppress_out:
        sys.stdout = open(os.devnull, "w")
        sys.stderr = open(os.devnull, "w")

    # Create 2 variables for keeping track of weights
    W = tf.Variable([.3], dtype=tf.float32)
    b = tf.Variable([-.3], dtype=tf.float32)

    # Create a placeholder for inputs, and a linear model
    x = tf.placeholder(tf.float32)
    y = tf.placeholder(tf.float32)
    linear_model = W * x + b

    # Calculate the error as the sum of squares of the deviations from the linear model
    squared_deltas = tf.square(linear_model - y)
    error = tf.reduce_sum(squared_deltas)

    # Initialize a gradient descent optimizer to minimize errors
    optimizer = tf.train.GradientDescentOptimizer(0.01)
    train = optimizer.minimize(error)

    # Training data for x and y
    x_train = [1, 2, 3, 4]
    y_train = [0, -1, -2, -3]

    # Create a session, initialize variables
    s = tf.Session()
    init = tf.global_variables_initializer()
    s.run(init)

    # Run the initial model
    curr_W, curr_b, curr_error = s.run([W, b, error], {x: x_train, y: y_train})
    print "After initialization\tW: " + str(curr_W) + " b: " + str(curr_b) + " error: " + str(curr_error)

    # Iterate to train the model
    steps = 1000
    for i in range(steps):
        s.run(train, {x: x_train, y: y_train})

    curr_W, curr_b, curr_error = s.run([W, b, error], {x: x_train, y: y_train})
    print "No injections\tW: " + str(curr_W) + " b: " + str(curr_b) + " error: " + str(curr_error)

    # Instrument the session
    fi = ti.TensorFI(s, logDir=logDir)

    # Create a log for visualizing in TensorBoard (during training)
    logs_path = "./logs"
    logWriter = tf.summary.FileWriter(logs_path, s.graph)

    # Turn off the injections during the first run
    fi.turnOffInjections()

    # Run the trained model without fault injections
    curr_W, curr_b, curr_error = s.run([W, b, error], {x: x_train, y: y_train})
    curr_W_A = curr_W
    curr_b_A = curr_b
    curr_error_A = curr_error
    print "Before injections\tW: " + str(curr_W) + " b: " + str(curr_b) + " error: " + str(curr_error)

    # Turn on the injections during running
    fi.turnOnInjections()

    # Run the trained model with the fault injected functions from the cached run
    curr_W, curr_b, curr_error = s.run(useCached=True)
    curr_W_B = curr_W
    curr_b_B = curr_b
    curr_error_B = curr_error
    print "After injections\tW: " + str(curr_W) + " b: " + str(curr_b) + " error: " + str(curr_error)

    if suppress_out:
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__

    if curr_W_A == curr_W_B and curr_b_A == curr_b_B:
        passed_bool = True
    else:
        passed_bool = False

    return passed_bool
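# Hedged sketch (my addition): curr_W_A, curr_W_B and friends are single-element numpy arrays,
# so an equality check that is explicit about array comparison may read more clearly than `==`:
import numpy as np

def params_unchanged(w_before, b_before, w_after, b_after):
    """True if the weights and bias read back after the injected run equal the originals."""
    return np.array_equal(w_before, w_after) and np.array_equal(b_before, b_after)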
print("Testing... (Mean square loss Comparison)") accuracy = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * test_X.shape[0]), testing_cost = sess.run(accuracy, feed_dict={ X: test_X, Y: test_Y }) # same function as cost above print("Testing cost=", testing_cost) print("Absolute mean square loss difference:", abs(training_cost - testing_cost)) # Calculate accuracy (before fault injections) print("Accuracy:", sess.run(accuracy, feed_dict={X: test_X, Y: test_Y})) # Instrument the graph for fault injection fi = ti.TensorFI(sess, name="linearReg", logLevel=50, disableInjections=True) # Calculate accuracy (with no fault injections) accuracy_ok = sess.run(accuracy, feed_dict={X: test_X, Y: test_Y}) print("Accuracy (no injections):", accuracy_ok) accuracies_ok.append(accuracy_ok) # Now do the fault injections fi.turnOnInjections() for trial in range(num_trials): # Calculate accuracy (with fault injections) accuracy_faulty = sess.run(accuracy, feed_dict={X: test_X, Y: test_Y}) print("Accuracy (with injections):", accuracy_faulty)
def main(): global isTrain global isTest # you need to first train the model if (isTrain): train_start = time() for i in range(_EPOCH): print("\nEpoch: {}/{}\n".format((i + 1), _EPOCH)) train(i) hours, rem = divmod(time() - train_start, 3600) minutes, seconds = divmod(rem, 60) mes = "Best accuracy pre session: {:.2f}, time: {:0>2}:{:0>2}:{:05.2f}" print(mes.format(global_accuracy, int(hours), int(minutes), seconds)) # after the model is trained, you can perform fault injection if (isTest): # we use the inputs that can be correctly identified by the model for FI tx = test_x[:50, :] ty = test_y[:50, :] preds = sess.run(y_pred_cls, feed_dict={x: tx, y: ty}) correct = (np.argmax(ty, axis=1) == preds) correctIndex = np.argwhere(correct == True) correctIndex = correctIndex.flatten( ) # index of inputs that can be correctly identified X = tx Y = ty fi = ti.TensorFI(sess, name="Perceptron", logLevel=50, disableInjections=False) # save FI results into file, "eachRes" saves each FI result, "resFile" saves SDC rate eachRes = open("alex-binEach.csv", "a") resFile = open('alex-binFI.csv', "a") correct = [] # inject 10 inputs for i in range(10): # construct single input tx = X[correctIndex[i], :] ty = Y[correctIndex[i], :] tx = tx.reshape(1, 3072) ty = ty.reshape(1, 10) trial = 0 # initiliaze for binary FI ti.faultTypes.initBinaryInjection() while (ti.faultTypes.isKeepDoingFI): preds = sess.run(y_pred_cls, feed_dict={x: tx, y: ty}) acy = (np.argmax(ty, axis=1) == preds)[0] # you need to feedback the FI result to guide the next FI for binary search if (acy == True): # FI does not result in SDC ti.faultTypes.sdcFromLastFI = False else: ti.faultTypes.sdcFromLastFI = True # if FI on the current data item, you might want to log the sdc bound for the bits of 0 or 1 # (optional), if you just want to measure the SDC rate, you can access the variable of "ti.faultTypes.sdcRate" if (ti.faultTypes.isDoneForCurData): eachRes.write( ` ti.faultTypes.sdc_bound_0 ` + "," + ` ti.faultTypes.sdc_bound_1 ` + ",") # initialize the binary FI for next data item. ti.faultTypes.initBinaryInjection(isFirstTime=False) trial += 1 print(i, trial) eachRes.write("\n") print("sdc", ti.faultTypes.sdcRate, "fi times: ", ti.faultTypes.fiTime) resFile.write( ` ti.faultTypes.sdcRate ` + "," + ` ti.faultTypes.fiTime ` + "\n")
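# Conceptual sketch (my addition, not TensorFI code): the binary FI loop above narrows down,
# per data item, the bit position at which injections start causing SDCs, under the usual
# assumption that flipping a higher-order bit is at least as damaging as flipping a lower one.
# causes_sdc is an assumed callable: causes_sdc(bit) -> True if flipping that bit causes an SDC.
def find_sdc_boundary(causes_sdc, num_bits=32):
    """Smallest bit position (0 = LSB) whose flip causes an SDC, or num_bits if none does,
    assuming that if bit b causes an SDC then every higher-order bit does too."""
    lo, hi = 0, num_bits
    while lo < hi:
        mid = (lo + hi) // 2
        if causes_sdc(mid):
            hi = mid       # the boundary is at mid or below
        else:
            lo = mid + 1   # the boundary is above mid
    return lo

# For a single value, the fraction of SDC-causing bits is then roughly
# (num_bits - find_sdc_boundary(...)) / float(num_bits).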
def main(_): if FLAGS.self_test: print('Running self-test.') train_data, train_labels = fake_data(256) validation_data, validation_labels = fake_data(EVAL_BATCH_SIZE) test_data, test_labels = fake_data(EVAL_BATCH_SIZE) num_epochs = 1 else: # Get the data. train_data_filename = maybe_download('train-images-idx3-ubyte.gz') train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz') test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz') test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz') # Extract it into numpy arrays. train_data = extract_data(train_data_filename, 60000) train_labels = extract_labels(train_labels_filename, 60000) test_data = extract_data(test_data_filename, 10000) test_labels = extract_labels(test_labels_filename, 10000) # Generate a validation set. validation_data = train_data[:VALIDATION_SIZE, ...] validation_labels = train_labels[:VALIDATION_SIZE] train_data = train_data[VALIDATION_SIZE:, ...] train_labels = train_labels[VALIDATION_SIZE:] num_epochs = NUM_EPOCHS train_size = int(train_labels.shape[0]) print("Training size ", train_size) # This is where training samples and labels are fed to the graph. # These placeholder nodes will be fed a batch of training data at each # training step using the {feed_dict} argument to the Run() call below. train_data_node = tf.placeholder(data_type(), shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)) train_labels_node = tf.placeholder(tf.int64, shape=(BATCH_SIZE, )) eval_data = tf.placeholder(data_type(), shape=(EVAL_BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)) # The variables below hold all the trainable weights. They are passed an # initial value which will be assigned when we call: # {tf.global_variables_initializer().run()} depth1 = 32 conv1_weights = tf.Variable( tf.truncated_normal( [5, 5, NUM_CHANNELS, depth1], # 5x5 filter, depth 32. stddev=0.1, seed=SEED, dtype=data_type())) conv1_biases = tf.Variable(tf.zeros([depth1], dtype=data_type())) depth2 = 64 conv2_weights = tf.Variable( tf.truncated_normal([5, 5, depth1, depth2], stddev=0.1, seed=SEED, dtype=data_type())) conv2_biases = tf.Variable( tf.constant(0.1, shape=[depth2], dtype=data_type())) depth3 = 512 fc1_weights = tf.Variable( # fully connected, depth 512. tf.truncated_normal( [IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * depth2, depth3], stddev=0.1, seed=SEED, dtype=data_type())) fc1_biases = tf.Variable( tf.constant(0.1, shape=[depth3], dtype=data_type())) fc2_weights = tf.Variable( tf.truncated_normal([depth3, NUM_LABELS], stddev=0.1, seed=SEED, dtype=data_type())) fc2_biases = tf.Variable( tf.constant(0.1, shape=[NUM_LABELS], dtype=data_type())) # We will replicate the model structure for the training subgraph, as well # as the evaluation subgraphs, while sharing the trainable parameters. def model(data, train=False): """The Model definition.""" # 2D convolution, with 'SAME' padding (i.e. the output feature map has # the same size as the input). Note that {strides} is a 4D array whose # shape matches the data layout: [image index, y, x, depth]. # conv output : (samples, rows, cols, channels) conv = tf.nn.conv2d(data, conv1_weights, strides=[1, 1, 1, 1], padding='SAME') # Bias and rectified linear non-linearity. tanh = tf.nn.relu(tf.nn.bias_add(conv, conv1_biases)) # Max pooling. The kernel size spec {ksize} also follows the layout of # the data. Here we have a pooling window of 2, and a stride of 2. 
pool = tf.nn.max_pool(tanh, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') conv = tf.nn.conv2d(pool, conv2_weights, strides=[1, 1, 1, 1], padding='SAME') tanh = tf.nn.relu(tf.nn.bias_add(conv, conv2_biases)) pool = tf.nn.max_pool(tanh, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') # Reshape the feature map cuboid into a 2D matrix to feed it to the # fully connected layers. pool_shape = pool.get_shape().as_list() reshape = tf.reshape( pool, [pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]]) # Fully connected layer. Note that the '+' operation automatically # broadcasts the biases. hidden = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases) # Add a 50% dropout during training only. Dropout also scales # activations such that no rescaling is needed at evaluation time. if train: hidden = tf.nn.dropout(hidden, 0.5, seed=SEED) return tf.matmul(hidden, fc2_weights) + fc2_biases # Training computation: logits + cross-entropy loss. logits = model(train_data_node, True) loss = tf.reduce_mean( tf.nn.sparse_softmax_cross_entropy_with_logits( labels=train_labels_node, logits=logits)) # L2 regularization for the fully connected parameters. regularizers = (tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) + tf.nn.l2_loss(fc2_weights) + tf.nn.l2_loss(fc2_biases)) # Add the regularization term to the loss. loss += 5e-4 * regularizers # Optimizer: set up a variable that's incremented once per batch and # controls the learning rate decay. batch = tf.Variable(0, dtype=data_type()) # Decay once per epoch, using an exponential schedule starting at 0.01. learning_rate = tf.train.exponential_decay( 0.01, # Base learning rate. batch * BATCH_SIZE, # Current index into the dataset. train_size, # Decay step. 0.95, # Decay rate. staircase=True) # Use simple momentum for the optimization. optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9).minimize(loss, global_step=batch) # Predictions for the current training minibatch. train_prediction = tf.nn.softmax(logits) global eval_prediction # Predictions for the test and validation, which we'll compute less often. eval_prediction = tf.nn.softmax(model(eval_data)) " Use one batch for the whole test dataset" # Small utility function to evaluate a dataset by feeding batches of data to # {eval_data} and pulling the results from {eval_predictions}. # Saves memory and enables this to run on smaller GPUs. def eval_in_batches(data, sess, eval_batch_size=EVAL_BATCH_SIZE): """Get all predictions for a dataset by running it in small batches.""" size = data.shape[0] if size < eval_batch_size: raise ValueError("batch size for evals larger than dataset: %d" % size) predictions = numpy.ndarray(shape=(size, NUM_LABELS), dtype=numpy.float32) for begin in xrange(0, size, eval_batch_size): end = begin + eval_batch_size if end <= size: predictions[begin:end, :] = sess.run( eval_prediction, feed_dict={eval_data: data[begin:end, ...]}) else: batch_predictions = sess.run( eval_prediction, feed_dict={eval_data: data[-eval_batch_size:, ...]}) predictions[begin:, :] = batch_predictions[begin - size:, :] return predictions checkpointer = tf.train.Saver() _SAVE_PATH_ = 'checkpoint/lenet.ckpt' # Create a local session to run the training. start_time = time.time() sess = tf.Session() ''' # train the model learning = open("learning_progress.csv" , "a") # Run all the initializers to prepare the trainable parameters. tf.global_variables_initializer().run() print('Initialized!') # Loop through training steps. 
for step in xrange(int(num_epochs * train_size) // BATCH_SIZE): # Compute the offset of the current minibatch in the data. # Note that we could use better randomization across epochs. offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE) batch_data = train_data[offset:(offset + BATCH_SIZE), ...] batch_labels = train_labels[offset:(offset + BATCH_SIZE)] # This dictionary maps the batch data (as a numpy array) to the # node in the graph it should be fed to. feed_dict = {train_data_node: batch_data, train_labels_node: batch_labels} # Run the optimizer to update weights. sess.run(optimizer, feed_dict=feed_dict) # print some extra information once reach the evaluation frequency if step % EVAL_FREQUENCY == 0: # fetch some extra nodes' data l, lr, predictions = sess.run([loss, learning_rate, train_prediction], feed_dict=feed_dict) elapsed_time = time.time() - start_time start_time = time.time() print('Step %d (epoch %.2f), %.1f ms' % (step, float(step) * BATCH_SIZE / train_size, 1000 * elapsed_time / EVAL_FREQUENCY)) # print('Minibatch loss: %.3f, learning rate: %.6f' % (l, lr)) # print('Minibatch error: %.1f%%' % error_rate(predictions, batch_labels)) val_error , _= error_rate(eval_in_batches(validation_data, sess), validation_labels) print('Val error: %.1f%%' % val_error) learning.write(`float(step) * BATCH_SIZE / train_size` + "," + `step` + "," + `val_error` + "\n" ) sys.stdout.flush() checkpointer.save(sess, save_path=_SAVE_PATH_) ''' "restore the trained model" checkpointer.restore(sess, save_path=_SAVE_PATH_) "============================================================================================================" "================= Begin to insert restriction on selective layers ========================================" "============================================================================================================" # get all the operators in the graph ops = [ tensor for op in sess.graph.get_operations() for tensor in op.values() ] graph_def = sess.graph.as_graph_def() def get_op_dependency(op): "get all the node that precedes the target op" cur_op = [] #op = sess.graph.get_tensor_by_name("ranger_11/ranger_10/ranger_9/ranger_8/ranger_7/ranger_6/ranger_5/ranger_4/ranger_3/ranger_2/ranger_1/ranger/Relu_5:0").op a = open('alex-dep.txt', "w") cur_op.append(op) next_op = [] while (not (next_op == [] and cur_op == [])): next_op = [] for each in cur_op: printline = False for inp in each.inputs: printline = True #print(inp) a.write(str(inp) + "\n") next_op.append(inp.op) if (printline): #print('') a.write("\n\n") cur_op = next_op def get_target_scope_prefix(scope_name, dup_cnt, dummy_scope_name, dummy_graph_dup_cnt): "get the scope prefix of the target path (the latest duplicated path)" target_graph_prefix = "" # the scope prefix of the latest path if (dup_cnt == 0): target_graph_prefix = "" # elif (dup_cnt == 1): target_graph_prefix = str(scope_name + "/") # e.g., ranger/relu:0 if (dummy_graph_dup_cnt == 1): target_graph_prefix = dummy_scope_name + "/" + target_graph_prefix # e.g., dummy/ranger/relu:0 else: target_graph_prefix = str(scope_name + "/") if (dummy_graph_dup_cnt > 0): # e.g., dummy/ranger/relu:0 target_graph_prefix = dummy_scope_name + "/" + target_graph_prefix dummy_graph_dup_cnt -= 1 for i in range(1, dup_cnt): target_graph_prefix = scope_name + "/" + target_graph_prefix # e.g., ranger/dummy/ranger/ relu if (dummy_graph_dup_cnt > 0): target_graph_prefix = dummy_scope_name + "/" + target_graph_prefix # e.g., dummy/ranger/dummy/ranger/relu:0 dummy_graph_dup_cnt -= 
1 return target_graph_prefix def restore_trainable_var(sess, scope_name, dup_cnt, train_var, dummy_scope_name, dummy_graph_dup_cnt, OLD_SESS): "need to map back the variable values to the ones under the new scope" target_graph_prefix = get_target_scope_prefix(scope_name, dup_cnt, dummy_scope_name, dummy_graph_dup_cnt) tmp = [] for each in train_var: tmp.append( tf.assign( sess.graph.get_tensor_by_name(target_graph_prefix + each.name), OLD_SESS.run(OLD_SESS.graph.get_tensor_by_name( each.name)))) sess.run(tmp) def get_op_with_prefix(op_name, dup_cnt, scope_name, dummy_graph_dup_cnt, dummy_scope_name): "Need to call this function to return the name of the ops under the NEW graph (with scope prefix)" "return the name of the duplicated op with prefix, a new scope prefix upon each duplication" op_name = get_target_scope_prefix(scope_name, dup_cnt, dummy_scope_name, dummy_graph_dup_cnt) + op_name return op_name import re def modify_graph(sess, dup_cnt, scope_name, prefix_of_bound_op, dummy_graph_dup_cnt, dummy_scope_name): "Modify the graph def to: 1) remove the nodes from older paths (we only need to keep the latest path)" " and 2) modify the input dependency to only associate with the latest path" graph_def = sess.graph.as_graph_def() target_graph_prefix = get_target_scope_prefix(scope_name, dup_cnt, dummy_scope_name, dummy_graph_dup_cnt) #print('target prefix ==> ', target_graph_prefix, dup_cnt) # Delete nodes from the redundant paths, we only want the most recent path, otherwise the size of graph will explode nodes = [] for node in graph_def.node: if target_graph_prefix in node.name and prefix_of_bound_op not in node.name: # ops to be kept, otherwise removed from graph nodes.append(node) elif (prefix_of_bound_op in node.name): if (dup_cnt != graph_dup_cnt): "this part should keep the new op from the most recent duplication (with lesser prefix)" if (target_graph_prefix not in node.name ): # remove dummy nodes like dummy/op nodes.append(node) else: nodes.append(node) # remove dummy nodes like dummy/dummy/relu if (dummy_scope_name + "/" + dummy_scope_name + "/" in node.name): nodes.remove(node) #print(' ', dup_cnt, dummy_graph_dup_cnt) mod_graph_def = tf.GraphDef() mod_graph_def.node.extend(nodes) "For the newly created op, we need to rewire the input dependency so that it only relies on the latest graph" "because we've only kpet the latest graph in the modified graphdef. 
" "This is for the restriction op, e.g., tf.maximum(relu_1, 100), where relu_1 is from the PREVIOUS graph" # Delete references to deleted nodes, for node in mod_graph_def.node: inp_names = [] if (prefix_of_bound_op in node.name): # only for the restriction op for inp in node.input: if prefix_of_bound_op in inp or target_graph_prefix in inp: inp_names.append(inp) else: #print(node.name, inp, ' ---> ', (scope_name + "_" + str(dup_cnt-1) + "/" + inp) ) "here because we copy the graghdef from the PREVIOUS graph, it has dependency to the PREVIOUS graph" "so we need to remove this redepency by using input from only the latest path, e.g., test/x3, test_1/test/x3, the" "former will be removed in the above pruning, so we need to replace x3 input as test_1/test/x3 from the current graph" # change the scope prefix to be the one from the latest path bfname = inp if (scope_name in inp): regexp = re.escape(scope_name) + "_\d+/|" + re.escape(scope_name) + "/|" + \ re.escape(dummy_scope_name) + "_\d+/|" + re.escape(dummy_scope_name) + "/" # pattern for "ranger_1/" or "ranger" inp_names.append(target_graph_prefix + re.sub(regexp, "", inp)) afname = target_graph_prefix + re.sub( regexp, "", inp) else: inp_names.append(target_graph_prefix + inp) afname = target_graph_prefix + inp del node.input[:] # delete all the inputs node.input.extend( inp_names) # keep the modified input dependency return mod_graph_def def printgraphdef(graphdef): for each in graphdef.node: print(each.name) def printgraph(sess): ops = [ tensor for op in sess.graph.get_operations() for tensor in op.values() ] #a = open("op.txt", "a") for n in ops: #a.write(n.name + "\n") print(n.name) "NOTE: if you rename the name of the opeartor, you'll need to sepcify it in the following" "below gives default op name from TensorFlow" act = "Relu" op_follow_act = ["MaxPool", "Reshape", "AvgPool"] special_op_follow_act = "concat" up_bound = [3., 4., 9.] # upper bound for restriction low_bound = [0., 0., 0.] 
# low bound for restriction PREFIX = 'ranger' # scope name in the graph DUMMY_PREFIX = 'dummy' # graph_dup_cnt = 0 # count the number of iteration for duplication, used to track the scope prefix of the new op dummy_graph_dup_cnt = 0 # count the num of dummy graph duplication (for resetting the default graph to contain only the latest path) op_cnt = 0 # count num of op act_cnt = 0 # count num of act check_follow = False # flag for checking the following op (when the current op is ACT) op_to_keep = [ ] # ops to keep while duplicating the graph (we remove the irrelevant ops before duplication, otherwise the graph size will explode) new_op_prefix = "bound_op_prefix" # prefix of the newly created ops for range restriction OLD_SESS = sess # keep the old session train_var = tf.trainable_variables() # all vars before duplication # get all the operators in the graph ops = [ tensor for op in sess.graph.get_operations() for tensor in op.values() ] graph_def = sess.graph.as_graph_def() "iterate each op in the graph and insert bounding ops" for cur_op in ops: if (act in cur_op.name and ("gradients" not in cur_op.name)): # bounding with tf.name_scope( new_op_prefix ) as scope: # the restricion ops will have the special scope prefix name bound_tensor = sess.graph.get_tensor_by_name( get_op_with_prefix(cur_op.name, graph_dup_cnt, PREFIX, dummy_graph_dup_cnt, DUMMY_PREFIX)) print("bounding: ", bound_tensor, up_bound[act_cnt]) rest = tf.maximum(bound_tensor, low_bound[act_cnt]) rest = tf.minimum(rest, up_bound[act_cnt]) op_to_be_replaced = get_op_with_prefix(cur_op.name, graph_dup_cnt, PREFIX, dummy_graph_dup_cnt, DUMMY_PREFIX) # delete redundant paths in graphdef and modify the input dependency to be depending on the latest path only truncated_graphdef = modify_graph(sess, graph_dup_cnt, PREFIX, new_op_prefix, dummy_graph_dup_cnt, DUMMY_PREFIX) # import the modified graghdef (inserted with bouding ops) into the current graph tf.import_graph_def(truncated_graphdef, name=PREFIX, input_map={op_to_be_replaced: rest}) graph_dup_cnt += 1 "reset the graph to contain only the duplicated path" truncated_graphdef = modify_graph(sess, graph_dup_cnt, PREFIX, new_op_prefix, dummy_graph_dup_cnt, DUMMY_PREFIX) tf.reset_default_graph() sess = tf.Session() sess.as_default() tf.import_graph_def(truncated_graphdef, name=DUMMY_PREFIX) dummy_graph_dup_cnt += 1 check_follow = True # this is a ACT, so we need to check the following op act_cnt = (act_cnt + 1) % len( up_bound ) # count the number of visited ACT (used for the case where there are two copies of ops (e.g., LeNet), one for training and one testing) # this will check the next operator that follows the ACT op elif (check_follow): keep_rest = False # check whether the following op needs to be bounded # this is the case for Maxpool, Avgpool and Reshape for each in op_follow_act: if ( each in cur_op.name and "/shape" not in cur_op.name ): #the latter condition is for checking case like "Reshape_1/shape:0", this shouldn't be bounded keep_rest = True low = low_bound[act_cnt - 1] up = up_bound[act_cnt - 1] break # this is the case for ConCatV2, "axis" is the parameter to the actual op concat if (special_op_follow_act in cur_op.name and ("axis" not in cur_op.name) and ("values" not in cur_op.name)): keep_rest = True low = np.minimum(low_bound[act_cnt - 1], low_bound[act_cnt - 2]) up = np.maximum(up_bound[act_cnt - 1], up_bound[act_cnt - 2]) "bound the values, using either float (default) or int" if (keep_rest): try: with tf.name_scope( new_op_prefix ) as scope: # the 
restricion ops will have the special scope prefix name bound_tensor = sess.graph.get_tensor_by_name( get_op_with_prefix(cur_op.name, graph_dup_cnt, PREFIX, dummy_graph_dup_cnt, DUMMY_PREFIX)) print("bounding: ", bound_tensor) rest = tf.maximum(bound_tensor, low) rest = tf.minimum(rest, up) except: with tf.name_scope( new_op_prefix ) as scope: # the restricion ops will have the special scope prefix name bound_tensor = sess.graph.get_tensor_by_name( get_op_with_prefix(cur_op.name, graph_dup_cnt, PREFIX, dummy_graph_dup_cnt, DUMMY_PREFIX)) print("bounding: ", bound_tensor) rest = tf.maximum(bound_tensor, int(low)) rest = tf.minimum(rest, int(up)) #print(cur_op, act_cnt) #print(rest.op.node_def,' -----') "replace the input to the tensor, at the palce where we place Ranger, e.g., Ranger(ReLu), then we replace Relu" op_to_be_replaced = get_op_with_prefix(cur_op.name, graph_dup_cnt, PREFIX, dummy_graph_dup_cnt, DUMMY_PREFIX) truncated_graphdef = modify_graph(sess, graph_dup_cnt, PREFIX, new_op_prefix, dummy_graph_dup_cnt, DUMMY_PREFIX) tf.import_graph_def(truncated_graphdef, name=PREFIX, input_map={op_to_be_replaced: rest}) graph_dup_cnt += 1 "reset the graph to contain only the duplicated path" truncated_graphdef = modify_graph(sess, graph_dup_cnt, PREFIX, new_op_prefix, dummy_graph_dup_cnt, DUMMY_PREFIX) tf.reset_default_graph() sess = tf.Session() sess.as_default() tf.import_graph_def(truncated_graphdef, name=DUMMY_PREFIX) dummy_graph_dup_cnt += 1 # check the ops, but not to bound the ops else: check_follow = False # the default setting is not to check the next op # the following ops of the listed operaions will be kept tracking, # becuase the listed ops do not perform actual computation, so the restriction bound still applies oblivious_ops = [ "Const", "truncated_normal", "Variable", "weights", "biases", "dropout" ] if( ("Reshape" in cur_op.name and "/shape" in cur_op.name) or \ ("concat" in cur_op.name and ("axis" in cur_op.name or "values" in cur_op.name) ) ): check_follow = True # we need to check the following op of Reshape/shape:0, concat/axis (these are not the actual reshape/concat ops) else: for ea in oblivious_ops: # we need to check the op follows the listed ops if (ea in cur_op.name): check_follow = True op_cnt += 1 # we need to call modify_graph to modify the input dependency for finalization truncated_graphdef = modify_graph(sess, graph_dup_cnt, PREFIX, new_op_prefix, dummy_graph_dup_cnt, DUMMY_PREFIX) tf.import_graph_def(truncated_graphdef, name=PREFIX) graph_dup_cnt += 1 "reset the graph to contain only the duplicated path" truncated_graphdef = modify_graph(sess, graph_dup_cnt, PREFIX, new_op_prefix, dummy_graph_dup_cnt, DUMMY_PREFIX) tf.reset_default_graph() sess = tf.Session() sess.as_default() tf.import_graph_def(truncated_graphdef, name=DUMMY_PREFIX) dummy_graph_dup_cnt += 1 "restore the vars from the old sess to the new sess" restore_trainable_var(sess, PREFIX, graph_dup_cnt, train_var, DUMMY_PREFIX, dummy_graph_dup_cnt, OLD_SESS) print("Finish graph modification!") print('') "============================================================================================================" "============================================================================================================" global eval_prediction OP_FOR_EVAL = eval_prediction # op to be eval new_op_for_eval_name = get_op_with_prefix(OP_FOR_EVAL.op.name, graph_dup_cnt, PREFIX, dummy_graph_dup_cnt, DUMMY_PREFIX) print(new_op_for_eval_name, 'op to be eval') new_op_for_eval = 
sess.graph.get_tensor_by_name(new_op_for_eval_name + ":0") eval_prediction = new_op_for_eval # need the evaluate the op from the NEW graph # replace the input op from the NEW graph eval_data = sess.graph.get_tensor_by_name( get_op_with_prefix(eval_data.op.name, graph_dup_cnt, PREFIX, dummy_graph_dup_cnt, DUMMY_PREFIX) + ":0") # you can call this function to check the depenency of the final operator # you should see the bouding ops are inserted into the dependency # NOTE: the printing might contain duplicated output #get_op_dependency(new_op_for_eval.op) # we use the inputs that can be correctly identified by the model for FI test_error, indexOfCorrectSample = error_rate( eval_in_batches(test_data, sess), test_labels, True) print('Test error: %.1f%%' % test_error) if FLAGS.self_test: print('test_error', test_error) assert test_error == 0.0, 'expected 0.0 test_error, got %.2f' % ( test_error, ) print("index of samples correctly learned by the model: \n", indexOfCorrectSample[:20]) newData = [] newLab = [] # save FI results into file, "eachRes" saves each FI result, "resFile" saves SDC rate if not os.path.isdir("./reluRes"): os.mkdir("./reluRes") eachRes = open("./reluRes/relu-lenet-eachFI.csv", "a") resFile = open("./reluRes/relu-lenet-randomFI.csv", "a") # Add the fault injection nodes to it fi = ti.TensorFI(sess, logLevel=50, name="convolutional", disableInjections=False) "Here we choose the first 10 correctly-classified samples for injection" "You can also choose any other inputs for injection" index = range(10) # inject fault to the first 10 inputs for i in index: each = indexOfCorrectSample[i] newData = (test_data[each].reshape(1, 28, 28, 1)) newLab = (test_labels[each].reshape(1)) fiCount = 3000 sdcCount = 0. for j in range(fiCount): "IMPORTANT: Make sure you're calling the ops from the NEW graph" "In this example, we already replace the ops to be from the new graph (line 700)" test_error, _ = error_rate(eval_in_batches(newData, sess), newLab, True) # FI results in SDC if (test_error == 100.): sdcCount += 1 eachRes.write( ` 0 ` + ",") else: eachRes.write( ` 1 ` + ",") print(i, j) eachRes.write("\n") print("sdc:", sdcCount / fiCount) resFile.write( ` sdcCount / fiCount ` + "," + ` fiCount ` + "\n")
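# Hedged sketch (my addition, not part of the script): each row written to
# "./reluRes/relu-lenet-eachFI.csv" above is a comma-separated list of per-injection
# outcomes for one input, where 1 means no SDC and 0 means an SDC. A post-processing
# pass such as this one could recover a per-input SDC rate from that file.
def sdc_rates_from_csv(path="./reluRes/relu-lenet-eachFI.csv"):
    """Return one SDC rate per input (per line), computed from the 0/1 outcome columns."""
    rates = []
    with open(path) as f:
        for line in f:
            outcomes = [int(v) for v in line.strip().strip(",").split(",") if v]
            if outcomes:
                rates.append(1.0 - float(sum(outcomes)) / len(outcomes))
    return rates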
print("Training now ...") for step in range(1, num_steps + 1): batch_x, batch_y = mnist.train.next_batch(batch_size) # Run optimization op (backprop) sess.run(train_op, feed_dict={X: batch_x, Y: batch_y}) if step % display_step == 0 or step == 1: # Calculate batch loss and accuracy loss, acc = sess.run([loss_op, accuracy], feed_dict={ X: batch_x, Y: batch_y }) print("Step " + str(step) + ", Minibatch Loss= " + \ "{:.4f}".format(loss) + ", Training Accuracy= " + \ "{:.3f}".format(acc)) print("Training finished! Testing now ...") print("Accuracy (with no injections):", \ accuracy.eval({X: mnist.test.images[:256], Y: mnist.test.labels[:256]})) # Add the fault injection code here to instrument the graph fi = ti.TensorFI(sess, name="Neural Network 4", logLevel=50, disableInjections=False) print("Accuracy (with injections):", \ accuracy.eval({X: mnist.test.images[:256], Y: mnist.test.labels[:256]}))
#!/usr/bin/python
# Example 2 from TensorFlow tutorial

from __future__ import print_function
import tensorflow as tf
import TensorFI as ti

a = tf.placeholder(tf.float32, name="a")
b = tf.placeholder(tf.float32, name="b")
adder = tf.add(a, b, name="adder")  # Use this syntax for name
addTriple = 3 * adder

sess = tf.Session()

# Run the session with scalars and tensors
print(sess.run(addTriple, {a: 3, b: 4.5}))
print(sess.run(addTriple, {a: [3, 1], b: [4, 5]}))

# Instrument the session
fi = ti.TensorFI(sess)

# Run the above session commands with fault injections
print(sess.run(addTriple, {a: 3, b: 4.5}))
print(sess.run(addTriple, {a: [3, 1], b: [4, 5]}))

# Create a log for visualizing in TensorBoard
logs_path = "./logs"
logWriter = tf.summary.FileWriter(logs_path, sess.graph)
def main(): global isTrain global isTest # you need to first train the model if (isTrain): train_start = time() for i in range(_EPOCH): print("\nEpoch: {}/{}\n".format((i + 1), _EPOCH)) train(i) hours, rem = divmod(time() - train_start, 3600) minutes, seconds = divmod(rem, 60) mes = "Best accuracy pre session: {:.2f}, time: {:0>2}:{:0>2}:{:05.2f}" print(mes.format(global_accuracy, int(hours), int(minutes), seconds)) # after the model is trained, you can perform fault injection if (isTest): # we use the inputs that can be correctly identified by the model for FI tx = test_x[:50, :] ty = test_y[:50, :] preds = sess.run(y_pred_cls, feed_dict={x: tx, y: ty}) correct = (np.argmax(ty, axis=1) == preds) correctIndex = np.argwhere(correct == True) correctIndex = correctIndex.flatten() # correctIndex stores the index of inputs that can be correctly identified X = tx Y = ty fi = ti.TensorFI(sess, name="Perceptron", logLevel=50, disableInjections=False) correct = [] # save each random FI result into file resFile = open("alex-ranFI.csv", "a") totalFI = 1000 # number of random FI trials for i in range(10): # construct single input tx = X[correctIndex[i], :] ty = Y[correctIndex[i], :] tx = tx.reshape(1, 3072) ty = ty.reshape(1, 10) for j in range(totalFI): preds = sess.run(y_pred_cls, feed_dict={x: tx, y: ty}) acy = (np.argmax(ty, axis=1) == preds)[0] # FI does not result in SDC if (acy == True): resFile.write( ` 1 ` + ",") else: resFile.write( ` 0 ` + ",") resFile.write("\n") print("data ", i, j, "run") #Full batch eval '''
def run_test(suppress_out=False): import numpy rng = numpy.random if suppress_out: sys.stdout = open(os.devnull, "w") sys.stderr = open(os.devnull, "w") # Parameters learning_rate = 0.01 training_epochs = 1000 display_step = 50 # Training Data train_X = numpy.asarray([ 3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167, 7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1 ]) train_Y = numpy.asarray([ 1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221, 2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3 ]) n_samples = train_X.shape[0] # tf Graph Input X = tf.placeholder("float") Y = tf.placeholder("float") # Set model weights W = tf.Variable(rng.randn(), name="weight") b = tf.Variable(rng.randn(), name="bias") # Construct a linear model pred = tf.add(tf.multiply(X, W), b) # Mean squared error cost = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * n_samples) # Gradient descent # Note, minimize() knows to modify W and b because Variable objects are trainable=True by default optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) # Initialize the variables (i.e. assign their default value) init = tf.global_variables_initializer() # Start training with tf.Session() as sess: # Run the initializer sess.run(init) # Fit all training data for epoch in range(training_epochs): for (x, y) in zip(train_X, train_Y): sess.run(optimizer, feed_dict={X: x, Y: y}) # Display logs per epoch step if (epoch + 1) % display_step == 0: c = sess.run(cost, feed_dict={X: train_X, Y: train_Y}) run_W = sess.run(W) run_b = sess.run(b) print "Epoch: " + str( epoch + 1) + ", cost = " + "{:.9f}".format( c) + ", W = " + str(run_W) + "b=" + str(run_b) print "Optimization Finished!" training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y}) run_W = sess.run(W) run_b = sess.run(b) print "Training cost = " + str(training_cost) + ", W = " + str( run_W) + ", b = " + str(run_b) + "\n" # Testing example, as requested (Issue #2) test_X = numpy.asarray([6.83, 4.668, 8.9, 7.91, 5.7, 8.7, 3.1, 2.1]) test_Y = numpy.asarray( [1.84, 2.273, 3.2, 2.831, 2.92, 3.24, 1.35, 1.03]) print "Testing... (Mean square loss Comparison)" accuracy = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * test_X.shape[0]), testing_cost = sess.run(accuracy, feed_dict={ X: test_X, Y: test_Y }) # same function as cost above print "Testing cost=" + str(testing_cost) print "Absolute mean square loss difference: " + str( abs(training_cost - testing_cost)) # Calculate accuracy (before fault injections) acc = sess.run(accuracy, feed_dict={X: test_X, Y: test_Y}) print "Accuracy:" + str(acc) # Instrument the graph for fault injection fi = ti.TensorFI(sess, name="linearReg", logLevel=30, disableInjections=True, logDir=logDir) # Calculate accuracy (with no fault injections) acc_no = numpy.around(sess.run(accuracy, feed_dict={ X: test_X, Y: test_Y })[0], decimals=7) print "Accuracy (no injections): " + str(acc_no) # Calculate accuracy (with fault injections) fi.turnOnInjections() acc_fi = numpy.around(sess.run(accuracy, feed_dict={ X: test_X, Y: test_Y })[0], decimals=7) print "Accuracy (with injections):" + str(acc_fi) # Make the log files in TensorBoard logs_path = "./logs" logWriter = tf.summary.FileWriter(logs_path, sess.graph) if suppress_out: sys.stdout = sys.__stdout__ sys.stderr = sys.__stderr__ if acc_fi == acc_no: passed_bool = True else: passed_bool = False return passed_bool # set this based on the test requirements (True if the test passed, False otherwise)
#!/usr/bin/python
# Example 0 - Dummy NoOp operation

from __future__ import print_function
import sys
import tensorflow as tf
import TensorFI as ti

node = tf.no_op()
print("Node = ", node)

s = tf.compat.v1.Session()

# Run it first
res1 = s.run([node])
print("res1 = ", res1)

# Instrument the FI session
fi = ti.TensorFI(s, logLevel=0)

# Create a log for visualizing in TensorBoard
logs_path = "./logs"
logWriter = tf.summary.FileWriter(logs_path, s.graph)

# Run it again with fault injection enabled
res2 = s.run([node])
print("res2 = ", res2)
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Run optimization op (backprop)
        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
        if step % display_step == 0 or step == 1:
            # Calculate batch loss and accuracy
            loss, acc = sess.run([loss_op, accuracy],
                                 feed_dict={X: batch_x, Y: batch_y})
            print("Step " + str(step) + ", Minibatch Loss= " + \
                  "{:.4f}".format(loss) + ", Training Accuracy= " + \
                  "{:.3f}".format(acc))

    print("Training Finished!")

    # Add the fault injection code here to instrument the graph
    fi = ti.TensorFI(sess, name="Perceptron", logLevel=50, disableInjections=True)

    correctResult = sess.run(accuracy,
                             feed_dict={X: mnist.test.images, Y: mnist.test.labels})
    print("Testing Accuracy:", correctResult)

    diffFunc = lambda x: math.fabs(x - correctResult)

    # Make the log files in TensorBoard
    logs_path = "./logs"
    logWriter = tf.summary.FileWriter(logs_path, sess.graph)

    # Initialize the number of threads and injections
    numThreads = 5
    numInjections = 100
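# Hedged sketch (my addition): the script above prepares diffFunc, numThreads and numInjections
# for an injection campaign. A plausible single-threaded version of that campaign is shown
# below; runInjection is an assumed callable that performs one injected accuracy evaluation,
# and sdcThreshold is an assumed deviation threshold, neither of which comes from the source.
def run_campaign(runInjection, diffFunc, numInjections=100, sdcThreshold=0.01):
    """Fraction of injected runs whose accuracy deviates from the fault-free result
    by more than sdcThreshold (an SDC-rate-style summary)."""
    sdcs = 0
    for _ in range(numInjections):
        faultyResult = runInjection()
        if diffFunc(faultyResult) > sdcThreshold:
            sdcs += 1
    return float(sdcs) / numInjections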
def run_test(suppress_out=False):
    if suppress_out:
        sys.stdout = open(os.devnull, "w")
        sys.stderr = open(os.devnull, "w")

    ops_passed = []
    ops_failed = []

    for op_type in inputgenMap:
        if op_type == "end_of_ops":
            continue  # end of ops list, ignore

        sys.stdout.write("Testing op_type %s..." % op_type)
        sys.stdout.flush()

        # Create new graph context
        try:
            g = tf.Graph()
            graph_outputs = []
            with g.as_default():
                # generate inputs for op_type
                input_list = inputgenMap[op_type]()
                # loop through the generated inputs and create ops
                for input_set in input_list:
                    graph_outputs.extend(g.create_op(op_type, input_set).outputs)

            with tf.compat.v1.Session(graph=g) as sess:
                result_baseline = sess.run(graph_outputs)

                # instrument with TensorFI
                fi = ti.TensorFI(sess,
                                 disableInjections=True,
                                 name=op_type,
                                 configFileName=confFile,
                                 logDir=logDir)
                result_fi = sess.run(graph_outputs)

                # compare outputs
                passed = True
                for i, item in enumerate(result_fi):
                    if not np.array_equal(result_fi[i], result_baseline[i]):
                        temp_out = "\nFI element " + str(result_fi[i]) + \
                                   " not equal to baseline " + str(result_baseline[i])
                        print temp_out
                        passed = False

                if passed:
                    sys.stdout.write("\rTesting op_type %s... Passed\n" % op_type)
                    sys.stdout.flush()
                    ops_passed.append(op_type)
                else:
                    print "\nTest FAILED for operation " + str(op_type)
                    print "Instrumented graph outputs not equal to original"
                    ops_failed.append(op_type)

        except Exception as e:
            print "\nTest FAILED for operation " + str(op_type)
            print "Exception thrown: " + str(e)
            ops_failed.append(op_type)

    if suppress_out:
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__

    if len(ops_failed) > 0:
        return False, ops_passed, ops_failed
    return True, ops_passed, ops_failed
#saver.restore(sess, checkpoint_path)

#loss_tests = []
acc_tests = []
for iteration in range(1):
    X_batch, y_batch = mnist.test.next_batch(batch_size)
    xx = X_batch.reshape([-1, 28, 28, 1])
    acc_test = sess.run(accuracy, feed_dict={X: xx, y: y_batch})
    #loss_tests.append(loss_test)
    #acc_tests.append(acc_test)
    #print("\rEvaluating the model: {}/{} ({:.1f}%)".format(
    #    iteration, n_iterations_test,
    #    iteration * 100 / n_iterations_test),
    #    end=" " * 10)

#loss_test = np.mean(loss_tests)
acc = np.mean(acc_test)
print("Testing Accuracy:", acc)

fi = ti.TensorFI(sess, logLevel=10, disableInjections=False)

acc_test = sess.run(accuracy, feed_dict={X: xx, y: y_batch})
acc_t = np.mean(acc_test)

#print("\rFinal test accuracy: {:.4f}% Loss: {:.6f}".format(
#    acc_test * 100, loss_test))

# Add the fault injection code here to instrument the graph
#fi = ti.TensorFI(sess, logLevel = 10, disableInjections = True)
#print("Accuracy (with injections):", \
#    accuracy.eval({X: X_batch.reshape([-1, 28, 28, 1]), y: y_batch}))
#acc_test = sess.run(accuracy, feed_dict={X: X_batch.reshape([-1, 28, 28, 1]), y: y_batch})

print("Testing Accuracy:", acc_t)
# Testing
# Generator takes noise as input
noise_input = tf.placeholder(tf.float32, shape=[None, latent_dim])

# Rebuild the decoder to create image from noise
decoder = tf.matmul(noise_input, weights['decoder_h1']) + biases['decoder_b1']
decoder = tf.nn.tanh(decoder)
decoder = tf.matmul(decoder, weights['decoder_out']) + biases['decoder_out']
decoder = tf.nn.sigmoid(decoder)

# Insert fault injection code here before image reconstruction
fi = ti.TensorFI(sess,
                 name="VariationalAutoEncoder",
                 logLevel=20,
                 disableInjections=True)

# Make the log files in TensorBoard
logs_path = "./logs"
logWriter = tf.summary.FileWriter(logs_path, sess.graph)

# Building a manifold of generated digits
n = 20
x_axis = np.linspace(-3, 3, n)
y_axis = np.linspace(-3, 3, n)
canvas = np.empty((28 * n, 28 * n))
faulty_canvas = np.empty((28 * n, 28 * n))
for i, yi in enumerate(x_axis):
def main(): import time parser = build_parser() options = parser.parse_args() # Loading image # img_content, orig_shape = imread_resize(options.input) # img_content_shape = (1,) + img_content.shape # Loading ImageNet classes info classes = [] with open('synset_words.txt', 'r') as classes_file: classes = classes_file.read().splitlines() # Loading network data, sqz_mean = load_net('sqz_full.mat') config = tf.ConfigProto(log_device_placement = False) config.gpu_options.allow_growth = True config.gpu_options.allocator_type = 'BFC' g = tf.Graph() sess = tf.Session(config=config) # Building network image = tf.placeholder(dtype=get_dtype_tf(), shape=[None,224,224,3], name="image_placeholder") keep_prob = tf.placeholder(get_dtype_tf()) sqznet = net_preloaded(data, image, 'max', True, keep_prob) fi = ti.TensorFI(sess, logLevel = 50, name = "convolutional", disableInjections=False) top1 = open("top1.csv", "a") top5 = open("top5.csv", "a") "specify the path to the images that you want to inject faults" path = "./fiImg/" files = [] # r=root, d=directories, f = files for r, d, f in os.walk(path): for file in f: files.append(os.path.join(r, file)) ########################################## "provide the labels" label = { './fiImg/ILSVRC2012_val_00000014.JPEG' : 757, './fiImg/ILSVRC2012_val_00006922.JPEG' : 569, './fiImg/ILSVRC2012_val_00000198.JPEG' : 16, './fiImg/ILSVRC2012_val_00001892.JPEG' : 670, './fiImg/ILSVRC2012_val_00000793.JPEG' : 654, './fiImg/ILSVRC2012_val_00001807.JPEG' : 894, './fiImg/ILSVRC2012_val_00000434.JPEG' : 675, './fiImg/ILSVRC2012_val_00000573.JPEG' : 874, './fiImg/ILSVRC2012_val_00000604.JPEG' : 273, './fiImg/ILSVRC2012_val_00000903.JPEG' : 919, } #boundRes = open("outOfBound.csv", "a") #coverageRes = open('coverage.csv', 'a') fiRun = 1000 for img in files: # Loading image img_content, orig_shape = imread_resize(img) img_content = scipy.misc.imresize(img_content, [224,224,3]) "generate label" lab = label[ img ] t1 = 0. t5 = 0. outBound = 0. coverage = 0. for j in range(fiRun): ti.injectFault.outOfBound = False sqznet_results = sess.run(sqznet['classifier_actv'], feed_dict={image: [preprocess(img_content, sqz_mean)], keep_prob: 1.})[0][0][0] pred = (np.argsort(sqznet_results)[::-1])[0:5] if( pred[0] == lab): top1.write(`1` + ",") top5.write(`1` + ",") t1 += 1 t5 += 1 elif(lab in pred[1:]): top5.write(`1` + ",") top1.write(`0` + ",") t5 += 1 else: top1.write(`0` + ",") top5.write(`0` + ",") ''' if( ti.injectFault.outOfBound ): outBound += 1 boundRes.write( ti.injectFault.outOfBound + "," ) if( lab != pred[0] ): coverage += 1 ''' print 'input ', img, 'fi time: ', j, 'top1: ', t1/(j+1), 'top5: ', t5/(j+1) #coverageRes.write(`(fiRun-t1)/fiRun` + "," + `(fiRun-t5)/fiRun` + "," + `coverage/fiRun` + "\n") #boundRes.write("\n") #print 'input ', img, 'fi time: ', j, 'top1: ', t1/(j+1), 'top5: ', t5/(j+1) top1.write("\n") top5.write("\n")
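# Hedged sketch (my addition): the loop above derives top-1/top-5 correctness from the
# classifier activations with np.argsort. Factored out, the same check looks like this;
# scores is a 1-D array of class scores and label is the ground-truth class index.
import numpy as np

def topk_correct(scores, label, k=5):
    """Return (top1_correct, topk_correct) for a single prediction."""
    topk = np.argsort(scores)[::-1][:k]
    return bool(topk[0] == label), bool(label in topk)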
#import cv2
from subprocess import call
import driving_data
import time
import TensorFI as ti
import datetime

sess = tf.InteractiveSession()
saver = tf.train.Saver()
saver.restore(sess, "save/model.ckpt")  # restore the trained model

#img = scipy.misc.imread('steering_wheel_image.jpg',0)
#rows,cols = img.shape
#smoothed_angle = 0

fi = ti.TensorFI(sess, logLevel=50, name="convolutional", disableInjections=True)

# threshold deviation to define SDC
sdcThreshold = 30

# save FI results into file, "eachRes" saves each FI result, "resFile" saves SDC rate
resFile = open(str(sdcThreshold) + "-statistic-binFI.csv", "a")
eachRes = open(str(sdcThreshold) + "-each-binFI.csv", "a")

# inputs to be injected
index = [20, 486, 992, 1398, 4429, 5259, 5868, 6350, 6650, 7771]

#while(cv2.waitKey(10) != ord('q')):
for i in index:
    full_image = scipy.misc.imread("driving_dataset/" + str(i) + ".jpg",
def run_test(suppress_out=False): if suppress_out: sys.stdout = open(os.devnull, "w") sys.stderr = open(os.devnull, "w") # Import MNIST data from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) # Store layers weight & bias weights = { # 5x5 conv, 1 input, 32 outputs 'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])), # 5x5 conv, 32 inputs, 64 outputs 'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])), # fully connected, 7*7*64 inputs, 1024 outputs 'wd1': tf.Variable(tf.random_normal([7 * 7 * 64, 1024])), # 1024 inputs, 10 outputs (class prediction) 'out': tf.Variable(tf.random_normal([1024, num_classes])) } biases = { 'bc1': tf.Variable(tf.random_normal([32])), 'bc2': tf.Variable(tf.random_normal([64])), 'bd1': tf.Variable(tf.random_normal([1024])), 'out': tf.Variable(tf.random_normal([num_classes])) } # Construct model logits = conv_net(X, weights, biases, keep_prob) prediction = tf.nn.softmax(logits) # Define loss and optimizer loss_op = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y)) optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate) train_op = optimizer.minimize(loss_op) # Evaluate model correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) # Initialize the variables (i.e. assign their default value) init = tf.global_variables_initializer() passed_bool = None # Start training with tf.Session() as sess: # Run the initializer sess.run(init) for step in range(1, num_steps + 1): batch_x, batch_y = mnist.train.next_batch(batch_size) # Run optimization op (backprop) sess.run(train_op, feed_dict={ X: batch_x, Y: batch_y, keep_prob: 0.8 }) if step % display_step == 0 or step == 1: # Calculate batch loss and accuracy loss, acc = sess.run([loss_op, accuracy], feed_dict={ X: batch_x, Y: batch_y, keep_prob: 1.0 }) print "Step " + str( step) + ", Minibatch Loss= " + "{:.4f}".format( loss) + ", Training Accuracy= " + "{:.3f}".format(acc) print "Training finished! Testing now ..." # Calculate accuracy for 256 MNIST test images acc1 = sess.run(accuracy, feed_dict={ X: mnist.test.images[:256], Y: mnist.test.labels[:256], keep_prob: 1.0 }) print "Accuracy (with no injections):", acc1 # Add the fault injection code here to instrument the graph fi = ti.TensorFI(sess, name="convolutional", disableInjections=False, logDir=logDir) acc2 = sess.run(accuracy, feed_dict={ X: mnist.test.images[:256], Y: mnist.test.labels[:256], keep_prob: 1.0 }) print "Accuracy (with injections):", acc2 if acc1 == acc2: passed_bool = True else: passed_bool = False if suppress_out: sys.stdout = sys.__stdout__ sys.stderr = sys.__stderr__ return passed_bool # set this based on the test requirements (True if the test passed, False otherwise)
def optimize(): for i in range(epochs): epoch_cost = [] epoch_time = time.time() for ii in range(mnist_data.train.num_examples//batch_size): batch = mnist_data.train.next_batch(batch_size) imgs = batch[0] labs = batch[1] c, _ = session.run([cost, optimizer], feed_dict={inputs:imgs, targets:labs}) epoch_cost.append(c) print("Epoch: {}/{}".format(i+1, epochs), " | Current loss: {}".format(np.mean(epoch_cost)), " | Epoch time: {:.2f}s".format(time.time() - epoch_time)) print("test accuracy %g" % session.run(accuracy ,feed_dict={ inputs: mnist_data.test.images, targets: mnist_data.test.labels })) saver.save(session, './fcn') def test_model(): saver.restore(session, tf.train.latest_checkpoint('.')) return session.run(accuracy, feed_dict={inputs:mnist_data.test.images[:256], targets:mnist_data.test.labels[:256]}) optimize() print ("Accuracy is: ", test_model()) fi = ti.TensorFI(session, logLevel = 100, name = "fcn", disableInjections=False) print ("Accuracy is: ", test_model())
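# --- Hedged aside (not part of the original script): using the injector handle
# --- above, the fault-free and injected accuracies could be read back to back;
# --- turnOffInjections()/turnOnInjections() are the toggles used elsewhere in
# --- this document, and the variable names here are illustrative.
fi.turnOffInjections()
acc_golden = test_model()
fi.turnOnInjections()
acc_faulty = test_model()
print("Accuracy without / with injections: {} / {}".format(acc_golden, acc_faulty))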
def main(_): if FLAGS.self_test: print('Running self-test.') train_data, train_labels = fake_data(256) validation_data, validation_labels = fake_data(EVAL_BATCH_SIZE) test_data, test_labels = fake_data(EVAL_BATCH_SIZE) num_epochs = 1 else: # Get the data. train_data_filename = maybe_download('train-images-idx3-ubyte.gz') train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz') test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz') test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz') # Extract it into numpy arrays. train_data = extract_data(train_data_filename, 60000) train_labels = extract_labels(train_labels_filename, 60000) test_data = extract_data(test_data_filename, 10000) test_labels = extract_labels(test_labels_filename, 10000) # Generate a validation set. validation_data = train_data[:VALIDATION_SIZE, ...] validation_labels = train_labels[:VALIDATION_SIZE] train_data = train_data[VALIDATION_SIZE:, ...] train_labels = train_labels[VALIDATION_SIZE:] num_epochs = NUM_EPOCHS train_size = int(train_labels.shape[0] / 100) print("Training size ", train_size) # This is where training samples and labels are fed to the graph. # These placeholder nodes will be fed a batch of training data at each # training step using the {feed_dict} argument to the Run() call below. train_data_node = tf.placeholder(data_type(), shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)) train_labels_node = tf.placeholder(tf.int64, shape=(BATCH_SIZE, )) eval_data = tf.placeholder(data_type(), shape=(EVAL_BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)) # The variables below hold all the trainable weights. They are passed an # initial value which will be assigned when we call: # {tf.global_variables_initializer().run()} conv1_weights = tf.Variable( tf.truncated_normal( [5, 5, NUM_CHANNELS, 32], # 5x5 filter, depth 32. stddev=0.1, seed=SEED, dtype=data_type())) conv1_biases = tf.Variable(tf.zeros([32], dtype=data_type())) conv2_weights = tf.Variable( tf.truncated_normal([5, 5, 32, 64], stddev=0.1, seed=SEED, dtype=data_type())) conv2_biases = tf.Variable(tf.constant(0.1, shape=[64], dtype=data_type())) fc1_weights = tf.Variable( # fully connected, depth 512. tf.truncated_normal([IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64, 512], stddev=0.1, seed=SEED, dtype=data_type())) fc1_biases = tf.Variable(tf.constant(0.1, shape=[512], dtype=data_type())) fc2_weights = tf.Variable( tf.truncated_normal([512, NUM_LABELS], stddev=0.1, seed=SEED, dtype=data_type())) fc2_biases = tf.Variable( tf.constant(0.1, shape=[NUM_LABELS], dtype=data_type())) # We will replicate the model structure for the training subgraph, as well # as the evaluation subgraphs, while sharing the trainable parameters. def model(data, train=False): """The Model definition.""" # 2D convolution, with 'SAME' padding (i.e. the output feature map has # the same size as the input). Note that {strides} is a 4D array whose # shape matches the data layout: [image index, y, x, depth]. conv = tf.nn.conv2d(data, conv1_weights, strides=[1, 1, 1, 1], padding='SAME') # Bias and rectified linear non-linearity. relu = tf.nn.relu(tf.nn.bias_add(conv, conv1_biases)) # Max pooling. The kernel size spec {ksize} also follows the layout of # the data. Here we have a pooling window of 2, and a stride of 2. 
pool = tf.nn.max_pool(relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') conv = tf.nn.conv2d(pool, conv2_weights, strides=[1, 1, 1, 1], padding='SAME') relu = tf.nn.relu(tf.nn.bias_add(conv, conv2_biases)) pool = tf.nn.max_pool(relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') # Reshape the feature map cuboid into a 2D matrix to feed it to the # fully connected layers. pool_shape = pool.get_shape().as_list() reshape = tf.reshape( pool, [pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]]) # Fully connected layer. Note that the '+' operation automatically # broadcasts the biases. hidden = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases) # Add a 50% dropout during training only. Dropout also scales # activations such that no rescaling is needed at evaluation time. if train: hidden = tf.nn.dropout(hidden, 0.5, seed=SEED) return tf.matmul(hidden, fc2_weights) + fc2_biases # Training computation: logits + cross-entropy loss. logits = model(train_data_node, True) loss = tf.reduce_mean( tf.nn.sparse_softmax_cross_entropy_with_logits( labels=train_labels_node, logits=logits)) # L2 regularization for the fully connected parameters. regularizers = (tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) + tf.nn.l2_loss(fc2_weights) + tf.nn.l2_loss(fc2_biases)) # Add the regularization term to the loss. loss += 5e-4 * regularizers # Optimizer: set up a variable that's incremented once per batch and # controls the learning rate decay. batch = tf.Variable(0, dtype=data_type()) # Decay once per epoch, using an exponential schedule starting at 0.01. learning_rate = tf.train.exponential_decay( 0.01, # Base learning rate. batch * BATCH_SIZE, # Current index into the dataset. train_size, # Decay step. 0.95, # Decay rate. staircase=True) # Use simple momentum for the optimization. optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9).minimize(loss, global_step=batch) # Predictions for the current training minibatch. train_prediction = tf.nn.softmax(logits) # Predictions for the test and validation, which we'll compute less often. eval_prediction = tf.nn.softmax(model(eval_data)) # Small utility function to evaluate a dataset by feeding batches of data to # {eval_data} and pulling the results from {eval_predictions}. # Saves memory and enables this to run on smaller GPUs. def eval_in_batches(data, sess): """Get all predictions for a dataset by running it in small batches.""" size = data.shape[0] if size < EVAL_BATCH_SIZE: raise ValueError("batch size for evals larger than dataset: %d" % size) predictions = numpy.ndarray(shape=(size, NUM_LABELS), dtype=numpy.float32) for begin in xrange(0, size, EVAL_BATCH_SIZE): end = begin + EVAL_BATCH_SIZE if end <= size: predictions[begin:end, :] = sess.run( eval_prediction, feed_dict={eval_data: data[begin:end, ...]}) else: batch_predictions = sess.run( eval_prediction, feed_dict={eval_data: data[-EVAL_BATCH_SIZE:, ...]}) predictions[begin:, :] = batch_predictions[begin - size:, :] return predictions # Create a local session to run the training. start_time = time.time() with tf.Session() as sess: # Run all the initializers to prepare the trainable parameters. tf.global_variables_initializer().run() print('Initialized!') # Loop through training steps. for step in xrange(int(num_epochs * train_size) // BATCH_SIZE): # Compute the offset of the current minibatch in the data. # Note that we could use better randomization across epochs. 
offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE) batch_data = train_data[offset:(offset + BATCH_SIZE), ...] batch_labels = train_labels[offset:(offset + BATCH_SIZE)] # This dictionary maps the batch data (as a numpy array) to the # node in the graph it should be fed to. feed_dict = { train_data_node: batch_data, train_labels_node: batch_labels } # Run the optimizer to update weights. sess.run(optimizer, feed_dict=feed_dict) # print some extra information once reach the evaluation frequency if step % EVAL_FREQUENCY == 0: # fetch some extra nodes' data l, lr, predictions = sess.run( [loss, learning_rate, train_prediction], feed_dict=feed_dict) elapsed_time = time.time() - start_time start_time = time.time() print('Step %d (epoch %.2f), %.1f ms' % (step, float(step) * BATCH_SIZE / train_size, 1000 * elapsed_time / EVAL_FREQUENCY)) print('Minibatch loss: %.3f, learning rate: %.6f' % (l, lr)) print('Minibatch error: %.1f%%' % error_rate(predictions, batch_labels)) print('Validation error: %.1f%%' % error_rate( eval_in_batches(validation_data, sess), validation_labels)) sys.stdout.flush() # Finally print the result! test_error = error_rate(eval_in_batches(test_data, sess), test_labels) print('Test error: %.1f%%' % test_error) if FLAGS.self_test: print('test_error', test_error) assert test_error == 0.0, 'expected 0.0 test_error, got %.2f' % ( test_error, ) # Add the fault injection nodes to it fi = ti.TensorFI(sess, logLevel=10, name="convolutional") # Make the log files in TensorBoard logs_path = "./logs" logWriter = tf.summary.FileWriter(logs_path, sess.graph) # Run it again with the injector test_error = error_rate(eval_in_batches(test_data, sess), test_labels) print('Test error: %.1f%%' % test_error)
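# --- Hedged aside (not part of main() above): the instrumented graph written by
# --- the FileWriter can be inspected with `tensorboard --logdir=./logs`, and an
# --- explicit pass/fail check could compare the two error rates in the same
# --- spirit as the run_test() helpers elsewhere in this document. The function
# --- below is illustrative; `golden_error` is an assumed name for the error rate
# --- measured before the session was instrumented.
def errors_match(golden_error, injected_error, tolerance=0.0):
    """Return True when the instrumented run reproduces the fault-free error rate."""
    return abs(golden_error - injected_error) <= tolerance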
print("Optimization Finished!") # Test model correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Calculate accuracy (before fault injections) print("Accuracy:", accuracy.eval({ x: mnist.test.images, y: mnist.test.labels })) # Instrument the graph for fault injection fi = ti.TensorFI(sess, name="logistReg", logLevel=30, disableInjections=True) # Calculate accuracy (with no fault injections) print("Accuracy (with no injections):", accuracy.eval({ x: mnist.test.images, y: mnist.test.labels })) # Make the log files in TensorBoard logs_path = "./logs" logWriter = tf.summary.FileWriter(logs_path, sess.graph) # Calculate accuracy (with fault injections) fi.turnOnInjections()
saver.restore(sess, FLAGS.model_file) # save each FI result overalReport = open("reports/overalReport.csv", "w") overalReport.write("image,golden_value,average_difference,sdc_percentage\n") detailsReport = open("reports/detailsReport.csv", "w") detailsReport.write("image,golden_value") for idx in range(numOfInjection): detailsReport.write(",error_" + str(idx)) detailsReport.write("\n") # initialize TensorFI fi = ti.TensorFI(sess, logLevel=logging.DEBUG, name="PilotNet", disableInjections=True) # inputs to be injected #index = [20, 486, 992, 1398, 4429, 5259, 5868, 6350, 6650, 7771] index = [20, 40, 60, 80] #index = [20] averageSDC = 0 for i in index: full_image = scipy.misc.imread(FLAGS.dataset_dir + "/" + str(i) + ".jpg", mode="RGB") image = scipy.misc.imresize(full_image[-150:], [66, 200]) / 255.0 overalReport.write(str(i) + ",") detailsReport.write(str(i) + ",") # we first need to obtain the steering angle in the fault-free run fi.turnOffInjections()
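# --- Hedged aside (not part of the original script): given the golden steering
# --- angle and the angles from numOfInjection injected runs, the CSV columns
# --- declared above (average_difference, sdc_percentage) could be computed like
# --- this; the function and the sdc_threshold default are illustrative only.
def summarize_injections(golden_angle, injected_angles, sdc_threshold=30.0):
    """Average deviation from the golden angle and the percentage of runs exceeding the threshold."""
    diffs = [abs(a - golden_angle) for a in injected_angles]
    avg_diff = sum(diffs) / len(diffs)
    sdc_pct = 100.0 * sum(1 for d in diffs if d > sdc_threshold) / len(diffs)
    return avg_diff, sdc_pct

# avg_diff, sdc_pct = summarize_injections(golden_angle, injected_angles)
# overalReport.write("{},{},{}\n".format(golden_angle, avg_diff, sdc_pct))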
sess.run(init) num_steps = 10 display_step = 1 Xtr, Ytr = mnist.train.next_batch(1000, shuffle=False) for step in range(1, num_steps+1): batch_x = Xtr[(step-1)*100: step*100] batch_y = Ytr[(step-1)*100: step*100] # Run optimization op (backprop) sess.run(train_op, feed_dict={X: batch_x, Y: batch_y}) print("Training Finished!") # Add the fault injection code here to instrument the graph fi = ti.TensorFI(sess, name = "Perceptron", logLevel = 50, disableInjections = True) # we use the inputs that can be correctly identified by the model for FI Xte = mnist.test.images[:2000] Yte = mnist.test.labels[:2000] wrong = [] for sampleIndex in range(Xte.shape[0]): acy = accuracy.eval({X: Xte[sampleIndex:sampleIndex+1] , Y: Yte[sampleIndex:sampleIndex+1]}) if(acy!=1): wrong.append(sampleIndex) Xte = np.delete(Xte, wrong, axis=0) Yte = np.delete(Yte, wrong, axis=0) # now the inputs in test set can all be correctly identified by the models # inputs to be injected indexs = [5, 64, 212, 313, 553, 610, 686, 697, 839, 857]
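# --- Hedged aside (not in the original test, which is truncated above): one
# --- plausible shape of the injection loop over `indexs`, reusing the accuracy
# --- op and injector already defined; the SDC counter name is illustrative.
fi.turnOnInjections()
sdc_count = 0
for sampleIndex in indexs:
    acy = accuracy.eval({X: Xte[sampleIndex:sampleIndex + 1],
                         Y: Yte[sampleIndex:sampleIndex + 1]})
    if acy != 1:    # prediction flipped by the injected fault
        sdc_count += 1
print("SDC rate: {}".format(float(sdc_count) / len(indexs)))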
counts = np.zeros(shape=(k, num_classes)) for i in range(len(idx)): counts[idx[i]] += mnist.train.labels[i] # Assign the most frequent label to the centroid labels_map = [np.argmax(c) for c in counts] labels_map = tf.convert_to_tensor(labels_map) # Evaluation ops # Lookup: centroid_id -> label cluster_label = tf.nn.embedding_lookup(labels_map, cluster_idx) # Compute accuracy correct_prediction = tf.equal(cluster_label, tf.cast(tf.argmax(Y, 1), tf.int32)) accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Test Model test_x, test_y = mnist.test.images, mnist.test.labels print("Test Accuracy:", sess.run(accuracy_op, feed_dict={ X: test_x, Y: test_y })) # Add the fault injection code here to instrument the graph fi = ti.TensorFI(sess, name="kmeans", logLevel=10, disableInjections=False) correctResult = sess.run(accuracy_op, feed_dict={X: test_x, Y: test_y}) print("Test accuracy:", correctResult) # Make the log files in TensorBoard logs_path = "./logs" logWriter = tf.summary.FileWriter(logs_path, sess.graph)
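# --- Hedged aside (not part of the original snippet): a fault-free reference run
# --- can be obtained by toggling the injector off, mirroring the pass/fail
# --- convention of the other tests here; `goldenResult` is an illustrative name.
fi.turnOffInjections()
goldenResult = sess.run(accuracy_op, feed_dict={X: test_x, Y: test_y})
print("Matches fault-free run: " + str(goldenResult == correctResult))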