Example #1

import tensorflow as tf
import numpy as np
import sys
sys.path.append('../../src')
import processMif as mif

tf.logging.set_verbosity(tf.logging.INFO)

size = 64
# Random test input; createMem writes it out as .mif memory initialization files
in_x = np.random.rand(size)
mif.createMem([in_x])

with tf.Session() as sess:
    x = tf.placeholder(tf.float32, [size])
    with tf.device("device:XLA_CPU:0"):
        y = tf.nn.softmax(x)
    result = sess.run(y, {x: in_x})
    np.save("tf_result.npy", result)
    print(result)
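
# Cross-check sketch (not part of the original example): recompute the softmax
# in plain NumPy and compare it with the saved TensorFlow result.
tf_result = np.load("tf_result.npy")
exps = np.exp(in_x - np.max(in_x))   # subtract the max for numerical stability
np_softmax = exps / np.sum(exps)
print("softmax matches:", np.allclose(tf_result, np_softmax, atol=1e-5))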
Example #2
import tensorflow as tf
import numpy as np
import string
import sys
sys.path.append('../../src')
import processMif as mif

size = 64
outputs = 2

X = tf.placeholder(tf.float32, [1, size, size, 1])
weights = tf.placeholder(tf.float32, [3, 3, 1, outputs])

# Random test inputs; createMem writes them out as .mif memory initialization files
in_x = np.random.rand(1, size, size, 1)
in_weights = np.random.rand(3, 3, 1, outputs)
mif.createMem([in_x, in_weights])

with tf.Session() as sess:
    with tf.device("device:XLA_CPU:0"):
        y = tf.nn.conv2d(X, weights, strides=[1, 1, 1, 1], padding='SAME')
    sess.run(tf.global_variables_initializer())
    result = sess.run(y,{X: in_x, weights: in_weights})
    np.save("tf_result.npy" ,result)
    print(result)
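
# Cross-check sketch (not part of the original example): reproduce the
# 'SAME'-padded 3x3 convolution with explicit NumPy loops and compare it with
# the saved result.
padded = np.pad(in_x[0, :, :, 0], 1, mode='constant')   # zero padding for 'SAME'
np_conv = np.zeros((size, size, outputs))
for o in range(outputs):
    for kh in range(3):
        for kw in range(3):
            np_conv[:, :, o] += padded[kh:kh + size, kw:kw + size] * in_weights[kh, kw, 0, o]
print("conv2d matches:", np.allclose(np.load("tf_result.npy")[0], np_conv, atol=1e-4))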






Example #3

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append('../../src')
import processMif as mif
import additionalOptions as options

# Configs
image_width = 32
image_height = 32

# Prepare input array
in_a = np.random.rand(1, image_width, image_height, 1)
mif.createMem([in_a])

with tf.Session() as sess:
    # Create hardware for max pooling
    x = tf.placeholder(tf.float32, [1, image_width, image_height, 1])
    with tf.device("device:XLA_CPU:0"):
        y = tf.layers.max_pooling2d(inputs=x, pool_size=[3, 3], strides=3)
    result = sess.run(y, {x: in_a})
    np.save("tf_result.npy", result)
    print(result)

options.setUnrollThreshold(100000000)
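
# Cross-check sketch (not part of the original example): tf.layers.max_pooling2d
# defaults to 'valid' padding, so a 3x3/stride-3 pool of a 32x32 image yields a
# 10x10 output and ignores the last two rows/columns.
cropped = in_a[0, :30, :30, 0]
np_pool = cropped.reshape(10, 3, 10, 3).max(axis=(1, 3))
print("max_pool matches:", np.allclose(np.load("tf_result.npy")[0, :, :, 0], np_pool))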
Example #4
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append('../../src')
import processMif as mif
from tensorflow.examples.tutorials.mnist import input_data

# Import MNIST data
mnist_data = input_data.read_data_sets("MNIST_data/", one_hot=True)

# Configs
image_width = 28
image_height = 28
image_to_test = 987

# Prepare input array
shape = [1, image_width, image_height, 1]  # [batch_size, image_width, image_height, channels]
reshaped_input_array = np.reshape(mnist_data.test.images[image_to_test], shape)

with tf.Session() as sess:
    # Create hardware for max pooling
    x = tf.placeholder(tf.float32, shape)
    with tf.device("device:XLA_CPU:0"):
        y = tf.layers.max_pooling2d(inputs=x, pool_size=[3, 3], strides=3)
    sess.run(y, {x: reshaped_input_array})

    # Generate test memory
    mif.createMem([reshaped_input_array])

    # Plot original image
    first_array = np.reshape(mnist_data.test.images[image_to_test], [image_width, image_height])
    plt.imshow(first_array, cmap='gray', interpolation='none')
    plt.show()
mnist_data = input_data.read_data_sets("MNIST_data/", one_hot=True)

test_image = 123

# NOTE: graph, x and the w_c*/b_c*/w_fc*/b_fc* variables are defined earlier in
# the original script (not shown in this snippet).
with tf.Session(graph=graph) as session:
    with tf.device("device:XLA_CPU:0"):
        conv1 = tf.nn.relu(tf.add(tf.nn.conv2d(tf.reshape(x, [-1, 28, 28, 1]), w_c1,
                                               strides=[1, 1, 1, 1], padding='SAME'), b_c1))
        hp1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        conv2 = tf.nn.relu(tf.add(tf.nn.conv2d(hp1, w_c2, strides=[1, 1, 1, 1],
                                               padding='SAME'), b_c2))
        hp2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        h_fc1 = tf.nn.relu(tf.add(tf.nn.conv2d(hp2, w_fc1, strides=[1, 1, 1, 1],
                                               padding='VALID'), b_fc1))
        y = tf.nn.softmax(tf.add(tf.matmul(tf.reshape(h_fc1, [-1, 20]), w_fc2), b_fc2))

    ret = session.run(y, feed_dict={x: [mnist_data.test.images[test_image]]})

    print("Expected Result: "+str(np.argmax(mnist_data.test.labels[test_image])))
    print("Real Result: "+str(ret))

    # Creating memories for testing

    param1 = mnist_data.test.images[test_image]
    param0 = w_c1.eval()
    param2 = b_c1.eval()
    param3 = w_c2.eval()
    param4 = b_c2.eval()
    param5 = w_fc1.eval()
    param6 = b_fc1.eval()
    param7 = w_fc2.eval()
    param8 = b_fc2.eval()
    mif.createMem([param0,param1,param2,param3,param4,param5,param6,param7,param8])

#options.setUnrollThreshold(100000000)
Example #6
    # img, result, num_outputs, original_image and weights come from earlier
    # code in the original script (not shown in this snippet).
    fig, axs = plt.subplots(2, 3, sharex=True, sharey=True)
    axs[0, 0].imshow(img, cmap='gray')
    axs[0, 0].axis('off')
    fig.subplots_adjust(hspace=0, wspace=0, right=1, top=1, left=0, bottom=0)
    axs[0, 0].text(.9, 0.9, string.ascii_uppercase[0], transform=axs[0, 0].transAxes,
                   size=20, weight='bold', color='white')
    for i in range(1, num_outputs+1):
        axs[i//3, i%3].imshow(np.clip(result[:, :, i-1], 0, 1), cmap='gray')
        axs[i//3, i%3].axis('off')
        fig.subplots_adjust(hspace=0, wspace=0, right=1, top=1, left=0, bottom=0)
        axs[i//3, i%3].text(.9, 0.9, string.ascii_uppercase[i], transform=axs[i//3, i%3].transAxes,
                            size=20, weight='bold', color='white')
    plt.show()

    # Generate test memory
    mif.createMem([original_image,weights])



    










    # NOTE: session, x, real_y, y, W, b, init and train_step are defined earlier
    # in the original script (not shown in this snippet).
    epochs = 1000
    session.run(init)
    for _ in range(epochs):
        batch_x, batch_y = mnist_data.train.next_batch(100)
        session.run(train_step, feed_dict={x: batch_x, real_y: batch_y})
    correct_prediction = tensorflow.equal(tensorflow.argmax(y, 1),
                                          tensorflow.argmax(real_y, 1))
    accuracy = tensorflow.reduce_mean(
        tensorflow.cast(correct_prediction, tensorflow.float32))
    network_accuracy = session.run(accuracy,
                                   feed_dict={
                                       x: mnist_data.test.images,
                                       real_y: mnist_data.test.labels
                                   })
    print('The accuracy over the MNIST data is {:.2f}%'.format(
        network_accuracy * 100))

    # Generating hardware
    with tensorflow.device("device:XLA_CPU:0"):
        y = tensorflow.nn.softmax(tensorflow.add(
            tensorflow.matmul(x, W)[0], b))
    session.run(y, {x: [mnist_data.test.images[123]]})

    # Creating memories for testing
    test_image = 123
    mif.createMem([b.eval(), W.eval(), mnist_data.test.images[test_image]])

    # Print expected result
    print("Expected Result: " +
          str(np.argmax(mnist_data.test.labels[test_image])))
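
    # Cross-check sketch (not part of the original example): the predicted digit
    # can also be computed directly from the evaluated weights; argmax of the
    # logits equals argmax of the softmax output.
    logits = np.matmul(mnist_data.test.images[test_image], W.eval()) + b.eval()
    print("NumPy argmax: " + str(np.argmax(logits)))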
# NOTE: rand and nodes are defined earlier in the original script (not shown here).
X = tf.placeholder(tf.float32, [1, 3])
test = rand([1, 3])

dense1_w = tf.Variable(rand([3, nodes]))
dense1_b = tf.Variable(rand([nodes]))

with tf.Session() as sess:

    # Generating circuit
    with tf.device("device:XLA_CPU:0"):

        dense1 = tf.add(tf.matmul(X, dense1_w)[0], dense1_b)

        # y = tf.cast(dense1[0], tf.int32)
        y = dense1[0]
        # y = tf.cast(dense1, tf.int32)
        # y = dense1
    sess.run(tf.global_variables_initializer())
    result = sess.run(y, {X: test})

    # Print expected result
    print("Result Calculated: " + str(result))

    # Creating .mif files to initialize the memories
    # To do this, go to <your_file>_ir_7.ll and check the list of params (inputs) and their sizes
    param5 = dense1_w.eval()
    param6 = dense1_b.eval()
    mif.createMem([param5, test, param6])
    #mif.createMem([test,param5,param6])
Example #9
import tensorflow as tf
import numpy as np
import sys
sys.path.append('../../src')
import processMif as mif

tf.logging.set_verbosity(tf.logging.INFO)

# Network Parameters
n_input = 1
n_output = 8

# tf Graph input
X = tf.placeholder(tf.float32, [None, n_input])
weights = tf.placeholder(tf.float32, [n_input, n_output])
biases = tf.placeholder(tf.float32, [n_output])

in_x = np.random.rand(1, n_input)
in_weights = np.random.rand(n_input, n_output)
in_biases = np.random.rand(n_output)
mif.createMem([in_weights, in_biases, in_x])

# tf.matmul needs a 2-D (batched) input, but the add operates on a single 1-D
# vector, so row 0 of the matmul result is used below
with tf.Session() as sess:
    with tf.device("device:XLA_CPU:0"):
        aux = tf.matmul(X, weights)
        y = tf.add(biases, aux[0])

    sess.run(tf.global_variables_initializer())
    result = sess.run(y, {X: in_x, weights: in_weights, biases: in_biases})
    np.save("tf_result.npy", result)
    print(result)
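
# Cross-check sketch (not part of the original example): the dense layer is
# just in_x @ in_weights + in_biases.
np_dense = np.matmul(in_x, in_weights)[0] + in_biases
print("dense layer matches:", np.allclose(np.load("tf_result.npy"), np_dense, atol=1e-5))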
Example #10

import tensorflow as tf
import numpy as np
import sys
sys.path.append('../../src')
import processMif as mif

size = 64

in_a = np.random.rand(size)
in_b = np.random.rand(size)
mif.createMem([in_a, in_b])

tf.logging.set_verbosity(tf.logging.INFO)
with tf.Session() as sess:
    x = tf.placeholder(tf.float32, [size])
    z = tf.placeholder(tf.float32, [size])
    with tf.device("device:XLA_CPU:0"):
        y = x * z

    result = sess.run(y, {x: in_a, z: in_b})
    np.save("tf_result.npy", result)
    print(result)
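
# Cross-check sketch (not part of the original example): the saved result is
# the element-wise product of the two inputs.
print("multiply matches:", np.allclose(np.load("tf_result.npy"), in_a * in_b))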