Example #1
def runSoftmaxLayer(inputx, inputy):
    numOfClasses = 2

    #Softmax layer model
    x = tf.placeholder(tf.float32, [None, 1024])
    w = tf.Variable(tf.zeros([1024, numOfClasses]))
    b = tf.Variable(tf.zeros([numOfClasses]))
    y = tf.matmul(x, w) + b

    #Loss
    ylabels = tf.placeholder(tf.float32, [None, numOfClasses])
    crossEntropyLoss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=ylabels, logits=y))
    trainModel = tf.train.GradientDescentOptimizer(0.5).minimize(
        crossEntropyLoss)
    #Try the Adam or Adadelta optimizer (may speed up training / improve results); see the commented sketch at the end of this function

    #Setup
    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()

    #Train - feed_dict takes numpy arrays, e.g. when looping over mini-batches:
    #for i in range(1000):
    #    batch_xs, batch_ys = mnist.train.next_batch(100)
    #    sess.run(trainModel, feed_dict={x: batch_xs, ylabels: batch_ys})

    #Train
    sess.run(trainModel, feed_dict={x: inputx, ylabels: inputy})
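    #A sketch of the optimizer swap suggested above (the learning rates are
    #illustrative assumptions, not tuned values):
    #trainModel = tf.train.AdamOptimizer(1e-3).minimize(crossEntropyLoss)
    #trainModel = tf.train.AdadeltaOptimizer(1.0).minimize(crossEntropyLoss)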
Example #2
def decode_test_set(encoder_state, decoder_cell, decoder_embeddings_matrix,
                    sos_id, eos_id, maximum_length, num_words, sequence_length,
                    decoding_scope, output_function, keep_prob, batch_size):
    attention_states = tf.zeros([batch_size, 1, decoder_cell.output_size])
    attention_keys, attention_values, attention_score_function, attention_construct_function = tf.contrib.seq2seq.prepare_attention(
        attention_states,
        attention_option='bahdanau',
        num_units=decoder_cell.output_size)
    test_decoder_function = tf.contrib.seq2seq.attention_decoder_fn_inference(
        output_function,
        encoder_state[0],
        attention_keys,
        attention_values,
        attention_score_function,
        attention_construct_function,
        decoder_embeddings_matrix,
        sos_id,
        eos_id,
        maximum_length,
        num_words,
        name="attn_dec_inf")
    test_predictions, decoder_final_state, decoder_final_context_state = tf.contrib.seq2seq.dynamic_rnn_decoder(
        decoder_cell, test_decoder_function, scope=decoding_scope)

    return test_predictions
Example #3
def newneuralnet(): #note this should be split into the other methods.
	#create inputs
	x = tf.placeholder("float", [num_inputs]) 	#any-dimensional arr of inputs, modify accordingly
	#create outputs
	y = tf.placeholder("float", [num_outputs])	#any-dim arr of output actions, modify accordingly
	#weights 
	w = tf.Variable(tf.zeros([num_inputs, num_outputs]))
	#bias
	b = tf.Variable(tf.zeros([num_outputs]))
	#math model, for now something linear
	with tf.name_scope("Wx_b") as scope:
		model = tf.nn.softmax(tf.matmul(x, w) + b)
	#summary operations so we can see what happens
	w_h = tf.histogram_summary("weights", w)
	b_h = tf.histogram_summary("biases", b)
	#cost function
	with tf.name_scope("cost_function") as scope:
		#using cross entropy (should we do this?)
		cost_function = -tf.reduce_sum(y*tf.log(model))
		#summary to monitor the cost
		tf.scalar_summary("cost_function", cost_function)
	#optimization function, using gradient descent to find local minima
	with tf.name_scope("train") as scope:
		optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost_function)
	#turn on the variables
	init = tf.initialize_all_variables()
	#merge all the summaries into something readable
	merged_summary = tf.merge_all_summaries()

	#RUN IT
	#initialize a session 
	with tf.Session() as sess:
		sess.run(init)
		#set up summary writing output folder, we'll read from here later
		summary_writer = tf.train.SummaryWriter('./tensorflow_logs', graph_def=sess.graph_def)
		#training cycle
		for iteration in range(training_iteration):
			avg_cost = 0.
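			# A hedged sketch of how the truncated training cycle usually
			# continues (total_batch and the batch source are assumptions;
			# this snippet never defines a data pipeline):
			#for i in range(total_batch):
			#	batch_x, batch_y = get_next_batch()
			#	sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
			#	avg_cost += sess.run(cost_function, feed_dict={x: batch_x, y: batch_y}) / total_batch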
Example #4
def decode_training_set(encoder_state, decoder_cell, decoder_embedded_input,
                        sequence_length, decoding_scope, output_function,
                        batch_size, keep_prob):
    attention_states = tf.zeros([batch_size, 1, decoder_cell.output_size])
    attention_keys, attention_values, attention_score_function, attention_construct_function = tf.contrib.seq2seq.prepare_attention(
        attention_states,
        attention_option="bahdanau",
        num_units=decoder_cell.output_size)
    training_decoder_function = tf.contrib.seq2seq.attention_decoder_fn_train(
        encoder_state[0],
        attention_keys,
        attention_values,
        attention_score_function,
        attention_construct_function,
        name="attn_dec_train")
    decoder_output, decoder_final_state, decoder_final_context_state = tf.contrib.seq2seq.dynamic_rnn_decoder(
        decoder_cell,
        training_decoder_function,
        decoder_embedded_input,
        sequence_length,
        scope=decoding_scope)
    decoder_output_dropout = tf.nn.dropout(decoder_output, keep_prob)
    return output_function(decoder_output_dropout)
Example #5
def gen_neuron_layer(x, n_neurons, name, activation=None):
    with tf.name_scope(name):
        # no. inputs determined by input matrix size
        n_inputs = int(x.get_shape()[1])
        # create weights for the matrix
        stddev = 2 / np.sqrt(n_inputs + n_neurons)
        init = tf.truncated_normal((n_inputs, n_neurons), stddev=stddev)
        w = tf.Variable(init, name='kernel')
        # create variable for bias
        b = tf.Variable(tf.zeros([n_neurons]), name='bias')
        # create variable for weighted sums of inputs plus bias term for each neuron
        z = tf.matmul(x, w) + b
        # allow for user-specified activation
        if activation is not None:
            return activation(z)
        else:
            return z
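# A short usage sketch for gen_neuron_layer (the input width and layer sizes
# are illustrative assumptions): stacking two calls builds a small network,
# leaving activation off the final layer so it returns raw logits.
#     X = tf.placeholder(tf.float32, shape=(None, 20), name='X')
#     hidden = gen_neuron_layer(X, 10, name='hidden', activation=tf.nn.relu)
#     logits = gen_neuron_layer(hidden, 3, name='logits')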
	def train(self, dataset, labels):
		num_features = len(dataset[0])
		num_classes = len(np.unique(labels))
		x = tf.placeholder("float", shape=[None, num_features])
		y_ = tf.placeholder("float", shape=[None, num_classes])
		W = tf.Variable(tf.zeros([num_features, num_classes]))
		b = tf.Variable(tf.zeros([num_classes]))
		y = tf.nn.softmax(tf.matmul(x, W) + b)
		cross_entropy = -tf.reduce_sum(y_*tf.log(y))
		train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(cross_entropy)
		correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
		accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
		predict = tf.argmax(y, 1)
		init = tf.initialize_all_variables()
		keep_prob = tf.placeholder("float")
		with tf.Session() as sess:
			sess.run(init)

			for i in range(self.num_iterations):
				batch_data, batch_labels = self.__get_batch(dataset, labels)
				sess.run(train_step, feed_dict={x: batch_data, y_: batch_labels, keep_prob: self.DROPOUT})
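			# A hedged sketch of an evaluation step after the loop, reusing the
			# accuracy op defined above (feeding the training set is purely
			# illustrative; held-out data would be the real choice):
			#print(sess.run(accuracy, feed_dict={x: dataset, y_: labels, keep_prob: 1.0}))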
Example #7
max_steps = 1000

## data_helpers is used because loading the training data is not part of our key goal

data_sets = data_helpers.load_data()

## Define Input Placeholders

image_placeholder = tf.placeholder(tf.float32, shape=[None, 3072])
label_placeholder = tf.placeholder(tf.int64, shape=[None])

## Defining the variables to optimize
## The first line below creates a 3072 x 10 matrix of weight parameters, all initialized to 0
## The second line defines an array of 10 biases, one added to each class's weighted sum

weights = tf.Variable(tf.zeros([3072, 10]))
biases = tf.Variable(tf.zeros([10]))

## Computing the class scores (logits) as a matrix product plus bias

logits = tf.matmul(image_placeholder, weights) + biases

## Calculating loss by using the softmax and cross-entropy functions

loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=label_placeholder, logits=logits))

## Define training operation

train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
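## A hedged sketch of the training loop that typically follows (the data_sets
## dictionary keys and the random batch selection are assumptions about
## data_helpers.load_data(), which is not shown here):
# with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     for i in range(max_steps):
#         indices = np.random.choice(data_sets['images_train'].shape[0], 100)
#         sess.run(train_step, feed_dict={
#             image_placeholder: data_sets['images_train'][indices],
#             label_placeholder: data_sets['labels_train'][indices]})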
xTrain = np.array([[1, 0, 1], [0, 0, 0], [1, 1, 0], [1, 0, 0], [0, 0, 1]])

yTrain = np.array([[0, 1], [1, 0], [1, 0], [1, 0], [1, 0]])

xTest = np.array([[0, 1, 1], [1, 1, 1], [0, 1, 0]])

yTest = np.array([[1, 0], [0, 1], [1, 0]])

x = tf.placeholder(dtype=tf.float32, shape=[None, 3])
y = tf.placeholder(dtype=tf.float32, shape=[None, 2])

# A placeholder is a promise to supply a value of the declared dtype and shape at run time

w0 = tf.Variable(initial_value=tf.truncated_normal([3, 2], stddev=0.5))
b0 = tf.Variable(tf.zeros([2]))

mult = tf.matmul(x, w0) + b0

cross = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=mult))
#Cross entropy (the loss function) measures how poorly the model is performing
#reduce_mean takes the average over the elements of the array

mini = tf.train.GradientDescentOptimizer(0.1).minimize(cross)

# The learning rate (0.1) is a hyperparameter

sess = tf.Session()
sess.run(tf.global_variables_initializer())
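# A small worked check of the two comments above (a sketch reusing the
# session created here, and assuming numpy is imported as np as the arrays
# above imply): softmax cross entropy computed by hand matches
# tf.nn.softmax_cross_entropy_with_logits, and reduce_mean averages the
# per-example losses.
logits_np = np.array([[2.0, 1.0]], dtype=np.float32)
labels_np = np.array([[1.0, 0.0]], dtype=np.float32)
probs = np.exp(logits_np) / np.exp(logits_np).sum(axis=1, keepdims=True)
manual_loss = -(labels_np * np.log(probs)).sum(axis=1).mean()
tf_loss = sess.run(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    labels=tf.constant(labels_np), logits=tf.constant(logits_np))))
print(manual_loss, tf_loss)  # the two values agree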
Example #9
# Define how many neurons we want in each layer of our neural network
layer_1_nodes = 50
layer_2_nodes = 100
layer_3_nodes = 50

# Section One: Define the layers of the neural network itself

# Input Layer
with tf.variable_scope('input'):
    X = tf.placeholder(tf.float32, shape=(None, number_of_inputs))

# Layer 1
with tf.variable_scope('layer_1'):
    weights = tf.get_variable(name='weights1', shape=[number_of_inputs, layer_1_nodes], initializer=tf.contrib.layers.xavier_initializer())
    biases = tf.get_variable(name='biases1', shape=[layer_1_nodes], initializer=tf.zeros_initializer())
    layer_1_output = tf.nn.relu(tf.matmul(X, weights) + biases)

# Layer 2
with tf.variable_scope('layer_2'):
    weights = tf.get_variable(name='weights2', shape=[layer_1_nodes, layer_2_nodes], initializer=tf.contrib.layers.xavier_initializer())
    biases = tf.get_variable(name='biases2', shape=[layer_2_nodes], initializer=tf.zeros_initializer())
    layer_2_output = tf.nn.relu(tf.matmul(layer_1_output, weights) + biases)

# Layer 3
with tf.variable_scope('layer_3'):
    weights = tf.get_variable(name='weights3', shape=[layer_2_nodes, layer_3_nodes], initializer=tf.contrib.layers.xavier_initializer())
    biases = tf.get_variable(name='biases3', shape=[layer_3_nodes], initializer=tf.zeros_initializer())
    layer_3_output = tf.nn.relu(tf.matmul(layer_2_output, weights) + biases)

# Output Layer
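# A sketch of the truncated output layer, following the pattern above
# (number_of_outputs is an assumption; this snippet never defines it):
with tf.variable_scope('output'):
    weights = tf.get_variable(name='weights4', shape=[layer_3_nodes, number_of_outputs], initializer=tf.contrib.layers.xavier_initializer())
    biases = tf.get_variable(name='biases4', shape=[number_of_outputs], initializer=tf.zeros_initializer())
    prediction = tf.matmul(layer_3_output, weights) + biases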
Example #10
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

import tensorflow as tf
sess = tf.InteractiveSession()

x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])

W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

sess.run(tf.global_variables_initializer())

y = tf.matmul(x, W) + b

cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))

train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

for i in range(1000):
    batch = mnist.train.next_batch(100)
    train_step.run(feed_dict={x: batch[0], y_: batch[1]})

correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))

accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
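
# A sketch of the usual final step in this tutorial: evaluating accuracy on
# the MNIST test set (eval uses the InteractiveSession created above).
print(accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))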
Example #11
###################
#  DEEP LEARNING  #
###################

# Define number of Neurons per layer
K = 200  # Layer 1
L = 100  # Layer 2
M = 60  # Layer 3
N = 30  # Layer 4

# LAYER 1
# Initialize weights, normal dist.
W1 = tf.Variable(tf.truncated_normal([28 * 28, K], stddev=0.1))
# Bias terms initialized to zero
B1 = tf.Variable(tf.zeros([K]))

# LAYER 2
W2 = tf.Variable(tf.truncated_normal([K, L], stddev=0.1))
B2 = tf.Variable(tf.zeros([L]))

# LAYER 3
W3 = tf.Variable(tf.truncated_normal([L, M], stddev=0.1))
B3 = tf.Variable(tf.zeros([M]))

# LAYER 4
W4 = tf.Variable(tf.truncated_normal([M, N], stddev=0.1))
B4 = tf.Variable(tf.zeros([N]))

# LAYER 5 (Output Layer)
W5 = tf.Variable(tf.truncated_normal([N, 10], stddev=0.1))
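# A sketch completing the pattern above: the output-layer bias (truncated in
# the original snippet), initialized to zero like the others.
B5 = tf.Variable(tf.zeros([10]))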
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 22 10:27:13 2018

@author: josh
"""

import tensorflow as tf
from read_data import get_minibatch

x = tf.placeholder(tf.float32, name="x", shape=[None, 784])
w = tf.Variable(tf.random_uniform([784, 10], -1, 1), name="w")
b = tf.Variable(tf.zeros([10]), name="biases")
output = tf.matmul(x, w) + b

init_op = tf.initialize_all_variables()  #this is required to assign the variables

sess = tf.Session()
sess.run(init_op)  #the initialization
feed_dict = {
    x: get_minibatch()
}  #fills the placeholders with the necessary input data
sess.run(output, feed_dict=feed_dict)
"""This code does not run. The code provided from the text does not 
provide a read_data library which describes the get_minibatch() function. This also
makes it a little more difficult to understand exactly what the last seesion actually
runs."""