Example 1
import numpy as np
import tinyflow as tf  # assumed: these tests exercise tinyflow's TensorFlow-like API

def test_sum_grad():
    x = tf.placeholder(tf.float32)
    ax = np.ones((2, 3))
    # z = -14 * sum(x), so dz/dx is -14 for every element of x.
    z = -tf.reduce_sum(x) * 14
    gx = tf.gradients(z, [x])[0]
    sess = tf.Session()
    agx = sess.run(gx, feed_dict={x: ax})
    np.testing.assert_almost_equal(agx, -np.ones((2, 3)) * 14)
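The constant in the assertion follows directly from the chain rule: z = -14 * sum(x), so the gradient of z with respect to every element of x is -14, independent of the input values. A minimal NumPy-only finite-difference check of that arithmetic (written here purely for illustration, not part of the test suite):

import numpy as np

def numeric_grad(f, a, eps=1e-6):
    # Central finite differences, one element at a time.
    g = np.zeros_like(a)
    it = np.nditer(a, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        a[idx] += eps
        fp = f(a)
        a[idx] -= 2 * eps
        fm = f(a)
        a[idx] += eps
        g[idx] = (fp - fm) / (2 * eps)
        it.iternext()
    return g

print(numeric_grad(lambda a: -14 * a.sum(), np.ones((2, 3))))
# Every entry comes out close to -14, matching the value the test expects.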
Example 2
import numpy as np
import tinyflow as tf  # assumed: these tests exercise tinyflow's TensorFlow-like API

def test_sum():
    axis = [1, 3]
    x = tf.placeholder(tf.float32)
    y = tf.reduce_sum(x, reduction_indices=axis)
    ax = np.random.uniform(size=(2, 4, 8, 7))
    sess = tf.Session()
    ay = sess.run(y, feed_dict={x: ax})
    npy = ax.sum(axis=tuple(axis))
    assert np.mean(np.abs(ay - npy)) < 1e-6
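A quick shape check of what this test compares, in plain NumPy and independent of tinyflow (illustrative only): summing a (2, 4, 8, 7) array over axes 1 and 3 collapses those two dimensions, so both ay and npy have shape (2, 8) when the element-wise difference is taken.

import numpy as np

ax = np.random.uniform(size=(2, 4, 8, 7))
print(ax.sum(axis=(1, 3)).shape)  # (2, 8): axes 1 and 3 are reduced away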
"""Tinyflow example code.

Minimum softmax code that exposes the optimizer.
"""
import tinyflow as tf
from tinyflow.datasets import get_mnist

# Create the model
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
y = tf.nn.softmax(tf.matmul(x, W))

# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, 10])

cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))

learning_rate = 0.5
# Take the gradient of the loss with respect to W.
W_grad = tf.gradients(cross_entropy, [W])[0]
# Manual gradient-descent update: W <- W - learning_rate * W_grad.
train_step = tf.assign(W, W - learning_rate * W_grad)

sess = tf.Session()
sess.run(tf.initialize_all_variables())

# get the mnist dataset
mnist = get_mnist(flatten=True, onehot=True)

for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
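The learning_rate / W_grad / train_step lines above are a hand-written gradient-descent step: each run of train_step replaces W by W - learning_rate * d(cross_entropy)/dW. The next example obtains the same update from the built-in GradientDescentOptimizer. As a rough NumPy sketch of what one such step computes for this softmax model (the function and variable names below are illustrative assumptions, not part of the example):

import numpy as np

def softmax(z):
    e = np.exp(z - z.max(axis=1, keepdims=True))
    return e / e.sum(axis=1, keepdims=True)

def sgd_step(W, xs, ys, lr=0.5):
    probs = softmax(xs @ W)               # forward pass, as in tf.nn.softmax(tf.matmul(x, W))
    grad = xs.T @ (probs - ys) / len(xs)  # analytic gradient of the mean cross-entropy w.r.t. W
    return W - lr * grad                  # same arithmetic as tf.assign(W, W - learning_rate * W_grad)

# Illustrative call with random stand-ins for a batch of flattened images and one-hot labels.
rng = np.random.default_rng(0)
xs = rng.random((100, 784))
ys = np.eye(10)[rng.integers(0, 10, size=100)]
print(sgd_step(np.zeros((784, 10)), xs, ys).shape)  # (784, 10)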
Example 4
"""This code is adapted from TensorFlow's MNIST Tutorial with minimal code changes.
"""
import tinyflow as tf
from tinyflow.datasets import get_mnist

# Create the model
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
y = tf.nn.softmax(tf.matmul(x, W))

# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, 10])

cross_entropy = tf.reduce_mean(
    -tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

sess = tf.Session()
sess.run(tf.initialize_all_variables())

# get the mnist dataset
mnist = get_mnist(flatten=True, onehot=True)

print("minist download is completed!")

for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
Example 5
"""MNIST softmax completely in tinyflow."""
import tinyflow as tf
from tinyflow.datasets import get_mnist

# Create the model
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
y = tf.nn.softmax(tf.matmul(x, W))

# Define loss
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
# Take gradient
W_grad = tf.gradients(cross_entropy, [W])[0]
# The update rule.
learning_rate = 0.5
train_step = tf.assign(W, W - learning_rate * W_grad)

sess = tf.Session()
sess.run(tf.initialize_all_variables())

# get the mnist dataset
mnist = get_mnist(flatten=True, onehot=True)

for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(correct_prediction)
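correct_prediction marks, for each example in the batch, whether the predicted class (the argmax of the softmax output y) matches the one-hot label y_, and its mean is the classification accuracy. The same computation in plain NumPy, with random stand-ins for the model output and the labels (purely illustrative):

import numpy as np

rng = np.random.default_rng(0)
probs = rng.random((100, 10))                  # stand-in for the softmax output y
labels = np.eye(10)[rng.integers(0, 10, 100)]  # stand-in for the one-hot labels y_
print(np.mean(np.argmax(probs, axis=1) == np.argmax(labels, axis=1)))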