Example no. 1

# Imports assumed from the surrounding tinyflow examples.
import numpy as np
import tinyflow as tf

def test_group():
    x1 = tf.Variable(tf.zeros(shape=[2, 3]))
    x2 = tf.Variable(tf.zeros(shape=[2, 3]))
    a1 = tf.assign(x1, tf.zeros(shape=[2, 3]))
    a2 = tf.assign(x2, tf.ones(shape=[2, 3]))
    sess = tf.Session()
    # tf.group bundles a1 and a2 into a single op; running it executes both assigns.
    sess.run(tf.group(a1, a2))
    ax1 = sess.run(x1)
    ax2 = sess.run(x2)
    np.testing.assert_almost_equal(ax1, np.zeros((2, 3)))
    np.testing.assert_almost_equal(ax2, np.ones((2, 3)))
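For comparison, a minimal sketch (same imports) that reaches the same end state without tf.group, by running each assign op in its own Session.run call:

def test_group_sequential():
    x1 = tf.Variable(tf.zeros(shape=[2, 3]))
    x2 = tf.Variable(tf.zeros(shape=[2, 3]))
    a1 = tf.assign(x1, tf.zeros(shape=[2, 3]))
    a2 = tf.assign(x2, tf.ones(shape=[2, 3]))
    sess = tf.Session()
    # Two separate run calls instead of one grouped op.
    sess.run(a1)
    sess.run(a2)
    np.testing.assert_almost_equal(sess.run(x1), np.zeros((2, 3)))
    np.testing.assert_almost_equal(sess.run(x2), np.ones((2, 3)))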
Example no. 3

import tinyflow as tf
from tinyflow.datasets import get_mnist

# Create the model
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
y = tf.nn.softmax(tf.matmul(x, W))

# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, 10])

cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
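# I.e., the mean over the batch of the per-example cross entropy:
#   H = -(1/N) * sum_i sum_c y_[i, c] * log(y[i, c])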

learning_rate = 0.5
W_grad = tf.gradients(cross_entropy, [W])[0]
train_step = tf.assign(W, W - learning_rate * W_grad)

sess = tf.Session()
sess.run(tf.initialize_all_variables())

# get the mnist dataset
mnist = get_mnist(flatten=True, onehot=True)

for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(correct_prediction)

print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
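The train_step above is plain gradient descent written out by hand: tf.gradients produces dL/dW and tf.assign applies the update W <- W - 0.5 * dL/dW. Later examples on this page express the same step through the optimizer API; as a sketch, the W_grad and train_step lines could be replaced with the single line:

train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)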
Example no. 4
# Fragment: the imports, the input placeholder `x`, and the earlier layers
# producing `tanh3` are omitted in the source snippet.
fc2 = tf.nn.linear(tanh3, num_hidden=10, name="fc2")

# define loss
label = tf.placeholder(tf.float32)
cross_entropy = tf.nn.mean_sparse_softmax_cross_entropy_with_logits(fc2, label)
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

sess = tf.Session(device='gpu')

# Automatic variable shape inference API: infers the shapes and initializes the weights.
known_shape = {x: [100, 1, 28, 28], label: [100]}
stdev = 0.01
init_step = []
for v, name, shape in tf.infer_variable_shapes(cross_entropy,
                                               feed_dict=known_shape):
    init_step.append(tf.assign(v, tf.normal(shape, stdev)))
    print("shape[%s]=%s" % (name, str(shape)))
sess.run(init_step)
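# Note: each entry of init_step assigns Gaussian noise of the inferred shape
# (stdev presumably being the standard deviation passed to tf.normal); the
# single run call above executes all of the assigns at once.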

# get the mnist dataset
mnist = get_mnist(flatten=False, onehot=False)

print_period = 1000
for epoch in range(10):
    sum_loss = 0.0
    num_batch = 600
    for i in range(num_batch):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        loss, _ = sess.run([cross_entropy, train_step],
                           feed_dict={
                               x: batch_xs,
                               label: batch_ys})
        sum_loss += loss
    print("epoch[%d] cross_entropy=%g" % (epoch, sum_loss / num_batch))
Example no. 5

# Imports assumed from the surrounding tinyflow examples.
import numpy as np
import tinyflow as tf

def test_assign():
    x = tf.Variable(tf.zeros(shape=[2, 3]))
    sess = tf.Session()
    # Running the assign op writes the new value into x; sess.run(x) reads it back.
    sess.run(tf.assign(x, tf.zeros(shape=[2, 3])))
    ax = sess.run(x)
    np.testing.assert_almost_equal(ax, np.zeros((2, 3)))
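tf.assign only builds an op; nothing is written to x until the session runs it. A minimal variant of the same test (same imports), writing ones instead of zeros so the update is observable against the zero initialization:

def test_assign_ones():
    x = tf.Variable(tf.zeros(shape=[2, 3]))
    sess = tf.Session()
    sess.run(tf.assign(x, tf.ones(shape=[2, 3])))
    np.testing.assert_almost_equal(sess.run(x), np.ones((2, 3)))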
Example no. 6

# Fragment: the imports, the input placeholder `x`, and the earlier layers
# producing `fc1` are omitted in the source snippet.
relu1 = tf.nn.relu(fc1)
fc2 = tf.nn.linear(relu1, num_hidden=10, name="fc2")

# define loss
label = tf.placeholder(tf.float32)
cross_entropy = tf.nn.mean_sparse_softmax_cross_entropy_with_logits(fc2, label)
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

sess = tf.Session(device='gpu')

# Automatic variable shape inference API, infers the shape and initialize the weights.
known_shape = {x: [100, 28 * 28], label: [100]}
init_step = []
for v, name, shape in tf.infer_variable_shapes(
        cross_entropy, feed_dict=known_shape):
    init_step.append(tf.assign(v, tf.normal(shape)))
    print("shape[%s]=%s" % (name, str(shape)))
sess.run(init_step)

# get the mnist dataset
mnist = get_mnist(flatten=True, onehot=False)

print_period = 1000
for epoch in range(10):
    sum_loss = 0.0
    num_batch = 600
    for i in range(num_batch):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        loss, _ = sess.run([cross_entropy, train_step],
                           feed_dict={x: batch_xs, label: batch_ys})
        sum_loss += loss
    print("epoch[%d] cross_entropy=%g" % (epoch, sum_loss / num_batch))