Code Example #1
File: test_ops.py  Project: zmoon111/tinyflow
import numpy as np
import tinyflow as tf


def test_matmul():
    x = tf.placeholder(tf.float32)
    y = tf.placeholder(tf.float32)
    ax = np.ones((2, 3))
    ay = np.ones((3, 4)) * 4
    z = tf.matmul(x, y) * 4
    sess = tf.Session()
    az = sess.run(z, feed_dict={x: ax, y: ay})
    np.testing.assert_almost_equal(az, np.dot(ax, ay) * 4)
Code Example #2
File: test_ops.py  Project: xdarkbluex/tinyflow
import numpy as np
import tinyflow as tf


def test_matmul():
    x = tf.placeholder(tf.float32)
    y = tf.placeholder(tf.float32)
    ax = np.ones((2, 3))
    ay = np.ones((3, 4)) * 4
    z = tf.matmul(x, y) * 4
    sess = tf.Session()
    az = sess.run(z, feed_dict={x: ax, y: ay})
    np.testing.assert_almost_equal(
        az, np.dot(ax, ay) * 4)
Code Example #3
import numpy as np
import tinyflow as tf


def test_matmul_grad():
    x = tf.placeholder(tf.float32)
    y = tf.placeholder(tf.float32)
    ax = np.ones((2, 3))
    ay = np.ones((3, 4)) * 4
    z = tf.matmul(x, y) * 4
    gx, gy = tf.gradients(z, [x, y])
    sess = tf.Session()
    agx = sess.run(gx, feed_dict={x: ax, y: ay})
    agy = sess.run(gy, feed_dict={x: ax, y: ay})
    np.testing.assert_almost_equal(agx, np.dot(np.ones((2, 4)), ay.T) * 4)
    np.testing.assert_almost_equal(agy, np.dot(ax.T, np.ones((2, 4))) * 4)
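The expected values in this test come from the identities ∂sum(XY)/∂X = 1·Yᵀ and ∂sum(XY)/∂Y = Xᵀ·1; like TensorFlow, tinyflow appears to seed the backward pass of a non-scalar output with ones, so tf.gradients effectively differentiates sum(z). A minimal NumPy-only numerical check of that identity (an illustration, not part of the test suite):

import numpy as np

# Verify d(sum(X.dot(Y)))/dX == ones_like(X.dot(Y)).dot(Y.T) by central differences.
np.random.seed(0)
X = np.random.randn(2, 3)
Y = np.random.randn(3, 4)
eps = 1e-6

grad_analytic = np.dot(np.ones((2, 4)), Y.T)
grad_numeric = np.zeros_like(X)
for i in range(2):
    for j in range(3):
        Xp, Xm = X.copy(), X.copy()
        Xp[i, j] += eps
        Xm[i, j] -= eps
        grad_numeric[i, j] = (np.dot(Xp, Y).sum() - np.dot(Xm, Y).sum()) / (2 * eps)

np.testing.assert_almost_equal(grad_numeric, grad_analytic, decimal=4)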
Code Example #4
File: test_gradients.py  Project: xdarkbluex/tinyflow
import numpy as np
import tinyflow as tf


def test_matmul_grad():
    x = tf.placeholder(tf.float32)
    y = tf.placeholder(tf.float32)
    ax = np.ones((2, 3))
    ay = np.ones((3, 4)) * 4
    z = tf.matmul(x, y) * 4
    gx, gy = tf.gradients(z, [x, y])
    sess = tf.Session()
    agx = sess.run(gx, feed_dict={x: ax, y: ay})
    agy = sess.run(gy, feed_dict={x: ax, y: ay})
    np.testing.assert_almost_equal(
        agx,
        np.dot(np.ones((2, 4)), ay.T) * 4)
    np.testing.assert_almost_equal(
        agy,
        np.dot(ax.T, np.ones((2, 4))) * 4)
Code Example #5
"""Tinyflow example code.

Minimal softmax code that exposes the optimizer's update step.
"""
import tinyflow as tf
from tinyflow.datasets import get_mnist

# Create the model
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
y = tf.nn.softmax(tf.matmul(x, W))

# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, 10])

cross_entropy = tf.reduce_mean(
    -tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))

learning_rate = 0.5
W_grad = tf.gradients(cross_entropy, [W])[0]
train_step = tf.assign(W, W - learning_rate * W_grad)

sess = tf.Session()
sess.run(tf.initialize_all_variables())

# Get the MNIST dataset
mnist = get_mnist(flatten=True, onehot=True)

for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
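The tf.assign line is batch gradient descent written out by hand: W ← W − η·∂L/∂W. For softmax with cross-entropy, the gradient has the closed form ∂L/∂W = xᵀ(softmax(xW) − y_) / batch_size, which is what tf.gradients computes symbolically above. A NumPy sketch of one equivalent update step (an illustration with the same shapes, not tinyflow code):

import numpy as np

def softmax(z):
    e = np.exp(z - z.max(axis=1, keepdims=True))  # shift by row max for stability
    return e / e.sum(axis=1, keepdims=True)

def sgd_step(W, batch_xs, batch_ys, lr=0.5):
    """One hand-written gradient-descent step for softmax regression."""
    probs = softmax(np.dot(batch_xs, W))                         # y = softmax(xW)
    grad = np.dot(batch_xs.T, probs - batch_ys) / len(batch_xs)  # x.T (y - y_) / N
    return W - lr * grad

# Shapes match the model above: a 100-image batch, 784 pixels, 10 classes.
W = np.zeros((784, 10))
batch_xs = np.random.rand(100, 784)
batch_ys = np.eye(10)[np.random.randint(0, 10, size=100)]
W = sgd_step(W, batch_xs, batch_ys)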
Code Example #6
File: mnist_softmax.py  Project: wu-yy/tinyflow
"""Tinyflow example code.

This code is adapted from TensorFlow's MNIST tutorial with minimal code changes.
"""
import tinyflow as tf
from tinyflow.datasets import get_mnist

# Create the model
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
y = tf.nn.softmax(tf.matmul(x, W))

# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, 10])

cross_entropy = tf.reduce_mean(
    -tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

sess = tf.Session()
sess.run(tf.initialize_all_variables())

# Get the MNIST dataset
mnist = get_mnist(flatten=True, onehot=True)

print("minist download is completed!")

for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
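After training, accuracy can be checked by running the forward pass and comparing argmaxes in NumPy. A sketch that assumes the dataset object also exposes mnist.test.images and mnist.test.labels, as the TensorFlow tutorial's loader does; tinyflow's get_mnist may differ:

import numpy as np

# NOTE: mnist.test.images / mnist.test.labels are assumed attributes here.
probs = sess.run(y, feed_dict={x: mnist.test.images})
correct = np.argmax(probs, axis=1) == np.argmax(mnist.test.labels, axis=1)
print("test accuracy: %.4f" % correct.mean())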