TinyFlow test and example snippets. The short test functions below assume import tinyflow as tf and import numpy as np; the full example scripts near the end carry their own imports. TinyFlow mirrors the TensorFlow 0.x API (tf.placeholder, tf.Session, tf.initialize_all_variables).

Example #1
def check_ewise(ufunc):
    # Build a symbolic elementwise expression from two placeholders.
    x = tf.placeholder(tf.float32)
    y = tf.placeholder(tf.float32)
    z = ufunc(x, y)
    ax = np.ones((2, 3))
    ay = np.ones((2, 3)) * 4
    sess = tf.Session()
    az = sess.run(z, feed_dict={x: ax, y: ay})
    # The same ufunc applied to the NumPy arrays gives the reference result.
    np.testing.assert_almost_equal(az, ufunc(ax, ay))
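A usage sketch (hypothetical, not from the source): any binary ufunc that works on both tensors and NumPy arrays can drive the checker, e.g. the Python operator functions, assuming the TinyFlow tensor type overloads them:

import operator
# Each operator builds a TinyFlow graph and a NumPy reference result.
for op in [operator.add, operator.sub, operator.mul]:
    check_ewise(op)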
Example #2
def test_bias_add():
    x = tf.placeholder(tf.float32)
    b = tf.placeholder(tf.float32)
    y = tf.nn.bias_add(x, b)
    ax = np.random.uniform(size=(2, 3))
    ab = np.random.uniform(size=(3, ))
    sess = tf.Session()
    ay = sess.run(y, feed_dict={x: ax, b: ab})
    np.testing.assert_almost_equal(ay, ax + ab)
Example #3
def test_matmul():
    x = tf.placeholder(tf.float32)
    y = tf.placeholder(tf.float32)
    ax = np.ones((2, 3))
    ay = np.ones((3, 4)) * 4
    z = tf.matmul(x, y) * 4
    sess = tf.Session()
    az = sess.run(z, feed_dict={x: ax, y: ay})
    np.testing.assert_almost_equal(az, np.dot(ax, ay) * 4)
Example #4
def test_add_grad():
    x = tf.placeholder(tf.float32)
    y = tf.placeholder(tf.float32)
    ax = np.ones((2, 3))
    ay = np.ones((2, 3)) * 4
    z = x + y
    gx, gy = tf.gradients(z, [x, y])
    sess = tf.Session()
    agx = sess.run(gx, feed_dict={x: ax, y: ay})
    np.testing.assert_almost_equal(agx, np.ones((2, 3)))
Example #5
def test_matmul_grad():
    x = tf.placeholder(tf.float32)
    y = tf.placeholder(tf.float32)
    ax = np.ones((2, 3))
    ay = np.ones((3, 4)) * 4
    z = tf.matmul(x, y) * 4
    gx, gy = tf.gradients(z, [x, y])
    sess = tf.Session()
    agx = sess.run(gx, feed_dict={x: ax, y: ay})
    agy = sess.run(gy, feed_dict={x: ax, y: ay})
    np.testing.assert_almost_equal(agx, np.dot(np.ones((2, 4)), ay.T) * 4)
    np.testing.assert_almost_equal(agy, np.dot(ax.T, np.ones((2, 4))) * 4)
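Why those expected values: tf.gradients differentiates as if z were summed, so the upstream gradient of z = 4 * (x @ y) is G = 4 * ones((2, 4)), and the matmul identities give dz/dx = G @ y.T and dz/dy = x.T @ G. A standalone NumPy sketch of the same arithmetic:

import numpy as np

ax = np.ones((2, 3))
ay = np.ones((3, 4)) * 4
g = np.ones((2, 4)) * 4    # upstream gradient of sum(4 * x @ y)
gx_ref = g.dot(ay.T)       # dz/dx = G @ y.T
gy_ref = ax.T.dot(g)       # dz/dy = x.T @ G
np.testing.assert_almost_equal(gx_ref, np.dot(np.ones((2, 4)), ay.T) * 4)
np.testing.assert_almost_equal(gy_ref, np.dot(ax.T, np.ones((2, 4))) * 4)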
Example #6
def test_sqrt():
    x = tf.placeholder(tf.float32)
    y = tf.sqrt(x)
    ax = np.ones((2, 3)) * 2
    sess = tf.Session()
    ay = sess.run(y, feed_dict={x: ax})
    np.testing.assert_almost_equal(ay, np.sqrt(ax))
Example #7
def test_softmax():
    x = tf.placeholder(tf.float32)
    y = tf.nn.softmax(x)
    ax = np.ones((2, 4))
    sess = tf.Session()
    ay = sess.run(y, feed_dict={x: ax})
    # All inputs are equal, so softmax is uniform; ax / row-sum is the same 0.25.
    np.testing.assert_almost_equal(ay, ax / np.sum(ax, axis=1, keepdims=True))
Example #8
def check_ewise_rscalar(ufunc):
    x = 10
    y = tf.placeholder(tf.float32)
    z = ufunc(x, y)
    ay = np.ones((2, 3))
    sess = tf.Session()
    az = sess.run(z, feed_dict={y: ay})
    np.testing.assert_almost_equal(az, ufunc(x, ay))
Example #9
def check_ewise_scalar(ufunc):
    x = tf.placeholder(tf.float32)
    y = 10
    z = ufunc(x, y)
    ax = np.ones((2, 3))
    sess = tf.Session()
    az = sess.run(z, feed_dict={x: ax})
    np.testing.assert_almost_equal(az, ufunc(ax, y))
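A usage sketch (hypothetical): the two scalar checkers take the same binary ufuncs as check_ewise, exercising tensor-scalar broadcasting from either side:

import operator
check_ewise_scalar(operator.add)     # tensor + scalar
check_ewise_rscalar(operator.add)    # scalar + tensor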
Example #10
def test_mean_grad():
    x = tf.placeholder(tf.float32)
    ax = np.ones((2, 3))
    z = -tf.reduce_mean(x) * 14
    gx = tf.gradients(z, [x])[0]
    sess = tf.Session()
    agx = sess.run(gx, feed_dict={x: ax})
    # d(-14 * mean(x))/dx is -14 / n for each of the n = 6 entries.
    np.testing.assert_almost_equal(agx, -np.ones((2, 3)) * 14 / 6.0)
Example #11
def test_argmax():
    x = tf.placeholder(tf.float32)
    y = tf.argmax(x, 1)
    ax = np.random.uniform(size=(700, 10))
    sess = tf.Session()
    ay = sess.run(y, feed_dict={x: ax})
    npy = np.argmax(ax, 1)
    assert np.mean(np.abs(ay - npy)) < 1e-6
Example #12
def test_mean():
    axis = [1, 3]
    x = tf.placeholder(tf.float32)
    y = tf.reduce_mean(x, reduction_indices=axis)
    ax = np.random.uniform(size=(2, 4, 8, 7))
    sess = tf.Session()
    ay = sess.run(y, feed_dict={x: ax})
    npy = ax.mean(axis=tuple(axis))
    assert np.mean(np.abs(ay - npy)) < 1e-6
Example #13
File: test_ops.py  Project: suluner/dlsys
def test_pad():
    out_filter = 10
    in_filter = 4
    pad_width = (out_filter - in_filter) // 2
    x = tf.placeholder(tf.float32)
    y = tf.pad(x, dim=1, pad=-pad_width)
    z = tf.pad(y, dim=1, pad=pad_width)
    nx = np.random.randn(100, 4, 28, 28)
    npy = np.pad(nx, ((0, 0), (pad_width, pad_width), (0, 0), (0, 0)),
                 mode='constant', constant_values=0)
    sess = tf.Session()
    sess.run(tf.initialize_all_variables())
    ay = sess.run(z, feed_dict={x: nx})
    assert np.mean(np.abs(ay - npy)) < 1e-6
"""Tinyflow example code.

Minimum softmax code that exposes the optimizer.
"""
import tinyflow as tf
from tinyflow.datasets import get_mnist

# Create the model
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
y = tf.nn.softmax(tf.matmul(x, W))

# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, 10])

cross_entropy = tf.reduce_mean(
    -tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))

learning_rate = 0.5
W_grad = tf.gradients(cross_entropy, [W])[0]
train_step = tf.assign(W, W - learning_rate * W_grad)

sess = tf.Session()
sess.run(tf.initialize_all_variables())

# get the mnist dataset
mnist = get_mnist(flatten=True, onehot=True)

for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
Example #15
"""TinyFlow Example: LeNet for Digits classification.

This code uses automatic variable shape inference for shorter code.
"""
import tinyflow as tf
from tinyflow.datasets import get_mnist

# Create the model
x = tf.placeholder(tf.float32)
conv1 = tf.nn.conv2d(x,
                     num_filter=20,
                     ksize=[1, 5, 5, 1],
                     name="conv1",
                     no_bias=False)
tanh1 = tf.tanh(conv1)
pool1 = tf.nn.max_pool(tanh1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1])
conv2 = tf.nn.conv2d(pool1,
                     num_filter=50,
                     ksize=[1, 5, 5, 1],
                     name="conv2",
                     no_bias=False)
tanh2 = tf.tanh(conv2)
pool2 = tf.nn.max_pool(tanh2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1])
flatten = tf.nn.flatten_layer(pool2)
fc1 = tf.nn.linear(flatten, num_hidden=500, name="fc1")
tanh3 = tf.tanh(fc1)
fc2 = tf.nn.linear(tanh3, num_hidden=10, name="fc2")

# define loss
label = tf.placeholder(tf.float32)
cross_entropy = tf.nn.mean_sparse_softmax_cross_entropy_with_logits(fc2, label)
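The snippet ends at the loss definition; a hedged continuation sketch in the style of the other examples on this page (the learning rate, input layout, and dataset flags are assumptions, not from the source):

train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
sess = tf.Session(device='gpu')

# Initialize the auto-created conv/fc variables via shape inference,
# as the next example does; the assumed input layout is NCHW.
known_shape = {x: [100, 1, 28, 28], label: [100]}
init_step = []
for v, name, shape in tf.infer_variable_shapes(
        cross_entropy, feed_dict=known_shape):
    init_step.append(tf.assign(v, tf.normal(shape)))
sess.run(init_step)

# Hypothetical training loop; sparse integer labels match the sparse softmax loss.
mnist = get_mnist(flatten=False, onehot=False)
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, label: batch_ys})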
"""TinyFlow Example code.

Automatic variable creation and shape inductions.
The network structure is directly specified via forward node numbers
The variables are automatically created, and their shape infered by tf.infer_variable_shapes
"""
import tinyflow as tf
from tinyflow.datasets import get_mnist

# Create the model
x = tf.placeholder(tf.float32)
fc1 = tf.nn.linear(x, num_hidden=100, name="fc1", no_bias=False)
relu1 = tf.nn.relu(fc1)
fc2 = tf.nn.linear(relu1, num_hidden=10, name="fc2")

# define loss
label = tf.placeholder(tf.float32)
cross_entropy = tf.nn.mean_sparse_softmax_cross_entropy_with_logits(fc2, label)
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

sess = tf.Session(device='gpu')

# Automatic variable shape inference API: infer the shapes and initialize the weights.
known_shape = {x: [100, 28 * 28], label: [100]}
init_step = []
for v, name, shape in tf.infer_variable_shapes(
        cross_entropy, feed_dict=known_shape):
    init_step.append(tf.assign(v, tf.normal(shape)))
    print("shape[%s]=%s" % (name, str(shape)))
sess.run(init_step)
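The example stops after weight initialization; a hedged training-loop sketch mirroring the other MNIST examples here (onehot=False to match the sparse softmax loss is an assumption):

# Hypothetical training loop; labels stay as sparse integer class ids.
mnist = get_mnist(flatten=True, onehot=False)
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, label: batch_ys})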
Example #17
"""Tinyflow example code.

This code is adapted from Tensorflow's MNIST Tutorial with minimum code changes.
"""
import tinyflow as tf
from tinyflow.datasets import get_mnist

# Create the model
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
y = tf.nn.softmax(tf.matmul(x, W))

# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, 10])

cross_entropy = tf.reduce_mean(
    -tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

sess = tf.Session()
sess.run(tf.initialize_all_variables())

# get the mnist dataset
mnist = get_mnist(flatten=True, onehot=True)

print("minist download is completed!")

for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
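The loop trains but never evaluates; a hedged accuracy sketch that scores the model in NumPy, assuming the dataset object exposes mnist.test.images and mnist.test.labels like the TensorFlow tutorial this example adapts:

import numpy as np  # only needed for this evaluation sketch

# Run the trained model on the test set and score it outside the graph.
test_y = sess.run(y, feed_dict={x: mnist.test.images})
correct = np.argmax(test_y, axis=1) == np.argmax(mnist.test.labels, axis=1)
print("test accuracy: %g" % np.mean(correct))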