Example #1
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
Example #2
import tensorflow as tf

W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

x = tf.placeholder("float", shape=[None, 784])
y_ = tf.placeholder("float", shape=[None, 10])


# Weight Initialization
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)


def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)


# Convolution and Pooling
'''
TensorFlow gives us a lot of flexibility in convolution and pooling
operations. How do we handle the boundaries? What stride size should we
use? In this example, we will always use the vanilla version: our
convolutions use a stride of one and are zero-padded so that the output
is the same size as the input, and our pooling is plain old max pooling
over 2x2 blocks. To keep the code cleaner, we abstract these operations
into functions.
'''


def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
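

# The note above also describes 2x2 max pooling; a matching helper (a
# minimal sketch following the same conventions as conv2d, not part of
# the original listing) could look like:
def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')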
Example #3
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
Example #4
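# Note: this listing starts mid-script; sess, b and c are defined before
# the excerpt begins. A plausible reconstruction (hypothetical values,
# not from the original) that makes the two prints below runnable:
import tensorflow as tf

sess = tf.Session()
a = tf.constant([[1, 3, 2], [4, 0, 5]])
b = tf.argmax(a, 0)  # index of the max along dimension 0 (per column)
c = tf.argmax(a, 1)  # index of the max along dimension 1 (per row)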
print("0维度的最大值的位置", sess.run(b))
print("1维度的最大值的位置", sess.run(c))
"""
tf.equal

tf.equal(x, y, name=None):
判断两个tensor是否每个元素都相等。返回一个格式为bool的tensor
"""
"""
tf.cast

cast(x, dtype, name=None)
将x的数据格式转化成dtype.例如,原来x的数据格式是bool,
那么将其转化成float以后,就能够将其转化成0和1的序列。反之也可以
"""
a = tf.Variable([1, 0, 0, 1, 1])
b = tf.cast(a, dtype=tf.bool)
sess.run(tf.global_variables_initializer())
print("float的数值转化维Bool的类型:", sess.run(b))
"""
tf.matmul

用来做矩阵乘法。若a为l*m的矩阵,b为m*n的矩阵,那么通过tf.matmul(a,b) 结果就会得到一个l*n的矩阵
不过这个函数还提供了很多额外的功能。我们来看下函数的定义:
matmul(a, b,
           transpose_a=False, transpose_b=False,
           a_is_sparse=False, b_is_sparse=False,
           name=None):

可以看到还提供了transpose和is_sparse的选项。
如果对应的transpose项为True,例如transpose_a=True,那么a在参与运算之前就会先转置一下。
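# A small, self-contained illustration of tf.matmul with transpose_a
# (the matrices are made up for the example, not from the original):
m1 = tf.constant([[1., 2.], [3., 4.], [5., 6.]])  # shape 3x2
m2 = tf.constant([[1., 0.], [0., 1.], [2., 2.]])  # shape 3x2
# transpose_a=True transposes m1 to 2x3 before multiplying,
# so the result has shape (2x3) x (3x2) = 2x2
print("matmul with transpose_a:", sess.run(tf.matmul(m1, m2, transpose_a=True)))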
Example #5
    def __init__(self, is_training, config):
        self.batch_size = batch_size = config.batch_size
        self.num_steps = num_steps = config.num_steps
        size = config.hidden_size
        vocab_size = config.vocab_size

        self._input_data = tf.placeholder(tf.int32, [batch_size, num_steps])
        self._targets = tf.placeholder(tf.int32, [batch_size, num_steps])

        # rnn_cell = tf.nn.rnn_cell.BasicLSTMCell(size, forget_bias=1.0, state_is_tuple=True)
        # rnn_cell = rnn_cell_modern.HighwayRNNCell(size)
        # rnn_cell = rnn_cell_modern.JZS1Cell(size)
        # rnn_cell = rnn_cell_mulint_modern.BasicRNNCell_MulInt(size)
        # rnn_cell = rnn_cell_mulint_modern.GRUCell_MulInt(size)
        # rnn_cell = rnn_cell_mulint_modern.BasicLSTMCell_MulInt(size)
        # rnn_cell = rnn_cell_mulint_modern.HighwayRNNCell_MulInt(size)
        # rnn_cell = rnn_cell_mulint_layernorm_modern.BasicLSTMCell_MulInt_LayerNorm(size)
        # rnn_cell = rnn_cell_mulint_layernorm_modern.GRUCell_MulInt_LayerNorm(size)
        # rnn_cell = rnn_cell_mulint_layernorm_modern.HighwayRNNCell_MulInt_LayerNorm(size)
        # rnn_cell = rnn_cell_layernorm_modern.BasicLSTMCell_LayerNorm(size)
        # rnn_cell = rnn_cell_layernorm_modern.GRUCell_LayerNorm(size)
        # rnn_cell = rnn_cell_layernorm_modern.HighwayRNNCell_LayerNorm(size)
        # rnn_cell = rnn_cell_modern.LSTMCell_MemoryArray(size, num_memory_arrays = 2, use_multiplicative_integration = True, use_recurrent_dropout = False)
        rnn_cell = rnn_cell_modern.MGUCell(size,
                                           use_multiplicative_integration=True,
                                           use_recurrent_dropout=False)

        if is_training and config.keep_prob < 1:
            rnn_cell = tf.nn.rnn_cell.DropoutWrapper(
                rnn_cell, output_keep_prob=config.keep_prob)
        cell = tf.nn.rnn_cell.MultiRNNCell([rnn_cell] * config.num_layers,
                                           state_is_tuple=True)

        self._initial_state = cell.zero_state(batch_size, tf.float32)

        with tf.device("/cpu:0"):
            embedding = tf.get_variable("embedding", [vocab_size, size])
            inputs = tf.nn.embedding_lookup(embedding, self._input_data)

        if is_training and config.keep_prob < 1:
            inputs = tf.nn.dropout(inputs, config.keep_prob)

        # Simplified version of tensorflow.models.rnn.rnn.py's rnn().
        # This builds an unrolled LSTM for tutorial purposes only.
        # In general, use the rnn() or state_saving_rnn() from rnn.py.
        #
        # The alternative version of the code below is:
        #
        # from tensorflow.models.rnn import rnn
        # inputs = [tf.squeeze(input_, [1])
        #           for input_ in tf.split(1, num_steps, inputs)]
        # outputs, state = rnn.rnn(cell, inputs, initial_state=self._initial_state)
        outputs = []
        state = self._initial_state
        with tf.variable_scope("RNN"):
            for time_step in range(num_steps):
                if time_step > 0: tf.get_variable_scope().reuse_variables()
                (cell_output, state) = cell(inputs[time_step], state)
                outputs.append(cell_output)

        output = tf.reshape(tf.concat(1, outputs), [-1, size])
        softmax_w = tf.transpose(embedding)  # weight tying
        softmax_b = tf.get_variable("softmax_b", [vocab_size])
        logits = tf.matmul(output, softmax_w) + softmax_b
        loss = tf.nn.seq2seq.sequence_loss_by_example(
            [logits], [tf.reshape(self._targets, [-1])],
            [tf.ones([batch_size * num_steps])])
        self._cost = cost = tf.reduce_sum(loss) / batch_size
        self._final_state = state

        if not is_training:
            return

        self._lr = tf.Variable(0.0, trainable=False)
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),
                                          config.max_grad_norm)
        # optimizer = tf.train.GradientDescentOptimizer(self._lr)
        optimizer = tf.train.AdamOptimizer(self._lr)

        self._train_op = optimizer.apply_gradients(zip(grads, tvars))
Example #6
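# Note: this listing is the tail of an image-display helper plus the
# start of the Mandelbrot example; the helper's name and signature are
# not shown. Imports the fragment relies on (assumed from the standard
# example, not part of the original excerpt):
#
#     import numpy as np
#     import PIL.Image
#     import tensorflow as tf
#     from io import BytesIO
#     from IPython.display import Image, display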
    a = img
    a = np.uint8(np.clip(a, 0, 255))
    f = BytesIO()
    PIL.Image.fromarray(a).save(f, fmt)
    display(Image(data=f.getvalue()))


# Use an interactive session
sess = tf.InteractiveSession()
# Use NumPy to create a 2-D array of complex numbers over [-2,1]x[-1.3,1.3]
y, x = np.mgrid[-1.3:1.3:0.005, -2:1:0.005]
z = x + 1j * y

# Define the TensorFlow tensors
xs = tf.constant(z.astype("complex64"))
zs = tf.Variable(xs)
ns = tf.Variable(tf.zeros_like(xs, "float32"))
# Initialize the variables
tf.global_variables_initializer().run()

# Compute the new value of z: z^2 + x
zs_ = zs * zs + xs

# Will this new value diverge?
not_diverged = tf.abs(zs_) < 4

# Operation to update zs and the iteration count.
#
# Note: we keep computing zs even after the values have diverged, which
#       is very wasteful! There are better, if slightly less simple,
#       ways to do this.
#
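# The excerpt is cut off here; the update op the comment above describes,
# reconstructed from the standard Mandelbrot example (not part of the
# original listing), would be:
step = tf.group(
    zs.assign(zs_),
    ns.assign_add(tf.cast(not_diverged, "float32")))

for i in range(200):
    step.run()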
Example #7
# Some rain drops hit a pond at random points
for n in range(40):
    a, b = np.random.randint(0, N, 2)
    u_init[a, b] = np.random.uniform()

DisplayArray(u_init, rng=[-0.1, 0.1])

# Parameters for the differential equation
# eps -- time resolution
# damping -- wave damping
eps = tf.placeholder(tf.float32, shape=())
damping = tf.placeholder(tf.float32, shape=())

# Create variables for simulation state
U = tf.Variable(u_init)
Ut = tf.Variable(ut_init)

# Discretized PDE update rules
U_ = U + eps * Ut
Ut_ = Ut + eps * (laplace(U) - damping * Ut)

# Operation to update the state
step = tf.group(
    U.assign(U_),
    Ut.assign(Ut_))

# Initialize state to initial conditions
tf.global_variables_initializer().run()

# Run 1000 steps of PDE
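# The listing stops here; the loop it announces, reconstructed from the
# standard PDE example (not part of the original excerpt), would be:
for i in range(1000):
    step.run({eps: 0.03, damping: 0.04})
    DisplayArray(U.eval(), rng=[-0.1, 0.1])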
Example #8
import tensorflow as tf
import numpy as np

# Generate phony data with NumPy, 100 points in total.

x_data = np.float32(np.random.rand(2, 100))  # random input
y_data = np.dot([0.100, 0.200], x_data) + 0.300

# Build a linear model
#
b = tf.Variable(tf.zeros([1]))
W = tf.Variable(tf.random_uniform([1, 2], -1.0, 1.0))
y = tf.matmul(W, x_data) + b

# Minimize the mean squared error
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)

# Initialize the variables
init = tf.global_variables_initializer()

# Launch the graph
sess = tf.Session()
sess.run(init)

# Fit the plane
for step in range(0, 201):
    sess.run(train)
    if step % 20 == 0:
        print(step, sess.run(W), sess.run(b))