Code Example #1
def multiplication_gate():
    # The tf.constant() wrapper can be omitted here; both forms mean the same thing:
    # a = tf.Variable(tf.constant(4.))
    a = tf.Variable(4.)
    x_val = 5.
    x_data = tf.placeholder(dtype=tf.float32)

    multiplication = a * x_data

    # Declare the loss function as the difference between
    # the output and a target value, 50.
    loss = tf.square(multiplication - 50.)

    # Initialize variables
    init = tf.global_variables_initializer()
    sess.run(init)

    # Declare optimizer
    my_opt = tf.train.GradientDescentOptimizer(0.01)
    train_step = my_opt.minimize(loss)

    # Run loop across gate
    print('Optimizing a Multiplication Gate Output to 50.')
    for i in range(25):
        sess.run(train_step, feed_dict={x_data: x_val})
        a_val = sess.run(a)
        mult_output = sess.run(multiplication, feed_dict={x_data: x_val})
        show_values(loss, 'Loss', session=sess, feed_dict={x_data: x_val})
        print("Step #", i, ')', "a * x = ", a_val, '*', x_val, '=',
              mult_output)
        pass
    pass
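# A quick sanity check (a sketch, not part of the original example): for
# loss = (a*x - 50)^2 the gradient is dL/da = 2*(a*x - 50)*x, so plain
# gradient descent with the same learning rate should track what
# tf.train.GradientDescentOptimizer computes above.
def manual_multiplication_gate(a=4.0, x=5.0, lr=0.01, steps=25):
    for _ in range(steps):
        grad = 2.0 * (a * x - 50.0) * x  # analytic gradient of the loss
        a -= lr * grad                   # the same update rule the optimizer applies
    return a

print(manual_multiplication_gate())  # converges toward 50 / 5 = 10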
Code Example #2
def declare_random_tensor():
    number_title = "TensorFlow declaring random tensors"
    print('\n', '-' * 5, number_title, '-' * 5)

    row_dim, col_dim = (13, 12)
    # Uniformly distributed random numbers
    randunif_tsr = tf.random_uniform([row_dim, col_dim], minval=0, maxval=1)
    show_values(randunif_tsr, "randunif_tsr")

    # Normally distributed random numbers
    randnorm_tsr = tf.random_normal([row_dim, col_dim], mean=0.0, stddev=1.0)
    show_values(randnorm_tsr, "randnorm_tsr")

    # Normally distributed random numbers within specified bounds
    runcnorm_tsr = tf.truncated_normal([row_dim, col_dim],
                                       mean=0.0,
                                       stddev=1.0)
    show_values(runcnorm_tsr, "runcnorm_tsr")

    # Randomly shuffle a tensor
    shuffled_output = tf.random_shuffle(randunif_tsr)
    show_values(shuffled_output, "shuffled_output")

    # Randomly crop a tensor
    cropped_output = tf.random_crop(randunif_tsr, [7, 5])
    show_values(cropped_output, "cropped_output")
Code Example #3
def nested_gate():
    a = tf.Variable(1.)
    b = tf.Variable(1.)
    x_val = 5.
    x_data = tf.placeholder(dtype=tf.float32)

    two_gate = a * x_data + b

    # Declare the loss function as the difference between
    # the output and a target value, 50.
    loss = tf.square(tf.subtract(two_gate, 50.))

    # Initialize variables
    init = tf.global_variables_initializer()
    sess.run(init)

    # Declare optimizer
    my_opt = tf.train.GradientDescentOptimizer(0.01)
    train_step = my_opt.minimize(loss)

    # Run loop across gate
    print('\nOptimizing Two Gate Output to 50.')
    for i in range(25):
        sess.run(train_step, feed_dict={x_data: x_val})
        a_val, b_val = (sess.run(a), sess.run(b))
        two_gate_output = sess.run(two_gate, feed_dict={x_data: x_val})
        show_values(loss, 'Loss', session=sess, feed_dict={x_data: x_val})
        print("Step #", i, ')', "a * x + b =", a_val, '*', x_val, '+', b_val,
              '=', two_gate_output)
    pass
Code Example #4
def compare_word_list():
    hypothesis_words = ['bear', 'bar', 'tensor', 'flow', 'internet']
    truth_word = ['beers']

    num_h_words = len(hypothesis_words)
    h_indices = [[xi, 0, yi] for xi, x in enumerate(hypothesis_words)
                 for yi, y in enumerate(x)]
    h_chars = list(''.join(hypothesis_words))

    h3 = tf.SparseTensor(h_indices, h_chars, [num_h_words, 1, 1])

    truth_word_vec = truth_word * num_h_words
    t_indices = [[xi, 0, yi] for xi, x in enumerate(truth_word_vec)
                 for yi, y in enumerate(x)]
    t_chars = list(''.join(truth_word_vec))

    t3 = tf.SparseTensor(t_indices, t_chars, [num_h_words, 1, 1])

    h3_content = sess.run(h3)
    print("h3_content =", h3_content)
    t3_content = sess.run(t3)
    print("t3_content =", t3_content)

    show_values(tf.edit_distance(h3, t3, normalize=True),
                "Edit distance between {} and {}:".format(hypothesis_words, truth_word))
    pass
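# A plain-Python cross-check (a sketch, not part of the original example):
# tf.edit_distance computes the Levenshtein distance, and normalize=True
# divides each distance by the length of the corresponding truth string.
def levenshtein(s, t):
    prev = list(range(len(t) + 1))
    for i, cs in enumerate(s, 1):
        cur = [i]
        for j, ct in enumerate(t, 1):
            cur.append(min(prev[j] + 1,                # deletion
                           cur[j - 1] + 1,             # insertion
                           prev[j - 1] + (cs != ct)))  # substitution
        prev = cur
    return prev[-1]

for w in ['bear', 'bar', 'tensor', 'flow', 'internet']:
    print(w, levenshtein(w, 'beers') / len('beers'))  # e.g. 'bear' -> 2/5 = 0.4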
Code Example #5
def compare_two_word_with_placeholders():
    # Create input data
    hypothesis_words = ['bear', 'bar', 'tensor', 'flow']
    truth_word = ['beers']

    def create_sparse_vec(word_list):
        num_words = len(word_list)
        indices = [[xi, 0, yi] for xi, x in enumerate(word_list)
                   for yi, y in enumerate(x)]
        chars = list(''.join(word_list))
        return tf.SparseTensorValue(indices, chars, [num_words, 1, 1])

    hyp_string_sparse = create_sparse_vec(hypothesis_words)
    truth_string_sparse = create_sparse_vec(truth_word * len(hypothesis_words))

    print("hyp_string_sparse =", hyp_string_sparse)
    print("truth_string_sparse =", truth_string_sparse)

    hyp_input = tf.sparse_placeholder(dtype=tf.string)
    truth_input = tf.sparse_placeholder(dtype=tf.string)

    edit_distances = tf.edit_distance(hyp_input, truth_input, normalize=True)
    feed_dict = {
        hyp_input: hyp_string_sparse,
        truth_input: truth_string_sparse
    }
    show_values(edit_distances,
                title="使用 sparse_placeholder 比较单词",
                feed_dict=feed_dict)
    pass
Code Example #6
def compare_one_word():
    truth = list('beers')
    t1 = tf.SparseTensor(
        indices=[[0, 0, 0], [0, 0, 1], [0, 0, 2], [0, 0, 3], [0, 0, 4]],
        values=truth,
        dense_shape=[1, 1, 1])  # dense_shape = [rows of data, words per row, length of the longest word]
    show_values(t1, "t1")

    hypothesis = list('bear')
    h1 = tf.SparseTensor([[0, 0, 0], [0, 0, 1], [0, 0, 2], [0, 0, 3]],
                         hypothesis, [1, 1, 1])
    show_values(tf.edit_distance(h1, t1, normalize=False),
                "Edit distance between {} and {}:".format(hypothesis, truth))

    hypothesis = list('tensor')
    h1 = tf.SparseTensor(
        [[0, 0, 0], [0, 0, 1], [0, 0, 2], [0, 0, 3], [0, 0, 4], [0, 0, 5]],
        hypothesis, [1, 1, 1])
    show_values(tf.edit_distance(h1, t1, normalize=False),
                "Edit distance between {} and {}:".format(hypothesis, truth))

    hypothesis = list('internet')
    h1 = tf.SparseTensor([[0, 0, 0], [0, 0, 1], [0, 0, 2], [0, 0, 3],
                          [0, 0, 4], [0, 0, 5], [0, 0, 6], [0, 0, 7]],
                         hypothesis, [1, 1, 1])
    show_values(tf.edit_distance(h1, t1, normalize=False),
                "Edit distance between {} and {}:".format(hypothesis, truth))
Code Example #7
def declare_seq_tensor():
    number_title = "TensorFlow declaring sequence tensors"
    print('\n', '-' * 5, number_title, '-' * 5)
    linear_seq_tsr = tf.linspace(start=0.0, stop=1.0, num=3)
    show_values(linear_seq_tsr, "linear_seq_tsr")

    integer_seq_tsr = tf.range(start=6, limit=15, delta=3)
    show_values(integer_seq_tsr, "integer_seq_tsr")
Code Example #8
def declare_seq_tensor():
    show_title("TensorFlow 声明序列张量")

    linear_seq_tsr = tf.linspace(start=0.0, stop=1.0, num=3)
    show_values(linear_seq_tsr, "Floating-point sequence tensor", session=sess)

    integer_seq_tsr = tf.range(start=6, limit=15, delta=3)
    show_values(integer_seq_tsr, "Integer sequence tensor", session=sess)
Code Example #9
def custom_layer(input_matrix):
    # tf.squeeze() removes all dimensions of size 1
    # temp_tsr = tf.constant([[2,3]])
    # sess.run(tf.squeeze(temp_tsr)) --> [2,3]
    # temp_tsr = tf.constant([[[[[[2]]], [[[3]]], [[[4]]]]], [[[[[5]]], [[[6]]], [[[7]]]]]])
    # sess.run(tf.squeeze(temp_tsr)) --> [[2,3,4],[5,6,7]]
    # With an axis argument, tf.squeeze() removes only that dimension, and raises an error if its size is not 1.
    input_matrix_squeezed = tf.squeeze(input_matrix)
    A = tf.constant([[1., 2.], [-1., 3.]])
    b = tf.constant(1., shape = [2, 2])
    show_values(input_matrix, "Input matrix", feed_dict = {x_data: x_vals})
    show_values(input_matrix_squeezed, "Squeezed matrix", feed_dict = {x_data: x_vals})
    return tf.sigmoid(A @ input_matrix_squeezed + b)
Code Example #10
def compare_two_words():
    hypothesis2 = list('bearbeer')
    h2 = tf.SparseTensor([[0, 0, 0], [0, 0, 1], [0, 0, 2], [0, 0, 3],
                          [0, 1, 0], [0, 1, 1], [0, 1, 2], [0, 1, 3]],
                         hypothesis2, [1, 2, 4])

    truth2 = list('beersbeers')
    t2 = tf.SparseTensor(
        [[0, 0, 0], [0, 0, 1], [0, 0, 2], [0, 0, 3], [0, 0, 4], [0, 1, 0],
         [0, 1, 1], [0, 1, 2], [0, 1, 3], [0, 1, 4]], truth2, [1, 2, 5])

    show_values(tf.edit_distance(h2, t2, normalize=True),
                "Edit distance between ('bear','beer') and 'beers':")
Code Example #11
def compare_one_word():
    hypothesis = list('bear')
    h1 = tf.SparseTensor([[0, 0, 0], [0, 0, 1], [0, 0, 2], [0, 0, 3]],
                         hypothesis, [1, 1, 1])

    hypothesis = list('tensor')
    h1 = tf.SparseTensor(
        [[0, 0, 0], [0, 0, 1], [0, 0, 2], [0, 0, 3], [0, 0, 4], [0, 0, 5]],
        hypothesis, [1, 1, 1])

    hypothesis = list('internet')
    h1 = tf.SparseTensor([[0, 0, 0], [0, 0, 1], [0, 0, 2], [0, 0, 3],
                          [0, 0, 4], [0, 0, 5], [0, 0, 6], [0, 0, 7]],
                         hypothesis, [1, 1, 1])
    truth = list('beers')
    t1 = tf.SparseTensor(
        [[0, 0, 0], [0, 0, 1], [0, 0, 2], [0, 0, 3], [0, 0, 4]], truth,
        [1, 1, 1])

    # Note: h1 has been reassigned above, so only the last hypothesis ('internet') is compared here.
    show_values(tf.edit_distance(h1, t1, normalize=False),
                "Edit distance between {} and {}:".format(hypothesis, truth))
Code Example #12
def declare_random_tensor():
    show_title("TensorFlow 声明随机张量")

    row_dim, col_dim = (6, 5)

    randunif_tsr = tf.random_uniform([row_dim, col_dim], minval=0, maxval=1)
    show_values(randunif_tsr, "Uniformly distributed random numbers", session=sess)

    randnorm_tsr = tf.random_normal([row_dim, col_dim], mean=0.0, stddev=1.0)
    show_values(randnorm_tsr, "Normally distributed random numbers", session=sess)

    runcnorm_tsr = tf.truncated_normal([row_dim, col_dim],
                                       mean=0.0,
                                       stddev=1.0)
    show_values(runcnorm_tsr, "Normally distributed random numbers within specified bounds", session=sess)

    shuffled_output = tf.random_shuffle(randunif_tsr)
    show_values(shuffled_output, "Randomly shuffled tensor", session=sess)

    cropped_output = tf.random_crop(randunif_tsr, [3, 4])
    show_values(cropped_output, "Randomly cropped tensor", session=sess)
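
    # A quick bounds check (a sketch, not part of the original example):
    # tf.truncated_normal redraws any sample farther than two standard deviations
    # from the mean, so every value lies in [mean - 2*stddev, mean + 2*stddev].
    samples = sess.run(tf.truncated_normal([10000], mean=0.0, stddev=1.0))
    print(samples.min() >= -2.0, samples.max() <= 2.0)  # True True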
Code Example #13
def declare_variable():
    number_title = "TensorFlow 使用变量"
    print('\n', '-' * 5, number_title, '-' * 5)

    # Declare a variable
    my_var = tf.Variable(tf.zeros([1, 20]))

    # Initialize operation
    initialize_op = tf.global_variables_initializer()

    # Run initialization of variable
    sess.run(initialize_op)
    print("my_var = ", my_var)
    print("sess.run(my_var)", sess.run(my_var))

    show_values(initialize_op, "initialize_op")
    # A different session means a different environment to initialize in.
    # show_values("my_var", my_var)

    print('-' * 50)
    first_var = tf.Variable(tf.zeros([2, 3]))
    print("first_var", first_var)
    print("first_var.initializer = ", first_var.initializer)
    print("sess.run(first_var.initializer) = ",
          sess.run(first_var.initializer))
    print("sess.run(first_var) = ", sess.run(first_var))
    # show_values("first_var",first_var)
    # show_values("first_var.initializer", first_var.initializer)

    print('-' * 50)
    second_var = tf.Variable(tf.ones_like(first_var))
    print("second_var", second_var)
    print("second_var.initializer", second_var.initializer)
    print("sess.run(second_var.initializer) = ",
          sess.run(second_var.initializer))
    print("sess.run(second_var) = ", sess.run(second_var))
Code Example #14
def regression_loss_functions():
    session = tf.Session()

    ###### Numerical Predictions ######
    x_vals = tf.linspace(-1., 1., 500)
    target = tf.constant(0.)

    # L2 loss (squared loss; also called the Euclidean loss)
    # L = (pred - actual)^2
    l2_y_vals = tf.square(target - x_vals)
    # show_values(l2_y_vals,"l2_y_vals")
    l2_y_out = session.run(l2_y_vals)

    # L1 loss (absolute-value loss)
    # L = abs(pred - actual)
    l1_y_vals = tf.abs(target - x_vals)
    # show_values(l1_y_vals,"l1_y_vals")
    l1_y_out = session.run(l1_y_vals)

    # Pseudo-Huber loss
    # L = delta^2 * (sqrt(1 + ((pred - actual)/delta)^2) - 1)
    delta1 = tf.constant(0.25)
    phuber1_y_vals = tf.multiply(
        tf.square(delta1),
        tf.sqrt(1. + tf.square((target - x_vals) / delta1)) - 1.)
    # show_values(phuber1_y_vals,"phuber1_y_vals")
    phuber1_y_out = session.run(phuber1_y_vals)

    delta2 = tf.constant(5.)
    phuber2_y_vals = tf.multiply(
        tf.square(delta2),
        tf.sqrt(1. + tf.square((target - x_vals) / delta2)) - 1.)
    # show_values(phuber2_y_vals,"phuber2_y_vals")
    phuber2_y_out = session.run(phuber2_y_vals)

    # Plot the output:
    x_array = show_values(x_vals, "x_vals = ")
    plt.plot(x_array, l2_y_out, 'b-', label='L2 Loss')
    plt.plot(x_array, l1_y_out, 'r--', label='L1 Loss')
    plt.plot(x_array, phuber1_y_out, 'k-.', label='P-Huber Loss (0.25)')
    plt.plot(x_array, phuber2_y_out, 'g:', label='P-Huber Loss (5.0)')
    plt.ylim(-0.2, 0.4)
    plt.legend(loc='lower right', prop={'size': 11})
    plt.title("图2-4:各种回归算法的损失函数")
Code Example #15
def declare_placeholder():
    show_title("TensorFlow 使用占位符")

    x = tf.placeholder(tf.float32, shape = (4, 4))
    y = tf.identity(x)  # returns the data fed into the placeholder unchanged
    z = tf.matmul(y, x)

    x_vals = np.random.rand(4, 4)
    print("随机生成的原始张量(x_vals) = ")
    print(x_vals)
    show_values(y, "tf.identity(tf.placeholder(tf.float32, shape = (4,4)))", session = sess,
                feed_dict = {x: x_vals})
    show_values(tf.matmul(x_vals, x_vals), "tf.matmul(x_vals,x_vals)", session = sess)
    show_values(z, "tf.matmul(y,tf.placeholder(tf.float32, shape = (4,4)))", session = sess,
                feed_dict = {x: x_vals})
Code Example #16
def declare_placeholder():
    x = tf.placeholder(tf.float32, shape=(4, 4))
    y = tf.identity(x)
    z = tf.matmul(y, x)

    x_vals = np.random.rand(4, 4)
    print("x_vals = ")
    print(x_vals)
    show_values(y,
                "tf.identity(tf.placeholder(tf.float32, shape = (4,4)))",
                feed_dict={x: x_vals})
    show_values(tf.matmul(x_vals, x_vals), "tf.matmul(x_vals,x_vals)")
    show_values(z,
                "tf.matmul(2,tf.placeholder(tf.float32, shape = (4,4)))",
                feed_dict={x: x_vals})
Code Example #17
def declare_fix_tensor():
    print("1. 固定张量")
    row_dim, col_dim = (3, 2)
    print("张量行={},列={}".format(row_dim, col_dim))

    zeros_tsr = tf.zeros([row_dim, col_dim])
    with sess.as_default():
        print(zeros_tsr.eval())
    show_values(zeros_tsr, 'zeros_tsr', session=sess)
    print("\t创建指定维度的零张量")

    ones_tsr = tf.ones([row_dim, col_dim])
    show_values(ones_tsr, "ones_tsr", session=sess)
    print("\t创建指定维度的单位张量")

    filled_tsr = tf.fill([row_dim, col_dim], 42)
    show_values(filled_tsr, "filled_tsr", session=sess)
    print("\t创建指定维度的常数填充的张量")

    print('=' * 50)
    print("\t创建常数张量")
    const_tsr = tf.constant([8, 6, 7, 5, 3, 0, 9])
    show_values(const_tsr, "const_tsr", session=sess)
    print("\t\t创建一维常量")

    constant_tsr = tf.constant([[1, 2, 3], [4, 5, 6]])
    show_values(constant_tsr, "constant_tsr", session=sess)
    print("\t\t创建二维常量")

    const_fill_tsr = tf.constant(-1, shape=[row_dim, col_dim])
    show_values(const_fill_tsr, "const_fill_tsr", session=sess)
    print("\t\t填充二维常量")
    pass
Code Example #18
ops.reset_default_graph()
# Python ≥3.5 is required
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
assert sklearn.__version__ >= "0.20"
# Silence the warning: Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Open graph session
sess = tf.Session()

# 1.5 Matrices
number_title = "TensorFlow declaring matrices"
print('\n', '-' * 5, number_title, '-' * 5)
# Identity matrix
identity_matrix = tf.diag([1.0, 1.0, 1.0])
show_values(identity_matrix, "identity_matrix")

# a 12x13 truncated random normal distribution
A = tf.truncated_normal(shape=[12, 13])
show_values(A, "A")

# 12x13 constant matrix, fill matrix with 5.0
B = tf.fill([12, 13], 5.0)
show_values(B, "B")

# a 13x12 random uniform distribution
C = tf.random_uniform(shape=[13, 12])
show_values(C, "C")
show_values(C, "Rerun C")

# Convert a NumPy matrix to a tensor
Code Example #19
# Silence the warning: Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Open graph session
sess = tf.Session()

# 2.2 The computational graph
# Create data to feed in
x_vals = np.array([1., 3., 5., 7., 9.])
# tf.placeholder() declares a placeholder
x_data = tf.placeholder(tf.float32)
m_const = tf.constant(3.)
my_product = tf.multiply(x_data, m_const)
print("x_vals = ", x_vals)
for x_val in x_vals:
    show_values(my_product,
                "tf.multiply(tf.placeholder(tf.float32), tf.constant(3.)) = ",
                feed_dict={x_data: x_val})
    pass
print('-' * 50)
my_product = x_data + m_const
replace_dict = {x_data: 15.}
with sess.as_default():
    print("my_product.eval(feed_dict = replace_dict)",
          my_product.eval(feed_dict=replace_dict))
    pass
print("sess.run(my_product,feed_dict = replace_dict)",
      sess.run(my_product, feed_dict=replace_dict))

if len(plt.get_fignums()) != 0:
    import winsound

    # Beep to signal the run has finished
    winsound.Beep(600, 500)
    plt.show()
Code Example #20
# Reset the default computational graph
ops.reset_default_graph()
# Python ≥3.5 is required
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
assert sklearn.__version__ >= "0.20"
# Silence the warning: Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Open graph session
sess = tf.Session()

test_data = [-10., -3., -1., 0., 1., 3., 10.]

# Piecewise-linear nonlinear functions
# 1. Rectified Linear Unit (ReLU), a nonlinear function, max(0,x): continuous but not smooth
show_values(tf.nn.relu(test_data), "tf.nn.relu({})".format(test_data))
# 2. ReLU6, min(max(0,x),6): fast to compute and mitigates the vanishing-gradient problem
show_values(tf.nn.relu6(test_data), "tf.nn.relu6({})".format(test_data))
# 6. softplus, a smooth version of ReLU, log(exp(x)+1)
show_values(tf.nn.softplus(test_data), "tf.nn.softplus({})".format(test_data))
# 7. Exponential Linear Unit (ELU) activation function:
# similar to softplus, except that as the input goes to negative infinity it approaches -1, while softplus approaches 0.
show_values(tf.nn.elu(test_data), "tf.nn.elu({})".format(test_data))

# These are all logistic-like functions
# 3. sigmoid, the logistic function, 1/(1+exp(-x)): the most common continuous, smooth activation function
# The sigmoid's output range is 0 to 1
show_values(tf.nn.sigmoid(test_data), "tf.nn.sigmoid({})".format(test_data))
# 4. Hyperbolic tangent (tanh), (exp(x)-exp(-x))/(exp(x)+exp(-x))
# The tanh's output range is -1 to 1
show_values(tf.nn.tanh(test_data), "tf.nn.tanh({})".format(test_data))
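
# A numeric check of the ranges stated above (a sketch; assumes numpy is
# imported as np, as elsewhere in these examples): sigmoid maps into (0, 1)
# and tanh into (-1, 1).
x_arr = np.array(test_data)
sigmoid_vals = 1.0 / (1.0 + np.exp(-x_arr))
print(sigmoid_vals.min() > 0.0, sigmoid_vals.max() < 1.0)       # True True
print(np.tanh(x_arr).min() > -1.0, np.tanh(x_arr).max() < 1.0)  # True True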
Code Example #21
# Reset the default computational graph
ops.reset_default_graph()
# Python ≥3.5 is required
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
assert sklearn.__version__ >= "0.20"
# Silence the warning: Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Open graph session
sess = tf.Session()

# 1.5 Matrices
show_title("TensorFlow declaring matrices")

identity_matrix = tf.diag([1.0, 1.0, 1.0])
show_values(identity_matrix, "Identity matrix")

A = tf.truncated_normal(shape=[12, 13])
show_values(A, "A = 12x13 truncated random normal distribution")

B = tf.fill([12, 13], 5.0)
show_values(B, "B = 12x13 constant matrix, fill matrix with 5.0")

C = tf.random_uniform(shape=[13, 12])
show_values(C, "C = 13x12 random uniform distribution")
show_values(C, "再运行一次C = 13x12 random uniform distribution")

D = tf.convert_to_tensor(
    np.array([[16., 4., 4., -4.], [4., 10., 4., 2.], [4., 4., 6., -2.],
              [-4., 2., -2., 4.]]))
show_values(D, "D = NumPy matrix converted to a tensor")
Code Example #22
# (This snippet begins mid-statement; the call below is reconstructed from the
# surrounding SVM prediction pattern, where `prediction` is assumed to be the
# class-prediction op evaluated at the grid points.)
evaluations = sess.run(prediction,
                       feed_dict={
                           x_data: rand_x,
                           y_target: rand_y,
                           prediction_grid: new_points
                       })

for ix, p in enumerate(new_points):
    print('{} : class = {}'.format(p, evaluations[ix]))
pass

gamma = tf.constant(-50.0)
dist = tf.reshape(tf.reduce_sum(tf.square(x_data), 1), [-1, 1])
data_dist = tf.multiply(2., tf.matmul(x_data, tf.transpose(x_data)))
sq_dists = tf.add(tf.subtract(dist, data_dist), tf.transpose(dist))
my_kernel = tf.exp(tf.multiply(gamma, tf.abs(sq_dists)))
show_values(dist, "dist", feed_dict={x_data: rand_x})
show_values(data_dist, "data_dist", feed_dict={x_data: rand_x})
show_values(tf.subtract(dist, data_dist),
            "tf.subtract(dist, data_dist)",
            feed_dict={x_data: rand_x})
show_values(sq_dists, "sq_dists", feed_dict={x_data: rand_x})
show_values(prediction_output,
            "prediction_output",
            feed_dict={
                x_data: rand_x,
                y_target: rand_y,
                prediction_grid: new_points
            },
            session=sess)
# (The original snippet is truncated here; the remaining arguments are assumed
# to mirror the prediction_output call above.)
show_values(pred_kernel,
            "pred_kernel",
            feed_dict={
                x_data: rand_x,
                y_target: rand_y,
                prediction_grid: new_points
            },
            session=sess)
Code Example #23
# Open graph session
sess = tf.Session()


# 2.4 Working with multiple layers
x_shape = [1, 4, 4, 1]
x_vals = np.random.uniform(size = x_shape)
# x_vals = np.array([x_vals, x_vals +1])
print("x_vals = ")
print(x_vals)

x_data = tf.placeholder(tf.float32, shape = x_shape)

# the convolution filter
my_filter = tf.constant(0.25, shape = [2, 2, 1, 1])
show_values(my_filter, "my_filter")
# the stride
my_strides = [1, 2, 2, 1]
mov_avg_layer = tf.nn.conv2d(x_data, my_filter, my_strides, padding = 'SAME', name = 'Moving_Avg_Window')
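
# (A sketch, not in the original example.) With a constant 2x2 filter of 0.25
# and stride 2, this conv layer averages each non-overlapping 2x2 block of the
# 4x4 input; the NumPy loop below reproduces the expected 2x2 output.
expected_avg = np.array([[x_vals[0, i:i + 2, j:j + 2, 0].mean() for j in (0, 2)]
                         for i in (0, 2)])
print("expected moving average =\n", expected_avg)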


# Define a custom layer which will be sigmoid(Ax+b) where
# x is a 2x2 matrix and A and b are 2x2 matrices
def custom_layer(input_matrix):
    # tf.squeeze() removes all dimensions of size 1
    # temp_tsr = tf.constant([[2,3]])
    # sess.run(tf.squeeze(temp_tsr)) --> [2,3]
    # temp_tsr = tf.constant([[[[[[2]]], [[[3]]], [[[4]]]]], [[[[[5]]], [[[6]]], [[[7]]]]]])
    # sess.run(tf.squeeze(temp_tsr)) --> [[2,3,4],[5,6,7]]
    input_matrix_squeezed = tf.squeeze(input_matrix)
    A = tf.constant([[1., 2.], [-1., 3.]])
    b = tf.constant(1., shape=[2, 2])
    return tf.sigmoid(A @ input_matrix_squeezed + b)
Code Example #24
def declare_variable():
    show_title("TensorFlow 使用变量")

    # Declare a variable
    my_var = tf.Variable(tf.zeros([1, 20]))

    print("全局初始化变量")
    # init = tf.initialize_all_variables()  # 这个已经被废弃了
    initialize_op = tf.global_variables_initializer()  # Initialize operation
    sess.run(initialize_op)  # Run initialization of variable

    print("my_var = ", my_var)
    print("sess.run(my_var)", sess.run(my_var))

    show_values(initialize_op, "initialize_op", session = sess)
    # A different session means a different environment to initialize in.
    show_values(my_var, "my_var", session = sess)

    print('-' * 50)
    print("每个变量独自初始化。。。")
    first_var = tf.Variable(tf.zeros([2, 3]))
    print("first_var", first_var)
    print("first_var.initializer = ", first_var.initializer)
    print("sess.run(first_var.initializer) = ", sess.run(first_var.initializer))
    print("sess.run(first_var) = \n", sess.run(first_var))
    show_values(first_var, "first_var", session = sess)
    show_values(first_var.initializer, "first_var.initializer", session = sess)

    print('-' * 50)
    second_var = tf.Variable(tf.ones_like(first_var))
    print("second_var", second_var)
    print("second_var.initializer", second_var.initializer)
    print("sess.run(second_var.initializer) = ", sess.run(second_var.initializer))
    print("sess.run(second_var) = \n", sess.run(second_var))
    show_values(second_var.initializer, "second_var.initializer", session = sess)
    show_values(second_var, "second_var", session = sess)
Code Example #25
def classification_loss_functions():
    session = tf.Session()

    ###### Categorical Predictions ######
    x_vals = tf.linspace(-3., 5., 500)
    target = tf.constant(1.)
    targets = tf.fill([500], 1.)

    # Hinge loss
    # Use for predicting binary (-1, 1) classes
    # Mainly used to evaluate support vector machines, but can also evaluate neural networks
    # The exact form depends on the algorithm; the formula below is for reference only
    # L = max(0, 1 - (pred * actual))
    hinge_y_vals = tf.maximum(0., 1. - tf.multiply(target, x_vals))
    # hinge_y_out = show_values( hinge_y_vals,"hinge_y_vals")
    hinge_y_out = session.run(hinge_y_vals)

    # Cross entropy loss
    # L = -actual * (log(pred)) - (1-actual)(log(1-pred))
    xentropy_y_vals = -tf.multiply(target, tf.log(x_vals)) - tf.multiply(
        (1. - target), tf.log(1. - x_vals))
    # xentropy_y_out = show_values( xentropy_y_vals,"xentropy_y_vals")
    xentropy_y_out = session.run(xentropy_y_vals)

    # Sigmoid cross entropy loss
    # L = -actual * (log(sigmoid(pred))) - (1-actual)(log(1-sigmoid(pred)))
    # or
    # L = max(pred, 0) - pred * actual + log(1 + exp(-abs(pred)))
    xentropy_sigmoid_y_vals = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=targets, logits=x_vals)
    # show_values(xentropy_sigmoid_y_vals,"xentropy_sigmoid_y_vals")
    xentropy_sigmoid_y_out = session.run(xentropy_sigmoid_y_vals)

    # Weighted (sigmoid) cross entropy loss
    # L = targets * -log(sigmoid(logits)) * pos_weight + (1 - targets) * -log(1 - sigmoid(logits))
    # L = -actual * (log(pred)) * weights - (1-actual)(log(1-pred))
    # or
    # L = (1 - pred) * actual + (1 + (weights - 1) * pred) * log(1 + exp(-actual))
    weight = tf.constant(0.5)
    xentropy_weighted_y_vals = tf.nn.weighted_cross_entropy_with_logits(
        targets=targets, logits=x_vals, pos_weight=weight)
    # show_values(xentropy_weighted_y_vals,"xentropy_weighted_y_vals")
    xentropy_weighted_y_out = session.run(xentropy_weighted_y_vals)

    # Plot the output
    x_array = session.run(x_vals)
    plt.plot(x_array, hinge_y_out, 'b-', label='Hinge Loss')
    plt.plot(x_array, xentropy_y_out, 'r--', label='Cross Entropy Loss')
    plt.plot(x_array,
             xentropy_sigmoid_y_out,
             'k-.',
             label='Cross Entropy Sigmoid Loss')
    plt.plot(x_array,
             xentropy_weighted_y_out,
             'g:',
             label='Weighted Cross Entropy Loss (x0.5)')
    plt.ylim(-1.5, 3)
    # plt.xlim(-1, 3)
    plt.legend(loc='lower right', prop={'size': 11})
    plt.title("图2-5:各种分类算法的损失函数")

    # Softmax cross entropy loss
    # L = -actual * (log(softmax(pred))) - (1-actual)(log(1-softmax(pred)))
    unscaled_logits = tf.constant([[1., -3., 10.]])
    target_dist = tf.constant([[0.1, 0.02, 0.88]])
    softmax_xentropy = tf.nn.softmax_cross_entropy_with_logits(
        logits=unscaled_logits, labels=target_dist)
    show_values(softmax_xentropy, "softmax_xentropy")
    # print(session.run(softmax_xentropy))

    # Sparse softmax cross entropy loss
    # Use when classes and targets have to be mutually exclusive
    # L = sum( -actual * log(pred) )
    unscaled_logits = tf.constant([[1., -3., 10.]])
    sparse_target_dist = tf.constant([2])
    sparse_xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=unscaled_logits, labels=sparse_target_dist)
    show_values(sparse_xentropy, "sparse_xentropy")
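
    # A numeric check (a sketch, not in the original code; assumes numpy is
    # imported as np): the stable form max(x,0) - x*z + log(1 + exp(-|x|))
    # quoted above matches the naive -z*log(sig(x)) - (1-z)*log(1-sig(x))
    # for an illustrative logit x and label z.
    x, z = 2.5, 1.0
    stable = max(x, 0) - x * z + np.log1p(np.exp(-abs(x)))
    sig = 1.0 / (1.0 + np.exp(-x))
    print(stable, -z * np.log(sig) - (1 - z) * np.log(1 - sig))  # equal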
Code Example #26
def declare_fix_tensor():
    # 1. Fixed tensors
    row_dim, col_dim = (3, 2)
    # Create a zero tensor of the given shape
    # Zero initialized variable
    zeros_tsr = tf.zeros([row_dim, col_dim])
    with sess.as_default():
        print(zeros_tsr.eval())
    show_values(zeros_tsr, 'zeros_tsr')

    # Create a ones tensor of the given shape
    # One initialized variable
    ones_tsr = tf.ones([row_dim, col_dim])
    show_values(ones_tsr, "ones_tsr")

    # Create a tensor of the given shape filled with a constant
    filled_tsr = tf.fill([row_dim, col_dim], 42)
    show_values(filled_tsr, "filled_tsr")

    # Create constant tensors
    # Fill shape with a constant
    constant_tsr = tf.constant([[1, 2, 3], [4, 5, 6]])
    show_values(constant_tsr, "constant_tsr")

    # Create a variable from a constant
    const_tsr = tf.constant([8, 6, 7, 5, 3, 0, 9])
    show_values(const_tsr, "const_tsr")
    # This can also be used to fill an array:
    const_fill_tsr = tf.constant(-1, shape=[row_dim, col_dim])
    show_values(const_fill_tsr, "const_fill_tsr")
    pass
Code Example #27
    # (This snippet begins mid-loop in a k-NN batch-evaluation example; the
    # opening of this feed_dict was truncated, so the keys below are assumed
    # from the names referenced later in the snippet.)
    feed_dict = {
        x_data_train: x_vals_train,
        x_data_test: x_batch,
        y_target_train: y_vals_train,
        y_target_test: y_batch
    }
    predictions = sess.run(prediction, feed_dict=feed_dict)
    batch_mse = sess.run(mse, feed_dict=feed_dict)
    print("Batch #", i + 1)
    print("MSE:", np.round(batch_mse, 3))
    pass

# Plot prediction and actual distribution
# bins = np.linspace(5, 50, 45)
#
# plt.hist(predictions, bins, alpha = 0.5, label = 'Prediction')
# plt.hist(y_batch, bins, alpha = 0.5, label = 'Actual')
# plt.title("图5-1:预测值和实际值对比的直方图(k-NN算法),k={}".format(k))
# plt.xlabel("Med Home Value in $1,000s")
# plt.ylabel('Frequency')
# plt.legend(loc = "upper right")

show_values(x_sums, "x_sums", feed_dict=feed_dict, session=sess)
# show_values(x_sums_repeated, "x_sums_repeated", feed_dict = feed_dict, session = sess)
# show_values(y_target_train, "y_target_train", feed_dict = feed_dict, session = sess)
# show_values(top_k_indices, "top_k_indices", feed_dict = feed_dict, session = sess)
# show_values(top_k_yvals, "top_k_yvals", feed_dict = feed_dict, session = sess)

if len(plt.get_fignums()) != 0:
    import winsound

    # Beep to signal the run has finished
    winsound.Beep(600, 500)
    plt.show()
pass
Code Example #28
# Seed the random number generator so the random data is stable and every test run gives the same results
np.random.seed(42)

# Reset the default computational graph
ops.reset_default_graph()
# Python ≥3.5 is required
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
assert sklearn.__version__ >= "0.20"
# Silence the warning: Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Open graph session
sess = tf.Session()

show_values(tf.div(3, 4), "tf.div(3,4) = integer division")
show_values(tf.truediv(3, 4), "tf.truediv(3,4) = true (floating-point) division")
show_values(tf.floordiv(3.0, 4.0), "tf.floordiv(3.0,4.0) = floor division on floats")
show_values(tf.mod(22.0, 5.0), "tf.mod(22.0,5.0) = modulo")
# Cross product -- compute the pairwise cross product
# The cross product (vector product) of two vectors yields a vector, not a scalar.
# The resulting vector is perpendicular to the plane spanned by the two inputs.
show_values(tf.cross([1., 0., 0.], [0., 1., 0.]),
            "tf.cross([1., 0., 0.], [0., 1., 0.]) = cross product")
# tf.cross() requires 3-element vectors
# show_values(tf.cross([1., 0., 0., 0.], [0., 1., 0., 0.]),
#             "tf.cross([1., 0., 0.,0.], [0., 1., 0.,0.]) = cross product")

# ToSee: p. 11, the list of math functions

show_values(tf.div(tf.sin(3.1416 / 4.), tf.cos(3.1416 / 4.)),
            "tan(pi/4) = sin(pi/4) / cos(pi/4)")
Code Example #29
def declare_similar_tensor():
    show_title("TensorFlow 声明相似形状的张量")

    int_const_tsr = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    show_values(int_const_tsr, "Integer constant tensor")

    zeros_similar_tsr = tf.zeros_like(int_const_tsr)
    show_values(zeros_similar_tsr, "Zero tensor of similar shape")

    ones_similar_tsr = tf.ones_like(int_const_tsr)
    show_values(ones_similar_tsr, "Ones tensor of similar shape", session=sess)

    print('=' * 50)
    print("运算符重载")
    add_tsr = int_const_tsr + int_const_tsr
    show_values(add_tsr, "Adding two tensors (int_const_tsr + int_const_tsr)", session=sess)

    multiply_tsr = int_const_tsr * int_const_tsr
    show_values(multiply_tsr,
                "两个张量相乘(int_const_tsr * int_const_tsr)",
                session=sess)

    neg_constant_tsr = -int_const_tsr
    show_values(neg_constant_tsr, "Negated tensor (-int_const_tsr)", session=sess)

    number_multiply_tsr = 2 * int_const_tsr
    show_values(number_multiply_tsr, "Scalar times tensor (2 * int_const_tsr)", session=sess)

    abs_tsr = abs(neg_constant_tsr)
    show_values(abs_tsr, "Absolute value of a tensor (abs(neg_constant_tsr))", session=sess)

    minus_tsr = abs_tsr - neg_constant_tsr
    show_values(minus_tsr, "Subtracting two tensors (abs_tsr - neg_constant_tsr)", session=sess)

    divide_tsr = multiply_tsr / neg_constant_tsr
    show_values(divide_tsr,
                "两个张量相除divide_tsr =(multiply_tsr / neg_constant_tsr)",
                session=sess)

    print('=' * 50)
    real_const_tsr = tf.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]],
                                 dtype=tf.float64)
    show_values(real_const_tsr, "Floating-point constant tensor (real_const_tsr)", session=sess)

    a_one = real_const_tsr * real_const_tsr  # element-wise multiplication, not matrix multiplication
    show_values(a_one, "Element-wise product of two tensors (real_const_tsr * real_const_tsr)", session=sess)

    a_floor_div = real_const_tsr // divide_tsr
    show_values(a_floor_div,
                "两个张量整除(real_const_tsr // divide_tsr)",
                session=sess)

    a_mod = real_const_tsr % divide_tsr
    show_values(a_mod, "Remainder of two tensors (real_const_tsr % divide_tsr)", session=sess)

    a_power = real_const_tsr**real_const_tsr
    show_values(a_power, "Element-wise power of two tensors (real_const_tsr ** real_const_tsr)", session=sess)

    a_matrix_multiply = real_const_tsr @ real_const_tsr
    show_values(a_matrix_multiply, "Matrix product of two tensors (real_const_tsr @ real_const_tsr)")
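
# The overloaded operators above map onto TensorFlow ops (a sketch, not in the
# original code): + is tf.add, * is tf.multiply, // is tf.floordiv, % is tf.mod,
# ** is tf.pow, and @ is tf.matmul.
m = tf.constant([[1., 2.], [3., 4.]])
print(sess.run(tf.reduce_all(tf.equal(m @ m, tf.matmul(m, m)))))  # True
print(sess.run(tf.reduce_all(tf.equal(m ** 2., tf.pow(m, 2.)))))  # True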
Code Example #30
def declare_similar_tensor():
    number_title = "TensorFlow declaring tensors of similar shape"
    print('\n', '-' * 5, number_title, '-' * 5)

    constant_tsr = tf.constant([[1, 2, 3], [4, 5, 6]])

    # Zero tensor of similar shape
    zeros_similar_tsr = tf.zeros_like(constant_tsr)
    show_values(zeros_similar_tsr, "zeros_similar_tsr")

    # Ones tensor of similar shape
    ones_similar_tsr = tf.ones_like(constant_tsr)
    show_values(ones_similar_tsr, "ones_similar_tsr")

    # Operator overloading
    two_similar_tsr = ones_similar_tsr + ones_similar_tsr
    show_values(two_similar_tsr, "two_similar_tsr")

    four_similar_tsr = two_similar_tsr * two_similar_tsr
    show_values(four_similar_tsr, "four_similar_tsr")

    neg_four_similar_tsr = -four_similar_tsr
    show_values(neg_four_similar_tsr, "neg_four_similar_tsr")

    neg_eight_similar_tsr = 2 * neg_four_similar_tsr
    show_values(neg_eight_similar_tsr, "neg_eight_similar_tsr")

    eight_similar_tsr = abs(neg_eight_similar_tsr)
    show_values(eight_similar_tsr, "eight_similar_tsr")

    twelve_similar_tsr = eight_similar_tsr - neg_four_similar_tsr
    show_values(twelve_similar_tsr, "twelve_similar_tsr")

    three_similar_tsr = twelve_similar_tsr / four_similar_tsr
    show_values(three_similar_tsr, "three_similar_tsr")

    a = tf.constant([[1., 2., 3.], [4., 5., 6.]], dtype=tf.float64)
    a_one = a * a  # element-wise multiplication, not matrix multiplication
    show_values(a_one, "a_one")

    a_floor_div = a // three_similar_tsr
    show_values(a_floor_div, "a_floor_div")

    a_mod = a % three_similar_tsr
    show_values(a_mod, "a_mod")

    a_power = a**three_similar_tsr
    show_values(a_power, "a_power")