Code Example #1
import tensorflow as tf

def variable_with_weight_losses(shape, stddev, wl):
    # Weight-initialization helper; as before, weights are drawn from a truncated
    # normal distribution via tf.truncated_normal.
    var = tf.Variable(tf.truncated_normal(shape, stddev=stddev))
    if wl is not None:
        # Attach an L2 loss to the weights, which amounts to L2 regularization.
        # In machine learning, both classification and regression can overfit when
        # there are too many features; this is usually mitigated by dropping features
        # or penalizing the weights of unimportant ones.
        # Since we rarely know which feature weights should be penalized, regularization
        # does it for us: the feature weights themselves become part of the model's loss.
        # The wl argument controls the magnitude of the L2 loss.
        weight_loss = tf.multiply(tf.nn.l2_loss(var), wl, name='weight_loss')
        # tf.add_to_collection stores each weight loss in a collection named "losses",
        # which is used later when computing the network's total loss.
        tf.add_to_collection("losses", weight_loss)
    return var
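
The comments above note that the "losses" collection is used later when assembling the network's total loss. A minimal sketch of that step, assuming a hypothetical data-fit term named cross_entropy (not defined in the original):

# Hypothetical data-fit loss; in the full pipeline this would be the network's
# cross-entropy between logits and labels.
tf.add_to_collection('losses', cross_entropy)

# Total loss = data-fit loss + every L2 weight penalty stored in "losses".
total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')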
Code Example #2


weight1 = variable_with_weight_losses(shape=[5, 5, 3, 64], stddev=5e-2, wl=0.0)
kernel_1 = tf.nn.conv2d(image_holder,
                        filter=weight1,
                        strides=[1, 1, 1, 1],
                        padding='SAME')
bias_1 = tf.Variable(tf.constant(0.0, shape=[64]))
conv_1 = tf.nn.relu(tf.nn.bias_add(kernel_1, bias_1))
pool_1 = tf.nn.max_pool(conv_1,
                        ksize=[1, 3, 3, 1],
                        strides=[1, 2, 2, 1],
                        padding='SAME')
norm1 = tf.nn.lrn(pool_1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

# Second convolutional layer
# The first layer used 64 kernels, i.e. it outputs 64 channels, so the third
# dimension of this layer's kernel shape (the input channel count) must also be 64.
weight2 = variable_with_weight_losses(shape=[5, 5, 64, 64],
                                      stddev=5e-2,
                                      wl=0.0)
kernel_2 = tf.nn.conv2d(norm1, weight2, [1, 1, 1, 1], padding='SAME')
bias_2 = tf.Variable(tf.constant(0.1, shape=[64]))
conv_2 = tf.nn.relu(tf.nn.bias_add(kernel_2, bias_2))
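
In the classic CIFAR-10 layout, this second convolution is typically followed by the same LRN and max-pooling treatment as the first layer, with LRN applied before pooling this time. A hedged sketch of that continuation, not part of the original:

norm_2 = tf.nn.lrn(conv_2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
pool_2 = tf.nn.max_pool(norm_2,
                        ksize=[1, 3, 3, 1],
                        strides=[1, 2, 2, 1],
                        padding='SAME')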
Code Example #3
# -*- coding: utf-8 -*-
"""

@author: chenhao
"""

import tensorflow as tf

# [batch, in_height, in_width, in_channels] [images per training batch, image height, image width, number of channels]
input = tf.Variable(tf.constant(1.0, shape=[1, 5, 5, 1]))
input2 = tf.Variable(tf.constant(1.0, shape=[1, 5, 5, 2]))
input3 = tf.Variable(tf.constant(1.0, shape=[1, 4, 4, 1]))

# [filter_height, filter_width, in_channels, out_channels] [kernel height, kernel width, input channel count, number of kernels]
filter1 = tf.Variable(tf.constant([-1.0, 0, 0, -1], shape=[2, 2, 1, 1]))
filter2 = tf.Variable(tf.constant([-1.0, 0, 0, -1, -1.0, 0, 0, -1], shape=[2, 2, 1, 2]))
filter3 = tf.Variable(tf.constant([-1.0, 0, 0, -1, -1.0, 0, 0, -1, -1.0, 0, 0, -1], shape=[2, 2, 1, 3]))
filter4 = tf.Variable(tf.constant([-1.0, 0, 0, -1,
                                   -1.0, 0, 0, -1,
                                   -1.0, 0, 0, -1,
                                   -1.0, 0, 0, -1], shape=[2, 2, 2, 2]))
filter5 = tf.Variable(tf.constant([-1.0, 0, 0, -1, -1.0, 0, 0, -1], shape=[2, 2, 2, 1]))

# With padding='VALID' the edges are not padded; with 'SAME' the input is padded
# so that the kernel can reach the image edges.
op1 = tf.nn.conv2d(input, filter1, strides=[1, 2, 2, 1], padding='SAME')  # 1 input channel, produces 1 feature map
op2 = tf.nn.conv2d(input, filter2, strides=[1, 2, 2, 1], padding='SAME')  # 1 input channel, produces 2 feature maps
op3 = tf.nn.conv2d(input, filter3, strides=[1, 2, 2, 1], padding='SAME')  # 1 input channel, produces 3 feature maps

op4 = tf.nn.conv2d(input2, filter4, strides=[1, 2, 2, 1], padding='SAME')  # 2 input channels, produce 2 feature maps
op5 = tf.nn.conv2d(input2, filter5, strides=[1, 2, 2, 1], padding='SAME')  # 2 input channels, produce 1 feature map
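
To see what these ops actually produce, the variables must be initialized and the ops evaluated in a session; a minimal sketch:

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # With a stride of 2 and SAME padding, each 5x5 input yields 3x3 feature maps.
    for name, op in [('op1', op1), ('op2', op2), ('op3', op3), ('op4', op4), ('op5', op5)]:
        print(name, sess.run(op).shape)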
Code Example #4
import numpy as np
import tensorflow as tf

# tf.matmul example: x and w were undefined here, so define two small matrices
# (the concrete values are illustrative assumptions, not from the original).
x = tf.constant([[1.0, 2.0]])
w = tf.constant([[3.0], [4.0]])
y = tf.matmul(x, w)
print(y)  # prints the Tensor object, not its value
with tf.Session() as sess:
    print(sess.run(y))  # [[11.]]

## tf.reduce_sum usage; keepdims preserves the reduced dimension
x = tf.constant([[1, 1, 1], [1, 1, 1]])
tf.reduce_sum(x)  # 6
tf.reduce_sum(x, 0)  # [2, 2, 2]
tf.reduce_sum(x, 1)  # [3, 3]
tf.reduce_sum(x, reduction_indices=[1])  # [3, 3]; reduction_indices is the deprecated predecessor of axis
tf.reduce_sum(x, 1, keepdims=True)  # [[3], [3]]
tf.reduce_sum(x, [0, 1])  # 6
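
These calls only build graph nodes; evaluating one in a session shows the values given in the comments:

with tf.Session() as sess:
    print(sess.run(tf.reduce_sum(x, 1, keepdims=True)))  # [[3], [3]]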

x = tf.placeholder(tf.float32, shape=(None, 2))
w1 = tf.Variable(tf.random_normal([2, 3], stddev=1.0, seed=1))
w2 = tf.Variable(tf.random_normal([3, 1], stddev=1.0, seed=1))
c1 = tf.matmul(x, w1)
y = tf.matmul(c1, w2)
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    result = sess.run(y, feed_dict={x: [[1.0, 2.0], [0.1, 0.3]]})
    print('y:', result)
rand = np.random.RandomState(333)
X = rand.rand(32, 2)

# height, width, channels, number of kernels
filters_test = np.zeros(shape=(3, 3, 1, 2), dtype=np.float32)
print(filters_test)
print("--------------------")
Code Example #5
def weight_variable(shape):
    # Create a weight Variable initialized from a truncated normal distribution
    # (stddev 0.1) to break symmetry between units.
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
Code Example #6
def bias_variable(shape):
    # Create a bias Variable initialized to a small positive constant (0.1),
    # a common choice with ReLU units so they are active at the start of training.
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)