Example No. 1
def neural_network(x):
    # Hidden fully connected layer
    layer_1 = tf.add(tf.matmul(x,weights['h1']),biases['b1'])
    # Hidden layer -- second layer
    layer_2 = tf.add(tf.matmul(layer_1,weights['h2']),biases['b2'])
    # Output layer
    out_layers = tf.matmul(layer_2,weights['out']) + biases['out']
    return out_layers
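The function above expects weights and biases dictionaries defined elsewhere in the script. A minimal sketch of what they might look like for an MNIST-style setup follows; the layer sizes (784, 256, 256, 10) are assumptions, not taken from the original snippet.

import tensorflow as tf

# Assumed layer sizes; the original snippet does not show them
n_input, n_hidden_1, n_hidden_2, n_classes = 784, 256, 256, 10

weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}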
Example No. 2
def encoder(x):
    layer_1 = tf.nn.sigmoid(
        tf.add(tf.matmul(x, weights['encoder_h1']), biases['encoder_b1']))

    layer_2 = tf.nn.sigmoid(
        tf.add(tf.matmul(layer_1, weights['encoder_h2']),
               biases['encoder_b2']))
    return layer_2
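In an autoencoder this encoder is normally paired with a decoder that mirrors it. The sketch below assumes decoder_h1/decoder_h2 and decoder_b1/decoder_b2 entries in the same weights and biases dictionaries, which the original snippet does not show.

def decoder(x):
    # Mirror of the encoder above; the dictionary key names are assumptions
    layer_1 = tf.nn.sigmoid(
        tf.add(tf.matmul(x, weights['decoder_h1']), biases['decoder_b1']))
    layer_2 = tf.nn.sigmoid(
        tf.add(tf.matmul(layer_1, weights['decoder_h2']),
               biases['decoder_b2']))
    return layer_2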
Example No. 3
def multilayer_perceptron(x, weights, biases):
    # Hidden layer with RELU activation
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    # Hidden layer with RELU activation
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)
    # Output layer with linear activation
    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    return out_layer
def conv_network(x, weights, biases, dropout):
    # The MNIST input is a 1-D vector of 784 values; reshape it into
    # a 4-D tensor of shape [batch_size, height, width, depth]
    x = tf.reshape(x, shape=[-1, 28, 28, 1])

    # Convolution layer 1
    conv1 = conv2d(x, weights['wc1'], biases['bc1'])
    # max pooling
    conv1 = maxpool2d(conv1, k=2)

    # Convolution layer 2
    conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
    conv2 = maxpool2d(conv2, k=2)

    # Fully connected layer
    # Flatten conv2 so it can be fed into the fully connected layer
    fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])
    fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
    fc1 = tf.nn.relu(fc1)

    # Dropout
    fc1 = tf.nn.dropout(fc1, dropout)
    out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
    return out
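conv_network calls conv2d and maxpool2d helpers that are not shown in the snippet. A minimal version consistent with the call sites (stride 1 and 'SAME' padding are assumptions) is:

def conv2d(x, W, b, strides=1):
    # 2-D convolution, bias add, then ReLU
    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
    x = tf.nn.bias_add(x, b)
    return tf.nn.relu(x)

def maxpool2d(x, k=2):
    # k x k max pooling with stride k
    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
                          padding='SAME')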
import numpy as np
import tensorflow as tf

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

Xtr, Ytr = mnist.train.next_batch(5000)
Xte, Yte = mnist.test.next_batch(200)

xtr = tf.placeholder("float", [None, 784])
xte = tf.placeholder("float", [784])

# Nearest-neighbor classification using the L1 distance
# Compute the L1 distance between the test sample and every training sample
distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))),
                         reduction_indices=1)
# Prediction: index of the training sample with the smallest distance
pred = tf.argmin(distance, 0)

accuracy = 0.

init = tf.global_variables_initializer()

with tf.Session() as sess:

    sess.run(init)
    for i in range(len(Xte)):
        # Get the nearest neighbor of the i-th test sample
        nn_idx = sess.run(pred, feed_dict={xtr: Xtr, xte: Xte[i, :]})
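        # The snippet stops after fetching the neighbor index; a minimal
        # completion (sketch) compares the neighbor's label with the true
        # label and accumulates the accuracy declared above.
        if np.argmax(Ytr[nn_idx]) == np.argmax(Yte[i]):
            accuracy += 1. / len(Xte)
    print("Accuracy:", accuracy)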
Example No. 6
def inference(images):

    # Build the model
    # Convolution layer 1
    with tf.variable_scope('conv1') as scope:
        kernel = _variable_with_weight_decay('weights',
                                             shape=[5, 5, 3, 64],
                                             stddev=1e-4,
                                             wd=0.0)
        conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
        biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
        bias = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(bias, name=scope.name)
        _activation_summary(conv1)
    # Pooling layer 1
    pool1 = tf.nn.max_pool(conv1,
                           ksize=[1, 3, 3, 1],
                           strides=[1, 2, 2, 1],
                           padding='SAME',
                           name="pool1")
    # Local response normalization 1
    norm1 = tf.nn.lrn(pool1,
                      4,
                      bias=1.0,
                      alpha=0.001 / 9.0,
                      beta=0.75,
                      name='norm1')

    # Convolution layer 2
    with tf.variable_scope('conv2') as scope:
        kernel = _variable_with_weight_decay('weights',
                                             shape=[5, 5, 64, 64],
                                             stddev=1e-4,
                                             wd=0.0)
        conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
        bias = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(bias, name=scope.name)
        _activation_summary(conv2)
    # Local response normalization 2
    norm2 = tf.nn.lrn(conv2,
                      4,
                      bias=1.0,
                      alpha=0.001 / 9.0,
                      beta=0.75,
                      name='norm2')
    # Pooling layer 2
    pool2 = tf.nn.max_pool(norm2,
                           ksize=[1, 3, 3, 1],
                           strides=[1, 2, 2, 1],
                           padding='SAME',
                           name='pool2')

    # Fully connected layer with ReLU; flatten the previous output first
    with tf.variable_scope('local3') as scope:
        dim = 1
        # Flatten the shape of the previous layer's output
        for d in pool2.get_shape()[1:].as_list():
            dim *= d
        reshape = tf.reshape(pool2, [FLAGS.batch_size, dim])
        weights = _variable_with_weight_decay('weights',
                                              shape=[dim, 384],
                                              stddev=0.04,
                                              wd=0.004)
        biases = _variable_on_cpu('biases', [384],
                                  tf.constant_initializer(0.1))

        local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases,
                            name=scope.name)
        _activation_summary(local3)
    # Fully connected layer with ReLU
    with tf.variable_scope('local4') as scope:
        weights = _variable_with_weight_decay('weights',
                                              shape=[384, 192],
                                              stddev=0.04,
                                              wd=0.004)
        biases = _variable_on_cpu('biases', [192],
                                  tf.constant_initializer(0.1))
        local4 = tf.nn.relu(tf.matmul(local3, weights) + biases,
                            name=scope.name)
        _activation_summary(local4)

    # Softmax (linear) layer
    with tf.variable_scope('softmax_linear') as scope:
        weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
                                              stddev=1 / 192.0,
                                              wd=0.0)
        biases = _variable_on_cpu('biases', [NUM_CLASSES],
                                  tf.constant_initializer(0.0))
        softmax_linear = tf.add(tf.matmul(local4, weights),
                                biases,
                                name=scope.name)
        _activation_summary(softmax_linear)

    return softmax_linear
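inference relies on _variable_with_weight_decay, _variable_on_cpu and _activation_summary helpers that are not included in the example. A minimal sketch in the style of the TensorFlow CIFAR-10 tutorial (the 'losses' collection name and the summary names are assumptions) is:

def _variable_on_cpu(name, shape, initializer):
    # Create a variable stored in CPU memory
    with tf.device('/cpu:0'):
        var = tf.get_variable(name, shape, initializer=initializer)
    return var

def _variable_with_weight_decay(name, shape, stddev, wd):
    # Truncated-normal initialization plus optional L2 weight decay
    var = _variable_on_cpu(name, shape,
                           tf.truncated_normal_initializer(stddev=stddev))
    if wd is not None and wd > 0:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var

def _activation_summary(x):
    # Histogram and sparsity summaries for TensorBoard
    tf.summary.histogram(x.op.name + '/activations', x)
    tf.summary.scalar(x.op.name + '/sparsity', tf.nn.zero_fraction(x))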
# @Time    : 2019.11.26 11:22

import tensorflow as tf
# Operations on constants
a = tf.constant(2)
b = tf.constant(3)

with tf.Session() as sess:
    print("Add constant: %i" % sess.run(a+b))
    print("multy constant: %i" % sess.run(a*b))

# Operations with placeholder variables
a = tf.placeholder(tf.int16)
b = tf.placeholder(tf.int16)

add = tf.add(a,b)
mul = tf.multiply(a,b)

with tf.Session() as sess:
    print("add with variables: %i" % sess.run(add,feed_dict={a:2,b:3}))
    print("multi with variables: %i" % sess.run(mul,feed_dict={a:3,b:4}))

# Matrix multiplication
matrix1 = tf.constant([[3.,3.]])
matrix2 = tf.constant([[2.],[2.]])

product = tf.matmul(matrix1,matrix2)

with tf.Session() as sess:
    res = sess.run(product)
    print(res)
])
train_y = numpy.asarray([
    1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221, 2.827,
    3.465, 1.65, 2.904, 2.42, 2.94, 1.3
])

n_samples = train_x.shape[0]

X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

W = tf.Variable(rng.randn(), name="weights")
b = tf.Variable(rng.randn(), name="bias")

# Build a linear model
pred = tf.add(tf.multiply(X, W), b)

# Loss function (mean squared error)
cost = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * n_samples)

# Gradient descent optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# init
init = tf.global_variables_initializer()

# Start training
with tf.Session() as sess:
    # Run the initializer
    sess.run(init)
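    # The snippet is cut off here. A typical continuation (sketch; the epoch
    # count below is an assumption, not from the original) runs gradient
    # descent over the training data and reports the fitted parameters.
    training_epochs = 1000
    for epoch in range(training_epochs):
        for (x, y) in zip(train_x, train_y):
            sess.run(optimizer, feed_dict={X: x, Y: y})
    print("Training cost:", sess.run(cost, feed_dict={X: train_x, Y: train_y}))
    print("W:", sess.run(W), "b:", sess.run(b))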