Code example #1
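'''
A minimal construction-phase sketch (an assumption, not part of the
original snippet): it shows how the names used below (X, y, is_training,
training_op, accuracy) could be wired up, with the is_training placeholder
fed into batch_norm so the layers use batch statistics during training and
the moving averages during evaluation. Layer sizes are illustrative.
'''
import tensorflow as tf

is_training = tf.placeholder(tf.bool, shape=(), name='is_training')
X = tf.placeholder(tf.float32, shape=(None, 28 * 28), name='X')
y = tf.placeholder(tf.int64, shape=(None,), name='y')

bn_params = {'is_training': is_training, 'decay': 0.99, 'updates_collections': None}
with tf.contrib.framework.arg_scope([tf.contrib.layers.fully_connected],
                                    normalizer_fn=tf.contrib.layers.batch_norm,
                                    normalizer_params=bn_params):
    hidden1 = tf.contrib.layers.fully_connected(X, 300, scope='hidden1')
    hidden2 = tf.contrib.layers.fully_connected(hidden1, 100, scope='hidden2')
    logits = tf.contrib.layers.fully_connected(hidden2, 10, activation_fn=None, scope='outputs')

xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(xentropy, name='loss')
training_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name='accuracy')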
init = tf.global_variables_initializer()
saver = tf.train.Saver()
'''
Next comes the execution phase.

One difference: whenever you run an operation that depends on the
batch_norm layer, you need to set the is_training placeholder to True
or False.
'''
'''
Mini-batch gradient descent:
define the number of epochs and the size of each mini-batch
'''
n_epochs = 40
batch_size = 500

mnist = get_serialize_data('mnist', 1)

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epochs):
        for iteration in range(mnist.train.num_examples // batch_size):
            X_batch, y_batch = mnist.train.next_batch(batch_size)
            sess.run(training_op,
                     feed_dict={
                         is_training: True,
                         X: X_batch,
                         y: y_batch
                     })
        accuracy_score = accuracy.eval(feed_dict={
            is_training: False,
            X: mnist.test.images,
            y: mnist.test.labels
        })
        print(epoch, 'Test accuracy:', accuracy_score)
Code example #2
'''
Training an MLP with TensorFlow's high-level API

The simplest way to train an MLP with TensorFlow is its high-level API,
TF.Learn, which is very similar to Scikit-Learn's API.

DNNClassifier trains a deep neural network with any number of hidden
layers, topped by a softmax output layer that computes class probabilities.

e.g.: the code below trains a DNN for classification with two hidden layers
(one with 300 neurons, the other with 100) and a softmax output layer with
10 neurons.
'''
import numpy as np
from sklearn.metrics import accuracy_score
from tensorflow.contrib.learn.python import learn
from command.DataUtils import serialize_data, get_serialize_data

# get_mnist_train / get_mnist_test are assumed project helpers that return
# the MNIST features and labels as arrays
X_train, Y_train = get_mnist_train()
feature_columns = learn.infer_real_valued_columns_from_input(X_train)

dnn_clf = learn.DNNClassifier(hidden_units=[300, 100],
                              n_classes=10,
                              feature_columns=feature_columns)
# Cache the classifier so later runs can reload it instead of rebuilding it
try:
    if get_serialize_data('dnn_clf', mode=1) is None:
        serialize_data(dnn_clf, 'dnn_clf', mode=1)
except FileNotFoundError:
    serialize_data(dnn_clf, 'dnn_clf', mode=1)
'''
batch_size:小批量X的size
steps:循环次数
'''
dnn_clf.fit(X_train, Y_train.astype(np.int), batch_size=50, steps=40000)
'''
Evaluate the accuracy
'''
X_test, Y_test = get_mnist_test()
y_pred = list(dnn_clf.predict(X_test))
score = accuracy_score(Y_test, y_pred)
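print('test accuracy:', score)
'''
A sketch of an alternative (an assumption based on the standard
tf.contrib.learn Estimator API, not part of the original snippet):
estimators also expose evaluate(), which computes the metrics in one call.
'''
# eval_metrics = dnn_clf.evaluate(x=X_test, y=Y_test.astype(np.int))
# print(eval_metrics['accuracy'])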
Code example #3
'''
----------------------------------------------------------------------------------------------------------------------
Below is the earlier mini-batch gradient descent code
'''


def fetch_batch(epoch, batch_index, batch_size):
    # Seed with the global batch index so each batch is reproducible
    np.random.seed(epoch * n_batches + batch_index)
    # Sample batch_size row indices (with replacement) from the m instances
    indices = np.random.randint(m, size=batch_size)
    X_batch = scaled_housing_data_plus_bias[indices]
    y_batch = housing.target.reshape(-1, 1)[indices]
    return X_batch, y_batch


housing = get_serialize_data('housing')
m, n = housing.data.shape
scaled_housing_data_plus_bias = get_serialize_data('scaled_housing_data_plus_bias')

X = tf.placeholder(tf.float32, shape=(None, n + 1), name='X')
y = tf.placeholder(tf.float32, shape=(None, 1), name='y')
n_epochs = 10
learning_rate = 0.01
batch_size = 100
n_batches = np.int(np.ceil(m / batch_size))

# Full-dataset constants (TensorFlow will uniquify these names to X_1/y_1,
# since the placeholders above already use X and y)
X_train = tf.constant(scaled_housing_data_plus_bias, dtype=tf.float32, name='X')
y_train = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name='y')

# Initialize theta with random values in [-1.0, 1.0)
theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0), name='theta')
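'''
A minimal sketch (an assumption, completing the truncated snippet) of the
execution phase: build the MSE and a gradient-descent training op as in
code example #5, then feed each mini-batch through the X/y placeholders.
'''
y_pred = tf.matmul(X, theta, name='predictions')
mse = tf.reduce_mean(tf.square(y_pred - y), name='mse')
training_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(mse)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(n_epochs):
        for batch_index in range(n_batches):
            X_batch, y_batch = fetch_batch(epoch, batch_index, batch_size)
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
    best_theta = theta.eval()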
Code example #4
# @Author  : Aries
# @Site    :
# @File    : dnn_mnist_data.py
# @Software: PyCharm
'''
Building a DNN for MNIST
'''
import os

from tensorflow.contrib.learn.python import learn
from tensorflow.examples.tutorials.mnist import input_data

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress TensorFlow INFO/WARNING logs
from command.DataUtils import serialize_data, get_serialize_data
'''
1. Load the MNIST dataset
'''
# mnist = input_data.read_data_sets('/Users/houruixiang/python/TensorFlow/command/assets_mnist', one_hot=True)
# serialize_data(mnist, 'mnist', 2)
mnist = get_serialize_data('mnist', 2)  # type: learn.datasets.base.Datasets

print('Training data and label size: ')
print(mnist.train.images.shape, mnist.train.labels.shape)
print('Testing data and label size: ')
print(mnist.test.images.shape, mnist.test.labels.shape)
print('Validation data and label size: ')
print(mnist.validation.images.shape, mnist.validation.labels.shape)
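# With one_hot=True and the standard split, the shapes printed above are
# (55000, 784)/(55000, 10) for training, (10000, 784)/(10000, 10) for test,
# and (5000, 784)/(5000, 10) for validation.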

print('Example training data: ', mnist.train.images[0])
print('Example training label: ', mnist.train.labels[0])
Code example #5
from command.DataUtils import get_serialize_data, serialize_data
import tensorflow as tf
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from command.SaveUtils import save, restore

print(
    '--------------------------------------------------Manually computing gradients------------------------------------------------'
)
'''
1. random_uniform() creates a node in the graph that generates a tensor; it fills the tensor with random values based on the shape and value range passed in, much like NumPy's rand()
2. assign() creates a node that assigns a value to a variable, implementing the batch gradient descent step: theta(next step) = theta - learning_rate x gradients
3. The main loop runs the training step over and over (n_epochs times), printing the mean squared error every 100 iterations; this value should keep decreasing
'''

housing = get_serialize_data('housing')
housing_data_plus_bias = get_serialize_data('housing_data_plus_bias')

m, n = housing.data.shape
n_epochs = 1000
learning_rate = 0.01
pipeline = Pipeline([('std_scaler', StandardScaler())])
scaled_housing_data_plus_bias = pipeline.fit_transform(housing_data_plus_bias)
serialize_data(scaled_housing_data_plus_bias, 'scaled_housing_data_plus_bias')
X = tf.constant(scaled_housing_data_plus_bias, dtype=tf.float32, name='X')
y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name='y')
# Initialize theta with random values in [-1.0, 1.0)
theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0), name='theta')
y_pred = tf.matmul(X, theta, name='predictions')
error = y_pred - y
mse = tf.reduce_mean(tf.square(error), name='mse')
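'''
A minimal sketch (an assumption, completing steps 2 and 3 from the notes
above): compute the MSE gradient by hand, build the assign() training
step, and run the main loop, printing the MSE every 100 epochs.
'''
gradients = 2 / m * tf.matmul(tf.transpose(X), error)
training_op = tf.assign(theta, theta - learning_rate * gradients)

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epochs):
        if epoch % 100 == 0:
            print('Epoch', epoch, 'MSE =', mse.eval())
        sess.run(training_op)
    best_theta = theta.eval()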