Beispiel #1
0
import tensorflow as tf
# fixed: the tutorials package lives at 'tensorflow.examples' (plural),
# not 'tensorflow.example' — the original import raised ModuleNotFoundError
from tensorflow.examples.tutorials.mnist import input_data

# Download/load MNIST with labels one-hot encoded.
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

def weight_variable(shape):
    """Create a weight Variable of the given shape, initialized with
    truncated-normal noise (stddev 0.1) to break symmetry."""
    init_vals = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(init_vals)


def bias_variable(shape):
    """Create a bias Variable of the given shape, filled with 0.1
    (slightly positive so ReLU units start active).

    Fixes a NameError in the original: the constant was assigned to the
    misspelled name 'inittial' while 'initial' was returned.
    """
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)


def conv2d(x, W):
    """2-D convolution of x with kernel W, stride 1, SAME padding
    (output spatial size equals input spatial size).

    Fixes the original keyword 'stride=' -> 'strides=': tf.nn.conv2d has
    no 'stride' argument, so the original call raised a TypeError.
    """
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')


def max_pool_2x2(x):
    """2x2 max pooling with stride 2, SAME padding — halves each spatial
    dimension of x.

    Fixes a syntax error in the original: '[1,2,2,1].strides=' used a
    period where the argument-separator comma belongs.
    """
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding='SAME')

# Placeholders for the flattened 28x28 input images and their one-hot labels.
xs=tf.placeholder(tf.float32,[None,784])
ys=tf.placeholder(tf.float32,[None,10])
# Dropout keep probability, fed at run time.
keep_prob=tf.placeholder(tf.float32)

# Reshape flat 784-vectors back into NHWC image batches: [batch, 28, 28, 1].
x_image=tf.reshape(xs,[-1,28,28,1])

# First conv layer: 5x5 kernels, 1 input channel -> 32 feature maps, ReLU.
# NOTE(review): this example appears truncated here — the rest of the network
# and the training loop are not part of this excerpt.
W_conv1=weight_variable([5,5,1,32])
b_conv1=bias_variable([32])
h_conv1=tf.nn.relu(conv2d(x_image,W_conv1)+b_conv1)
Beispiel #2
0
# Two-layer MLP (784 -> 300 -> 10) on MNIST with dropout, trained with Adagrad.
# fixed: the tutorials package is 'tensorflow.examples' (plural)
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
sess = tf.InteractiveSession()  # fixed typo: 'InteractiveSesssion'

in_units = 784   # flattened 28x28 input
h1_units = 300   # hidden-layer width

# Hidden-layer weights get small positive noise to break symmetry for ReLU.
w1 = tf.Variable(tf.truncated_normal([in_units, h1_units], stddev=0.1))
b1 = tf.Variable(tf.zeros(h1_units))

# The output layer can safely start at zero.
w2 = tf.Variable(tf.zeros([h1_units, 10]))
b2 = tf.Variable(tf.zeros([10]))

x = tf.placeholder(tf.float32, [None, in_units])
keep_prob = tf.placeholder(tf.float32)  # dropout keep probability

hidden1 = tf.nn.relu(tf.matmul(x, w1) + b1)

hidden1_drop = tf.nn.dropout(hidden1, keep_prob)
y = tf.nn.softmax(tf.matmul(hidden1_drop, w2) + b2)

# Cross-entropy between predictions and one-hot ground-truth labels.
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = tf.reduce_mean(
    -tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.AdagradOptimizer(0.3).minimize(cross_entropy)

tf.global_variables_initializer().run()  # fixed typo: 'globsl_'
for i in range(3000):
    # fixed: 'mnist_train' was undefined; the training split is mnist.train
    batch_xs, batch_ys = mnist.train.next_batch(100)
    train_step.run({x: batch_xs, y_: batch_ys, keep_prob: 0.75})
Beispiel #3
0
import numpy as np
# fixed: tf and input_data are used below but were never imported in this example
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data


# 说明:不能执行,部分方法已废弃
# 生成整数型的属性
def _int64_feature(value):
    """Wrap a single integer as a TFRecord int64 Feature."""
    int64_list = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=int64_list)


# 生成字符串型的属性
def _bytes_feature(value):
    """Wrap a single byte string as a TFRecord bytes Feature."""
    bytes_list = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=bytes_list)


# Load MNIST with uint8 pixels so each image can later be serialized
# as a raw byte string.
mnist = input_data.read_data_sets("/path/to/mnist/data",
                                  dtype=tf.uint8,
                                  one_hot=True)

images = mnist.train.images
# Ground-truth labels, stored alongside each image as a TFRecord attribute.
labels = mnist.train.labels
# Image resolution (number of pixels per image), saved as an Example attribute.
pixels = images.shape[1]
num_examples = mnist.train.num_examples

# Path of the TFRecord output file.
filename = "path/to/output.tfrecord"
# Create a writer for the TFRecord file.
writer = tf.python_io.TFRecordWriter(filename)
for index in range(num_examples):
    # Convert the image matrix to a byte string.
    # NOTE(review): the loop body is truncated here — the serialization and
    # writer.write(...) code continues beyond this excerpt.
Beispiel #4
0
'''
input > weights > hidden layer 1 (activation function) > weights > hidden layer 2
(activation function) > weights > output layer
feed forward

compare output to intended output  > cost or loss function (cross entropy)
optimisation function (optimizer) > minimize cost (AdamOptimizer)

go back and adjust the weights - backpropagation

feed forward + backprop = epoch
'''

# fixed: the tutorials package is 'tensorflow.examples' (plural),
# not 'tensorflow.example' — the original import raised ModuleNotFoundError
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("/tmp/data", one_hot=True)

# 10 classes, digits 0-9
'''
one-hot encoding: exactly one element is on ("hot")
0 = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]
1 = [0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
2 = [0, 0, 1, 0, 0, 0, 0, 0, 0, 0]
3 = [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]
'''

# Width of each of the three hidden layers of the MLP described above.
n_nodes_hl1 = 500
n_nodes_hl2 = 500
n_nodes_hl3 = 500

# One output class per digit (0-9).
n_classes = 10
Beispiel #5
0
# Softmax regression on MNIST: a single linear layer + softmax, trained by SGD.
# fixed: the tutorials package is 'tensorflow.examples' (plural)
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf

mnist = input_data.read_data_sets("tutorial/MNIST_data/", one_hot=True)

x = tf.placeholder("float", [None, 784])
# fixed: the variable was defined as lowercase 'w' but used as 'W' below,
# which raised a NameError
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

# Predicted class distribution and ground-truth placeholder.
y = tf.nn.softmax(tf.matmul(x, W) + b)
y_ = tf.placeholder("float", [None, 10])
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))

train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
init = tf.global_variables_initializer()

sess = tf.Session()
sess.run(init)

# Mini-batch SGD: 1000 steps of 100 samples each.
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

# Fraction of test samples whose argmax prediction matches the label.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

print(
    sess.run(accuracy, feed_dict={
        x: mnist.test.images,  # fixed typo: 'imgaes'
        y_: mnist.test.labels
    }))
Beispiel #6
0
# -*- coding: utf-8 -*-

import os
import tensorflow as tf
# fixed: the tutorials package is 'tensorflow.examples' (plural),
# not 'tensorflow.example' — the original import raised ModuleNotFoundError
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.contrib.tensorboard.plugins import projector
import numpy as np

PATH = os.getcwd()

# TensorBoard log directory and the metadata file that maps each embedded
# point to its class label.
LOG_DIR = PATH + '/mnist.tensorboard/log'
metadata = os.path.join(LOG_DIR, 'metadata.tsv')

mnist = input_data.read_data_sets(PATH + "/mnist.tensorboard/data/",
                                  one_hot=True)
# The 10k test images become the embedding tensor to visualize.
images = tf.Variable(mnist.test.images, name='images')
#def save_metadata(file) :
# Write one label per line: the column index of the nonzero (one-hot) entry
# for each test sample.
# NOTE(review): labels[::1] is a no-op full slice — presumably just labels.
with open(metadata, 'w') as metadata_file:
    for row in range(10000):
        c = np.nonzero(mnist.test.labels[::1])[1:][0][row]
        metadata_file.write('{}\n'.format(c))

# Save the embedding Variable to a checkpoint TensorBoard can load.
# NOTE(review): this with-block is truncated here — the projector config is
# completed (tensor_name, metadata_path, visualize_embeddings) beyond this
# excerpt.
with tf.Session() as sess:
    saver = tf.train.Saver([images])

    sess.run(images.initializer)
    saver.save(sess, os.path.join(LOG_DIR, 'images.ckpt'))

    config = projector.ProjectorConfig()
    # One Can Add Multiple embeddings
    embedding = config.embeddings.add()