Code Example #1
File: convert_to_records.py  Project: islingx/GTSRB
def main(unused_argv):
    # Get the data.
    data_sets = read_data.read_data_sets('train')

    # Convert to Examples and write the result to TFRecords.
    convert_to(data_sets.train, 'train')
    convert_to(data_sets.validation, 'validation')

    data_sets = read_data.read_data_sets('test')
    convert_to(data_sets.test, 'test')
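Note: the convert_to() called above is not shown in this excerpt. The sketch below illustrates what such a writer typically looks like, modeled on TensorFlow's convert_to_records.py tutorial; the attribute names and feature keys are assumptions, not the project's actual code.

import tensorflow as tf

def convert_to(data_set, name):
    # Serialize every (image, label) pair of the data set into one TFRecords file.
    images = data_set.images
    labels = data_set.labels
    num_examples = data_set.num_examples  # attribute name is an assumption

    filename = name + '.tfrecords'
    writer = tf.python_io.TFRecordWriter(filename)
    for index in range(num_examples):
        image_raw = images[index].tostring()
        example = tf.train.Example(features=tf.train.Features(feature={
            'label': tf.train.Feature(
                int64_list=tf.train.Int64List(value=[int(labels[index])])),
            'image_raw': tf.train.Feature(
                bytes_list=tf.train.BytesList(value=[image_raw]))}))
        writer.write(example.SerializeToString())
    writer.close()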
Code Example #2
File: main.py  Project: du-yang/wordseg
def train(config, echoNum):

    lstmer = lstm_model(config)
    jd_data = read_data_sets('data/jdData.json', 'data/w2id.json', config.num_steps)

    with tf.Session() as sess:

        if not os.path.exists('tmp/'):
            os.mkdir('tmp/')

        loss_op = lstmer.loss()
        train_op = lstmer.training()

        saver = tf.train.Saver()
        if os.path.exists('tmp/checkpoint'):  # check whether a saved model exists
            saver.restore(sess, 'tmp/model')  # if so, restore the variables from it
        else:
            init = tf.global_variables_initializer()  # otherwise initialize the variables
            sess.run(init)


        for i in range(echoNum):
            x_data, y_data = jd_data.next_batch(config.batch_size)

            print('loss before training:', sess.run(loss_op, feed_dict={lstmer.x: x_data, lstmer.y: y_data}))

            sess.run(train_op, feed_dict={lstmer.x: x_data, lstmer.y: y_data})

            saver.save(sess, './tmp/model')

            print('loss after training:', sess.run(loss_op, feed_dict={lstmer.x: x_data, lstmer.y: y_data}))
            # print('prediction:', sess.run(lstmer.logits, feed_dict={lstmer.x: x_data, lstmer.y: y_data}))
            # print(y_data)
            print('finished round %s' % i)
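Note: the example decides between restoring and initializing by testing for the 'tmp/checkpoint' file directly. An equivalent sketch using TensorFlow's checkpoint helper (not part of the original project) would be:

ckpt = tf.train.get_checkpoint_state('tmp/')
if ckpt and ckpt.model_checkpoint_path:
    saver.restore(sess, ckpt.model_checkpoint_path)
else:
    sess.run(tf.global_variables_initializer())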
Code Example #3
import numpy as np
import theano
import theano.tensor as T
import read_data


def load_data_shared(filename="data/mnist.pkl.gz"):
    # f = gzip.open(filename, 'rb')
    # training_data, validation_data, test_data = cPickle.load(f)
    # print(training_data[1].shape)
    # f.close()

    training_data, validation_data, test_data = read_data.read_data_sets()

    def shared(data):
        """Place the data into shared variables.  This allows Theano to copy
        the data to the GPU, if one is available.

        """
        shared_x = theano.shared(
            np.asarray(data[0], dtype=theano.config.floatX), borrow=True)
        shared_y = theano.shared(
            np.asarray(data[1], dtype=theano.config.floatX), borrow=True)
        return shared_x, T.cast(shared_y, "int32")
    return [shared(training_data), shared(validation_data), shared(test_data)]
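Note: a usage sketch for the function above. The mini-batch slicing shown is the standard pattern for feeding Theano shared variables into a compiled function; it is an assumption, not code from the original file.

training_data, validation_data, test_data = load_data_shared()
shared_x, shared_y = training_data

# Slice mini-batches symbolically so the data stays on the GPU; `i` would be
# passed as an input to the compiled Theano function.
i = T.lscalar('batch_index')
batch_size = 10
x_batch = shared_x[i * batch_size:(i + 1) * batch_size]
y_batch = shared_y[i * batch_size:(i + 1) * batch_size]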
Code Example #4
sys.setdefaultencoding("utf-8")
import logging

logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                    datefmt='%a, %d %b %Y %H:%M:%S',
                    filename='logistic_regression.log',
                    filemode='a+')
import numpy as np
import tensorflow as tf
import pdb
from read_data import read_data_sets


# Read in the data and convert it to numpy arrays for training (large datasets can be trained in batches)
train_images, train_labels, test_images, test_labels = read_data_sets('data/', one_hot=True)


# Parameters of Logistic Regression
learning_rate = 0.001
training_epochs = 500
batch_size = 128
valid_step = 1  # run validation every valid_step epochs

# Set up the placeholders and Variables [inputs are created with tf.placeholder; the parameters W and b with tf.Variable (their shapes must be given)]
# Create Graph for Logistic Regression
x = tf.placeholder("float", [None, 784])
y = tf.placeholder("float", [None, 10])  # None is for infinite or unspecified length
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
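Note: the excerpt stops after the parameter definitions. A minimal sketch of how such a softmax-regression graph is usually completed (the loss, optimizer, and accuracy below are assumptions, not the original file's code):

# model, cross-entropy loss, and optimizer
pred = tf.nn.softmax(tf.matmul(x, W) + b)
cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), axis=1))
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# accuracy used during the validation step
correct = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))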
Code Example #5
            mouse_flag = True
    if event == cv2.EVENT_MOUSEMOVE:
        if mouse_flag:
            cv2.circle(test1, (x, y), 3, (255, 255, 255), -1)
            #print(str(old_x) + " " + str(old_y))
            cv2.line(test1, (old_x, old_y), (x, y), \
                     (255, 255, 255), thickness=6)
            old_x, old_y = x, y
    if event == cv2.EVENT_LBUTTONUP:
        if mouse_flag:
            mouse_flag = False
    if event == cv2.EVENT_RBUTTONDOWN:
        test1 = np.zeros((280, 280, 1), np.uint8)


mnist = read_data.read_data_sets("C:\\workspace\\MNIST", one_hot=True)

#print(mnist.count)
#print(mnist.train.images.shape)
ind = 300
#test1 = mnist.test.images[ind].reshape([28,28])
test1 = np.zeros((280, 280, 1), np.uint8)
#print(mnist.train.labels[ind])

#Test the datasets....ABOVE!!


def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
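Note: the snippet begins inside a mouse callback whose definition is truncated. A hedged sketch of how such a callback is typically registered with OpenCV (the window name and the callback name on_mouse are placeholders for the truncated function above):

cv2.namedWindow('canvas')
cv2.setMouseCallback('canvas', on_mouse)  # on_mouse stands for the truncated callback

while True:
    cv2.imshow('canvas', test1)
    if cv2.waitKey(10) & 0xFF == 27:  # Esc quits the drawing loop
        break
cv2.destroyAllWindows()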
Code Example #6
File: rnn_deep_german.py  Project: kuscu/deep-german
xs = tf.placeholder(tf.float32, [None, MAX_WORD_LEN, ALPHABET_SIZE])
ys = tf.placeholder(tf.float32, [None, NUM_GENDERS])
seq = tf.placeholder(tf.int32, [None])
dropout = tf.placeholder(tf.float32)

echo("Creating model...")

model = RNNWordModel(xs, ys, seq, dropout, CELL_TYPE, NUM_LAYERS, NUM_HIDDEN,
                     tf.train.AdamOptimizer(LEARNING_RATE))

# PREPARING DATA

echo("Preparing data...")

# preparing words dataset
dataset = read_data_sets()

print()
echo("Training set:", dataset.train.words.shape[0])
echo("Validation set:", dataset.validation.words.shape[0])
echo("Testing set:", dataset.test.words.shape[0])
print()

# EXECUTING THE GRAPH

best_epoch = 0
best_val_error = 1.0
saver = tf.train.Saver()

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
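Note: allow_growth makes TensorFlow claim GPU memory on demand instead of reserving it all up front; the config is then passed when the session is created. The excerpt ends here, so the lines below are only a sketch of how the graph above would be executed (the checkpoint path is an assumption):

with tf.Session(config=config) as sess:
    sess.run(tf.global_variables_initializer())
    # the project's training/validation loop over dataset.train and
    # dataset.validation would go here
    saver.save(sess, 'rnn_word_model.ckpt')  # hypothetical checkpoint path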
Code Example #7
File: mean.py  Project: islingx/GTSRB
import read_data
import cv2
import numpy

data_set = read_data.read_data_sets('train')

for i in range(10):
    image = data_set.train.images[126+i]
    image = numpy.asarray(image, numpy.uint8)

    print(data_set.train.labels[126+i])
    cv2.imshow('aaa', image)
    cv2.waitKey()
    cv2.destroyAllWindows()
Code Example #8
File: test_mnist.py  Project: wzlj/Learning
# coding=utf-8

# from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import read_data

mnist = read_data.read_data_sets('MNIST_data', one_hot=True)

sess = tf.InteractiveSession()


def conv2d(x, w):
    return tf.nn.conv2d(x, w, strides=[1, 4, 4, 1], padding='SAME')


# placeholders for the input images and labels
x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 10])
x_img = tf.reshape(x, [-1, 28, 28, 1])

# input channels: 1, output channels: 50
w_conv1 = tf.Variable(tf.truncated_normal([3, 3, 1, 50], stddev=0.1))
b_conv1 = tf.Variable(tf.constant(0.1, shape=[50]))
h_conv1 = tf.nn.relu(conv2d(x_img, w_conv1) + b_conv1)

w_fc1 = tf.Variable(tf.truncated_normal([7*7*50, 1024], stddev=0.1))
b_fc1 = tf.Variable(tf.constant(0.1, shape=[1024]))

h_pool2_flat = tf.reshape(h_conv1, [-1, 7*7*50])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)
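Note: the excerpt stops at the first fully connected layer. A sketch of how such a network is typically finished, following the standard MNIST tutorial pattern (the readout layer, loss, and accuracy below are assumptions, not the project's code):

w_fc2 = tf.Variable(tf.truncated_normal([1024, 10], stddev=0.1))
b_fc2 = tf.Variable(tf.constant(0.1, shape=[10]))
y_conv = tf.matmul(h_fc1, w_fc2) + b_fc2  # logits for the 10 digit classes

cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))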
Code Example #9
import os
import sys
import read_data
import numpy as np
from PIL import Image
import tensorflow as tf

# data set
faces = read_data.read_data_sets(one_hot=True, reshape=False)

# LOG DIR
log_dir = 'LOG_CNN/'

# global parameter
# learning_rate = 1E-3
# batch_size = 40
epochs = 200
display_iter = 10


def summary_variables(var):
    with tf.name_scope("summaries"):
        with tf.name_scope("mean"):
            mean = tf.reduce_mean(var)
            tf.summary.scalar('mean', mean)
        with tf.name_scope("stddev"):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
            tf.summary.scalar('stddev', stddev)
        with tf.name_scope("max"):
            tf.summary.scalar('max', tf.reduce_max(var))
        with tf.name_scope("min"):
Code Example #10
import read_data
from matplotlib import pyplot as plt
import cv2
import numpy as np

dataSets = read_data.read_data_sets()

images = dataSets.train.images
labels = dataSets.train.labels

image = images[0]
label = labels[0]

print(label)

cv2.imshow('aaa', np.array(image))
cv2.waitKey()
cv2.destroyAllWindows()

Code Example #11
def main(unused_argv):

    model_dir = "../data/HanNet_CNN"
    if tf.gfile.Exists(model_dir):
        tf.gfile.DeleteRecursively(model_dir)
    tf.gfile.MakeDirs(model_dir)

    # Load training, validation, and eval data
    #[train, test] = read_data.read_data_sets(False)
    [train, validation, test] = read_data.read_data_sets(True)

    train_data = train.images.astype(np.float32)  # Returns np.array
    train_labels = np.asarray(train.labels, dtype=np.int32)
    eval_data = test.images.astype(np.float32)  # Returns np.array
    eval_labels = np.asarray(test.labels, dtype=np.int32)

    validation_data = validation.images.astype(np.float32)  # Returns np.array
    validation_labels = np.asarray(validation.labels, dtype=np.int32)

    # Create the Estimator
    HanNet_classifier = learn.Estimator(
        model_fn=cnn_model_fn,
        model_dir=model_dir,
        config=tf.contrib.learn.RunConfig(save_checkpoints_secs=100))

    # Set up logging for predictions
    # Log the values in the "Softmax" tensor with label "probabilities"
    #tensors_to_log = {"probabilities": "softmax_tensor"}
    #logging_hook = tf.train.LoggingTensorHook(
    #    tensors=tensors_to_log, every_n_iter=50)

    # Set up the validation monitor
    validation_metrics = {
        "accuracy":
        tf.contrib.learn.MetricSpec(
            metric_fn=tf.contrib.metrics.streaming_accuracy,
            prediction_key=tf.contrib.learn.PredictionKey.CLASSES)
    }
    validation_monitor = tf.contrib.learn.monitors.ValidationMonitor(
        validation_data,
        validation_labels,
        every_n_steps=50,
        metrics=validation_metrics)

    # Train the model
    HanNet_classifier.fit(x=train_data,
                          y=train_labels,
                          batch_size=100,
                          steps=2000,
                          monitors=[validation_monitor])

    # Configure the accuracy metric for evaluation
    metrics = {
        "accuracy":
        learn.MetricSpec(metric_fn=tf.metrics.accuracy,
                         prediction_key="classes"),
    }

    # Evaluate the model and print results
    eval_results = HanNet_classifier.evaluate(x=eval_data,
                                              y=eval_labels,
                                              metrics=metrics)
    print(eval_results)
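Note: cnn_model_fn is not included in this excerpt. Below is a minimal sketch of a model_fn compatible with the tf.contrib.learn.Estimator call above, adapted from TensorFlow's old layers tutorial; the input shape, layer sizes, and class count are assumptions and will differ from the project's actual HanNet model.

def cnn_model_fn(features, labels, mode):
    # illustrative network: 28x28x1 inputs and 10 classes are assumptions
    input_layer = tf.reshape(features, [-1, 28, 28, 1])
    conv = tf.layers.conv2d(input_layer, filters=32, kernel_size=5,
                            padding="same", activation=tf.nn.relu)
    pool = tf.layers.max_pooling2d(conv, pool_size=2, strides=2)
    flat = tf.reshape(pool, [-1, 14 * 14 * 32])
    logits = tf.layers.dense(flat, units=10)

    loss, train_op = None, None
    if mode != tf.contrib.learn.ModeKeys.INFER:
        onehot_labels = tf.one_hot(tf.cast(labels, tf.int32), depth=10)
        loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels,
                                               logits=logits)
    if mode == tf.contrib.learn.ModeKeys.TRAIN:
        train_op = tf.contrib.layers.optimize_loss(
            loss=loss,
            global_step=tf.contrib.framework.get_global_step(),
            learning_rate=0.001,
            optimizer="SGD")

    predictions = {
        "classes": tf.argmax(logits, axis=1),
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor"),
    }
    return tf.contrib.learn.ModelFnOps(mode=mode, predictions=predictions,
                                       loss=loss, train_op=train_op)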
Code Example #12
    os.mkdir('log')

logging.basicConfig(level=logging.ERROR,
                    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                    datefmt='%a, %d %b %Y %H:%M:%S',
                    filename='log/mlp.log',
                    filemode='w')

import numpy as np
import tensorflow as tf
import pdb
from read_data import read_data_sets


# Read in the data and convert it to numpy arrays for training (large datasets can be trained in batches)
train_images, train_labels, test_images, test_labels = read_data_sets('/home/liubo-it/siamese_tf_mnist/MNIST_data/', one_hot=True)


# Parameters
learning_rate = 0.001
training_epochs = 20
batch_size = 128
valid_step = 1

# Network Parameters
n_input = 784 # MNIST data input (img shape: 28*28)
n_hidden_1 = 256 # 1st layer num features
n_hidden_2 = 256 # 2nd layer num features
n_hidden_3 = 256 # 3rd layer num features
n_hidden_4 = 256 # 4th layer num features
n_classes = 10 # MNIST total classes (0-9 digits)
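Note: the excerpt ends with the network parameters. A sketch of the four-hidden-layer MLP they describe (tf.layers.dense is used here for brevity; these lines are assumptions, since the original file's layer construction is not shown):

x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])

h1 = tf.layers.dense(x, n_hidden_1, activation=tf.nn.relu)
h2 = tf.layers.dense(h1, n_hidden_2, activation=tf.nn.relu)
h3 = tf.layers.dense(h2, n_hidden_3, activation=tf.nn.relu)
h4 = tf.layers.dense(h3, n_hidden_4, activation=tf.nn.relu)
logits = tf.layers.dense(h4, n_classes)

cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)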
Code Example #13
File: model.py  Project: du-yang/wordseg
    num_layers = 2
    hidden_size = 200
    keep_prob = 1.0
    lr_decay = 0.5
    batch_size = 50
    num_steps = 50
    vocab_size = 3000
    output_size = 3
    learningrate = 0.5


if __name__ == '__main__':

    conf = config()
    lstmer = lstm_model(config())
    jd_data = read_data_sets('jdData.json', conf.num_steps)

    with tf.Session() as sess:

        saver = tf.train.Saver()

        loss_op = lstmer.loss()
        train_op = lstmer.training()
        # tf.global_variables_initializer().run()
        saver.restore(sess, 'tmp/model')

        for i in range(100):
            x_data, y_data = jd_data.next_batch(conf.batch_size)

            print(
                'loss before training:',