Example 1
import tensorflow as tf

import data_processer
import model
from tensorflow.examples.tutorials.mnist import input_data

# data: MNIST digits plus four arithmetic-operator symbol sets (14 classes)
mnist_data = input_data.read_data_sets('MNIST_data', one_hot=True)
plus_data = data_processer.load_image_data('csv/plus_data.csv')
minus_data = data_processer.load_image_data('csv/minus_data.csv')
mult_data = data_processer.load_image_data('csv/mult_data.csv')
div_data = data_processer.load_image_data('csv/div_data.csv')

data = data_processer.add_data(mnist_data, [plus_data, minus_data, mult_data, div_data])

# model
with tf.variable_scope("convolutional"):
    x = tf.placeholder("float", [None, 784])
    keep_prob = tf.placeholder("float")
    y, variables = model.convolutional(x, keep_prob)

# train
# 10 MNIST digit classes plus 4 operator classes = 14 output units
y_ = tf.placeholder("float", [None, 14])
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

saver = tf.train.Saver(variables)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for i in range(20000):
        batch = data.train.next_batch(50)
        if i % 100 == 0:
            train_accuracy = accuracy.eval(feed_dict={
                x: batch[0], y_: batch[1], keep_prob: 1.0})
            print("step %d, training accuracy %g" % (i, train_accuracy))
        train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
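A note on the data merge above: data_processer is not shown on this page. Widening one-hot MNIST labels from 10 to 14 classes, so the four operator sets get indices 10-13, plausibly looks like the hypothetical sketch below (the names digit_labels_10 and plus_label are assumptions, not the page's actual code).

import numpy as np

# widen (N, 10) one-hot digit labels to (N, 14) with zeros for the new classes
digit_labels_14 = np.pad(digit_labels_10, ((0, 0), (0, 4)), mode='constant')

# an operator sample gets one of the new indices: '+' 10, '-' 11, '*' 12, '/' 13
plus_label = np.eye(14, dtype=np.float32)[10]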
Example 2
import tensorflow as tf

import model


def loss(logits, labels):
    labels = tf.cast(labels, tf.int64)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels, name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)
    # The total loss is defined as the cross entropy loss plus all of the weight
    # decay terms (L2 loss).
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
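# Note: the 'losses' collection pattern above (borrowed from the TensorFlow
# CIFAR-10 tutorial) only includes weight decay if the model registers its L2
# penalties in the same collection. A minimal sketch, assuming a weight
# tensor w and a decay factor wd defined where the layer is created:
#
#     weight_decay = tf.multiply(tf.nn.l2_loss(w), wd, name='weight_loss')
#     tf.add_to_collection('losses', weight_decay)
#
# tf.add_n(tf.get_collection('losses')) then sums cross entropy plus every
# registered penalty into the total loss returned above.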


# model
batch_size = 128  # assumed value; hinted at by the commented-out reshape below
with tf.variable_scope("convolutional"):
    image_holder = tf.placeholder(tf.float32, [batch_size, 32, 24, 3])
    # dropout inside the model addresses the overfitting problem
    y, variables = model.convolutional(image_holder)
# y1=tf.reshape(y,[128])
# train
label_holder = tf.placeholder(tf.int32, [batch_size])
print('y', y)
print('label_holder', label_holder)

total_loss = loss(y, label_holder)  # cross entropy + weight decay; does not shadow loss()

# an earlier naive loss, superseded by the collection-based loss() above:
# loss = -tf.reduce_mean(label_holder * tf.log(y))
train_op = tf.train.AdamOptimizer(1e-3).minimize(total_loss)  # 0.72
# with larger datasets, Adam tends to converge faster than plain gradient descent
top_k_op = tf.nn.in_top_k(y, label_holder, 1)
sess = tf.InteractiveSession()
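The snippet stops right after creating the session. A plausible continuation (a sketch only; get_batch is a hypothetical stand-in for whatever feeds the 32x24x3 image batches) runs the optimizer and reports top-1 precision from top_k_op:

sess.run(tf.global_variables_initializer())
for step in range(3000):
    image_batch, label_batch = get_batch(batch_size)  # hypothetical data source
    _, loss_value = sess.run([train_op, total_loss],
                             feed_dict={image_holder: image_batch,
                                        label_holder: label_batch})
    if step % 100 == 0:
        # in_top_k yields one bool per example; the mean is top-1 precision
        hits = sess.run(top_k_op, feed_dict={image_holder: image_batch,
                                             label_holder: label_batch})
        print('step %d, loss %.3f, precision %.3f'
              % (step, loss_value, hits.mean()))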
Example 3
import os
import model
import tensorflow as tf

from tensorflow.examples.tutorials.mnist import input_data
data = input_data.read_data_sets("../data/", one_hot=True)

# model
with tf.variable_scope("convolutional"):
    x = tf.placeholder(tf.float32, [None, 784])
    keep_prob = tf.placeholder(tf.float32)
    y, variables = model.convolutional(x, keep_prob)

# train
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

saver = tf.train.Saver(variables)
init = tf.global_variables_initializer()
## 99.33% Accuracy
with tf.Session() as sess:
    sess.run(init)
    for i in range(20000):
        batch = data.train.next_batch(50)
        if i % 100 == 0:
            train_accuracy = accuracy.eval(feed_dict={
                x: batch[0],
                y_: batch[1],
                keep_prob: 1.0})
            print("step %d, training accuracy %g" % (i, train_accuracy))
        train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
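model.convolutional itself is not shown on this page; Examples 1 and 3 call it as model.convolutional(x, keep_prob) and expect (y, variables) back. A minimal compatible sketch, assuming the layout of the TensorFlow Deep MNIST tutorial (Example 1 would pass num_classes=14):

def convolutional(x, keep_prob, num_classes=10):
    def weight(shape):
        return tf.Variable(tf.truncated_normal(shape, stddev=0.1))

    def bias(shape):
        return tf.Variable(tf.constant(0.1, shape=shape))

    x_image = tf.reshape(x, [-1, 28, 28, 1])
    # conv 5x5/32 + max-pool: 28x28 -> 14x14
    W1, b1 = weight([5, 5, 1, 32]), bias([32])
    h1 = tf.nn.relu(tf.nn.conv2d(x_image, W1, [1, 1, 1, 1], 'SAME') + b1)
    p1 = tf.nn.max_pool(h1, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')
    # conv 5x5/64 + max-pool: 14x14 -> 7x7
    W2, b2 = weight([5, 5, 32, 64]), bias([64])
    h2 = tf.nn.relu(tf.nn.conv2d(p1, W2, [1, 1, 1, 1], 'SAME') + b2)
    p2 = tf.nn.max_pool(h2, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')
    # fully connected layer + dropout + softmax readout
    W3, b3 = weight([7 * 7 * 64, 1024]), bias([1024])
    h3 = tf.nn.relu(tf.matmul(tf.reshape(p2, [-1, 7 * 7 * 64]), W3) + b3)
    h3_drop = tf.nn.dropout(h3, keep_prob)
    W4, b4 = weight([1024, num_classes]), bias([num_classes])
    y = tf.nn.softmax(tf.matmul(h3_drop, W4) + b4)
    return y, [W1, b1, W2, b2, W3, b3, W4, b4]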
Example 4
import random

import tensorflow as tf

import model
# transform_onehot and shuffle(X, y) are assumed to be defined or imported
# elsewhere in this project (shuffle behaves like sklearn.utils.shuffle)


def fit(X, y, output_path, batch_size=50, verbose=False):

    # one hot vector
    y = transform_onehot(y)

    with tf.Graph().as_default():

        # input/output
        _x = tf.placeholder(tf.float32, shape=[None, 2352], name='in')
        _y = tf.placeholder(tf.float32, shape=[None, 3])

        # train logits
        training_logits = model.convolutional(_x)

        loss = tf.losses.softmax_cross_entropy(onehot_labels=_y,
                                               logits=training_logits)

        # optimizer
        train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)

        correct_prediction = tf.equal(tf.argmax(training_logits, 1),
                                      tf.argmax(_y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        saver = tf.train.Saver()

        with tf.Session() as sess:

            sess.run(tf.global_variables_initializer())

            for epoch in range(100):
                images, labels = shuffle(X, y)

                for i in range(0, len(images), batch_size):
                    batch = images[i:i + batch_size], labels[i:i + batch_size]
                    train_step.run(feed_dict={_x: batch[0], _y: batch[1]})

                if epoch % 10 == 0:
                    i = random.randint(0, int(len(images) / 100)) * 100
                    batch = images[i:i + 100], labels[i:i + 100]
                    train_accuracy = accuracy.eval(feed_dict={
                        _x: batch[0],
                        _y: batch[1]
                    })

                    if verbose:
                        print('step %d, training accuracy %g' %
                              (epoch, train_accuracy))

            save_path = saver.save(sess, f"{output_path}/cnn.ckpt")

            if verbose:
                print("Model saved in file: %s" % save_path)

            tf.train.write_graph(sess.graph_def,
                                 output_path,
                                 'cnn.pb',
                                 as_text=False)
            tf.train.write_graph(sess.graph_def,
                                 output_path,
                                 'cnn.pbtxt',
                                 as_text=True)
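fit() leaves behind cnn.ckpt plus the serialized GraphDefs. A sketch of how the checkpoint could be restored for inference, assuming the same model module and the 2352-wide flattened input used above (predict is a hypothetical name):

def predict(X, output_path):
    with tf.Graph().as_default():
        _x = tf.placeholder(tf.float32, shape=[None, 2352], name='in')
        logits = model.convolutional(_x)
        saver = tf.train.Saver()
        with tf.Session() as sess:
            # restore the weights saved by fit()
            saver.restore(sess, f"{output_path}/cnn.ckpt")
            return sess.run(tf.argmax(logits, 1), feed_dict={_x: X})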