Code example #1
def main(_):
    # Import data
    if DATA == "MNIST":
        mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
    elif DATA == "FASHION":
        mnist = input_data.read_data_sets(
            "data/fashion",
            source_url="http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/",
            one_hot=True,
        )

    # Create the model
    x = tf.placeholder(tf.float32, [None, 784])

    # Define loss and optimizer
    y_ = tf.placeholder(tf.float32, [None, 10])

    # Build the graph for the deep net
    y_conv, keep_prob = deepnn(x)

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv)
    )
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(10001):
            batch = mnist.train.next_batch(50)

            ################################## MODIFIED CODE BELOW ##################################
            batch_val = mnist.validation.next_batch(50)
            feed_dict_train = {x: batch[0], y_: batch[1], keep_prob: 1.0}
            feed_dict_val = {x: batch_val[0], y_: batch_val[1], keep_prob: 1.0}
            # Writes data into run log csv file
            write_data(
                accuracy=accuracy,
                cross_entropy=cross_entropy,
                feed_dict_train=feed_dict_train,
                feed_dict_val=feed_dict_val,
                step=i,
            )
            ################################## MODIFIED CODE ABOVE ##################################

            if i % 100 == 0:
                train_accuracy = accuracy.eval(
                    feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0}
                )
                print("step %d, training accuracy %g" % (i, train_accuracy))
            train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

        print(
            "test accuracy %g"
            % accuracy.eval(
                feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}
            )
        )
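
A note on the write_data helper: all four examples log metrics through a write_data function that is not shown above. The following is a minimal sketch of what such a helper could look like, assuming it evaluates the metric tensors under the current default session and appends one row per step to a run-log CSV; the file name run_log.csv, the column names, and the csv-based implementation are assumptions, not part of the original project.

import csv
import os

import tensorflow as tf


def write_data(accuracy, cross_entropy, feed_dict_train, feed_dict_val, step,
               log_path="run_log.csv"):
    """Hypothetical logger: evaluates accuracy/cross-entropy on the training
    and validation feeds and appends one CSV row per step."""
    sess = tf.get_default_session()
    train_acc, train_loss = sess.run([accuracy, cross_entropy],
                                     feed_dict=feed_dict_train)
    val_acc, val_loss = sess.run([accuracy, cross_entropy],
                                 feed_dict=feed_dict_val)
    write_header = not os.path.exists(log_path)
    with open(log_path, "a", newline="") as f:
        writer = csv.writer(f)
        if write_header:
            writer.writerow(["step", "train_accuracy", "train_cross_entropy",
                             "val_accuracy", "val_cross_entropy"])
        writer.writerow([step, train_acc, train_loss, val_acc, val_loss])
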
Code example #2
def main(_):
    # Import data
    print("Starting to generate CIFAR10 images.")
    (x_train, y_train), (x_test,
                         y_test) = tf.keras.datasets.cifar10.load_data()
    x_train = np.moveaxis(x_train, 1, 3) / 255.  # Normalize values
    x_train_vec = x_train.reshape(50000, -1)

    y_train = np.squeeze(y_train)
    y_test = np.squeeze(y_test)

    x_test = np.moveaxis(x_test, 1, 3) / 255.  # Normalize values
    x_test_vec = x_test.reshape(10000, -1)

    X_train, X_val, y_train, y_val = train_test_split(x_train_vec,
                                                      y_train,
                                                      test_size=0.1,
                                                      random_state=42)
    print("Finished generating CIFAR10 images.")

    # Create the model
    x = tf.placeholder(tf.float32, [None, 3 * 32 * 32])
    W = tf.Variable(tf.zeros([3 * 32 * 32, 10]))
    b = tf.Variable(tf.zeros([10]))
    y = tf.matmul(x, W) + b

    # Define loss and optimizer
    y_ = tf.placeholder(tf.int64, [None])

    # The raw formulation of cross-entropy,
    #
    #   tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),
    #                                 reduction_indices=[1]))
    #
    # can be numerically unstable.
    #
    # So here we use tf.losses.sparse_softmax_cross_entropy on the raw
    # outputs of 'y', and then average across the batch.
    cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=y_, logits=y)
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

    # Add accuracy and cross entropy to the graph using util function
    accuracy, cross_entropy = add_eval(y, y_)

    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()
    # Train
    for i in range(20001):
        start_train = i * 100 % y_train.shape[0]
        end_train = start_train + 100

        start_val = i * 100 % y_val.shape[0]
        end_val = start_val + 100

        batch = (X_train[start_train:end_train],
                 y_train[start_train:end_train])
        batch_val = (X_val[start_val:end_val], y_val[start_val:end_val])

        feed_dict_train = {x: batch[0], y_: batch[1]}
        feed_dict_val = {x: batch_val[0], y_: batch_val[1]}
        # Writes data into run log csv file
        write_data(accuracy=accuracy,
                   cross_entropy=cross_entropy,
                   feed_dict_train=feed_dict_train,
                   feed_dict_val=feed_dict_val,
                   step=i)
        sess.run(train_step, feed_dict={x: batch[0], y_: batch[1]})

    # Test trained model
    correct_prediction = tf.equal(tf.argmax(y, 1), y_)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(sess.run(accuracy, feed_dict={x: x_test_vec, y_: y_test}))
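
A note on add_eval: examples #2 and #3 rely on an add_eval util function that is not shown. Below is a minimal sketch consistent with how its return values are used (an accuracy tensor and a sparse softmax cross-entropy tensor built from the logits y and integer labels y_); the actual implementation in the original project may differ.

def add_eval(y, y_):
    """Hypothetical util: adds accuracy and cross-entropy ops to the graph.

    y  -- logits tensor of shape [batch, 10]
    y_ -- integer class labels of shape [batch]
    """
    correct_prediction = tf.equal(tf.argmax(y, 1), y_)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=y_, logits=y)
    return accuracy, cross_entropy
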
Code example #3
def main(_):
    # Import data
    if DATA == "MNIST":
        mnist = input_data.read_data_sets(FLAGS.data_dir)
    elif DATA == "FASHION":
        mnist = input_data.read_data_sets(
            'data/fashion',
            source_url=
            'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/')
    # Create the model
    x = tf.placeholder(tf.float32, [None, 784])
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    y = tf.matmul(x, W) + b

    # Define loss and optimizer
    y_ = tf.placeholder(tf.int64, [None])

    # The raw formulation of cross-entropy,
    #
    #   tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),
    #                                 reduction_indices=[1]))
    #
    # can be numerically unstable.
    #
    # So here we use tf.losses.sparse_softmax_cross_entropy on the raw
    # outputs of 'y', and then average across the batch.
    cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=y_, logits=y)
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

    ################################## MODIFIED CODE BELOW ##################################
    accuracy, cross_entropy = add_eval(y, y_)
    ################################## MODIFIED CODE ABOVE ##################################

    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()
    # Train
    for i in range(10001):
        batch_xs, batch_ys = mnist.train.next_batch(100)

        ################################## MODIFIED CODE BELOW ##################################
        batch = (batch_xs, batch_ys)  # log metrics on the same batch used for the training step below
        batch_val = mnist.validation.next_batch(100)
        feed_dict_train = {x: batch[0], y_: batch[1]}
        feed_dict_val = {x: batch_val[0], y_: batch_val[1]}
        # Writes data into run log csv file
        write_data(accuracy=accuracy,
                   cross_entropy=cross_entropy,
                   feed_dict_train=feed_dict_train,
                   feed_dict_val=feed_dict_val,
                   step=i)
        ################################## MODIFIED CODE ABOVE ##################################

        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

    # Test trained model
    correct_prediction = tf.equal(tf.argmax(y, 1), y_)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(
        sess.run(accuracy,
                 feed_dict={
                     x: mnist.test.images,
                     y_: mnist.test.labels
                 }))
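
A note on FLAGS, DATA, and input_data: examples #1 and #3 reference a DATA switch, FLAGS.data_dir, and an input_data module that are defined outside the functions shown. The sketch below shows the surrounding boilerplate as it appears in the standard TensorFlow MNIST tutorial scripts, which these examples appear to be based on; the DATA value and the default data directory are assumptions.

import argparse
import sys

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

DATA = "MNIST"  # or "FASHION"
FLAGS = None

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir", type=str,
                        default="/tmp/tensorflow/mnist/input_data",
                        help="Directory for storing input data")
    FLAGS, unparsed = parser.parse_known_args()
    # tf.app.run parses the remaining command-line flags and calls main(_)
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
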
Code example #4
def main(_):
    # Import data
    print("Starting to generate CIFAR10 images.")
    (x_train, y_train), (x_test,
                         y_test) = tf.keras.datasets.cifar10.load_data()
    x_train = np.moveaxis(x_train, 1, 3) / 255.  # Normalize values
    x_train_vec = x_train.reshape(50000, -1)

    x_test = np.moveaxis(x_test, 1, 3) / 255.  # Normalize values
    x_test_vec = x_test.reshape(10000, -1)

    X_train, X_val, y_train, y_val = train_test_split(x_train_vec,
                                                      y_train,
                                                      test_size=0.1,
                                                      random_state=42)
    print("Finished generating CIFAR10 images.")

    # Create the model
    x = tf.placeholder(tf.float32, [None, 32 * 32 * 3])

    # Define loss and optimizer
    y_ = tf.placeholder(tf.float32, [None, 10])

    # Build the graph for the deep net
    y_conv, keep_prob = deepnn(x)

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=y_conv))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(
        cross_entropy)  # the Keras example uses RMSProp; Adam works better here
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    with tf.Session() as sess:
        # One-hot encode the integer labels; fit the encoder once on the
        # training labels and reuse it for the validation and test labels.
        encoder = OneHotEncoder(sparse=False)
        y_train = encoder.fit_transform(y_train)
        y_val = encoder.transform(y_val)
        y_test = encoder.transform(y_test)

        sess.run(tf.global_variables_initializer())
        for i in range(20001):
            start_train = i * 50 % y_train.shape[0]
            end_train = start_train + 50

            start_val = i * 50 % y_val.shape[0]
            end_val = start_val + 50

            batch = (X_train[start_train:end_train],
                     y_train[start_train:end_train])
            batch_val = (X_val[start_val:end_val], y_val[start_val:end_val])

            feed_dict_train = {x: batch[0], y_: batch[1], keep_prob: 1.0}
            feed_dict_val = {x: batch_val[0], y_: batch_val[1], keep_prob: 1.0}
            # Writes data into run log csv file
            write_data(accuracy=accuracy,
                       cross_entropy=cross_entropy,
                       feed_dict_train=feed_dict_train,
                       feed_dict_val=feed_dict_val,
                       step=i)

            if i % 100 == 0:
                train_accuracy = accuracy.eval(feed_dict={
                    x: batch[0],
                    y_: batch[1],
                    keep_prob: 1.0
                })
                print('step %d, training accuracy %g' % (i, train_accuracy))
            train_step.run(feed_dict={
                x: batch[0],
                y_: batch[1],
                keep_prob: 0.5
            })

        print('test accuracy %g' % accuracy.eval(feed_dict={
            x: x_test_vec,
            y_: y_test,
            keep_prob: 1.0
        }))
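
A note on deepnn: examples #1 and #4 build their network with a deepnn helper that is not included above. Below is a minimal sketch of the CIFAR-10 variant used in example #4, assuming it follows the two-conv-block architecture of the TensorFlow mnist_deep tutorial adapted to 32x32x3 inputs; the layer sizes and variable names are assumptions, and the MNIST variant of example #1 would differ only in the input reshape and channel count.

def deepnn(x):
    """Hypothetical deep net: two conv/pool blocks, one dense layer, dropout.

    x -- flattened image batch of shape [batch, 32 * 32 * 3]
    Returns (logits, keep_prob placeholder).
    """
    def weight_variable(shape):
        return tf.Variable(tf.truncated_normal(shape, stddev=0.1))

    def bias_variable(shape):
        return tf.Variable(tf.constant(0.1, shape=shape))

    x_image = tf.reshape(x, [-1, 32, 32, 3])

    # First conv block: 5x5 filters, 32 feature maps, 2x2 max pooling.
    W_conv1 = weight_variable([5, 5, 3, 32])
    b_conv1 = bias_variable([32])
    h_conv1 = tf.nn.relu(
        tf.nn.conv2d(x_image, W_conv1, strides=[1, 1, 1, 1], padding="SAME")
        + b_conv1)
    h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1],
                             strides=[1, 2, 2, 1], padding="SAME")

    # Second conv block: 64 feature maps, another 2x2 max pooling.
    W_conv2 = weight_variable([5, 5, 32, 64])
    b_conv2 = bias_variable([64])
    h_conv2 = tf.nn.relu(
        tf.nn.conv2d(h_pool1, W_conv2, strides=[1, 1, 1, 1], padding="SAME")
        + b_conv2)
    h_pool2 = tf.nn.max_pool(h_conv2, ksize=[1, 2, 2, 1],
                             strides=[1, 2, 2, 1], padding="SAME")

    # Dense layer on the 8x8x64 feature map, with dropout before the logits.
    W_fc1 = weight_variable([8 * 8 * 64, 1024])
    b_fc1 = bias_variable([1024])
    h_pool2_flat = tf.reshape(h_pool2, [-1, 8 * 8 * 64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

    keep_prob = tf.placeholder(tf.float32)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
    return y_conv, keep_prob
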