Example #1

import tensorflow as tf

# Number of features (28*28 image is 784 features)
n_features = 784
# Number of labels
n_labels = 3

# Features and Labels
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)

# Weights and Biases
w = get_weights(n_features, n_labels)
b = get_biases(n_labels)

# Linear Function xW + b
logits = linear(features, w, b)

# Training data
train_features, train_labels = mnist_features_labels(n_labels)

with tf.Session() as session:
    session.run(tf.global_variables_initializer())

    # Softmax function to normalize the logits into probabilities between 0 and 1;
    # larger logits are mapped to larger probabilities
    prediction = tf.nn.softmax(logits)

    # Cross entropy
    # This quantifies how far off the predictions were.
    cross_entropy = -tf.reduce_sum(labels * tf.log(prediction),
                                   reduction_indices=1)
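All four examples call the same helper functions without defining them. Below is a minimal sketch of what get_weights, get_biases, and linear might look like, reconstructed from how they are called; the initializers chosen here are assumptions, not the original solutions.

import tensorflow as tf

def get_weights(n_features, n_labels):
    # Weight matrix of shape (n_features, n_labels) with small random
    # initial values (assumed initializer)
    return tf.Variable(tf.truncated_normal((n_features, n_labels)))

def get_biases(n_labels):
    # One bias per label, initialized to zero (assumed initializer)
    return tf.Variable(tf.zeros(n_labels))

def linear(x, w, b):
    # Linear function xW + b
    return tf.add(tf.matmul(x, w), b)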
Example #2

import tensorflow as tf

# Number of features (28*28 image is 784 features)
n_features = 784
# Number of labels
n_labels = 3

# Features and Labels
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)

# Weights and Biases
w = weights(n_features, n_labels)
b = biases(n_labels)

# Linear Function xW + b
logits = linear(features, w, b)

# Training data
train_features, train_labels = mnist_features_labels(n_labels)

with tf.Session() as session:
    # Initialize all session variables (weights and biases)
    session.run(tf.global_variables_initializer())

    # Softmax
    prediction = tf.nn.softmax(logits)

    # Cross entropy
    # This quantifies how far off the predictions were.
    # You'll learn more about this in future lessons.
    cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), reduction_indices=1)
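For reference, the cross_entropy line above computes, for each training example, the cross entropy between the one-hot label vector y and the softmax prediction \hat{y}:

    D(\hat{y}, y) = -\sum_{j} y_j \log \hat{y}_j

Since y is one-hot, only the log-probability the model assigns to the correct class contributes, so the loss grows as that probability shrinks.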
Example #3

import tensorflow as tf

# Number of features (28 * 28 image is 784 features)
n_features = 784
# Number of labels
n_labels = 3

# Features and Labels
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)

# Weights and biases
weights = get_weights(n_features, n_labels)
biases = get_biases(n_labels)

# Linear Function
logits = linear(features, weights, biases)

# Training Data
train_features, train_labels = mnist_features_labels(n_labels)

with tf.Session() as sess:
    # Initialize session variables
    sess.run(tf.global_variables_initializer())

    # Softmax: create probabilities from logit scores
    prediction = tf.nn.softmax(logits)

    # Cross entropy
    # This quantifies how far off the predictions were
    cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), reduction_indices=1)
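The remaining undefined helper is mnist_features_labels. A plausible sketch, assuming it returns flattened MNIST images restricted to the first n_labels classes, with one-hot labels; the dataset path and sample size here are placeholders:

from tensorflow.examples.tutorials.mnist import input_data

def mnist_features_labels(n_labels):
    # Gather features and one-hot labels for the first n_labels classes only
    mnist_features = []
    mnist_labels = []
    mnist = input_data.read_data_sets('mnist_data', one_hot=True)

    # Look at a fixed sample of the training images
    for feature, label in zip(*mnist.train.next_batch(10000)):
        # Keep the example only if its class is among the first n_labels
        if label[:n_labels].any():
            mnist_features.append(feature)
            mnist_labels.append(label[:n_labels])
    return mnist_features, mnist_labels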
Example #4

import tensorflow as tf

# Number of features (28*28 image is 784 features)
n_features = 784
# Number of labels
n_labels = 3

# Features and Labels
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)

# Weights and Biases
W = get_weights(n_features, n_labels)
b = get_biases(n_labels)

# Linear Function xW + b
logits = linear(features, W, b)

# Training data
train_features, train_labels = mnist_features_labels(n_labels)

with tf.Session() as session:
    session.run(tf.global_variables_initializer())

    # Softmax turns the logit scores into probabilities
    prediction = tf.nn.softmax(logits)

    # Cross entropy
    # This quantifies how far off the predictions were.
    cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), reduction_indices=1)

    # Training loss
    loss = tf.reduce_mean(cross_entropy)
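All four examples stop at (or just before) the loss. A minimal sketch of the training step that would typically follow, continuing inside the same session block; the optimizer choice and learning_rate value are assumptions for illustration:

    # Gradient descent minimizes the mean cross entropy
    # (assumed optimizer and learning rate)
    learning_rate = 0.08
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

    # Run one optimization step, feeding the training data into the placeholders
    _, l = session.run(
        [optimizer, loss],
        feed_dict={features: train_features, labels: train_labels})
    print('Loss: {}'.format(l))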