Example No. 1
import numpy as np
from sklearn.metrics import accuracy_score
from matplotlib import pyplot as plt
import tensorflow as tf
print("We're using TF", tf.__version__)

import sys
sys.path.append("../..")

import keras_utils
import preprocessed_mnist
X_train, y_train, X_val, y_val, X_test, y_test = preprocessed_mnist.load_dataset()

# X contains grayscale pixel values divided by 255
print("X_train [shape %s] sample patch:\n" % (str(X_train.shape)),
      X_train[1, 15:20, 5:10])
print("A closeup of a sample patch:")
plt.imshow(X_train[1, 15:20, 5:10], cmap="Greys")
plt.show()
print("And the whole sample:")
plt.imshow(X_train[1], cmap="Greys")
plt.show()
print("y_train [shape %s] 10 samples:\n" % (str(y_train.shape)), y_train[:10])

# Flatten each 28x28 image into a 784-dimensional feature vector
X_train_flat = X_train.reshape((X_train.shape[0], -1))
print(X_train_flat.shape)

X_val_flat = X_val.reshape((X_val.shape[0], -1))
print(X_val_flat.shape)
import keras
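
# keras is imported just above; as an assumption (not part of the original snippet),
# a typical next step is to one-hot encode the labels for a 10-way softmax output:
y_train_oh = keras.utils.to_categorical(y_train, 10)
y_val_oh = keras.utils.to_categorical(y_val, 10)
print(y_train_oh.shape, y_val_oh.shape)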
Example No. 2
# Using `tf.layers.dense` will create all the necessary variables automatically.
# Here you can also choose an activation function (remember that we need it for a hidden layer!).
#
# Now define the MLP with 2 hidden layers and restart training with the cell above.
#
# You're aiming for ~0.97 validation accuracy here.

# In[ ]:

# Write the code here to get a new `step` operation and then run the cell with the training loop above.
# Name your variables in the same way (e.g. logits, probas, classes, etc.) for safety.
### YOUR CODE HERE ###
from preprocessed_mnist import load_dataset
X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()
print(X_train.shape, y_train.shape)

# In[ ]:

s = reset_tf_session()  # helper defined elsewhere in the notebook: clears the graph and returns a session
input_X = tf.placeholder(tf.float32, shape=(None, 784))  # flattened 28x28 images
input_y = tf.placeholder(tf.float32, shape=(None, 10))   # one-hot labels
# MLP with two hidden layers of 256 sigmoid units each
hidden1 = tf.layers.dense(input_X, 256, activation=tf.nn.sigmoid)
hidden2 = tf.layers.dense(hidden1, 256, activation=tf.nn.sigmoid)
logits = tf.layers.dense(hidden2, 10)  # raw class scores
probas = tf.nn.softmax(logits)
classes = tf.argmax(probas, 1)
#print(hidden1.shape,logits.shape, probas.shape, classes.shape)

# average softmax cross-entropy over the batch
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=input_y, logits=logits))
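
# A hedged sketch of the `step` operation requested in the comment above
# (the optimizer choice and learning rate are assumptions, not from the original):
step = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(loss)
s.run(tf.global_variables_initializer())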
Example No. 3
def softmax_crossentropy_with_logits(logits, reference_answers):
    """Compute crossentropy from logits[batch,n_classes] and ids of correct answers"""
    # head reconstructed: the excerpt starts mid-function; mirrors the grad function below
    logits_for_answers = logits[np.arange(len(logits)), reference_answers]
    xentropy = -logits_for_answers + np.log(np.sum(np.exp(logits), axis=-1))
    return xentropy


def grad_softmax_crossentropy_with_logits(logits, reference_answers):
    """Compute crossentropy gradient from logits[batch,n_classes] and ids of correct answers"""
    ones_for_answers = np.zeros_like(logits)
    ones_for_answers[np.arange(len(logits)), reference_answers] = 1

    softmax = np.exp(logits) / np.exp(logits).sum(axis=-1, keepdims=True)

    return (-ones_for_answers + softmax) / logits.shape[0]
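
# A quick finite-difference check of the analytic gradient above (this check is an
# illustration added here, not part of the original snippet):
logits_check = np.linspace(-1, 1, 500).reshape([50, 10])
answers_check = np.arange(50) % 10
base_loss = softmax_crossentropy_with_logits(logits_check, answers_check).mean()
numeric_grad = np.zeros_like(logits_check)
eps = 1e-5
for idx in np.ndindex(*logits_check.shape):
    shifted = logits_check.copy()
    shifted[idx] += eps
    numeric_grad[idx] = (softmax_crossentropy_with_logits(shifted, answers_check).mean() - base_loss) / eps
analytic_grad = grad_softmax_crossentropy_with_logits(logits_check, answers_check)
assert np.allclose(numeric_grad, analytic_grad, atol=1e-4), "gradient check failed"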


X_train, y_train, X_val, y_val, X_test, y_test = load_dataset(flatten=True)
plt.figure(figsize=[6, 6])
for i in range(3):
    plt.subplot(2, 2, i + 1)
    plt.title("Label: %i" % y_train[i])
    plt.imshow(X_train[i].reshape([28, 28]), cmap='gray')

# A fully-connected network: 784 -> 100 -> 200 -> 100 -> 10, with ReLU non-linearities
network = []
network.append(Dense(X_train.shape[1], 100))
network.append(ReLU())
network.append(Dense(100, 200))
network.append(ReLU())
network.append(Dense(200, 100))
network.append(ReLU())
network.append(Dense(100, 10))
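
# `Dense` and `ReLU` above are assumed to be layer classes defined earlier in the
# notebook; a minimal sketch of the interface they are expected to expose, under
# that assumption (an illustration, not the original implementation):
class ReLU:
    def forward(self, input):
        return np.maximum(0, input)

    def backward(self, input, grad_output):
        # pass the gradient through only where the input was positive
        return grad_output * (input > 0)


class Dense:
    def __init__(self, input_units, output_units, learning_rate=0.1):
        self.learning_rate = learning_rate
        self.weights = np.random.randn(input_units, output_units) * 0.01
        self.biases = np.zeros(output_units)

    def forward(self, input):
        return input @ self.weights + self.biases

    def backward(self, input, grad_output):
        grad_input = grad_output @ self.weights.T
        # plain SGD update on this layer's parameters
        self.weights -= self.learning_rate * (input.T @ grad_output)
        self.biases -= self.learning_rate * grad_output.sum(axis=0)
        return grad_input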
Example No. 4
from preprocessed_mnist import load_dataset
# load the MNIST data (fetched via keras under the hood)
X_orig_train, y_orig_train, X_orig_val, y_orig_val, X_orig_test, y_orig_test = load_dataset()

import numpy as np
import tensorflow as tf


def preprocess():
    """
    Flatten each image into a column vector and transform y into one-hot form.
    Note the transpose: examples are stored as columns (shapes: features x samples).
    """

    X_train = X_orig_train.reshape(X_orig_train.shape[0], -1).T
    X_val = X_orig_val.reshape(X_orig_val.shape[0], -1).T
    X_test = X_orig_test.reshape(X_orig_test.shape[0], -1).T
    y_train = np.eye(10)[y_orig_train.reshape(-1)].T
    y_val = np.eye(10)[y_orig_val.reshape(-1)].T
    y_test = np.eye(10)[y_orig_test.reshape(-1)].T

    return X_train, y_train, X_val, y_val, X_test, y_test


X_train, y_train, X_val, y_val, X_test, y_test = preprocess()
print(X_train.shape, y_train.shape)

# fix the random seed so that results are reproducible
seed = 10
# one hidden layer with 300 units
hidden_units = 300
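
# A hedged sketch (an assumption, not part of the original) of how `seed` and
# `hidden_units` would typically be used to initialize a one-hidden-layer network
# in this column-major (features x samples) layout:
tf.set_random_seed(seed)
W1 = tf.get_variable("W1", shape=(hidden_units, 784),
                     initializer=tf.contrib.layers.xavier_initializer(seed=seed))
b1 = tf.get_variable("b1", shape=(hidden_units, 1), initializer=tf.zeros_initializer())
W2 = tf.get_variable("W2", shape=(10, hidden_units),
                     initializer=tf.contrib.layers.xavier_initializer(seed=seed))
b2 = tf.get_variable("b2", shape=(10, 1), initializer=tf.zeros_initializer())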