Example #1
print("############################")
print("# Test FORWARD PROPAGATION #")
print("############################")
model_test_forward_prop = RNNNumpy(vocabulary_size)

# Run forward propagation on the training example at index 10
o, s = model_test_forward_prop.forward_propagation(X_train[10])

print(o.shape)  # (sentence_length, vocabulary_size), e.g. (45, 8000)
print(o)
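
# For reference, a minimal sketch of the recurrence forward_propagation is
# assumed to implement (the parameter names U, V, W and the Elman-style
# update follow the tutorial this code is based on; they are assumptions,
# not read from rnn_numpy itself):
def forward_propagation_sketch(U, V, W, x):
    T = len(x)
    s = np.zeros((T + 1, U.shape[0]))   # hidden states; s[-1] is the zero initial state
    o = np.zeros((T, V.shape[0]))       # per-step distribution over the vocabulary
    for t in range(T):
        # x[t] is a word index, so multiplying U by a one-hot vector
        # just selects a column of U
        s[t] = np.tanh(U[:, x[t]] + W.dot(s[t - 1]))
        z = V.dot(s[t])
        o[t] = np.exp(z - z.max()) / np.sum(np.exp(z - z.max()))  # stable softmax
    return o, s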

# Compute a prediction by forward-propagating with the current
# (randomly initialized) weights, even though they are obviously
# far from optimal
predictions = model_test_forward_prop.predict(X_train[10])
print(predictions.shape)
print(predictions)
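
# predict is assumed to reduce each per-step distribution to its most likely
# word index; a sketch under that assumption (not the rnn_numpy code itself):
def predict_sketch(model, x):
    o, s = model.forward_propagation(x)
    return np.argmax(o, axis=1)  # one predicted word index per time step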

# According to the tutorial: since we have C = vocabulary_size words, each
# word should be predicted, on average, with probability 1/C, which yields
# a loss of L = -1/N * N * log(1/C) = log(C)
print("Expected loss for random predictions: %f" % np.log(vocabulary_size))
# Limit to 1000 examples to save time
print("Actual loss: %f" %
      model_test_forward_prop.calculate_loss(X_train[:1000], y_train[:1000]))
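
# calculate_loss is assumed to be the mean cross-entropy over all predicted
# words, matching L = -1/N * sum(log o[t, y[t]]) from the comment above;
# a sketch under that assumption:
def calculate_loss_sketch(model, X, y):
    total, N = 0.0, sum(len(y_i) for y_i in y)   # N = total number of words
    for x_i, y_i in zip(X, y):
        o, s = model.forward_propagation(x_i)
        correct = o[np.arange(len(y_i)), y_i]    # probability of each true next word
        total += -np.sum(np.log(correct))
    return total / N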

print()

print("#######################")
print("# Test GRADIENT CHECK #")
print("#######################")
Example #2
import preprocess
from rnn_numpy import RNNNumpy
import numpy as np

X_train, y_train, vocabulary_size = preprocess.create_train_data()
np.random.seed(10)
model = RNNNumpy(vocabulary_size)
output, hidden_states = model.forward_propagation(X_train[10])
print(output.shape)
print(output)

predictions = model.predict(X_train[10])
print(predictions.shape)
print(predictions)

# Limit to 1000 examples to save time
print "Expected Loss for random predictions: %f" % np.log(vocabulary_size)
print "Actual loss: %f" % model.calculate_loss(X_train[:1000], y_train[:1000])