from net import RecurrentNetwork, load_librispeech, labels_to_chars
import numpy as np
import matplotlib.pyplot as plt

# Demo: train a bidirectional LSTM with a CTC cost on a single
# LibriSpeech utterance, after per-feature standardization.
(train_x, train_y), (valid_x, valid_y), (test_x, test_y) = load_librispeech()

clf = RecurrentNetwork(learning_alg="rmsprop",
                       hidden_layer_sizes=[500],
                       max_iter=100,
                       cost="ctc",
                       bidirectional=True,
                       learning_rate=0.0001,
                       momentum=0.9,
                       recurrent_activation="lstm",
                       random_seed=1999)

# Show the transcript of the utterance we are about to fit.
print(labels_to_chars(train_y[2]))

# Standardize each feature dimension of this utterance.
feature_means = np.mean(train_x[2], axis=0)
feature_stds = np.std(train_x[2], axis=0)
normalized = (train_x[2] - feature_means) / feature_stds

clf.fit(normalized, train_y[2])

# Drop into an interactive shell for inspection.
from IPython import embed
embed()
from net import RecurrentNetwork, load_librispeech, labels_to_chars
import numpy as np
import matplotlib.pyplot as plt

# Demo: bidirectional LSTM + CTC on one LibriSpeech utterance.
(train_x, train_y), (valid_x, valid_y), (test_x, test_y) = load_librispeech()

clf = RecurrentNetwork(learning_alg="rmsprop",
                       hidden_layer_sizes=[500],
                       max_iter=100,
                       cost="ctc",
                       bidirectional=True,
                       learning_rate=0.0001,
                       momentum=0.9,
                       recurrent_activation="lstm",
                       random_seed=1999)

# Print the character transcript for the chosen utterance.
print(labels_to_chars(train_y[2]))

# Per-feature standardization of the single training utterance.
mu = np.mean(train_x[2], axis=0)
sigma = np.std(train_x[2], axis=0)
standardized = (train_x[2] - mu) / sigma

clf.fit(standardized, train_y[2])

# Interactive inspection after training.
from IPython import embed
embed()
# Toy sequence-classification demo: build random sequences and derive a
# 3-class target from a threshold comparison between two input channels,
# then fit a single-layer LSTM with a softmax cost.
# NOTE(review): `n_u` and `n_h` are referenced but not defined in this
# chunk — presumably set earlier in the file; confirm before running.
time_steps = 10
n_seq = 100
# n_y equals the number of classes
random_state = np.random.RandomState(1999)
seq = random_state.randn(n_seq, time_steps, n_u)
targets = np.zeros((n_seq, time_steps), dtype=np.int32)

# Class 1 when channel 1 exceeds (lagged channel 0 + thresh);
# class 2 when it falls below (lagged channel 0 - thresh); else class 0.
thresh = 0.5
targets[:, 2:][seq[:, 1:-1, 1] > seq[:, :-2, 0] + thresh] = 1
targets[:, 2:][seq[:, 1:-1, 1] < seq[:, :-2, 0] - thresh] = 2

clf = RecurrentNetwork(learning_alg="sgd",
                       hidden_layer_sizes=[n_h],
                       max_iter=10,
                       cost="softmax",
                       learning_rate=0.1,
                       momentum=0.99,
                       recurrent_activation="lstm",
                       random_seed=1999)
clf.fit(seq, targets)
clf.predict(seq)

# Plot the true classes for sequence 1.
plt.close('all')
fig = plt.figure()
plt.grid()
ax1 = plt.subplot(211)
plt.scatter(np.arange(time_steps), targets[1], marker='o', c='b')
plt.grid()
from net import RecurrentNetwork, load_fruitspeech, labels_to_chars
import matplotlib.pyplot as plt
import numpy as np

# Demo: train a bidirectional LSTM encoder-decoder on fruitspeech and
# compare the predicted transcript of the first validation utterance
# against its ground truth.
(train_x, train_y), (valid_x, valid_y), (test_x, test_y) = load_fruitspeech()

clf = RecurrentNetwork(learning_alg="rmsprop",
                       hidden_layer_sizes=[500],
                       max_iter=100,
                       cost="encdec",
                       bidirectional=True,
                       learning_rate=0.00002,
                       momentum=0.9,
                       recurrent_activation="lstm",
                       random_seed=1999)

# Compute standardization statistics over ALL training frames, then apply
# them to both the training and validation utterances.
all_frames = np.vstack(train_x)
means = np.mean(all_frames, axis=0)
std = np.std(all_frames, axis=0)
for n, t in enumerate(train_x):
    train_x[n] = (t - means) / std
for n, v in enumerate(valid_x):
    valid_x[n] = (v - means) / std

# Interactive inspection before training.
from IPython import embed
embed()

clf.fit(train_x, train_y, valid_x, valid_y)

y_hat = labels_to_chars(clf.predict(valid_x[0])[0])
y = labels_to_chars(valid_y[0])
print(y_hat)
# Bug fix: `y` (the ground-truth transcript) was computed but never
# printed, making the prediction impossible to compare; the companion
# version of this script prints both.
print(y)
# Toy sequence-classification demo: random sequences with a 3-class
# threshold-derived target, fit with a 2-layer bidirectional LSTM, then
# plot true classes against the model's output probabilities.
n_h = 6
n_y = 3
# Bug fix: `n_u` (number of input channels) was used below but never
# defined, raising NameError. The target construction indexes channels
# 0 and 1, so 2 inputs are required.
n_u = 2
time_steps = 10
n_seq = 100
# n_y equals the number of classes
random_state = np.random.RandomState(1999)
seq = random_state.randn(n_seq, time_steps, n_u)
targets = np.zeros((n_seq, time_steps), dtype=np.int32)

# Class 1 when channel 1 exceeds (lagged channel 0 + thresh);
# class 2 when it falls below (lagged channel 0 - thresh); else class 0.
thresh = 0.5
targets[:, 2:][seq[:, 1:-1, 1] > seq[:, :-2, 0] + thresh] = 1
targets[:, 2:][seq[:, 1:-1, 1] < seq[:, :-2, 0] - thresh] = 2

clf = RecurrentNetwork(learning_alg="sgd",
                       hidden_layer_sizes=[n_h, n_h],
                       # Was 1E3 (a float); use an int for iteration counts.
                       max_iter=1000,
                       cost="softmax",
                       learning_rate=0.1,
                       momentum=0.99,
                       recurrent_activation="lstm",
                       bidirectional=True,
                       random_seed=1999)
clf.fit(seq, targets)

# Plot true classes (blue dots) over the model's per-class probabilities
# (grayscale image) for sequence 1.
plt.close('all')
fig = plt.figure()
plt.grid()
ax1 = plt.subplot(211)
plt.scatter(np.arange(time_steps), targets[1], marker='o', c='b')
plt.grid()
guess = clf.predict_proba(seq[1])
guessed_probs = plt.imshow(guess[0].T, interpolation='nearest', cmap='gray')
ax1.set_title('blue points: true class, grayscale: model output (white mean class)')
from net import RecurrentNetwork, load_cmuarctic, labels_to_chars
import matplotlib.pyplot as plt
import numpy as np

# Demo: train a bidirectional LSTM with a CTC cost on a single
# CMU Arctic utterance, after global mean/std normalization.
(train_x, train_y), (valid_x, valid_y), (test_x, test_y) = load_cmuarctic()

# Bug fix: learning_alg was "sfg", which matches no algorithm name used by
# the sibling scripts ("sgd"/"rmsprop"); assumed a typo for "sgd" —
# confirm against RecurrentNetwork's accepted values.
clf = RecurrentNetwork(learning_alg="sgd",
                       hidden_layer_sizes=[500],
                       max_col_norm=1.9635,
                       max_iter=1000,
                       cost="ctc",
                       bidirectional=True,
                       learning_rate=0.0001,
                       momentum=0.9,
                       recurrent_activation="lstm",
                       random_seed=1999)

tx = train_x[2]
tx = (tx - tx.mean()) / tx.std()
# Bug fix: the original computed the normalized `tx` above and then fit on
# the raw `train_x[2]`, silently discarding the normalization.
clf.fit(tx, train_y[2])

y = labels_to_chars(train_y[2])
print(y)
from net import RecurrentNetwork, load_fruitspeech, labels_to_chars
import matplotlib.pyplot as plt
import numpy as np

# Demo: bidirectional LSTM encoder-decoder on fruitspeech; print the
# predicted and true transcripts of the first validation utterance.
(train_x, train_y), (valid_x, valid_y), (test_x, test_y) = load_fruitspeech()

clf = RecurrentNetwork(learning_alg="rmsprop",
                       hidden_layer_sizes=[500],
                       max_iter=100,
                       cost="encdec",
                       bidirectional=True,
                       learning_rate=0.00002,
                       momentum=0.9,
                       recurrent_activation="lstm",
                       random_seed=1999)

# Standardization statistics come from ALL training frames and are applied
# to both training and validation utterances.
all_frames = np.vstack(train_x)
means = np.mean(all_frames, axis=0)
std = np.std(all_frames, axis=0)
for n, t in enumerate(train_x):
    train_x[n] = (t - means) / std
for n, v in enumerate(valid_x):
    valid_x[n] = (v - means) / std

# Interactive inspection before training.
from IPython import embed
embed()

clf.fit(train_x, train_y, valid_x, valid_y)

y_hat = labels_to_chars(clf.predict(valid_x[0])[0])
y = labels_to_chars(valid_y[0])
print(y_hat)
print(y)