                    y=make_data('sin', T, [f, 2 * f, 3 * f]))
#base.plt.plot_data(trainData.x, trainData.y, title='Training Data')

#%%
epochs = 1000
base.train_network(x=trainData.x, y=trainData.y, plotLoss=True,
                   epochs=epochs, eta=0.1, monitorY=True)
#base.plt.plot_history(base.yHist)

#%%
base = RNN_Manager(RNN.load('50hidden_trained.pkl'))
Wx, Wh, Wy = base.rnn.get_weights()

#%%
#base.plt.plot_loss(base.loss, ax=plt.gca())

#%%
base.set_test_input(trainData.x, name='Training input 1')
base.plot_feedforward()
##base.plot_hidden()

#%%
lmbda = 0.9
# Attach the EWC regularizer; fall back to the constructor without an
# explicit Hessian if this HessEWC version does not accept one.
try:
    base.rnn.reg = HessEWC(lmbda, base.rnn, trainData, H)
except TypeError:
    base.rnn.reg = HessEWC(lmbda, base.rnn, trainData)
eta = 0.1
seq_length = 25
h = 1e-4
n_epoch = 20

# np.random.seed(400)  # TODO: remove
# compare_gradients()

# Note: this rebinds the name RNN from the class to a trained instance.
RNN = RNN(K, m, eta, seq_length, init='xavier')

save = True
smooth_loss = -1
step = -1
last_epoch = 0
if save:
    smooth_loss, step, last_epoch = RNN.load()
    print('last smooth_loss: %f \t last step: %d \t last epoch: %d' %
          (smooth_loss, step, last_epoch))

# Synthesize 1000 characters seeded with '.' and print them as ASCII text.
synth = RNN.synthesize(make_one_hot([char_to_ind['.']], K), 1000)
text = ""
for column in synth.T:
    text += ind_to_char[np.argmax(column)]
print(text.encode('ascii', 'ignore').decode('ascii'))
exit()

# Everything below is skipped because of the exit() above.
losses = []
f = open('synthesized-' +
         str(datetime.datetime.fromtimestamp(time.time())
             .strftime('%Y-%m-%d %H:%M:%S')),
         'w+')
from glob import glob
import json

import numpy as np

from RNN import RNN
# data_manager is assumed to be provided by a local module of this repo,
# e.g. `from data_manager import data_manager`.

vocab_size = 2575
dm = data_manager(vocab_size=vocab_size)
for i in glob("../data/*"):
    dm.add_data(i)
word_to_index, index_to_word = dm.get_indices()

model = RNN(word_to_index, index_to_word, word_dim=vocab_size)
model.load("models/model.data.npz")

# Generate 100 sentences and join them into a single HTML-ish text blob.
all_sents = []
for i in range(100):
    sentence = model.create_sentence()
    all_sents.append(" ".join(sentence)
                     .replace(".", ".</br>")
                     .replace(",", ",</br>"))

jsobj = dict()
jsobj["text"] = " ".join(all_sents)
print(json.dumps(jsobj))