예제 #1
0
파일: TMP.py 프로젝트: levifussell/alveus
def run_TESTS():
    """Train an LCESN and an EESN on identical Mackey-Glass data, time the
    training of each, and plot their one-step predictions against the truth.

    NOTE(review): relies on module-level `run`, `LCESN`, `EESN`, `np`,
    `plt` and `time` being in scope.
    """
    series = np.array(run(21100)).reshape(-1, 1)
    series_mean = np.mean(series)  # kept for parity with sibling examples (unused below)
    cut = 20000

    # One-step-ahead supervised pairs: the target is the next sample.
    X_train = series[:cut - 1]
    y_train = series[1:cut]
    X_test = series[cut - 1:-1]
    y_test = series[cut:]

    # Both models share the same hyper-parameters.
    shared = dict(input_size=1,
                  output_size=1,
                  num_reservoirs=3,
                  reservoir_sizes=300,
                  echo_params=0.85,
                  init_echo_timesteps=100,
                  regulariser=1e-6)

    lcesn = LCESN(**shared)
    lcesn.initialize_input_weights(strategies='binary', scales=1.)
    lcesn.initialize_reservoir_weights(strategies='uniform',
                                       spectral_scales=1.25)
    print('LCESN MADE')

    eesn = EESN(**shared)
    eesn.initialize_input_weights(strategies='binary', scales=1.)
    eesn.initialize_reservoir_weights(strategies='uniform',
                                      spectral_scales=1.25)
    print('EESN MADE')
    print('=' * 30)

    st_time = time.time()
    lcesn.train(X_train, y_train)
    print('LCESN TRAINED. TOOK %.3f SEC' % (time.time() - st_time))

    st_time = time.time()
    eesn.train(X_train, y_train)
    print('EESN TRAINED. TOOK %.3f SEC' % (time.time() - st_time))

    # Run both models over the test inputs one step at a time.
    lcesn_outs = []
    eesn_outs = []
    for x in X_test:
        lcesn_outs.append(lcesn.forward(x))
        eesn_outs.append(eesn.forward(x))

    lcesn_outs = np.array(lcesn_outs).squeeze()
    eesn_outs = np.array(eesn_outs).squeeze()

    fig, ax = plt.subplots()
    ax.plot(range(len(lcesn_outs)), lcesn_outs, label='lcesn')
    ax.plot(range(len(eesn_outs)), eesn_outs, label='new')
    ax.plot(range(len(y_test)), y_test, label='true')
    plt.show()
예제 #2
0
파일: rnn.py 프로젝트: levifussell/alveus
def load_and_run_model(file_name):
    """Load a pickled model tuple and report its generative NRMSE losses.

    The pickle holds ``(seq, gen_train_loss, gen_test_loss, epoch)`` as
    saved by the training loop.  Fresh Mackey-Glass data is generated and
    centred, the model is rolled out generatively over the train / test /
    validation splits, each NRMSE is printed, and the validation rollout
    is plotted against the full true series.

    Parameters
    ----------
    file_name : str
        Path to the pickle written during training.
    """
    # FIX: use a context manager so the file handle is closed instead of
    # leaked (the original passed open(...) straight into pkl.load).
    with open(file_name, "rb") as fh:
        model = pkl.load(fh)
    (seq, gen_train_loss, gen_test_loss, epoch) = model
    print("MODEL LOADED")

    from MackeyGlass.MackeyGlassGenerator import run
    data = run(21000)
    data -= np.mean(data)
    # NOTE(review): mean is taken *after* centring, so DATA_MEAN is ~0;
    # this mirrors the sibling training code (try_toy_example) and keeps
    # the nrmse normalisation consistent with it.
    DATA_MEAN = np.mean(data)
    print("DATA LOADED")

    train_data = np.array(data[:14000])
    train_inputs = Variable(torch.from_numpy(train_data.reshape(-1, 1)), requires_grad=0)
    test_data = np.array(data[14000:20000])
    test_targets = Variable(torch.from_numpy(test_data.reshape(-1, 1)), requires_grad=0)
    val_data = np.array(data[20000:])
    val_targets = Variable(torch.from_numpy(val_data.reshape(-1, 1)), requires_grad=0)

    # Everything before the validation split, used to warm the model up
    # before generating the 1000 validation steps.
    pre_val_inputs = Variable(torch.from_numpy(np.array(data[:20000]).reshape(-1, 1)), requires_grad=0)

    # Warm up on the first 4000 train steps, free-run 2000 more, and score
    # the generated tail against the next 2000 true train values.
    gen_train_outs = seq.forward(train_inputs[:4000], future=2000).data.numpy()
    gen_train_loss = nrmse(gen_train_outs[4000:], train_inputs[4000:6000].data.numpy(), DATA_MEAN)
    print('!! Gen Train loss: {}'.format(gen_train_loss))

    # Warm up on all 14000 train steps, then free-run into the test region.
    gen_test_outs = seq.forward(train_inputs, future=2000).data.numpy()
    gen_test_loss = nrmse(gen_test_outs[14000:], test_targets.data.numpy()[:2000], DATA_MEAN)
    print('!! Gen Test loss: {}'.format(gen_test_loss))

    # Warm up on the first 20000 steps, then free-run the validation region.
    gen_val_outs = seq.forward(pre_val_inputs, future=1000).data.numpy()
    gen_val_loss = nrmse(gen_val_outs[20000:], val_targets.data.numpy()[:1000], DATA_MEAN)
    print('!! Gen Val loss: {}'.format(gen_val_loss))

    plt.plot(range(len(data)), data, label="T")
    plt.plot(range(len(gen_val_outs)), gen_val_outs,  label="G")
    plt.legend()
    plt.show()
예제 #3
0
import numpy as np
import matplotlib.pyplot as plt
from ESN.ESN import EESN, ESN
from MackeyGlass.MackeyGlassGenerator import run
from Helper.utils import nrmse
import pickle as pkl
import itertools
import time

def mse(y1, y2):
    """Mean squared error between two arrays (NumPy broadcasting applies)."""
    diff = y1 - y2
    return np.mean(diff * diff)
                        
if __name__ == '__main__':
    # NOTE(review): this scraped snippet is truncated — the echo_params_
    # list below is cut off mid-literal, so this block does not parse as-is.
    # Mackey-Glass series reshaped to a column vector (N, 1).
    data = np.array([run(15100)]).reshape(-1, 1)
    data_mean = np.mean(data, axis=0)
    split = 14100
    # One-step-ahead pairs: inputs x[t], targets x[t+1].
    X_train = np.array(data[:split-1])
    y_train = np.array(data[1:split])
    X_valid = np.array(data[split-1:-1])
    y_valid = np.array(data[split:])
    data_mean = np.mean(data)  # overwrites the per-axis mean above with a scalar

    #esn = ESN(1, 1, 1000, echo_param=0.85, regulariser=1e-6)
    #esn.initialize_input_weights(scale=1.0)
    #esn.initialize_reservoir_weights(spectral_scale=1.25)

    num_reservoirs = 10
    # Candidate per-reservoir echo-parameter schedules (linear ramps).
    echo_params_ = [
        np.linspace(0.85, 0.5, num_reservoirs), np.linspace(0.85, 0.85, num_reservoirs), 
        np.linspace(0.9, 0.9, num_reservoirs), np.linspace(0.9, 0.5, num_reservoirs),
        np.linspace(0.9, 0.75, num_reservoirs), np.linspace(0.75, 0.9, num_reservoirs),
예제 #4
0
    # NOTE(review): scraped fragment — the enclosing script header is not
    # visible here; names like FFNN and nn come from elsewhere in the file.
    t = str(time.time()).replace('.', 'p')  # timestamp token, safe for filenames
    eval_valid = True    # whether or not to evaluate MSE loss on test set during training
    eval_gener = True    # whether or not to generate future values, calculate that MSE loss
    eval_gen_loss = True
    save_fig = False
    save_results = False

    reg = 1e-3 # lambda for L2 regularization 
    n_generate_timesteps = 2000
    learn_rate = 0.009
    n_epochs = 100
    
    # ========================================================================================
    # Get data ===============================================================================
    from MackeyGlass.MackeyGlassGenerator import run
    data = run(num_data_samples=21000)
    data_var = np.var(np.array(data))
    __DATA_VAR__ = np.var(np.array(data))  # same value as data_var above
    __DATA_MEAN__ = np.mean(np.array(data))
    print('data mean, variance: %.5f, %.5f' % (__DATA_MEAN__, __DATA_VAR__))

    # 14000 train / 6000 validation / 1000 test split of the 21000 samples.
    train_data = data[:14000]
    if eval_valid:
        valid_data = data[14000:20000]
    else:
        valid_data = None
    test_data = data[20000:]
    
    # Set up model, loss function, optimizer =================================================
    model = FFNN(input_size=50, hidden_size=100, n_hidden_layers=2, activation=nn.Sigmoid)
    criterion = nn.MSELoss()
예제 #5
0
파일: rnn.py 프로젝트: levifussell/alveus
def try_toy_example():
    """Train a `Sequence` RNN on centred Mackey-Glass data and report both
    one-step and generative (free-run) NRMSE losses.

    Every 5 epochs the model is rolled out generatively; whenever the
    generative test loss improves on the best so far, the model is
    deep-copied and pickled to "bestModel_50hids.pkl".  At the end the
    final and best models are plotted on train and test rollouts.

    NOTE(review): depends on module-level `Sequence`, `nrmse`, `deepcopy`,
    `torch`, `Variable`, `nn`, `optim`, `pkl`, `np` and `plt`.
    """
    from MackeyGlass.MackeyGlassGenerator import run
    data = run(20000)
    data -= np.mean(data)
    # NOTE(review): taken after centring, so DATA_MEAN is ~0 by construction.
    DATA_MEAN = np.mean(data)
    train_data = np.array(data[:14000]); test_data = np.array(data[14000:])
    # CONSTRUCT TRAINING, TESTING DATA (one-step-ahead pairs)
    train_inputs = Variable(torch.from_numpy(train_data[:-1].reshape(-1, 1)), requires_grad=0)
    train_targets = Variable(torch.from_numpy(train_data[1:].reshape(-1, 1)), requires_grad=0)
    test_inputs = Variable(torch.from_numpy(test_data[:-1].reshape(-1, 1)), requires_grad=0)
    test_targets = Variable(torch.from_numpy(test_data[1:].reshape(-1, 1)), requires_grad=0)
    # print(train_inputs)
    seq = Sequence()
    seq.double()  # run the model in float64 so it matches the numpy data

    criterion = nn.MSELoss()
    optimizer = optim.Adam(seq.parameters(), lr=0.01)

    # Track the best generative-test model seen so far (losses start high
    # so the first evaluation always wins).
    bestGenTrain = 1000
    bestGenTest = 1000
    epoch = -1
    bestModel = (None, bestGenTrain, bestGenTest, epoch)

    NUM_EPOCH = 500
    for i in range(NUM_EPOCH):
        print('Epoch [{}/{}]'.format(i+1, NUM_EPOCH))

        # calculate outputs, loss, then step
        optimizer.zero_grad()
        train_outputs = seq(train_inputs)
        loss = criterion(train_outputs, train_targets)
        print('Training loss: %.6f' % loss.data.cpu().numpy()[0])
        loss.backward()
        optimizer.step()

        # One-step test loss (no gradient step taken on it).
        test_outputs = seq(test_inputs, future=0)
        loss = criterion(test_outputs, test_targets)
        print('Test loss: %.6f' % loss.data.cpu().numpy()[0])

        if i % 5 == 0:
            # Warm up on 4000 train steps, free-run 2000, score the tail.
            gen_outs = seq.forward(train_inputs[:4000], future=2000).data.numpy()
            gen_train_loss = nrmse(gen_outs[4000:], train_targets.data.numpy()[4000:6000], DATA_MEAN)
            print('!! Gen Train loss: {}'.format(gen_train_loss))

            # Warm up on the last 4000 train targets, free-run into test.
            gen_outs = seq.forward(train_targets[-4000:], future=2000).data.numpy()
            gen_test_loss = nrmse(gen_outs[4000:], test_inputs.data.numpy()[:2000], DATA_MEAN)
            print('!! Gen Test loss: {}'.format(gen_test_loss))

            if gen_test_loss <= bestModel[2]:
                bestModel = (deepcopy(seq), gen_train_loss, gen_test_loss, i)
                # FIX: close the pickle file instead of leaking the handle.
                with open("bestModel_50hids.pkl", "wb") as fh:
                    pkl.dump(bestModel, fh)
                print("---BEST SAVED")

    f, ax = plt.subplots(figsize=(12, 12))
    # plot true test target values
    out_plt = test_outputs.data.cpu().numpy(); tar_plt = test_targets.data.cpu().numpy()
    ax.plot(np.arange(len(out_plt)), tar_plt, label='True')
    ax.plot(np.arange(len(out_plt)), out_plt, label='Generated')

    # generate data for final model
    f2, ax2 = plt.subplots(figsize=(12, 12))
    outs = seq.forward(train_inputs[:100], future=2000).data.numpy()
    ax2.plot(np.arange(len(train_targets.data.numpy()[100:2100])), train_targets.data.numpy()[100:2100], label="True")
    ax2.plot(np.arange(len(outs[100:])), outs[100:2100], label="Predicted")
    ax2.set_title("GENERATIVE TRAIN DATA (FINAL MODEL)")
    print("FINAL GEN. LOSS FINAL MODEL --- TRAIN {}".format(nrmse(outs[100:], train_targets.data.numpy()[100:2100], DATA_MEAN)))

    f2, ax2 = plt.subplots(figsize=(12, 12))
    outs = seq.forward(test_inputs[:100], future=2000).data.numpy()
    ax2.plot(np.arange(len(test_targets.data.numpy()[100:2100])), test_targets.data.numpy()[100:2100], label="True")
    ax2.plot(np.arange(len(outs[100:])), outs[100:2100], label="Predicted")
    ax2.set_title("GENERATIVE TEST DATA (FINAL MODEL)")
    # FIX: this line evaluates the TEST rollout but was labelled "TRAIN"
    # (compare the best-model prints below, which label TEST correctly).
    print("FINAL GEN. LOSS FINAL MODEL --- TEST {}".format(nrmse(outs[100:], test_targets.data.numpy()[100:2100], DATA_MEAN)))

    # generate data for best model
    f2, ax2 = plt.subplots(figsize=(12, 12))
    outs = bestModel[0].forward(train_inputs[:100], future=2000).data.numpy()
    ax2.plot(np.arange(len(train_targets.data.numpy()[100:2100])), train_targets.data.numpy()[100:2100], label="True")
    ax2.plot(np.arange(len(outs[100:])), outs[100:2100], label="Predicted")
    ax2.set_title("GENERATIVE TRAIN DATA (BEST MODEL)")
    print("FINAL GEN. LOSS BEST MODEL --- TRAIN {}".format(nrmse(outs[100:], train_targets.data.numpy()[100:2100], DATA_MEAN)))

    f2, ax2 = plt.subplots(figsize=(12, 12))
    outs = bestModel[0].forward(test_inputs[:100], future=2000).data.numpy()
    ax2.plot(np.arange(len(test_targets.data.numpy()[100:2100])), test_targets.data.numpy()[100:2100], label="True")
    ax2.plot(np.arange(len(outs[100:])), outs[100:2100], label="Predicted")
    ax2.set_title("GENERATIVE TEST DATA (BEST MODEL)")
    print("FINAL GEN. LOSS BEST MODEL --- TEST {}".format(nrmse(outs[100:], test_targets.data.numpy()[100:2100], DATA_MEAN)))

    plt.legend(); plt.show()
예제 #6
0
import datetime

# def mse(y1, y2):
#     return np.mean((y1-y2)**2)

# def nrmse(y_true, y_pred, MEAN_OF_DATA):
#     return np.sqrt(np.sum((y_true - y_pred)**2)/np.sum((y_true - MEAN_OF_DATA)**2))

def save_data(file_name, data_csv, delimiter, fmt, header):
    """Write `data_csv` to `file_name` as delimited text via np.savetxt.

    The parameters mirror np.savetxt's keywords: `delimiter` separates
    columns, `fmt` formats each value, and `header` becomes a commented
    first line.
    """
    options = {"delimiter": delimiter, "fmt": fmt, "header": header}
    np.savetxt(file_name, data_csv, **options)


if __name__ == '__main__':
    # NOTE(review): scraped fragment — the script continues past this
    # excerpt.  The stray spaces in `np.  array` below are valid (if odd)
    # attribute-access syntax and are left untouched.
    data = np.  array([run(15100)]).reshape(-1, 1)
    # NOTE: REMOVE WHEN NOT DHESN
    _std = np.std(data)
    #data -= np.mean(data)
    # data /= _std
    MEAN_OF_DATA = np.mean(data)
    split = 14100
    # One-step-ahead pairs: inputs x[t], targets x[t+1].
    X_train = np.array(data[:split-1])
    y_train = np.array(data[1:split])
    X_valid = np.array(data[split-1:-1])
    y_valid = np.array(data[split:])

    # print(np.mean(X_train))
    # print(np.mean(X_valid))

    # eesn = ESN(1, 1, 1000, echo_param=0.76800719, regulariser=1e-5)
예제 #7
0
    # NOTE(review): tail of a grid-plotting helper whose `def` line is not
    # visible in this excerpt; num_rows/num_cols/signals_data/ax/titles are
    # parameters or locals defined above the cut.
    for r in range(num_rows):
        for c in range(num_cols):
            idx = r*num_cols + c  # row-major index into the flat signal list
            s_data = signals_data[idx]
            ax[r,c].plot(s_data)

            if len(titles) > 0:
                ax[r, c].set_title(titles[idx])

    #plt.show(block=False)
    plt.draw()  # redraw without blocking (cf. the commented plt.show above)


if __name__ == "__main__":
    # NOTE(review): scraped fragment — the script continues past this
    # excerpt.  Mackey-Glass series transposed to a column vector.
    data = np.array([run(21100)]).T
    # data_train_test = data[:20000]
    # data_val = data[20000:]
    #data = np.loadtxt('../../../MackeyGlass_t17.txt')
    #data -= np.mean(data)
    #data += 100.0
    #data -= np.mean(data)
    #print(np.std(data))
    #data /= np.std(data)
    #data *= 2.0
    # onExit(data)
    #esn = ESN(input_size=2, output_size=1, reservoir_size=1000, echo_param=0.1, spectral_scale=1.1, init_echo_timesteps=100, regulariser=1e-0, debug_mode=True)
    #ESN_stochastic_train(data, 7000, esn, 1)
    MEAN_OF_DATA = np.mean(data)  # presumably feeds nrmse-style metrics later — confirm downstream
    print("DATA MEAN: {}".format(MEAN_OF_DATA))
    #np.random.seed(42)
예제 #8
0
파일: TMP.py 프로젝트: levifussell/alveus
def run_ESN():
    """Sanity-check the refactored ESN against the legacy ESN2.

    Both networks are given identical reservoir/input weights, trained on
    the same one-step-ahead Mackey-Glass pairs, and their test predictions
    are plotted together with the true targets so any divergence between
    the old and new implementations is visible.

    NOTE(review): depends on module-level `run`, `ESN`, `ESN2`, `np`,
    `plt` and `time`.
    """
    data = np.array(run(21100)).reshape(-1, 1)
    data_mean = np.mean(data)  # kept for parity with sibling examples (unused below)
    split = 20000

    # One-step-ahead pairs: predict x[t+1] from x[t].
    X_train = data[:split - 1]
    y_train = data[1:split]
    X_test = data[split - 1:-1]
    y_test = data[split:]

    OLD_ESN = ESN2(input_size=1,
                   output_size=1,
                   reservoir_size=1000,
                   echo_param=0.85,
                   spectral_scale=1.25,
                   init_echo_timesteps=100,
                   regulariser=1e-6,
                   input_weights_scale=1.)
    print('OLD ESN MADE')

    NEW_ESN = ESN(input_size=1,
                  output_size=1,
                  reservoir_size=1000,
                  echo_param=0.85,
                  init_echo_timesteps=100,
                  regulariser=1e-6)
    NEW_ESN.initialize_input_weights(strategy='binary', scale=1.)
    NEW_ESN.initialize_reservoir_weights(strategy='uniform',
                                         spectral_scale=1.25)
    print('NEW ESN MADE')
    print('=' * 30)

    # Set new ESN's weights = old ESN's weights to ensure they (SHOULD) output the same outputs
    NEW_ESN.reservoir.W_res = OLD_ESN.W_reservoir
    NEW_ESN.reservoir.W_in = OLD_ESN.W_in

    assert np.sum(abs(NEW_ESN.reservoir.W_res - OLD_ESN.W_reservoir)) < 1e-3

    if 0:  # flip to 1 for a step-by-step forward-pass comparison
        for x0 in X_train:
            x0 = x0.reshape(-1, 1)
            old_res_fwd, in_old, res_old, old_st_old = OLD_ESN.__forward_to_res__(
                x0, debug=True)
            new_res_fwd, in_new, res_new, old_st_new = NEW_ESN.reservoir.forward(
                x0, debug=True)
            print('init state diff', np.sum(old_res_fwd - new_res_fwd))
            print(' in_to_res diff: ', np.sum(in_old - in_new))
            print('res_to_res diff: ', np.sum(res_old - res_new))
            print(' old state diff: ', np.sum(old_st_old - old_st_new))
            # FIX: raw_input() is Python 2 only and raises NameError under
            # Python 3 (which this file's print() calls target); input()
            # pauses for Enter the same way.
            input()

    # TRAIN THE NETWORKS =================================================
    st_time = time.time()
    OLD_ESN.train(X_train, y_train)  # S, D
    print('OLD ESN TRAINED. TOOK %.3f SEC' % (time.time() - st_time))

    st_time = time.time()
    NEW_ESN.train(X_train, y_train)
    print('NEW ESN TRAINED. TOOK %.3f SEC' % (time.time() - st_time))

    #print('success?', (OLD_ESN.W_out == NEW_ESN.W_out))

    # diffs = (NEW_ESN.W_out - OLD_ESN.W_out).flatten()

    #plt.plot(range(len(diffs)), diffs)
    #plt.show()

    # Run both networks over the test inputs one step at a time.
    old_outputs = []
    new_outputs = []
    for x in X_test:
        old_outputs.append(OLD_ESN.forward_to_out(x))
        new_outputs.append(NEW_ESN.forward(x))

    old_outputs = np.array(old_outputs).squeeze()
    new_outputs = np.array(new_outputs).squeeze()

    #print(old_outputs.shape)
    #print(old_outputs.shape, old_outputs[0].shape, old_outputs[0])

    # if 1:
    #     f, axarr = plt.subplots(1, 1, sharex=True, sharey=True, figsize=(12, 12))
    #     axarr[0].plot(range(len(old_outputs)), old_outputs, label='old')
    #     axarr[1].plot(range(len(new_outputs)), new_outputs, label='new')
    #     axarr[2].plot(range(len(y_test)), y_test, label='true')
    #     plt.legend(); plt.show()
    #     f.close()
    fig, ax = plt.subplots()
    ax.plot(range(len(old_outputs)), old_outputs, label='old')
    ax.plot(range(len(new_outputs)), new_outputs, label='new')
    ax.plot(range(len(y_test)), y_test, label='true')
    plt.show()