def test_default_5d_model(self):
    contents = read_file("random_5d.csv", "")
    time_series, dt, contents, variable_names = clean_contents(contents)
    model = ps.SINDy(feature_names=variable_names)
    model.fit(contents, t=dt)
    _ = model.coefficients()
    actual_score = model.score(contents, t=time_series)
    # The coefficients fitted to random data are expected to be meaningless,
    # so only the score is checked here.
    # assert (pytest.approx(actual_co, 0.1) == expected_co)
    expected_score_max = 0.1
    # The model is expected to fit random data poorly (score below 0.1).
    assert actual_score < expected_score_max
def test_default_lorenz_model(self):
    contents = read_file("data_Lorenz3d.csv", "")
    time_series, dt, contents, variable_names = clean_contents(contents)
    model = ps.SINDy(feature_names=variable_names)
    model.fit(contents, t=dt)
    actual_co = model.coefficients()
    actual_score = model.score(contents, t=time_series)
    # Known Lorenz system coefficients (sigma = 10, rho = 28, beta = 8/3).
    expected_co = [
        [0.0, -10.0, 10.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 28.0, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, -2.666, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
    ]
    expected_score = 1.0
    assert pytest.approx(actual_co, 0.1) == expected_co
    assert pytest.approx(actual_score, 0.01) == expected_score
def test_default_lorenz(self):
    ts, dt, cont, var = clean_contents(read_file("data_Lorenz3d.csv", ""))
    ts = ts[0:4]
    cont = cont[0:4]
    expected_ts = [0., 0.002, 0.004, 0.006]
    expected_dt = 0.002
    expected_cont = [
        [-8, 8, 27],
        [-7.683508382, 7.966250523, 26.73151873],
        [-7.373984577, 7.92929174, 26.46998112],
        [-7.071350036, 7.889537629, 26.21523996],
    ]
    expected_vars = ["x", "y", "z"]
    assert np.all(ts == expected_ts)
    assert np.all(dt == expected_dt)
    assert np.all(cont == expected_cont)
    assert np.all(var == expected_vars)
def test_random_5d(self):
    ts, dt, cont, var = clean_contents(read_file("random_5d.csv", ""))
    ts = ts[0:4]
    cont = cont[0:4]
    expected_ts = [0., 0.001, 0.002, 0.003]
    expected_dt = 0.001
    expected_cont = [
        [3.216510998, 9.914181513, 3.866592635, 0.746334938, 4.606170334],
        [0.650685986, 4.486606167, 5.84805047, 1.991102499, 5.211598163],
        [1.318906913, 8.869120427, 2.594125945, 9.226401504, 2.357597024],
        [8.26282217, 2.762088665, 1.941167642, 5.280758711, 4.70049113],
    ]
    expected_vars = ["a", "b", "c", "d", "e"]
    assert np.all(ts == expected_ts)
    assert np.all(dt == expected_dt)
    assert np.all(cont == expected_cont)
    assert np.all(var == expected_vars)
def test_default_lorenz_plot(self):
    contents = read_file("data_Lorenz3d.csv", "")
    time_series, dt, contents, variable_names = clean_contents(contents)
    model = ps.SINDy(feature_names=variable_names)
    model.fit(contents, t=dt)
    coefs = model.coefficients()
    # Feature names from the fitted model.
    feats = model.get_feature_names()
    # model.simulate() expects the initial conditions as a numpy array of floats.
    conds = np.array([float(val) for val in contents[0]])
    # Forward-simulate the identified equations from the original initial
    # conditions to obtain new data.
    sim_data = model.simulate(conds, time_series)
    fig, _ = create_plot(time_series, contents, variable_names, coefs, feats,
                         sim_data)
    return fig
def test_clean_contents_control(self):
    ts, dt, cont, u, var = clean_contents_control(
        read_file("predatorpreydata.csv", ""))
    ts = ts[0:4]
    cont = cont[0:4]
    u = u[0:4]
    expected_ts = [0.1, 0.2, 0.3, 0.4]
    expected_dt = 0.1
    expected_cont = [[1.0, 1.0],
                     [0.93815867, 1.0],
                     [0.894741036, 0.993815867],
                     [0.878213584, 0.983355064]]
    expected_u = np.array(
        ['0.2196665', '0.437335995', '0.651031414', '0.858815353'])
    expected_vars = ["x", "y", "u"]
    assert np.all(ts == expected_ts)
    assert np.all(dt == expected_dt)
    assert np.all(cont == expected_cont)
    assert np.all(u == expected_u)
    assert np.all(var == expected_vars)
import numpy as np

from base import read_file, nearest_neighbor_network

if __name__ == '__main__':
    training_x, training_y = read_file('./hw4_knn_train.dat')
    testing_x, testing_y = read_file('./hw4_knn_test.dat')
    print("Q17, ein:",
          nearest_neighbor_network(training_x, training_y, training_x, training_y, 5))
    print("Q18, eout:",
          nearest_neighbor_network(testing_x, testing_y, training_x, training_y, 5))
#!/usr/bin/env python3
import base
import WLSK_Keygen, WLSK_Init, WLSK_Resp, WLSK_S

if __name__ == '__main__':
    print('Generating keys')
    WLSK_Keygen.init()(base.get_hostname())
    print(' enc_key = {}\n mac_key = {}\n table = {}'.format(
        base.read_file('wlsk_enc_key'),
        base.read_file('wlsk_mac_key'),
        base.read_file('keytbl')))

    print('\x1b[31m[A1]\x1b[0m')
    (a3, idA) = WLSK_Init.init()(base.get_hostname())
    print(' idA = {}'.format(idA))

    print('\x1b[34m[B2]\x1b[0m')
    (b4, n) = WLSK_Resp.init()(idA)
    print(' n = {}'.format(n))

    print('\x1b[31m[A3]\x1b[0m')
    (_, iv1, e, m) = a3(n)
    print(' e = {}\n m = {}'.format(e, m))

    print('\x1b[34m[B4]\x1b[0m')
    (b6, idA_, idB_, iv2, e_, m_) = b4(iv1, e, m)
    print(' idA\' = {}\n idB\' = {}\n e\' = {}\n m\' = {}'.format(
        idA_, idB_, e_, m_))

    print('\x1b[33m[S5]\x1b[0m')
    (_, iv3, e__, m__) = WLSK_S.init()(idA_, idB_, iv2, e_, m_)
    print(' e\'\' = {}\n m\'\' = {}'.format(e__, m__))
#!/usr/bin/env python3
import base, WLSK_Keygen
from common import *

if __name__ == '__main__':
    my_hostname = base.get_hostname()
    print('Generating keys for {}...'.format(my_hostname))
    WLSK_Keygen.init()(my_hostname)
    print('Done')
    debug('enc_key = {}\nmac_key = {}\ntable = {}'.format(
        base.read_file('wlsk_enc_key'),
        base.read_file('wlsk_mac_key'),
        base.read_file('keytbl')))
import base
import ONS_Keygen, ONS_AGenKey, ONS_BGenKey
import ONS_S, ONS_A, ONS_B
import time


def delay():
    time.sleep(0.7)


if __name__ == '__main__':
    print('Generating \x1b[33mserver\x1b[0m key')
    (_, pkS) = ONS_Keygen.init()()
    print('Generating \x1b[31mA\x1b[0m key')
    (_, pkA) = ONS_AGenKey.init()()
    print('Generating \x1b[34mB\x1b[0m key')
    (_, pkB) = ONS_BGenKey.init()()

    b = base.read_file('idB')
    delay()

    print('\x1b[31mA:\x1b[0m Message 1')
    (oa3, hA, hB) = ONS_A.init()(b)
    print(' hA: {}\n hB: {}'.format(hA, hB))
    delay()

    print('\x1b[33mS:\x1b[0m Message 2')
    (_, rk, h2, s) = ONS_S.init()(hA, hB)
    delay()

    print('\x1b[31mA:\x1b[0m Message 3')
    (oa5, c) = oa3(rk, h2, s)
    delay()