"""Demo script: generate three synthetic datasets with the data_generator app.

Builds (1) a VAR dataset with a dependency anomaly (the causal structure
switches after n1 steps), (2) a plain linear VAR dataset, and (3) a
Lorenz-96 ODE dataset, plotting inputs/outputs along the way.
"""
import os
import sys

# Make the parent directory (gc_testing) importable so the `apps`
# package resolves; all project imports are relative to that root.
sys.path.append(os.path.dirname(sys.path[0]))

import apps.data_generator as dg
import pandas as pd
import matplotlib.pyplot as plt

# --- Dataset 1: dependency-anomaly VAR ---------------------------------
# Two dependency structures: 'dep1' governs the first n1=200 steps,
# 'dep2' the remaining n2=100 steps.
anomaly_ds = dg.dataset(features=10, n=300, lag=3, dep_dens=0.6)
anomaly_ds.gen_dep_anom_data(n1=200, n2=100)
print(anomaly_ds)
anomaly_ds.plot_input()
# Treat the first structure as the ground-truth GC matrix and plot the
# second one for comparison.
anomaly_ds.GC = anomaly_ds.dependencies['dep1']
anomaly_ds.plot_output_GC(anomaly_ds.dependencies['dep2'])

# --- Dataset 2: plain linear-causal VAR --------------------------------
var_ds = dg.dataset(features=10, n=600)
var_ds.gen_var_data()
print(var_ds)
var_ds.plot_input()

# --- Dataset 3: dynamic causal relations via the Lorenz-96 ODE ---------
lorenz_ds = dg.dataset(features=15, n=1500)
lorenz_ds.gen_lorenz96_data()
"""Demo script: run Algorithm_Loader on a generated VAR dataset.

Generates a small 5-feature VAR dataset and feeds it to Algorithm_Loader
configured to run the 'neunetnue' algorithm, then prints the recovered
GC matrix of the loaded dataset.
"""
import os
import sys

# Make the parent directory importable so the `apps` package resolves.
sys.path.append(os.path.dirname(sys.path[0]))
# print(sys.path)

from apps.algorithm_loader import Algorithm_Loader
import apps.data_generator as dg
# Earlier experiments imported algorithms directly:
# from test_algs.gc_ame.gc_ame_alg import GC_AME
# from test_algs.dca_bi_final.bi_cgl import BI_CGL

dataset1 = dg.dataset(features=5)
dataset1.gen_var_data()

# Disabled experiment: running the 'gcf' algorithm with explicit
# hyper-parameters on the raw data array.
# args_gcf = {"train_epochs": 100, "learning_rate": 0.01, "batch_size": 32,
#             "p": 200, "q": 200}
# args = {'dataset': dataset1.data, 'result_path': 'result1',
#         'model_path': 'result2', 'algorithms': {'gcf': args_gcf}}
# alg_load = Algorithm_Loader(args)
# print(alg_load)

# Live configuration: run 'neunetnue' with default settings (value 0).
args = {'dataset':dataset1, 'result_path':'result1', 'model_path':'result2', 'algorithms':{'neunetnue':0}}
# args = {'dataset':dataset1, 'result_path':'result1', 'model_path':'result2'}

alg_load = Algorithm_Loader(args)
print(alg_load.dataset.GC)
# print(alg_load.dataset.dependencies)
# print(alg_load)

# Reference CLI invocation for the AME starter (kept for comparison):
# /content/ame/ame_starter/apps/main.py --dataset="boston_housing" --batch_size=32 --num_epochs=300 --learning_rate=0.001 --output_directory='/content/drive/My Drive/pr2/ame_output' --do_train --do_evaluate --num_units=16 --num_layers=1 --early_stopping_patience=32
"""Demo script: plot cause/effect relations of a generated VAR dataset.

Generates a 10-feature VAR dataset, assigns its 'dep1' dependency
structure as the ground-truth GC matrix, and plots feature 4 as an
effect of candidate cause features 1, 2, 3 and 5.
"""
import os
import sys

# Make the parent directory (gc_testing) importable so the `apps`
# package resolves; all project imports are relative to that root.
sys.path.append(os.path.dirname(sys.path[0]))

import apps.data_generator as dg
import pandas as pd
import matplotlib.pyplot as plt

# Simulated VAR data with a given lag and dependency density.
demo_ds = dg.dataset(features=10, n=300, lag=3, dep_dens=0.6)
demo_ds.gen_var_data()
# demo_ds.plot_input()

# Use the first dependency structure as the ground-truth GC matrix.
demo_ds.GC = demo_ds.dependencies['dep1']
demo_ds.plot_causeEffect(effect=4, causes=[1, 2, 3, 5])
# demo_ds.plot_causeEffect(effect=4)