import numpy as np
import dit
from dit.validate import InvalidNormalization
from dit.exceptions import ditException

from dist_utils import generate_distribution, get_marginals, load_dist, calculate_Y
from nudges import (individual_nudge, local_nudge, synergistic_nudge,
                    derkjanistic_nudge, global_nudge)
from optimized_nudges import (max_individual_nudge, max_local_nudge,
                              max_synergistic_nudge, max_derkjanistic_nudge,
                              max_global_nudge)
from ising_model import get_transition_probabilities


def experiment(inputs):
    """Apply each nudge type to randomly generated distributions and record
    the median L1 change in the marginal of the output variable Y.

    inputs: (level, n_vars, dists, interventions, seed)
    """
    level, n_vars, dists, interventions, seed = inputs
    np.random.seed(seed)
    n_states = 3
    nudges = [individual_nudge, local_nudge, synergistic_nudge,
              derkjanistic_nudge, global_nudge]
    # Despite the name, each row of `means` stores the *median* effect per
    # distribution (see np.median below).
    means = np.zeros((dists, len(nudges)))
    for i in range(dists):
        XY = generate_distribution(n_vars + 1, n_states, level)  # +1 for the output variable
        old_Y = XY.marginal('Y').copy('linear')
        old_X, YgivenX = get_marginals(XY)
        for Y in YgivenX:
            Y.make_dense()
        oldest_X = old_X.copy()
        intervention_results = np.zeros((len(nudges), interventions))
        for j in range(interventions):
            for idx, nudge in enumerate(nudges):
                # Nudges must not modify old_X in place.
                if not np.allclose(old_X.pmf, oldest_X.pmf):
                    raise ValueError(
                        "Something went wrong during {}. "
                        "Original X has changed".format(nudge.__name__))
                new_X = None
                try:
                    new_X = nudge(old_X)
                except IndexError as e:
                    print(level, n_vars, nudge, e)
                    raise e
                try:
                    new_Y = dit.joint_from_factors(new_X, YgivenX).marginal('Y').copy('linear')
                except InvalidNormalization as e:
                    print(nudge)
                    print('new_x = {}'.format(sum(new_X.pmf)), new_X.pmf)
                    raise e
                except ditException as e:
                    print(nudge)
                    old_X.make_dense()
                    new_X.make_dense()
                    print(level, n_vars, seed, i, old_X.pmf, new_X.pmf)
                    print(level, n_vars, seed, i,
                          "oldX has {} outcomes, newX has {} outcomes\n"
                          "YgivenX has {} cond distributions, "
                          "old_X has zero-probability outcomes at {}".format(
                              len(old_X), len(new_X), len(YgivenX),
                              np.flatnonzero(old_X.pmf == 0)))
                    old_X.make_sparse()
                    raise e
                new_Y.make_dense()
                # L1 distance between the old and new output marginals
                intervention_results[idx, j] = sum(abs(new_Y.pmf - old_Y.pmf))
        means[i, :] = np.median(intervention_results, axis=1)
    print(level, n_vars, "done")
    return (level, n_vars), means
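

# A small driver sketch for `experiment` (an addition for illustration, not
# part of the original pipeline). It builds the input tuples `experiment`
# expects and fans them out over a process pool; the default grid sizes are
# assumptions, not values taken from the thesis experiments.
def run_experiment_grid(levels, var_counts, dists=10, interventions=5):
    """Run `experiment` for every (level, n_vars) pair and collect results."""
    from multiprocessing import Pool
    jobs = [(level, n_vars, dists, interventions, seed)
            for seed, (level, n_vars) in enumerate(
                (l, n) for l in levels for n in var_counts)]
    with Pool() as pool:
        # experiment returns ((level, n_vars), medians); gather into a dict
        return dict(pool.map(experiment, jobs))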


def optim_experiment(inputs):
    """Same setup as `experiment`, but using nudges that are optimized to
    maximally shift the marginal of Y.

    inputs: (level, n_vars, dists, interventions, seed)
    """
    level, n_vars, dists, interventions, seed = inputs
    np.random.seed(seed)
    n_states = 3
    nudges = [max_individual_nudge, max_local_nudge, max_synergistic_nudge,
              max_derkjanistic_nudge, max_global_nudge]
    means = np.zeros((dists, len(nudges)))
    for i in range(dists):
        XY = generate_distribution(n_vars + 1, n_states, level)  # +1 for the output variable
        old_Y = XY.marginal('Y').copy('linear')
        old_X, YgivenX = get_marginals(XY)
        for Y in YgivenX:
            Y.make_dense()
        oldest_X = old_X.copy()
        intervention_results = np.zeros((len(nudges), interventions))
        for j in range(interventions):
            for idx, nudge in enumerate(nudges):
                new_X = nudge(old_X, YgivenX)
                try:
                    new_XY = dit.joint_from_factors(new_X, YgivenX)
                    new_Y = new_XY.marginal('Y').copy('linear')
                except ditException as e:
                    print(level, n_vars, nudge, e)
                    raise e
                new_Y.make_dense()
                # L1 distance between the old and new output marginals
                intervention_results[idx, j] = sum(abs(new_Y.pmf - old_Y.pmf))
        means[i, :] = np.median(intervention_results, axis=1)
    print(level, n_vars, "done")
    return (level, n_vars), means


def real_experiment(inputs):
    """Run the nudge comparison on saved real-model distributions.

    inputs: (n_vars, model, parameter, dists, interventions, seed, corrected)
      n_vars        -- number of neighbours
      model         -- "sis" or "ising"
      parameter     -- (beta, gamma, ...) for SIS, temperature for Ising
      dists         -- number of distributions (or a range of indices)
      interventions -- nudges per distribution per nudge type
      seed          -- RNG seed
      corrected     -- whether to use the corrected model variant
    """
    n_vars, model, parameter, dists, interventions, seed, corrected = inputs
    np.random.seed(seed)
    nudges = [individual_nudge, local_nudge, synergistic_nudge,
              derkjanistic_nudge, global_nudge]
    # Mirror real_optim_experiment: `dists` may be an int or a range of
    # indices (the original referenced r_dists here without defining it).
    if isinstance(dists, range):
        means = np.zeros((len(dists), len(nudges)))
        r_dists = dists
    else:
        means = np.zeros((dists, len(nudges)))
        r_dists = range(dists)
    for i, d in enumerate(r_dists):
        # Load distribution; load_dist returns a dit Distribution or None.
        # If None, skip this index.
        old_X = load_dist(model, parameter, n_vars, d, corrected)
        if old_X is not None:
            # Generate transition probabilities
            YgivenX = get_transition_probabilities(model, n_vars, parameter, corrected)
            # Calculate old output marginal
            old_Y = calculate_Y(old_X, YgivenX)
            oldest_X = old_X.copy()
            intervention_results = np.zeros((len(nudges), interventions))
            for j in range(interventions):
                for idx, nudge in enumerate(nudges):
                    # Nudges must not modify old_X in place.
                    if not np.allclose(old_X.pmf, oldest_X.pmf):
                        raise ValueError(
                            "Something went wrong during {}. "
                            "Original X has changed".format(nudge.__name__))
                    new_X = None
                    try:
                        new_X = nudge(old_X)
                    except IndexError as e:
                        print(n_vars, nudge, e)
                        raise e
                    # Calculate the new output marginal and the effect size
                    new_Y = calculate_Y(new_X, YgivenX)
                    intervention_results[idx, j] = sum(abs(new_Y.pmf - old_Y.pmf))  # L1 norm
            means[i, :] = np.median(intervention_results, axis=1)
    if model == "ising":
        fname = ("ising/random_corrected_{}vars_{}dists_{}interventions_{:.2f}_results.npy"
                 if corrected else
                 "ising/random_{}vars_{}dists_{}interventions_{:.2f}_results.npy")
        np.save(fname.format(n_vars, r_dists[-1], interventions, parameter), means)
    else:
        fname = ("sis/random_corrected_{}vars_{}dists_{}interventions_{:.2f}_{:.2f}_{:.2f}_results.npy"
                 if corrected else
                 "sis/random_{}vars_{}dists_{}interventions_{:.2f}_{:.2f}_{:.2f}_results.npy")
        np.save(fname.format(n_vars, r_dists[-1], interventions,
                             parameter[0], parameter[1], parameter[2]), means)
    print(model, n_vars, parameter, "done")


def real_optim_experiment(inputs):
    """Optimized-nudge counterpart of `real_experiment`; takes the same
    inputs and saves results under 'optim_' filenames instead of 'random_'."""
    n_vars, model, parameter, dists, interventions, seed, corrected = inputs
    np.random.seed(seed)
    nudges = [max_individual_nudge, max_local_nudge, max_synergistic_nudge,
              max_derkjanistic_nudge, max_global_nudge]
    # `dists` may be an int (number of distributions) or a range of indices.
    if isinstance(dists, range):
        means = np.zeros((len(dists), len(nudges)))
        r_dists = dists
    else:
        means = np.zeros((dists, len(nudges)))
        r_dists = range(dists)
    for i, d in enumerate(r_dists):
        # Load distribution; skip this index if nothing was saved for it
        old_X = load_dist(model, parameter, n_vars, d, corrected)
        if old_X is not None:
            # Generate transition probabilities
            YgivenX = get_transition_probabilities(model, n_vars, parameter, corrected)
            # Calculate old output marginal
            old_Y = calculate_Y(old_X, YgivenX)
            oldest_X = old_X.copy()
            intervention_results = np.zeros((len(nudges), interventions))
            for j in range(interventions):
                for idx, nudge in enumerate(nudges):
                    new_X = nudge(old_X, YgivenX)
                    new_Y = calculate_Y(new_X, YgivenX)
                    new_Y.make_dense()
                    # L1 distance between the old and new output marginals
                    intervention_results[idx, j] = sum(abs(new_Y.pmf - old_Y.pmf))
            means[i, :] = np.median(intervention_results, axis=1)
    if model == "ising":
        fname = ("ising/optim_corrected_{}vars_{}dists_{}interventions_{:.2f}_results.npy"
                 if corrected else
                 "ising/optim_{}vars_{}dists_{}interventions_{:.2f}_results.npy")
        np.save(fname.format(n_vars, r_dists[-1], interventions, parameter), means)
    else:
        fname = ("sis/optim_corrected_{}vars_{}dists_{}interventions_{:.2f}_{:.2f}_{:.2f}_results.npy"
                 if corrected else
                 "sis/optim_{}vars_{}dists_{}interventions_{:.2f}_{:.2f}_{:.2f}_results.npy")
        np.save(fname.format(n_vars, r_dists[-1], interventions,
                             parameter[0], parameter[1], parameter[2]), means)
    print(model, n_vars, parameter, "done")
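

# Example entry point (a sketch; the original file defines no __main__ guard).
# All parameter values below are illustrative assumptions, and the output
# directories ("ising/", "sis/") are expected to exist.
if __name__ == "__main__":
    # Random-distribution experiments over a small grid of nudge sizes and
    # input-variable counts.
    results = run_experiment_grid(levels=[0.25, 0.5], var_counts=[2, 3])
    print(results)
    # One Ising run: 4 neighbours, temperature 2.0, 10 distributions,
    # 5 interventions per distribution per nudge, seed 42, uncorrected model.
    real_experiment((4, "ising", 2.0, 10, 5, 42, False))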