-
Notifications
You must be signed in to change notification settings - Fork 0
/
solver.py
340 lines (264 loc) · 10.9 KB
/
solver.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
# EM algorithm for Bayesian factor model with multiple changepoints
import numpy as np
import scipy.linalg
import scipy.stats
import math
import sklearn
from sklearn import linear_model
import lib.cpt_functions as cpt
import copy
import lib.utils
# a class centered around y_mat
# initialize the set of parameters in constructor
# config hyper-parameters
class FactorModel():
    """
    EM solver for a Bayesian sparse factor model with multiple
    changepoints, centered around a multivariate time series
    y_mat (n_time x n_name).

    Hyper-parameters:
        k_max : maximum number of latent factors.
        delta : [delta0, delta1] spike / slab Laplace rates for the
                loadings (delta0 is annealed upward during optimization).
        theta : [theta0, theta1] prior inclusion probabilities for
                inactive / active factor columns.
    """
    def __init__(self, y_mat, k_max, delta=[5, 0.001], theta=[0.001, 0.5]):
        # observed data: y_mat is 2d-array like (n_date x n_name)
        self.y_mat = y_mat
        self.n_date = y_mat.shape[0]
        self.n_name = y_mat.shape[1]
        # adjustable hyper-params: k_max, theta, delta.
        # Copy into fresh lists: delta0_reconfig() mutates self.delta in
        # place, and the original stored the *shared default list*, so a
        # reconfig on one instance silently changed the default for every
        # later instance (classic mutable-default bug).
        self.k_max = k_max
        self.theta = list(theta)
        self.delta = list(delta)
        # non-adjustable hyper-params: s2_lambda, s2_sigma, eta, xi
        self.s2_lambda = 1
        self.s2_sigma = 1
        self.eta = 1
        self.xi = 1

    def cpt_config(self, subsetting=False, minseglen=5, cpt_set=None):
        """Specify the change-point candidate set.

        With subsetting=True candidates are restricted to cpt_set (an
        equally spaced grid of step `minseglen` by default); otherwise
        PELT with a minimum segment length is used.
        """
        self.subsetting = subsetting
        if subsetting:
            # `is None` rather than truthiness: a user-supplied cpt_set is
            # typically a numpy array, whose truth value is ambiguous and
            # would raise under `if not cpt_set`.
            if cpt_set is None:
                cpt_set = minseglen*np.array(range(int((self.n_date-1)/minseglen)+1))
                cpt_set = np.append(cpt_set, self.n_date)
            self.cpt_set = cpt_set
        else:
            self.minseglen = minseglen

    def param_init(self):
        """Initialize parameters: Beta, sigma2, Lambda2, tau, k_plus, lambda2_0."""
        # random start for loadings; scaled up so the lasso does not kill
        # everything on the first M-step
        self.Beta = np.random.randn(self.n_name, self.k_max)*10
        self.sigma2 = np.ones(self.n_name)
        # one variance vector per segment; start with a single segment
        self.Lambda2 = []
        self.Lambda2.append(np.ones(self.k_max))
        self.tau = [0, self.n_date]
        self.k_plus = 1
        self.lambda2_0 = 1

    def delta0_reconfig(self, delta0):
        """Reset the spike rate delta0 (annealed upward during optimization)."""
        self.delta[0] = delta0

    def _e_step_gamma(self):
        """
        E-step: posterior inclusion probabilities E[gamma | Beta].

        Columns < k_plus (active factors) use prior probability theta[1];
        columns >= k_plus (inactive) use theta[0].
        """
        # BUG FIX: fill with the inactive-factor probability theta[0] and
        # then overwrite the active columns with theta[1].  The original
        # initialized everything with theta[1], which made the overwrite a
        # no-op and ignored theta[0] entirely in this step (inconsistent
        # with the Q1 objective in _m_step_q1, which does use theta[0]).
        Theta_rep = self.theta[0] * np.ones((self.n_name, self.k_max))
        Theta_rep[:,:self.k_plus] = self.theta[1]
        tmp = np.exp(-abs(self.Beta)*(self.delta[0]-self.delta[1]))
        ratio = self.delta[0]/self.delta[1]*(1-Theta_rep)/Theta_rep*tmp
        E_Gamma = 1/(1+ratio)
        return E_Gamma

    def _e_step_f(self):
        """
        E-step: sufficient statistics of the latent factors F.

        Returns (E_F, E_F2, M_u, M_sum) where E_F/E_F2 are first/second
        posterior moments per date, M_sum the duration-weighted sum of the
        per-segment posterior covariances, and M_u its matrix square root
        (used to augment the lasso regression in _m_step_q2).
        """
        M = []
        M_sum = 0
        E_F = np.ndarray(shape = (self.n_date,self.k_max))
        E_F2= np.ndarray(shape = (self.n_date,self.k_max))
        tau = self.tau
        Beta = self.Beta
        Sigma_inv=np.diag(1/self.sigma2)
        for j in range(len(tau)-1):
            # posterior covariance of F_t within segment j
            M.append(np.linalg.inv(np.diag(1/self.Lambda2[j]) +
                Beta.transpose().dot(Sigma_inv).dot(Beta)))
            M_sum = M_sum + (tau[j+1] - tau[j])*M[j]
            for t in range(tau[j],tau[j+1]):
                E_F[t,:] = M[j].dot(Beta.transpose()).dot(Sigma_inv).dot(self.y_mat[t,:])
                E_F2[t,:]=E_F[t,:]**2 + np.diag(M[j])
        M_u = scipy.linalg.sqrtm(M_sum)
        return E_F, E_F2, M_u, M_sum

    def _m_step_q1(self, E_F2, E_Gamma):
        """
        M-step of Q1: change-point detection (tau), selection of the number
        of active factors (k_plus), and factor-variance updates
        (Lambda2, lambda2_0).
        """
        k_plus = self.k_plus
        s2_lambda = self.s2_lambda
        eta = self.eta
        k_max = self.k_max
        theta = self.theta
        # detect change-points on the currently active factors
        if self.subsetting:
            cpt_m = cpt.cpt_detect_PELT(eta, s2_lambda, E_F2[:,:k_plus], k_plus, self.cpt_set)
        else:
            cpt_m = cpt.cpt_detect_minseglen_PELT(eta, s2_lambda, E_F2[:,:k_plus], k_plus, self.minseglen)
        print(cpt_m)
        # score every candidate count of active factors k_star
        Q1_list = []
        for k_star in range(1, k_max+1):
            # model-complexity penalty for the change-points
            if self.subsetting:
                Q1 = -0.5*(len(cpt_m)-1)*k_star*math.log(len(self.cpt_set)-1)
            else:
                Q1 = -0.5*(len(cpt_m)-1)*k_star*math.log(self.n_date)
            # prior inclusion terms: active columns use theta[1]
            Q1 += (E_Gamma[:,:k_star].sum()*math.log(theta[1]) +
                (1-E_Gamma[:,:k_star]).sum()*math.log(1-theta[1]))
            if k_star < k_max:
                # ... inactive columns use theta[0]
                Q1 += (E_Gamma[:,k_star:k_max].sum()*math.log(theta[0]) +
                    (1-E_Gamma[:,k_star:k_max]).sum()*math.log(1-theta[0]))
                Q1 -= cpt.cost_function_inactive_factors(eta, s2_lambda, E_F2[:,k_star:k_max])
            for j in range(len(cpt_m)-1):
                Q1 -= cpt.cost_function(eta, s2_lambda, E_F2[cpt_m[j]:cpt_m[j+1],:], k_star)
            Q1_list.append(Q1)
        k_plus = np.argmax(Q1_list)+1
        print(k_plus)
        # per-segment posterior-mode variances of the active factors
        lambda2_m = []
        for j in range(len(cpt_m)-1):
            lambda2_m.append((eta*s2_lambda+E_F2[cpt_m[j]:cpt_m[j+1],:k_plus].sum(axis=0))/(eta+2+cpt_m[j+1]-cpt_m[j]))
        # shared variance of the inactive factors.
        # BUG FIX: the original sliced with the loop leftover `k_star`
        # (== k_max after the loop above), so E_F2[:,k_star:k_max] was
        # always empty and lambda2_0 degenerated to the prior mode; the
        # selected k_plus is the intended split point.
        lambda2_0 = (eta*s2_lambda+E_F2[:,k_plus:k_max].sum())/(eta+2+E_F2[:,k_plus:k_max].size)
        Lambda2 = []
        for j in range(len(cpt_m)-1):
            Lambda2.append(np.append(lambda2_m[j],lambda2_0*np.ones(k_max-k_plus)))
        self.tau = cpt_m
        self.k_plus = k_plus
        self.Lambda2 = Lambda2
        self.lambda2_0 = lambda2_0

    def _m_step_q2(self, E_F, M_u, M_sum, E_Gamma, PXL=False):
        """
        M-step of Q2: update loadings Beta (row-wise adaptive lasso) and
        idiosyncratic variances sigma2; optional PXL rotation.
        """
        Beta = self.Beta
        n_date = self.n_date
        n_name = self.n_name
        k_max = self.k_max
        xi = self.xi
        delta = self.delta
        # augmented regression: stacking M_u under E_F absorbs the
        # posterior-covariance term into an ordinary lasso fit
        tilde_Y = np.append(self.y_mat, np.zeros((k_max,n_name)), axis=0)
        tilde_F = np.append(E_F, M_u, axis=0)
        tilde_F_rw = np.ndarray(tilde_F.shape)
        for j in range(n_name):
            # adaptive penalty weights: spike rate delta[0] where the
            # loading is likely zero, slab rate delta[1] otherwise
            lambda_j = (1-E_Gamma[j,:])*delta[0]+E_Gamma[j,:]*delta[1]
            # reweight columns so a single-alpha lasso applies per-entry penalties
            for k in range(k_max):
                tilde_F_rw[:,k] = tilde_F[:,k]/lambda_j[k]
            # lasso fit for row j of Beta.  COMPAT FIX: `normalize=False`
            # was deprecated in scikit-learn 1.0 and removed in 1.2; False
            # was the default, so dropping it preserves behavior.
            clf = linear_model.Lasso(alpha = self.sigma2[j]/(n_date+k_max), fit_intercept = False)
            clf.fit(tilde_F_rw, tilde_Y[:,j])
            Beta[j,:] = clf.coef_/lambda_j
            # sum of squared residuals
            SSR = sum((tilde_Y[:,j]-tilde_F.dot(Beta[j,:]))**2)
            # posterior-mode update of the idiosyncratic variance
            self.sigma2[j]=(SSR+xi*self.s2_sigma)/(n_date+xi+2)
        if PXL:
            # parameter-expansion rotation (PXL-EM): right-multiply Beta by
            # the lower Cholesky factor of the factor second moment
            A_l = scipy.linalg.cholesky(1/n_date*(E_F.transpose().dot(E_F)+M_sum), lower=True)
            Beta = Beta.dot(A_l)
        # reorder factor columns by decreasing total loading magnitude and
        # keep the per-segment variances aligned with the new order
        order = np.argsort(abs(Beta).sum(axis=0))[::-1]
        Beta = Beta[:,order]
        self.Beta = Beta
        for j in range(len(self.tau)-1):
            self.Lambda2[j] = self.Lambda2[j][order]

    def em_iterator(self, nstep, PXL):
        """
        Main EM loop; PXL selects PXL-EM (rotation in the M-step).

        Stops early once Beta changes by less than 1e-4 after a burn-in of
        nstep/5 iterations.
        """
        for i in range(nstep):
            Beta_old = copy.deepcopy(self.Beta)
            E_Gamma = self._e_step_gamma()
            E_F, E_F2, M_u, M_sum = self._e_step_f()
            self._m_step_q1(E_F2, E_Gamma)
            self._m_step_q2(E_F, M_u, M_sum, E_Gamma, PXL)
            # BUG FIX: compare the *absolute* change; the signed .max()
            # could report "converged" while every entry of Beta was still
            # moving downward by a large amount.
            if i > nstep/5 and np.abs(self.Beta-Beta_old).max()<0.0001:
                break

    def final_rescale(self):
        """Return (Beta, Lambda_ts) rescaled so factor variances average 1 over time."""
        # tile per-segment variances into a per-date matrix
        Lambda_ts = np.tile(self.Lambda2[0],(self.tau[1]-self.tau[0],1))
        if len(self.tau)>2:
            for j in range(1,len(self.tau)-1):
                Lambda_tile = np.tile(self.Lambda2[j],(self.tau[j+1]-self.tau[j],1))
                Lambda_ts = np.concatenate((Lambda_ts,Lambda_tile),axis=0)
        # move the average scale from the variances into the loadings
        lambda2_mean = Lambda_ts.mean(axis=0)
        Lambda_ts = Lambda_ts/lambda2_mean
        Beta = self.Beta*np.sqrt(lambda2_mean)
        return Beta, Lambda_ts

    def log_likelihood(self):
        """Evaluate the (penalized) log-likelihood at the current parameters."""
        Beta = self.Beta
        Lambda2 = self.Lambda2
        sigma2 = self.sigma2
        k_max = self.k_max
        k_plus = self.k_plus
        tau = self.tau
        lambda2_0 = self.lambda2_0
        delta = self.delta
        theta = self.theta
        # change-point complexity penalty (mirrors _m_step_q1)
        if self.subsetting:
            loglik = -0.5*(len(tau)-1)*k_plus*np.log(len(self.cpt_set)-1)
        else:
            loglik = -0.5*(len(tau)-1)*k_plus*np.log(self.n_date)
        # inverse-gamma prior terms for the inactive-factor variance ...
        if k_max > k_plus:
            loglik -= (self.eta*self.s2_lambda/lambda2_0 + (self.eta+2)*np.log(lambda2_0))/2
        # ... and the idiosyncratic variances
        loglik -= sum(self.xi*self.s2_sigma/sigma2 + (self.xi+2)*np.log(sigma2))/2
        Sigma_t = []
        for j in range(len(tau)-1):
            loglik -= sum(self.eta*self.s2_lambda/Lambda2[j][0:k_plus] + (self.eta+2)*np.log(Lambda2[j][0:k_plus]))/2
            # marginal covariance of y_t within segment j
            Sigma_t.append(Beta.dot(np.diag(Lambda2[j])).dot(Beta.transpose()) + np.diag(sigma2))
            for t in range(tau[j],tau[j+1]):
                loglik = loglik + scipy.stats.multivariate_normal.logpdf(self.y_mat[t,:],np.zeros(self.n_name), Sigma_t[j])
        # spike-and-slab Laplace prior on the loadings: active columns ...
        loglik = loglik + (np.log(theta[1]*delta[0]*np.exp(-delta[0]*abs(Beta[:,0:k_plus])) +
            (1-theta[1])*delta[1]*np.exp(-delta[1]*abs(Beta[:,0:k_plus])))).sum()
        # ... and inactive columns
        if k_max > k_plus:
            loglik = loglik + (np.log(theta[0]*delta[0]*np.exp(-delta[0]*abs(Beta[:,k_plus:k_max])) +
                (1-theta[0])*delta[1]*np.exp(-delta[1]*abs(Beta[:,k_plus:k_max])))).sum()
        return loglik
def data_toy_simulation():
    """
    Build a toy multivariate series for testing: block-structured factor
    loadings plus factors whose variance changes at known change-points.
    """
    n_factor = 5
    block_size = 30
    overlap_size = 5
    n_name = overlap_size + (block_size - overlap_size) * n_factor
    sd_idio = 1
    # loading matrix B_0: one (slightly noisy) block of ones per factor,
    # with consecutive blocks overlapping by overlap_size names
    B_mat = np.zeros((n_name, n_factor))
    for k in range(n_factor):
        lo = (block_size - overlap_size) * k
        hi = block_size * (k + 1) - overlap_size * k
        B_mat[lo:hi, k] = 1 + np.random.randn(block_size) / 10
    # segment layout and per-segment factor variances
    n_segment = 4
    tau_true = [0, 50, 80, 100, 150]
    n_date = 150
    lambda_0 = [4, 1, 3, 1]
    # factors: independent Gaussians whose variance switches by segment
    f_mat = np.ndarray((n_date, n_factor))
    for k in range(n_factor):
        for j in range(n_segment):
            seg_len = tau_true[j + 1] - tau_true[j]
            f_mat[tau_true[j]:tau_true[j + 1], k] = np.random.randn(seg_len) * np.sqrt(lambda_0[j])
    # observations: factor structure plus idiosyncratic noise
    y_mat = f_mat.dot(B_mat.transpose()) + np.random.randn(n_date, n_name) * sd_idio
    print(B_mat)
    # sanity check: the detector should recover tau_true from true factors
    f2_mat = f_mat ** 2
    print(cpt.cpt_detect_minseglen_PELT(1, 1, f2_mat, n_factor, 5))
    return y_mat
def normalize(y_mat):
    """Standardize each column: zero mean, unit (population) std."""
    centered = y_mat - y_mat.mean(axis=0)
    return centered / y_mat.std(axis=0)
if __name__ == "__main__":
    y_mat = data_toy_simulation()
    # Standardize each series before fitting.
    # BUG FIX: the original called utils.normalize(), but the module is
    # imported as `lib.utils`, so the bare name `utils` is undefined
    # (NameError at runtime); the local normalize() defined above is the
    # intended call.
    y_mat = normalize(y_mat)
    model = FactorModel(y_mat, k_max=20)
    model.cpt_config()
    model.param_init()
    # anneal the spike rate delta0 upward, running PXL-EM at each level
    delta0_steps = [1, 5, 10, 20]
    for delta0 in delta0_steps:
        model.delta0_reconfig(delta0)
        model.em_iterator(200, True)
        print(model.k_plus)
        print(model.Beta[:, :model.k_plus])
    # final pass with plain EM (no PXL rotation)
    model.em_iterator(200, False)
    print(model.k_plus)
    print(model.Beta[:, :model.k_plus])