# -*- coding: utf-8 -*-
import numpy as np
import kernel_helpers as k_helpers
from scipy.optimize import fmin_l_bfgs_b


# Rounds a vector and pushes any tiny residual onto its largest-magnitude
# entry so that the entries sum exactly to expected_sum (necessary because
# very slight floating-point errors were breaking the algorithm).
def fix_precision_of_vector(vec, expected_sum, max_diff=0.0001):
    for i in range(len(vec)):
        vec[i] = round(vec[i], 8)
    u = np.argmax(np.abs(vec))
    diff = expected_sum - sum(vec)
    if abs(diff) <= max_diff:
        vec[u] += diff
        return vec
    else:
        raise Exception('Vector is not close to expected value!')
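# Illustrative example (not in the original file): a weight vector that
# should sum to 1 but is short by ~1e-7 gets its largest entry nudged:
#   fix_precision_of_vector(np.array([0.4, 0.3, 0.2999999]), 1.0)
#   -> array([0.4000001, 0.3, 0.2999999])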


# Builds the box constraints for the n alphas: each must lie in [0, C].
def get_box_constraints(n, C=1.0):
    C = C * 1.0
    box_constraints = [[0, C] for _ in range(n)]
    return box_constraints
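# Illustrative example (not in the original file):
#   get_box_constraints(3, C=2) -> [[0, 2.0], [0, 2.0], [0, 2.0]]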


def compute_J(K, y_mat, alpha0, box_constraints):
    """Minimizes the SVM dual objective for a fixed combined kernel K.

    J(alpha) = -sum(alpha) + 1/2 * alpha' (K o y_mat) alpha,
    where o is the elementwise product and y_mat = y y'.
    """
    n = K.shape[0]

    def func(alpha):
        """The SVM dual objective."""
        return (-1 * np.sum(alpha) +
                0.5 * alpha.T.dot(np.multiply(K, y_mat)).dot(alpha))

    def func_deriv(alpha):
        """Gradient of the SVM dual objective with respect to alpha."""
        return -1 * np.ones(n) + np.multiply(K, y_mat).dot(alpha)

    # L-BFGS-B optimizer: func is the objective, alpha0 the starting point,
    # fprime the gradient, and bounds the [0, C] box constraints on alpha.
    alpha, min_val, info = fmin_l_bfgs_b(func, alpha0, fprime=func_deriv,
                                         bounds=box_constraints)
    # Returns the optimal alphas, the minimum objective value, and solver
    # diagnostics; see
    # https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin_l_bfgs_b.html#scipy.optimize.fmin_l_bfgs_b
    return alpha, min_val, info


def compute_dJ(kernel_matrices, y_mat, alpha):
    """Gradient of J with respect to the kernel weights d.

    For each kernel m, dJ/dd_m = -1/2 * alpha' (K_m o y_mat) alpha.
    """
    M = len(kernel_matrices)
    dJ = np.zeros(M)
    for m in range(M):
        kernel_matrix = kernel_matrices[m]
        dJ[m] = -0.5 * alpha.T.dot(np.multiply(kernel_matrix, y_mat)).dot(alpha)
    return dJ


def get_armijos_step_size(kernel_matrices, d, y_mat, alpha0, box_constraints,
                          gamma0, Jd, D, dJ, c=0.5, T=0.5):
    """Backtracking (Armijo) line search along the descent direction D.

    Shrinks gamma by the factor T until
        J(d + gamma * D) <= J(d) + gamma * c * m,
    where m = D' * dJ (negative for a genuine descent direction).
    """
    gamma = gamma0
    m = D.T.dot(dJ)
    while True:
        combined_kernel_matrix = k_helpers.get_combined_kernel(
            kernel_matrices, d + gamma * D)
        alpha, new_J, info = compute_J(combined_kernel_matrix, y_mat,
                                       alpha0, box_constraints)
        if new_J <= Jd + gamma * c * m:
            return gamma
        # Sufficient-decrease condition failed: shrink the step and retry.
        gamma = gamma * T
        if gamma < 1e-12:
            # Give up once the step is vanishingly small rather than
            # loop forever on numerical noise.
            return gamma


def compute_descent_direction(d, dJ, mu):
    """Reduced-gradient descent direction for the kernel weights d.

    mu is the index of the reference component (the largest weight); the
    three cases below are explained on p. 2498/2499.
    """
    M = len(d)
    # The descent direction
    D = np.zeros(M)
    for m in range(M):
        if m == mu:
            # The reference component offsets the moves of all the other
            # active components.
            D[m] = 0
            for v in range(M):
                if (v != mu) and d[v] > 0:
                    D[m] += dJ[v] - dJ[mu]
        elif -0.00000001 < d[m] < 0.00000001 and dJ[mu] < dJ[m]:
            # d[m] == 0 up to rounding error, and increasing it would not
            # improve the objective: clamp the weight and stay put.
            d[m] = 0
            D[m] = 0
        elif d[m] >= 0 and m != mu:
            D[m] = dJ[mu] - dJ[m]
        else:
            raise Exception(
                'Something went wrong with the descent update! '
                'm=%d, mu=%d, d[m]=%r, d=%s, dJ=%s' % (m, mu, d[m], d, dJ))
    return D
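# Illustrative example (not in the original file): with d = [0.5, 0.5],
# dJ = [-2.0, -1.0] and mu = 0, the loop yields D = [1.0, -1.0], which
# shifts weight toward the kernel with the more negative gradient while
# keeping sum(d) fixed.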


# Returns True when it is time to stop.
def stopping_criterion(dJ, d, threshold):
    M = len(dJ)
    if stopping_criterion.first_iteration:
        stopping_criterion.first_iteration = False
        return False
    dJ_min = np.inf
    dJ_max = -np.inf
    lowest_dm0 = np.inf
    # Optimality conditions: the gradient entries of the active weights
    # (d[m] > 0) must agree to within threshold, and no inactive weight
    # may offer a steeper descent than the active ones.
    for m in range(M):
        if d[m] > 0:
            if dJ[m] < dJ_min:
                dJ_min = dJ[m]
            if dJ[m] > dJ_max:
                dJ_max = dJ[m]
        else:
            if dJ[m] < lowest_dm0:
                lowest_dm0 = dJ[m]
    return (dJ_max - dJ_min < threshold) and lowest_dm0 >= dJ_max


# Sentinel so the first call never reports convergence.
stopping_criterion.first_iteration = True
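

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It exercises
# the helpers on a tiny synthetic two-kernel problem; the inline weighted
# kernel sum stands in for k_helpers.get_combined_kernel, whose exact
# behavior is assumed here.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X = rng.randn(8, 3)
    y = np.sign(rng.randn(8))
    y_mat = np.outer(y, y)

    # Two candidate kernels: linear and its elementwise square.
    kernel_matrices = [X.dot(X.T), X.dot(X.T) ** 2]
    d = np.array([0.5, 0.5])  # initial kernel weights on the simplex
    combined = sum(w * K for w, K in zip(d, kernel_matrices))

    box_constraints = get_box_constraints(len(y), C=1.0)
    alpha0 = np.zeros(len(y))
    alpha, Jd, info = compute_J(combined, y_mat, alpha0, box_constraints)
    print('J(d) =', Jd)

    dJ = compute_dJ(kernel_matrices, y_mat, alpha)
    mu = np.argmax(d)  # reference component: index of the largest weight
    D = compute_descent_direction(d, dJ, mu)
    print('descent direction:', D)

    # The first call is always False (sentinel); later calls test optimality.
    print('stop?', stopping_criterion(dJ, d, threshold=0.01))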