Example #1
import numpy as np

# Fletcher-Reeves conjugate gradient, run for two steps on a 2-D quadratic
# f(x) = 0.5*x'Ax + b'x + c. conj_f3, f_value and golden_section_search are
# helpers defined elsewhere in the module (a sketch of them follows below).
def Fletcher_Reeves_conj():
    f = conj_f3
    c, b, A = f()
    x0 = np.matrix('10.;-5.')
    g0 = A * x0 + b   # gradient at x0
    v0 = -g0          # first direction: steepest descent

    # line search along v0
    lamb0, f_x0 = golden_section_search(lambda k: f_value(f, x0 + k*v0), [0, 2], 0.001)
    x1 = x0 + lamb0 * v0

    g1 = A*x1 + b     # gradient at x1
    # Fletcher-Reeves update: beta = (g1'g1) / (g0'g0)
    v1 = -g1 + np.dot(g1.T, g1)[0,0]/np.dot(g0.T, g0)[0,0] * v0
    lamb1, f_x1 = golden_section_search(lambda k: f_value(f, x1 + k*v1), [0, 2], 0.001)
    x2 = x1 + lamb1 * v1
    return x2, f_x1
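
The helper functions these examples lean on are not shown. Below is a minimal sketch of what they might look like, assuming each f() returns the coefficients (c, b, A) of a quadratic 0.5*x'Ax + b'x + c and golden_section_search returns an (argmin, min) pair; the real definitions (and the real coefficients of conj_f3) live elsewhere in the original module.

import numpy as np

def conj_f3():
    # Hypothetical coefficients; the real quadratic is defined elsewhere.
    c = 0.
    b = np.matrix('0.;0.')
    A = np.matrix('2.,0.;0.,4.')
    return c, b, A

def f_value(f, x):
    # Evaluate the quadratic 0.5*x'Ax + b'x + c at the column vector x.
    c, b, A = f()
    return (0.5 * x.T * A * x + b.T * x)[0, 0] + c

def golden_section_search(phi, bracket, tol):
    # Golden-section search for the minimum of the 1-D function phi on
    # [lo, hi]; returns (argmin, phi(argmin)), matching how callers unpack it.
    lo, hi = bracket
    ratio = (np.sqrt(5.) - 1.) / 2.   # ~0.618
    a = hi - ratio * (hi - lo)
    b = lo + ratio * (hi - lo)
    while hi - lo > tol:
        if phi(a) < phi(b):
            hi, b = b, a
            a = hi - ratio * (hi - lo)
        else:
            lo, a = a, b
            b = lo + ratio * (hi - lo)
    k = (lo + hi) / 2.
    return k, phi(k)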
Example #2
def powell_conj():
    '''
    Powell's conjugate-direction method on the quadratic from f_powell().
    The directions it generates come out as roughly
    u1 = [-11.14, -24.46], u2 = [-1.8, -0.28].
    '''
    x0 = np.matrix('20.;20.')
    # v1 and v2 are linearly independent starting directions
    v = np.matrix('1.,1.;-1.,1.')

    c, b, A = f_powell()
    u = np.matrix('0.,0.;0.,0.')   # columns hold the generated directions
    lamb = np.matrix('0.;0.')      # only len(lamb) == 2 is used, to cycle idx

    idx = 0      # which column of u to fill next
    total = 0
    while total < 3:
        # minimize along the two current directions in turn
        k, min_fk = golden_section_search(
            lambda k: f_value(f_powell, x0 + k * v[:, 0]), [-100., 100], 0.001)
        x1 = x0 + k * v[:, 0]

        k, min_fk = golden_section_search(
            lambda k: f_value(f_powell, x1 + k * v[:, 1]), [-100., 100], 0.001)
        x2 = x1 + k * v[:, 1]

        # the new direction u = x2 - x0 joins the minima of the two line searches
        u[:, idx] = x2 - x0
        k, min_fk = golden_section_search(
            lambda k: f_value(f_powell, x2 + k * u[:, idx]), [-100., 100],
            0.001)

        x0 = x2 + k * u[:, idx]
        # discard the oldest direction and keep the new conjugate one
        v[:, 0] = v[:, 1]
        v[:, 1] = u[:, idx]

        idx = (idx + 1) % len(lamb)
        total += 1

        # u1' A u2 -> 0 as the two stored directions become A-conjugate
        conj = u[:, 0].T * A * u[:, 1]
        # print("conj:", conj)

    x_star = x0

    return x_star, f_value(f_powell, x_star)
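
Powell's method rests on the parallel-subspace property: joining the minima of successive line searches along the same direction yields a direction that is A-conjugate to it, which is why u = x2 - x0 becomes the new search direction. The conj value tracked in the loop measures exactly this; here is a small helper for asserting it (check_conjugacy is not part of the original module):

def check_conjugacy(f, u1, u2, tol=1e-6):
    # u1' A u2 should vanish when u1 and u2 are A-conjugate.
    c, b, A = f()
    return abs((u1.T * A * u2)[0, 0]) < tol

# e.g. check_conjugacy(f_powell, u[:, 0], u[:, 1]) after a full cycle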
Example #3
def conj_grandient_method_for_f2():
    u1 = np.matrix('1.;0.')
    u2 = np.matrix('0.;1.')
    x0 = np.matrix('0.;0.')

    def_field = [-1, 1]
    epsilon = 0.005
    c, b, A = f2()

    # One-dimensional function handed to the line search, parameterized by k:
    #   f = f(xi + k*ui)
    k1 = golden_section_search(lambda k: f_value(f2, x0 + k*u1), def_field, epsilon)
    x1 = x0 + k1[0] * u1

    k2 = golden_section_search(lambda k: f_value(f2, x1 + k*u2), def_field, epsilon)
    x2 = x0 + k1[0] * u1 + k2[0] * u2

    return x2, f_value(f2, x2)
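
The two golden-section searches above can be replaced by closed-form steps, which is what the direct variant in Example #4 does: along a fixed direction u the quadratic reduces to a scalar function of k whose derivative is linear,

\varphi(k) = f(x_0 + k u), \qquad
\varphi'(k) = u^{\top}\bigl(A(x_0 + k u) + b\bigr)
            = u^{\top}(A x_0 + b) + k\,u^{\top} A u = 0
\;\Longrightarrow\;
\lambda = -\frac{u^{\top}(A x_0 + b)}{u^{\top} A u}.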
Example #4
def conj_grandient_method_for_f2_direct():
    u1 = np.matrix('1.;0.')
    u2 = np.matrix('0.;1.')
    x0 = np.matrix('0.;0.')

    c, b, A = f2()
    # Closed-form exact steps (see the derivation above). Taking both steps
    # from x0 is justified when u1 and u2 are A-conjugate for f2's A.
    lamb1 = -1. * (u1.T * (A * x0 + b)) / (u1.T * (A * u1))
    lamb2 = -1. * (u2.T * (A * x0 + b)) / (u2.T * (A * u2))

    x2 = x0 + lamb1[0, 0] * u1 + lamb2[0, 0] * u2

    return x2, f_value(f2, x2)
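
A quick consistency check between the two variants (hypothetical driver; it assumes the optimal coordinate steps for f2 fall inside the [-1, 1] bracket used by the line-search version):

x_ls, f_ls = conj_grandient_method_for_f2()
x_cf, f_cf = conj_grandient_method_for_f2_direct()
# both minimize the same quadratic, so the values should nearly agree
assert abs(f_ls - f_cf) < 1e-2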
Example #5
def optimal_grandient_for_f1(f, x0, epsilon):
    # Steepest descent with an exact closed-form step; assumes a pure
    # quadratic f(x) = 0.5*x'Ax, whose gradient is A*x.
    c, b, A = f()
    x = x0
    AA = A * A
    AAA = A * A * A
    f_deriv = A * x
    while True:
        # For a pure quadratic, solve for the step k in closed form instead of
        # doing a line search; see the derivation after this function.
        k = -1. * (x.T * AA * x) / (x.T * AAA * x)
        k = np.sum(k)   # collapse the 1x1 matrix to a scalar
        x_n = x + k * f_deriv

        f_deriv_n = A * x_n

        if np.sum(np.abs(f_deriv_n)) < epsilon:
            break

        x = x_n
        f_deriv = f_deriv_n

    return x, f_value(f, x)
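
The closed-form step used above: for the pure quadratic f(x) = 0.5*x'Ax the gradient is Ax, and minimizing along it gives

\varphi(k) = \tfrac{1}{2}\,(x + kAx)^{\top} A\,(x + kAx), \qquad
\varphi'(k) = x^{\top} A^{2} x + k\, x^{\top} A^{3} x = 0
\;\Longrightarrow\;
k = -\frac{x^{\top} A^{2} x}{x^{\top} A^{3} x},

which is negative for positive-definite A, so the update x + k*f_deriv steps against the gradient, as the code intends.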