Example #1
def main():
    #PGM:
    '''
    x = np.array([[1.]])
    landa = 0.5
    alpha = 0.1
    A = np.array([[1.]])
    b = np.array([[.2]])
    error = 0.001
    xx, count, obj = PGM(x, alpha, landa, A, b, error)
    print(obj)
    draw(obj)
    '''
    #ADMM

    x = np.array([[1.]])
    z = np.array([[1.]])
    landa = np.array([[1.]])
    A = np.array([[1.]])
    b = np.array([[.2]])
    alpha = 0.1
    error = 0.01
    beta = 0.01
    xx, count, obj = ADMM(x, alpha, landa, A, b, error, beta, z)
    draw(obj)

    print("最终迭代结果: x:", xx)
    print("共进行了", count, "次迭代")
Example #2
def main():
    #SD:
    # x = np.array([[1.]])
    # alpha=0.1
    # landa=0.5
    # A = np.array([[1.]])
    # b = np.array([[.2]])
    # error=0.0001
    # xx,count,obj=SD(x,alpha,landa,A,b,error)
    #BFGS:

    # x = np.array([[1.]])
    # xx = np.array([[1.4]])
    # alpha=0.1
    # epsi=0.3
    # A = np.array([[1.]])
    # b = np.array([[.2]])
    # error=0.001
    # D = np.array([[1.5]])
    # xx,count,obj=BFGS(x,xx,A,b,alpha,D,epsi,error)

    #ADMM
    x = np.array([[1.]])
    z = np.array([[1.]])
    landa = np.array([[1.]])
    A = np.array([[1.]])
    b = np.array([[.2]])
    alpha = 0.1
    error = 0.01
    beta = 0.01
    xx, count, obj = ADMM(x, alpha, landa, A, b, error, beta, z)
    draw(obj)
    print("最终迭代结果: x:", xx)
    print("共进行了", count, "次迭代")
Example #3
def ADMM(A, b, alpha=0.1, beta=0.01, show_x=True, show_graph=True, log_int=1):
    # old signature: ADMM(x, alpha, landa, A, b, error, beta, z)
    n = A.shape[1]
    x = np.random.rand(n, 1)
    z = np.random.rand(n, 1)
    landa = np.random.rand(n, 1)
    k = 0
    if show_x:
        print("x starts with: ")
        print(x)
    red = np.linalg.norm(x, ord=1) + (alpha / 2) * np.linalg.norm(A @ x - b)**2
    print('The initial target value is %f' % (red))
    obj = []
    p = random.uniform(1.5, 1.8)  # over-relaxation factor
    I = np.identity(n)
    zero = np.zeros((n, 1))  # column vector, matching the shape of x and z

    while True:
        # x-update: solve the quadratic subproblem
        part_1 = np.linalg.inv(alpha * (A.T @ A) + beta * I)
        part_2 = alpha * (A.T @ b) + beta * z - landa
        xx = part_1 @ part_2
        # over-relaxed z-update and dual update (relaxation factor p)
        zz = (landa + beta * (xx * p - (1 - p) * z)) / (2 + beta)
        landalanda = landa + beta * (p * xx - (1 - p) * z - zz)

        red = np.linalg.norm(
            xx, ord=1) + (alpha / 2) * np.linalg.norm(A @ xx - b)**2
        if k % log_int == 0:
            obj.append(red)
            print('The %dth iteration, target value is %f' % (k, red))
        '''
        xzk = np.vstack((x, z))
        xzkp = np.vstack((xx, zz))
        e = np.linalg.norm(xzk - xzkp)
        if e <= error:
            break
        '''
        if stop(I, xx, -I, zz, z, zero, landalanda, alpha):
            break
        else:
            x = np.copy(xx)
            z = np.copy(zz)
            landa = np.copy(landalanda)
            if show_x:
                print('The current x is: ', x)
        k += 1

    print("Final x: ", xx)
    print("Total steps: ", k)

    if show_graph:
        draw(obj, log_int)

    return xx, k, obj
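The ADMM definitions in these examples call two helpers, stop() and draw(), that are not shown here. Below is a minimal sketch consistent with the call sites stop(I, xx, -I, zz, z, zero, landalanda, alpha) and draw(obj, log_int); the absolute tolerance and the exact residual test are assumptions, not the original implementation.

import numpy as np
import matplotlib.pyplot as plt


def stop(A1, x, A2, z_new, z_old, c, landa, alpha, tol=1e-4):
    # hypothetical stopping test: both ADMM residuals for A1 x + A2 z = c below tol
    # (landa is accepted but unused in this absolute-tolerance sketch)
    r = (A1 @ x + A2 @ z_new).ravel() - np.ravel(c)      # primal residual
    s = alpha * (A1.T @ (A2 @ (z_new - z_old))).ravel()  # dual residual, alpha as penalty
    return np.linalg.norm(r) < tol and np.linalg.norm(s) < tol


def draw(obj, log_int=1):
    # hypothetical plotting helper: one objective value per logged iteration
    plt.plot([i * log_int for i in range(len(obj))], obj)
    plt.xlabel('iteration')
    plt.ylabel('objective value')
    plt.show()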
Example #4
def main():
    x = np.array([[1.]])
    z = np.array([[1.]])
    landa = np.array([[1.]])
    A = np.array([[1.]])
    b = np.array([[.2]])
    alpha = 0.1
    beta = 0.01
    D = np.array([[0.5]])
    xx, count, obj = ADMM(x, alpha, landa, A, b, beta, z, D)

    draw(obj)
    print("最终迭代结果: x:", xx)
    print("共进行了", count, "次迭代")
Example #5
def PGM(A, b, alpha=0.1, error=0.001, show_x=True, show_graph=True, log_int=1):
    k = 0
    n = A.shape[1]
    x = np.random.rand(n, 1)
    landa = np.random.rand(n, 1)
    if show_x:
        print("x starts with: ")
        print(x)
    red = np.linalg.norm(x, ord=1) + (alpha / 2) * np.linalg.norm(A @ x - b)**2
    print('The initial target value is %f' % (red))
    obj = []
    while True:
        # gradient step on the smooth term, then elementwise soft-thresholding
        # (the proximal operator of the l1 norm)
        z = x - alpha * landa * np.dot(A.T, np.dot(A, x) - b)
        xx = np.sign(z) * np.maximum(np.abs(z) - alpha, 0)

        red = np.linalg.norm(
            xx, ord=1) + (alpha / 2) * np.linalg.norm(A @ xx - b)**2
        if k % log_int == 0:
            obj.append(red)
            print('The %dth iteration, target value is %f' % (k, red))

        e = np.linalg.norm(xx - x)
        if e < error:
            break
        else:
            x = np.copy(xx)
            if show_x:
                print('The current x is: ', x)
        k += 1

    print("Final x: ", xx)
    print("Total steps: ", k)

    if show_graph:
        draw(obj, log_int)

    return xx, k, obj
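A minimal call for this PGM variant, with made-up data; since the step and threshold are fixed, convergence is not guaranteed for arbitrary inputs, so treat this only as a usage sketch:

import numpy as np

A = np.array([[1., 0.],
              [0., 2.]])
b = np.array([[0.2],
              [0.4]])
xx, count, obj = PGM(A, b, alpha=0.1, error=0.001,
                     show_x=False, show_graph=False)
print("Final x:", xx.ravel(), "steps:", count)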
Example #6
def BFGS(A,
         b,
         alpha=0.1,
         epsi=0.3,
         error=0.001,
         show_x=True,
         show_graph=True,
         log_int=1):
    k = 0
    n = A.shape[1]
    x = np.random.rand(n, 1)
    xx = np.random.rand(n, 1)
    D = np.random.rand(n, n)
    if show_x:
        print("x starts with: ")
        print(x)
    obj = []
    while True:
        p = xx - x

        # gradients of ||x||^2 + (alpha/2)*||Ax - b||^2 at the two most recent iterates
        delta_x = 2 * x + alpha * np.dot(A.T, np.dot(A, x) - b)
        delta_xx = 2 * xx + alpha * np.dot(A.T, np.dot(A, xx) - b)
        q = delta_xx - delta_x

        tao = q.T @ D @ q

        temp1 = p / np.dot(p.T, q)
        temp2 = np.dot(D, q) / tao
        v = temp1 - temp2

        # Broyden-family update of the inverse-Hessian approximation D
        part_2 = np.dot(p, p.T) / np.dot(p.T, q)
        part_3 = np.dot(np.dot(np.dot(D, q), q.T), D.T) / np.dot(
            np.dot(q.T, D), q)
        part_4 = epsi * tao * v @ v.T
        DD = D + part_2 - part_3 + part_4

        xxx = xx - alpha * np.dot(DD, delta_xx)

        red = np.linalg.norm(xxx)**2 + (alpha / 2) * np.linalg.norm(A @ xxx -
                                                                    b)**2
        if k % log_int == 0:
            obj.append(red)
            print('The %dth iteration, target value is %f' % (k, red))

        e = np.linalg.norm(xxx - xx)
        if e < error:
            break
        else:
            x = np.copy(xx)
            xx = np.copy(xxx)
            D = np.copy(DD)
            if show_x:
                print('The current x is: ', x)
        k += 1

    print("Final x: ", xx)
    print("Total steps: ", k)

    if show_graph:
        draw(obj, log_int)

    return xx, k, obj
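A minimal call for this BFGS variant, with made-up data; x, xx and D are initialized randomly inside the function, so results vary between runs and this is only a usage sketch:

import numpy as np

A = np.array([[1.]])
b = np.array([[.2]])
xx, count, obj = BFGS(A, b, alpha=0.1, epsi=0.3, error=0.001,
                      show_x=False, show_graph=False)
print("Final x:", xx, "steps:", count)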
Example #7
def ADMM(A, b, alpha=0.1, beta=0.01, show_x=True, show_graph=True, log_int=1):
    n = A.shape[1]
    x = np.random.rand(n, 1)
    z = np.random.rand(n, 1)
    landa = np.random.rand(n, 1)
    k = 0
    if show_x:
        print("x starts with: ")
        print(x)
    red = np.linalg.norm(x, ord=1) + (alpha / 2) * np.linalg.norm(A @ x - b)**2
    print('The initial target value is %f' % (red))
    obj = []
    I = np.identity(n)
    zero = np.zeros((n, 1))  # column vector, matching the shape of x and z

    while True:
        # x-update (landa here is a vector, the dual variable)
        temp1 = np.linalg.inv(alpha * (A.T @ A) + beta * I)
        temp2 = alpha * (A.T @ b) + beta * z - landa
        xx = temp1 @ temp2

        # z-update: elementwise soft-thresholding (proximal operator of the l1 norm)
        zz = np.sign(xx + landa / beta) * np.maximum(
            np.abs(xx + landa / beta) - 1 / beta, 0)

        landalanda = landa + beta * (xx - zz)

        red = np.linalg.norm(
            xx, ord=1) + (alpha / 2) * np.linalg.norm(A @ xx - b)**2
        if k % log_int == 0:
            obj.append(red)
            print('The %dth iteration, target value is %f' % (k, red))
        '''
        xzk = np.vstack((x, z))
        xzkp = np.vstack((xx, zz))
        
        e = np.linalg.norm(xzk - xzkp)
        
        if e <= error:
            break
        '''
        if stop(I, xx, -I, zz, z, zero, landalanda, alpha):
            break
        else:
            x = np.copy(xx)
            z = np.copy(zz)
            landa = np.copy(landalanda)
            if show_x:
                print('The current x is: ', x)
        k += 1

    print("Final x: ", xx)
    print("Total steps: ", k)

    if show_graph:
        draw(obj, log_int)

    return xx, k, obj
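The z-update in this variant is the elementwise soft-thresholding operator (the proximal operator of the l1 norm) applied to xx + landa / beta with threshold 1 / beta. A small standalone illustration with made-up numbers:

import numpy as np


def soft_threshold(v, t):
    # shrink every entry of v toward zero by t, clipping at zero
    return np.sign(v) * np.maximum(np.abs(v) - t, 0)


v = np.array([[1.5], [-0.3], [0.05]])
print(soft_threshold(v, 0.5))  # each entry is shrunk toward zero by 0.5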
Example #8
def ADMM(A,
         b,
         alpha=0.1,
         beta=0.01,
         show_x=True,
         show_graph=True,
         log_int=1,
         max_step=-1):
    # old signature: ADMM(x, alpha, landa, A, b, beta, z, D)
    n = A.shape[1]
    x = np.random.rand(n, 1)
    z = np.random.rand(n, 1)
    landa = np.random.rand(n, 1)
    k = 0
    if show_x:
        print("x starts with: ")
        print(x)
    red = np.linalg.norm(x, ord=1) + (alpha / 2) * np.linalg.norm(A @ x - b)**2
    print('The initial target value is %f' % (red))
    obj = []
    I = np.identity(n)
    one = np.ones((n, 1))
    zero = np.zeros((n, 1))
    zeros = np.zeros((n, n))
    D = beta * np.eye(n)  # adaptive penalty matrix, initialized to beta * I

    while True:
        # x-update
        temp1 = np.linalg.inv(alpha * (A.T @ A) + D)
        temp2 = alpha * (A.T @ b) + (D @ z) - landa
        xx = temp1 @ temp2

        # z-update
        temp1 = np.linalg.inv(2 * I + D)
        temp2 = landa + D @ xx
        zz = temp1 @ temp2

        # dual update
        landalanda = landa + D @ (xx - zz)
        red = np.linalg.norm(
            xx, ord=1) + (alpha / 2) * np.linalg.norm(A @ xx - b)**2

        if k % log_int == 0:
            obj.append(red)
            print('The %dth iteration, target value is %f' % (k, red))

        if k == max_step:
            break

        if stop(I, xx, -I, zz, z, zero, landalanda, alpha):
            break
        else:
            # adaptive penalty: primal residual r, dual residual s
            r = xx - zz
            s = -1 * alpha * (zz - z)
            u = 10
            t_incr = 2
            t_decr = t_incr
            # grow D where the primal residual dominates ...
            tmp = np.abs(r) - u * np.abs(s)
            tmp = np.where(tmp > 0, t_incr, 1)
            D *= tmp
            # ... and shrink it where the dual residual dominates
            tmp = np.abs(s) - u * np.abs(r)
            tmp = np.where(tmp > 0, 1 / t_decr, 1)
            D *= tmp
            x = np.copy(xx)
            z = np.copy(zz)
            landa = np.copy(landalanda)
            if show_x:
                print('The current x is: ', x)
            k += 1
    print("Final x: ", xx)
    print("Total steps: ", k)

    if show_graph:
        draw(obj, log_int)

    return xx, k, obj
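A minimal call for this adaptive-penalty ADMM, with made-up data, assuming stop() and draw() are available as sketched after Example #3; max_step bounds the run in case the stopping test is never met:

import numpy as np

np.random.seed(0)
A = np.random.rand(5, 3)
b = np.random.rand(5, 1)
xx, count, obj = ADMM(A, b, alpha=0.1, beta=0.01,
                      show_x=False, show_graph=False, max_step=200)
print("Final x:", xx.ravel(), "steps:", count)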