Example #1

import numpy

def l1_explicit_MM_dual(x0,
                        A,
                        b,
                        mu,
                        iter_func=None,
                        iter_list=(),
                        lr_list=None,
                        mu_list=None,
                        sep=0,
                        figure=False,
                        xx=None,
                        **opts):
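    """Explicit majorize-minimize (MM) scheme on the dual of an
    l1-regularized least-squares problem (inferred from the function
    name and the surrounding helpers).

    Stage j of the continuation schedule runs `iter_func` for
    `iter_list[j]` steps with step size `lr_list[j]` and weight
    `mu_list[j]`; `iter_func` must be supplied by the caller. With
    `figure=True`, per-iteration losses (and the error against a
    reference solution `xx`, if given) are recorded for plotting.
    """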
    m, n = A.shape

    mu0 = mu

    lam, nu, xi, gamma = init(A, x0, b, mu)

    t = 0
    iter_len = len(iter_list)

    formal_loss_list, real_loss_list, error_xx_list = [], [], []

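    # One outer pass per continuation stage: each stage gets its own
    # step size, mu, and precomputed inverse for the dual update.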
    for j in range(iter_len):
        lr = lr_list[j]
        inv = update_inv(m, A, lr)
        mu = mu_list[j]
        for i in range(iter_list[j]):
            lam, nu, xi = iter_func(A, b, inv, lam, nu, xi, mu, gamma, lr)

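            # Record the stage objective (current mu) and the true
            # objective (original mu0) for later plotting.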
            if figure:
                formal_loss_list.append(loss_func(A, xi, b, mu))
                real_loss_list.append(loss_func(A, xi, b, mu0))
                if xx is not None:
                    x = reconstruct(xi)
                    error_xx_list.append(errfun(x, xx))

            if sep != 0 and t % sep == 0:
                loss = loss_func(A, xi, b, mu0)
                print("i: {0}, j: {1}, t: {2}, loss: {3:.5e}".format(
                    i, j, t, loss))

            t += 1

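    # Map the final dual/auxiliary variable back to a primal
    # solution and report the objective at the original mu0.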
    solution = reconstruct(xi)
    loss = loss_func(A, xi, b, mu0)

    out = {
        "solution": solution,
        "loss": loss,
        "vars": 2 * n + m,
        "iters": t,
        "conts": iter_len,
        "formal_loss": numpy.array(formal_loss_list),
        "real_loss": numpy.array(real_loss_list),
        "error": numpy.array(error_xx_list),
    }

    return solution, out
Example #2

import numpy

def l1_ADMM_primal_linear(x0,
                          A,
                          b,
                          mu,
                          iter_list=(),
                          lr_list=None,
                          mu_list=None,
                          tau_list=None,
                          sep=0,
                          figure=False,
                          xx=None,
                          **opts):
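    """ADMM on the primal problem with a linearized x-update
    (inferred from the function name), under a continuation schedule.

    Stage j runs `iter_list[j]` iterations with step size
    `lr_list[j]`, weight `mu_list[j]`, and linearization parameter
    `tau_list[j]`.
    """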
    m, n = A.shape

    mu0 = mu

    x, y, lam, gamma = init(A, x0, b)

    t = 0
    iter_len = len(iter_list)

    formal_loss_list, real_loss_list, error_xx_list = [], [], []

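    # Continuation over stages: each stage has its own step size,
    # weight mu, and linearization parameter tau.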
    for j in range(iter_len):
        lr = lr_list[j]
        mu = mu_list[j]
        tau = tau_list[j]
        for i in range(iter_list[j]):
            x, y, lam = iteration(A, x, b, y, lam, mu, gamma, lr, tau)

            if figure:
                formal_loss_list.append(loss_func(A, x, b, mu))
                real_loss_list.append(loss_func(A, x, b, mu0))
                if xx is not None:
                    error_xx_list.append(errfun(x, xx))

            if sep != 0 and t % sep == 0:
                loss = loss_func(A, x, b, mu0)
                print("i: {0}, j: {1}, t: {2}, loss: {3:.5e}".format(
                    i, j, t, loss))

            t += 1

    solution = x
    loss = loss_func(A, x, b, mu0)

    out = {
        "solution": solution,
        "loss": loss,
        "vars": 2 * n + m,
        "iters": t,
        "conts": iter_len,
        "formal_loss": numpy.array(formal_loss_list),
        "real_loss": numpy.array(real_loss_list),
        "error": numpy.array(error_xx_list),
    }

    return solution, out
Example #3

import numpy

def l1_sub_AdaGrad(x0,
                   A,
                   b,
                   mu,
                   iter_list=(),
                   lr_list=None,
                   mu_list=None,
                   delta=None,
                   res_list=None,
                   sep=0,
                   figure=False,
                   xx=None,
                   **opts):
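    """Subgradient method with AdaGrad step sizes (inferred from the
    function name): `r` presumably accumulates squared subgradients
    and `delta` stabilizes the division inside `iteration`.

    Stage j runs at most `iter_list[j]` steps and, when `res_list`
    is given, exits early once the squared gradient norm drops
    below `res_list[j]`.
    """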
    m, n = A.shape

    mu0 = mu

    x, r = init(n, x0)

    t = 0
    iter_len = len(iter_list)

    formal_loss_list, real_loss_list = [], []
    error_xx_list, grad_norm2_list = [], []

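    # Continuation over stages; the AdaGrad state r persists across
    # stages while lr and mu are reset per stage.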
    for j in range(iter_len):
        lr = lr_list[j]
        mu = mu_list[j]

        for i in range(iter_list[j]):
            x, r, grad_x = iteration(A, x, b, r, mu, lr, delta)

            if figure:
                formal_loss_list.append(loss_func(A, x, b, mu))
                real_loss_list.append(loss_func(A, x, b, mu0))
                if xx is not None:
                    error_xx_list.append(errfun(x, xx))

            if sep != 0 and t % sep == 0:
                loss = loss_func(A, x, b, mu0)
                print("i: {0}, j: {1}, t: {2}, loss: {3:.5e}".format(
                    i, j, t, loss))

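            # Optional early exit from the current stage once the
            # squared subgradient norm falls below the tolerance.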
            if res_list is not None:
                grad_norm2 = numpy.sum(grad_x**2)
                if figure:
                    grad_norm2_list.append(grad_norm2)
                if grad_norm2 < res_list[j]:
                    break

            t += 1

    solution = x
    loss = loss_func(A, x, b, mu0)

    out = {
        "solution": solution,
        "loss": loss,
        "vars": 2 * n,
        "iters": t,
        "conts": iter_len,
        "formal_loss": numpy.array(formal_loss_list),
        "real_loss": numpy.array(real_loss_list),
        "error": numpy.array(error_xx_list),
        "grad_norm2": numpy.array(grad_norm2_list),
    }

    return solution, out
Example #4

import numpy

def l1_fast_smooth_grad(x0,
                        A,
                        b,
                        mu,
                        smooth_func=None,
                        smooth_grad=None,
                        iter_list=(),
                        lr_list=None,
                        mu_list=None,
                        eps_list=None,
                        res_list=None,
                        sep=0,
                        figure=False,
                        xx=None,
                        **opts):
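    """Accelerated ("fast") gradient method on a smoothed l1
    objective: `smooth_func`/`smooth_grad` define the smoothing with
    parameter `eps_list[j]`, and `x_1` carries the previous iterate,
    presumably for the momentum step inside `iteration_fast`.

    With `figure=True`, the unsmoothed objective at the original
    `mu` is tracked as well (via the module-level `no_smooth`).
    """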
    m, n = A.shape

    mu0 = mu

    x = init(x0)
    x_1 = x

    t = 0
    iter_len = len(iter_list)

    formal_loss_list, real_loss_list, orig_loss_list = [], [], []
    error_xx_list, grad_norm2_list = [], []

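    # Continuation over stages: step size, mu, and smoothing
    # parameter eps all follow per-stage schedules.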
    for j in range(iter_len):
        lr = lr_list[j]
        mu = mu_list[j]
        eps = eps_list[j]
        for i in range(iter_list[j]):
            x, x_1, grad_y = iteration_fast(A, x, x_1, b, mu, lr, smooth_grad,
                                            eps, i)

            if figure:
                formal_loss_list.append(
                    loss_func(A, x, b, mu, smooth_func, eps))
                real_loss_list.append(loss_func(A, x, b, mu0, smooth_func,
                                                eps))
                orig_loss_list.append(loss_func(A, x, b, mu0, no_smooth, eps))
                if xx is not None:
                    error_xx_list.append(errfun(x, xx))

            if sep != 0 and t % sep == 0:
                loss = loss_func(A, x, b, mu0, smooth_func, eps)
                print("i: {0}, j: {1}, t: {2}, loss: {3:.5e}".format(
                    i, j, t, loss))

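            # Optional early exit: leave the stage once the squared
            # norm of grad_y falls below the stage tolerance.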
            if res_list is not None:
                grad_norm2 = numpy.sum(grad_y**2)
                if figure:
                    grad_norm2_list.append(grad_norm2)
                if grad_norm2 < res_list[j]:
                    break

            t += 1

    solution = x
    loss = loss_func(A, x, b, mu0, smooth_func, eps)

    out = {
        "solution": solution,
        "loss": loss,
        "vars": n,
        "iters": t,
        "conts": iter_len,
        "formal_loss": numpy.array(formal_loss_list),
        "real_loss": numpy.array(real_loss_list),
        "orig_loss": numpy.array(orig_loss_list),
        "error": numpy.array(error_xx_list),
        "grad_norm2": numpy.array(grad_norm2_list),
    }

    return solution, out