Example #1
def minvar_nls_nlsq_multi(C_list, alpha_list, trace, d0, d_min, d_max,
                          upper_bound):
    """
    Solve an Non-linear LS problem via SLSQP.

    Allows for additive objective function with multiple C matrices and alpha
    vectors.
    """

    N = len(alpha_list[0])

    def obj(d):
        val = sum([f(d, alpha, C) for C, alpha in zip(C_list, alpha_list)])
        return val

    def grad(d):
        val = sum(
            [f_grad(d, alpha, C) for C, alpha in zip(C_list, alpha_list)])
        return val

    if upper_bound:
        bounds = [(d_min, d_max) for _ in range(N)]
    else:
        bounds = [(d_min, None) for _ in range(N)]

    if trace is None:
        trace_con, trace_con_grad = None, None
    else:

        def trace_con(d):
            return np.sum(d) - trace

        g_trace_con = np.ones(N)

        def trace_con_grad(d):
            return g_trace_con

    from scipy.sparse import diags

    # First-difference matrix: (G d)_i = d_i - d_{i+1}; G d >= 0 keeps d
    # non-increasing.
    G = diags([1, -1], [0, 1], (N - 1, N)).toarray()

    def monoton_con(d):
        return G.dot(d)

    def monoton_con_grad(d):
        return G

    from scipy.optimize import fmin_slsqp

    x = fmin_slsqp(obj,
                   d0,
                   fprime=grad,
                   f_eqcons=trace_con,
                   fprime_eqcons=trace_con_grad,
                   f_ieqcons=monoton_con,
                   fprime_ieqcons=monoton_con_grad,
                   bounds=bounds,
                   iprint=0)

    return x
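All five examples call module-level helpers f, f_grad (and, in the transformed variants, Ginv) that are not shown on this page. Purely for illustration, here is a minimal sketch assuming a plain quadratic residual f(d, alpha, C) = ||C d - alpha||^2; the real objective in the source module may differ.

import numpy as np

# Hypothetical stand-ins for the unshown helpers f and f_grad,
# assuming a plain quadratic residual.  Illustration only.
def f(d, alpha, C):
    r = C.dot(d) - alpha              # residual vector
    return r.dot(r)                   # squared 2-norm of the residual

def f_grad(d, alpha, C):
    # Gradient of ||C d - alpha||^2 with respect to d.
    return 2.0 * C.T.dot(C.dot(d) - alpha)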
Example #2
def minvar_nlsq_multi(C, alpha, trace, d0, d_min, d_max,
                      mono, upper_bound):
    """
    Solve an Non-linear LS problem via SLSQP.

    Allows for additive objective function with multiple C matrices and alpha
    vectors.
    """

    N = len(alpha[0])

    def obj(d):
        val = sum([
            f(d, _alpha, _C)
            for _C, _alpha in zip(C, alpha)
        ])
        return val

    def grad(d):
        val = sum([
            f_grad(d, _alpha, _C)
            for _C, _alpha in zip(C, alpha)
        ])
        return val

    if upper_bound:
        bounds = [(d_min, d_max) for _ in range(N)]
    else:
        bounds = [(d_min, None) for _ in range(N)]

    if trace is None:
        trace_con, trace_con_grad = None, None
    else:
        def trace_con(d):
            return np.sum(d) - trace

        g_trace_con = np.ones(N)

        def trace_con_grad(d):
            return g_trace_con

    if mono:
        from scipy.sparse import diags

        # First-difference matrix: (G d)_i = d_i - d_{i+1} (monotonicity).
        G = diags([1, -1], [0, 1], (N - 1, N)).toarray()

        def monoton_con(d):
            return G.dot(d)

        def monoton_con_grad(d):
            return G
    else:
        monoton_con, monoton_con_grad = None, None

    from scipy.optimize import fmin_slsqp

    d_star = fmin_slsqp(
        obj, d0, fprime=grad,
        f_eqcons=trace_con, fprime_eqcons=trace_con_grad,
        f_ieqcons=monoton_con, fprime_ieqcons=monoton_con_grad,
        bounds=bounds, iprint=1, iter=500)

    return d_star
Example #3
def minvar_nls_nlsq(C, alpha, trace, d0, d_min, d_max, upper_bound=True):
    """Solve an Non-linear LS problem via SLSQP."""

    N = len(alpha)

    def obj(d):
        return f(d, alpha, C)

    def grad(d):
        return f_grad(d, alpha, C)

    if upper_bound:
        bounds = [(d_min, d_max) for _ in range(N)]
    else:
        bounds = [(d_min, None) for _ in range(N)]

    if trace is None:
        trace_con, trace_con_grad = None, None
    else:

        def trace_con(d):
            return np.sum(d) - trace

        g_trace_con = np.ones(N)

        def trace_con_grad(d):
            return g_trace_con

    from scipy.sparse import diags

    # First-difference matrix: (G d)_i = d_i - d_{i+1}; G d >= 0 keeps d
    # non-increasing.
    G = diags([1, -1], [0, 1], (N - 1, N)).toarray()

    def monoton_con(d):
        return G.dot(d)

    def monoton_con_grad(d):
        return G

    from scipy.optimize import fmin_slsqp

    x = fmin_slsqp(obj,
                   d0,
                   fprime=grad,
                   f_eqcons=trace_con,
                   fprime_eqcons=trace_con_grad,
                   f_ieqcons=monoton_con,
                   fprime_ieqcons=monoton_con_grad,
                   bounds=bounds,
                   iprint=0)

    return x
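The inequality constraint built from diags([1, -1], [0, 1], (N - 1, N)) encodes (G d)_i = d_i - d_{i+1} >= 0, i.e. it forces d to be non-increasing. A quick check for N = 4:

import numpy as np
from scipy.sparse import diags

N = 4
G = diags([1, -1], [0, 1], (N - 1, N)).toarray()
# G == [[ 1., -1.,  0.,  0.],
#       [ 0.,  1., -1.,  0.],
#       [ 0.,  0.,  1., -1.]]
d = np.array([4.0, 3.0, 2.0, 2.0])
print(G.dot(d))  # [1. 1. 0.] -> all >= 0, so d is non-increasing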
Example #4
def minvar_nlsq_multi_transformed(C, alpha, trace, d0,
                                  d_min, d_max, upper_bound):
    """
    Solve an Non-linear LS problem via SLSQP.

    Allows for additive objective function with multiple C matrices and alpha
    vectors.

    Uses a transformed version of the non-linear optimization problem to get rid
    of the N-1 difference constraints.
    """

    N = len(alpha[0])
    K = len(alpha)
    rho = 1.
    z0 = d0  # alternative: G(d0) / rho
    Lambda = np.ones(N)  # alternative: 1 / (1 + z0)
    y0 = z0 * Lambda

    def obj(y):
        z = y / Lambda
        d = rho * Ginv(z)
        val = sum([
            f(d, _alpha, _C)
            for _C, _alpha in zip(C, alpha)
        ]) / K
        return val

    def grad(y):
        z = y / Lambda
        d = rho * Ginv(z)
        val = sum([
            f_grad(d, _alpha, _C)
            for _C, _alpha in zip(C, alpha)
        ]) / K
        return rho * Ginv(val, transpose=True) / Lambda

    bounds = ([(0., None) for _ in range(N - 1)]
              + [(d_min / rho * Lambda[-1], None)])

    # Linear pullback of the trace constraint: sum(d) == v . y.
    v = rho * Ginv(np.ones(N), transpose=True) / Lambda

    def trace_con(y):
        return v.T.dot(y) - trace

    def trace_con_grad(y):
        return v

    if upper_bound:
        u = rho * np.ones(N) / Lambda

        def ub_con(y):
            return d_max - u.T.dot(y)

        def ub_con_grad(y):
            return -u
    else:
        ub_con, ub_con_grad = None, None

    from scipy.optimize import fmin_slsqp

    y_star = fmin_slsqp(
        obj, y0, fprime=grad,
        f_eqcons=trace_con, fprime_eqcons=trace_con_grad,
        f_ieqcons=ub_con, fprime_ieqcons=ub_con_grad,
        bounds=bounds, iprint=1, iter=2000)

    z_star = y_star / Lambda
    d_star = rho * Ginv(z_star)
    return d_star
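Ginv is likewise not shown on this page. Given the bounds used in the transformed variants (the first N - 1 entries of z are non-negative differences, the last entry is the smallest d), a plausible reconstruction is z_i = d_i - d_{i+1} for i < N - 1 and z_{N-1} = d_{N-1}, so inverting the transform is a reversed cumulative sum and its transpose-inverse a plain cumulative sum. A sketch under that assumption, not the original helper:

import numpy as np

def Ginv(z, transpose=False):
    # Assumed inverse of z_i = d_i - d_{i+1} (i < N-1), z_{N-1} = d_{N-1}.
    if transpose:
        return np.cumsum(z)               # (G^{-1})^T applied to z
    return np.cumsum(z[::-1])[::-1]       # d_i = sum_{j >= i} z_j

Consistency check: with this reading (and rho = 1, Lambda = 1), v = Ginv(np.ones(N), transpose=True) equals [1, 2, ..., N], so the equality constraint v.T.dot(y) - trace reproduces sum(d) = trace.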
Example #5
def minvar_nlsq_multi_transformed(C, alpha, trace, d0, d_min, d_max,
                                  upper_bound):
    """
    Solve an Non-linear LS problem via SLSQP.

    Allows for additive objective function with multiple C matrices and alpha
    vectors.

    Uses a transformed version of the non-linear optimization problem to get rid
    of the N-1 difference constraints.
    """

    K = len(alpha)
    N = len(alpha[0])

    rho = 1.

    def obj(z):
        d = rho * Ginv(z)
        val = sum([f(d, _alpha, _C) for _C, _alpha in zip(C, alpha)]) / K
        return val

    def grad(z):
        d = rho * Ginv(z)
        val = sum([f_grad(d, _alpha, _C) for _C, _alpha in zip(C, alpha)]) / K
        return rho * Ginv(val, transpose=True)

    bounds = [(0., None) for _ in range(N - 1)] + [(d_min / rho, None)]

    v = rho * Ginv(np.ones(N), transpose=True)

    def trace_con(z):
        return v.T.dot(z) - trace

    def trace_con_grad(z):
        return v

    if upper_bound:
        u = rho * np.ones(N)

        def ub_con(z):
            return d_max - u.T.dot(z)

        def ub_con_grad(z):
            return -u
    else:
        ub_con, ub_con_grad = None, None

    from scipy.optimize import fmin_slsqp

    z_star = fmin_slsqp(obj,
                        d0,  # reused directly as the z-space starting point
                        fprime=grad,
                        f_eqcons=trace_con,
                        fprime_eqcons=trace_con_grad,
                        f_ieqcons=ub_con,
                        fprime_ieqcons=ub_con_grad,
                        bounds=bounds,
                        iprint=1,
                        iter=2000)

    d_star = rho * Ginv(z_star)
    return d_star
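A hedged end-to-end call using the hypothetical f, f_grad and Ginv sketches above; all names and sizes are illustrative. Note that d0 is passed straight to fmin_slsqp as the starting point in z-space (cf. z0 = d0 in the previous example), so any vector satisfying the z bounds works:

import numpy as np

rng = np.random.default_rng(0)
N, K = 5, 2
C = [rng.standard_normal((N, N)) for _ in range(K)]
alpha = [rng.standard_normal(N) for _ in range(K)]

d_star = minvar_nlsq_multi_transformed(
    C, alpha, trace=float(N), d0=np.ones(N),
    d_min=1e-6, d_max=10.0, upper_bound=True)
# d_star should come back non-increasing and summing to the requested trace.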