import numpy as np
import scipy.linalg as sla

import oracle  # project-local module providing BaseSmoothOracle


def do_nonsmooth_gnm(func, dimension, x0=None, max_iter=1000, tolerance=2e-3, eta=5e-1, magic_const=100):
    # Randomized nonsmooth Gauss-Newton: at each iteration, sample
    # magic_const random trial steps and keep the one with the smallest
    # proximal model value. (eta is unused in the three randomized solvers
    # below; only do_newton applies it, as a step-size damping factor.)
    losses = []  # per-iteration loss history (recorded but never returned)
    if x0 is None:
        x0 = np.array([0.57179] * dimension, dtype=np.float64)
    oracle_ = oracle.BaseSmoothOracle(func)

    for i in range(max_iter):
        best_y = None
        best_y_loss = None
        L = 1
        for j in range(magic_const):
            # Sample a random direction (componentwise uniform in [0, 1),
            # so it lies in the positive orthant), rescale it to ||x0||,
            # and shrink it as the iterations progress.
            addend = np.random.rand(*x0.shape)
            addend /= np.linalg.norm(addend)
            addend *= np.linalg.norm(x0)
            addend /= (i ** 1.5 + 15)

            # Proximal model at x0: (L/2)||h||^2 + ||F(x0) + F'(x0) h||,
            # where the linearization is a matrix-vector product.
            loss = (L / 2) * np.linalg.norm(addend) ** 2
            loss += np.linalg.norm(oracle_.func(x0) + oracle_.grad(x0) @ addend)

            # Keep the candidate with the smallest model value.
            if best_y_loss is None or best_y_loss > loss:
                best_y = x0 + addend
                best_y_loss = loss

        if np.linalg.norm(best_y - x0) < tolerance:
            break
        x0 = best_y

        loss = np.linalg.norm(oracle_.func(x0))
        print("point:", x0, "loss:", loss, "iter:", i)
        losses.append(loss)

    return x0
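
# These solvers rely on a project-local `oracle.BaseSmoothOracle` that wraps
# `func` and exposes `func`, `grad`, and (for do_newton below) `hess`. That
# module is not part of this listing; purely as an assumption for local
# testing, a minimal finite-difference stand-in might look like the class
# below. The [0] indexing in do_newton suggests the real oracle returns
# arrays with a leading batch axis, so grad/hess mimic that here.
class FiniteDiffOracle:
    """Hypothetical stand-in for oracle.BaseSmoothOracle (assumed API)."""

    def __init__(self, f, eps=1e-5):
        self._f = f
        self.eps = eps

    def func(self, x):
        return self._f(x)

    def grad(self, x):
        # Central-difference gradient, returned with shape (1, n).
        g = np.zeros(x.size)
        for idx in range(x.size):
            e = np.zeros(x.size)
            e[idx] = self.eps
            g[idx] = (self._f(x + e) - self._f(x - e)) / (2 * self.eps)
        return g[np.newaxis, :]

    def hess(self, x):
        # Finite differences of the gradient, returned with shape (1, n, n).
        n = x.size
        H = np.zeros((n, n))
        for j in range(n):
            e = np.zeros(n)
            e[j] = self.eps
            H[:, j] = (self.grad(x + e)[0] - self.grad(x - e)[0]) / (2 * self.eps)
        return H[np.newaxis, :, :]

# Patching it in (oracle.BaseSmoothOracle = FiniteDiffOracle) in a scratch
# session is enough to exercise do_nonsmooth_gnm and do_newton end to end.
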
def do_implementable_three_squares(func,
                                   dimension,
                                   x0=None,
                                   max_iter=1000,
                                   tolerance=2e-3,
                                   eta=5e-1,
                                   magic_const=100):
    losses = []
    if x0 is None:
        x0 = np.array([0.57179] * dimension, dtype=np.float64)
    oracle_ = oracle.BaseSmoothOracle(func)

    L_0 = 0.5  # initial guess for the Lipschitz estimate (or what should it be?)
    for k in range(max_iter):
        # Backtracking on the Lipschitz estimate: double it until the model
        # value at the best candidate bounds the true loss from above.
        i_k = 0
        while True:
            best_y, best_loss = optimize_psi_cup(x0, L_0 * (2**i_k), k,
                                                 oracle_, magic_const)
            if np.linalg.norm(oracle_.func(best_y)) <= best_loss:
                break
            i_k += 1
        if np.linalg.norm(best_y - x0) < tolerance:
            break
        x0 = best_y
        loss = np.linalg.norm(oracle_.func(x0))
        print("point:", x0, "loss:", loss, "iter:", k)
        losses.append(loss)
        L_0 *= 2**(i_k - 1)  # carry over half of the accepted estimate
    return x0
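
# optimize_psi_cup is defined elsewhere in the project and is not part of
# this listing. Judging from the inner loop of do_nonsmooth_gnm above, it
# plausibly samples random steps and returns the minimizer of the same
# proximal model under the given Lipschitz estimate. A hypothetical
# reconstruction under that assumption (not the project's actual helper):
def optimize_psi_cup_sketch(x0, L, k, oracle_, magic_const):
    best_y, best_loss = None, None
    for _ in range(magic_const):
        # Shrinking random step, mirroring do_nonsmooth_gnm.
        h = np.random.rand(*x0.shape)
        h /= np.linalg.norm(h)
        h *= np.linalg.norm(x0) / (k ** 1.5 + 15)
        # Model value: (L/2)||h||^2 + ||F(x0) + F'(x0) h||.
        loss = (L / 2) * np.linalg.norm(h) ** 2
        loss += np.linalg.norm(oracle_.func(x0) + oracle_.grad(x0) @ h)
        if best_loss is None or loss < best_loss:
            best_y, best_loss = x0 + h, loss
    return best_y, best_loss
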
def do_norm_squares(func,
                    dimension,
                    m,
                    x0=None,
                    max_iter=1000,
                    tolerance=2e-3,
                    eta=5e-1,
                    magic_const=100):
    losses = []
    if x0 is None:
        x0 = np.array([0.57179] * dimension, dtype=np.float64)
    oracle_ = oracle.BaseSmoothOracle(func)

    L = 1  # fixed model constant; no backtracking in this variant
    for i in range(max_iter):
        # optimize_func is a project helper not shown in this listing.
        best_y, best_y_loss = optimize_func(x0, L, m, i, oracle_, magic_const)

        if np.linalg.norm(best_y - x0) < tolerance:
            break
        x0 = best_y

        loss = np.linalg.norm(oracle_.func(x0))
        print("point:", x0, "loss:", loss, "iter:", i)
        losses.append(loss)

    return x0
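
# With the stand-in oracle patched in, do_nonsmooth_gnm is the only solver
# so far that runs self-contained (the other two randomized variants need
# the project's optimize_psi_cup / optimize_func helpers). A small smoke
# test; the test function and seed are arbitrary choices:
def _smoke_test_gnm():
    np.random.seed(0)  # the solver samples random steps
    oracle.BaseSmoothOracle = FiniteDiffOracle  # assumed-compatible monkeypatch
    g = lambda x: abs(float(np.sum(x ** 2)) - 1.0)  # zero on the unit sphere
    x_hat = do_nonsmooth_gnm(g, dimension=2, max_iter=200)
    print("residual at solution:", g(x_hat))
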
def do_newton(func, dimension, x0=None, max_iter=1000, tolerance=1e-4, eta=5e-1):
    losses = []
    if x0 is None:
        x0 = np.array([0.57179] * dimension, dtype=np.float64)
    oracle_ = oracle.BaseSmoothOracle(func)

    for i in range(max_iter):
        loss = oracle_.func(x0)
        print("point:", x0, "loss:", loss, "iter:", i)
        losses.append(loss)

        # Damped Newton step: x1 = x0 - eta * H(x0)^{-1} grad(x0).
        # The oracle apparently returns arrays with a leading batch axis,
        # hence the [0] unpacking and the shape-aligning transposes.
        hess = np.array(oracle_.hess(x0)[0])
        hess_inv = sla.inv(hess)
        grad = np.array(oracle_.grad(x0))
        addend = np.dot(hess_inv, grad.T).T
        x1 = (x0 - eta * addend)[0]

        if np.linalg.norm(x1 - x0) < tolerance:
            break

        x0 = x1

    return x0
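
# A quick sanity check for do_newton, assuming the FiniteDiffOracle
# stand-in above is patched in for oracle.BaseSmoothOracle: on the smooth
# quadratic f(x) = ||x - 1||^2 with eta=1.0, a single full Newton step
# should land on the minimizer, the all-ones vector.
def _smoke_test_newton():
    oracle.BaseSmoothOracle = FiniteDiffOracle  # assumed-compatible monkeypatch
    f = lambda x: float(np.sum((x - 1.0) ** 2))
    x_star = do_newton(f, dimension=3, eta=1.0)
    print("converged to:", x_star)  # expected: approximately [1, 1, 1]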