Example 1
import numpy as np

# Fixed-step gradient descent on the negated log marginal likelihood.
# Depends on an external `answers` module providing lml/grad_lml, and on
# module-level Phi, Y, and step_size.
def gd(init, tol, iteration):
    result = [init]
    # Negate lml so maximizing the likelihood becomes a minimization problem.
    guess_eval = -answers.lml(init[0], init[1], Phi, Y)
    guess_grad = -answers.grad_lml(init[0], init[1], Phi, Y)
    guess_grads = [guess_grad]
    guess_evals = [guess_eval]

    # Convergence diagnostics: gradient norm, step length, change in objective.
    convsgd = []
    lenXgd = []
    diffFsd = []

    print("iter={}, func val={}".format(0, guess_evals[0]))
    for i in range(1, iteration):
        # Fixed-step descent update; step_size is taken from module scope.
        guess = result[i - 1] - step_size * guess_grads[i - 1]
        result.append(guess)

        guess_eval = -answers.lml(guess[0], guess[1], Phi, Y)
        guess_grad = -answers.grad_lml(guess[0], guess[1], Phi, Y)

        guess_evals.append(guess_eval)
        guess_grads.append(guess_grad)

        print("iter={}, func val={}".format(i, guess_evals[i]))

        convsgd.append(np.linalg.norm(guess_grad))
        lenXgd.append(np.linalg.norm(result[-1] - result[-2]))
        diffFsd.append(np.abs(guess_evals[-1] - guess_evals[-2]))

        if convsgd[-1] <= tol:
            print("First-Order Optimality Condition met")
            break
        # Alternative stopping criteria, disabled here:
        # elif lenXgd[-1] <= tol:
        #     print("Design not changing")
        #     break
        # elif diffFsd[-1] <= tol:
        #     print("Objective not changing")
        #     break
        elif i + 1 >= iteration:
            print("Done iterating")
            break

    return np.array(result)
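A hypothetical driver for the snippet above; it assumes the `answers` module and the module-level Phi, Y, and step_size that gd() reads are already in place, and the starting point, tolerance, and iteration budget are illustrative:

import numpy as np

step_size = 1e-4                        # fixed step read by gd() above
path = gd(np.array([0.5, 0.5]), 1e-6, 10000)
print(path[-1])                         # last iterate reached: (alpha, beta)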
Example 2
def grad(x, y):
    # Gradient ascent on the log marginal likelihood, drawing each step as an
    # arrow on a pre-existing matplotlib axis `ax`; depends on an external
    # `an` module and on module-level phi and Yd.
    gamma = 0.00005
    running = True
    count = 0
    mlml = 0
    while running:
        s1 = x
        s2 = y
        # Evaluate the gradient once per iteration and step along it.
        g = an.grad_lml(x, y, phi, Yd)
        g1, g2 = g[0], g[1]
        x = x + gamma * g1
        y = y + gamma * g2
        t1 = gamma * g1
        t2 = gamma * g2
        # Draw the step just taken as a small arrow from the previous point.
        ax.arrow(s1, s2, t1, t2, head_width=0.009, head_length=0.01,
                 fc='k', ec='k')

        mlml1 = an.lml(x, y, phi, Yd)

        # Stop once the objective stops changing at all, or after 10000 steps.
        if mlml1 - mlml == 0 or count > 10000:
            if mlml1 - mlml < 0.00001:
                print(mlml1, x, y)
            running = False

        mlml = mlml1
        count = count + 1
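A minimal way to exercise the sketch above, assuming `an`, `phi`, and `Yd` are defined as in the snippet; the figure setup and starting point are illustrative:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()   # grad() draws its arrows on this module-level `ax`
grad(0.5, 0.5)             # trace ascent steps from an arbitrary start
plt.show()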
Example 3
import numpy as np

def gradient_descent(step, step_size, Phi):
    # Gradient ascent on the log marginal likelihood; `step` holds the
    # current (alpha, beta) point and Y is taken from module scope.

    # collect the visited points for plotting
    alphas, betas = [], []

    # run a fixed number of iterations
    for i in range(10000):

        alpha, beta = step
        alphas.append(alpha)
        betas.append(beta)

        # ascent step: move along the positive gradient to maximize lml
        step = step + step_size * grad_lml(alpha, beta, Phi, Y)

    # return the visited points as a 2 x N array for plotting
    return np.column_stack((alphas, betas)).T
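A hypothetical call, assuming grad_lml, Phi, and Y are in scope; the starting point and step size are illustrative:

import numpy as np

path = gradient_descent(np.array([0.5, 0.5]), 1e-4, Phi)
print(path.shape)   # (2, 10000): a row of alphas and a row of betas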
Example 4
import numpy as np

def gd(init, tol, iteration, order):
    # Variant of gd() with an adaptive step: build the design matrix for the
    # requested polynomial order, then minimize the negated lml. Depends on
    # answers, generate_for_tri, X, and Y from module scope.
    Phi = generate_for_tri(X, order)
    result = [init]
    guess_eval = -answers.lml(init[0], init[1], Phi, Y)
    guess_grad = -answers.grad_lml(init[0], init[1], Phi, Y)
    guess_grads = [guess_grad]
    guess_evals = [guess_eval]

    # Pick the initial step by backtracking: halve until the step keeps the
    # iterate positive and does not increase the objective.
    step = 0.01
    while step * guess_grad[0] >= init[0] or step * guess_grad[1] >= init[1]:
        step *= 0.5

    tmp = -answers.lml(init[0] - step * guess_grad[0],
                       init[1] - step * guess_grad[1], Phi, Y)
    while tmp > guess_eval:
        step *= 0.5
        tmp = -answers.lml(init[0] - step * guess_grad[0],
                           init[1] - step * guess_grad[1], Phi, Y)
    step_size = step

    # Convergence diagnostics: gradient norm, step length, change in objective.
    convsgd = []
    lenXgd = []
    diffFsd = []

    print("iter={}, func val={}, alpha={}, beta={}".format(
        0, guess_eval, init[0], init[1]))
    for i in range(1, iteration + 1):
        # Alternative decaying schedule, disabled: step_size /= np.sqrt(i)
        guess = result[i - 1] - step_size * guess_grads[i - 1]
        result.append(guess)

        guess_eval = -answers.lml(guess[0], guess[1], Phi, Y)
        guess_grad = -answers.grad_lml(guess[0], guess[1], Phi, Y)

        guess_evals.append(guess_eval)
        guess_grads.append(guess_grad)

        # Log progress every 1000 iterations while the negated lml is non-negative.
        if guess_eval >= 0 and i % 1000 == 0:
            print("iter={}, func val={}, alpha={}, beta={}".format(
                i, guess_eval, guess[0], guess[1]))

        convsgd.append(np.linalg.norm(guess_grad))
        lenXgd.append(np.linalg.norm(result[-1] - result[-2]))
        diffFsd.append(np.abs(guess_evals[-1] - guess_evals[-2]))

        if convsgd[-1] <= tol:
            print("First-Order Optimality Condition met")
            break
        elif lenXgd[-1] <= tol:
            print("Design not changing")
            break
        # Alternative stopping criterion, disabled here:
        # elif diffFsd[-1] <= 0:
        #     print("Objective not changing")
        #     break
        elif i + 1 >= iteration:
            print("Done iterating")
            break

        # Backtracking line search for the next step: keep the iterate
        # positive, then halve until an Armijo-style sufficient-decrease test
        # holds (c = 0.01, m = gradient norm).
        step = 0.1
        while (step * guess_grad[0] > guess[0]
               or step * guess_grad[1] > guess[1]):
            step *= 0.5

        m = np.linalg.norm(guess_grad)
        tmp = -answers.lml(guess[0] - step * guess_grad[0],
                           guess[1] - step * guess_grad[1], Phi, Y)
        while tmp > guess_eval - step * 0.01 * m:
            step *= 0.5
            tmp = -answers.lml(guess[0] - step * guess_grad[0],
                               guess[1] - step * guess_grad[1], Phi, Y)
        step_size = step

    print("iter={}, func val={}, alpha={}, beta={}".format(
        i, guess_eval, guess[0], guess[1]))
    return [np.array(result), guess_eval]
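The distinguishing piece of Example 4 is its step-size rule. Below is a self-contained sketch of that backtracking loop pulled out as its own function; the function name, defaults, and the toy quadratic are illustrative, not from the original:

import numpy as np

def backtracking_step(f, x, g, step=1.0, c=0.01, shrink=0.5):
    # Halve `step` until f(x - step * g) satisfies the same Armijo-style
    # sufficient-decrease test used in the example (with m = ||g||).
    fx = f(x)
    m = np.linalg.norm(g)
    while f(x - step * g) > fx - c * step * m:
        step *= shrink
    return step

# Toy check on f(x) = ||x||^2, whose gradient is 2x: the accepted step
# should move the iterate strictly closer to the minimizer at the origin.
x = np.array([3.0, -2.0])
g = 2 * x
s = backtracking_step(lambda z: z @ z, x, g)
print(s, np.linalg.norm(x - s * g) < np.linalg.norm(x))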