Example #1
def method_newton():
    step = 0
    eps = 0.001
    grad = []        # gradient value
    invert_h = [[0.174, 0.0449, -0.0112],       # inverse Hessian matrix
                [0.0449, 0.27, -0.0674],
                [-0.0112, -0.0674, 0.517]]

    x0 = [0, 0, 0]          # initial guess
    x = [0, 0, 0]
    # gradient at the initial guess
    grad = gradient(x0)
    table.add_row([step, x.copy(), function(x), norm(grad)])

    # first Newton step: x = x0 - H^{-1} * grad
    multi = matrix_multi(invert_h, grad)
    for i in range(3):
        x[i] = x0[i] - multi[i]
    step += 1
    grad = gradient(x)
    table.add_row([step, x.copy(), function(x), norm(grad)])
    while norm(gradient(x)) >= eps:
        x0 = x.copy()
        grad = gradient(x0)
        multi = matrix_multi(invert_h, grad)
        for i in range(3):
            x[i] = x0[i] - multi[i]
        step += 1
        table.add_row([step, x.copy(), function(x), norm(gradient(x))])
    print(table)
    return
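
Examples #1, #2, #5, #6, and #7 all call the same small helpers (function, gradient, norm, matrix_multi) and log into a shared table, none of which are included in the excerpts. The sketch below is a guess at their shape, not the author's code: table is assumed to be a PrettyTable, and the quadratic is a placeholder chosen so that its Hessian is (approximately) the inverse of the invert_h hard-coded above; the linear term is arbitrary, since the original objective is not shown.

import math
from prettytable import PrettyTable

# Assumed shared state: the iteration log the examples print.
table = PrettyTable(["step", "x", "f(x)", "||grad f(x)||"])

def function(x):
    # Placeholder quadratic; its Hessian [[6, -1, 0], [-1, 4, 0.5], [0, 0.5, 2]]
    # approximately inverts to the invert_h used in method_newton(), but the
    # linear term -x[0] is arbitrary since the original objective is not shown.
    return (3 * x[0]**2 + 2 * x[1]**2 + x[2]**2
            - x[0] * x[1] + 0.5 * x[1] * x[2] - x[0])

def gradient(x):
    # Analytic gradient of the placeholder objective.
    return [6 * x[0] - x[1] - 1,
            -x[0] + 4 * x[1] + 0.5 * x[2],
            0.5 * x[1] + 2 * x[2]]

def norm(v):
    # Euclidean norm of a plain-list vector.
    return math.sqrt(sum(c * c for c in v))

def matrix_multi(a, v):
    # 3x3 matrix times 3-vector, both given as plain lists.
    return [sum(a[i][j] * v[j] for j in range(3)) for i in range(3)]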
Example #2
def con_grad():
    eps = 0.001
    x = [0, 0, 0]
    x_prev = x.copy()
    k = 0
    d = [0, 0, 0]
    grad = gradient(x)

    table.add_row([k, x.copy(), function(x), norm(grad)])
    while norm(grad) >= eps:
        if k == 0:
            d[0] = -grad[0]
            d[1] = -grad[1]
            d[2] = -grad[2]
        else:
            # Fletcher-Reeves coefficient
            b = norm(gradient(x))**2 / norm(gradient(x_prev))**2
            for i in range(3):
                d[i] = -grad[i] + b * d[i]
        t = mint(-10, 10, eps, x.copy())
        x_prev = x.copy()
        for i in range(3):
            x[i] = x[i] + t * d[i]
        grad = gradient(x)
        k = k + 1
        table.add_row([k, x.copy(), function(x), norm(grad)])
    print(table)
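
con_grad(), fastest(), and gauss_seidel() also rely on a one-dimensional search mint(a, b, eps, x) that is not shown. A minimal sketch, assuming mint is a dichotomy search for the step length along the negative gradient (which matches how fastest() uses the result; the other callers presumably run the same search along their own directions):

def mint(a, b, eps, x):
    # Assumed behaviour: dichotomy search for the step t in [a, b] minimizing
    # phi(t) = function(x - t * gradient(x)); the callers' exact search
    # direction is not shown, so this sign convention is an assumption.
    grad = gradient(x)

    def phi(t):
        return function([x[i] - t * grad[i] for i in range(3)])

    delta = eps / 4
    while b - a > eps:
        t1 = (a + b) / 2 - delta
        t2 = (a + b) / 2 + delta
        if phi(t1) <= phi(t2):
            b = t2
        else:
            a = t1
    return (a + b) / 2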
Example #3
def handle_bar(timer, data, info, init_cash, transaction, detail_last_min,
               memory):
    if timer == 0:
        memory.status = 0  # -1, stop loss; 0, ready; 1, open

    position_new = detail_last_min[0]

    if memory.status == -1:
        if (timer - memory.stop_loss_time) % 120 == 0:
            memory.status = 0
    elif memory.status == 0:  # ready to open

        grd = [0, 0]
        grd[0] = gradient(asset_index1, data)
        grd[1] = gradient(asset_index2, data)
        delta = grd[0] - grd[1]

        if abs(delta) > 3 * transaction:  # just to ensure interest

            current_trend = trend(asset_index1, asset_index2, grd,
                                  data)  # trend detection
            index = decision(delta, current_trend, asset_index1,
                             asset_index2)  # item decision
            operate_index = index[0]
            target_index = index[1]
            memory.operate_price = np.mean(data[operate_index, 0:3])
            memory.target_price = np.mean(data[target_index, 0:3])
            memory.operate_index = operate_index
            memory.target_index = target_index
            memory.operate_time = timer
            memory.trend = current_trend

            position_new = operation(data, detail_last_min, info,
                                     operate_index, transaction, current_trend,
                                     underwear)
            memory.position = position_new
            memory.top = detail_last_min[4] - underwear
            memory.total = detail_last_min[4]
            memory.status = 1
    else:
        memory.top = peak_update(detail_last_min, memory)
        if (detail_last_min[4] - memory.top) / memory.top < -0.1:  # stop loss
            position_new = np.zeros(13, dtype=int)
            memory.stop_loss_time = timer
            memory.status = -1
            print("stop loss")

        if close(detail_last_min, memory, timer, data):  # close out
            position_new = np.zeros(13, dtype=int)
            memory.status = 0
            if timer > 8300:
                print('deal')

    return position_new, memory
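
handle_bar() depends on several strategy helpers (trend, decision, operation, close, peak_update) and globals (asset_index1, asset_index2, underwear) that are not included here. Only peak_update is simple enough to sketch; the version below is a guess that tracks the running equity peak used by the 10% trailing stop, assuming detail_last_min[4] is the current account value:

def peak_update(detail_last_min, memory):
    # Hypothetical helper: keep the highest account value seen since the
    # position was opened, so the trailing stop has a reference point.
    return max(memory.top, detail_last_min[4])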
Example #4
def logir_sgd(dataset, l_rate=0.01):
    '''
    Runs the logistic regression learning algorithm with stochastic gradient
    descent on a labeled sample.
    '''
    # initialize w with zeros
    w = np.matrix([0] + [0 for _ in dataset[0]]).transpose()

    # initialize the auxiliary variable w_old with ones so the loop below
    # runs on the first iteration
    w_old = np.matrix([1] + [1 for _ in dataset[0]]).transpose()

    epochs_count = 0

    # loop stops when ||w^(t-1) - w^(t)|| < 0.01
    while np.linalg.norm(w_old - w) >= 0.01:

        w_old = w

        # generate a shuffled list of dataset indices
        indexes = list(range(len(dataset)))
        random.shuffle(indexes)

        for idx in indexes:
            dtn = dataset[idx]

            w = w - l_rate * gradient(w, dtn)

        epochs_count += 1

    return dict(w=w, epochs=epochs_count)
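
The per-sample gradient(w, dtn) is not shown. A minimal sketch, assuming the loss is the usual logistic loss ln(1 + exp(-y * w.T @ x)) and that each row dtn stores the feature values followed by a label y in {-1, +1} (the original packing of the dataset is not given, so the unpacking here is illustrative):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def gradient(w, dtn):
    # Hypothetical single-sample logistic-loss gradient; a leading 1 is added
    # to the features for the bias term.
    y = dtn[-1]
    x = np.matrix([1.0] + list(dtn[:-1])).transpose()
    return -y * sigmoid(-y * float(w.transpose() * x)) * x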
Example #5
def fastest():
    # steepest descent: a 1-D line search picks the step length t along the
    # negative gradient at each iteration
    eps = 0.001
    x = [0, 0, 0]
    step = 0
    grad = gradient(x)
    table.add_row([step, x.copy(), function(x), norm(grad)])

    while norm(grad) >= eps:
        t = mint(-10, 10, 0.01, x)
        for i in range(3):
            x[i] = x[i] - t * grad[i]
        step += 1
        grad = gradient(x)
        table.add_row([step, x.copy(), function(x), norm(grad)])
    print(table)
    return
Example #6
def gauss_seidel():
    n = 3
    eps = 0.001
    e = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
    x = [0, 0, 0]
    j = 0
    table.add_row([j, x.copy(), function(x), norm(gradient(x))])
    while True:
        k = 0
        print(x)
        while k <= n - 1:
            grad = gradient(x)
            if norm(grad) < eps:
                print(table)
                return
            else:
                t = mint(-10, 10, eps, x.copy())
                for i in range(3):
                    x[i] = x[i] - t * derivatives(x, k) * e[k][i]
                k = k + 1
        j = j + 1
        table.add_row([j, x.copy(), function(x), norm(gradient(x))])
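
gauss_seidel() (coordinate descent along the unit vectors e[k]) additionally calls derivatives(x, k) for the k-th partial derivative, which is not shown. A central-difference stand-in, assuming the same function() as in the sketch after Example #1:

def derivatives(x, k, h=1e-6):
    # Assumed helper: k-th partial derivative of function() at x, estimated
    # here with a central difference since the original is not shown.
    x_plus, x_minus = x.copy(), x.copy()
    x_plus[k] += h
    x_minus[k] -= h
    return (function(x_plus) - function(x_minus)) / (2 * h)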
Example #7
def droblenie():
    # gradient descent with step splitting ("droblenie shaga"): the trial step
    # t is shrunk by the factor c until the new point decreases the objective
    eps = 0.001
    c = 0.25
    x = [0, 0, 0]
    x_new = x.copy()
    step = 0
    t = 2
    grad = gradient(x)
    table.add_row([step, x.copy(), function(x), norm(grad)])

    while norm(grad) >= eps:
        for i in range(3):
            x_new[i] = x[i] - t * grad[i]
        while function(x_new) >= function(x):
            t = c * t
            for i in range(3):
                x_new[i] = x[i] - t * grad[i]
        step += 1
        grad = gradient(x_new)
        x = x_new.copy()
        table.add_row([step, x.copy(), function(x), norm(grad)])
    print(table)
    return
Example #8
def gradient_descent(data_matrix, label_matrix, alpha, max_iter_numbers):
    step_alpha = alpha
    epoches = max_iter_numbers

    m, n = np.shape(data_matrix)
    theta = np.zeros((n, 1))  # Initial theta
    cost_vector = []

    for epoch in range(epoches):
        cost_j = cost_function(theta, data_matrix, label_matrix)
        cost_vector.append(cost_j[0, 0])
        # ascent step on the log-likelihood (despite the function's name)
        theta = theta + step_alpha * gradient(theta, data_matrix, label_matrix)

    cost_j = cost_function(theta, data_matrix, label_matrix)
    cost_vector.append(cost_j[0, 0])

    return theta, cost_vector
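
Examples #8, #9, and #11 share cost_function/cost_func and a gradient(theta, X, y) helper that are not shown. A sketch under the assumption that the model is logistic regression and the quantity being climbed is the log-likelihood (note that gradient_descent() above actually takes an ascent step): inputs are taken to be np.mat matrices with 0/1 labels in an m x 1 column.

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def cost_function(theta, data_matrix, label_matrix):
    # Hypothetical log-likelihood, returned as a 1x1 matrix so the callers'
    # cost_j[0, 0] indexing works. cost_func in Examples #9 and #11 is assumed
    # to be the same thing under a different name.
    h = sigmoid(data_matrix * theta)
    return label_matrix.T * np.log(h) + (1 - label_matrix).T * np.log(1 - h)

def gradient(theta, data_matrix, label_matrix):
    # Gradient of the log-likelihood above; adding a multiple of it to theta
    # is gradient ascent on the likelihood.
    h = sigmoid(data_matrix * theta)
    return data_matrix.T * (label_matrix - h)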
Example #9
def gradient_ascent(data_array, label_array, step_alpha, max_iter_numbers):
    data_matrix = np.mat(data_array)
    label_matrix = np.mat(label_array)

    m, n = np.shape(data_matrix)

    theta = np.zeros((n, 1))  # Initial theta
    alpha = step_alpha  # Step for gradient ascent
    epoches = max_iter_numbers  # Times for Iteration
    cost_vector = []

    for i in range(epoches):
        cost_j = cost_func(theta, data_matrix, label_matrix)
        cost_vector.append(cost_j[0, 0])
        theta = theta + alpha * gradient(theta, data_matrix, label_matrix)
        # Gradient Ascent

    cost_j = cost_func(theta, data_matrix, label_matrix)
    cost_vector.append(cost_j[0, 0])

    return theta, cost_vector
def stochastic_gradient_ascent(data_array, label_array, step_alpha,
                               max_iter_numbers):
    data_matrix = np.mat(data_array)
    label_matrix = np.mat(label_array)

    (m, n) = np.shape(data_matrix)
    theta = np.zeros((n, 1))  # Initial theta
    alpha = step_alpha  # Step for gradient ascent
    epoches = max_iter_numbers

    cost_vector = []

    for i in range(epoches):
        r_index = random.randint(0, m - 1)  # random
        cost_j = cost_func(theta, data_matrix, label_matrix)
        cost_vector.append(cost_j[0, 0])
        theta = theta + alpha * gradient(theta, data_matrix[r_index],
                                         label_matrix[r_index])

    cost_j = cost_func(theta, data_matrix, label_matrix)
    cost_vector.append(cost_j[0, 0])

    return theta, cost_vector
Example #11
def newton_method(data_array, label_array, max_iter_count):
    data_matrix = np.mat(data_array)
    label_matrix = np.mat(label_array)

    (m, n) = np.shape(data_matrix)

    theta = np.zeros((n, 1))  # Initial theta

    epoches = max_iter_count  # Times for Iteration

    cost_vector = []

    for i in range(epoches):
        gradient_v = gradient(theta, data_matrix, label_matrix)
        hessian_mat = hessian_matrix(theta, data_matrix, label_matrix)
        cost_j = cost_func(theta, data_matrix, label_matrix)
        cost_vector.append(cost_j[0, 0])
        theta = theta + (hessian_mat.I * gradient_v / m)

    cost_j = cost_func(theta, data_matrix, label_matrix)
    cost_vector.append(cost_j[0, 0])

    return theta, cost_vector
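
newton_method() additionally needs hessian_matrix(). Because the update adds hessian_mat.I * gradient_v, hessian_mat is presumably the negative Hessian of the log-likelihood (the Fisher information), which is positive semi-definite. A sketch consistent with the helpers above, offered only as an assumption:

def hessian_matrix(theta, data_matrix, label_matrix):
    # Assumed to return X.T @ diag(h * (1 - h)) @ X, the negative Hessian of
    # the logistic log-likelihood, so adding hessian_mat.I * gradient_v moves
    # theta in the Newton ascent direction.
    h = sigmoid(data_matrix * theta)
    weights = np.multiply(h, 1 - h)          # m x 1, elementwise h * (1 - h)
    return data_matrix.T * np.multiply(weights, data_matrix)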
Example #12
K_list = np.zeros((nx, 1))
lambda_list = np.zeros((len(Lambda), 1))
DK_list = np.zeros((nx, 1))
DL_list = np.zeros((nx, 1))
Dlambda_list = np.zeros((len(Lambda), 1))
Dxy_list = np.zeros(shape=(N, nx**2 + nx, nx))
constraint_list = []
psd_list = []

for i in range(N):
    # Assign dual variables from primal variables DUAL
    x = np.vstack((K.T, dual_l))
    y = L.T

    # Get gradients and hessians for local NE computation PRIMAL
    DK, DL, DKL = gradient(50, 200, A, B, C, Q, Ru, Rw, K, L, T)
    Dlambda = Df_lambda(Lambda, L, Q, q, Rw, nx)
    DfL = Df_L(Lambda, L, Q, q, Rw, nx)
    DflL = Df_lambda_L(Lambda, L, Q, q, Rw, nx).reshape((nx**2, nx))

    # Dx,Dy are all column vectors PRIMAL
    Dx = np.vstack((DK.reshape((nx, 1)), Dlambda))
    Dy = DL + DfL
    Dy = Dy.T
    Dxy = np.vstack((DKL, DflL))
    Dyx = Dxy.T
    hessian = LA.block_diag(np.eye(nx),
                            D2P(Lambda, nx).reshape((nx**2, nx**2)))
    hessian_inv = np.linalg.inv(hessian)

    # Local NE PRIMAL
############################  Main Loop ##################################
N = 7000

# NGD lists and initialization
L_NGD_list = np.zeros((nx, 1))
K_NGD_list = np.zeros((nx, 1))
constraint_NGD_list = []
K_NGD = K
L_NGD = L

for i in range(N):
    # NGD update and storing
    K_NGD_list = np.hstack((K_NGD_list, K_NGD.reshape((nx, nu))))
    L_NGD_list = np.hstack((L_NGD_list, L_NGD.reshape((nx, nu))))
    for j in range(100):
        DK, _, _ = gradient(50, 200, A, B, C, Q, Ru, Rw, K_NGD, L_NGD, T)
        # save NGD lists
        p, _ = np.linalg.eig(Q - L_NGD.T @ Rw @ L_NGD)
        constraint_NGD_list.append(p)
        #update
        K_NGD = K_NGD - eta_x * DK

    _, DL, _ = gradient(50, 200, A, B, C, Q, Ru, Rw, K_NGD, L_NGD, T)
    L_NGD = L_NGD + eta_y * DL
    L_NGD = proj_sgd(L_NGD.T, l_max).T

    if i % 50 == 0:
        print('-------------', i, '-------------------')
        print('K is ', K_NGD)
        print('L is ', L_NGD)
        print('Constraint is ', np.min(p))
Example #14
safeguard = 2

np.random.seed(2170)
K = 0.001 * np.random.normal(size=(nu, nx))
L = np.zeros(shape=(nw, nx))

e, _ = np.linalg.eig(A + B @ K)

print('stability: ', np.abs(e))
l = 50
Dx_list = np.zeros((2, 1))
Dxy_list = np.zeros((l, nx, nx))

for n in range(l):
    Dx, DL, Dxy = gradient(50, 200, A, B, C, Q, Ru, Rw, K, L, T)
    Dx = Dx.reshape((2, 1))

    Dx_list = np.hstack((Dx_list, Dx))
    Dxy_list[n, :, :] = Dxy

print('standard deviation of Dx(1): ', statistics.pstdev(Dx_list[0, :]))
print('standard deviation of Dx(2): ', statistics.pstdev(Dx_list[1, :]))
print('mean of Dx(1): ', np.mean(Dx_list[0, :]))
print('mean of Dx(2): ', np.mean(Dx_list[1, :]))

p4 = plt.figure(1)
plt.subplot(121)
plt.scatter(range(l + 1), Dx_list[0, :])

plt.subplot(122)
Example #15
np.random.seed(1025)
q = 0.5
L = 0.001 * np.random.normal(size=(nu, nx))
# K = np.array([[ 0.26122728,  0.41846154, -0.06783688]])

K = np.zeros(shape=(nw, nx))
# L = 0.001*np.random.normal(size = (nu,nx))

############### GRADIENT DESCENT OF MAX PLAYER #########################

#
l = 500
L_list = np.zeros(shape=(nx, l))
for n in range(l):
    DK, DL, Dxy = gradient(100, 200, A, B, C, Q, Ru, Rw, K, L, T)
    L = L - 0.0001 * DL
    L = np.minimum(np.maximum(L, -safeguard), safeguard)
    L_list[:, n] = L.flatten()
    e, _ = np.linalg.eig(Q - (L.T @ Rw) @ L)
    if n % 1 == 0:
        print('-----', n, '------')
        print('Constraint: ', np.min(e))
        print('Dy: ', DL)
        print('L: ', L)
        e, _ = np.linalg.eig(A + C @ L)
        print('stability: ', np.max(np.abs(e)))

p1 = plt.figure(1)
plt.subplot(131)
plt.plot(range(l), L_list[0, :])
Example #16
eta_y = 2e-5

############################ CGD Main Loop ##################################
N = 10000

# SGD lists and initialization
L_SGD_list = np.zeros((nx, 1))
K_SGD_list = np.zeros((nx, 1))
constraint_SGD_list = []

K_SGD = K
L_SGD = L

for i in range(N):
    # SGD update and storing
    DK, DL, DKL = gradient(50, 200, A, B, C, Q, Ru, Rw, K_SGD, L_SGD, T)

    K_SGD_list = np.hstack((K_SGD_list, K_SGD.reshape((nx, nu))))
    L_SGD_list = np.hstack((L_SGD_list, L_SGD.reshape((nx, nu))))
    p, _ = np.linalg.eig(Q - L_SGD.T @ Rw @ L_SGD)
    constraint_SGD_list.append(p)

    #update
    K_SGD = K_SGD - eta_x * DK
    L_SGD = L_SGD + eta_y * DL
    temp = L_SGD
    L_SGD = proj_sgd(L_SGD.T, l_max).T

    if i % 100 == 0:
        print('-------------', i, '-------------------')
        print("projected L ", L_SGD, " with projection bound ", l_max)
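
Both the NGD loop in Example #12 and the SGD loop here project L back after each ascent step via proj_sgd(L.T, l_max), which is not defined in these excerpts. A plausible reading, offered purely as a sketch, is a projection onto a Frobenius-norm ball of radius l_max:

import numpy as np

def proj_sgd(L, l_max):
    # Hypothetical projection: if the Frobenius norm of L exceeds l_max,
    # scale L back onto the norm ball; otherwise return it unchanged.
    norm_L = np.linalg.norm(L)
    if norm_L > l_max:
        return L * (l_max / norm_L)
    return L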