import numpy as np


# `loss` is defined elsewhere in this example
def derivative_expectation_integral(mu, x, y, num_monte_carlo):
    d = len(mu)
    # draw num_monte_carlo standard-normal vectors of dimension d
    epsilon = np.random.randn(num_monte_carlo, d)

    expectation_integral_sampling = np.zeros((num_monte_carlo, d))
    expected_loss_sampling = np.zeros(num_monte_carlo)
    for i in range(num_monte_carlo):
        # accumulate epsilon * loss(y_i, (mu + epsilon) * x_i) for each sample
        expected_loss_sampling[i] = loss(mu + epsilon[i, :], x, y)
        expectation_integral_sampling[i, :] = epsilon[i, :] * expected_loss_sampling[i]

    # Monte Carlo estimates of the gradient term and the expected loss
    mean = expectation_integral_sampling.mean(axis=0)
    expected_loss = expected_loss_sampling.mean()
    return mean, expected_loss
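
# Aside (added for illustration; not part of the original example): the
# sampling loop above can also be written without the Python loop. The
# squared-error loss (dot(w, x) - y)**2 here is only a stand-in, since the
# real `loss` is defined elsewhere in this example.
def derivative_expectation_integral_vectorized(mu, x, y, num_monte_carlo):
    epsilon = np.random.randn(num_monte_carlo, len(mu))  # (k, d) samples
    losses = ((mu + epsilon) @ x - y) ** 2               # stand-in loss per sample
    mean = (epsilon * losses[:, None]).mean(axis=0)      # estimate of E[eps * loss]
    return mean, losses.mean()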

def expectation_integral(mu, x, y):
    # deterministic variant: simply evaluate the loss at mu (see the MATLAB
    # reference below, where the Monte Carlo branch is disabled by `if 0`)
    result = loss(mu, x, y)
    return result


    # Original code from MATLAB
    # function ei=expectation_integral(mu, x, y)
    #
    # if 0
    # num_monte_carlo = 1000; % Monte Carlo integration - number of samplings
    # d = length(mu);
    # epsilon = randn(d,num_monte_carlo);
    # expectation_integral_sampling = zeros(num_monte_carlo,1);
    # for k=1:num_monte_carlo
    # expectation_integral_sampling(k) = loss( mu(:) + epsilon(:,k), x(:)', y );
    # end
    # ei = mean(expectation_integral_sampling);
    # else
    # ei = loss( mu(:), x(:)', y );
    # end
Example #3
from loss_function import loss
from updata_method import *

from numpy import array, vstack
from numpy.random import rand
import pylab as pl

total_points = 2000
all_points = rand(total_points, 2)  # random 2-D data points in [0, 1)^2
start = array([0, 1])  # starting point
lr = 0.03  # learning rate
loop_max = 1000  # maximum number of iterations (guards against an infinite loop)
epsilon = 1e-6  # convergence threshold
xb = start  # trajectory of visited points
x = start
# gradient-descent training loop
for i in range(loop_max):
    origin_loss = loss(x, all_points)  # loss before the gradient update
    new_x = x - lr * GD(x, all_points)  # new point after the gradient step
    new_loss = loss(new_x, all_points)  # loss after the update
    if origin_loss - new_loss > epsilon:  # loss dropped by more than the threshold: accept the step and continue
        x = new_x
        origin_loss = new_loss
    elif new_loss - origin_loss > epsilon:  # loss rose by more than the threshold: the step size is too large, shrink it
        lr = lr * 0.3
    else:
        break
    xb = vstack((xb, x))  # record the trajectory for plotting

c = x  # converged point

pl.plot(all_points[:, 0], all_points[:, 1], 'g.')  # data points
pl.plot(xb[:, 0], xb[:, 1], 'r.')  # descent trajectory
pl.show()
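
# Aside (added; not part of the example): `loss` and `GD` come from the local
# modules loss_function / updata_method, which are not shown here. Stand-ins
# consistent with how they are called above -- loss(point, points) returning a
# scalar and GD(point, points) returning its gradient -- could look like this:
def loss_standin(p, points):
    # mean squared Euclidean distance from p to every data point
    return ((points - p) ** 2).sum(axis=1).mean()

def GD_standin(p, points):
    # gradient of the mean squared distance with respect to p
    return 2.0 * (p - points.mean(axis=0))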
# validation data
x_val = x[val_offset:val_offset + m_val, :]
y_val = y[val_offset:val_offset + m_val]

test_offset = val_offset + m_val + 1

# testing data
x_test = x[test_offset:test_offset + m_test, :]
y_test = y[test_offset:test_offset + m_test]

# checking the loss of mu_star
current_loss = 0.0
current_errors = 0.0

# Note: the optimal loss is guaranteed to be 0 only when the loss function is
# the 0-1 loss 1[y_i != y]; for other losses there is no such guarantee.
for i in range(m_val):
    current_loss += loss(mu_star, x_val[i, :], y_val[i])
    if y_val[i] * np.dot(mu_star, x_val[i, :]) < 0:  # sign disagreement counts as an error
        current_errors += 1

print("optimal loss on the validation set:", current_loss / m_val, "and errors:", current_errors / m_val)

# algorithm - iterations to find mu

mu = np.zeros((d, n + 1))
best_loss = 1e8  # best loss seen so far

current_loss_container = np.zeros((n, 2))  # for plot

no_effect_instances = 0

j = 0