def infer_LAD(x, y, regu=0.1, tol=1e-8, max_iter=5000):
    ## 2019.12.26: Jungmin's code
    # limiting weight sigma*sperf(r/sigma)/r for residuals r -> 0 (~ 2/sqrt(pi))
    weights_limit = sperf(1e-10) * 1e10

    s_sample, s_pred = x.shape
    s_sample, s_target = y.shape

    mu = np.zeros(x.shape[1])

    # weights start from zero; the 0.0 factor disables the random initialization
    w_sol = 0.0 * (np.random.rand(s_pred, s_target) - 0.5)
    b_sol = np.random.rand(1, s_target) - 0.5

    #     print(weights.shape)
    for index in range(s_target):
        error, old_error = np.inf, 0
        weights = np.ones((s_sample, 1))
        cov = np.cov(np.hstack((x, y[:, index][:, None])), rowvar=False,
                     ddof=0, aweights=weights.reshape(s_sample))
        cov_xx = cov[:s_pred, :s_pred]
        cov_xy = cov[:s_pred, s_pred:(s_pred + 1)]

        #         print(cov.shape, cov_xx.shape, cov_xy.shape)
        counter = 0
        while np.abs(error - old_error) > tol and counter < max_iter:
            counter += 1
            old_error = np.mean(
                np.abs(b_sol[0, index] + x.dot(w_sol[:, index]) - y[:, index]))
            #             old_error = np.mean(np.abs(b_sol[0,index] + x_test.dot(w_sol[:,index]) - y_test[:,index]))
            #             print(w_sol[:,index].shape, npl.solve(cov_xx, cov_xy).reshape(s_pred).shape)

            # 2019.12.26: Tai - added regularization
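            # the regularizer adds regu*mu_i*w_i to the normal equations below, and
            # mu_i*w_i = sigma_w*sperf(w_i/sigma_w): a smoothed, L1-like penalty term
            # (the w -> 0 limit 2/sqrt(pi) is substituted where w_i is numerically zero)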
            sigma_w = np.std(w_sol[:, index])

            w_eq_0 = np.abs(w_sol[:, index]) < 1e-10
            mu[w_eq_0] = 2. / np.sqrt(np.pi)

            mu[~w_eq_0] = sigma_w * sperf(
                w_sol[:, index][~w_eq_0] / sigma_w) / w_sol[:, index][~w_eq_0]

            w_sol[:, index] = np.linalg.solve(cov_xx + regu * np.diag(mu),
                                              cov_xy).reshape(s_pred)

            b_sol[0, index] = np.mean(y[:, index] - x.dot(w_sol[:, index]))
            weights = (b_sol[0, index] + x.dot(w_sol[:, index]) - y[:, index])
            sigma = np.std(weights)
            error = np.mean(np.abs(weights))
            #             error = np.mean(np.abs(b_sol[0,index] + x_test.dot(w_sol[:,index]) - y_test[:,index]))
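            # turn the residuals into weights sigma*sperf(r/sigma)/r for the next
            # weighted-covariance solve; the r -> 0 limit handles near-zero residuals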
            weights_eq_0 = np.abs(weights) < 1e-10
            weights[weights_eq_0] = weights_limit
            weights[~weights_eq_0] = sigma * sperf(
                weights[~weights_eq_0] / sigma) / weights[~weights_eq_0]

            #weights = sigma*sperf(weights/sigma)/weights
            cov = np.cov(np.hstack((x, y[:, index][:, None])), rowvar=False,
                         ddof=0, aweights=weights.reshape(s_sample))
            cov_xx = cov[:s_pred, :s_pred]
            cov_xy = cov[:s_pred, s_pred:(s_pred + 1)]
#             print(old_error,error)
#return b_sol,w_sol
    return b_sol[0][0], w_sol[:, 0]  # for only one target case
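# Minimal usage sketch for the function above, assuming `sperf` is scipy.special.erf
# and numpy is imported as np (neither import is shown in this snippet); the data
# below are synthetic stand-ins, not from the original source.
import numpy as np
from scipy.special import erf as sperf

rng = np.random.default_rng(0)
x_demo = rng.normal(size=(300, 4))
noise = rng.laplace(scale=0.5, size=(300, 1))  # heavy-tailed noise suits an LAD-style fit
y_demo = x_demo @ rng.normal(size=(4, 1)) + 0.2 + noise
b_hat, w_hat = infer_LAD(x_demo, y_demo, regu=0.1)  # scalar bias, length-4 weight vector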
def infer_LAD(x, y, regu, tol=1e-8, max_iter=5000):
    weights_limit = sperf(1e-10) * 1e10

    n_sample, n_var = x.shape
    n_target = y.shape[1]

    mu = np.zeros(x.shape[1])

    w_sol = 0.0 * (np.random.rand(n_var, n_target) - 0.5)
    b_sol = np.random.rand(1, n_target) - 0.5

    for index in range(n_target):
        error, old_error = np.inf, 0
        weights = np.ones(n_sample)

        cov = np.cov(np.hstack((x, y[:, index][:, None])), rowvar=False,
                     ddof=0, aweights=weights)
        cov_xx, cov_xy = cov[:n_var, :n_var], cov[:n_var, n_var:(n_var + 1)]

        counter = 0
        while np.abs(error - old_error) > tol and counter < max_iter:
            counter += 1
            old_error = np.mean(
                np.abs(b_sol[0, index] + x.dot(w_sol[:, index]) - y[:, index]))

            # 2019.12.26: Tai - added regularization
            sigma_w = np.std(w_sol[:, index])

            w_eq_0 = np.abs(w_sol[:, index]) < 1e-10
            mu[w_eq_0] = 2. / np.sqrt(np.pi)

            mu[~w_eq_0] = sigma_w * sperf(
                w_sol[:, index][~w_eq_0] / sigma_w) / w_sol[:, index][~w_eq_0]

            w_sol[:, index] = np.linalg.solve(cov_xx + regu * np.diag(mu),
                                              cov_xy).reshape(n_var)

            # intercept update as a weighted mean under the current sample weights
            b_sol[0, index] = np.mean((y[:, index] - x.dot(w_sol[:, index])) *
                                      weights) / np.mean(weights)

            weights = (b_sol[0, index] + x.dot(w_sol[:, index]) - y[:, index])
            sigma = np.std(weights)
            error = np.mean(np.abs(weights))

            weights_eq_0 = np.abs(weights) < 1e-10
            weights[weights_eq_0] = weights_limit
            weights[~weights_eq_0] = sigma * sperf(
                weights[~weights_eq_0] / sigma) / weights[~weights_eq_0]

            cov = np.cov(np.hstack((x, y[:, index][:, None])), rowvar=False,
                         ddof=0, aweights=weights)
            cov_xx = cov[:n_var, :n_var]
            cov_xy = cov[:n_var, n_var:(n_var + 1)]
#             print(old_error,error)

    return b_sol[0][0], w_sol[:, 0]  # for only one target case
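# Sketch with the same assumptions as above (numpy as np, sperf = scipy.special.erf):
# scan a few regularization strengths and keep the one with the smallest mean
# absolute training residual.
rng = np.random.default_rng(1)
x_demo = rng.normal(size=(400, 5))
y_demo = x_demo @ rng.normal(size=(5, 1)) + rng.standard_t(df=2, size=(400, 1))
fits = {r: infer_LAD(x_demo, y_demo, r) for r in (0.01, 0.1, 1.0)}

def mean_abs_residual(fit):
    b, w = fit
    return np.mean(np.abs(b + x_demo @ w - y_demo[:, 0]))

best_regu = min(fits, key=lambda r: mean_abs_residual(fits[r]))
print('best regu:', best_regu, 'MAE:', mean_abs_residual(fits[best_regu]))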
Example 3
def iteration(index, w_in, x, xc, y, num_iter=10):
    sperf_init = sperf(np.dot(x, w_in)[:-1, index] * dts / sqrt2)
    for iterate in range(num_iter):
        h = np.dot(x, w_in[:, index])[:-1]
        h_ratio = h * y[:, index] / sperf(h * dts / sqrt2)
        w_in[:, index] = sp.linalg.solve(
            C_jk, np.mean(h_ratio[:, np.newaxis] * xc, axis=0))
        sperf_next = sperf(np.dot(x, w_in)[:-1, index] * dts / sqrt2)
        if (nplin.norm(sperf_next - sperf_init)**2 < 1e-4): break
        sperf_init = np.copy(sperf_next)
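# The snippet above relies on module-level objects that are not shown here; the
# following setup is a plausible reconstruction, with every name, value and shape
# an assumption inferred from usage rather than taken from the source.
import numpy as np
import scipy as sp
import scipy.linalg                       # so that sp.linalg.solve resolves
import numpy.linalg as nplin
from scipy.special import erf as sperf

dt = 0.1                                  # placeholder integration time step
dts, sqrt2 = np.sqrt(dt), np.sqrt(2.0)    # assumed: dts is the square root of the step
# C_jk would be the covariance of the centred inputs, e.g. C_jk = np.cov(xc, rowvar=False)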
Example 4
def infer(x,max_iter = 100,tol=1e-8,func=npl.solve,window=1,power=1,verbose=False):
    time_steps,size = x.shape
    x0 = np.copy(x)
    if window>1:
        x0 = moving_avg(x0,window)
        time_steps = time_steps-window+1
    y = np.diff(x0,axis=0)
    y_mean = np.mean(np.abs(y),axis=0)
    y_max = np.max(np.abs(y),axis=0)
    if power<3:
        y /= y_max[None,:]#now y is definitely within +/- 1
        x0 = x0/y_max[None,:]
    x0 = x0[:-1]
    s = np.sign(y)
    c = np.cov(x0,rowvar=False)
    w = npr.rand(size,size) - 0.5
    bias = npr.rand(1,size) - 0.5
    if power<3:
        h = np.tanh(bias + x0.dot(w))
    else:
        h = odd_power(bias + x0.dot(w),power)
    for index in range(size):
        err_old,error,counter = 0,np.inf,0
        #         print(index)
        while np.abs(error-err_old) > tol and counter < max_iter:
            counter += 1
            zeros = np.abs(bias[0,index] + x0.dot(w[:,index])) < 1e-7
            if power<3:
                ratio = np.sqrt(np.pi/2.0)*np.ones((time_steps-1))
            else:
                ratio = np.sqrt(np.pi/2.0)*np.ones((time_steps-1))*h[:,index]**(power-1)
            ratio[~zeros] = (bias[0,index] + x0[~zeros,:].dot(w[:,index]))/sperf(h[~zeros,index]*root2over)
            w[:,index] = func(c+0.1*np.eye(size),np.mean((x0-np.mean(x0,axis=0)[None,:])*(s[:,index]*ratio)[:,np.newaxis],axis=0))
            h_temp = x0.dot(w[:,index])
            bias[0,index] = bias_update(y[:,index],h_temp,bias[0,index],pp=power)
            err_old = error
            if power<3:
                h[:,index] = np.tanh(bias[0,index] + h_temp)
                error = npl.norm(s[:,index]-sperf(h[:,index]*root2over)/erf_max)
            else:
                h[:,index] = odd_power(bias[0,index] + h_temp,power)
                error = npl.norm(s[:,index]-sperf(h[:,index]*root2over))
    #             print(counter,error)
    sigma = find_sigma(y,h)*np.sqrt(window)#*y_max[None,:]
    return w,sigma,bias
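# Plausible imports for the aliases used in `infer` above (an assumption; they
# are not shown in this snippet): npl/npr are numpy submodules, sperf is scipy's erf.
import numpy as np
import numpy.linalg as npl
import numpy.random as npr
from scipy.special import erf as sperf
# root2over, erf_max, odd_power, bias_update, moving_avg and find_sigma are
# further helpers defined elsewhere in the original module.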
Example 5
def LAD_fit(x, y, tol=1e-8, max_iter=5000):
    # weight assigned to numerically-zero residuals (~ 2/sqrt(pi))
    weights_limit = sperf(1e-10) * 1e10
    s_sample, s_pred = x.shape
    s_sample, s_target = y.shape

    #s_sample, s_target = len(y),1 # 2019.12.26: Tai -- only one target

    w_sol = 0.0 * (np.random.rand(s_pred, s_target) - 0.5)
    b_sol = np.random.rand(1, s_target) - 0.5

    #     print(weights.shape)
    for index in range(s_target):
        error, old_error = np.inf, 0
        weights = np.ones((s_sample, 1))
        cov = np.cov(np.hstack((x, y[:, index][:, None])),
                     rowvar=False,
                     ddof=0,
                     aweights=weights.reshape(s_sample))
        cov_xx = cov[:s_pred, :s_pred]
        cov_xy = cov[:s_pred, s_pred:(s_pred + 1)]

        #         print(cov.shape, cov_xx.shape, cov_xy.shape)
        counter = 0
        while np.abs(error - old_error) > tol and counter < max_iter:
            counter += 1
            old_error = np.mean(
                np.abs(b_sol[0, index] + x.dot(w_sol[:, index]) - y[:, index]))
            #             old_error = np.mean(np.abs(b_sol[0,index] + x_test.dot(w_sol[:,index]) - y_test[:,index]))
            #             print(w_sol[:,index].shape, npl.solve(cov_xx, cov_xy).reshape(s_pred).shape)
            w_sol[:, index] = np.linalg.solve(cov_xx, cov_xy).reshape(s_pred)
            b_sol[0, index] = np.mean(y[:, index] - x.dot(w_sol[:, index]))
            weights = (b_sol[0, index] + x.dot(w_sol[:, index]) - y[:, index])
            sigma = np.std(weights)
            error = np.mean(np.abs(weights))
            #             error = np.mean(np.abs(b_sol[0,index] + x_test.dot(w_sol[:,index]) - y_test[:,index]))
            weights_eq_0 = np.abs(weights) < 1e-10
            weights[weights_eq_0] = weights_limit
            weights[~weights_eq_0] = sigma * sperf(
                weights[~weights_eq_0] / sigma) / weights[~weights_eq_0]
            cov = np.cov(np.hstack((x, y[:, index][:, None])),
                         rowvar=False,
                         ddof=0,
                         aweights=weights.reshape(s_sample))
            cov_xx = cov[:s_pred, :s_pred]
            cov_xy = cov[:s_pred, s_pred:(s_pred + 1)]
#             print(old_error,error)
    return b_sol, w_sol
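# Minimal usage sketch with the same assumptions as above (numpy as np,
# sperf = scipy.special.erf); synthetic two-target data, not from the source.
rng = np.random.default_rng(2)
x_demo = rng.normal(size=(400, 6))
y_demo = x_demo @ rng.normal(size=(6, 2)) + rng.laplace(size=(400, 2))
b_fit, w_fit = LAD_fit(x_demo, y_demo)  # b_fit: (1, 2) intercepts, w_fit: (6, 2) weights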
Example 6
def iteration(index, w_in, x, xc, y, num_iter=10):
    sperf_init = sperf(np.dot(x, w_in)[:-1, index] * dts / sqrt2)
    for iterate in range(num_iter):
        h = np.dot(x, w_in[:, index])[:-1]
        h_ratio = h * y[:, index] / sperf(h * dts / sqrt2)
        w_in[:, index] = sp.linalg.solve(
            C_jk, np.mean(h_ratio[:, np.newaxis] * xc, axis=0))
        sperf_next = sperf(np.dot(x, w_in)[:-1, index] * dts / sqrt2)
        if (nplin.norm(sperf_next - sperf_init)**2 < 1e-4): break
        sperf_init = np.copy(sperf_next)


#        print(iterate,nplin.norm((x[1:,index]-x[:-1,index])-sperf_init)**2/float(L-1))
#    return w_in
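# The driver code below assumes the simulation objects X, XC, Y, W, N_var, L,
# dts, sqrt2 and gen_W, plus matplotlib.pyplot as plt, all defined elsewhere in
# the original module.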

cov_plus = np.cov(X[:-1], X[1:], rowvar=False)
#w_try = sp.linalg.solve(cov_plus[:N_var,:N_var]*dt,cov_plus[:N_var,N_var:]-cov_plus[:N_var,:N_var])#gen_W()

w_try = gen_W()
print('initial MSE', nplin.norm(W - w_try)**2 / float(N_var**2))
for index in range(N_var):
    iteration(index, w_try, X, XC, Y)
    print('final MSE for ',index,nplin.norm(W[:,index]-w_try[:,index])**2/float(N_var),\
          nplin.norm(Y[:,index]-sperf(np.dot(X,w_try)[:-1,index])*dts/sqrt2)**2/float(L-1))

plt.scatter(W.flatten(), w_try.flatten(), c='k', s=0.1)
plt.show()

# with PdfPages('langevin-' + str(dt) + '-' + str(T) + '-' + str(N_var) + '-' +
#               str(seed) + '.pdf') as pdf:
#     fig = plt.figure()
#     plt.imshow(w_try)
#     plt.colorbar()
#     pdf.savefig(fig)
#     plt.close()
#     fig = plt.figure()
#     print('initial MSE', nplin.norm(W - w_try)**2 / float(N_var**2))
#     for index in range(N_var):
#         iteration(index, w_try, X, XC, Y)