Example #1
import numpy.matlib as mat
from numpy import linalg

def samples(d, n):  # generate a sequence of unit-length vector pairs
    s = []
    for i in range(0, n):
        x1 = mat.randn((d, 1))
        x2 = mat.randn((d, 1))
        s.append((x1 / linalg.norm(x1), x2 / linalg.norm(x2)))
    return s
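
A minimal usage sketch of the helper above:

pairs = samples(3, 5)  # five pairs of 3-dimensional unit vectors
x1, x2 = pairs[0]
print(linalg.norm(x1), linalg.norm(x2))  # both ~1.0 by construction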
Example #2
import numpy as np
from numpy.random import randn

def create_gaussian_particles(mean, std, N):
    particles = np.empty((N, 5))
    # angle
    particles[:, 0] = mean[2] + (randn(N) * std[2])
    # left wheel velocity (hard-coded prior; ignores mean/std)
    particles[:, 1] = 11 + (randn(N) * 5.5)
    # right wheel velocity (hard-coded prior; ignores mean/std)
    particles[:, 2] = 11 + (randn(N) * 5.5)
    # x
    particles[:, 3] = mean[0] + (randn(N) * std[0])
    # y
    particles[:, 4] = mean[1] + (randn(N) * std[1])
    particles[:, 0] %= 2 * np.pi
    return particles
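
A hedged usage sketch: mean is (x, y, heading), and the five particle columns are [heading, lv, rv, x, y], per the comments above:

particles = create_gaussian_particles(mean=(10.0, 5.0, np.pi / 2), std=(1.0, 1.0, 0.1), N=1000)
print(particles.shape)  # (1000, 5)
print(particles[:, 0].min() >= 0)  # headings are wrapped into [0, 2*pi)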
Example #3
    def init_subpopulations(self):
        # Main excitatory subpopulation
        self.e_size=int(self.params.network_group_size*.8)
        self.group_e=self.subgroup(self.e_size)
        self.group_e.C=self.pyr_params.C
        self.group_e.gL=self.pyr_params.gL
        self.group_e._refractory_time=self.pyr_params.refractory

        # Main inhibitory subpopulation
        self.i_size=int(self.params.network_group_size*.2)
        self.group_i=self.subgroup(self.i_size)
        self.group_i.C=self.inh_params.C
        self.group_i.gL=self.inh_params.gL
        self.group_i._refractory_time=self.inh_params.refractory

        # Input-specific sub-subpopulations
        self.groups_e=[]
        for i in range(self.params.num_groups):
            subgroup_e=self.group_e.subgroup(int(self.params.f*self.e_size))
            self.groups_e.append(subgroup_e)
        self.ns_e=self.group_e.subgroup(self.e_size-(self.params.num_groups*int(self.params.f*self.e_size)))

        # Initialize state variables
        self.vm = self.params.EL+randn(self.params.network_group_size)*mV
        self.group_e.g_ampa_b = rand(self.e_size)*self.pyr_params.w_ampa_ext_correct*2.0
        self.group_e.g_nmda = rand(self.e_size)*self.pyr_params.w_nmda*2.0
        self.group_e.g_gaba_a = rand(self.e_size)*self.pyr_params.w_gaba*2.0
        self.group_i.g_ampa_r = rand(self.i_size)*self.inh_params.w_ampa_rec*2.0
        self.group_i.g_ampa_b = rand(self.i_size)*self.inh_params.w_ampa_bak*2.0
        self.group_i.g_nmda = rand(self.i_size)*self.inh_params.w_nmda*2.0
        self.group_i.g_gaba_a = rand(self.i_size)*self.inh_params.w_gaba*2.0
Example #4
 def testDownloadConsituentsGrabsData(self):
     self.market.downloader.get = Mock()
     self.market.downloader.get.return_value = DataFrame(randn(2, 1))
     calls = [call(ticker + ".AX", self.start_date, self.end_date) for ticker in self.tickers] 
     self.market.download_data()
     self.market.downloader.get.assert_has_calls(calls)
     self.assertSetEqual(set(self.tickers), set(self.market.instruments.keys()))
Example #5
 def __init__(self, *args):
     if len(args) < 2:
         raise TypeError("__init__() missing 2 required positional arguments: 'n_X' and 'n_y'")
     
     self._n = args
     self._Theta = []
     for i in range(len(args) - 1):
         self._Theta.append(matlib.randn((args[i] + 1, args[i + 1])))
Example #6
    def __init__(self, dim):
        self.num_tables = 2
        self.hash_size = 8

        if setup:  # 'setup' (and 'db_conn' / 'SQL' below) are defined elsewhere in the source module
            for i in range(self.num_tables):
                projections = matlib.randn(self.hash_size, dim)
                SQL.insert_table_data(db_conn, i, projections.tostring(), self.hash_size)
Example #7
import numpy as np
from numpy.random import randn

def awgn(image, sigma):  # add white Gaussian noise to a 2-D (grayscale) image
    # image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    image = np.asarray(image).astype('double')
    shape = image.shape
    noise_map = randn(shape[0], shape[1])
    noise = sigma * noise_map
    noise_image = image + noise

    return noise_image
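
A hedged usage sketch on a dummy grayscale image:

img = np.full((4, 4), 128.0)
noisy = awgn(img, sigma=25)
print(noisy.shape)  # (4, 4); values may now fall outside [0, 255]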
Example #8
    def test_calc_correlation(self):
        quotes = self.get_quotes()

        rho = calc_correlation(quotes)
        self.assertEqual(rho.shape, (1, 1))
        self.assertEqual(list(rho.flat), [1])

        return
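        # NOTE: the early return above disables the remaining multi-series assertions.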

        rho = calc_correlation(quotes, quotes)
        self.assertEqual(rho.shape, (2, 2))
        self.assertEqual(list(rho.flat), [1, 1, 1, 1])

        rho = calc_correlation(quotes, quotes, quotes)
        self.assertEqual(rho.shape, (3, 3))
        self.assertEqual(list(rho.flat), [1, 1, 1, 1, 1, 1, 1, 1, 1])

        rho = calc_correlation(quotes, list(map(lambda x: -x, quotes)))
        self.assertEqual(rho.shape, (2, 2))
        self.assertEqual(list(rho.flat), [1, -1, -1, 1])

        np.random.seed(12345)  # scipy.random.seed was removed in modern SciPy; use numpy's
        a = list(randn(20000).flat)
        b = list(randn(20000).flat)
        c = list(randn(20000).flat)
        rho = calc_correlation(a, b)
        self.assertEqual(rho.shape, (2, 2))
        self.assertAlmostEqual(rho[0][0], 1, places=1)
        self.assertAlmostEqual(rho[0][1], 0, places=1)
        self.assertAlmostEqual(rho[1][0], 0, places=1)
        self.assertAlmostEqual(rho[1][1], 1, places=1)

        rho = calc_correlation(a, b, c)
        self.assertEqual(rho.shape, (3, 3))
        self.assertAlmostEqual(rho[0][0], 1, places=1)
        self.assertAlmostEqual(rho[0][1], 0, places=1)
        self.assertAlmostEqual(rho[0][2], 0, places=1)
        self.assertAlmostEqual(rho[1][0], 0, places=1)
        self.assertAlmostEqual(rho[1][1], 1, places=1)
        self.assertAlmostEqual(rho[1][2], 0, places=1)
        self.assertAlmostEqual(rho[2][0], 0, places=1)
        self.assertAlmostEqual(rho[2][1], 0, places=1)
        self.assertAlmostEqual(rho[2][2], 1, places=1)
Example #9
 def testDownloadConsituentsGrabsData(self):
     self.market.downloader.get = Mock()
     self.market.downloader.get.return_value = DataFrame(randn(2, 1))
     calls = [
         call(ticker + ".AX", self.start_date, self.end_date)
         for ticker in self.tickers
     ]
     self.market.download_data()
     self.market.downloader.get.assert_has_calls(calls)
     self.assertSetEqual(set(self.tickers),
                         set(self.market.instruments.keys()))
Example #10
import numpy as np
import numpy.matlib as mat
from numpy import linalg

def gaussian_orthogonal_kernel(deg):  # random orthogonal matrix via Gram-Schmidt
    n = 2**deg
    rows = []
    for i in range(0, n):
        x = mat.randn(n, 1)
        x = x / linalg.norm(x)
        p = mat.zeros((1, n))
        for r in rows:
            p += (r * x) * r
        y = np.transpose(x) - p
        y = y / linalg.norm(y)
        rows.append(y)
    return np.concatenate(rows)
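
The loop performs Gram-Schmidt on random Gaussian vectors, so the rows come out orthonormal. A quick check (hedged sketch):

Q = gaussian_orthogonal_kernel(3)  # 2**3 = 8, so an 8x8 matrix
print(np.allclose(Q @ Q.T, np.eye(8)))  # True up to floating-point error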
Example #11
import numpy as np
from numpy.random import randn

def predict(particles, u, std, dt):
    """ move according to control input u (heading change, velocity)
    with noise Q (std heading change, std velocity)`"""
    (vl, vr) = u
    R = wheel_base_half * (vl + vr) / (vr - vl)
    wt = (vr - vl) / (wheel_base_half * 2) * dt

    ICCx = particles[:, 3] - R * np.sin(particles[:, 0])
    ICCy = particles[:, 4] + R * np.cos(particles[:, 0])

    # rotate each particle about its ICC, using the pre-update offsets for
    # both coordinate updates
    dx = particles[:, 3] - ICCx
    dy = particles[:, 4] - ICCy
    particles[:, 3] = np.cos(wt) * dx - np.sin(wt) * dy + ICCx
    particles[:, 4] = np.sin(wt) * dx + np.cos(wt) * dy + ICCy

    N = len(particles)
    # update heading
    particles[:, 0] += wt
    # particles[:, 0] %= 2 * np.pi

    # TODO
    # move in the (noisy) commanded direction; column 1 is the left wheel
    # velocity and column 2 the right (see create_gaussian_particles)
    particles[:, 1] = vl + (randn(N) * 5)
    particles[:, 2] = vr + (randn(N) * 5)
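
A hedged usage sketch, pairing this with create_gaussian_particles from Example #2; wheel_base_half lives in the source module, so its value here is an assumption:

wheel_base_half = 0.5  # assumed wheel geometry
particles = create_gaussian_particles(mean=(0.0, 0.0, 0.0), std=(1.0, 1.0, 0.1), N=100)
predict(particles, u=(1.0, 1.2), std=(.2, .05), dt=0.1)
print(particles[:5, 0])  # headings after one noisy motion step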
Example #12
    def Path(self, timeline, nb_path):

        dsigmas = 0.5 * self.Vol**2
        adjusted_drift = self.Drift - dsigmas  # Ito-corrected drift
        T = len(timeline)
        L = zeros([nb_path, T])
        W = array(randn((nb_path, T)))  # numpy.matlib.randn accepts a shape tuple
        L[:, 0] = log(self.InitialValue) + adjusted_drift * timeline[0] + self.Vol * sqrt(timeline[0]) * W[:, 0]
        for t in range(1, T):
            delta = timeline[t] - timeline[t - 1]
            L[:, t] = L[:, t - 1] + adjusted_drift * delta + self.Vol * sqrt(delta) * W[:, t]
        return exp(L)
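
The method above reads instance attributes (InitialValue, Drift, Vol). A self-contained sketch of the same log-Euler geometric-Brownian-motion scheme, with all names hypothetical:

import numpy as np

def gbm_paths(s0, drift, vol, timeline, nb_path, seed=0):
    rng = np.random.default_rng(seed)
    timeline = np.asarray(timeline, dtype=float)
    dt = np.diff(timeline, prepend=0.0)  # first step is measured from t = 0
    adjusted = drift - 0.5 * vol**2  # Ito drift correction, as in dsigmas above
    w = rng.standard_normal((nb_path, len(timeline)))
    log_s = np.log(s0) + np.cumsum(adjusted * dt + vol * np.sqrt(dt) * w, axis=1)
    return np.exp(log_s)

print(gbm_paths(100.0, 0.05, 0.2, [0.25, 0.5, 0.75, 1.0], nb_path=3).shape)  # (3, 4)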
Example #13
    def init_subpopulations(self):
        # Main excitatory subpopulation
        self.e_size = int(self.params.network_group_size * .8)
        self.group_e = self.subgroup(self.e_size)
        self.group_e.C = self.pyr_params.C
        self.group_e.gL = self.pyr_params.gL
        self.group_e._refractory_time = self.pyr_params.refractory

        # Main inhibitory subpopulation
        self.i_size = int(self.params.network_group_size * .2)
        self.group_i = self.subgroup(self.i_size)
        self.group_i.C = self.inh_params.C
        self.group_i.gL = self.inh_params.gL
        self.group_i._refractory_time = self.inh_params.refractory

        # Input-specific sub-subpopulations
        self.groups_e = []
        for i in range(self.params.num_groups):
            subgroup_e = self.group_e.subgroup(int(self.params.f * self.e_size))
            self.groups_e.append(subgroup_e)

        # Initialize state variables
        self.vm = self.params.EL + randn(self.params.network_group_size) * mV
        self.group_e.g_ampa_b = rand(self.e_size) * self.pyr_params.w_ampa_ext_correct * 2.0
        self.group_e.g_nmda = rand(self.e_size) * self.pyr_params.w_nmda * 2.0
        self.group_e.g_gaba_a = rand(self.e_size) * self.pyr_params.w_gaba * 2.0
        self.group_i.g_ampa_r = rand(self.i_size) * self.inh_params.w_ampa_rec * 2.0
        self.group_i.g_ampa_b = rand(self.i_size) * self.inh_params.w_ampa_ext * 2.0
        #self.group_i.g_nmda = self.inh_params.w_nmda*100.0+10.0*nS*randn(self.i_size)
        self.group_i.g_nmda = rand(self.i_size) * self.inh_params.w_nmda * 2.0
        self.group_i.g_gaba_a = rand(self.i_size) * self.inh_params.w_gaba * 2.0
Example #14
import numpy as np
from numpy.random import randn

arr = np.arange(32).reshape((8, 4))
print("np.arange(32).reshape((8, 4)):")
print(arr)
print(arr[[1, 5, 7, 2], [0, 3, 1, 2]])  #[ 4 23 29 10]
print(" 输出矩阵转置:  ")
arr = np.arange(15).reshape((3, 5))
print(arr.T)
print("输出np.arange(10):")
arr = np.arange(10)
print(arr)
print("输出np.sqrt(arr):")
print(np.sqrt(arr))  #仍是一维数组,求数组中的每个元素求平方根
print("输出np.exp(arr)):")
print(np.exp(arr))  #仍是一维数组,求以e为底,数组中的每个元素作为指数的值
print("randn(8):")
x = randn(8)  #生成8个随机数
print(x)
print("randn(8):")
y = randn(8)
print(y)
print(np.maximum(x, y))  #仍是一维数组,每个元素为数组x和y对应元素最大的那个;
print("-------------------------------------")
xarr = np.array([1.1, 1.2, 1.3, 1.4, 1.5])
yarr = np.array([2.1, 2.2, 2.3, 2.4, 2.5])
cond = np.array([True, False, True, True, False])
result = np.where(cond, xarr, yarr)
print(result)  # output: [1.1 2.2 1.3 1.4 2.5]
print("-------------------------------------")
arr = randn(4, 4)
print(arr)
print(np.where(arr > 0, 2, -2))
Example #15
from numpy.random import randn
from pandas import DataFrame, date_range

def buildNumericDataFrame(columns, length):
    index = date_range("1/1/2010", periods=length, freq="D")
    return DataFrame(randn(length, len(columns)), index=index, columns=columns)
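
A minimal usage sketch of the helper above:

frame = buildNumericDataFrame(columns=["A", "B", "C"], length=5)
print(frame.shape)  # (5, 3), indexed by five consecutive days from 2010-01-01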
Example #16
import math
import numpy as np
import numpy.matlib as nm
import matplotlib.pyplot as plt

n = 100
u = nm.randn(n, 1) / 4 + 2
x = nm.randn(n, 1) / 2 + 1
w = 2 * nm.exp(-8 * np.power((x - 2), 2) + 2 * np.power((x - 1), 2))
y = nm.sin(nm.pi * x) / (nm.pi * x) + 0.1 * nm.randn(n, 1)
x2 = np.ones(len(x)).reshape(len(x), 1)  # an (n, 1) array of ones
x = np.c_[x, x2]  # x(:, 2) = 1: append a second column of ones
t1 = np.multiply(nm.repmat(w, 1, 2), x)  # np.multiply is element-wise multiplication
t = np.linalg.inv(x.T * t1) * (x.T * (np.multiply(w, y)))  # np.linalg.inv(.) computes the matrix inverse
X = nm.linspace(-1, 3, 100)  # grid of evaluation points
Y = nm.sin(nm.pi * X) / (nm.pi * X)  # Y evaluated on the grid
u = np.c_[u, x2]
v = u * t
print(u.shape)
plt.figure()
plt.plot(x[:, 0], y, 'bo', label='xi,yi')  # original data points
plt.plot(X, Y, 'r-', label='f(x)')  # target function
plt.plot(u[:, 0], v, 'kx', label='xi1,yi1')  # importance-weighted least-squares predictions
plt.legend()  # show the legend
plt.show()  # show the figure
Example #17
def run_pf1(N,
            sensor_std_err=1,
            do_plot=True,
            plot_particles=False,
            xlim=(-10, 40),
            ylim=(0, 140),
            initial_x=None,
            vl=None,
            vr=None,
            t=None,
            angle=None,
            dist=None):
    landmarks = np.array([[0, 10, 10], [0.1, 5, 15], [-0.1, 15, 5],
                          [0.3, 10, 15], [-1, 15, 15]])
    NL = len(landmarks)
    iters = len(t)

    plt.figure()

    # create particles and weights
    if initial_x is not None:
        particles = create_gaussian_particles(mean=initial_x,
                                              std=(5, 5, np.pi / 4),
                                              N=N)
    else:
        particles = create_uniform_particles((0, 20), (0, 20), (0, 6.28), N)
    weights = np.zeros(N)

    if plot_particles:
        alpha = .20
        if N > 5000:
            alpha *= np.sqrt(5000) / np.sqrt(N)
        plt.scatter(particles[:, 3], particles[:, 4], alpha=alpha, color='g')

    xs = []
    robot_pos = np.array([0., 0., 13.8])

    x = 0
    y = 0
    for i in range(1, iters):
        # TODO
        # robot_pos += (1, 1)
        dt = t[i] - t[i - 1]

        robot_pos = (angle[i - 1], vl[i - 1], vr[i - 1])

        # landmarks = np.array([robot_pos])
        # distance from robot to each landmark (note: norm() without an axis
        # argument collapses the whole matrix to a single scalar here)
        zs = (norm(landmarks - robot_pos) + (randn(NL) * sensor_std_err))

        # move diagonally forward to (x+1, x+1)
        predict(particles, u=(0.00, 1.414), std=(.2, .05), dt=dt)

        # incorporate measurements
        update(particles, weights, z=zs, R=sensor_std_err, landmarks=landmarks)

        # resample if too few effective particles
        if neff(weights) < N / 2:
            indexes = systematic_resample(weights)
            resample_from_index(particles, weights, indexes)

        mu, var = estimate(particles, weights)
        xs.append(mu)

        if plot_particles:
            plt.scatter(particles[:, 0],
                        particles[:, 2],
                        color='k',
                        marker=',',
                        s=1)
        p1 = plt.scatter(robot_pos[1],
                         robot_pos[2],
                         marker='',
                         color='k',
                         s=180,
                         lw=3)
        p2 = plt.scatter(mu[0], mu[1], marker='s', color='r')

    # xs = np.array(xs)
    # plt.plot(xs[:, 0], xs[:, 1])
    plt.legend([p1, p2], ['Actual', 'PF'], loc=4, numpoints=1)
    # plt.xlim(*xlim)
    # plt.ylim(*ylim)
    # print('final position error, variance:\n\t', mu - np.array([iters, iters]), var)
    plt.show()
Example #18
 def __init__(self, n_X, n_RBF, n_y):
     self._n_X = n_X
     self._n_RBF = n_RBF
     self._n_y = n_y
     self._Theta1 = matlib.randn((n_RBF, n_X))
     self._Theta2 = matlib.randn((n_RBF + 1, n_y))
Example #19
# -*- encoding: utf-8 -*-
"""
4.6.1 Creating matrices
"""

import numpy as np
import numpy.matlib as mat

print(np.mat([[1, 2, 3], [4, 5, 6]], dtype=int))  # create a matrix from a list (np.int was removed from NumPy)
print(np.mat(np.arange(6).reshape((2, 3))))  # create a matrix from an array
print(np.mat('1 4 7; 2 5 8; 3 6 9'))  # create a matrix from a MATLAB-style string

print(mat.zeros((2, 3)))  # all-zeros matrix
print(mat.ones((2, 3)))  # all-ones matrix
print(mat.eye(3))  # identity matrix
print(mat.empty((2, 3)))  # uninitialized matrix
print(mat.rand((2, 3)))  # matrix of random numbers in [0, 1)
print(mat.randn((2, 3)))  # Gaussian (normal) matrix with mean 0 and variance 1
Example #20
def buildNumericDataFrame(columns, length):
    index = date_range("1/1/2010", periods=length, freq="D")
    return DataFrame(randn(length, len(columns)), index=index, columns=columns)
Example #21
    def ci_bootstrapping_method(self):
        print("-------2.2.2 Use Bootstrap method--------")
        B = 1000
        sigma = 0.05
        print("Using confidance level: ", sigma)
        lower_bound_rate = sigma / 2
        higher_bound_rate = 1 - lower_bound_rate
        lower_bound = int(lower_bound_rate * B)
        higher_bound = int(higher_bound_rate * B)
        beta_set = []
        alpha_set = []
        beta_t_set = []
        alpha_t_set = []

        ret_len = len(self.cc_return_index)
        y = list(self.cc_return_single_stock)
        X = np.array(self.cc_return_index).reshape(ret_len, 1)
        alpha = self.alpha
        beta = self.beta

        y_mean = np.mean(y)
        y_var = np.var(y)
        y_std = np.std(y)

        sample_mean = np.mean(X)
        sample_var = np.var(X)
        sample_std = np.std(X)

        se_alpha = y_var * ((1 / ret_len) + (sample_mean**2) /
                            (ret_len * sample_var))
        se_beta = y_var * (1 / (ret_len * sample_var))

        # Non-parametric bootstrap
        print("-------2.2.2.1 Use Non-parametric Bootstrap method--------")
        beta_set.clear()
        alpha_set.clear()
        beta_t_set.clear()
        alpha_t_set.clear()

        for i in range(B):
            rs_idx = [random.randint(0, ret_len - 1) for _ in range(ret_len)]
            rs_X = X[rs_idx]
            rs_y = np.array(y)[rs_idx]

            rs_y_var = np.var(rs_y)

            rs_model = linear_model.LinearRegression()
            rs_model.fit(rs_X, rs_y)
            rs_beta = rs_model.coef_
            rs_alpha = rs_model.intercept_

            rs_mean = np.mean(rs_X)
            rs_var = np.var(rs_X)
            rs_se_alpha = rs_y_var * ((1 / ret_len) + ((rs_mean**2) /
                                                       (ret_len * rs_var)))
            rs_se_beta = rs_y_var * (1 / (ret_len * rs_var))

            rs_alpha_t = (rs_alpha - alpha) / rs_se_alpha
            rs_beta_t = (rs_beta - beta) / rs_se_beta

            beta_set.append(rs_beta)
            alpha_set.append(rs_alpha)
            alpha_t_set.append(rs_alpha_t)
            beta_t_set.append(rs_beta_t)

        beta_set.sort()
        alpha_t_set.sort()
        beta_t_set.sort()

        # non-parametric bootstrap percentile method
        print(
            "------2.2.2.1.1 Use non-parametric bootstrap percentile method-------"
        )

        print("Alpha CI:")
        print(alpha_set[lower_bound], alpha_set[higher_bound])
        print("Beta CI:")
        print(beta_set[lower_bound], beta_set[higher_bound])

        self.ci_non_para_bs_percentile_alpha_l = alpha_set[lower_bound]
        self.ci_non_para_bs_percentile_alpha_h = alpha_set[higher_bound]
        self.ci_non_para_bs_percentile_beta_l = beta_set[lower_bound]
        self.ci_non_para_bs_percentile_beta_h = beta_set[higher_bound]

        # non-parametric bootstrap t method
        print("------2.2.2.1.2 Use non-parametric bootstrap t method-------")

        print("Alpha CI:")
        print(alpha - alpha_t_set[higher_bound] * se_alpha,
              alpha - alpha_t_set[lower_bound] * se_alpha)
        print("Beta CI:")
        print(beta - beta_t_set[higher_bound] * se_beta,
              beta - beta_t_set[lower_bound] * se_beta)

        self.ci_non_para_bs_t_alpha_l = alpha - alpha_t_set[
            higher_bound] * se_alpha
        self.ci_non_para_bs_t_alpha_h = alpha - alpha_t_set[
            lower_bound] * se_alpha
        self.ci_non_para_bs_t_beta_l = beta - beta_t_set[higher_bound] * se_beta
        self.ci_non_para_bs_t_beta_h = beta - beta_t_set[lower_bound] * se_beta

        # Parametric bootstrap
        print("----------2.2.2.2 Use parametric bootstrap method-----------")
        beta_set.clear()
        alpha_set.clear()
        beta_t_set.clear()
        alpha_t_set.clear()

        para_x_samples = sample_std * matlib.randn((B, ret_len)) + sample_mean
        para_eps_samples = y_std * matlib.randn((B, ret_len)) + y_mean
        para_y_samples = beta[0] * para_x_samples + alpha + para_eps_samples

        for i in range(B):
            rs_y = para_y_samples[i].reshape((-1, 1))
            rs_X = np.array(para_x_samples[i]).reshape(ret_len, 1)
            rs_y_var = np.var(rs_y)
            rs_model = linear_model.LinearRegression()
            rs_model.fit(rs_X, rs_y)
            rs_beta = rs_model.coef_
            rs_alpha = rs_model.intercept_
            rs_mean = np.mean(rs_X)
            rs_std = np.std(rs_X)
            rs_var = np.var(rs_X)
            rs_se_alpha = rs_y_var * ((1 / ret_len) + ((rs_mean**2) /
                                                       (ret_len * rs_var)))
            rs_se_beta = rs_y_var * (1 / (ret_len * rs_var))

            rs_alpha_t = (rs_alpha - alpha) / rs_se_alpha
            rs_beta_t = (rs_beta - beta) / rs_se_beta

            beta_set.append(rs_beta)
            alpha_set.append(rs_alpha)
            alpha_t_set.append(rs_alpha_t)
            beta_t_set.append(rs_beta_t)

        alpha_set.sort()
        beta_set.sort()
        alpha_t_set.sort()
        beta_t_set.sort()

        # Parametric bootstrap method - percentile method
        print(
            "--------2.2.2.2.1 Use parametric bootstrap method - percentile method----------"
        )
        print("Alpha CI:")
        print(alpha_set[lower_bound], alpha_set[higher_bound])
        print("Beta CI:")
        print(beta_set[lower_bound], beta_set[higher_bound])

        self.ci_para_bs_percentile_alpha_l = alpha_set[lower_bound]
        self.ci_para_bs_percentile_alpha_h = alpha_set[higher_bound]
        self.ci_para_bs_percentile_beta_l = beta_set[lower_bound]
        self.ci_para_bs_percentile_beta_h = beta_set[higher_bound]

        # Parametric bootstrap method - t method

        print(
            "---------2.2.2.2.2 Use parametric bootstrap method - t method----------"
        )

        print("Alpha CI:")
        print(alpha - alpha_t_set[higher_bound] * se_alpha,\
              alpha - alpha_t_set[lower_bound] * se_alpha)
        print("Beta CI:")
        print(beta - beta_t_set[higher_bound] * se_beta,\
              beta - beta_t_set[lower_bound] * se_beta)

        self.ci_para_bs_t_alpha_l = alpha - alpha_t_set[higher_bound] * se_alpha
        self.ci_para_bs_t_alpha_h = alpha - alpha_t_set[lower_bound] * se_alpha
        self.ci_para_bs_t_beta_l = beta - beta_t_set[higher_bound] * se_beta
        self.ci_para_bs_t_beta_h = beta - beta_t_set[lower_bound] * se_beta

        # parametric bootstrap method - SEboot method
        # calculate SEboot using resampling

        print(
            "---------2.2.2.2.3 Use parametric bootstrap method - SEboot method----------"
        )

        se_boot_alpha = (sum([(i - np.mean(alpha_set))**2
                              for i in alpha_set]) / (B - 1))**0.5
        se_boot_beta = (sum([(i - np.mean(beta_set))**2
                             for i in beta_set]) / (B - 1))**0.5

        print("----置信区间 95% using +/- 2 * SE")
        print("Alpha CI:")
        print(alpha - 2 * se_boot_alpha, alpha + 2 * se_boot_alpha)
        print("Beta CI:")
        print(beta - 2 * se_boot_beta, beta + 2 * se_boot_beta)

        self.ci_para_bs_se_alpha_l = alpha - 2 * se_boot_alpha
        self.ci_para_bs_se_alpha_h = alpha + 2 * se_boot_alpha
        self.ci_para_bs_se_beta_l = beta - 2 * se_boot_beta
        self.ci_para_bs_se_beta_h = beta + 2 * se_boot_beta
Example #22
#Indexing with slices

print(arr[1:6])
print(arr2d)
print(arr2d[:2])

print(arr2d[:2, 1:])

print(arr2d[1, :2])
print(arr2d[2, :1])
print(arr2d[:, :1])

#Boolean Indexing

names = np.array(['Bob', 'Joe', 'Will', 'Bob', 'Will', 'Joe', 'Joe'])
data = randn(7, 4)

print(names == 'Bob')
print(data[names == 'Bob'])
print(data[names == 'Bob', 2:])
print(data[names == 'Bob', 3])

print(names != 'Bob')

print(data[~(names == 'Bob')])  # ~ inverts a boolean mask; unary minus raises TypeError on bool arrays

mask = (names == 'Bob') | (names == 'Will')
print(mask)

print(data[mask])
Example #23
def test_suite_1():
    n1 = 100
    n2 = 100
    n = n1+n2
    d = 5
    eta = .1
    degree = 3
    iterations = 1
    results = mat.zeros((8,5)) 
    times = mat.zeros((1,5))
    sigma = 2
    # 1st col is non-kernelized
    # 2nd col is poly-kernel 

    for itr in range(iterations):  # xrange in the original Python 2 code
        X = mat.randn(n1,d)
        Phi_X = poly.phi(X, degree)

        D0 = X + mat.rand(n2, d) / 1000  # verify identity K(X, X) = 1
        D1 = mat.randn(n2, d)  # how does the kernel perform on iid data
        D2 = mat.rand(n2, d)  # uniform rather than normal distribution
        D3 = mat.randn(n2, d) * 2 + 2  # linear transformation
        D4 = mat.power(mat.randn(n2, d) + 1, 3)  # non-linear transformation
        D5 = mat.power(X + 1, 3)  # non-linear transformation of the D0 dataset
        D6 = mat.rand(n2, d) / 100 + mat.eye(n2, d)  # totally different data - should have low similarity
        D7 = mat.rand(n2, d) / 100 + mat.eye(n2, d) * 5  # scaled version of D6

        Data = [D0, D1, D2, D3, D4, D5, D6, D7]


        for idx in range(8):
            D = Data[idx]
            start = time.time()
            results[idx, 0] += nk_bhatta(X, D, 0)
            nk = time.time()
            emp = time.time()
            results[idx, 1] += Bhattacharrya(X,D,gaussk(sigma),eta,5)
            e5 = time.time()
            results[idx, 2] += Bhattacharrya(X,D,gaussk(sigma),eta,15)
            e15 = time.time()
            results[idx, 3] += Bhattacharrya(X,D,gaussk(sigma),eta,25)
            e25 = time.time()
            nktime = nk-start
            emptime = emp-nk
            e5time = e5-emp
            e15time = e15-e5
            e25time = e25-e15
            print "nk: {:.1f}, emp: {:.1f}, e5: {:.1f}, e15: {:.1f}, e25: {:.1f}".format(nktime, emptime, e5time, e15time, e25time)
            times[0, 0] += nktime
            times[0, 4] += emptime
            times[0, 1] += e5time
            times[0, 2] += e15time
            times[0, 3] += e25time
Example #24
np.save('some_array',arr)

arr = np.load('some_array.npy')
print(arr)
###############################################################

np.savez('array_archive.npz', a=arr, b=arr)

arch = np.load('array_archive.npz')
print(arch['b'])


print('\n')
###############################################################

arr = randn(100)
np.savetxt('array_ex.txt',arr, delimiter=',')

arrtxt = np.loadtxt('array_ex.txt',delimiter=',')
print(arrtxt)

print('\n')
###############################################################

x = np.array([[1.,2.,3.],[4.,5.,6.]])
y = np.array([[6.,23.],[-1,7],[8,9]])

print(x)
print(y)
print('\n')
Example #25
import numpy as np
from numpy.matlib import randn

arr = randn(4, 4)
print(arr)

# set entries > 0 to 2, otherwise to -2
print(np.where(arr > 0, 2, -2))

# set entries > 0 to 2, otherwise keep the original value
print(np.where(arr > 0, 2, arr))

if __name__ == '__main__':
    pass
Example #26
import numpy as np
import numpy.matlib as matlib
from numpy import *
import matplotlib.pyplot as plt
import random

Num = 100

x_data = np.linspace(-3, 3, Num)
x_datawithPi = x_data * pi
s = matlib.randn(Num)

print(x_data.shape)
newb = s.flatten()
s = np.ravel(newb)

Y_data = np.sin(x_datawithPi) / x_datawithPi + 0.1 * x_data + 0.05 * s
print(Y_data)

plt.scatter(x_data, Y_data, color="black", linewidth=2)
plt.xlim(-3, 3)
plt.ylim(-2, 2)
plt.show()

data = vstack((x_data, Y_data))
savetxt("Validation\LinearModeValidation\eye.txt", data.transpose())
Example #27
 def testDownloadsConstituentsUpdatesStatus(self):
     self.market.downloader.get = Mock()
     self.market.downloader.get.return_value = DataFrame(randn(2, 1))
     self.market.download_data()
     expected_status = dict(zip(self.tickers, [True] * len(self.tickers)))
     self.assertEqual(self.market.status, expected_status)
Example #28
def learn_perceptron(neg_examples_nobias, pos_examples_nobias, w_init,
                     w_gen_feas):
    """
%% 
% Learns the weights of a perceptron for a 2-dimensional dataset and plots
% the perceptron at each iteration where an iteration is defined as one
% full pass through the data. If a generously feasible weight vector
% is provided then the visualization will also show the distance
% of the learned weight vectors to the generously feasible weight vector.
% Required Inputs:
%   neg_examples_nobias - The num_neg_examples x 2 matrix for the examples with target 0.
%       num_neg_examples is the number of examples for the negative class.
%   pos_examples_nobias - The num_pos_examples x 2 matrix for the examples with target 1.
%       num_pos_examples is the number of examples for the positive class.
%   w_init - A 3-dimensional initial weight vector. The last element is the bias.
%   w_gen_feas - A generously feasible weight vector.
% Returns:
%   w - The learned weight vector.
%%
	"""
    #%Bookkeeping
    #% Size(vector, [dimension requried]) - get size of first dimension (rows)
    #num_neg_examples = size(neg_examples_nobias,1);
    num_neg_examples = neg_examples_nobias.shape[0]
    #num_pos_examples = size(pos_examples_nobias,1);
    num_pos_examples = pos_examples_nobias.shape[0]

    num_err_history = []
    # should be array?
    w_dist_history = []
    # should be array?

    #%Here we add a column of ones to the examples in order to allow us to learn
    #%bias parameters
    #% Ones(rows, cols)
    #neg_examples = [neg_examples_nobias,ones(num_neg_examples,1)];
    neg_examples = c_[neg_examples_nobias,
                      ones(num_neg_examples)].astype(float32)
    #pos_examples = [pos_examples_nobias,ones(num_pos_examples,1)];
    pos_examples = c_[pos_examples_nobias,
                      ones(num_pos_examples)].astype(float32)

    #%If weight vectors have not been provided, initialize them appropriately
    #% exist(name, type)
    #% || is short circuit boolean or (stops when overall value determined)
    #% randn(rows, cols) - random matrix with zero mean and variance one
    #if (~exist('w_init','var') || isempty(w_init))
    if w_init is None or w_init.size == 0:  # a parameter is always in locals(), so test the value itself
        #	w = randn(3,1);
        w = randn((3, 1))
#else
    else:
        w = w_init.astype(float32)
#end
#if (~exist('w_gen_feas','var'))
    if w_gen_feas is None:  # as above, the parameter is always in locals()
        w_gen_feas = array([])  # an empty array, so the .size checks below work
#end

#%Find the data points that the perceptron has incorrectly classified
#%and record the number of errors it makes.
    iter_ = 0
    [mistakes0, mistakes1] = eval_perceptron(neg_examples, pos_examples, w)
    #num_errs = size(mistakes0,1) + size(mistakes1,1);
    num_errs = mistakes0.shape[0] + mistakes1.shape[0]

    #% (..., end, ...) end index is last entry for a particular dimension
    #num_err_history(end+1) = num_errs;
    num_err_history.append(num_errs)
    #fprintf('Number of errors in iteration %d:\t%d\n',iter,num_errs);
    print('Number of errors in iteration %d:\t%d\n' % (iter_, num_errs))
    #fprintf(['weights:\t', mat2str(w), '\n']);
    print('weights:\n', w, '\n')
    plot_perceptron(neg_examples, pos_examples, mistakes0, mistakes1,
                    num_err_history, w, w_dist_history)
    #key = input('<Press enter to continue, q to quit.>', 's');
    key = input('<Press enter to continue, q to quit.>')
    #if (key == 'q')
    if (key == 'q'):
        return
#end

#%If a generously feasible weight vector exists, record the distance
#%to it from the initial weight vector.
#if (length(w_gen_feas) ~= 0)
    if w_gen_feas.size != 0:
        #% (..., end, ...) end index is last entry for a particular dimension
        #% norm(x1 - x2) is Euclidean distance between two vectors
        #w_dist_history(end+1) = norm(w - w_gen_feas);
        w_dist_history.append(norm(w - w_gen_feas))
#end

#%Iterate until the perceptron has correctly classified all points.
#while (num_errs > 0)
    while num_errs > 0:
        iter_ = iter_ + 1

        #%Update the weights of the perceptron.
        w = update_weights(neg_examples, pos_examples, w)

        #%If a generously feasible weight vector exists, record the distance
        #%to it from the current weight vector.
        #if (length(w_gen_feas) ~= 0)
        if w_gen_feas.size != 0:
            #w_dist_history(end+1) = norm(w - w_gen_feas);
            w_dist_history.append(norm(w - w_gen_feas))
#end
#%Find the data points that the perceptron has incorrectly classified.
#%and record the number of errors it makes.
        [mistakes0, mistakes1] = eval_perceptron(neg_examples, pos_examples, w)
        #num_errs = size(mistakes0,1) + size(mistakes1,1);
        num_errs = mistakes0.shape[0] + mistakes1.shape[0]
        #num_err_history(end+1) = num_errs;
        num_err_history.append(num_errs)
        #fprintf('Number of errors in iteration %d:\t%d\n',iter,num_errs);
        print('Number of errors in iteration %d:\t%d\n' % (iter_, num_errs))
        #fprintf(['weights:\t', mat2str(w), '\n']);
        print('weights:\n', w, '\n')
        plot_perceptron(neg_examples, pos_examples, mistakes0, mistakes1,
                        num_err_history, w, w_dist_history)
        #key = input('<Press enter to continue, q to quit.>', 's');
        key = input('<Press enter to continue, q to quit.>')
        #if (key == 'q')
        if (key == 'q'):
            break
Example #29
import math
import numpy as np
import numpy.matlib as nm
import matplotlib.pyplot as plt

np.seterr(divide='ignore', invalid='ignore')

n = 200
a = nm.linspace(0, nm.pi, n // 2)  # linspace needs an integer count in Python 3
x_u = np.c_[nm.cos(a) + 0.5, nm.cos(a) - 0.5].reshape(n, 1)
u = -10 * x_u + nm.randn(n, 1)
x_v = np.c_[nm.sin(a), -nm.sin(a)].reshape(n, 1)
v = 10 * x_v + nm.randn(n, 1)
x = np.c_[u, v]
y = np.zeros((n, 1))
y[0] = 1
y[n - 1] = -1
x2 = np.sum(np.power(x, 2), 1)
hh = 2 * 1**2
k = nm.exp(-(nm.repmat(x2, 1, n) + nm.repmat(x2.T, n, 1) - 2 * x * x.T) / hh)
w = k
t_tmp1 = k**2 + 1 * np.eye(n) + 10 * k * (nm.diag(sum(w)) - w) * k
t = np.linalg.inv(t_tmp1) * (k * y)

m = 100
X = nm.linspace(-20, 20, m).T
X2 = np.power(X, 2)
U = nm.exp(
    -(nm.repmat(np.power(u, 2), 1, m) + nm.repmat(X2.T, n, 1) - 2 * u * X.T) /
    hh)
V = nm.exp(
    -(nm.repmat(np.power(v, 2), 1, m) + nm.repmat(X2.T, n, 1) - 2 * v * X.T) /
    hh)  # assumed by symmetry with U above; the snippet was cut off here
Example #30
import numpy as np
import matplotlib.pyplot as plt
from numpy.matlib import randn
import pylab

###############################################################

arr = np.arange(10)
print(np.sqrt(arr))
print("\n")
print(np.exp(arr))
print("\n")

###############################################################

x = randn(8)
y = randn(8)
print(x)
print(y)
print("\n")
print(np.maximum(x, y))
print("\n")

###############################################################

points = np.arange(-5, 5, 0.01)
xs, ys = np.meshgrid(points, points)
print(ys)
print("\n")

###############################################################
Example #31
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 12 16:01:13 2018

@author: Administrator
@description: numpy's array sort() function
"""
import numpy as np
from numpy.matlib import randn
arr = randn(8)
print("Before sorting:")
print(arr)
print("After sorting:")
print(np.sort(arr))
Example #32
S = np.array([[1, 0], [0, 1], [1, 1]])
sigma_n = 0.2
M, K = S.shape
N = 6
num_problem = 500

CD_homo_P = dict()
SVD_homo_P = dict()
QP_homo_P = dict()

U_true_set = dict()
for each in range(num_problem):
    Z_true = 1 + np.array([random.expovariate(1) for rand in range(M)])
    U_true = np.random.rand(K, N)
    X_clean = np.diag(Z_true).dot(S).dot(U_true)
    X = X_clean + (X_clean * sigma_n) * np.array(mtl.randn(M, N))

    U_true_set[str(each)] = U_true
    prot_sub = dict()
    prot_sub['S'] = S
    prot_sub['X'] = X
    hf.null_sp_dim(prot_sub)
    # SVD solver
    SVD_solution = Solvers.SVD(prot_sub)
    SVD_protein, SVD_opt = SVD_solution
    SVD_homo_P[str(each)] = SVD_protein

    # QP solver
    QP_solution = Solvers.QP(prot_sub)
    QP_protein, QP_opt = QP_solution
    QP_homo_P[str(each)] = QP_protein
Example #33
 def testDownloadsConstituentsUpdatesStatus(self):
     self.market.downloader.get = Mock()
     self.market.downloader.get.return_value = DataFrame(randn(2, 1))
     self.market.download_data()
     expected_status = dict(zip(self.tickers, [True] * len(self.tickers)))
     self.assertEqual(self.market.status, expected_status)
Example #34
def generate_latent_points(latent_dim, n_samples):
    # generate points in the latent space
    x_input = randn(latent_dim * n_samples)
    # reshape into a batch of inputs for the network
    x_input = x_input.reshape(n_samples, latent_dim)
    return x_input
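
A hedged usage sketch; randn here is numpy.random.randn, as in the GAN tutorials this helper appears in:

from numpy.random import randn

z = generate_latent_points(latent_dim=100, n_samples=16)
print(z.shape)  # (16, 100): one latent vector per generated sample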
Example #35
    print(arr * arr)
    print(arr - arr)

    arr2d = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    print(arr2d[2])
    arr3d = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])
    old_values = arr3d[0].copy()
    print('------------')
    arr3d[0] = 42
    print(arr3d)
    arr3d[0] = old_values
    print(arr3d)

    names = np.array(['Bob', 'Joe', 'Will', 'Bob', 'Will', 'Joe', 'Joe'])
    print(names)
    data = randn(7, 4)
    print(data)

    # Boolean Indexing
    # Suppose each name corresponds to a row in the data array. If we wanted to select all
    # the rows with corresponding name 'Bob'. Like arithmetic operations, comparisons
    # (such as ==) with arrays are also vectorized. Thus, comparing names with the string
    # 'Bob' yields a boolean array:
    print(names == 'Bob')
    print(data[names == 'Bob'])
    print(data[names == 'Bob', 2:],)
    print(data[names == 'Bob', 3])
    print(data[((names == 'Bob') | (names == 'Will'))])

    # Fancy Indexing
    # Fancy indexing is a term adopted by NumPy to describe indexing using integer arrays.
Example #36
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 12 16:07:17 2018

@author: Administrator
@description: the any() and all() functions
"""
import numpy as np
from numpy.matlib import randn
print(
    'Boolean values are coerced to 1 (True) and 0 (False) in the above methods. Thus, sum is often used as a means of counting True values in a boolean array:'
)
arr = randn(100)
print("Number of values greater than 0 in the random array:")
print((arr > 0).sum())
bools = np.array([False, False, True, False])
print(bools)
print("any() is True if at least one element is True:")
print(bools.any())  # True
print("all() is True only if every element is True:")
print(bools.all())  # False
bools2 = np.array([True, True, True, True])
print(bools2.all())  # True

# Between two arrays:
# any(): True if any corresponding elements of a and b are equal
# all(): True only if every corresponding element of a and b is equal
a = np.array([1, 2, 3])
b = np.array([2, 2, 3])
boolean = (a == b).all()
boolean2 = (a == b).any()
Example #37
# coding:utf-8

from matplotlib.pyplot import plot, show, figure
from numpy.matlib import randn, array, vstack, where
from scipy.cluster.vq import kmeans, vq

# generate normally distributed 2-D data
class1 = 1.5 * randn(100, 2)
class2 = randn(100, 2) + array([5, 5])
features = vstack((class1, class2))

# cluster the data with k = 2
centroids, variance = kmeans(features, 2)

# assign each data point to a centroid with the vectorized vq function:
code, distance = vq(features, centroids)

figure()
ndx = where(code == 0)[0]
plot(features[ndx, 0], features[ndx, 1], '*')
ndx = where(code == 1)[0]
plot(features[ndx, 0], features[ndx, 1], 'r.')
plot(features[:, 0], features[:, 1], 'go')
show()
Example #38
print('\n')
print(arr2d[:, :1])
print('\n')
arr2d[:2, 1:]=0
print(arr2d)
print('\n')

###############################################################

#Page 92
#Boolean Indexing

###############################################################

names = np.array(['Bob', 'Joe', 'Will', 'Bob', 'Will', 'Joe', 'Joe'])
data = randn(7,4)
print(names)
print(data)
print('\n')

print(names == 'Bob')
print('\n')
print(data[names == 'Bob'])
print('\n')
print(data[names == 'Bob',2:])
print('\n')
print(data[names == 'Bob',3])
print('\n')
print(names != 'Bob')
print('\n')
print(data[~(names == 'Bob')])  # ~ inverts the mask; unary minus is not defined for boolean arrays
Example #39
arr_slice[1] = 10
arr[:]

# n-d arrays
arr2d = np.array([[1,2,3], [4,5,6], [7,8,9]])
arr3d = np.array([[[1,2,3], [4,5,6]], [[7,8,9], [10,11,12]]])

# demo copying on 3d array
old_values = arr3d[0].copy()
arr3d[0] = 42
arr3d[0] = old_values

# Boolean indices
names = np.array(['Bob', 'Joe', 'Will', 'Bob', 'Will', 'Joe', 'Joe'])
from numpy.matlib import randn
data = randn(7,4)
bob_index = names == 'Bob'
names[bob_index]
data[bob_index]
data[bob_index, :2]
data[bob_index, :10]
data[:, bob_index]
not_bob = names != 'Bob'
data[not_bob]
data[bob_index]
data[~bob_index]
data[~not_bob]

data[data < 0] = 0
data[names != 'Joe'] = 7
Example #40
 def sample(self, x):
     m = self.inf(x, meanonly=True)
     return m + np.exp(self.kernel.lik) * np.random.randn(*m.shape)  # np.randn does not exist; use np.random.randn
Example #41
"""
import numpy as np
from numpy.matlib import randn
print("创建一维数组:")
data1 = [3, 3.3, 9, 5, 6]
arr1 = np.array(data1)
print(arr1)

print("创建二维数组:")
data2 = [[1, 2, 3, 4], [5, 6, 7, 8]]
arr2 = np.array(data2)
print(arr2)
# Type of data in array.
print("输出第一个数组的数据类型:", arr1.dtype)
print("输出第二个数组的维数:", arr2.ndim)
print("输出第三个数组的形状:", arr2.shape)
#用zeros函数创建数组
print("np.zeros(10)创建10个元素都是0的一维数组: ")
print(np.zeros(10))
print("np.zeros((3, 6))创建3行6列都是0的二维数组:")
print(np.zeros((3, 6)))
#用Empty函数创建数组,其初始值为乱值
print("np.empty((3,6))创建3行6列都是0的二维数组:")
print(np.empty((3, 6)))
#用arrange函数创建数组
print("np.arange(9)创建一维数组:")
print(np.arange(9))
#用随机函数randn创建二维数组,7行4列
print("randn(7, 4))创建数字随机的一维数组:")
data = randn(7, 4)
print(data)