def setUp(self):
        self.X = None
        self.cost = lambda X: np.exp(np.sum(X**2))

        n = self.n = 15

        Y = self.Y = rnd.randn(1, n)
        A = self.A = rnd.randn(1, n)

        # Calculate correct cost and grad...
        self.correct_cost = np.exp(np.sum(Y**2))
        self.correct_grad = correct_grad = 2 * Y * np.exp(np.sum(Y**2))

        # ... and hess
        # First form hessian matrix H
        # Convert Y and A into matrices (row vectors)
        Ymat = np.matrix(Y)
        Amat = np.matrix(A)

        diag = np.eye(n)

        H = np.exp(np.sum(Y**2)) * (4 * Ymat.T.dot(Ymat) + 2 * diag)

        # Then 'left multiply' H by A
        self.correct_hess = np.array(Amat.dot(H))
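For reference, these closed forms follow from f(x) = exp(sum(x**2)): the gradient is 2*x*f(x), the Hessian is f(x)*(4*x*x.T + 2*I), and so the Hessian applied to a vector a is f(y)*(4*y*(y@a) + 2*a). A minimal numpy-only sanity check against central finite differences (independent of the test class and its backend):

import numpy as np

rng = np.random.default_rng(0)
n = 15
y = 0.1 * rng.standard_normal(n)   # small entries keep exp() well conditioned
a = 0.1 * rng.standard_normal(n)

cost = lambda x: np.exp(np.sum(x**2))
grad = lambda x: 2 * x * cost(x)
hess_a = cost(y) * (4 * y * (y @ a) + 2 * a)   # analytic Hessian-vector product

eps = 1e-6
grad_fd = np.array([(cost(y + eps*e) - cost(y - eps*e)) / (2*eps) for e in np.eye(n)])
hess_a_fd = (grad(y + eps*a) - grad(y - eps*a)) / (2*eps)

assert np.allclose(grad(y), grad_fd)
assert np.allclose(hess_a, hess_a_fd)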
Example #2
    def setUp(self):
        self.X = None
        self.cost = lambda X: np.exp(np.sum(X**2))

        n = self.n = 15

        Y = self.Y = rnd.randn(1, n)
        A = self.A = rnd.randn(1, n)

        # Calculate correct cost and grad...
        self.correct_cost = np.exp(np.sum(Y ** 2))
        self.correct_grad = correct_grad = 2 * Y * np.exp(np.sum(Y ** 2))

        # ... and hess
        # First form hessian matrix H
        # Convert Y and A into matrices (row vectors)
        Ymat = np.matrix(Y)
        Amat = np.matrix(A)

        diag = np.eye(n)

        H = np.exp(np.sum(Y ** 2)) * (4 * Ymat.T.dot(Ymat) + 2 * diag)

        # Then 'left multiply' H by A
        self.correct_hess = np.array(Amat.dot(H))

        self.backend = AutogradBackend()
Example #3
    def setUp(self):
        np.seterr(all='raise')
        self.X = None
        self.cost = lambda X: np.exp(np.sum(X**2))

        n = self.n = 15

        Y = self.Y = rnd.randn(n)
        A = self.A = rnd.randn(n)

        # Calculate correct cost and grad...
        self.correct_cost = np.exp(np.sum(Y**2))
        self.correct_grad = correct_grad = 2 * Y * np.exp(np.sum(Y**2))

        # ... and hess
        # First form hessian matrix H
        # Convert Y and A into matrices (row vectors)
        Ymat = np.matrix(Y)
        Amat = np.matrix(A)

        diag = np.eye(n)

        H = np.exp(np.sum(Y**2)) * (4 * Ymat.T.dot(Ymat) + 2 * diag)

        # Then 'left multiply' H by A
        self.correct_hess = np.squeeze(np.array(Amat.dot(H)))

        self.backend = AutogradBackend()
Example #4
 def f(t):
     if num_vs_sym:
         # numpy matrix
         M = np.matrix([[0, 1], [-w2(t), 0]])
     else:
         # sympy matrix
         M = sym.Matrix([[0, 1], [-w2(t), 0]])
     return M
Example #5
 def load_data(self):
     # load data
     data = np.matrix(
         np.genfromtxt(
             '../../mlrefined_datasets/superlearn_datasets/bacteria_data.csv',
             delimiter=','))
     self.x = np.asarray(data[:, 0])
     self.y = np.asarray(data[:, 1])
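np.matrix is deprecated in current NumPy; a sketch of the same load using plain ndarrays (same relative CSV path as above, assumed to exist):

import numpy as np

def load_data_ndarray(path='../../mlrefined_datasets/superlearn_datasets/bacteria_data.csv'):
    # Read the two-column CSV directly into a 2-D ndarray.
    data = np.genfromtxt(path, delimiter=',')
    x = data[:, 0].reshape(-1, 1)   # keep the column-vector shape used above
    y = data[:, 1].reshape(-1, 1)
    return x, y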
Example #6
    def get_MWhessian(self):
        temp = []
        for i in self.hess.splitlines():
            temp.append(i.split())

        H0 = np.matrix(temp, float)
        A = np.dot(H0, self.MM)
        mwH = np.dot(A, self.MM)

        return mwH
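Here self.hess is a whitespace-separated Hessian block and self.MM is presumably the diagonal matrix of inverse square-root masses, so the double product gives the mass-weighted Hessian. A standalone sketch of the same transformation with explicit, illustrative inputs:

import numpy as np

# Hypothetical two-atom system: 1/sqrt(mass) repeated once per Cartesian coordinate.
masses = np.array([1.008, 15.999])                 # illustrative masses in amu
MM = np.diag(np.repeat(1.0 / np.sqrt(masses), 3))  # 6 x 6 for 3 coordinates per atom

H = np.random.rand(6, 6)
H = (H + H.T) / 2                                  # symmetric stand-in for the parsed Hessian

mwH = MM @ H @ MM                                  # same sandwich product as get_MWhessian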
Example #7

def Airy_sol(t):
	Ai0, Aip0, Bi0, Bip0 = special.airy(0)
	M = (1/(-Ai0*Bip0 + Aip0*Bi0))*np.matrix([[-Bip0, -Bi0], [+Aip0, Ai0]])
	ab = M @ Airy["x0"].reshape(2, 1)	
	Ai, Aip, Bi, Bip = special.airy(-t)
	a = ab[0, 0]
	b = ab[1, 0]
	x_true = a*Ai + b*Bi
	dxdt_true = a*Aip + b*Bip
	x = np.hstack((x_true.reshape(t.size, 1),dxdt_true.reshape(t.size, 1))) 
	return x
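Airy_sol maps the initial state stored in a module-level Airy dict to the coefficients of the two Airy-function solutions and evaluates the solution on a time grid. A minimal usage sketch, assuming numpy as np and scipy's special module are imported as in the original script, with a hypothetical initial condition:

import numpy as np

# Hypothetical initial state [x0, dxdt0]; the real dict is defined elsewhere.
Airy = {"x0": np.array([1.0, 0.0])}

t = np.linspace(0.0, 10.0, 100)
sol = Airy_sol(t)       # columns: x(t) and dx/dt(t)
print(sol.shape)        # (100, 2)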
Example #8
    def get_frequencies(self):

        self.e, self.l = np.linalg.eigh(self.get_MWhessian())

        self.Q = np.matrix(self.MM) * np.matrix(self.l)
        freq = []

        A =  np.sqrt(hartree2J)
        B = A/(amu2kg*bohr2m**2)
        C = (c*2*np.pi)

        conv = B / C

        for i in self.e:
            if i >= 0:
                freq.append(i**0.5*conv)
            else:
                freq.append((-i)**0.5*conv)
        return freq
Example #9
 def _getRay(cam_param):
     """Inner function with camera parameter to calculate derivative against.
     """
     az, el, distance_ratio = cam_param
     K, RT = getBlenderProj(az, el, distance_ratio, img_w=img_w, img_h=img_h)
     W2B = getBinvoxProj(voxel_d, voxel_t_x, voxel_t_y, voxel_t_z, voxel_s)
     # Calculate camera location from camera matrix.
     invrot = RT[:, :3].transpose()
     invloc = - np.matrix(invrot) * np.matrix(RT[:, 3]).T
     camloc = np.matrix((*np.array(invloc.T)[0], 1)) * np.linalg.inv(W2B).T
     camloc = camloc[0, :3] / camloc[0, 3]
     # Calculate direction vector of ray for each pixel of the image.
     pixloc = np.matrix(list(itertools.product(range(img_h),
                                               range(img_w),
                                               (1,))))
     pixloc = pixloc * (np.linalg.inv(W2B) *
                        np.linalg.pinv(RT) *
                        np.linalg.inv(K)).T
     pixloc = pixloc[:, :3] / pixloc[:, 3]
     raydir = camloc - pixloc
     return np.array(camloc)[0].astype(np.float32), raydir.astype(np.float32)
Example #10
    def test_safe_matmul(self):
        def safe_matmul_todense(a, b):
            result = obj_lib.safe_matmul(a, b)
            if sp.sparse.issparse(result):
                return np.asarray(result.todense())
            else:
                return np.asarray(result)

        a = np.random.random((3, 3))
        b = np.random.random((3, 3))
        ab = np.matmul(a, b)

        np_test.assert_array_almost_equal(ab, safe_matmul_todense(a, b))
        np_test.assert_array_almost_equal(
            ab, safe_matmul_todense(sp.sparse.csr_matrix(a), b))
        np_test.assert_array_almost_equal(
            ab, safe_matmul_todense(a, sp.sparse.csr_matrix(b)))
        np_test.assert_array_almost_equal(ab,
                                          safe_matmul_todense(np.matrix(a), b))
        np_test.assert_array_almost_equal(ab,
                                          safe_matmul_todense(a, np.matrix(b)))
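obj_lib.safe_matmul itself is external to this test; a plausible sketch of such a helper, assuming its only job is to multiply any mix of ndarray, np.matrix and scipy.sparse operands:

import numpy as np
import scipy.sparse as sparse

def safe_matmul_sketch(a, b):
    # Let a sparse operand drive the multiplication; otherwise coerce
    # np.matrix inputs to plain ndarrays and use np.matmul.
    if sparse.issparse(a):
        return a @ b
    if sparse.issparse(b):
        return (b.T @ np.asarray(a).T).T
    return np.matmul(np.asarray(a), np.asarray(b))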
Example #11
def test():
    size = 1000
    mu = 2
    var = 1
    dx = 20
    dy = 20
    f1 = np.square
    f2 = np.square

    # generate confounding variables for 1st sample
    mean = np.zeros(dx)
    cov = np.eye(dx)
    X1 = np.random.multivariate_normal(mean, cov, size)
    X1 = np.matrix(X1)

    # generate confounding variables for 2nd sample
    mean = np.zeros(dx) + mu
    cov = np.eye(dx) * var
    X2 = np.random.multivariate_normal(mean, cov, size)
    X2 = np.matrix(X2)

    # Define data generating process for y
    Axy = np.random.rand(dx, dy)
    for i in range(dy):
        Axy[:, i] = Axy[:, i] / np.linalg.norm(Axy[:, i], ord=1)
    Axy = np.matrix(Axy)

    print(Axy[:10, ])
    print(f1(X1 * Axy).shape)
    print(f1(X1 * Axy)[:10])
    #print(min(f1(X1)))

    #print(max(f1(X1 * Axy)))
    #print(min(f1(X1 * Axy)))

    #print(max(f2(X2 * Axy)))
    #print(min(f2(X2 * Axy)))

    print(np.mean(np.abs(f1(X1 * Axy))))
    print(np.mean(np.abs(f2(X2 * Axy))))
Example #12
def getBinvoxProj(voxel_d, voxel_t_x, voxel_t_y, voxel_t_z, voxel_s):
    """Calculate 4x4 projection matrix from voxel to obj coordinate"""
    # Calculate rotation and translation matrices.
    # Step 1: Voxel coordinate to binvox coordinate.
    S_vox2bin = Matrix.Scale(1 / (voxel_d - 1), 4)

    # Step 2: Binvox coordinate to obj coordinate.
    voxel_t = min(voxel_t_x, voxel_t_y, voxel_t_z)
    RST_bin2obj = (Matrix.Rotation(radians(90), 4, 'X') *
                   Matrix.Translation([voxel_t] * 3) *
                   Matrix.Scale(voxel_s, 4))

    return np.matrix(RST_bin2obj * S_vox2bin)
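Matrix here is Blender's mathutils.Matrix (pre-2.8 API, where * is matrix multiplication). A pure-numpy sketch of the same composition, assuming the standard homogeneous-coordinate forms of the scale, rotation and translation matrices:

import numpy as np
from math import radians

def getBinvoxProj_np(voxel_d, voxel_t_x, voxel_t_y, voxel_t_z, voxel_s):
    # Step 1: voxel coordinate to binvox coordinate (uniform scale).
    S_vox2bin = np.diag([1.0 / (voxel_d - 1)] * 3 + [1.0])

    # Step 2: binvox coordinate to obj coordinate (rotate 90 deg about X, translate, scale).
    voxel_t = min(voxel_t_x, voxel_t_y, voxel_t_z)
    c, s = np.cos(radians(90)), np.sin(radians(90))
    R_x = np.array([[1, 0, 0, 0], [0, c, -s, 0], [0, s, c, 0], [0, 0, 0, 1]], dtype=float)
    T = np.eye(4)
    T[:3, 3] = voxel_t
    S_obj = np.diag([voxel_s] * 3 + [1.0])
    RST_bin2obj = R_x @ T @ S_obj

    return RST_bin2obj @ S_vox2bin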
Example #13
def getBlenderProj(az, el, distance_ratio, img_w=IMG_W, img_h=IMG_H):
    """Calculate 4x3 3D to 2D projection matrix given viewpoint parameters."""

    # Calculate intrinsic matrix.
    scale = RESOLUTION_PCT / 100
    f_u = F_MM * img_w * scale / SENSOR_SIZE_MM
    f_v = F_MM * img_h * scale * PIXEL_ASPECT_RATIO / SENSOR_SIZE_MM
    u_0 = img_w * scale / 2
    v_0 = img_h * scale / 2
    K = np.matrix(((f_u, SKEW, u_0), (0, f_v, v_0), (0, 0, 1)))

    # Calculate rotation and translation matrices.
    # Step 1: World coordinate to object coordinate.
    sa = np.sin(np.radians(-az))
    ca = np.cos(np.radians(-az))
    se = np.sin(np.radians(-el))
    ce = np.cos(np.radians(-el))
    R_world2obj = np.transpose(np.matrix(((ca * ce, -sa, ca * se),
                                          (sa * ce, ca, sa * se),
                                          (-se, 0, ce))))

    # Step 2: Object coordinate to camera coordinate.
    R_obj2cam = np.transpose(np.matrix(CAM_ROT))
    R_world2cam = R_obj2cam * R_world2obj
    cam_location = np.transpose(np.matrix((distance_ratio * CAM_MAX_DIST,
                                           0,
                                           0)))
    T_world2cam = -1 * R_obj2cam * cam_location

    # Step 3: Fix blender camera's y and z axis direction.
    R_camfix = np.matrix(((1, 0, 0), (0, -1, 0), (0, 0, -1)))
    R_world2cam = R_camfix * R_world2cam
    T_world2cam = R_camfix * T_world2cam

    RT = np.hstack((R_world2cam, T_world2cam))

    return K, RT
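A usage sketch: assuming the module-level camera constants (F_MM, SENSOR_SIZE_MM, PIXEL_ASPECT_RATIO, RESOLUTION_PCT, SKEW, CAM_ROT, CAM_MAX_DIST, IMG_W, IMG_H) are defined as in the original module, the returned K and RT project a homogeneous world point to pixel coordinates:

import numpy as np

K, RT = getBlenderProj(az=30.0, el=20.0, distance_ratio=0.7)

X_world = np.matrix([[0.1], [0.2], [0.3], [1.0]])   # hypothetical world point

x_cam = RT * X_world            # 3x1 point in camera coordinates
x_img = K * x_cam               # homogeneous image coordinates
u = x_img[0, 0] / x_img[2, 0]   # pixel column
v = x_img[1, 0] / x_img[2, 0]   # pixel row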
Example #14
    def local_trans(self, joint_name, joint_angle):
        '''calculate local transformation of one joint

        :param str joint_name: the name of joint
        :param float joint_angle: the angle of joint in radians
        :return: transformation
        :rtype: 4x4 matrix
        '''
        # determine right rotation_matrix values
        normal_value_for_2 = sqrt(0.5)

        if joint_name in self.yaw_rotators:
            ux, uy, uz = 0, 0, 1
        elif joint_name in self.roll_rotators:
            ux, uy, uz = 1, 0, 0
        elif joint_name in self.pitch_rotators:
            ux, uy, uz = 0, 1, 0
        elif joint_name == 'LHipYawPitch':
            ux, uy, uz = 0, normal_value_for_2, -normal_value_for_2
        elif joint_name == 'RHipYawPitch':
            ux, uy, uz = 0, -normal_value_for_2, -normal_value_for_2
        else:
            print("weird jointname")
            ux, uy, uz = 0, 0, 0

        cost = cos(joint_angle)
        sint = sin(joint_angle)
        distance_lengths = self.distances[joint_name]
        # rotation matrix in 3d around any normalized axis
        T = matrix([[
            cost + ux**2 * (1 - cost), ux * uy * (1 - cost) - uz * sint,
            ux * uz * (1 - cost) + uy * sint, distance_lengths[0]
        ],
                    [
                        uy * ux * (1 - cost) + uz * sint,
                        cost + uy**2 * (1 - cost),
                        uy * uz * (1 - cost) - ux * sint, distance_lengths[1]
                    ],
                    [
                        uz * ux * (1 - cost) - uy * sint,
                        uz * uy * (1 - cost) + ux * sint,
                        cost + uz**2 * (1 - cost), distance_lengths[2]
                    ], [0, 0, 0, 1]])

        return T
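The 3x3 block of T is the axis-angle (Rodrigues) rotation matrix about the unit axis (ux, uy, uz); a quick cross-check against scipy's rotation class, assuming scipy is available:

import numpy as np
from scipy.spatial.transform import Rotation

u = np.array([0.0, 0.0, 1.0])   # e.g. the yaw axis used above
theta = 0.3

R_ref = Rotation.from_rotvec(theta * u).as_matrix()

c, s = np.cos(theta), np.sin(theta)
ux, uy, uz = u
R = np.array([
    [c + ux**2 * (1 - c),    ux*uy*(1 - c) - uz*s,  ux*uz*(1 - c) + uy*s],
    [uy*ux*(1 - c) + uz*s,   c + uy**2 * (1 - c),   uy*uz*(1 - c) - ux*s],
    [uz*ux*(1 - c) - uy*s,   uz*uy*(1 - c) + ux*s,  c + uz**2 * (1 - c)],
])

assert np.allclose(R, R_ref)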
Example #15
    def _generate_data(self, n):
        def _is_pos_def(x):
            return np.all(np.linalg.eigvals(x) > 0)
        np.random.seed(self.rnd_seed)
        A = np.matrix(np.random.rand(n, n) - np.ones([n, n]) * 0.5)
        A = (A + A.T)/2
        w, _ = np.linalg.eig(A)
        # d = Diagonal offset; smaller value = more elliptic i.e. harder for GD
        d = self.params.get('d')
        if not _is_pos_def(A):
            lmin = min(w)
            A = A + (abs(lmin) + d) * np.identity(n)
        assert(_is_pos_def(A))
        w, _ = np.linalg.eig(A)
        # condition_number = max(w) / min(w)

        b = (np.random.rand(n) - np.ones(n) * 0.5).reshape(n, 1)
        c = np.random.rand() - 0.5

        return A, b, c
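The (A, b, c) triple presumably parametrizes a quadratic objective f(x) = 0.5*x.T*A*x + b.T*x + c whose conditioning is controlled by d; a sketch of that objective and its gradient, under that assumption:

import numpy as np

def make_quadratic(A, b, c):
    # f and its gradient for a symmetric A, as produced by _generate_data.
    f = lambda x: 0.5 * (x.T @ A @ x).item() + (b.T @ x).item() + c
    grad = lambda x: np.asarray(A @ x + b)
    return f, grad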
Example #16
def get_xm(x_var, x_base):
    return np.matrix(
        [splinify(np.min(x_var), np.max(x_base), 1.0, x) for x in x_var])
Example #17
def error(X, Y, a):
    a = np.matrix([[a], [0.666]])
    e = X * a - Y
    return (e.transpose() * e).item(0)
Example #18
    def setUp(self):
        np.seterr(all='raise')

        def f(x):
            return (np.exp(np.sum(x[0]**2)) + np.exp(np.sum(x[1]**2)) +
                    np.exp(np.sum(x[2]**2)))

        self.cost = f

        n1 = self.n1 = 3
        n2 = self.n2 = 4
        n3 = self.n3 = 5
        n4 = self.n4 = 6
        n5 = self.n5 = 7
        n6 = self.n6 = 8

        self.y = y = (rnd.randn(n1), rnd.randn(n2, n3), rnd.randn(n4, n5, n6))
        self.a = a = (rnd.randn(n1), rnd.randn(n2, n3), rnd.randn(n4, n5, n6))

        self.correct_cost = f(y)

        # CALCULATE CORRECT GRAD
        g1 = 2 * y[0] * np.exp(np.sum(y[0]**2))
        g2 = 2 * y[1] * np.exp(np.sum(y[1]**2))
        g3 = 2 * y[2] * np.exp(np.sum(y[2]**2))

        self.correct_grad = (g1, g2, g3)

        # CALCULATE CORRECT HESS
        # 1. VECTOR
        Ymat = np.matrix(y[0])
        Amat = np.matrix(a[0])

        diag = np.eye(n1)

        H = np.exp(np.sum(y[0]**2)) * (4 * Ymat.T.dot(Ymat) + 2 * diag)

        # Then 'left multiply' H by A
        h1 = np.array(Amat.dot(H)).flatten()

        # 2. MATRIX
        # First form hessian tensor H (4th order)
        Y1 = y[1].reshape(n2, n3, 1, 1)
        Y2 = y[1].reshape(1, 1, n2, n3)

        # Create an m x n x m x n array with diag[i,j,k,l] == 1 iff
        # (i == k and j == l), this is a 'diagonal' tensor.
        diag = np.eye(n2 * n3).reshape(n2, n3, n2, n3)

        H = np.exp(np.sum(y[1]**2)) * (4 * Y1 * Y2 + 2 * diag)

        # Then 'right multiply' H by A
        Atensor = a[1].reshape(1, 1, n2, n3)

        h2 = np.sum(H * Atensor, axis=(2, 3))

        # 3. Tensor3
        # First form hessian tensor H (6th order)
        Y1 = y[2].reshape(n4, n5, n6, 1, 1, 1)
        Y2 = y[2].reshape(1, 1, 1, n4, n5, n6)

        # Create an n1 x n2 x n3 x n1 x n2 x n3 diagonal tensor
        diag = np.eye(n4 * n5 * n6).reshape(n4, n5, n6, n4, n5, n6)

        H = np.exp(np.sum(y[2]**2)) * (4 * Y1 * Y2 + 2 * diag)

        # Then 'right multiply' H by A
        Atensor = a[2].reshape(1, 1, 1, n4, n5, n6)

        h3 = np.sum(H * Atensor, axis=(3, 4, 5))

        self.correct_hess = (h1, h2, h3)
        self.backend = AutogradBackend()
Example #19

 def translation(self, x, y, z):
     return matrix([[1, 0, 0, x], [0, 1, 0, y], [0, 0, 1, z], [0, 0, 0, 1]])
Example #20

time_series =[4,5,4,3,6,2,4,5,10,6,8,2,6,17,23,13,21,28,24,20,40,27,42,33,43,37,57,71,44,56,53,52,47,26,27,21,21,26,34,37,17,19,25,18,21,17,17,16,16,15,23,16,17,12,17,10,15,19,21,14,18,13,14,18,23,25,62,60,76,66,64,68,89,92,140,116,142,129,140,140,127,129,169,141,108,78,70,81,104,90,85,55,53,65,33,38,59,40,37,29,30,30,28,23,24,29,26,23,20,19,20,26,29,31,28,26,32,35,33,30,52,59,67,65,74,70,61,53,76,61,57,44,34,47,60,60,53,36,31,30,32,28,33,33,35,22,13,13,21,17,11,8,8,6,6,7,12,17,10,10,18,19,12,22,12,21,18,16,16,22,17,25,23,12,25,28,27,18,23,23,29,38,36,43,46,31,25,40,31,38,30,22,31,26,35,36,39,25,31,37,33,25,24,18,23,13,18,14,17,22,13,24,31,34,31,31,38,49,42,49,55,80,84,72,89,115,179,202,272,302,395,426,461,381,333,353,410,364,359,288,221,149,112,154,91,72,56,46,37,26,17,17,20,11,7,16,14,16,5,2,6,5,4,3,4,16,8,7,10,14,7,9,11,23,17,19,24,17,28,40,33,31,33,29,30,36,48,40,28,36,19,34,23,17,17,23,14,20,13,23,20,16,16,23,14,15,4,5,5,11,11,7,4,6,5,2,4,2,4,6,6,4,6,11,16,9,12,13,27,21,19,17,24,27,30,29,25,35,33,30,29,31,29,22,27,24,26,29,22,33,24,30,20,17,24,28,18,13,9,14,11,11,19,10,8,8,9,3,7,14,4,9,14,7,9,3,3,14,12,10,21,26,47,42,31,34,33,52,56,70,112,70,47,48,49,66,56,61,67,64,68,49,50,56,75,63,62,41,50,34,31,38,30,32,26,30,36,35,46,48,44,51,59,71,102,128,127,150,191,256,329,263,220,204,181,99,54,80,102,127,73,68,64,55,67,84,85,67,73,89,68,59,56,77,75,47,50,42,28,37,37,27,12,15,22,8,15,17,10,9,11,20,13,11,16,11,7,17,14,13,15,30,25,40,44,25,21,48,56,60,45,55,32,46,61,42,37,43,34,40,25,16,17,17,16,23,18,18,9,7,7,4,3,2,8,3,1,1,2,3,3,2,0,0,2,2,0,6,3,6,2,3,2,4,5,2,9,2,4,8,6,3,11,14,15,20,9,20,28,38,30,30,23,16,22,28,14,17,20,17,10,13,20,9,18,9,8,19,11,4,6,6,8,13,8,8,5,16,12,11,18,10,22,14,16,18,27,38,35,41,51,65,55,54,62,64,56,65,71,75,71,72,47,27,35,25,19,37,38,34,26,19,18,22,16,18,6,12,6,6,3,7,6,1,3,2,2,1,10,3,3,1,1,2,6,3,3,5,4,7,6,5,7,6,4,4,7,9,5,5,10,6,13,6,5,5,9,3,6,11,7,7,15,9,6,6,6,7,10,8,7,12,3,2,7,5,5,7,7,7,7,10,13,10,14,11,20,25,17,18,25,21,31,32,26,35,28,37,41,34,30,39,39,39,34,30,37,29,26,15,22,15,20,14,10,21,14,14,9,11,5,6,7,11,4,3,2,6,10,7,5,3,12,13,10,13,13,8,21,18,8,7,20,14,14,7,14,10,13,27,13,18,16,16,20,17,4,15,8,6,12,15,11,10,15,17,7,7,8,9,12,12,5,4,11,4,5,7,1,1,4,2,6,3,4,10,12,21,26,21,30,45,56,75,83,82,126,119,137,131,112,82,73,43,55,55,53,46,43,29,22,26,13,17,8,13,10,17,19,9,9,9,3,7,7,0,2,3,3,1,3,3,3,7,3,5,11,5,5,6,6,4,4,8,14,12,16,10,16,18,15,23,17,33,15,13,11,14,17,19,20,12,21,7,19,10,13,10,8,21,11,9,14,14,15,18,16,12,20,8,3,13,4,1,10,8,13,10,21,18,21,34,25,34,33,40,42,36,72,75,76,92,71,112,106,101,170,135,106,68,48,48,26,33,29,17,12,13,17,15,14,15,10,9,2,6,8,5,1,2,3,4,3,1,3,5,2,3,2,3,2,2,3,4,3,4,4,4,7,6,15,11,9,9,12,13,13,13,20,28,45,28,34,41,36,38,48,27,23,28,42,30,18,38,28,36,44,41,35,28,28,22,26,24,9,21,10,15]


num_particles = 50
#style.use('ggplot')

import matplotlib.pyplot as plt

#-(1.0/(2*observation_variance))*(theta_i  -  time_series[t])**2  + np.log(1.0/np.sqrt(np.pi*2*observation_variance))
observation_variance = .00000000001
transition_variance = 1000
seasonality = 4

G = np.matrix([[np.cos(2*np.pi/seasonality),np.sin(2*np.pi/seasonality)],[-np.sin(2*np.pi/seasonality),np.cos(2*np.pi/seasonality)]])
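# Sanity check: G rotates the 2-D latent state by 2*pi/seasonality per step,
# so one full period of transitions brings the state back to where it started.
assert np.allclose(np.linalg.matrix_power(G, seasonality), np.eye(2))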

class StateSpaceModel:

    def lnprob_theta_i(self, theta_i, theta_t_minus_1, time_series, t):
        # log-probability of the Poisson observation plus the log transition density
        lnprob_theta_i = -np.exp(theta_i[0]) + time_series[t]*theta_i[0] - np.sum(np.log(np.arange(time_series[t])+1))
        transition_sum = 0
        for theta_t_minus_1_i in theta_t_minus_1:
            tmp = np.transpose(np.matmul(G, theta_t_minus_1_i.reshape((-1, 1)))).tolist()[0]
            transition_sum += 1.0/(np.sqrt(2*np.pi*transition_variance))*np.exp(-.5*(1.0/transition_variance)*((theta_i - tmp)**2))
        return (lnprob_theta_i + np.log(transition_sum))
    
    def dlnprob(self, theta_i,theta_t_minus_1,time_series, t):
Example #21

 def rotY(self, angle):
     return matrix([[cos(angle), 0, sin(angle), 0], [0, 1, 0, 0],
                    [-sin(angle), 0, cos(angle), 0], [0, 0, 0, 1]])
Example #22

 def rotZ(self, angle):
     return matrix([[cos(angle), -sin(angle), 0, 0],
                    [sin(angle), cos(angle), 0, 0], [0, 0, 1, 0],
                    [0, 0, 0, 1]])
Example #23
    def setUp(self):
        def f(x):
            return (np.exp(np.sum(x[0]**2)) + np.exp(np.sum(x[1]**2)) +
                    np.exp(np.sum(x[2]**2)))

        self.cost = f

        n1 = self.n1 = 3
        n2 = self.n2 = 4
        n3 = self.n3 = 5
        n4 = self.n4 = 6
        n5 = self.n5 = 7
        n6 = self.n6 = 8

        self.y = y = (rnd.randn(n1), rnd.randn(n2, n3), rnd.randn(n4, n5, n6))
        self.a = a = (rnd.randn(n1), rnd.randn(n2, n3), rnd.randn(n4, n5, n6))

        self.correct_cost = f(y)

        # CALCULATE CORRECT GRAD
        g1 = 2 * y[0] * np.exp(np.sum(y[0] ** 2))
        g2 = 2 * y[1] * np.exp(np.sum(y[1] ** 2))
        g3 = 2 * y[2] * np.exp(np.sum(y[2] ** 2))

        self.correct_grad = (g1, g2, g3)

        # CALCULATE CORRECT HESS
        # 1. VECTOR
        Ymat = np.matrix(y[0])
        Amat = np.matrix(a[0])

        diag = np.eye(n1)

        H = np.exp(np.sum(y[0] ** 2)) * (4 * Ymat.T.dot(Ymat) + 2 * diag)

        # Then 'left multiply' H by A
        h1 = np.array(Amat.dot(H)).flatten()

        # 2. MATRIX
        # First form hessian tensor H (4th order)
        Y1 = y[1].reshape(n2, n3, 1, 1)
        Y2 = y[1].reshape(1, 1, n2, n3)

        # Create an m x n x m x n array with diag[i,j,k,l] == 1 iff
        # (i == k and j == l), this is a 'diagonal' tensor.
        diag = np.eye(n2 * n3).reshape(n2, n3, n2, n3)

        H = np.exp(np.sum(y[1] ** 2)) * (4 * Y1 * Y2 + 2 * diag)

        # Then 'right multiply' H by A
        Atensor = a[1].reshape(1, 1, n2, n3)

        h2 = np.sum(H * Atensor, axis=(2, 3))

        # 3. Tensor3
        # First form hessian tensor H (6th order)
        Y1 = y[2].reshape(n4, n5, n6, 1, 1, 1)
        Y2 = y[2].reshape(1, 1, 1, n4, n5, n6)

        # Create an n1 x n2 x n3 x n1 x n2 x n3 diagonal tensor
        diag = np.eye(n4 * n5 * n6).reshape(n4, n5, n6, n4, n5, n6)

        H = np.exp(np.sum(y[2] ** 2)) * (4 * Y1 * Y2 + 2 * diag)

        # Then 'right multiply' H by A
        Atensor = a[2].reshape(1, 1, 1, n4, n5, n6)

        h3 = np.sum(H * Atensor, axis=(3, 4, 5))

        self.correct_hess = (h1, h2, h3)
        self.backend = AutogradBackend()
Example #24

 def rotX(self, angle):
     return matrix([[1, 0, 0, 0], [0, cos(angle), -sin(angle), 0],
                    [0, sin(angle), cos(angle), 0], [0, 0, 0, 1]])
Example #25
def get_y_reg(a, x_spline):
    return np.matrix([[np.dot(x, a).item(0)] for x in x_spline])
Example #26
def generate_samples_random(size=1000,
                            mu=0,
                            var=1,
                            dx=1,
                            dy=1,
                            noise="gaussian",
                            f1='linear',
                            f2='linear',
                            seed=None):
    '''Generate null or alternative nonlinear samples with different degrees of confounding.
    1. X1 and X2 are independent Gaussians (the confounding variables).
    2. Y1 = f1(X1) + noise1 and Y2 = f2(X2) + noise2, with X mapped through Axy first when dx != dy.
    Arguments:
        size: number of samples
        mu: mean of the second sample X2
        var: variance of the second sample X2
        dx: dimension of X
        dy: dimension of Y
        noise: type of noise ('gaussian' or 'exp')
        f1, f2: each one of {'linear', 'square', 'cube', 'tanh', 'cos'}
        seed: random seed (a fresh seed is drawn when None)

    Output:
        X1, X2, Y1, Y2 as plain ndarrays
    '''
    if seed is None:
        np.random.seed()
    else:
        np.random.seed(seed)

    if f1 == 'linear':
        f1 = same
    if f1 == 'square':
        f1 = np.square
    if f1 == 'tanh':
        f1 = np.tanh
    if f1 == 'cos':
        f1 = np.cos
    if f1 == 'cube':
        f1 = cube

    if f2 == 'linear':
        f2 = same
    if f2 == 'square':
        f2 = np.square
    if f2 == 'tanh':
        f2 = np.tanh
    if f2 == 'cos':
        f2 = np.cos
    if f2 == 'cube':
        f2 = cube

    # generate confounding variables for 1st sample
    mean = np.zeros(dx)
    cov = np.eye(dx)
    X1 = np.random.multivariate_normal(mean, cov, size)
    X1 = np.matrix(X1)

    # generate confounding variables for 2nd sample
    mean = np.zeros(dx) + mu
    cov = np.eye(dx) * var
    X2 = np.random.multivariate_normal(mean, cov, size)
    X2 = np.matrix(X2)

    Axy = np.random.rand(dx, dy)
    Axy = np.matrix(Axy)

    if noise == 'gaussian':
        noise1 = np.random.multivariate_normal(np.zeros(dy),
                                               np.eye(dy) * 0.5, size)
        noise2 = np.random.multivariate_normal(np.zeros(dy),
                                               np.eye(dy) * 0.5, size)
        noise1 = np.matrix(noise1)
        noise2 = np.matrix(noise2)

    elif noise == 'exp':
        noise1 = np.random.exponential(scale=1.0, size=size)
        noise2 = np.random.exponential(scale=1.0, size=size)
        noise1 = np.matrix(noise1)
        noise2 = np.matrix(noise2)

    if dx == dy:
        Y1 = X1
        Y2 = X2
        Y1[:, 0] = f1(X1[:, 0]) + noise1[:, 0]
        Y2[:, 0] = f2(X2[:, 0]) + noise2[:, 0]
        Y1[:, 1:] = f1(X1[:, 1:]) + noise1[:, 1:]
        Y2[:, 1:] = f2(X2[:, 1:]) + noise2[:, 1:]
    else:
        Y1 = f1(X1 * Axy) + noise1
        Y2 = f2(X2 * Axy) + noise2

    return np.array(X1), np.array(X2), np.array(Y1), np.array(Y2)
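A usage sketch of the generator above (the 'linear' and 'cube' options additionally rely on `same` and `cube` helpers defined elsewhere in the original module, so they are avoided here):

X1, X2, Y1, Y2 = generate_samples_random(size=200, mu=1.0, var=2.0,
                                          dx=5, dy=3, f1='square', f2='tanh',
                                          noise='gaussian', seed=0)
print(X1.shape, Y1.shape)   # (200, 5) (200, 3)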
Example #27
import autograd.numpy as np
from autograd import grad

import math
import random
from numpy.linalg import inv
import matplotlib.pyplot as plt

nbSamples = 1000
X = np.matrix([[random.random(), 1] for x in range(nbSamples)])
Y = np.matrix([3 * x[0].item(0) + 0.666 for x in X]).transpose()


def error(X, Y, a):
    a = np.matrix([[a], [0.666]])
    e = X * a - Y
    return (e.transpose() * e).item(0)


def genError(X, Y):
    return lambda a: error(X, Y, a)


err = genError(X, Y)
xs = [x * 6.0 / nbSamples for x in range(nbSamples)]
e = [err(x) for x in xs]

grad_err = grad(err)


def newtonStep(f0, df, x0):
Example #28

def main():

    # Create initial condition.

    u = np.ones(nx)

    # Apply the step condition.

    u[int(.5 / dx):int(1 / dx + 1)] = 2

    # Allocate the array that holds the previous time step.

    un = np.ones(nx)

    # Prepare the residue vector.

    rhs = np.zeros(nx)

    # Iterate the solution and monitor the eigenvalues.

    for n in range(nt):

        # Dump iteration count.

        print(" +++ Time: " + str(n) + " +++")

        # Copy the solution of the explicit time marching scheme.

        un = u.copy()

        # Separate the residue.

        rhs = frhs_vec(un, nu, dx)

        # March the residue.

        u = un + dt * rhs

        # Computes the derivative of the residues with respect to the solution vector.

        eps = 0.0001

        # The Jacobian must be square so that its eigenvalues can be computed.
        drhs_du = np.zeros((nx - 1, nx - 1))

        # This loop computes the jacobian matrix according to http://www.netlib.org/math/docpdf/ch08-04.pdf

        for i in range(1, nx - 1):
            for j in range(1, nx - 1):
                drhs_du[i,
                        j] = (frhs(un[i - 1] + eps, un[i] + eps,
                                   un[i + 1] + eps, dx, nu) -
                              frhs(un[j - 1], un[j], un[j + 1], dx, nu)) / eps

        # Build the Hirsch matrix (chap 8).

        s_m = np.zeros((nx - 1, nx - 1))

        # Fill the diagonals

        s_m = (nu / dx**2.0) * create_diagonal(1.0, -2.0, 1.0, nx - 1)

        # Solve the eigenvalues.

        w1, v1 = np.linalg.eig(drhs_du)
        w2, v2 = np.linalg.eig(s_m)

        # Prepare the plots.

        real1 = -np.sort(-w1.real[:])
        imag1 = -np.sort(-w1.imag[:])

        real2 = -np.sort(-w2.real[:])
        imag2 = -np.sort(-w2.imag[:])

        print("\n")
        print("Minimun eigenvalues (Frechet): Real(eig): ", min(real1),
              " Imaginary: Imag(eig): ", min(imag1))
        print("Maximun eigenvalues (Frechet): Real(eig): ", max(real1),
              " Imaginary: Imag(eig): ", max(imag1))
        print("Minimun eigenvalues (Hirsch ): Real(eig): ", min(real2),
              " Imaginary: Imag(eig): ", min(imag2))
        print("Maximun eigenvalues (Hirsch ): Real(eig): ", max(real2),
              " Imaginary: Imag(eig): ", max(imag2))

    # Print both matrices.

    print(np.matrix(s_m))
    print("------------------------------------------------------------")
    print(np.matrix(drhs_du))

    # plot the eigenvalues.

    plt.figure(3)
    fig, ax = plt.subplots(3, figsize=(11, 11))
    ax[0].plot(imag1, real1, 'ro')
    ax[0].set(ylabel='Real(Eig)', xlabel='Imag(Eig)')
    ax[0].set_xlim(-0.06, 0.06)
    # ax[0].set_ylim(-70.0,10.0)

    ax[1].plot(imag2, real2, 'ro')
    ax[1].set(ylabel='Real(Eig)', xlabel='Imag(Eig)')
    ax[1].set_xlim(-0.06, 0.06)
    # ax[1].set_ylim(-70.0,10.0)

    ax[2].plot(np.linspace(0, 2, nx), u)
    ax[2].set(xlabel='x', ylabel='u')

    image_name = str(n) + "image" + ".png"

    plt.savefig(image_name)
    plt.close()
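frhs, frhs_vec and create_diagonal are defined elsewhere in the original script; a plausible sketch of create_diagonal, which only needs to build the constant tridiagonal (1, -2, 1) pattern used for the Hirsch matrix:

import numpy as np

def create_diagonal(lower, main, upper, n):
    # n x n matrix with `lower`, `main` and `upper` on the sub-, main and super-diagonal.
    return (np.diag([lower] * (n - 1), k=-1) +
            np.diag([main] * n) +
            np.diag([upper] * (n - 1), k=1))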
Example #29
    # otherwise, load the model from specified file
    print "Using saved model:", args.model
    model = deserialize(args.model)
    vec_size = model.vec_size  # vec size comes from model
    seq = SequenceGen(args.task, vec_size, args.hi, args.lo)

# An object that keeps the optimizer state during training
optimizer = RMSProp(model.W)

n = 0  # counts the number of sequences trained on
bpc = None  # keeps track of trailing bpc (cost)

while n < 100:

    i, t, seq_len = seq.make()
    inputs = np.matrix(i)
    targets = np.matrix(t)

    loss, deltas, outputs, r, w, a, e = model.lossFun(inputs, targets,
                                                      args.manual_grad)

    newbpc = np.sum(loss) / ((seq_len * 2 + 2) * vec_size)
    if bpc is not None:
        bpc = 0.99 * bpc + 0.01 * newbpc
    else:
        bpc = newbpc

    # sometimes print out diagnostic info
    if ((n % args.log_freq) == 0) or args.test_mode:
        print 'iter %d' % (n)
        visualize(inputs, outputs, r, w, a, e)
Example #30
def error(x_var, y_var, a):
    a = np.matrix([[a], [0.666]])
    e = x_var * a - y_var
    return (e.transpose() * e).item(0)
Example #31
def RKF45_Integrator(t_start, t_stop, h0, x0, A):
    """An integrator using an adaptive 4(5) Runge-Kutta-Fehlberg (RKF) method.

	x0 = initial conditions
	t_start = start time
	t_stop = end time
	h0 = initial step size
	A = A(t) matrix function
	"""
    T_0 = time.time()
    Ndim = x0.size
    x_ = np.zeros((1, Ndim))  # set up the array of x values
    t_ = np.zeros(1)  # set up the array of t values
    t_[0] = t_start
    x_[0, :] = x0
    h = h0
    h_min = h0 * (10**(-2))
    h_max = 5 * h0
    n = 0
    t = t_start
    #
    S = 0.98  # safety factor
    #
    while t <= t_stop:
        x_n = x_[n, :].reshape(Ndim, 1)
        Err_small = False
        h_new = h
        while not Err_small:
            # compute the predictions using 4th and 5th order RK methods
            k1 = np.dot(h * A(t), x_n)
            k2 = h * A(t + 0.25 * h) @ (x_n + 0.25 * k1)
            k3 = h * A(t + (3 / 8) * h) @ (x_n + (3 / 32) * k1 + (9 / 32) * k2)
            k4 = h * A(t + (12 / 13) * h) @ (x_n + (1932 / 2197) * k1 -
                                             (7200 / 2197) * k2 +
                                             (7296 / 2197) * k3)
            k5 = h * A(t + h) @ (x_n + (439 / 216) * k1 - 8 * k2 +
                                 (3680 / 513) * k3 - (845 / 4104) * k4)
            k6 = h * A(t + 0.5 * h) @ (x_n - (8 / 27) * k1 + 2 * k2 -
                                       (3544 / 2565) * k3 +
                                       (1859 / 4104) * k4 - (11 / 40) * k5)
            y_np1 = x_n + (25 / 216) * k1 + (1408 / 2565) * k3 + (
                2197 / 4104) * k4 - (1 / 5) * k5
            z_np1 = x_n + (16 / 135) * k1 + (6656 / 12825) * k3 + (
                28561 / 56430) * k4 - (9 / 50) * k5 + (2 / 55) * k6
            #
            Err = ferr(y_np1, z_np1)
            """
			Err_max = ε(rtol*|z_np1| + atol)
			"""
            Err_max = epsilon_RK * (rtol_RK * np.abs(z_np1) + atol_RK)
            Err_ratio = float(np.mean(Err / Err_max))
            #
            if Err_ratio <= 1:
                h_new = h * S * np.power(Err_ratio, -1.0 / 5)
                #Delta = max(np.asscalar(max(Err)), epsilon_RK*0.1)
                #h_new = h*(epsilon_RK*h/Delta)**(1/4)
                if h_new > 10 * h:  # limit how fast the step size can increase
                    h_new = 10 * h
                if h_new > h_max:  # limit the maximum step size
                    h_new = h_max
                Err_small = True  # break loop
            elif Err_ratio > 1:
                h_new = h * S * np.power(np.abs(Err_ratio), -1.0 / 4)
                #h_new = h*(epsilon_RK*h/np.asscalar(max(Err)))**(1/4)
                if h_new < 0.2 * h:  # limit how fast the step size decreases
                    h_new = 0.2 * h
                if h_new < h_min:  # limit the minimum step size
                    h_new = h_min
                    Err_small = True  # break loop
                elif h_new >= h_min:
                    h = h_new
        t = t + h
        x_ = np.vstack(
            (x_, z_np1.reshape(1, Ndim)))  # add x_n+1 to the array of x values
        t_ = np.append(t_, t)  # add t_n+1 to the array of t values
        n = n + 1
        h = h_new
        if True:  #np.round(((t-t_start)/(t_stop-t_start))*100000) % 1000 == 0:
            print("\r" + "integrated {:.1%}".format(
                (t - t_start) / (t_stop - t_start)),
                  end='')
    T = time.time() - T_0
    print(" done in {:.5g}s".format(T))
    return (t_, x_, T)
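RKF45_Integrator relies on module-level settings (epsilon_RK, rtol_RK, atol_RK, the error norm ferr, and `import time`). A usage sketch with illustrative values, integrating the harmonic oscillator x'' = -x written as a first-order system:

import time
import numpy as np

# Illustrative module-level settings the integrator expects.
epsilon_RK, rtol_RK, atol_RK = 1.0, 1e-6, 1e-9
ferr = lambda y, z: np.abs(y - z)   # componentwise 4th/5th-order difference

A = lambda t: np.array([[0.0, 1.0], [-1.0, 0.0]])   # harmonic oscillator
x0 = np.array([1.0, 0.0])

t_, x_, T = RKF45_Integrator(t_start=0.0, t_stop=10.0, h0=0.01, x0=x0, A=A)
print(x_.shape)   # (number of accepted steps + 1, 2)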
Example #32
  # otherwise, load the model from specified file
  print "Using saved model:", args.model
  model = deserialize(args.model)
  vec_size = model.vec_size # vec size comes from model
  seq = SequenceGen(args.task, vec_size, args.hi, args.lo)

# An object that keeps the optimizer state during training
optimizer = RMSProp(model.W)

n = 0 # counts the number of sequences trained on
bpc = None # keeps track of trailing bpc (cost)

while n < 100:

  i, t, seq_len = seq.make()
  inputs = np.matrix(i)
  targets = np.matrix(t)

  loss, deltas, outputs, r, w, a, e = model.lossFun(inputs, targets, args.manual_grad)

  newbpc = np.sum(loss) / ((seq_len*2 + 2) * vec_size)
  if bpc is not None:
    bpc = 0.99 * bpc + 0.01 * newbpc
  else:
    bpc = newbpc

  # sometimes print out diagnostic info
  if ((n % args.log_freq) == 0) or args.test_mode:
    print 'iter %d' % (n)
    visualize(inputs, outputs, r, w, a, e)
Example #33

 def f(t):
     M = np.matrix([[0, 1], [-w2(t), 0]])
     return M