Example #1
def mpow2(A, n):
    """ 
    Returns the n-th power of A.
    Here, this is computed using eigenvalue decomposition.
    
    ===========
    Parameters:
    ===========
    A : *array*
        the square matrix from which the n-th power should be returned
    n : *integer*
        the power

    ========
    Returns:
    ========
    B : *array*
        B = A^n

    """
     
    D, L = eig(A)
    if isreal(A).all():
        return reduce(dot, [L, diag(D**n), inv(L)]).real
    else:
        return reduce(dot, [L, diag(D**n), inv(L)])
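A minimal usage sketch (hypothetical, not part of the original example): it assumes mpow2 is in scope together with the pylab-style names it relies on, plus functools.reduce on Python 3.

from functools import reduce            # mpow2 needs this on Python 3
from pylab import *                     # supplies eig, isreal, diag, inv, dot
from numpy.linalg import matrix_power

A = array([[2., 1.],
           [0., 3.]])
print(allclose(mpow2(A, 4), matrix_power(A, 4)))   # expect True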
Example #3
def fib2ABD(fiber_layup):
    '''
    :param fiber_layup: dict with integer keys 1..n_layer (one entry per layer), plus "fiber_nr" (the number of layers) and "thickness" (sum of the layer thicknesses)
    :subparam fiber_layup[i_layer]: dict with mandatory fields E1, E2, nu12, G12, angle, thickness, z_start, z_end; optional field: fiber_type
    :return: fiber_layup with the added fields "A", "B", "D", "ABD", "abd", "E_x", "E_y", "G_xy"
    :subfield fiber_layup[i_layer]: gains "S_l" (compliance), "Q_l" (local stiffness), "T_L2G", "T_G2L", "Q_G" (global stiffness)
    '''

    A = py.zeros((3, 3))
    B = py.zeros((3, 3))
    D = py.zeros((3, 3))

    for i_fib in range(1, fiber_layup["fiber_nr"] + 1):
        # Setting up local compliance matrix (S) for each fiber
        fiber_layup[i_fib]["S_l"] = get_compliance_matrix(fiber_layup[i_fib])

        # Getting local stiffness matrix (Q) - (Q = inv(S))
        fiber_layup[i_fib]["Q_l"] = py.inv(fiber_layup[i_fib]["S_l"])

        # Make transformation matrix for each fiber (T_lg, local->global - inv(T_lg)=T_gl, Global->local)
        fiber_layup[i_fib]["T_L2G"] = get_transform_Local2Global(
            fiber_layup[i_fib]["angle"])
        fiber_layup[i_fib]["T_G2L"] = py.inv(fiber_layup[i_fib]["T_L2G"])

        # Make global stiffness matrix
        fiber_layup[i_fib]["Q_G"] = py.dot(
            fiber_layup[i_fib]["T_L2G"],
            py.dot(fiber_layup[i_fib]["Q_l"], fiber_layup[i_fib]["T_L2G"].T))

        # Make global A, B, D
        A += fiber_layup[i_fib]["Q_G"] * (fiber_layup[i_fib]["z_end"] -
                                          fiber_layup[i_fib]["z_start"])
        B += fiber_layup[i_fib]["Q_G"] * 1.0 / 2.0 * (
            fiber_layup[i_fib]["z_end"]**2 - fiber_layup[i_fib]["z_start"]**2)
        D += fiber_layup[i_fib]["Q_G"] * 1.0 / 3.0 * (
            fiber_layup[i_fib]["z_end"]**3 - fiber_layup[i_fib]["z_start"]**3)

    # Collect ABD matrix
    fiber_layup["A"] = A
    fiber_layup["B"] = B
    fiber_layup["D"] = D
    ABD = py.zeros((6, 6))
    ABD[:3, :3] = A
    ABD[:3, 3:] = B
    ABD[3:, :3] = B
    ABD[3:, 3:] = D
    fiber_layup["ABD"] = ABD
    fiber_layup["abd"] = py.inv(fiber_layup["ABD"])
    fiber_layup["E_x"] = 1.0 / (fiber_layup["abd"][0, 0] *
                                fiber_layup["thickness"])
    fiber_layup["E_y"] = 1.0 / (fiber_layup["abd"][1, 1] *
                                fiber_layup["thickness"])
    fiber_layup["G_xy"] = 1.0 / (fiber_layup["abd"][2, 2] *
                                 fiber_layup["thickness"])
    return (fiber_layup)
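A hypothetical sketch of the input dict fib2ABD expects, inferred from the function body (the material numbers are made up; get_compliance_matrix and get_transform_Local2Global must come from the same module):

layup = {"fiber_nr": 2, "thickness": 0.0}
z = -0.25e-3                       # laminate mid-plane at z = 0
for i, angle in enumerate([0.0, 90.0], start=1):
    layup[i] = dict(E1=40e9, E2=10e9, nu12=0.3, G12=5e9,
                    angle=angle, thickness=0.25e-3,
                    z_start=z, z_end=z + 0.25e-3)
    z += 0.25e-3
    layup["thickness"] += layup[i]["thickness"]
# layup = fib2ABD(layup)   # attaches A, B, D, ABD, abd, E_x, E_y, G_xy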
Example #4
def stackElements(S1, S2):
    # Recursive relations, eqs. (59-62) in "A combined three-dimensional finite element and scattering matrix
    #  method for the analysis of plane wave diffraction by bi-periodic, multilayered structures"
    #  (Dossou 2012a)
    RIn1, TIn1, TOut1, ROut1 = StoRT(S1)
    RIn2, TIn2, TOut2, ROut2 = StoRT(S2)
    I = pl.identity(RIn1.shape[0])
    RIn = RIn1 + TOut1 * RIn2 * pl.inv(I - ROut1 * RIn2) * TIn1
    TIn = TIn2 * pl.inv(I - ROut1 * RIn2) * TIn1
    ROut = ROut2 + TIn2 * ROut1 * pl.inv(I - RIn2 * ROut1) * TOut2
    TOut = TOut1 * pl.inv(I - RIn2 * ROut1) * TOut2
    return RTtoS(RIn, TIn, TOut, ROut)
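StoRT and RTtoS are not shown in this snippet. A hypothetical pair that is consistent with how stackElements uses them (the 2x2 block layout of S is an assumption, and np.matrix operands are assumed so that '*' above is a matrix product):

import pylab as pl

def StoRT(S):
    # split a block scattering matrix into its four blocks (layout assumed)
    n = S.shape[0] // 2
    return S[:n, :n], S[n:, :n], S[:n, n:], S[n:, n:]  # RIn, TIn, TOut, ROut

def RTtoS(RIn, TIn, TOut, ROut):
    # reassemble; pl.bmat returns np.matrix, so '*' stays a matrix product
    return pl.bmat([[RIn, TOut], [TIn, ROut]])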
Example #5
def kalman(F,G,H,u,y,x0,Px0,Px,Py):
    """
    computes the discrete-time Kalman-Filter
    parameters:
        F - system matrix [m x m]
        G - input matrix   x(k+1) = F*x(k) + G*u(k) [m x d_u]
        H - observation matrix  y(k) = H*x(k)
        u: input vector [d_u x n]
        y: observations [d_y x n]
        x0: initial estimate [d_x,]
        Px0: initial covariance matrix of x
        Px: covariance of the dynamical (process) noise, applied as G*Px*G.T (can be 0)
        Py: covariance matrix of the observation noise
    returns:
        (state, state_cov) the state and the state covariance matrices
    
    Implementation according to Geering: Regelungstechnik, Springer (2004),
    p. 259f.
    """
    n = y.shape[-1]
    if y.shape[-1] != u.shape[-1]:
        raise ValueError, 'y and u do not have same length!'
    
    # reserve space for results
    Px_minus = zeros((Px0.shape[0],Px0.shape[1],n))
    Px_plus = zeros((Px0.shape[0],Px0.shape[1],n))
    x_minus = zeros((x0.shape[0],n))
    x_plus = zeros((x0.shape[0],n))
    
    Px_minus[:,:,0] = Px0
    x_minus[:,0:1] = x0
    
    # compute for each time frame
    for k in xrange(n):
        Px_plus[:,:,k] = Px_minus[:,:,k] - reduce(dot,
                         [Px_minus[:,:,k], H.T, 
                          inv(Py + dot(H,dot(Px_minus[:,:,k],H.T))), H,
                          Px_minus[:,:,k]])
        L = dot(Px_minus[:,:,k], 
                dot(H.T, inv( Py + dot(H, dot(Px_minus[:,:,k],H.T)) )))
        x_plus[:,k:k+1] = x_minus[:,k:k+1] + dot(L,
                          (y[:,k:k+1] - dot(H,x_minus[:,k:k+1]) ))
        if k < n-1:
            #print 'k = ', k, 'x-plus: ',x_plus.shape,'u',u.shape, 'x_minus:', x_minus.shape         
            Px_minus[:,:,k+1] = (dot(F, dot(Px_plus[:,:,k],F.T)) +
                                 dot(G, dot(Px,G.T)))
            x_minus[:,k+1:k+2] = dot(F,x_plus[:,k:k+1]) + dot(G,u[:,k:k+1])
        
    
    return x_plus, Px_plus
Example #6
def kalman(F, G, H, u, y, x0, Px0, Px, Py):
    """
    computes the discrete-time Kalman-Filter
    parameters:
        F - system matrix [m x m]
        G - input matrix   x(k+1) = F*x(k) + G*u(k) [m x d_u]
        H - observation matrix  y(k) = H*x(k)
        u: input vector [d_u x n]
        y: observations [d_y x n]
        x0: initial estimate [d_x,]
        Px0: initial covariance matrix of x
        Px: covariance of the dynamical (process) noise, applied as G*Px*G.T (can be 0)
        Py: covariance matrix of the observation noise
    returns:
        (state, state_cov) the state and the state covariance matrices
    
    Implementation according to Geering: Regelungstechnik, Springer (2004),
    p. 259f.
    """
    n = y.shape[-1]
    if y.shape[-1] != u.shape[-1]:
        raise ValueError, 'y and u do not have same length!'

    # reserve space for results
    Px_minus = zeros((Px0.shape[0], Px0.shape[1], n))
    Px_plus = zeros((Px0.shape[0], Px0.shape[1], n))
    x_minus = zeros((x0.shape[0], n))
    x_plus = zeros((x0.shape[0], n))

    Px_minus[:, :, 0] = Px0
    x_minus[:, 0:1] = x0

    # compute for each time frame
    for k in xrange(n):
        Px_plus[:, :, k] = Px_minus[:, :, k] - reduce(dot, [
            Px_minus[:, :, k], H.T,
            inv(Py + dot(H, dot(Px_minus[:, :, k], H.T))), H, Px_minus[:, :, k]
        ])
        L = dot(Px_minus[:, :, k],
                dot(H.T, inv(Py + dot(H, dot(Px_minus[:, :, k], H.T)))))
        x_plus[:, k:k + 1] = x_minus[:, k:k + 1] + dot(
            L, (y[:, k:k + 1] - dot(H, x_minus[:, k:k + 1])))
        if k < n - 1:
            #print 'k = ', k, 'x-plus: ',x_plus.shape,'u',u.shape, 'x_minus:', x_minus.shape
            Px_minus[:, :, k + 1] = (dot(F, dot(Px_plus[:, :, k], F.T)) +
                                     dot(G, dot(Px, G.T)))
            x_minus[:, k + 1:k +
                    2] = dot(F, x_plus[:, k:k + 1]) + dot(G, u[:, k:k + 1])

    return x_plus, Px_plus
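A hypothetical shape set-up for kalman() (the values are made up; the snippet itself is Python 2, so xrange and the old raise syntax apply):

import numpy as np

m, n = 2, 100
F = np.array([[1., 1.],
              [0., 1.]])                     # constant-velocity toy model
G = np.zeros((m, 1))                         # no control input
H = np.array([[1., 0.]])                     # observe position only
u = np.zeros((1, n))
y = np.cumsum(np.random.randn(1, n), axis=1)
x0 = np.zeros((m, 1))
Px0 = np.eye(m)                              # initial state covariance
Px = 0.01 * np.eye(1)                        # process noise, enters as G*Px*G.T
Py = 0.1 * np.eye(1)                         # measurement noise covariance
# x_plus, Px_plus = kalman(F, G, H, u, y, x0, Px0, Px, Py)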
Example #7
    def estimate_kernel(self, X, P, M):

        """estimate the ide model's kernel weights from data stored in the ide object"""

        # form Xi variables
        Xi_0 = pb.zeros([self.model.nx, self.model.nx])
        Xi_1 = pb.zeros([self.model.nx, self.model.nx])
        for t in range(1, len(X)):
            Xi_0 += pb.dot(X[t - 1, :].reshape(self.model.nx, 1), X[t, :].reshape(self.model.nx, 1).T) + M[
                t, :
            ].reshape(self.model.nx, self.model.nx)
            Xi_1 += pb.dot(X[t - 1, :].reshape(self.model.nx, 1), X[t - 1, :].reshape(self.model.nx, 1).T) + P[
                t - 1, :
            ].reshape(self.model.nx, self.model.nx)

            # form Upsilon and upsilons
        Upsilon = pb.zeros([self.model.ntheta, self.model.ntheta])
        upsilon0 = pb.zeros([1, self.model.ntheta])
        upsilon1 = pb.zeros([1, self.model.ntheta])
        for i in range(self.model.nx):
            for j in range(self.model.nx):
                Upsilon += Xi_1[i, j] * self.model.Delta_Upsilon[j, i]
                upsilon0 += Xi_0[i, j] * self.model.Delta_upsilon[j, i]
                upsilon1 += Xi_1[i, j] * self.model.Delta_upsilon[j, i]
        upsilon1 = upsilon1 * self.model.xi
        Upsilon = Upsilon * self.model.Ts * self.model.varsigma

        weights = pb.dot(pb.inv(Upsilon.T), upsilon0.T - upsilon1.T)

        return weights
Example #8
    def __init__(self, x_wc, K):
        self.x_wc = x_wc
        self.x_cw = coord_xfms.ssc.inverse(self.x_wc)

        # camera calibration matrix
        self.K = K

        # camera center
        self.C = self.x_wc[0:3]

        # transforms points in world frame to points in camera frame
        self.wHc = coord_xfms.xyzrph2matrix(self.x_cw)
        # transforms points in camera frame to points in world frame
        self.cHw = coord_xfms.xyzrph2matrix(self.x_wc)

        # the stupid [R t] notation that I absolutely hate
        self.Rt = self.wHc[0:3, :]

        # projects points in world frame to image plane of camera
        self.P = K.dot(self.Rt)

        # used for normalized image points
        self.invK = pl.inv(K[0:3, 0:3])

        # used in back-projecting points to rays
        self.pinvP = pl.pinv(self.P)
Example #9
    def __init__ (self, x_wc, K):
        self.x_wc = x_wc
        self.x_cw = coord_xfms.ssc.inverse (self.x_wc)

        # camera calibration matrix
        self.K = K
        
        # camera center
        self.C = self.x_wc[0:3]

        # transforms points in world frame to points in camera frame
        self.wHc = coord_xfms.xyzrph2matrix (self.x_cw)
        # transforms points in camera frame to points in world frame
        self.cHw = coord_xfms.xyzrph2matrix (self.x_wc)

        # the stupid [R t] notation that I absolutely hate
        self.Rt = self.wHc[0:3,:]
        
        # projects points in world frame to image plane of camera
        self.P = K.dot (self.Rt)

        # used for normalized image points
        self.invK = pl.inv (K[0:3,0:3])

        # used in back-projecting points to rays
        self.pinvP = pl.pinv (self.P)
Example #10
def affineTransform(image, x1, y1, x2, y2, x3, y3, M, N):
    # Construct the matrix M
    mat_M = array([[x1, y1, 1, 0, 0, 0], \
                   [0, 0, 0, x1, y1, 1], \
                   [x2, y2, 1, 0, 0, 0], \
                   [0, 0, 0, x2, y2, 1], \
                   [x3, y3, 1, 0, 0, 0], \
                   [0, 0, 0, x3, y3, 1]])
                   
    # Construct vector q
    q = array([[0], [0], [M], [0], [M], [N]])
    
    p = lstsq(mat_M, q)
    a, b, c, d, e, f =  p[0][0][0], \
                        p[0][1][0], \
                        p[0][2][0], \
                        p[0][3][0], \
                        p[0][4][0], \
                        p[0][5][0]
    
    # A is the resulting matrix that describes the transformation
    A = array([[a, b, c], \
               [d, e, f], \
               [0, 0, 1]])
    
    # Create the new image
    b = array([zeros(N, float)] * M)    
    for i in range(0, M):
        for j in range(0, N):
            old_coor = dot(inv(A),([[i],[j],[1]]))
            b[i][j] = pV(image, old_coor[0][0], old_coor[1][0], 'linear')
    
    return b
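A self-contained check of the 6x6 system affineTransform solves (the points are made up; pV and the resampling loop are left out). The recovered A maps the three source points onto (0,0), (M,0) and (M,N):

import numpy as np

x1, y1, x2, y2, x3, y3, M, N = 10, 20, 200, 30, 190, 220, 100, 80
mat_M = np.array([[x1, y1, 1, 0, 0, 0],
                  [0, 0, 0, x1, y1, 1],
                  [x2, y2, 1, 0, 0, 0],
                  [0, 0, 0, x2, y2, 1],
                  [x3, y3, 1, 0, 0, 0],
                  [0, 0, 0, x3, y3, 1]], dtype=float)
q = np.array([0, 0, M, 0, M, N], dtype=float)
a, b, c, d, e, f = np.linalg.solve(mat_M, q)
A = np.array([[a, b, c], [d, e, f], [0, 0, 1]])
print(np.allclose(A @ [x1, y1, 1], [0, 0, 1]))   # expect True
print(np.allclose(A @ [x3, y3, 1], [M, N, 1]))   # expect True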
Example #11
	def _filter(self,Y):

		## initialise
		xf=self.x0
		Pf=self.P0
		# filter quantities
		xfStore =[]
		PfStore=[]

		#calculate the weights
		Wm_i,Wc_i=self.sigma_vectors_weights()

		for y in Y:
			#calculate the sigma points matrix, each column is a sigma vector
			Xi_f_=self.sigma_vectors(xf,Pf)
			#propagate sigma vectors through the non-linearity
			Xi_f=self.state_equation(Xi_f_)
			#pointwise multiply by weights and sum along y-axis
			xf_=pb.sum(Wm_i*Xi_f,1)
			xf_=xf_.reshape(self.nx,1)
			#perturbation
			Xi_purturbation=Xi_f-xf_
			weighted_Xi_purturbation=Wc_i*Xi_purturbation
			Pf_=pb.dot(Xi_purturbation,weighted_Xi_purturbation.T)+self.Sigma_e			
			#measurement update equation
			Pyy=dots(self.C,Pf_,self.C.T)+self.Sigma_varepsilon 
			Pxy=pb.dot(Pf_,self.C.T)
			K=pb.dot(Pxy,pb.inv(Pyy))
			yf_=pb.dot(self.C,xf_)
			xf=xf_+pb.dot(K,(y-yf_))
			Pf=pb.dot((pb.eye(self.nx)-pb.dot(K,self.C)),Pf_)
			xfStore.append(xf)
			PfStore.append(Pf)

		return xfStore,PfStore
Example #12
def get_projection_transformed_point(src_x_values, src_y_values, dest_width,
                                     dest_height, target_point_x,
                                     target_point_y):
    sx1, sy1 = src_x_values[0], src_y_values[0]  # tl
    sx2, sy2 = src_x_values[1], src_y_values[1]  # bl
    sx3, sy3 = src_x_values[2], src_y_values[2]  # br
    sx4, sy4 = src_x_values[3], src_y_values[3]  # tr

    source_points_123 = pl.matrix([[sx1, sx2, sx3],
                                   [sy1, sy2, sy3],
                                   [1, 1, 1]])

    source_point_4 = [[sx4], [sy4], [1]]

    scale_to_source = pl.solve(source_points_123, source_point_4)

    l, m, t = [float(x) for x in scale_to_source]

    unit_to_source = pl.matrix([[l * sx1, m * sx2, t * sx3],
                                [l * sy1, m * sy2, t * sy3],
                                [l, m, t]])

    dx1, dy1 = 0, 0
    dx2, dy2 = 0, dest_height
    dx3, dy3 = dest_width, dest_height
    dx4, dy4 = dest_width, 0

    dest_points_123 = pl.matrix([[dx1, dx2, dx3],
                                 [dy1, dy2, dy3],
                                 [1, 1, 1]])

    dest_point_4 = pl.matrix([[dx4],
                              [dy4],
                              [1]])

    scale_to_dest = pl.solve(dest_points_123, dest_point_4)

    l, m, t = [float(x) for x in scale_to_dest]

    unit_to_dest = pl.matrix([[l * dx1, m * dx2, t * dx3],
                              [l * dy1, m * dy2, t * dy3],
                              [l, m, t]])

    source_to_unit = pl.inv(unit_to_source)

    source_to_dest = unit_to_dest @ source_to_unit

    x, y, z = [float(w) for w in (source_to_dest @ pl.matrix([
        [target_point_x],
        [target_point_y],
        [1]]))]

    x /= z
    y /= z

    y = target_point_y * 2 - y

    return x, y
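A hypothetical usage sketch (the quadrilateral corners are made up; the same pylab-as-pl import the function relies on is assumed):

src_x = [10, 12, 205, 198]     # tl, bl, br, tr, matching the comments above
src_y = [15, 180, 185, 11]
x, y = get_projection_transformed_point(src_x, src_y, 100, 100, 105.0, 95.0)
print(x, y)   # where source point (105, 95) lands in the 100x100 destination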
Example #13
def get_transformation_matrix(matrix):
    # Get the vector p and the values that are in there by taking the SVD.
    # Since D is diagonal with the eigenvalues sorted from large to small
    # on the diagonal, the optimal q in min ||Dq|| is q = [[0]..[1]].
    # Therefore, p = Vq means p is the last column in V (numpy's svd returns
    # V transposed, so this is the last row of the returned array).
    U, D, V = svd(matrix)
    p = V[8][:]

    return inv(array([[p[0],p[1],p[2]], [p[3],p[4],p[5]], [p[6],p[7],p[8]]]))
Example #14
def JointValuesFromTransform(robot, Tdesired):
    """
    Find the dummy joint values such that the Transform of the baselink is
    Tdesired. T0 is the initial Transform of the baselink.
    """
    R = dot(inv(robot.baselinkinittransform[0:3, 0:3]), Tdesired[0:3, 0:3])
    [h1, h2, h3] = AnglesFromRot(R)
    offset = dot(R, robot.baselinkinittransform[0:3, 3])
    [s1, s2, s3] = Tdesired[0:3, 3] - offset
    return [s1, s2, s3, h1, h2, h3]
Example #16
def MLEf(x,x_data,y_data):
    
    X = x_data.numpy()
    Y = y_data.numpy()
    X = np.array([np.concatenate((np.array([1]),X[i,:])) for i in range(X.shape[0])])
    B = pl.inv(X.T@X)@X.T@Y
    s = 0
    N = X.shape[1]
    for i in np.arange(N):
        s = s + B[i]*(x**(i))
    return s,B.reshape(1,-1)
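The .numpy() calls suggest x_data and y_data are torch tensors; a hypothetical sketch under that assumption (np and pl imported as the snippet assumes):

import torch

x_data = torch.linspace(0., 1., 50).reshape(-1, 1)
y_data = 2.0 + 3.0 * x_data[:, 0] + 0.1 * torch.randn(50)
s, B = MLEf(0.5, x_data, y_data)
print(s, B)   # fitted value near 2 + 3*0.5 = 3.5, coefficients near [2, 3]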
Example #17
def setup_asr_step_methods(m, vars, additional_stochs=[]):
    # groups RE stochastics that are suspected of being dependent
    groups = []
    fe_group = [n for n in vars.get('beta', []) if isinstance(n, mc.Stochastic)]
    ap_group = [n for n in vars.get('gamma', []) if isinstance(n, mc.Stochastic)]
    groups += [[g_i, g_j] for g_i, g_j in zip(ap_group[1:], ap_group[:-1])] + [fe_group, ap_group, fe_group+ap_group]

    for a in vars.get('hierarchy', []):
        group = []

        col_map = dict([[key, i] for i,key in enumerate(vars['U'].columns)])
        
        if a in vars['U']:
            for b in nx.shortest_path(vars['hierarchy'], 'all', a):
                if b in vars['U']:
                    n = vars['alpha'][col_map[b]]
                    if isinstance(n, mc.Stochastic):
                        group.append(n)
        groups.append(group)
        #if len(group) > 0:
            #group += ap_group
            #groups.append(group)
            #group += fe_group
            #groups.append(group)
                    
    for stoch in groups:
        if len(stoch) > 0 and pl.all([isinstance(n, mc.Stochastic) for n in stoch]):
            # only step certain stochastics, for understanding convergence
            #if 'gamma_i' not in stoch[0].__name__:
            #    print 'no stepper for', stoch
            #    m.use_step_method(mc.NoStepper, stoch)
            #    continue

            #print 'finding Normal Approx for', [n.__name__ for n in stoch]
            if additional_stochs == []:
                vars_to_fit = [vars.get('p_obs'), vars.get('pi_sim'), vars.get('smooth_gamma'), vars.get('parent_similarity'),
                               vars.get('mu_sim'), vars.get('mu_age_derivative_potential'), vars.get('covariate_constraint')]
            else:
                vars_to_fit = additional_stochs

            try:
                raise ValueError
                na = mc.NormApprox(vars_to_fit + stoch)
                na.fit(method='fmin_powell', verbose=0)
                cov = pl.array(pl.inv(-na.hess), order='F')
                #print 'opt:', pl.round_([n.value for n in stoch], 2)
                #print 'cov:\n', cov.round(4)
                if pl.all(pl.eigvals(cov) >= 0):
                    m.use_step_method(mc.AdaptiveMetropolis, stoch, cov=cov)
                else:
                    raise ValueError
            except ValueError:
                #print 'cov matrix is not positive semi-definite'
                m.use_step_method(mc.AdaptiveMetropolis, stoch)
Example #18
def train_readout(states, targets, reg_fact=0):
    # train readout with linear regression
    # states... numpy array with states[i,j] the state of neuron j in example i
    # targets.. the targets for training/testing. targets[i] is target of example i
    # reg_fact..regularization factor. If set to 0, no regularization is performed
    # returns:
    #    w...weight vector
    if reg_fact == 0:
        w = np.linalg.lstsq(states, targets)[0]
    else:
        w = np.dot(np.dot(pylab.inv(reg_fact * pylab.eye(np.size(states, 1)) + np.dot(states.T, states)), states.T),
                   targets)
    return w
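For reg_fact > 0 this is the closed-form ridge-regression solution w = (lambda*I + X.T X)^-1 X.T t. A hypothetical usage sketch (np and pylab imported as the snippet assumes):

import numpy as np

states = np.random.randn(200, 10)              # 200 examples, 10 neurons
w_true = np.arange(10, dtype=float)
targets = states @ w_true + 0.01 * np.random.randn(200)
w = train_readout(states, targets, reg_fact=1e-3)
print(np.allclose(w, w_true, atol=0.1))        # expect True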
Example #19
	def EM(self,Y,U,eps = 0.000000001):
		converged = False
		l = [1000]
		while not converged:	
			# E step
			X, P, K, M = self.rtssmooth(Y, U)
			Xi11 = pb.sum([Pt + x*x.T for Pt,x in zip(P,X)],0)
			Xi10 = pb.sum([Mt + x1*x.T for Mt,x1,x in zip(M[1:],X[:-1],X[1:])],0)
			# M step
			self.A = pb.inv(Xi11)*Xi10
			l.append(self.likelihood(self.x0,X,Y,U))
			converged = abs(l[-1] - l[-2]) < eps 
			print l[-1]
		return l, X, P
Example #20
def check_actuated_torques(hrp, q, qd, qdd, tau, J, F_ext):
    """Check actuated torques `tau` computed against the external wrench F_ext
    using Equation (8) from (Mistry, Buchli and Schall, 2010).

    robot -- robot object
    q -- full-body configuration
    qd -- full-body velocity
    qdd -- full-body acceleration
    tau -- active joint torques
    J -- contact Jacobian
    F_ext -- contact wrench

    """
    with hrp.rave:
        hrp.rave.SetDOFValues(q)
        hrp.rave.SetDOFVelocities(qd)
        _, tc, tg = hrp.rave.ComputeInverseDynamics(qdd, returncomponents=True)
    M = hrp.compute_inertia_matrix(hrp.q)
    S = hstack([eye(50), zeros((50, 6))])
    SMS_inv = inv(dot(S, dot(inv(M), S.T)))
    S_bar = dot(SMS_inv, dot(S, inv(M))).T
    v = dot(S_bar.T, (tc + tg - dot(J.T, F_ext)))
    tau_check = dot(SMS_inv, qdd[:50]) + v
    return norm(tau - tau_check) < 1e-5
Example #21
 def compute_readout_weights(states, targets, reg_fact=0):
     """
     Train readout with linear regression
     :param states: numpy array with states[i, j], the state of neuron j in example i
     :param targets: numpy array with targets[i], while target i corresponds to example i
     :param reg_fact: regularization factor; 0 results in no regularization
     :return: numpy array with weights[j]
     """
     if reg_fact == 0:
         w = np.linalg.lstsq(states, targets)[0]
     else:
         w = np.dot(
             np.dot(
                 pylab.inv(reg_fact * pylab.eye(np.size(states, 1)) +
                           np.dot(states.T, states)), states.T), targets)
     return w
Example #22
def fit_ellipse(pts):
	(x,y)=pts
	cm=(x.mean(),y.mean())
	D=np.transpose([x**2, x*y, y**2, x, y, np.ones(len(x))])
	S=np.dot(np.transpose(D),D)
	C=np.zeros((6,6))
	C[5,5]=0; C[0,2]=2; C[1,1]=-1; C[2,0]=2
	(geval, gevec)=pl.eig( np.dot(pl.inv(S),C) )  # numpy eig returns (values, vectors)
	(PosC,)=np.where((geval>0) & ~np.isinf(geval))  # parenthesise: & binds tighter than >
	a=gevec[:,PosC[0]]
	
	(v,w)=pl.eig(np.array([[a[0],a[1]/2],[a[1]/2,a[2]]]))
	vect1=w[:,0]
	theta=np.arctan2(vect1[1],vect1[0])
		
	return cm, v[1], v[0], theta
Example #23
def normal_equation(x, y):
    """
    Description:
        Computes the parameters based the training examples and values of the target variables using
        the closed form formula theta = (inverse(X.transpose()*X)*X.transpose())*Y where X.transpose()
        computes the transpose of matrix X and * implies matrix multiplication.
    Parameters:
        x - an array of feature vectors
        y - target variables corresponding to the feature vectors
    Returns:
        theta - an array of parameters corresponding to the feature vectors.
    """

    z = inv(dot(x.transpose(), x))
    theta = dot(dot(z, x.transpose()), y)
    return theta
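A hypothetical usage sketch (made-up data; dot and inv in scope as the snippet assumes, e.g. from numpy and numpy.linalg):

import numpy as np

x = np.hstack([np.ones((100, 1)), np.random.rand(100, 1)])   # bias + feature
y = x @ np.array([1.0, 2.0]) + 0.01 * np.random.randn(100)
theta = normal_equation(x, y)
print(theta)   # approximately [1.0, 2.0]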
Example #24
def perspectiveTransform(image, x1, y1, x2, y2, x3, y3, x4, y4, M, N):
    # Construct the matrix M
    x1_a, y1_a = 0, 0
    x2_a, y2_a = M, 0
    x3_a, y3_a = M, N
    x4_a, y4_a = 0, N
    
    mat_M = array([[x1, y1, 1, 0,  0,  0, -x1_a * x1, -x1_a * y1, -x1_a], \
                   [0,  0,  0, x1, y1, 1, -y1_a * x1, -y1_a * y1, -y1_a], \
                   [x2, y2, 1, 0,  0,  0, -x2_a * x2, -x2_a * y2, -x2_a], \
                   [0,  0,  0, x2, y2, 1, -y2_a * x2, -y2_a * y2, -y2_a], \
                   [x3, y3, 1, 0,  0,  0, -x3_a * x3, -x3_a * y3, -x3_a], \
                   [0,  0,  0, x3, y3, 1, -y3_a * x3, -y3_a * y3, -y3_a], \
                   [x4, y4, 1, 0,  0,  0, -x4_a * x4, -x4_a * y4, -x4_a], \
                   [0,  0,  0, x4, y4, 1, -y4_a * x4, -y4_a * y4, -y4_a]])
    
    # Get the vector p and the values that are in there by taking the SVD. 
    # Since D is diagonal with the eigenvalues sorted from large to small on
    # the diagonal, the optimal q in min ||Dq|| is q = [[0]..[1]]. Therefore, 
    # p = Vq means p is the last column in V (numpy's svd returns V
    # transposed, so this is the last row of the returned array).
    U, D, V = svd(mat_M)
    p = V[8][:]                
    a, b, c, d, e, f, g, h, i = p[0], \
                                p[1], \
                                p[2], \
                                p[3], \
                                p[4], \
                                p[5], \
                                p[6], \
                                p[7], \
                                p[8]
    
    # P is the resulting matrix that describes the transformation
    P = array([[a, b, c], \
               [d, e, f], \
               [g, h, i]])
    
    # Create the new image
    b = array([zeros(M, float)] * N)
    for i in range(0, M):
        for j in range(0, N):
            or_coor = dot(inv(P),([[i],[j],[1]]))
            or_coor_h = or_coor[1][0] / or_coor[2][0], \
                      or_coor[0][0] / or_coor[2][0]
            b[j][i] = pV(image, or_coor_h[0], or_coor_h[1], 'linear')
    
    return b
Example #25
		def Kupdate(A,C,P_,Sigma_epsilon,x_,y):
								
			if (np.rank(A)!=2 or type(self.A)!=np.ndarray):raise TypeError('A should be rank two array')
			if (np.rank(C)!=2 or type(self.C)!=np.ndarray):raise TypeError('C should be rank two array')
			if (np.rank(Sigma_epsilon)!=2 or type(self.Sigma_epsilon)!=np.ndarray):raise TypeError('Sigma_epsilon should be rank two array')
			if (np.rank(P_)!=2 or type(P_)!=np.ndarray):raise TypeError('P should be rank two array')
			if type(x_)!=np.ndarray:raise TypeError('x0 should be rank two array')
			if type(y)!=np.ndarray:raise TypeError('y should be rank two array')

			
			# calculate Kalman gain
			K =pb.dot(pb.dot(P_,C.T),pb.inv(dots(C,P_,C.T) + Sigma_epsilon))
			# update estimate with model output measurement
			x = x_ + pb.dot(K,(y-pb.dot(C,x_)))
			# update the state error covariance
			P = pb.dot((np.eye(self.nx)-pb.dot(K,C)),P_);
			return x,P
Example #26
	def estimate_kernel(self,X):

		"""
			estimate the ide parameters using least squares method
	
			Arguments
			----------
			X: list of ndarray
				state vectors

			Returns
			---------
			least squares estimation of the IDE parameters
		"""

		Q=self.Q_calc(X)
		Z=pb.vstack(X[1:])
		X_t_1=pb.vstack(X[:-1])
		Q_t_1=pb.vstack(Q[:-1])
		X_ls=pb.hstack((Q_t_1,X_t_1))
		theta=dots(pb.inv(pb.dot(X_ls.T,X_ls)),X_ls.T,Z)
		parameters=[float(theta[i]) for i in range(theta.shape[0])]
		return parameters
Example #27
def findMatrix( uN, dim, nCoeff, pnts, pascale, serendipity ):
    fop = g.PolynomialModelling( dim, nCoeff, pnts )
    fop.setPascalsStyle( pascale )
    fop.setSerendipityStyle( serendipity )
    
    tmp = g.RPolynomialFunction( g.RVector( fop.polynomialFunction().size() ) )
    tmp.fill( fop.startModel() )
    print "base:", tmp
    
    G = P.zeros( ( len( pnts), len( tmp.elements() ) ) )
    
    for i, p in enumerate( pnts ):
        for j, e in enumerate( tmp.elements() ):
            G[i,j] = e( p )
    
    GI = P.inv( G )
    coef = P.dot( GI, uN )
    
    print coef
    print P.dot( G, coef )
    tmp.fill( g.asvector( coef ) )
    print coef, tmp
    return tmp
Example #28
 def compute_readout_weights(states, targets, reg_fact=0):
     """
     Train readout with linear regression
     :param states: numpy array with states[i, j], the state of neuron j in example i
     :param targets: numpy array with targets[i], while target i corresponds to example i
     :param reg_fact: regularization factor; 0 results in no regularization
     :return: numpy array with weights[j]
     """
     if reg_fact == 0:
         # lstsq solves the equation Xw = b for the best w 
         w = np.linalg.lstsq(states, targets)[0]
     else:
         # pylab.inv -> inverse 
         # pylab.eye -> identity matrix
         # Note that the inverse of kI_n = 1/k I_n for a scalar k. 
         
         # This is somewhat related to the least squares equation.
         # A^TA x = A^T b 
         # for vectors x and b 
         w = np.dot(np.dot(pylab.inv(reg_fact * pylab.eye(np.size(states, 1)) + np.dot(states.T, states)),
                           states.T),
                    targets)
     return w
Example #29
def findMatrix(uN, dim, nCoeff, pnts, pascale, serendipity):
    fop = g.PolynomialModelling(dim, nCoeff, pnts)
    fop.setPascalsStyle(pascale)
    fop.setSerendipityStyle(serendipity)

    tmp = g.RPolynomialFunction(g.RVector(fop.polynomialFunction().size()))
    tmp.fill(fop.startModel())
    print("base:", tmp)

    G = P.zeros((len(pnts), len(tmp.elements())))

    for i, p in enumerate(pnts):
        for j, e in enumerate(tmp.elements()):
            G[i, j] = e(p)

    GI = P.inv(G)
    coef = P.dot(GI, uN)

    print(coef)
    print(P.dot(G, coef))
    tmp.fill(g.asvector(coef))
    print(coef, tmp)
    return tmp
Example #30
from __future__ import print_function, division, absolute_import
import sys
sys.path.insert(0, "../../")
# from SBDet import parseToCoo, gen_sigs, mix, select_model
from SBDet import *
import pylab as P
import numpy as np

# tr = zload('./EstBiase-n_num.pk')
tr = zload('./EstBiase-n_num-revised.pk')
n_num, iso_n, PA_para, PA_lk, ER_para, ER_lk = zip(*tr['n_num'])
x = iso_n
y = np.array(PA_lk).reshape(-1) - np.array(ER_lk)
P.plot(x, y, 'r.')
# import ipdb;ipdb.set_trace()
P.axis([0, max(x), 0, max(y)])

# solve least square problem
n = len(x)
# X = np.hstack([np.ones((n, 1)), np.array(x).reshape(-1, 1)])
X = np.array(x).reshape(-1, 1)
beta = P.dot(P.dot(P.inv(P.dot(X.T, X)), X.T), np.array(y).reshape(-1, 1))
xp = np.linspace(0, max(x), 100)
yp = xp * beta[0, 0]
P.plot(xp, yp, 'b--')
P.xlabel('Number of isolated nodes')
P.ylabel('Difference between log likelihood of PA and ER')
xm = max(x) * 0.5
P.text(xm, xm * beta[0, 0] - 500, 'y=%f x' % beta[0, 0])
P.show()
Example #31
def setup_asr_step_methods(m, vars, additional_stochs=[]):
    # groups RE stochastics that are suspected of being dependent
    groups = []
    fe_group = [
        n for n in vars.get('beta', []) if isinstance(n, mc.Stochastic)
    ]
    ap_group = [
        n for n in vars.get('gamma', []) if isinstance(n, mc.Stochastic)
    ]
    groups += [[g_i, g_j] for g_i, g_j in zip(ap_group[1:], ap_group[:-1])
               ] + [fe_group, ap_group, fe_group + ap_group]

    for a in vars.get('hierarchy', []):
        group = []

        col_map = dict([[key, i] for i, key in enumerate(vars['U'].columns)])

        if a in vars['U']:
            for b in nx.shortest_path(vars['hierarchy'], 'all', a):
                if b in vars['U']:
                    n = vars['alpha'][col_map[b]]
                    if isinstance(n, mc.Stochastic):
                        group.append(n)
        groups.append(group)
        #if len(group) > 0:
        #group += ap_group
        #groups.append(group)
        #group += fe_group
        #groups.append(group)

    for stoch in groups:
        if len(stoch) > 0 and pl.all(
            [isinstance(n, mc.Stochastic) for n in stoch]):
            # only step certain stochastics, for understanding convergence
            #if 'gamma_i' not in stoch[0].__name__:
            #    print 'no stepper for', stoch
            #    m.use_step_method(mc.NoStepper, stoch)
            #    continue

            #print 'finding Normal Approx for', [n.__name__ for n in stoch]
            if additional_stochs == []:
                vars_to_fit = [
                    vars.get('p_obs'),
                    vars.get('pi_sim'),
                    vars.get('smooth_gamma'),
                    vars.get('parent_similarity'),
                    vars.get('mu_sim'),
                    vars.get('mu_age_derivative_potential'),
                    vars.get('covariate_constraint')
                ]
            else:
                vars_to_fit = additional_stochs

            try:
                raise ValueError
                na = mc.NormApprox(vars_to_fit + stoch)
                na.fit(method='fmin_powell', verbose=0)
                cov = pl.array(pl.inv(-na.hess), order='F')
                #print 'opt:', pl.round_([n.value for n in stoch], 2)
                #print 'cov:\n', cov.round(4)
                if pl.all(pl.eigvals(cov) >= 0):
                    m.use_step_method(mc.AdaptiveMetropolis, stoch, cov=cov)
                else:
                    raise ValueError
            except ValueError:
                #print 'cov matrix is not positive semi-definite'
                m.use_step_method(mc.AdaptiveMetropolis, stoch)
Example #32
'''
Created on Jun 5, 2016

@author: ahanagrawal
'''
from GradientDescent import ConstructArrays
import pylab
import numpy as np

if __name__ == '__main__':
    array = pylab.loadtxt("ex1/ex1data1.txt", dtype=float, delimiter=",")
    X, Y = ConstructArrays(array)
    X = np.matrix(X)
    Y = np.matrix(Y)
    print(pylab.inv(X.T * X) * X.T * Y)
Example #33
def create_cm(dim,eigList1 = None, eigList2 = None):
    """
    returns two real-valued commuting matrices of dimension dim x dim
    the eigenvalues of each matrix can be given; single complex numbers will be
    interpreted as a pair of complex conjugates.
    With this restriction, the (internally augmented) lists must have the length
    of dim
    """
    if eigList1 is None:
        eigList1 = rand(dim)
    if eigList2 is None:
        eigList2 = rand(dim)
        
    # order 1st array such that complex numbers are first
    EL1 = array(eigList1)    
    imPos1 = find(iscomplex(EL1))
    rePos1 = find(isreal(EL1)) # shorter than set comparisons :D
    EL1 = hstack([EL1[imPos1],EL1[rePos1]])
    # order 2nd array such that complex numbers are last
    EL2 = array(eigList2)    
    imPos2 = find(iscomplex(EL2))
    rePos2 = find(isreal(EL2)) # shorter than set comparisons :D
    EL2 = hstack([EL2[rePos2],EL2[imPos2]])
    # now: make eigenvalues of list #2, where a block is in list #1, 
    # pairwise equal, and other way round
    EL2[1:2*len(imPos1):2] = EL2[0:2*len(imPos1):2]    
    EL1[-2*len(imPos2)+1::2] = EL1[-2*len(imPos2)::2]
    
    if len(imPos2)*2 + len(imPos1)*2 > dim:
        raise ValueError(
           'too many complex eigenvalues - cannot create commuting matrices')

    # augment lists
    ev1 = []       
    nev1 = 0
    for elem in EL1:
        if elem.imag != 0.:
            ev1.append( array( [[elem.real, -elem.imag],
                               [elem.imag,  elem.real]]))
            nev1 += 2
        else:
            ev1.append(elem)
            nev1 += 1
            
    if nev1 != dim:
        raise ValueError(
          'number of given eigenvalues #1 (complex: x2) does not match dim!')
            
    
    ev2 = []
    nev2 = 0
    for elem in EL2:
        if elem.imag != 0.:
            ev2.append( array( [[elem.real, -elem.imag],
                               [elem.imag,  elem.real]]))
            nev2 += 2
        else:
            ev2.append(elem)
            nev2 += 1
    
    if nev2 != dim:
        raise ValueError(
          'number of given eigenvalues #2 (complex: x2) does not match dim!')
        


    
    u,s,v = svd(randn(dim,dim))
    # create a coordinate system v that is not orthogonal but not too skew
    v = v + .2*rand(dim,dim) - .1
    
    cm1 = dot(inv(v),dot(blockdiag(ev1),v))
    cm2 = dot(inv(v),dot(blockdiag(ev2),v))
    # create block diagonal matrices
    
    return cm1, cm2
Example #34
	def rtssmooth(self,Y):
		## initialise
		xf=self.x0
		Pf=self.P0
		# filter quantities
		xfStore =[]
		PfStore=[]


		#calculate the weights
		Wm_i,Wc_i=self.sigma_vectors_weights()



		for y in Y:

			#calculate the sigma points matrix, each column is a sigma vector
			Xi_f_=self.sigma_vectors(xf,Pf)
			#propagate sigma vectors through the non-linearity
			Xi_f=self.state_equation(Xi_f_)
			#pointwise multiply by weights and sum along y-axis
			xf_=pb.sum(Wm_i*Xi_f,1)
			xf_=xf_.reshape(self.nx,1)
			#perturbation
			Xi_purturbation=Xi_f-xf_
			weighted_Xi_purturbation=Wc_i*Xi_purturbation
			Pf_=pb.dot(Xi_purturbation,weighted_Xi_purturbation.T)+self.Sigma_e			
			#measurement update equation
			Pyy=dots(self.C,Pf_,self.C.T)+self.Sigma_varepsilon 
			Pxy=pb.dot(Pf_,self.C.T)
			K=pb.dot(Pxy,pb.inv(Pyy))
			yf_=pb.dot(self.C,xf_)
			xf=xf_+pb.dot(K,(y-yf_))
			Pf=pb.dot((pb.eye(self.nx)-pb.dot(K,self.C)),Pf_)
			xfStore.append(xf)
			PfStore.append(Pf)



		# initialise the smoother

		T=len(Y)
		xb = [None]*T
		Pb = [None]*T
	

		xb[-1], Pb[-1] = xfStore[-1], PfStore[-1]

		# backward iteration

		for t in range(T-2,-1,-1):
			#calculate the sigma points matrix from filtered states, each column is a sigma vector
			Xi_b_=self.sigma_vectors(xfStore[t],PfStore[t]) 
			#propagate sigma vectors through the non-linearity
			Xi_b=self.state_equation(Xi_b_) 
			#calculate xb_
			#pointwise multiply by weights and sum along y-axis
			xb_=pb.sum(Wm_i*Xi_b,1)
			xb_=xb_.reshape(self.nx,1)
			#perturbation
			Xi_b__purturbation=Xi_b_-xfStore[t] 
			Xi_b_purturbation=Xi_b-xb_ 
			#weighting
			weighted_Xi_b__purturbation=Wc_i*Xi_b__purturbation 
			weighted_Xi_b_purturbation=Wc_i*Xi_b_purturbation
			Pb_=pb.dot(Xi_b_purturbation,weighted_Xi_b_purturbation.T)+self.Sigma_e
			Mb_=pb.dot(weighted_Xi_b__purturbation,Xi_b_purturbation.T)

			#Calculate Smoother outputs
			S=pb.dot(Mb_,pb.inv(Pb_))
			xb[t]=xfStore[t]+pb.dot(S,(xb[t+1]-xb_))
			Pb[t]=PfStore[t]+dots(S,(Pb[t+1]-Pb_),S.T)


		return xb,Pb
Example #35
'''
Created on Jun 5, 2016

@author: ahanagrawal
'''
from GradientDescent import ConstructArrays
import pylab
import numpy as np

if __name__ == '__main__':
    array = pylab.loadtxt("ex1/ex1data1.txt", dtype = float, delimiter = ",")
    X,Y = ConstructArrays(array)
    X = np.matrix(X)
    Y = np.matrix(Y)
    print(pylab.inv(X.T*X) * X.T * Y)
Example #36
    def Global_Stiffness(self):
        '''
        Generates Global Stiffness Matrix for the plane structure
        '''
        elem = self.element;
        B = py.zeros((6,6))
        for i in range (0,py.size(elem,0)): 
            #for each element find the stiffness matrix
            K = py.zeros((self.n_nodes*2,self.n_nodes*2))            
            el = elem[i]
            
            #nodes formatted for input            
            [node1, node2, node3] = el;
            node1x = 2*(node1);node2x = 2*(node2);node3x = 2*(node3);
            node1y = 2*(node1)+1;node2y = 2*(node2)+1;node3y = 2*(node3)+1;
            #Area, Strain Matrix and E Matrix multiplied to get element stiffness            
            [J,B] = self.B(el)
            local_k =0.5*abs(J)*py.dot(py.transpose(B),py.dot(self.E_matrix,B))
            
            if self.debug:            
                print 'K for elem', el, '\n', local_k
            #Element K-Matrix converted into Global K-Matrix format 
            K[py.ix_([node1x,node1y,node2x,node2y,node3x,node3y],[node1x,node1y,node2x,node2y,node3x,node3y])] += local_k

            #Adding contribution into Global Stiffness
            self.k_global += K
            
        if self.debug: 
                print 'Global Stiffness','\n', self.k_global, '\n', 'size', py.shape(self.k_global), '\n Inverse check (should be identity)' , py.dot(py.inv(self.k_global),self.k_global)
Example #37
def dia(r, i, k, w=None, verbose=False, noback=False):
    """
    Computational tool for Difference Image Analysis (DIA)
    
    :INPUTS:
       R -- reference image.  This should have the highest possible
            signal-to-noise and the sharpest PSF.

       I -- Current image to be analysed.

       k -- 2D kernel basis mask: 1 for pixels to be used, 0 for
            pixels to be ignored

    :OPTIONS:
       w       -- weights of the pixel values in I; typically (sigma)^-2

       noback  -- do not fit for a variable background; assume constant.

       verbose -- Print output statements and make a plot or two

    :OUTPUTS:       (M, K, B, C):
       M -- R, convolved to match I

       K -- kernel used in convolution

       B -- background offset

       C -- chisquared of fit. If no weights were specified, weights
            are set to unity for this calculation.

    :EXAMPLE:
      ::

                import pylab as py
                import dia
                
                # Define reference and blurred images:
                ref = py.zeros((10,10))
                img = py.zeros((10,10))
                ref[3,3] = 1; ref[3,6] = 1;  ref[6,3] = 1
                img[2,3] = 1; img[3,2:5] = 1;  img[4,3] = 1
                img[2,6] = 1; img[3,5:8] = 1;  img[4,6] = 1
                img[5,3] = 1; img[6,2:5] = 1;  img[7,3] = 1

                # Add some noise:
                img += py.randn(10, 10)/10.

                # Define kernel basis
                kb = py.ones((3,3))    

                # Compute Difference Image Analysis:
                m, kern, bkg, chisq = dia.dia(ref, img, kb)

                # Display results:
                py.figure()
                py.subplot(231)
                py.imshow(ref)
                py.title('Reference image')
                py.subplot(232)
                py.imshow(img)
                py.title('Observed image')
                py.subplot(234)
                py.imshow(kern)
                py.title('Optimal kernel')
                py.subplot(235)
                py.imshow(m)
                py.title('Convolved Reference')
                py.subplot(236)
                py.imshow(img - m)
                py.title('Residuals')
                



    :NOTES:
        Best results are obtained with proper registration of the images.

        Also, beware of edge effects.  As a general rule, anything within
        a kernel width of the edges is suspect.

        Based on the 2D Bramich (2008) DIA algorithm


        2008-11-18 11:12 IJC: Extrapolating from my 1D algorithm

        2012-04-03 08:19 IJMC: Added example to documentation.
    """
#    """Testing Bramich's algorithm for 2D DIA."""

    from nsdata import imshow

    if noback:
        if verbose: print "Not fitting for a variable background..."

    tol = 1e-10  # tolerance for singularity

    r = array(r, copy=True)
    i = array(i, copy=True)
    k = array(k, copy=True, dtype=bool)
    if len(k.ravel())==1:
        k = k.reshape((1,1))
    elif len(k.shape)==1:
        k = k.reshape((k.shape[0], 1))

    Nrx = r.shape[0]
    Nry = r.shape[1]
    Nr  = Nrx * Nry
    Nkx = k.shape[0]
    Nky = k.shape[1]
    Nk  = k.sum()
    dx  = int(floor(Nkx/2))
    dy  = int(floor(Nky/2))
    ix  = Nrx - Nkx + 1
    iy  = Nry - Nky + 1

    pvec = find(k.ravel())
    pl = pvec // Nky  # row indices of the kernel pixels (ravel order)
    pm = pvec % Nky   # column indices

    if w is None:
        w = ones(i.shape, dtype=float)

    #ind = arange(Nr-Nk+1, dtype=int)

    if verbose: 
        print "Nrx,Nry,Nr>>" + str((Nrx,Nry,Nr))
        print "r>>" + str(r)
        print "i>>" + str(i)
        print "Nkx,Nky,Nk>>" + str((Nkx,Nky,Nk))
        print "k>>" + str(k)
        print "pvec>>" + str(pvec)
        print "pl>>" + str(pl)
        print "pm>>" + str(pm)
        print "dx,dy>>" + str((dx,dy))
        print "ix,iy>>" + str((ix,iy))

    #b = zeros(Nk+1, dtype=float)
    #for ii in arange(Nk):
    #    b[ii] = (w[ind+dx] * i[ind+dx] * r[ind+ii]).sum()
    #b[Nk] = (w[ind+dx] * i[ind+dx]).sum()

    b = zeros(Nk+1, dtype=float)
    itemp = i[dx:dx+ix,dy:dy+iy]
    wtemp = w[dx:dx+ix,dy:dy+iy]
    if verbose: print "itemp>>" + str(itemp)
    for ii in arange(Nk):
        b[ii] = (itemp * wtemp * r[pl[ii]:pl[ii]+ix,pm[ii]:pm[ii]+iy]).sum()
    b[Nk] = (wtemp * itemp).sum()
    if verbose: print "b>>" + str(b)
    
    if verbose: print "Made it through 'b' Calculation!"
    
        
    U = zeros((Nk+1,Nk+1))
    # This is optimized by only computing the upper diagonal elements...
    for ii in arange(Nk):
        l = pl[ii]
        m = pm[ii]
        if verbose: print "l, m>>" + str((l,m))
        rwtemp = r[l:l+ix, m:m+iy] * wtemp
        U[Nk, ii] = rwtemp.sum()
        U[ii, Nk] = U[Nk, ii]
        for jj in arange(ii,Nk):
            l2 = pl[jj]
            m2 = pm[jj]
            if verbose: print "l2, m2>>" + str((l2,m2))
            U[ii,jj] = ( rwtemp * r[l2:l2+ix,m2:m2+iy] ).sum()
            U[jj,ii] = U[ii,jj]
    if verbose: print "U>>" + str(U)

    U[Nk, Nk] = wtemp.sum()

    if verbose: print "U>>" + str(U)

    if noback:
        U = U[0:Nk, 0:Nk]
        b = b[0:Nk]

    detU = linalg.det(U)
    if verbose: print "det(U) is:  " + str(detU)

    if detU<tol:
        if verbose: print "Singular matrix: det(U) < tol.  Using pseudoinverse..."
        a = dot(linalg.pinv(U), b)
    else:
        a = dot(inv(U), b)
    
    if noback:
        K = a
        B0 = 0.0
    else:
        K = a[0:len(a)-1]
        B0 = a[-1]
    if verbose: print "K>>" + str(K)
    if verbose: print "B0>>" + str(B0)
    if verbose: print "find(k.ravel())>>" + str(find(k.ravel()))

    kernel = zeros(Nkx*Nky, dtype=float)
    kernel[find(k.ravel())] = K

    kernel = kernel.reshape((Nkx, Nky))
                        
    # Bramich's convolution is not Numpy-standard, so we do it by hand:
    m = rconvolve2d(r, kernel, mode='valid') + B0

    if verbose: print "itemp.shape>>" + str(itemp.shape)
    if verbose: print "wtemp.shape>>" + str(wtemp.shape)
    if verbose: print "r.shape>>" + str(r.shape)
    if verbose: print "m.shape>>" + str(m.shape)
    if verbose: print "m[dx:dx+ix,dy:dy+iy].shape>>" + str(m[dx:dx+ix,dy:dy+iy].shape)

    chisq  = ( wtemp * (itemp - m[dx:dx+ix,dy:dy+iy])**2 ).sum()
    chisq0 = ( wtemp * (itemp - r[dx:dx+ix,dy:dy+iy])**2 ).sum()

    #if verbose: print "Kernel is:  " + str(kernel)
    if verbose: print "Background: " + str(B0)
    if verbose: print "Phot. scaling: " + str(kernel.sum())
    if verbose: print "For the (" + str(Nr) + " - " + str(Nk+1) + ") = " + str(Nr-Nk-1) + " DOF:"
    if verbose: print "Red. Chisquared (I-R): " + str(chisq0/(Nr-Nk-1))
    if verbose: print "Red. Chisquared (I-M): " + str(chisq/(Nr-Nk-1))
    
    if verbose:
        figure(); 
        cmin = array([i.min(), m.min()]).min()
        cmax = array([i.max(), m.max()]).max()
        subplot(131); imshow(r); title('R'); clim([cmin, cmax]); colorbar()
        subplot(132); imshow(i); title('I'); clim([cmin, cmax]); colorbar()
        subplot(133); imshow(m); title('M'); clim([cmin, cmax]); colorbar()

        figure(); 
        cmin = array([(r-i).min(), (m-i).min()]).min()
        cmax = array([(r-i).max(), (m-i).max()]).max()
        subplot(121); imshow(r - i); title('R - I'); clim([cmin, cmax]); colorbar();
        subplot(122); imshow(m - i); title('M - I'); clim([cmin, cmax]); colorbar(); 

        figure();
        imshow(kernel); title('Kernel'); colorbar()

        show()

    return (m, kernel, B0, chisq)
Example #38
def solve_inv(A, B):
    # note: with np.matrix operands '*' is a matrix product; for plain
    # ndarrays it would be elementwise, and pl.solve(A, B) is preferable
    return pl.inv(A) * B
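A hypothetical check against the numerically preferred pl.solve (np.matrix operands assumed, so that '*' in solve_inv is a matrix product):

import numpy as np
import pylab as pl

A = np.matrix([[3., 1.],
               [1., 2.]])
B = np.matrix([[9.],
               [8.]])
print(np.allclose(solve_inv(A, B), pl.solve(A, B)))   # expect True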
Example #39
	def rtssmooth(self,Y):
		
		
		"""Rauch Tung Streibel(RTS) smoother
		
		
		Arguments
		----------
		Y : list of ndarray
			 list of observation vectors
			
		Returns
		----------	
		XStore : list of ndarray
			 list of forward state estimates
		PStore : list of ndarray
			 list of forward state covariance matrices
			
		XbStore : list of ndarray
			 list of backward state estimates
		PbStore : list of ndarray
			 list of backward state covariance matrices	

		"""
		
		
		
		# prediction
		def Kpred(A,P,Sigma_e,x):
					
			if (np.rank(A)!=2 or type(self.A)!=np.ndarray):raise TypeError('A should be rank two array')
			if (np.rank(Sigma_e)!=2 or type(self.Sigma_e)!=np.ndarray):raise TypeError('Sigma_e should be rank two array')
			if (np.rank(P)!=2 or type(self.Sigma_e)!=np.ndarray):raise TypeError('P should be rank two array')
			if type(x)!=np.ndarray:raise TypeError('x0 should be rank two array')
			
			# predict state
			x_ = pb.dot(A,x) 
			# predict state covariance
			P_ =dots(A,P,A.T) + Sigma_e
			return x_,P_

		# correction
		def Kupdate(A,C,P_,Sigma_epsilon,x_,y):
								
			if (np.rank(A)!=2 or type(self.A)!=np.ndarray):raise TypeError('A should be rank two array')
			if (np.rank(C)!=2 or type(self.C)!=np.ndarray):raise TypeError('C should be rank two array')
			if (np.rank(Sigma_epsilon)!=2 or type(self.Sigma_epsilon)!=np.ndarray):raise TypeError('Sigma_epsilon should be rank two array')
			if (np.rank(P_)!=2 or type(P_)!=np.ndarray):raise TypeError('P should be rank two array')
			if type(x_)!=np.ndarray:raise TypeError('x0 should be rank two array')
			if type(y)!=np.ndarray:raise TypeError('y should be rank two array')

			
			# calculate Kalman gain
			
			K =pb.dot(pb.dot(P_,C.T),pb.inv(dots(C,P_,C.T) + Sigma_epsilon))
			# update estimate with model output measurement
			x = x_ + pb.dot(K,(y-pb.dot(C,x_)))
			# update the state error covariance
			P = pb.dot((np.eye(self.nx)-pb.dot(K,C)),P_);
			return x,P,K

		# smoother quantities
		XStore_ =pb.zeros([len(Y),self.nx])
		PStore_ =pb.zeros([len(Y),self.nx**2])
		XStore = pb.zeros([len(Y),self.nx])
		PStore = pb.zeros([len(Y),self.nx**2])
		x_,P_ = Kpred(self.A,self.P0,self.Sigma_e,self.x0)
		# filter
		for i in range(len(Y)):
			#store predicted states
			PStore_[i,:]=P_.ravel()
			XStore_[i,:]=x_.ravel()
			# update state estimate with measurement and stored Kalman gain
			x,P,K= Kupdate(self.A,self.C,P_,self.Sigma_epsilon,x_,Y[i])
			# store corrected states and covariance matrices
			XStore[i,:]=x.ravel()
			PStore[i,:]=P.ravel()
			# predict new state
			x_, P_ = Kpred(self.A,P,self.Sigma_e,x)


			
		# initialise the smoother
		T=len(Y)
		XbStore = pb.zeros([T,self.nx])
		PbStore = pb.zeros([T,self.nx**2])
		S = pb.zeros([T,self.nx**2])
		XbStore[-1,:], PbStore[-1,:] = XStore[-1,:], PStore[-1,:]
		# RTS smoother
		for t in range(T-2,-1,-1):
			S_temp= dots(PStore[t,:].reshape(self.nx,self.nx),self.A.T,pb.inv(PStore_[t+1,:].reshape(self.nx,self.nx)))
			S[t,:]=S_temp.ravel()
			XbStore_temp= XStore[t,:].reshape(self.nx,1) + pb.dot(S_temp,(XbStore[t+1].reshape(self.nx,1) - XStore_[t+1,:].reshape(self.nx,1)))
			XbStore[t,:]=XbStore_temp.ravel()
			PbStore_temp = PStore[t,:].reshape(self.nx,self.nx) + dots(S_temp,PbStore[t+1,:].reshape(self.nx,self.nx)-PStore_[t+1,:].reshape(self.nx,self.nx),S_temp.T)
			PbStore[t,:]=PbStore_temp.ravel()
		# iterate a final time to calculate the cross-covariance matrices
		M = pb.zeros([T,self.nx**2])
		M[-1,:]=pb.dot(np.eye(self.nx)-pb.dot(K,self.C), pb.dot(self.A,PStore[-2,:].reshape(self.nx,self.nx))).ravel()
		for t in range(T-2,0,-1):
			M_temp=pb.dot(PStore[t,:].reshape(self.nx,self.nx),S[t-1,:].reshape(self.nx,self.nx).T) + dots(S[t,:].reshape(self.nx,self.nx),M[t+1,:].reshape(self.nx,self.nx) - pb.dot(self.A,PStore[t,:].reshape(self.nx,self.nx)),S[t-1].reshape(self.nx,self.nx).T)
			M[t,:]=M_temp.ravel()
		
		return XbStore,PbStore,M
Example #40
        if listener.msg_ok:

            H = listener.H.copy()
            A = listener.A.copy()
            B = listener.B.copy()

            # get vertices
            vert = []
            for i in xrange(7):
                for j in xrange(i + 1, 8):  #xrange(8):
                    #if i != j:
                    for u in [A[i], B[i]]:
                        for v in [A[j], B[j]]:
                            # intersection of Ai = Hi.x and Bj = Hj.x
                            x = pl.dot(pl.inv(H[[i, j], :]), pl.array([u, v]))
                            # check constraints: A <= H.x <= B
                            if pl.amin(pl.dot(H, x) - A) >= -1e-6 and pl.amax(
                                    pl.dot(H, x) - B) <= 1e-6:
                                vert.append(x.reshape(2).copy())
            print('the number of vertices', len(vert))

            # continue only if enough vertices
            if len(vert) > 2:
                ax.clear()
                inter = inter + 1
                vert_uns = pl.array(vert + [vert[0]])

                xm, xM, ym, yM = (pl.amin(vert_uns[:, 0]), pl.amax(vert_uns[:, 0]),
                                  pl.amin(vert_uns[:, 1]), pl.amax(vert_uns[:, 1]))
Example #41
def fit_consistent(model, iter=2000, burn=1000, thin=1, tune_interval=100, verbose=False):
    """Fit data model for all epidemiologic parameters using MCMC
    
    :Parameters:
      - `model` : data.ModelData
      - `iter` : int, number of posterior samples fit
      - `burn` : int, number of posterior samples to discard as burn-in
      - `thin` : int, samples thinned by this number
      - `tune_interval` : int
      - `verbose` : boolean

    :Results:
      - returns a pymc.MCMC object created from vars, that has been fit with MCMC

    .. note::
      - `burn` must be less than `iter`
      - `thin` must be less than `iter` minus `burn`

    """
    assert burn < iter, 'burn must be less than iter'
    assert thin < iter - burn, 'thin must be less than iter-burn'

    param_types = 'i r f p pf rr smr m_with X'.split()

    vars = model.vars
    
    start_time = time.time()
    map = mc.MAP(vars)
    m = mc.MCMC(vars)

    ## use MAP to generate good initial conditions
    try:
        method='fmin_powell'
        tol=.001

        fit_model.logger.info('fitting submodels')
        fit_model.find_consistent_spline_initial_vals(vars, method, tol, verbose)

        for t in param_types:
            fit_model.find_re_initial_vals(vars[t], method, tol, verbose)
            fit_model.logger.info('.')

        fit_model.find_consistent_spline_initial_vals(vars, method, tol, verbose)
        fit_model.logger.info('.')

        for t in param_types:
            fit_model.find_fe_initial_vals(vars[t], method, tol, verbose)
            fit_model.logger.info('.')

        fit_model.find_consistent_spline_initial_vals(vars, method, tol, verbose)
        fit_model.logger.info('.')

        for t in param_types:
            fit_model.find_dispersion_initial_vals(vars[t], method, tol, verbose)
            fit_model.logger.info('.')

        fit_model.logger.info('\nfitting all stochs\n')
        map.fit(method=method, tol=tol, verbose=verbose)

        if verbose:
            from fit_posterior import inspect_vars
            print inspect_vars({}, vars)

    except KeyboardInterrupt:
        fit_model.logger.warning('Initial condition calculation interrupted')

    ## use MCMC to fit the model

    try:
        fit_model.logger.info('finding step covariances')
        vars_to_fit = [[vars[t].get('p_obs'), vars[t].get('pi_sim'), vars[t].get('smooth_gamma'), vars[t].get('parent_similarity'),
                        vars[t].get('mu_sim'), vars[t].get('mu_age_derivative_potential'), vars[t].get('covariate_constraint')] for t in param_types]
        max_knots = max([len(vars[t]['gamma']) for t in 'irf'])
        for i in range(max_knots):
            stoch = [vars[t]['gamma'][i] for t in 'ifr' if i < len(vars[t]['gamma'])]

            if verbose:
                print 'finding Normal Approx for', [n.__name__ for n in stoch]
            try:
                na = mc.NormApprox(vars_to_fit + stoch)
                na.fit(method='fmin_powell', verbose=verbose)
                cov = pl.array(pl.inv(-na.hess), order='F')
                if pl.all(pl.eigvals(cov) >= 0):
                    m.use_step_method(mc.AdaptiveMetropolis, stoch, cov=cov)
                else:
                    raise ValueError
            except ValueError:
                if verbose:
                    print 'cov matrix is not positive semi-definite'
                m.use_step_method(mc.AdaptiveMetropolis, stoch)

            fit_model.logger.info('.')

        for t in param_types:
            fit_model.setup_asr_step_methods(m, vars[t], vars_to_fit)

            # reset values to MAP
            fit_model.find_consistent_spline_initial_vals(vars, method, tol, verbose)
            fit_model.logger.info('.')
        map.fit(method=method, tol=tol, verbose=verbose)
        fit_model.logger.info('.')
    except KeyboardInterrupt:
        fit_model.logger.warning('Initial condition calculation interrupted')

    fit_model.logger.info('\nsampling from posterior distribution\n')
    m.iter=iter
    m.burn=burn
    m.thin=thin
    if verbose:
        try:
            m.sample(m.iter, m.burn, m.thin, tune_interval=tune_interval, progress_bar=True, progress_bar_fd=sys.stdout)
        except TypeError:
            m.sample(m.iter, m.burn, m.thin, tune_interval=tune_interval, progress_bar=False, verbose=verbose)
    else:
        m.sample(m.iter, m.burn, m.thin, tune_interval=tune_interval, progress_bar=False)
    m.wall_time = time.time() - start_time

    model.map = map
    model.mcmc = m
    
    return model.map, model.mcmc
Example #42
def create_cm(dim, eigList1=None, eigList2=None):
    """
    returns two real-valued commuting matrices of dimension dim x dim
    the eigenvalues of each matrix can be given; single complex numbers will be
    interpreted as pair of complex conjuates.
    With this restriction, the (internally augmented) lists must have the length
    of dim
    """
    if eigList1 is None:
        eigList1 = rand(dim)
    if eigList2 is None:
        eigList2 = rand(dim)

    # order 1st array such that complex numbers are first
    EL1 = array(eigList1)
    imPos1 = find(iscomplex(EL1))
    rePos1 = find(isreal(EL1))  # shorter than set comparisons :D
    EL1 = hstack([EL1[imPos1], EL1[rePos1]])
    # order 2nd array such that complex numbers are last
    EL2 = array(eigList2)
    imPos2 = find(iscomplex(EL2))
    rePos2 = find(isreal(EL2))  # shorter than set comparisons :D
    EL2 = hstack([EL2[rePos2], EL2[imPos2]])
    # now: make eigenvalues of list #2 pairwise equal where list #1 has a
    # complex block, and the other way round
    EL2[1:2 * len(imPos1):2] = EL2[0:2 * len(imPos1):2]
    if len(imPos2) > 0:   # guard: with no complex values, -0 would select the whole array
        EL1[-2 * len(imPos2) + 1::2] = EL1[-2 * len(imPos2)::2]

    if len(imPos2) * 2 + len(imPos1) * 2 > dim:
        raise ValueError(
            'too many complex eigenvalues - cannot create commuting matrices')

    # augment lists
    ev1 = []
    nev1 = 0
    for elem in EL1:
        if elem.imag != 0.:
            ev1.append(array([[elem.real, -elem.imag], [elem.imag,
                                                        elem.real]]))
            nev1 += 2
        else:
            ev1.append(elem)
            nev1 += 1

    if nev1 != dim:
        raise ValueError(
            'number of given eigenvalues #1 (complex: x2) does not match dim!')

    ev2 = []
    nev2 = 0
    for elem in EL2:
        if elem.imag != 0.:
            ev2.append(array([[elem.real, -elem.imag], [elem.imag,
                                                        elem.real]]))
            nev2 += 2
        else:
            ev2.append(elem)
            nev2 += 1

    if nev2 != dim:
        raise ValueError(
            'number of given eigenvalues #2 (complex: x2) does not match dim!')

    # create a coordinate system v that is not orthogonal, but not too skew
    u, s, v = svd(randn(dim, dim))
    v = v + .2 * rand(dim, dim) - .1

    # conjugate the block-diagonal eigenvalue structures into that system
    cm1 = dot(inv(v), dot(blockdiag(ev1), v))
    cm2 = dot(inv(v), dot(blockdiag(ev2), v))

    return cm1, cm2
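A usage sketch for create_cm (assuming it and its blockdiag/find helpers are importable): request one complex pair and verify that the two matrices commute. Note that where one list carries a complex pair, the other list's eigenvalues are duplicated pairwise by construction.

from pylab import dot, eigvals, allclose

cm1, cm2 = create_cm(4, eigList1=[1., 2., 3., 4.],
                        eigList2=[.5, 1.5, 2.5 + 1.j])  # the complex pair counts twice

print allclose(dot(cm1, cm2), dot(cm2, cm1))  # True: the matrices commute
print eigvals(cm2)                            # includes the pair 2.5 +/- 1.0j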
Exemple #43
0
def dialin(r, i, k, w=None, verbose=True, noback=False):
    """
    Computational tool for Difference Image Analysis (DIA) with a
       linearly-varying kernel
    
    :INPUTS:
       R -- reference image.  This should have the highest possible
            signal-to-noise and the sharpest PSF.

       I -- Current image to be analysed.

       k -- pixel mask for kernel; 1 for pixels to be used, 0 for
            pixels to be ignored

    :OPTIONS:
       w       -- weights of the pixel values in I; typically (sigma)^-2

       noback  -- do not fit for a variable background; assume constant.

       verbose -- Print output statements and make a plot or two

    :OUTPUTS:       (K, Kx, Ky, B):
       K -- constant convolution kernel

       Kx, Ky -- kernels of the linearly-varying (x- and y-gradient) terms

       B -- background offset

       (The model image M and the chi-squared of the fit would require the
       non-standard convolution rconvolve2d; that step is left commented
       out below.)


    :NOTES:
        Best results are obtained with proper registration of the images.

        Also, beware of edge effects.  As a general rule, anything within
        a kernel width of the edges is suspect.

        Originally based on the 2D Bramich (2008) DIA algorithm

        2008-11-19 21:50 IJC: Trying it out
    """
#    """Testing Bramich's algorithm for 2D DIA."""

    from nsdata import imshow

    if noback:
        if verbose: print "Not fitting for a variable background..."

    tol = 1e-10  # tolerance for singularity

    r = array(r, copy=True)
    i = array(i, copy=True)
    k = array(k, copy=True, dtype=bool)
    if len(k.ravel())==1:
        k = k.reshape((1,1))
    elif len(k.shape)==1:
        k = k.reshape((k.shape[0], 1))

    Nrx = r.shape[0]
    Nry = r.shape[1]
    Nr  = Nrx * Nry
    Nkx = k.shape[0]
    Nky = k.shape[1]
    Nk  = k.sum()
    dx  = int(floor(Nkx/2))
    dy  = int(floor(Nky/2))
    ix  = Nrx - Nkx + 1
    iy  = Nry - Nky + 1

    pvec = find(k.ravel())
    pl = pvec // Nky   # row index in the raveled (Nkx, Nky) mask (was pvec/Nkx, wrong for non-square kernels)
    pm = pvec % Nky    # column index

    if w is None:
        w = ones(i.shape, dtype=float)

    #ind = arange(Nr-Nk+1, dtype=int)

    if verbose: print "Nrx,Nry,Nr>>" + str((Nrx,Nry,Nr))
    if verbose: print "r>>" + str(r)
    if verbose: print "i>>" + str(i)
    if verbose: print "Nkx,Nky,Nk>>" + str((Nkx,Nky,Nk))
    if verbose: print "k>>" + str(k)
    if verbose: print "pvec>>" + str(pvec)
    if verbose: print "pl>>" + str(pl)
    if verbose: print "pm>>" + str(pm)
    if verbose: print "dx,dy>>" + str((dx,dy))
    if verbose: print "ix,iy>>" + str((ix,iy))

    xcoords = arange(Nrx) - floor(Nrx/2)
    ycoords = arange(Nry) - floor(Nry/2)
    xx,yy = meshgrid(xcoords, ycoords)
    if verbose: print "xx>>" + str(xx)
    if verbose: print "yy>>" + str(yy)
    
    b = zeros(3*Nk+1, dtype=float)
    itemp = i[dx:dx+ix,dy:dy+iy]
    wtemp = w[dx:dx+ix,dy:dy+iy]
    iwtemp = itemp * wtemp
    #xtemp = xx[dx:dx+ix,dy:dy+iy]
    #ytemp = yy[dx:dx+ix,dy:dy+iy]
    
    if verbose: print "itemp>>" + str(itemp)
    #if verbose: print "xtemp>>" + str(ytemp)
    #if verbose: print "ytemp>>" + str(xtemp)
    for ii in arange(Nk):
        l = pl[ii]
        m = pm[ii]
        b[ii]      = (iwtemp * r[l:l+ix,m:m+iy]).sum()
        b[ii+Nk]   = (iwtemp * r[l:l+ix,m:m+iy] * xx[l:l+ix,m:m+iy]).sum()
        b[ii+2*Nk] = (iwtemp * r[l:l+ix,m:m+iy] * yy[l:l+ix,m:m+iy]).sum()

    b[3*Nk] = iwtemp.sum()

    if verbose: print "b>>" + str(b)
    if verbose: print "Made it through 'b' Calculation!"
    
    # Construct the U_pq matrix; here p is "ii" and q is "jj"
    U = zeros((3*Nk+1,3*Nk+1))
    for ii in arange(Nk):
        l = pl[ii]
        m = pm[ii]
        xtemp = xx[l:l+ix,m:m+iy]
        ytemp = yy[l:l+ix,m:m+iy]
        rtemp = r[l:l+ix, m:m+iy]
        #if verbose: print "xtemp.shape>>" + str(xtemp.shape)
        #if verbose: print "ytemp.shape>>" + str(ytemp.shape)

        # Compute the final row and column:
        U[ii,-1]      = ( wtemp * rtemp ).sum()
        U[-1,ii]      = U[ii,-1]
        U[ii+Nk,-1]   = ( wtemp * rtemp * xtemp ).sum()
        U[-1,ii+Nk]   = U[ii+Nk,-1]
        U[ii+2*Nk,-1] = ( wtemp * rtemp * ytemp ).sum()
        U[-1,ii+2*Nk] = U[ii+2*Nk,-1]   # mirror the final column into the final row
        
        # Compute the guts of the matrix:
        for jj in arange(ii+1):
            l2 = pl[jj]
            m2 = pm[jj]
            if verbose: print "ii,jj, l, m, l2, m2>>" + str((ii,jj,l,m,l2,m2))
            xtemp2 = xx[l2:l2+ix,m2:m2+iy]
            ytemp2 = yy[l2:l2+ix,m2:m2+iy]
            rtemp2 = r[l2:l2+ix, m2:m2+iy]
            #if verbose: print "xtemp2.shape>>" + str(xtemp2.shape)
            #if verbose: print "ytemp2.shape>>" + str(ytemp2.shape)

            wrr2 = wtemp * rtemp * rtemp2

            # Fill the lower triangle and mirror every entry, so that U is
            # fully symmetric (the original mirroring missed the cross terms
            # of the diagonal blocks and the opposite off-block triangles):
            U[ii,jj]           = ( wrr2 ).sum()
            U[jj,ii]           = U[ii,jj]
            U[ii+Nk,jj]        = ( wrr2 * xtemp ).sum()
            U[jj,ii+Nk]        = U[ii+Nk,jj]
            U[jj+Nk,ii]        = ( wrr2 * xtemp2 ).sum()
            U[ii,jj+Nk]        = U[jj+Nk,ii]
            U[ii+Nk,jj+Nk]     = ( wrr2 * xtemp * xtemp2 ).sum()
            U[jj+Nk,ii+Nk]     = U[ii+Nk,jj+Nk]
            U[ii+2*Nk,jj]      = ( wrr2 * ytemp ).sum()
            U[jj,ii+2*Nk]      = U[ii+2*Nk,jj]
            U[jj+2*Nk,ii]      = ( wrr2 * ytemp2 ).sum()
            U[ii,jj+2*Nk]      = U[jj+2*Nk,ii]
            U[ii+2*Nk,jj+Nk]   = ( wrr2 * ytemp * xtemp2 ).sum()
            U[jj+Nk,ii+2*Nk]   = U[ii+2*Nk,jj+Nk]
            U[jj+2*Nk,ii+Nk]   = ( wrr2 * ytemp2 * xtemp ).sum()
            U[ii+Nk,jj+2*Nk]   = U[jj+2*Nk,ii+Nk]
            U[ii+2*Nk,jj+2*Nk] = ( wrr2 * ytemp * ytemp2 ).sum()
            U[jj+2*Nk,ii+2*Nk] = U[ii+2*Nk,jj+2*Nk]

    U[-1,-1] = wtemp.sum()

    if verbose: print "U>>" + str(U)

    if noback:
        U = U[0:3*Nk, 0:3*Nk]
        b = b[0:3*Nk]

    detU = linalg.det(U)
    if verbose: print "det(U) is:  " + str(detU)

    if abs(detU) < tol:   # guard against near-singular U of either sign
        if verbose: print "Singular matrix: det(U) < tol.  Using pseudoinverse..."
        a = dot(linalg.pinv(U), b)
    else:
        a = dot(inv(U), b)
    
    if noback:
        K = a
        B0 = 0.0
    else:
        K = a[0:len(a)-1]
        B0 = a[-1]
    if verbose: print "K>>" + str(K)
    if verbose: print "B0>>" + str(B0)
    if verbose: print "find(k.ravel())>>" + str(find(k.ravel()))

    kernel  = zeros(Nkx*Nky, dtype=float)
    xkernel = zeros(Nkx*Nky, dtype=float)
    ykernel = zeros(Nkx*Nky, dtype=float)
    kernel[find(k.ravel())] = K[0:Nk]
    xkernel[find(k.ravel())] = K[Nk:2*Nk]
    ykernel[find(k.ravel())] = K[2*Nk:3*Nk]

    kernel  =  kernel.reshape((Nkx, Nky))
    xkernel = xkernel.reshape((Nkx, Nky))
    ykernel = ykernel.reshape((Nkx, Nky))
                        
    # Bramich's convolution is not Numpy-standard, so we do it by hand:
    #m = rconvolve2d(r, kernel, mode='valid') + B0

    #chisq  = ( wtemp * (itemp - m[dx:dx+ix,dy:dy+iy])**2 ).sum()
    #chisq0 = ( wtemp * (itemp - r[dx:dx+ix,dy:dy+iy])**2 ).sum()

    if verbose: print "Kernel is:  " + str(kernel)
    if verbose: print "X-Kernel is:  " + str(xkernel)
    if verbose: print "Y-Kernel is:  " + str(ykernel)
    if verbose: print "Background: " + str(B0)
    if verbose: print "Phot. scaling: " + str(kernel.sum())
    if verbose: print "For the (" + str(Nr) + " - " + str(3*Nk+1) + ") = " + str(Nr-3*Nk-1) + " DOF:"
    #if verbose: print "Red. Chisquared (I-R): " + str(chisq0/(Nr-Nk-1))
    #if verbose: print "Red. Chisquared (I-M): " + str(chisq/(Nr-Nk-1))
    
    if verbose:
        ndim=3
        for ii in range(ndim):
            for jj in range(ndim):
                figure(1); s = subplot(ndim, ndim, 1+jj + (ndim-ii-1)*ndim)
                xind = int(Nrx/(ndim+1.0))
                yind = int(Nry/(ndim+1.0))
                x0 = xx[(ii+1)*xind,(jj+1)*yind]
                y0 = yy[(ii+1)*xind,(jj+1)*yind]
                print "x0,y0>>" + str((x0,y0))
                imshow(kernel + x0*xkernel + y0*ykernel)
                s.set_ylim(s.get_ylim()[::-1])
                offset = array(centroid(kernel + x0*xkernel + y0*ykernel)) - \
                    array(centroid(ones(kernel.shape)))
                title(str((x0,y0)) + ':  ' + str(offset))
                #figure(2)
                #plot([x0],[y0], 'oc')
                #plot([x0, x0+offset[0]], [y0, y0+offset[1]], '-k')
        figure(); 
        subplot(121); imshow(r); title('Reference'); #clim([cmin, cmax]); colorbar()
        subplot(122); imshow(i); title('Current Image'); #clim([cmin, cmax]); colorbar()
        #subplot(133); imshow(m); title('M'); clim([cmin, cmax]); colorbar()

#       figure(); 
#       cmin = array([(r-i).min(), (m-i).min()]).min()
#       cmax = array([(r-i).max(), (m-i).min()]).max()
#       subplot(121); imshow(r - i); title('R - I'); clim([cmin, cmax]); colorbar();
#       subplot(122); imshow(m - i); title('M - I'); clim([cmin, cmax]); colorbar(); 

        show()

    return kernel, xkernel, ykernel, B0
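The heart of dialin is the weighted normal-equations solve U a = b with a pseudoinverse fallback. The same pattern in miniature, with a hypothetical design matrix X, weights w, and data y (illustrative only):

from pylab import dot, inv, diag, rand, randn
from numpy import linalg

X = rand(50, 4)                # 50 pixels, 4 kernel/background parameters
w = rand(50) + .5              # per-pixel weights, e.g. 1/sigma**2
y = dot(X, randn(4)) + .01 * randn(50)

U = dot(X.T, dot(diag(w), X))  # plays the role of the U_pq matrix above
b = dot(X.T, w * y)            # plays the role of the b vector above

tol = 1e-10
if abs(linalg.det(U)) < tol:
    a = dot(linalg.pinv(U), b)  # near-singular: least-squares solution
else:
    a = dot(inv(U), b)
print a                         # recovered parameters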
Exemple #44
0
	def gen_ssmodel(self):

		'''Generates a non-linear, integro-difference, discrete-time state-space model.

		Attributes:
		----------
		Gamma: ndarray
			Inner product of the field basis functions

		Gamma_inv: ndarray
			Inverse of Gamma

		Sigma_e: ndarray
			Covariance matrix of the field disturbance

		Sigma_e_inv: ndarray
			Inverse of Sigma_e

		Sigma_e_c: ndarray
			Cholesky decomposition of the field disturbance covariance matrix

		Sigma_varepsilon_c: ndarray
			Cholesky decomposition of the observation noise covariance matrix

		Phi_values: ndarray, nx by number of spatial locations
			Field basis function values over the spatial field

		psi_conv_Phi_values: ndarray, each nx by number of spatial locations
			Convolution of the connectivity kernel basis functions with the
			field basis functions, evaluated over space

		Gamma_inv_psi_conv_Phi: ndarray, each nx by number of spatial locations
			Product of the inverse of Gamma with psi_conv_Phi

		C: ndarray
			Observation matrix

		Gamma_inv_Psi_conv_Phi: ndarray, nx by number of spatial locations
			Convolution of the kernel with the field basis functions at the
			discretized spatial points
		'''

		#Generate Gamma
		if hasattr(self,'Gamma'):
			pass
		else:
			t_total=time.time()
			#calculate Gamma=PhixPhi.T; inner product of the field basis functions
			Gamma=pb.dot(self.field.Phi,self.field.Phi.T)
			Gamma_inv=pb.inv(Gamma)
			self.Gamma=Gamma.astype('float')
			self.Gamma_inv=Gamma_inv


		#Generate field covariance matrix
		if hasattr(self,'Sigma_e'):
			pass
		else:

			gamma_convolution_vectorized=pb.vectorize(self.gamma.conv)
			gamma_conv_Phi=gamma_convolution_vectorized(self.field.Phi).T
			#[gamma*phi1 gamma*phi2 ... gamma*phin] 1 by nx
			Pi=pb.dot(self.field.Phi,gamma_conv_Phi) #nx by nx ndarray
			Pi=Pi.astype('float')
			Sigma_e=pb.dot(pb.dot(self.gamma_weight*self.Gamma_inv,Pi),self.Gamma_inv.T)
			Sigma_e_c=sp.linalg.cholesky(Sigma_e,lower=1)
			self.Sigma_e=Sigma_e
			self.Sigma_e_inv=pb.inv(Sigma_e)
			self.Sigma_e_c=Sigma_e_c
			Sigma_varepsilon_c=sp.linalg.cholesky(self.Sigma_varepsilon,lower=1)
			self.Sigma_varepsilon_c=Sigma_varepsilon_c



		if hasattr(self,'Phi_values'):
			pass
		else:

		
			#Generate field meshgrid
			estimation_field_space_x,estimation_field_space_y=pb.meshgrid(self.estimation_space_x_y,self.estimation_space_x_y)
			estimation_field_space_x=estimation_field_space_x.ravel()
			estimation_field_space_y=estimation_field_space_y.ravel()

			#calculate Phi_values
			#Phi_values is like [[phi1(r1) phi1(r2)...phi1(rn_r)],[phi2(r1) phi2(r2)...phi2(rn_r)],..[phiL(r1) phiL(r2)...phiL(rn_r)]]
			Phi_values=[self.field.Phi[i,0](estimation_field_space_x,estimation_field_space_y) for i in range(self.nx)]
			self.Phi_values=pb.squeeze(Phi_values) #it's nx by number of spatial points

			#vectorizing kernel convolution method
			psi_convolution_vectorized=pb.empty((self.n_theta,1),dtype=object)
			for i in range(self.n_theta):
				psi_convolution_vectorized[i,0]=pb.vectorize(self.kernel.Psi[i].conv)

			#find convolution between kernel and field basis functions analytically
			psi_conv_Phi=pb.empty((self.nx,self.n_theta),dtype=object)#nx by n_theta
			for i in range(self.n_theta):
				psi_conv_Phi[:,i]=psi_convolution_vectorized[i,0](self.field.Phi).ravel()


			#evaluate the convolution between kernel and field basis functions at spatial locations
			psi_conv_Phi_values=pb.empty((self.n_theta,self.nx,len(self.estimation_space_x_y)**2),dtype=float)
			for i in range(self.n_theta):
				for j in range(self.nx):
					psi_conv_Phi_values[i,j,:]=psi_conv_Phi[j,i](estimation_field_space_x,estimation_field_space_y)
			self.psi_conv_Phi_values=psi_conv_Phi_values



			self.Gamma_inv_psi_conv_Phi=pb.dot(self.Gamma_inv,self.psi_conv_Phi_values)
				

		


		#Calculate observation matrix
			
		if hasattr(self,'C'):
			pass
		else:
			#Generate Observation locations grid
			obs_locns_x=self.obs_locns[:,0]
			obs_locns_y=self.obs_locns[:,1]
			sensor_kernel_convolution_vectorized=pb.vectorize(self.sensor_kernel.conv)
			sensor_kernel_conv_Phi=sensor_kernel_convolution_vectorized(self.field.Phi).T #first row
			#[m*phi_1 m*phi2 ... m*phin]
			C=pb.empty(([self.ny,self.nx]),dtype=float)
			for i in range(self.nx):
				C[:,i]=sensor_kernel_conv_Phi[0,i](obs_locns_x,obs_locns_y)

			self.C=C
			print 'Elapsed time in seconds to generate the state-space model',time.time()-t_total


		#We need to calculate this bit at each iteration	
		#Finding the convolution of the kernel with field basis functions at discretized spatial points
		self.Gamma_inv_Psi_conv_Phi=pb.sum(self.kernel.weights[pb.newaxis,:,pb.newaxis]*self.Gamma_inv_psi_conv_Phi,axis=1)
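The Gamma / Sigma_e construction above, reduced to concrete numbers: Gamma is the Gram matrix of the field basis functions, and the disturbance covariance is projected through its inverse as Sigma_e = gamma_weight * Gamma^-1 * Pi * Gamma^-T. A toy sketch (Phi and Pi are illustrative stand-ins for the analytic inner products):

import pylab as pb
from scipy.linalg import cholesky

Phi = pb.array([[1., .2], [.1, 1.]])  # stand-in for the basis-function matrix
Gamma = pb.dot(Phi, Phi.T)            # Gram matrix of the basis functions
Gamma_inv = pb.inv(Gamma)

Pi = pb.array([[2., .3], [.3, 1.5]])  # stand-in for the gamma-kernel projection
gamma_weight = .5
Sigma_e = pb.dot(pb.dot(gamma_weight * Gamma_inv, Pi), Gamma_inv.T)
print cholesky(Sigma_e, lower=1)      # lower Cholesky factor, as Sigma_e_c above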
Exemple #45
0
def dsa(r, i, Nk, **kw): #, w=None, verbose=False, noback=False):
    """
    Computational tool for Difference Spectral Analysis (DSA)
    
    :INPUTS:
       R -- reference spectrum.  This should have the highest possible
            signal-to-noise and the highest spectral resolution.

       I -- Current spectrum to be analysed.

       Nk -- number of pixels in the desired convolution kernel

    :OPTIONS:
       w       -- weights of the pixel values in I; typically (sigma)^-2

       noback  -- do not fit for a variable background; assume constant.

       tol=1e-10 -- if matrix determinant is less than tol, use
                    pseudoinverse rather than straight matrix
                    inversion

       verbose -- Print output statements and make a plot or two

       retinv -- return a fourth output, the Least Squares inverse
                 matrix (False by default)

    :OUTPUTS:       (M, K, B, C):
       M -- R, convolved to match I

       K -- kernel used in convolution

       B -- background offset

       C -- chisquared of fit. If no weights were specified, weights
            are set to unity for this calculation.

       If retinv is set, the least-squares inverse matrix is returned
       as an additional fifth output.

    :NOTES:
        Best results are obtained with proper registration of the spectra.

        Also, beware of edge effects.  As a general rule, anything within
        a kernel width of the edges is suspect.


    :SEE_ALSO:  
       :func:`dsamulti`    """
#    """Testing Bramich's algorithm for 1D spectra."""
#    Based on the 2D Bramich (2008) DIA algorithm
#    -----
#    2008-11-14 10:56 IJC: Created @ UCLA.
#    2008-11-18 11:12 IJC: Registration now works correctly
#    2008-12-09 16:10 IJC: Somewhat optimized
#    2009-02-26 22:06 IJC: Added retinv, changed optional input format

    defaults = dict(verbose=False, w=None, noback=False, tol=1e-10, \
                        retinv=False)
    for key in defaults:
        if key not in kw:
            kw[key] = defaults[key]
    verbose = bool(kw['verbose'])
    noback = bool(kw['noback'])
    retinv = bool(kw['retinv'])
    w = kw['w']
    if verbose:
        print "kw>>" + str(kw)

    if noback:
        if verbose: print "Not fitting for a variable background..."

    tol = float(kw['tol'])  # tolerance for singularity (was hard-coded, ignoring the tol option)

    r = array(r, copy=True)
    i = array(i, copy=True)
    dx = int(floor(Nk/2))
    Nk = int(Nk)  # length of kernel

    if w is None:
        w = ones(len(r), dtype=float)

    Nr = len(r)  # length of Reference
    ind = arange(Nr-Nk+1, dtype=int)
    wind = w[ind]
    if verbose: 
        #print "r>>" + str(r)
        #print "i>>" + str(i)
        #print "ind>>" + str(ind)
        print ""
        
    if noback:    
        U = zeros((Nk,Nk), dtype=float)
        b = zeros(Nk, dtype=float)
    else:
        U = zeros((Nk+1,Nk+1), dtype=float)
        b = zeros(Nk+1, dtype=float)

    # Build the b vector and U matrix
    tempval0 = w[ind+dx] * i[ind+dx]
    for p in range(Nk):
        b[p] = (tempval0 * r[ind+p]).sum()
        tempval2 = wind*r[ind+p]
        for q in range(p, Nk):
            U[p,q] = (tempval2 * r[ind+q]).sum()
            U[q,p] = U[p,q]

    if not noback:
        b[Nk] = (w[ind+dx] * i[ind+dx]).sum()
        for q in range(Nk):
            U[Nk, q] = (wind * r[ind+q]).sum()
            U[q, Nk] = U[Nk, q]

        U[Nk,Nk] = wind.sum()
    
    detU = linalg.det(U)
    if verbose: print "det(U) is:  " + str(detU)

    if abs(detU) < tol:
        print "Singular matrix: det(U) < tol.  Using pseudoinverse..."
        if verbose: 
            print 'U>>',U
        invmat = linalg.pinv(U)
    else:
        invmat = inv(U)

    a = dot(invmat, b)

    if noback:
        K = a
        B0 = 0.0
    else:
        K = a[0:len(a)-1]
        B0 = a[-1]

    m = rconvolve1d(r, K, mode='valid') + B0

    chisq  = ( wind * (i[ind] - m[ind])**2 ).sum()

    if verbose:
        chisq0 = ( wind * (i[ind] - r[ind])**2 ).sum()
        #print "Kernel is:  " + str(K)
        print "Background: " + str(B0)
        print "For the (" + str(Nr) + " - " + str(Nk+1) + ") = " + str(Nr-Nk-1) + " DOF:"
        print "Red. Chisquared (I-R): " + str(chisq0/(Nr-Nk-1))
        print "Red. Chisquared (I-M): " + str(chisq/(Nr-Nk-1))
    
        figure(); subplot(311)
        plot(r, '--'); plot(i, '-x'); plot(m, '-..'); legend('RIM'); 
        subplot(312); plot(r - i, '--'); plot(m - i); legend(['R-I', 'M-I'])
        subplot(313); plot(K, '-o'); grid('on'); legend(['Kernel']); 

    if retinv:
        return (m, K, B0, chisq, invmat)
    else:
        return (m, K, B0, chisq)
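A usage sketch for dsa (assuming the code above lives in an importable module, here hypothetically called dia): blur a synthetic reference spectrum with a known 5-pixel kernel and ask dsa to recover it. Exact pixel alignment depends on rconvolve1d's conventions, so compare loosely near the edges.

from pylab import arange, exp, convolve, randn
import dia  # hypothetical module name for the code above

x = arange(200.)
r = 1. + exp(-.5 * ((x - 100.) / 3.) ** 2)  # reference: one spectral line
true_k = [.1, .2, .4, .2, .1]
i = convolve(r, true_k, mode='same') + .001 * randn(200)

m, K, B0, chisq = dia.dsa(r, i, 5)
print K     # should resemble true_k, up to edge effects and ordering
print B0    # ~ 0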
Exemple #46
0
	def gen_ssmodel(self):


		#calculate U
		#~~~~~~~~~~~

		if not(hasattr(self,'U')):

			t0=time.time()
			self.U_Index=[]
			U=pb.empty((self.nx,self.nx),dtype=object) 
			U_T=pb.empty((self.nx,self.nx),dtype=object)
			for i in range(self.nx):				
				for j in range(self.nx):
					u_temp=pb.vectorize(self.field.Mu[j,0].conv)(self.kernel.Lambda) 
					U[i,j]=pb.dot(u_temp.T,self.field.Mu[i,0]).astype(float) 
					U_T[j,i]=U[i,j].T

					if U[i,j].any():
						self.U_Index.append((i,j))	

			self.U=U
			self.U_T=U_T
			print 'Elapsed time in seconds to calculate U is', time.time()-t0

		#calculate the inverse of the inner product of the field basis functions
		if not(hasattr(self,'Lambda_x_blocks_inverse')):
			t0=time.time()
			Lambda_x_blocks_inverse=pb.empty((1,len(self.field.NoatEachLevel)),dtype=object)
			Lambda_x_inverse_temp=0
			for i in range(Lambda_x_blocks_inverse.shape[1]):
				Lambda_x_blocks_inverse_temp=pb.reshape(self.field.Mu[Lambda_x_inverse_temp:Lambda_x_inverse_temp+self.field.NoatEachLevel[i],0],(1,self.field.NoatEachLevel[i]))
				Lambda_x_blocks_inverse[0,i]=pb.inv(pb.dot(Lambda_x_blocks_inverse_temp.T,Lambda_x_blocks_inverse_temp)).astype(float)
				Lambda_x_inverse_temp+=self.field.NoatEachLevel[i]
			self.Lambda_x_blocks_inverse=Lambda_x_blocks_inverse
			print 'Elapsed time in seconds to calculate Lambda_x_inverse is',time.time()-t0

		#calculate Lambda_x 
		if not(hasattr(self,'Lambda_x')):
			t0=time.time()
			Lambda_x=pb.dot(self.field.Mu,self.field.Mu.T)
			self.Lambda_x=Lambda_x
			print 'Elapsed time in seconds to calculate Lambda_x is',time.time()-t0

		t0=time.time()
		Lambda_theta = pb.zeros([self.nx,self.nx]) 
		for i in self.U_Index:
			Lambda_theta[i] = pb.dot(self.U[i],self.kernel.weights)			
		self.Lambda_theta = Lambda_theta

		#calculate A
		A_blocks=pb.empty((1,len(self.field.NoatEachLevel)),dtype=object)
		A_temp=0
		for i in range(A_blocks.shape[1]):
			A_blocks_temp=self.Lambda_theta[A_temp:self.field.NoatEachLevel[i]+A_temp]
			A_blocks[0,i]=pb.dot(self.Lambda_x_blocks_inverse[0,i],A_blocks_temp)	
			A_temp+=self.field.NoatEachLevel[i]
		self.A=self.Ts*self.varsigma*pb.vstack(A_blocks[0,:])+self.xi*pb.eye(self.nx)
		print 'Elapsed time in seconds to calculate Lambda_theta and A is',time.time()-t0

		# form the observation matrix 
		if not(hasattr(self,'C')):
			t_observation_matrix=time.time()
			sensor_kernel_convolution_vectorized=pb.vectorize(self.sensor_kernel.conv)
			sensor_kernel_conv_Mu=sensor_kernel_convolution_vectorized(self.field.Mu).T
			C=pb.empty(([self.ny,self.nx]),dtype=float)
			for i in range(self.nx):
				c_temp=pb.vectorize(sensor_kernel_conv_Mu[0,i].__call__)
				C[:,i]=c_temp(self.obs_locns)

			self.C=C
			print 'Elapsed time in seconds to calculate observation matrix C is',time.time()-t_observation_matrix	


		#calculate Sigma_e_c
		if not(hasattr(self,'Sigma_e_c')):

			t0=time.time()
			eta_convolution_vectorized=pb.vectorize(self.eta.conv)
			eta_conv_Mu=eta_convolution_vectorized(self.field.Mu).T
			Pi=pb.dot(eta_conv_Mu.T,self.field.Mu.T).astype(float).T
			self.Pi=Pi
			Sigma_e_blocks=pb.empty((1,len(self.field.NoatEachLevel)),dtype=object)
			Sigma_e_temp=0
			#calculate (Lambda_x)^-1 * Sigma_e
			for i in range(Sigma_e_blocks.shape[1]):
				Sigma_e_blocks_temp=Pi[Sigma_e_temp:self.field.NoatEachLevel[i]+Sigma_e_temp]
				Sigma_e_blocks[0,i]=pb.dot(self.Lambda_x_blocks_inverse[0,i],Sigma_e_blocks_temp)	
				Sigma_e_temp+=self.field.NoatEachLevel[i]
			Sigma_e=pb.vstack(Sigma_e_blocks[0,:])
			
			Sigma_e_temp=0
			for i in range(Sigma_e_blocks.shape[1]):
				Sigma_e_blocks_temp=Sigma_e[:,Sigma_e_temp:self.field.NoatEachLevel[i]+Sigma_e_temp]
				Sigma_e_blocks[0,i]=pb.dot(Sigma_e_blocks_temp,self.Lambda_x_blocks_inverse[0,i].T)
				Sigma_e_temp+=self.field.NoatEachLevel[i]
			Sigma_e=pb.hstack(Sigma_e_blocks[0,:])
			self.Sigma_e=self.eta_weight*Sigma_e
			print 'Elapsed time in seconds to calculate Sigma_e is',time.time()-t0
			self.Sigma_e_c=sp.linalg.cholesky(self.Sigma_e,lower=1)


		#calculate Sigma_epsilon_c

		if not(hasattr(self,'Sigma_epsilon_c')):
			Sigma_epsilon_c=sp.linalg.cholesky(self.Sigma_epsilon,lower=1)
			self.Sigma_epsilon_c=Sigma_epsilon_c

		#calculate EM components for speed
		if not(hasattr(self,'Delta_upsilon')):					
			t0=time.time()
			self.Delta_upsilon=dots(pb.inv(self.Sigma_e),directsum(self.Lambda_x_blocks_inverse),self.U)
			print 'Elapsed time in seconds to calculate Delta_upsilon is',time.time()-t0

		if not(hasattr(self,'Delta_Upsilon')):
			t0=time.time()
			self.Delta_Upsilon=dots(self.U_T,directsum(self.Lambda_x_blocks_inverse),self.Delta_upsilon)
			print 'Elapsed time in seconds to calculate Delta_Upsilon is',time.time()-t0
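The Lambda_x bookkeeping above exploits block-diagonal structure: inverting each block separately is equivalent to inverting the full matrix, and much cheaper. A small check of that identity, using scipy.linalg.block_diag as a stand-in for the directsum helper (an assumption):

import pylab as pb
from scipy.linalg import block_diag

A1 = pb.array([[2., .5], [.5, 1.]])
A2 = pb.array([[3.]])
Lambda_x = block_diag(A1, A2)

blockwise_inv = block_diag(pb.inv(A1), pb.inv(A2))
print pb.allclose(blockwise_inv, pb.inv(Lambda_x))  # True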
print "***********************"
print "some basic array math and matrix math in 2D"
array1 = pylab.array([[2, 3], [4, 5]])
array2 = pylab.array([[5, 6], [3, 7]])  # defining arrays like this also works
vector = pylab.array([1, 2])

print "array1 is:"
print array1
print "array2 is:"
print array2
print "vector is:", vector
print "array1 + array2:"
print array1 + array2
print "array1 * array2:"
print array1 * array2
print "array1 + vector:"
print array1 + vector
print "array1 / vector:"
print array1 / vector
print "pylab.dot(array1, vector)"
print pylab.dot(array1, vector)

array1Inv = pylab.inv(array1)
print "pylab.inv(array1)"
print array1Inv
print "pylab.dot(array1, array1Inv)"
# this comes out as [[1, 0],
#                    [0, 1]] (up to floating-point error)
print pylab.dot(array1, array1Inv)
# there is also pylab.cross
Exemple #48
0
import sys
sys.path.insert(0, "../../")
# from SBDet import parseToCoo, gen_sigs, mix, select_model
from SBDet import *
import pylab as P
import numpy as np

# tr = zload('./EstBiase-n_num.pk')
tr = zload('./EstBiase-n_num-revised.pk')
n_num, iso_n, PA_para, PA_lk, ER_para, ER_lk = zip(*tr['n_num'])
x = iso_n
y = np.array(PA_lk).reshape(-1) - np.array(ER_lk)
P.plot(x, y, 'r.')
# import ipdb;ipdb.set_trace()
P.axis([0, max(x), 0, max(y)])

# solve the least-squares problem
n = len(x)
# X = np.hstack([np.ones((n, 1)), np.array(x).reshape(-1, 1)])
X = np.array(x).reshape(-1, 1)
beta = P.dot(P.dot(P.inv(P.dot(X.T, X)), X.T), np.array(y).reshape(-1, 1))
xp = np.linspace(0, max(x), 100)
yp = xp * beta[0, 0]
P.plot(xp, yp, 'b--')
P.xlabel('Number of isolated nodes')
P.ylabel('Difference between log likelihood of PA and ER')
xm = max(x) * 0.5
P.text(xm, xm * beta[0, 0] - 500, 'y=%f x' % beta[0, 0])
P.show()
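The closed-form beta above solves the normal equations inv(X.T X) X.T y, which is fine for a single regressor but ill-conditioned in general. The same fit via a least-squares solver, as a sketch reusing the X and y defined above:

beta_ls = np.linalg.lstsq(X, np.array(y).reshape(-1, 1))[0]
print beta_ls[0, 0]  # should agree with beta[0, 0] from the normal equations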