Example #1
    def update(self, x, x_upper, mu, sigma, M, V, theta, eps):
        # lambda from equation 7
        foo = (V - x_upper * x.T * np.sum(sigma, axis=1)) / M**2
        a = 2 * theta * V * foo
        b = foo + 2 * theta * V * (eps - log(M))
        c = eps - log(M) - theta * V
        
        a,b,c = a[0,0], b[0,0], c[0,0]

        lam = max(0, 
                  (-b + sqrt(b**2 - 4 * a * c)) / (2. * a),
                  (-b - sqrt(b**2 - 4 * a * c)) / (2. * a))
        # bound it due to numerical problems
        lam = min(lam, 1E+7)
        
        # update mu and sigma
        mu = mu - lam * sigma * (x - x_upper) / M
        sigma = inv(inv(sigma) + 2 * lam * theta * diag(x)**2)
        """
        tmp_sigma = inv(inv(sigma) + theta*lam/U_sqroot*diag(xt)^2);
        % Don't update sigma if results are badly scaled.
        if all(~isnan(tmp_sigma(:)) & ~isinf(tmp_sigma(:)))
            sigma = tmp_sigma;
        end
        """ 
        return mu, sigma    
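Note: in edge cases `b**2 - 4 * a * c` can go negative (and `a` can be zero), in which case the `sqrt` above fails or returns `nan`. A defensive sketch of the same root selection, assuming the scalar `a`, `b`, `c` computed above:

import numpy as np

def solve_lambda(a, b, c):
    # Clamp the discriminant at zero so sqrt never sees a negative value.
    disc = max(b**2 - 4.0 * a * c, 0.0)
    if a == 0.0:
        # Degenerate quadratic: fall back to the linear root, if any.
        lam = 0.0 if b == 0.0 else max(0.0, -c / b)
    else:
        root = np.sqrt(disc)
        lam = max(0.0, (-b + root) / (2.0 * a), (-b - root) / (2.0 * a))
    # Bound it due to numerical problems, as in the original.
    return min(lam, 1e7)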
Example #2
def iGs(Q, kA, kB):
    r"""
    Calculate GBA and GAB matrices (Eq. 1.25, CH82).
    Calculate also GFA and GAF if kF is given instead of kB.

    .. math::

       \bs{G}_\cl{BA} &= -\bs{Q}_\cl{BB}^{-1} \bs{Q}_\cl{BA} \\
       \bs{G}_\cl{AB} &= -\bs{Q}_\cl{AA}^{-1} \bs{Q}_\cl{AB}

    Parameters
    ----------
    Q : array_like, shape (k, k)
        Transition rate (Q) matrix of the kinetic scheme.
    kA : int
        Number of open states in the kinetic scheme.
    kB : int
        Number of short-lived shut states in the kinetic scheme.

    Returns
    -------
    GAB : ndarray, shape (kA, kB)
    GBA : ndarray, shape (kB, kA)
    """

    kE = kA + kB
    QBB = Q[kA:kE, kA:kE]
    QBA = Q[kA:kE, 0:kA]
    QAA = Q[0:kA, 0:kA]
    QAB = Q[0:kA, kA:kE]
    GAB = np.dot(nplin.inv(-1 * QAA), QAB)
    GBA = np.dot(nplin.inv(-1 * QBB), QBA)
    return GAB, GBA
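Note: a minimal usage sketch (not from the original project) with a toy 3-state Q matrix, kA = 2 open states and kB = 1 shut state; each row of a Q matrix sums to zero, and `np`/`nplin` are the aliases the function assumes:

import numpy as np
import numpy.linalg as nplin

Q = np.array([[-3.0,  1.0,  2.0],
              [ 2.0, -5.0,  3.0],
              [ 1.0,  4.0, -5.0]])   # toy rate matrix, rows sum to zero
GAB, GBA = iGs(Q, kA=2, kB=1)
print(GAB.shape, GBA.shape)          # (2, 1) and (1, 2)
# Equivalent without an explicit inverse (usually preferable numerically):
GAB_alt = nplin.solve(-1 * Q[0:2, 0:2], Q[0:2, 2:3])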
Example #3
File: cuts.py Project: Bigben37/FP2
def makeCuts(datas):
    efficencies = []
    sefficencies = []
    purities = []
    for cutnum, cutinfo in enumerate(CUTS):
        name, cut = cutinfo
        (effs, seffs), purity = makeCut(datas, cut, cutnum, name)
        efficencies.append(effs)
        sefficencies.append(seffs)
        purities.append(purity)
    # output for further calculations
    with TxtFile('../calc/efficencies.txt', 'w') as f:
        f.write2DArrayToFile(efficencies, ['%.8f'] * 4)
    with TxtFile('../calc/efficencies_error.txt', 'w') as f:
        f.write2DArrayToFile(sefficencies, ['%.8f'] * 4)
    with TxtFile('../calc/invEfficencies.txt', 'w') as f:
        f.write2DArrayToFile(inv(efficencies), ['%.8f'] * 4)
    with TxtFile('../calc/purities.txt', 'w') as f:
        f.write2DArrayToFile(list(zip(*[purities])), ['%.6f'])
    # output for protocol
    thead = [r"Schnitt$\backslash$MC-Daten", LATEXE, LATEXM, LATEXT, LATEXQ]
    firstrow = [LATEXE, LATEXM, LATEXT, LATEXQ]
    with TxtFile('../src/tab_effmat_val.tex', 'w') as f:
        f.write2DArrayToLatexTable(list(zip(*([firstrow] + list(zip(*efficencies))))), thead, 
                                   ['%s'] + ['%.6f']*4, 'Effizienzmatrix.', 'tab:effmat:val')
    with TxtFile('../src/tab_effmat_err.tex', 'w') as f:
        f.write2DArrayToLatexTable(list(zip(*([firstrow] + list(zip(*sefficencies))))), thead, 
                                   ['%s'] + ['%.6f']*4, 'Fehler der Effizienzmatrix.', 'tab:effmat:err')
    with TxtFile('../src/tab_effmat_inv_val.tex', 'w') as f:
        f.write2DArrayToLatexTable(list(zip(*([firstrow] + list(zip(*inv(efficencies)))))), thead, 
                                   ['%s'] + ['%.6f']*4, 'Inverse Effizienzmatrix.', 'tab:inveffmat:val')
Example #4
    def setLatBase(self, base):
        """Set matrix of unit cell base vectors and calculate corresponding
        lattice parameters and stdbase, baserot and metrics tensors.

        No return value.
        """
        self.base = numpy.array(base)
        detbase = numalg.det(self.base)
        if abs(detbase) < 1.0e-8:
            emsg = "base vectors are degenerate"
            raise LatticeError(emsg)
        elif detbase < 0.0:
            emsg = "base is not right-handed"
            raise LatticeError(emsg)
        self._a = a = math.sqrt(numpy.dot(self.base[0,:], self.base[0,:]))
        self._b = b = math.sqrt(numpy.dot(self.base[1,:], self.base[1,:]))
        self._c = c = math.sqrt(numpy.dot(self.base[2,:], self.base[2,:]))
        self._ca = ca = numpy.dot(self.base[1,:], self.base[2,:]) / (b*c)
        self._cb = cb = numpy.dot(self.base[0,:], self.base[2,:]) / (a*c)
        self._cg = cg = numpy.dot(self.base[0,:], self.base[1,:]) / (a*b)
        self._sa = sa = math.sqrt(1.0 - ca**2)
        self._sb = sb = math.sqrt(1.0 - cb**2)
        self._sg = sg = math.sqrt(1.0 - cg**2)
        self._alpha = math.degrees(math.acos(ca))
        self._beta = math.degrees(math.acos(cb))
        self._gamma = math.degrees(math.acos(cg))
        # cache the unit volume value
        Vunit = self.unitvolume
        # reciprocal lattice
        self._ar = ar = sa/(self.a*Vunit)
        self._br = br = sb/(self.b*Vunit)
        self._cr = cr = sg/(self.c*Vunit)
        self._car = car = (cb*cg - ca)/(sb*sg)
        self._cbr = cbr = (ca*cg - cb)/(sa*sg)
        self._cgr = cgr = (ca*cb - cg)/(sa*sb)
        self._sar = sar = math.sqrt(1.0 - car**2)
        self._sbr = sbr = math.sqrt(1.0 - cbr**2)
        self._sgr = sgr = math.sqrt(1.0 - cgr**2)
        self._alphar = math.degrees(math.acos(car))
        self._betar = math.degrees(math.acos(cbr))
        self._gammar = math.degrees(math.acos(cgr))
        # standard orientation of lattice vectors
        self.stdbase = numpy.array([
                [ 1.0/ar,   -cgr/sgr/ar,    cb*a ],
                [ 0.0,       b*sa,          b*ca ],
                [ 0.0,       0.0,           c    ]],
                dtype=float)
        # calculate unit cell rotation matrix,  base = stdbase*baserot
        self.baserot = numpy.dot(numalg.inv(self.stdbase), self.base)
        self.recbase = numalg.inv(self.base)
        # bases normalized to unit reciprocal vectors
        self.normbase = self.base * [[ar], [br], [cr]]
        self.recnormbase = self.recbase / [ar, br, cr]
        # update metrics tensor
        self.metrics = numpy.array([
                [ a*a,     a*b*cg,  a*c*cb ],
                [ b*a*cg,  b*b,     b*c*ca ],
                [ c*a*cb,  c*b*ca,  c*c    ]],
                dtype=float)
        return
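Note: two invariants of the decomposition above can be checked directly on any instance after `setLatBase()` (here `lat` is a hypothetical lattice object): `base == stdbase · baserot` by construction of `baserot`, and `recbase` is the inverse of `base`:

import numpy

# `lat` is a hypothetical object on which setLatBase() has been called
assert numpy.allclose(numpy.dot(lat.stdbase, lat.baserot), lat.base)
assert numpy.allclose(numpy.dot(lat.base, lat.recbase), numpy.eye(3))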
Example #5
def test_cl():
    ctx = cl.create_some_context()  # (interactive=False)

    # print 'ctx', ctx
    queue = cl.CommandQueue(ctx, properties=cl.command_queue_properties.PROFILING_ENABLE)
    f = open("part1.cl", "r")
    fstr = "".join(f.readlines())
    program = cl.Program(ctx, fstr).build()
    mf = cl.mem_flags

    cameraPos = np.array([0, 6, -1, 0])
    invView = la.inv(look_at((0, 6, -1), (0, 1, 1), (0, 1, 0)))
    invProj = la.inv(perspective(60, 1, 1, 1000))
    print "view", invView
    print "proj", invProj
    viewParamsData = (
        cameraPos.flatten().tolist()
        + np.transpose(invView).flatten().tolist()
        + np.transpose(invProj).flatten().tolist()
    )
    # print 'vpd', viewParamsData
    viewParams = struct.pack("4f16f16f", *viewParamsData)
    viewParams_buf = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=viewParams)
    num_pixels = 1000 * 1000
    # setup opencl
    dest = np.ndarray((1000, 1000, 4), dtype=np.float32)
    dest_buf = cl.Buffer(ctx, mf.WRITE_ONLY, dest.nbytes)
    local_shape = (8, 8)
    # run kernel
    evt = program.part1(queue, (dest.shape[0], dest.shape[1]), None, viewParams_buf, dest_buf)
    # evt = program.part1(queue, dest.shape, None, dest_buf)
    cl.enqueue_read_buffer(queue, dest_buf, dest).wait()
    print "time", (evt.profile.end - evt.profile.start) * 0.000001, "ms"
    return dest
Example #6
def reachable_form(xsys):
    # Check to make sure we have a SISO system
    if not control.issiso(xsys): 
        raise control.ControlNotImplemented(
            "Canonical forms for MIMO systems not yet supported")

    # Create a new system, starting with a copy of the old one
    zsys = control.StateSpace(xsys)

    # Generate the system matrices for the desired canonical form
    zsys.B = zeros(shape(xsys.B))
    zsys.B[0, 0] = 1
    zsys.A = zeros(shape(xsys.A))
    Apoly = poly(xsys.A)                # characteristic polynomial
    for i in range(0, xsys.states):
        zsys.A[0, i] = -Apoly[i+1] / Apoly[0]
        if (i+1 < xsys.states): zsys.A[i+1, i] = 1
    
    # Compute the reachability matrices for each set of states
    Wrx = control.ctrb(xsys.A, xsys.B)
    Wrz = control.ctrb(zsys.A, zsys.B)

    # Transformation from one form to another
    Tzx = Wrz * inv(Wrx)

    # Finally, compute the output matrix
    zsys.C = xsys.C * inv(Tzx)

    return zsys, Tzx
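Note: a usage sketch, assuming `control` is the python-control package (an older version in which `StateSpace` objects expose a `states` attribute and `*` means matrix multiplication, as the code above requires):

import control

# hypothetical second-order SISO plant
sys = control.StateSpace([[-1.0, 1.0], [0.0, -2.0]],
                         [[0.0], [1.0]],
                         [[1.0, 0.0]], [[0.0]])
zsys, Tzx = reachable_form(sys)
# zsys.A is in companion (reachable canonical) form: the first row holds the
# negated characteristic-polynomial coefficients, with ones below the diagonal.
print(zsys.A)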
Example #7
 def find_position(self, mirror=False):
     n_v = self.p0 - self.apex
     a, b, c = n_v[0], n_v[1], n_v[2]
     x0, y0, z0 = self.apex[0], self.apex[1], self.apex[2]
     ref_p = 0
     if c != 0.0:
         ref_p = np.array([1.0, 1.0, (a * (x0 - 1.0) + b * (y0 - 1.0)) / c + z0])
     elif b != 0.0:
         ref_p = np.array([1.0, (a * (x0 - 1.0) + c * (z0 - 1.0)) / b + y0, 1.0])
     else:
         ref_p = np.array([(b * (y0 - 1.0) + c * (z0 - 1.0)) / a + x0, 1.0, 1.0])
     z_v = f3(np.zeros(3), (self.p0 - self.apex))
     x_v = f3(np.zeros(3), (ref_p - self.apex))
     y_v = np.cross(z_v, x_v)
     T = f1(x0_v, y0_v, z0_v, x_v, y_v, z_v)
     theta = self.top_angle
     phi_p1 = self.phi
     phi_p2 = self.phi
     if mirror:
         phi_p2 = phi_p2 + self.bottom_angle
     else:
         phi_p2 = phi_p2 - self.bottom_angle
     r0 = self.edge
     p1_new = np.array(
         [r0 * np.cos(phi_p1) * np.sin(theta), r0 * np.sin(phi_p1) * np.sin(theta), r0 * np.cos(theta)]
     )
     p2_new = np.array(
         [r0 * np.cos(phi_p2) * np.sin(theta), r0 * np.sin(phi_p2) * np.sin(theta), r0 * np.cos(theta)]
     )
     self.p1 = np.dot(inv(T), p1_new) + self.apex
     self.p2 = np.dot(inv(T), p2_new) + self.apex
Example #8
def CCA(X, Y, eps=1.e-15):
    """
    Canonical correlation analysis of two matrices
    
    Parameters
    ----------
    X array of shape (nbitem,p) 
    Y array of shape (nbitem,q) 
    eps=1.e-15, float is a small biasing constant
                to grant invertibility of the matrices
    
    Returns
    -------
    ccs, array of shape (min(nbitem, p, q),), the canonical correlations
        
    Note
    ----
    It is expected that nbitem>>max(p,q)
    """
    from numpy.linalg import cholesky, inv, svd
    if Y.shape[0] != X.shape[0]:
        raise ValueError("Incompatible dimensions for X and Y")
    p = X.shape[1]
    q = Y.shape[1]
    sqX = np.dot(X.T,X)
    sqY = np.dot(Y.T,Y)
    sqX += np.trace(sqX)*eps*np.eye(p)
    sqY += np.trace(sqY)*eps*np.eye(q)
    rsqX = cholesky(sqX)
    rsqY = cholesky(sqY)
    iX = inv(rsqX).T
    iY = inv(rsqY).T
    Cxy = np.dot(np.dot(X,iX).T,np.dot(Y,iY))
    uv, ccs, vv = svd(Cxy)
    return ccs
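Note: a minimal usage sketch with synthetic data (names hypothetical); one column of Y is made nearly proportional to a column of X, so the leading canonical correlation should be close to 1:

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(500, 3)
Y = np.hstack((X[:, :1] + 0.1 * rng.randn(500, 1),   # correlated column
               rng.randn(500, 2)))                    # noise columns
ccs = CCA(X, Y)
print(ccs)   # first entry close to 1, the rest near 0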
Example #9
    def get_original(self, model, view, projection, screen_size, point2d):
        #print(point2d)
        point2d = 2 * point2d / screen_size - 1
        point = np.array([point2d[0], point2d[1], 1, 1])

        # transform model and view to matrices
        model = transformation_matrix(model.translation, model.orientation)
        view = transformation_matrix(view.translation, view.orientation)

        # full matrix of transformation from point in object's coordinates
        # to camera's coordinates
        transformation = projection.dot(view).dot(model)

        inverse_transformation = lin.inv(transformation)

        # some point on the same ray as original point
        point = inverse_transformation.dot(point).A1

        # position of camera in object's coordinates
        camera_point = lin.inv(view.dot(model)).dot(ORIGIN).A1

        # convert points to useful coordinates
        point = _to_cartesian_coordinates(point)
        camera_point = _to_cartesian_coordinates(camera_point)

        ray = Ray(camera_point, point)
        # original point in object's coordinates
        point3d = self._intersect(ray)

        if point3d is None:
            return None
        # original point in world's coordinates
        world_point = _to_homogeneous_coordinates(point3d)
        world_point = model.dot(world_point).A1
        return world_point
Example #10
 def run(self):
     rate = rospy.Rate(10)
     while not rospy.is_shutdown():
         now = rospy.Time.now()
         self.listener.waitForTransform(self.odom_frame,self.body_frame, now, rospy.Duration(1.0))
         (trans,rot) = self.listener.lookupTransform(self.odom_frame,self.body_frame, now)
         new_odom = mat(self.listener.fromTranslationRotation(trans,rot))
         # print "================================================"
         # print new_odom
         # print self.old_odom
         odom = new_odom * inv(self.old_odom)
         self.old_odom = new_odom
         self.lock.acquire()
         self.predict(odom)
         theta = self.X[2,0]
         pose_mat = mat([[cos(theta), -sin(theta), 0, self.X[0,0]], 
                       [sin(theta),  cos(theta), 0, self.X[1,0]],
                       [         0,           0, 1, 0],
                       [         0,           0, 0, 1],
                       ]);
         correction_mat = inv(new_odom) * pose_mat
         self.lock.release()
         scale, shear, angles, trans, persp = decompose_matrix(correction_mat)
         self.broadcaster.sendTransform(trans,
                 quaternion_from_euler(*angles),now, self.odom_frame,self.target_frame)
         self.publish(now)
         rate.sleep()
Example #11
def test_gruber():
  from numpy import dot
  from numpy.linalg import inv
  from pylada.math import gruber, is_integer
  from pylada.error import internal, input
  
  cell = [[0, 0.5, 0.5], [0.5, 0, 0.5], [0.5, 0.5, 0]]
  lim = 5
   
  for a00 in [-1, 1]: 
    for a10 in range(-lim, lim+1):
      for a11 in [-1, 1]:
        for a20 in range(-lim, lim+1):
          for a21 in range(-lim, lim+1):
            for a22 in [-1, 1]:
              a = [[a00, 0, 0], [a10, a11, 0], [a20, a21, a22]]
              g = gruber(dot(cell, a))
              assert is_integer(dot(inv(cell), g))
              assert is_integer(dot(inv(g), cell))

  try: gruber([[0, 0, 0], [1, 2, 0], [4, 5, 6]])
  except input: pass
  else: raise Exception()

  try: gruber([[1, 0, 0], [1, 1, 0], [4, 5, 1]], itermax=2)
  except internal: pass
  else: raise Exception()
Example #12
 def cal_apex_coor(self,switch=False,phi=0.,mirror=False):
 #basic idea: set a new coordinate frame with p0p1 as the z vector (starting from p1)
 #set an arbitrary y vector on the normal plane, and cross-product to solve the x vector
 #then use phi and theta (sharp angle) to solve the cross point (CP in file) and apex (A in file)
 #note phi is in the range [0, 2pi]
 
     p0,p1=self.p0,self.p1
     if switch:
         p0,p1=self.p1,self.p0
     n_v=p0-p1
     origin=p1
     a,b,c=n_v[0],n_v[1],n_v[2]
     x0,y0,z0=p1[0],p1[1],p1[2]
     ref_p=0
     if c!=0.:
         ref_p=np.array([1.,1.,(a*(x0-1.)+b*(y0-1.))/c+z0])
     elif b!=0.:
         ref_p=np.array([1.,(a*(x0-1.)+c*(z0-1.))/b+y0,1.])
     else:
         ref_p=np.array([(b*(y0-1.)+c*(z0-1.))/a+x0,1.,1.])
     y_v=f3(np.zeros(3),(ref_p-origin))
     z_v=f3(np.zeros(3),(p0-origin))
     x_v=np.cross(y_v,z_v)
     T=f1(x0_v,y0_v,z0_v,x_v,y_v,z_v)
     r1=self.len_offset[0]
     r2=self.len_offset[0]+self.edge_len
     theta=self.sharp_angle
     cross_pt_new = np.array([r1*np.cos(phi)*np.sin(theta),r1*np.sin(phi)*np.sin(theta),r1*np.cos(theta)])
     apex_new = np.array([r2*np.cos(phi)*np.sin(theta),r2*np.sin(phi)*np.sin(theta),r2*np.cos(theta)])
     self.cross_pt = np.dot(inv(T),cross_pt_new)+origin
     self.apex = np.dot(inv(T),apex_new)+origin
     self.cal_p2(p0,p1,mirror)
Example #13
File: rti.py Project: npatwari/rti
def initRTI(nodeLocs, delta_p, sigmax2, delta, excessPathLen):

    # Set up pixel locations as a grid.
    personLL        = nodeLocs.min(axis=0)
    personUR        = nodeLocs.max(axis=0)
    pixelCoords, xVals, yVals = calcGridPixelCoords(personLL, personUR, delta_p)
    pixels          = pixelCoords.shape[0]
    #plt.figure(3)
    #plotLocs(pixelCoords)
    

    # Find distances between pixels and transceivers
    DistPixels  = dist.squareform(dist.pdist(pixelCoords))
    DistPixelAndNode = dist.cdist(pixelCoords, nodeLocs)
    DistNodes   = dist.squareform(dist.pdist(nodeLocs))

    # Find the inverse of the covariance matrix between pixels
    CovPixelsInv       = linalg.inv(sigmax2*np.exp(-DistPixels/delta))

    # Calculate weight matrix for each link.
    nodes = len(nodeLocs)
    links = nodes*(nodes-1)
    W = np.zeros((links, pixels))
    for ln in range(links):
        txNum, rxNum  = txRxForLinkNum(ln, nodes)
        ePL           = DistPixelAndNode[:,txNum] + DistPixelAndNode[:,rxNum] - DistNodes[txNum,rxNum]  
        inEllipseInd  = np.argwhere(ePL < excessPathLen)
        pixelsIn      = len(inEllipseInd)
        if pixelsIn > 0:
            W[ln, inEllipseInd] = 1.0 / float(pixelsIn)

    # Compute the projection matrix
    inversion       = np.dot(linalg.inv(np.dot(W.T, W) + CovPixelsInv), W.T)

    return (inversion, xVals, yVals)
Example #14
def lda(class1,class2,N):
	N1 = len(class1)/float(N)
	N2 = len(class2)/float(N) 
	mu1 = np.matrix([np.sum(class1[:,0])/len(class1),np.sum(class1[:,1])/len(class1)])
	mu2 = np.matrix([np.sum(class2[:,0])/len(class2),np.sum(class2[:,1])/len(class2)])
	sigma1 = np.zeros(shape=(2,2))
	for i in range(0,len(class1)):
		a = class1[i][0]-mu1[0,0]
		b = class1[i][1]-mu1[0,1]
		X = np.matrix([a,b])
		z = np.dot(X.T,X)
		sigma1 = sigma1 + z
	sigma1 = sigma1/(N-2)
	sigma2 = np.zeros(shape=(2,2))
	for i in range(0,len(class2)):
		a = class2[i][0]-mu2[0,0]
		b = class2[i][1]-mu2[0,1]
		X = np.matrix([a,b])
		z = np.dot(X.T,X)
		sigma2 = sigma2 + z
	sigma2 = sigma2/(N-2)
	sigma = (sigma1 + sigma2)
	a0 = log(N1/N2)-0.5*((mu1+mu2)*inv(sigma)*((mu1-mu2).T))
	A0 = np.squeeze(np.asarray(a0))
	a = inv(sigma)*((mu1-mu2).T)
	A = np.squeeze(np.asarray(a))
	return A0, A, sigma1, sigma2, mu1, mu2
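Note: a usage sketch with two synthetic Gaussian classes (hypothetical data; assumes `log` and `inv` are in scope for `lda`, e.g. `from numpy import log` and `from numpy.linalg import inv`). The returned `A0`, `A` define the usual two-class rule: assign class 1 when A0 + A·x > 0:

import numpy as np

rng = np.random.RandomState(0)
class1 = rng.randn(100, 2) + np.array([2.0, 0.0])
class2 = rng.randn(100, 2) + np.array([-2.0, 0.0])
A0, A, s1, s2, m1, m2 = lda(class1, class2, N=200)
x = np.array([1.5, 0.3])
print(A0 + A.dot(x) > 0)   # True: x falls on the class-1 side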
Example #15
def part_b(run_count, a, b):
    """
    Solve using LU decomposition
    """
    _, l, u = lu(a)
    for run in range(run_count):
        inv(u).dot(inv(l).dot(b))
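Note: explicitly inverting both triangular factors is wasteful; the same solve can be sketched with forward and back substitution (`scipy.linalg.lu` also returns a permutation matrix, which the snippet above discards, so this does the same for a like-for-like comparison):

from scipy.linalg import lu, solve_triangular

def part_b_solve(a, b):
    _, l, u = lu(a)
    y = solve_triangular(l, b, lower=True)    # forward substitution: L y = b
    return solve_triangular(u, y)             # back substitution:    U x = y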
Example #16
File: wt.py Project: jpcoles/jcode
def test8():
    global L0, N

    L = deepcopy(L0)
    rho = zeros(N, 'double')

    rho[random.sample(range(N), N // 2)] = 1

    print(rho)

    LI = linalg.inv(L)
    #print L
    #print LI
    #I = numpy.dot(L,LI)
    #I[abs(I)<0.001] = 0
    #print I

    t = numpy.greater(rho, 0)
    X = numpy.zeros((N,N))
    for i in range(N):
        X[0][i] = i
    print(X)
    LIC = numpy.compress(t, LI, 1)
    print(LIC)
    LIC = numpy.compress(t, LIC, 0)
    print(LIC)
    LICI = linalg.inv(LIC)
    print(LICI)
Example #17
File: mllt.py Project: cesarrp/SAAVRAZ
    def objective(self, A, r, c):
        """
        Log-likelihood function and gradient for MLLT::

          L(A) = N \\log|A| - \\sum_j \\frac{N_j}{2} \\log |diag(A \\Sigma_j A^T)|
          \\nabla L(A) = N(A^T)^{-1} - \\sum_j N_j diag(A \\Sigma_j A^T)^{-1}A\\Sigma_j

        @param A: Flattened MLLT transformation matrix
        @type A: numpy.ndarray
        @param r: Actual number of rows in MLLT transformation
        @type r: int
        @param c: Actual number of columns in MLLT transformation
        @type c: int
        @return: negated log-likelihood and (flattened) gradient
        @rtype: (float, numpy.ndarray)
        """
        # Note: A has been flattened to make it acceptable to scipy.optimize
        A = A.reshape((r,c))
        detA = det(A)
        ll = self.totalcount * log(detA)
        lg = self.totalcount * inv(A.T)
        for j, nj in enumerate(self.count):
            C = self.cov[j]
            cl = diag(dot(dot(A, C), A.T))
            ll = ll - (float(nj) / 2) * sum(log(cl))
            lg = lg - float(nj) * dot(dot(inv(diag(cl)), A), C)
        print "likelihood: %f" % ll
        # Flatten out the gradient
        lg = lg.ravel()
        print "gradient L2: %f" % sqrt(sum(lg*lg))
        # Note: we negate these to maximize likelihood
        return -ll, -lg
Example #18
    def colorDeconv(self, imin):
        M_h_e_dab_meas = numpy.array([[0.650, 0.072, 0.268],
                                      [0.704, 0.990, 0.570],
                                      [0.286, 0.105, 0.776]])

        # [H,E]
        M_h_e_meas = numpy.array([[0.644211, 0.092789],
                                  [0.716556, 0.954111],
                                  [0.266844, 0.283111]])

        if self.params['image_type'] == "HE":
            # print "HE stain"
            M = M_h_e_meas
            M_inv = numpy.dot(linalg.inv(numpy.dot(M.T, M)), M.T)

        elif self.params['image_type'] == "HEDab":
            # print "HEDab stain"
            M = M_h_e_dab_meas
            M_inv = linalg.inv(M)

        else:
            # print "Unrecognized image type !! image type set to \"HE\" "
            M = numpy.diag([1, 1, 1])
            M_inv = numpy.diag([1, 1, 1])

        imDecv = numpy.dot(self.log_transform(imin.astype('float')), M_inv.T)
        imout = self.exp_transform(imDecv)

        return imout
Example #19
File: ar.py Project: idiap/ssp
def ARLasso(ac, order=10, ridge=0.0):
    if ac.ndim > 1:
        ret = np.ndarray((ac.shape[0], order))
        gain = np.ndarray(ac.shape[0])
        for f in range(ac.shape[0]):
            ret[f], gain[f] = ARLasso(ac[f], order, ridge)
        return ret, gain

    # Convert ac into matrices
    YY, Yy = ACToMatrix(ac, order)

    # Initialise lasso with ridge
    gain = ac[0]
    A = np.zeros((order, order))
    for i in range(order):
        #A[i,i] = ridge*ac[0]*ac.size
        A[i,i] = 0.01*ac[0]
    coef = np.dot(linalg.inv(YY+A), Yy)

    for i in range(10):
        for j in range(order):
            A[j,j] = np.sqrt(abs(coef[j]))
        gain = ac[0] + np.dot(coef, (np.dot(YY, coef) - 2*Yy)) / ac.size
        B = np.identity(order) * gain
        X = linalg.inv(np.dot(A, np.dot(YY, A)) + ridge*B)
        coef = np.dot(np.dot(A, np.dot(X, A)), Yy)
        # Each iteration should reduce the L1 norm of coef
        #print i, linalg.norm(coef, ord=1)

    return coef, gain
Example #20
 def polar(self, i):
     """Return the polar angle for the specified peak"""
     Oimat = inv(self.Omat)
     Mat = self.pixel_size * inv(self.Dmat) * Oimat
     peak = Oimat * (vec(self.xp[i], self.yp[i]) - self.Cvec)
     v = norm(Mat * peak)
     return np.arctan(v / self.distance)
Example #21
 def Gvec(self, x, y, z):
     phi = self.phi + self.phi_step * z
     v1 = vec(x, y)
     v2 = self.pixel_size * inv(self.Omat) * (v1 - self.Cvec)
     v3 = inv(self.Dmat) * v2 - self.Dvec
     return (inv(self.Gmat(phi)) * 
             ((norm_vec(v3) / self.wavelength) - self.Evec))
Example #22
def adjacent_open_to_shut_range_mean(u1, u2, QAA, QAF, QFF, QFA, phiA):
    """
    Calculate mean (ideal, i.e. no missed events) open times adjacent to a
    specified shut time range.

    Parameters
    ----------
    u1, u2 : floats
        Shut time range.
    QAA, QAF, QFF, QFA : array_like
        Submatrices of Q.
    phiA : array_like, shape (1, kA)
        Initial vector for openings

    Returns
    -------
    m : float
        Mean open time.
    """
    
    kA = QAA.shape[0]
    uA = np.ones((kA))[:,np.newaxis]
    invQAA, invQFF = -nplin.inv(QAA), nplin.inv(QFF)
    expQFFr = qml.expQt(QFF, u2) - qml.expQt(QFF, u1)
    col = np.dot(np.dot(np.dot(np.dot(QAF, invQFF), expQFFr), QFA), uA)
    row1 = np.dot(phiA, qml.Qpow(invQAA, 2))
    row2 = np.dot(phiA, invQAA)
    m = np.dot(row1, col)[0, 0] / np.dot(row2, col)[0, 0]
    return m
Example #23
def doTheMath(file1, file2):  
    matrixAleft = numpy.loadtxt(file1, dtype=numpy.float32, usecols=[1, 2])
    matrixAxtion = numpy.loadtxt(file2, dtype=numpy.float32)

    matrixA_x = numpy.array([[matrixAleft[0][0], (dest_size_x-matrixAleft[0][0])]])
    matrixb_x = numpy.array([ matrixAxtion[0][0] * dest_size_x ])
    for i in range(1, 54):
        matrixAi = numpy.array([[ matrixAleft[i][0], (dest_size_x-matrixAleft[i][0]) ]])
        matrixA_x = numpy.append(matrixA_x, matrixAi, axis=0)
        matrixb_x = numpy.append(matrixb_x, [ matrixAxtion[i][0] * dest_size_x ])

    ATb_x = numpy.dot (numpy.transpose(matrixA_x), matrixb_x)
    ATAinverse_x = linalg.inv( numpy.dot( numpy.transpose(matrixA_x), matrixA_x ) )
    x = numpy.dot (ATAinverse_x, ATb_x)

    print('[horizon_x, start_x] is', x)

    matrixA_y = numpy.array([[matrixAleft[0][1], (dest_size_y-matrixAleft[0][1])]])
    matrixb_y = numpy.array([ matrixAxtion[0][1] * dest_size_y ])
    for i in range(1, 54):
        matrixAi = numpy.array([[ matrixAleft[i][1], (dest_size_y-matrixAleft[i][1]) ]])
        matrixA_y = numpy.append(matrixA_y, matrixAi, axis=0)
        matrixb_y = numpy.append(matrixb_y, [ matrixAxtion[i][1] * dest_size_y ])

    ATb_y = numpy.dot (numpy.transpose(matrixA_y), matrixb_y)
    ATAinverse_y = linalg.inv( numpy.dot( numpy.transpose(matrixA_y), matrixA_y ) )
    y = numpy.dot (ATAinverse_y, ATb_y)

    print('[horizon_y, start_y] is', y)
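Note: solving the normal equations with an explicit inverse squares the condition number of the problem; a sketch of the same per-axis fit via `numpy.linalg.lstsq`, reusing `matrixA_x`/`matrixb_x` from the function above:

x, residuals, rank, sv = numpy.linalg.lstsq(matrixA_x, matrixb_x, rcond=None)
print('[horizon_x, start_x] is', x)   # same fit, better conditioned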
Example #24
def get_sdrefb_upd(amat, t, fbtype=None, wnrm=2,
                   B=None, R=None, Q=None, maxeps=None,
                   baseA=None, baseZ=None, baseP=None, maxfac=None, **kwargs):
    if fbtype == 'sylvupdfb' or fbtype == 'singsylvupd':
        if baseP is not None:
            deltaA = amat - baseA
            epsP = spla.solve_sylvester(amat, -baseZ, -deltaA)
            eps = npla.norm(epsP, ord=wnrm)
            print('|amat - baseA|: {0} -- |E|: {1}'.
                  format(npla.norm(deltaA, ord=wnrm), eps))
            if maxeps is not None:
                if eps < maxeps:
                    opepsPinv = npla.inv(epsP+np.eye(epsP.shape[0]))
                    return baseP.dot(opepsPinv), True
            elif maxfac is not None:
                if (1+eps)/(1-eps) < maxfac and eps < 1:
                    opepsPinv = npla.inv(epsP+np.eye(epsP.shape[0]))
                    return baseP.dot(opepsPinv), True

    # otherwise: (SDRE feedback or `eps` too large already)
    # curX = spla.solve_continuous_are(amat, B, Q, R)
    # if fbtype == 'sylvupdfb' or fbtype == 'singsylvupd':
    #     logger.debug('in `get_fb_dict`: t={0}: eps={1} too large, switch!'.
    #                  format(t, eps))
    # else:
    #     logger.debug('t={0}: computed the SDRE feedback')
    return None, False
Example #25
    def de_embed(self, pad_in, s2p, pad_out):

        left_mat = pad_in["data"][0]["body"]
        right_mat = pad_out["data"][0]["body"]

        for sec_idx in range(len(s2p["data"])):

            section = s2p["data"][sec_idx]

            for i in range(len(section["body"])):

                left = map(float, left_mat[i][1:])
                l_abcd = self.get_ABCD(left)

                center = map(float, section["body"][i][1:])
                c_abcd = self.get_ABCD(center)

                right = map(float, right_mat[i][1:])
                r_abcd = self.get_ABCD(right)

                [[A, B], [C, D]] = inv(l_abcd).dot(c_abcd).dot(inv(r_abcd))
                [s11, s21, s12, s22] = self.abcd2s(A, B, C, D)
                section["body"][i][1:] = \
                    [s11.real, s11.imag, s21.real, s21.imag, s12.real, s12.imag, s22.real, s22.imag]

        return s2p
Example #26
def fcvrot(f,theta):
    if asarray(f.shape).prod() == 1:
        return f
    if len(f.shape) == 1:
        f = asarray([f])
    x,y = numpy.indices(f.shape)
    theta = radians(theta)
    Trot = asarray([
                [cos(theta),    sin(theta), 0],
                [-sin(theta),   cos(theta), 0],
                [0,             0,          1]
            ])

    Tx   = asarray([
                [1, 0,  f.shape[0]//2],
                [0, 1,  f.shape[0]//2],
                [0, 0,  1]
            ])
    T = dot(dot(Tx,Trot),inv(Tx))
    
    pt = array([ x.ravel(), y.ravel(), ones(len(x.ravel()))])
    npt= dot(inv(T), pt)
    npt= numpy.round(npt).astype(int)
    
    g = f[npt[0],npt[1]].reshape(f.shape)
    return g
Example #27
 def iterate_filter(self, dyn_model, dyn_noise_model, dyn_noise_cov,
         iters = 1, epsilon = 1, verbose=False):
     A = dyn_model
     B = dyn_noise_model
     Q = dyn_noise_cov
     x_prev = deepcopy(self.x_est)
     P_prev = deepcopy(self.P_est)
     for i in self.network.keys():
         yi = self.sensors[i].u[:,0]
         Si = self.sensors[i].U[:,:]
         for nbr in self.network[i]:
             logging.debug("Reading sensor locals {}".format(nbr))
             # Do NOT use +=, it will modify the objects
             # within the elements
             yi = yi + self.sensors[nbr].u[:,0]
             Si = Si + self.sensors[nbr].U[:,:]
             logging.debug(self.sensors[nbr].U[:,:])
         for it in range(iters):
             Mi = LA.inv(LA.inv(P_prev[i]) + Si)
             x_hat = self.x_est[i] + \
                     np.dot(Mi, (yi - np.dot(Si, self.x_est[i]) ) )
             for nbr in self.network[i]:
                 logging.debug(self.x_est[nbr].shape)
                 logging.debug(self.x_est[i].shape)
                 x_hat += np.dot(Mi, (self.x_est[nbr] - self.x_est[i])) * epsilon
             # Update the state of the filter
             self.P_est[i] = np.dot(A, np.dot(Mi, A.T)) + np.dot(B, np.dot(Q, B.T))
             self.x_est[i] = np.dot(A, x_hat)
         if verbose:
             print "Current estimate from {}: {}".format(i, self.x_est[i].T)
Example #28
    def InitializeElectrons(self):
        if self.N_up == 0 and self.N_down == 0:
            print('Error: no electrons to initialize')
            return []
        else:
            # generate array of electron positions, normally distributed from the origin with Bohr radius
            n = self.N_up+self.N_down
            self.e_positions = np.random.randn(n,3) * GSF.a_B # generate array of electron positions
            print('init e_pos', self.e_positions)
            # Store displacements and distances
            self.e_disp = np.zeros((n,n,3)) # store the displacements in a 3D matrix to make indexing easier
            self.e_dist = np.zeros((n,n)) # the electron matrices should only be upper diagonal
            self.atom_disp = np.zeros((n,len(self.atom_list),3))
            self.atom_dist = np.zeros((n,len(self.atom_list)))
            index = 0
            for i in range(n):
                self.e_disp[i,i+1:] = self.e_positions[i] - self.e_positions[i+1:]
                self.e_dist[i,i+1:] = np.sqrt(np.sum(self.e_disp[i,i+1:]**2,1))
                self.atom_disp[i] = self.e_positions[i] - self.ion_positions
                self.atom_dist[i,:] = np.sqrt(np.sum(self.atom_disp[i,:]**2,1))
        # Once e_positions is initialized, the Slater matrices and their determinants and inverses are all initialized.
        self.slater_matrix_up = SlaterMatrix(self.e_positions[0:self.N_up],self.psi_up)
        self.slater_matrix_down = SlaterMatrix(self.e_positions[self.N_up:],self.psi_down)
        print('slater_matrix', self.slater_matrix_up)
        if self.N_up>0: 
            self.inverse_SD_up = LA.inv(self.slater_matrix_up)
            self.slater_det_up = LA.det(self.slater_matrix_up)			
        if self.N_down>0: 
            self.inverse_SD_down = LA.inv(self.slater_matrix_down) 
            self.slater_det_down = LA.det(self.slater_matrix_down)
        self.J = self.Jastrow()
        print('slater_inv', self.inverse_SD_up)
        return self.e_positions
Example #29
File: learn.py Project: sboosali/PGM
def marginal_log_likelihood(y, A,Q,C,R, _x,_P):
    """
    compute the marginal log likelihood
    given the learned model parameters
    and the learned expected sufficient statistics

    C must be invertible (:= square and non-singular)
    e.g. there is no inverse to a projection

    p(y|params) = ...
    """

    if True: return 0  # NOTE: stub; the code below is never executed

    T,p = y.shape
    _,d = _x.shape
    Cy = mul(inv(C), tp(y))
    CRC = inv(mul(tp(C),inv(R),C))

    Z_swap = (1/2) * (log(det(2*pi*CRC)) - log(det(2*pi*R)))

    covar = CRC+identity(d)
    Z_merge = log(2*pi*det(covar))**(-d/2) - (1/2)*mul(tp(Cy), covar, Cy)

    for t in r(1,T-1):
        mean  = _x[t+1]
        covar = CRC + _P[t+1]
        Z_merge += log(2*pi*det(covar))**(-d/2) - (1/2)*mul(tp(Cy-mean), covar, (Cy-mean))

    return sum(T*Z_swap + Z_merge)
Example #30
    def update(self, x, x_upper, mu, sigma, M, V, theta, eps):
        # lambda from equation 7
        foo = (V - x_upper * x.T * np.sum(sigma, axis=1)) / M**2 + V * theta**2 / 2.
        a = foo**2 - V**2 * theta**4 / 4
        b = 2 * (eps - np.log(M)) * foo
        c = (eps - np.log(M))**2 - V * theta**2

        a,b,c = a[0,0], b[0,0], c[0,0]

        lam = np.amax([0,
                  (-b + sqrt(b**2 - 4 * a * c)) / (2. * a),
                  (-b - sqrt(b**2 - 4 * a * c)) / (2. * a)])
        # bound it due to numerical problems
        lam = np.minimum(lam, 1E+7)

        # update mu and sigma
        U_sqroot = 0.5 * (-lam * theta * V + sqrt(lam**2 * theta**2 * V**2 + 4*V))
        mu = mu - lam * sigma * (x - x_upper) / M
        sigma = inv(inv(sigma) + theta * lam / U_sqroot * diag(x)**2)
        """
        tmp_sigma = inv(inv(sigma) + theta*lam/U_sqroot*diag(xt)^2);
        % Don't update sigma if results are badly scaled.
        if all(~isnan(tmp_sigma(:)) & ~isinf(tmp_sigma(:)))
            sigma = tmp_sigma;
        end
        """
        return mu, sigma
Example #31
                   [(x_pre[:, 0][0] - anchor2[0]) / AA2,
                    (x_pre[:, 0][1] - anchor2[1]) / AA2,
                    (x_pre[:, 0][2] - anchor2[2]) / AA2, 0, 0, 0, 0, 0, 0],
                   [(x_pre[:, 0][0] - anchor3[0]) / AA3,
                    (x_pre[:, 0][1] - anchor3[1]) / AA3,
                    (x_pre[:, 0][2] - anchor3[2]) / AA3, 0, 0, 0, 0, 0, 0],
                   [(x_pre[:, 0][0] - anchor4[0]) / AA4,
                    (x_pre[:, 0][1] - anchor4[1]) / AA4,
                    (x_pre[:, 0][2] - anchor4[2]) / AA4, 0, 0, 0, 0, 0, 0],
                   [0, 0, 1, 0, 0, 0, 0, 0, 0]],
                  dtype=np.float)

    #########################################################################################
    P_pre = AA.dot(P_old.dot(AA.T)) + QQ

    G1 = la.inv(CC.dot(P_pre.dot(CC.T)) + RR)
    GG = P_pre.dot((CC.T).dot(G1))
    #########################################################################################
    h_pre = np.array([[s_the_pre * g], [-c_the_pre * s_phi_pre * g],
                      [-c_the_pre * c_phi_pre * g],
                      [
                          np.sqrt(
                              pow(x_pre[:, 0][0] - anchor1[0], 2) +
                              pow(x_pre[:, 0][1] - anchor1[1], 2) +
                              pow(x_pre[:, 0][2] - anchor1[2], 2))
                      ],
                      [
                          np.sqrt(
                              pow(x_pre[:, 0][0] - anchor2[0], 2) +
                              pow(x_pre[:, 0][1] - anchor2[1], 2) +
                              pow(x_pre[:, 0][2] - anchor2[2], 2))
Example #32
def GLS(x, A, L):
    """
    x: sample matrix, each column is a link flow vector sample; 24 * K
    A: path-link incidence matrix
    P: logit route choice probability matrix
    L: dimension of xi
    ----------------
    return: xi
    ----------------
    """
    K = np.size(x, 1)
    S = samp_cov(x)

    #print("rank of S is: \n")
    #print(matrix_rank(S))
    #print("sizes of S are: \n")
    #print(np.size(S, 0))
    #print(np.size(S, 1))

    inv_S = inv(S).real

    A_t = np.transpose(A)

    Q_ = np.dot(np.dot(A_t, inv_S), A)
    #Q = adj_PSD(Q_).real  # Ensure Q to be PSD
    Q = Q_

    #print("rank of Q is: \n")
    #print(matrix_rank(Q))
    #print("sizes of Q are: \n")
    #print(np.size(Q, 0))
    #print(np.size(Q, 1))

    b = sum([np.dot(np.dot(A_t, inv_S), x[:, k]) for k in range(K)])
    # print(b[0])
    # assert(1==2)

    model = Model("OD_matrix_estimation")

    xi = []
    for l in range(L):
        xi.append(model.addVar(name='xi_' + str(l)))

    model.update() 

    # Set objective: (K/2) xi' * Q * xi - b' * xi
    obj = 0
    for i in range(L):
        for j in range(L):
            obj += (1.0 / 2) * K * xi[i] * Q[i, j] * xi[j]
    for l in range(L):
        obj += - b[l] * xi[l]
    model.setObjective(obj)

    # Add constraint: xi >= 0
    for l in range(L):
        model.addConstr(xi[l] >= 0)
        #model.addConstr(xi[l] <= 5000)
    #fictitious_OD_list = zload('../temp_files/fictitious_OD_list')
    #for l in fictitious_OD_list:
        #model.addConstr(xi[l] == 0)
    model.update() 

    model.setParam('OutputFlag', False)
    model.optimize()

    xi_list = []
    for v in model.getVars():
        # print('%s %g' % (v.varName, v.x))
        xi_list.append(v.x)
    # print('Obj: %g' % obj.getValue())
    return xi_list
Example #33
import numpy as np
import numpy.linalg as LA

#constants
H = np.matrix('1 0 ; 1 0 ; 1 0 ; 0 1 ; 0 1')
Hplus = LA.inv(np.transpose(H) * H)
B_0 = np.matrix('0 1 ; 1 0 ; 1 0 ; 0 1 ; 0 1')
n = 5
c = 2
g = 2
alpha = 0.1

#Admits EP
A_0 = np.matrix(
    '.0 .4 .4 .1 .1 ; .4 .0 .4 .1 .1 ; .4 .4 .0 .1 .1 ; .1 .1 .1 .0 .7 ; .1 .1 .1 .7 .0'
)
T_0 = np.matrix('1 .3 ; 0 .7')
T_1 = np.matrix('.2 0 ; .8 1')

I = np.identity(n)
Ipi = np.identity(c)

#Look at this. It's interpretable
Api = Hplus * np.transpose(H) * A_0 * H

print "Api:\n", Api, "\n"

#should be equal and column stochastic
Ppi_1 = alpha * np.transpose(B_0) * H * LA.inv(Ipi - (1 - alpha) * Api) * Hplus
P_1 = np.transpose(B_0) * alpha * LA.inv(I - (1 - alpha) * A_0) * H * Hplus
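Note: the comment above says Ppi_1 and P_1 should be equal and column stochastic; under the definitions above this can be checked directly:

print("Ppi_1 equals P_1:", np.allclose(Ppi_1, P_1))
print("column sums of Ppi_1:", Ppi_1.sum(axis=0))   # each should be 1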
Example #34
# In[11]:

#Reusing code from exercise 2

#Separate fixed acidity and assign it as the input variable (independent)
#Separate wine quality and assign it as the output variable (dependent)
y = data_test[:,11]
one_feature = data_test[:,0]

#create vector of ones and concatenate fixed acidity
c1 = np.ones(shape=y.shape)[...]
x2 = np.array([c1,one_feature]).T

#run linear regression
final_c = inv(x2.transpose().dot(x2)).dot(x2.transpose()).dot(y)
print(final_c)


# Part B: run regression function on all features and report estimated weights

# In[12]:

#create vector of ones...
y = data_test[:,11]
features = np.delete(data_test, 11,axis=1)

v_2 = np.ones(shape=y.shape)[...,None]

x = np.concatenate((v_2, features), 1)
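Note: the cell stops just before the fit itself; under the same normal-equation approach used for `final_c` above, the full-feature weights would be computed as in this sketch (mirroring the earlier cell):

#run linear regression on all features
final_all = inv(x.transpose().dot(x)).dot(x.transpose()).dot(y)
print(final_all)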
Example #35
#input_img=(?, 28, 28, 1), encoded=(?, 6), decoded=(?, 28, 28, 1)
autoencoder = Model(inputs=input_img, outputs=[encoded, decoded])
autoencoder.load_weights("autoencoder_mnist.h5")

encoder = Model(input_img, encoded)

#load the tensor containing the mean of the components of the gaussian mixture, shape=(C, D, 1)
mean = np.load("red_mean.npy")
#load the tensor containing the cov matrices of the components, shape=(C, D, D)
cov = np.load("red_cov.npy")
mean = mean.astype('float32')
cov = cov.astype('float32')

#inverses of the covariance matrices of the components ,shape=(1, C, D, D)
invcov = K.expand_dims(K.tf.constant(inv(cov)), axis=0)
#constant function of the determinants of the cov matrices, shape=(1, C, 1, 1)
dets = K.expand_dims(K.expand_dims(K.expand_dims(K.tf.constant(1. / np.sqrt(2 * np.pi * det(cov))), axis=0), axis=-1), axis=-1)
#shape=(1, C, D, 1)
mean_tens = K.expand_dims(K.tf.constant(mean), axis=0)


def mixture_loss(y_true):

    #latent points in the batch, shape=(batch_size, 1, D, 1)
    y_lat = K.expand_dims(K.expand_dims(y_true, axis=-2), axis=-1)
    invcov_bs = K.repeat_elements(invcov, batch_size, axis=0)
    #print(K.dot K.tf.transpose(y_lat - mean_tens, perm=[0, 1, 3, 2]) @ invcov)
    #likelihoods of the batch points, shape=(batch_size, D)
    likel = K.tf.squeeze(dets * K.exp(-0.5 * K.tf.transpose(y_lat - mean_tens, perm=[0, 1, 3, 2]) @ invcov_bs @ (y_lat - mean_tens)), axis=[-2, -1])
    #sum of these likelihoods for the batch
Example #36
File: msmle.py Project: Kryohi/pynamd
    def compute_expectations(self, A_jn, doerror=True):
        """Compute the weighted expectations in each state. This assumes that
        the states here were all included in the initialization.

        Arguments
        ---------
        A_jn : array-like
            Either 1 or 2d array-like samples. 2d data is assumed to be sorted
            by state with the sample size as the input reduced potential
        doerror : boolean
            Compute variances if true

        Returns
        -------
        A_j : ndarray
            Expected values in each state
        varA_j : ndarray
            Estimated variance in each state (all zeros if doerror is False)
        """
        A_n = self._validate_and_convert_2d(A_jn)
        W_nj = self.W_nj
        A_j = (W_nj * A_n[:, newaxis]).mean(axis=0)
        varA_j = zeros(self.nstates)
        if not doerror:
            return A_j, varA_j
        """
        There are a number of errors in Ref. 1. First, the definitions of W and
        WA are incorrect, the extra factors of exp(f) should indeed be
        included. Doing so obviates the need for the C matrix defined therein.
        This itself is used incorrectly in that paper since the dimensions are
        inconsistent during matrix multiplication.
        
        NB This is all borne out by R code released with Ref. 1, which uses the
        same equations below, but with completely different notation (A1 --> G,
        B1 --> H). The matrix D in Ref. 1 is NOT the same as in the code, where
        it seems to be the first term in the B matrix from the paper.
        
        Shorthand indices - notation similar to Ref. 1.
        """
        n, m, mpk = self.total_samples, self.nstates_sampled, self.nstates
        mpk2 = 2 * mpk
        mask0, maskn0 = self.mask_zero, self.mask_nonzero
        # Shuffle indices and re-define W (WWA, hereafter).
        _W_nj = self._W_nj
        _WA_nj = _W_nj * A_n[:, newaxis]
        _A_j = _WA_nj.mean(axis=0)
        WWA_nj = hstack((_W_nj, _WA_nj))
        # Repeat the same procedure for free energies with the new W.
        O = WWA_nj.T.dot(WWA_nj) / n
        Os = O[:, :m]
        D = hstack((Os.dot(self.PIs), zeros((mpk2, mpk2 - m))))
        B1 = (D - identity(mpk2))[1:, 1:]
        A1 = (O - D[:, :m].dot(Os.T))[1:, 1:]
        V = solve(B1, A1).dot(inv(B1.T)) / n
        """
        This is how things are computed in the R code. The solution of
        solve(B1, A1) is not numerically the same as B1invA1, although they are
        supposedly mathematically identical. Non-exhaustive tests show that
        these give virtually identical results for V, except for state 0.
        """
        #        B1invA1 = (-O + tile(O[0], O[0].size).reshape(O.shape))[1:, 1:]
        #        V = B1invA1.dot(inv(B1.T)) / n
        U = zeros((mpk2, mpk2))
        U[1:, 1:] = V
        Ch = hstack((diagflat(-_A_j), identity(mpk)))
        V_full = Ch.dot(U).dot(Ch.T)
        varA_j[maskn0] = diagonal(V_full)[:m]
        varA_j[mask0] = diagonal(V_full)[m:]
        return A_j, varA_j
Example #37
    def generatorMilleParameters(self):

        print(f'no of events: {len(self.trks)}')


        # we transform to LMDlocal
        matToLMD = np.array(self.detectorMatrices['/cave_1/lmd_root_0']).reshape((4,4))

        # TODO: use vectorized version to use numpy!
        # loop over all events
        for event in self.trks:

            # track origin and direction
            trackOri = np.array(event['trkPos'])
            trackDir = np.array(event['trkMom']) / np.linalg.norm(event['trkMom'])

            for reco in event['recoHits']:
                # print(f'hit index: {reco["index"]}')
                # print(f"reco hit pos: {reco['pos']}")
                # print(f"reco hit err: {reco['err']}")
                recoPos = np.array(reco['pos'])

                sensorID = reco['sensorID']
                modulePath = self.getPathModuleFromSensorID(sensorID)

                # determine module position from reco hit
                half, plane, module, sector = self.getParamsFromModulePath(modulePath)

                # create path to first module in this sector
                # pathFirstMod = f"/cave_1/lmd_root_0/half_{half}/plane_0/module_{module}"

                #* get matrix to first module
                # matrixFirstMod = np.array(self.detectorMatrices[pathFirstMod]).reshape(4,4)

                # transform recoHit and track origin
                # recoNew = self.transformPoint(recoPos, inv(matrixFirstMod))
                # trackOriNew = self.transformPoint(trackOri, inv(matrixFirstMod))

                # track direction requires more work
                # trackDirPoint = self.transformPoint(trackOri + trackDir, inv(matrixFirstMod))
                # trackDirNew = trackDirPoint - trackOriNew

                #* transform all reco points and tracks to lmd local here

                # transform recoHit and track origin
                recoNew = self.transformPoint(recoPos, inv(matToLMD))
                trackOriNew = self.transformPoint(trackOri, inv(matToLMD))

                # track direction requires more work
                trackDirPoint = self.transformPoint(trackOri + trackDir, inv(matToLMD))
                trackDirNew = trackDirPoint - trackOriNew

                # print(f'recoNew: {recoNew}')
                # print(f'trackOriNew: {trackOriNew}')
                # print(f'trackDirNew: {trackDirNew}')

                # better way calculate vector from reco point to track
                # https://en.wikipedia.org/wiki/Distance_from_a_point_to_a_line
                # attention! this goes FROM reco TO track, so minus is important!
                dVec = -((trackOriNew - recoNew) - ((trackOriNew - recoNew)@trackDirNew) * trackDirNew)

                # z position of the plane
                dz = (recoNew[2] / trackDirNew[2])

                # position of the track on a module
                px = (trackOriNew + trackDirNew*dz)[0]
                py = (trackOriNew + trackDirNew*dz)[1]
                pz = (trackOriNew + trackDirNew*dz)[2]

                # print(f'------------------------')
                # print(f'dx: {px}, dy: {py}, dz: {dz}')
                # print(f'dVec: {dVec}')

                # okay, at this point, I have all positions, distances and errors in x and y

                if plane == 0:
                    yield from self.milleParamsPlaneOne(px, py, dz, dVec, reco['err'], sector)
                
                elif plane == 1:
                    yield from self.milleParamsPlaneTwo(px, py, dz, dVec, reco['err'], sector)
                
                elif plane == 2:
                    yield from self.milleParamsPlaneThree(px, py, dz, dVec, reco['err'], sector)
                
                elif plane == 3:
                    yield from self.milleParamsPlaneFour(px, py, dz, dVec, reco['err'], sector)
Example #38
    memoria = []

    nombre = (f"matmul{i}.txt")
    # Create the file
    archivo = open(nombre, "w")

    for N in Nm:
        print(f"N={N}")
        # The two matrices
        A = laplaciana_single(N)
        B = laplaciana_single(N)

        # time 1
        t1 = perf_counter()

        ainv = inv(A)
        binv = inv(B)

        # time 2
        t2 = perf_counter()

        # time difference
        dt = t2 - t1

        # amount of memory used
        size = 3 * (N**2) * 8

        # Append the time and memory to their respective lists
        tiempo.append(dt)
        memoria.append(size)
Example #39
#!/usr/bin/python
from numpy import *
from numpy.linalg import inv

a = array([(1, 5, 9), (-3, 5, -2), (6, 8, -7)], dtype=double)
b = array([-3, 4, -8], dtype=double)
print(inv(a).dot(b))
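Note: for a single right-hand side, `numpy.linalg.solve` is generally preferred over forming the inverse; it gives the same solution here via an LU factorization:

from numpy.linalg import solve
print(solve(a, b))   # same result as inv(a).dot(b)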
Example #40
def findSimilarity(uv, xy, options=None):
    """
    Function:
    ----------
        Find Reflective Similarity Transform Matrix 'trans':
            u = uv[:, 0]
            v = uv[:, 1]
            x = xy[:, 0]
            y = xy[:, 1]
            [x, y, 1] = [u, v, 1] * trans

    Parameters:
    ----------
        @uv: Kx2 np.array
            source points each row is a pair of coordinates (x, y)
        @xy: Kx2 np.array
            each row is a pair of transformed coordinates (x, y)
        @option: not used, keep it as None

    Returns:
    ----------
        @trans: 3x3 np.array
            transform matrix from uv to xy
        @trans_inv: 3x3 np.array
            inverse of trans, transform matrix from xy to uv

    Matlab:
    ----------
    % The similarities are a superset of the nonreflective similarities as they may
    % also include reflection.
    %
    % let sc = s*cos(theta)
    % let ss = s*sin(theta)
    %
    %                   [ sc -ss
    % [u v] = [x y 1] *   ss  sc
    %                     tx  ty]
    %
    %          OR
    %
    %                   [ sc  ss
    % [u v] = [x y 1] *   ss -sc
    %                     tx  ty]
    %
    % Algorithm:
    % 1) Solve for trans1, a nonreflective similarity.
    % 2) Reflect the xy data across the Y-axis,
    %    and solve for trans2r, also a nonreflective similarity.
    % 3) Transform trans2r to trans2, undoing the reflection done in step 2.
    % 4) Use TFORMFWD to transform uv using both trans1 and trans2,
    %    and compare the results, returning the transformation corresponding
    %    to the smaller L2 norm.

    % Need to reset options.K to prepare for calls to findNonreflectiveSimilarity.
    % This is safe because we already checked that there are enough point pairs.
    """
    options = {'K': 2}

    #    uv = np.array(uv)
    #    xy = np.array(xy)

    # Solve for trans1
    trans1, trans1_inv = findNonreflectiveSimilarity(uv, xy, options)

    # Solve for trans2

    # manually reflect the xy data across the Y-axis
    xyR = xy.copy()  # copy, so the reflection below does not modify xy itself
    xyR[:, 0] = -1 * xyR[:, 0]

    trans2r, trans2r_inv = findNonreflectiveSimilarity(uv, xyR, options)

    # manually reflect the tform to undo the reflection done on xyR
    TreflectY = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]])

    trans2 = np.dot(trans2r, TreflectY)

    # Figure out if trans1 or trans2 is better
    xy1 = tformfwd(trans1, uv)
    norm1 = norm(xy1 - xy)

    xy2 = tformfwd(trans2, uv)
    norm2 = norm(xy2 - xy)

    if norm1 <= norm2:
        return trans1, trans1_inv
    else:
        trans2_inv = inv(trans2)
        return trans2, trans2_inv
Example #41
def bls_train_enhance(train_x, train_y, test_x, test_y, s, C, N1, N2, N3,
                      epochs, m, l):
    N11 = N1
    for i in range(epochs):
        train_starttime = datetime.datetime.now()
        train_x_inv = stats.zscore(train_x.T, axis=0)
        train_x = train_x_inv.T
        H1 = np.hstack((train_x, 0.1 * np.ones((np.shape(train_x)[0], 1))))
        y = np.zeros((np.shape(train_x)[0], N2 * N11))
        Betal = []
        for i in range(N2):
            we = 2 * np.random.rand(np.shape(train_x)[1] + 1, N1) - 1
            A1 = np.dot(H1, we)
            A1 = mapminmax(A1)
            betal_inv = sparse_bls(A1, H1, 1e-3, 50)
            betal1 = np.transpose(betal_inv)
            Betal.append(betal1)
            T1 = np.dot(H1, betal1)
            print(
                'feature nodes in window %d:Max Val of output %f Min val %f' %
                (i, getMax(T1), getMin(T1)))
            T1 = mapminmax_onezero(T1)
            y[:, N1 * i:N1 * (i + 1)] = T1

        H2 = np.hstack((y, 0.1 * np.ones((np.shape(y)[0], 1))))
        if N1 * N2 >= N3:
            wh = orth((2 * np.random.random((N1 * N2 + 1, N3)) - 1))
        else:
            wh = orth((2 * np.random.random((N1 * N2 + 1, N3)).T - 1)).T
        T2 = np.dot(H2, wh)
        l2 = np.max(np.max(T2))
        l2 = s / l2
        print('enhancement nodes:max val of output %f min val %f' %
              (l2, getMin(T2)))
        T2 = tansig(T2 * l2)
        T3 = np.hstack((y, T2))
        Wa = np.dot(T3.T, T3) + np.eye(np.shape(T3.T)[0]) * C
        Wa_inv = nlg.inv(Wa)
        beta = np.dot(Wa_inv, T3.T)
        beta2 = np.dot(beta, train_y)
        train_endtime = datetime.datetime.now()
        time = (train_endtime - train_starttime).seconds
        print("training has been finished")
        print('the total training time is : %d seconds' % time)
        xx = np.dot(T3, beta2)

        yy = result(xx)
        train_yy = result(train_y)
        total = 0
        for i in range(np.shape(yy)[0]):
            if (yy[i] == train_yy[i]):
                total += 1
        print('total:%d' % total)
        trainingAccuracy = float(total / 60000)
        print('Training Accuracy is:%d%%' % (trainingAccuracy * 100))

        test_starttime = datetime.datetime.now()
        test_x_inv = stats.zscore(test_x.T, axis=0)
        test_x = test_x_inv.T
        HH1 = np.hstack((test_x, 0.1 * np.ones((np.shape(test_x)[0], 1))))
        yy1 = np.zeros((np.shape(test_x)[0], N2 * N1))
        for i in range(N2):
            betal_test = Betal[i]
            TT1 = np.dot(HH1, betal_test)
            TT1 = mapminmax_onezero(TT1)
            yy1[:, N11 * i:N11 * (i + 1)] = TT1
        HH2 = np.hstack((yy1, 0.1 * np.ones((np.shape(yy1)[0], 1))))
        TT2 = tansig(np.dot(HH2, wh) * l2)
        TT3 = np.hstack((yy1, TT2))
        x = np.dot(TT3, beta2)
        y = result(x)
        test_yy = result(test_y)
        test_total = 0
        for j in range(np.shape(test_yy)[0]):
            if (y[j] == test_yy[j]):
                test_total += 1
        print('total:%d' % test_total)
        testingAccuracy = float(test_total / np.shape(test_yy)[0])
        test_endtime = datetime.datetime.now()
        test_time = (test_endtime - test_starttime).seconds
        print('testing has been finished')
        print('test time is : %d seconds' % test_time)
        print('testing accuracy is:%d%%' % (testingAccuracy * 100))
        #print(np.shape(T3))
        # print(np.shape(beta))
        # incremental training steps
        WH = []
        train_time = []
        for j in range(l - 1):
            increment_start = datetime.datetime.now()
            if N1 * N2 >= m:
                wh_inc = orth((2 * np.random.random((N1 * N2 + 1, m)) - 1))
            else:
                wh_inc = orth((2 * np.random.random((N1 * N2 + 1, m)).T - 1)).T

            WH.append(wh_inc)
            t2 = np.dot(H2, wh_inc)
            l2_inc = (np.max(np.max(t2)))
            l2_inc = s / l2_inc
            print(
                "enhancement nodes in incremental step %d: max val of output %f min val %f"
                % (j, l2_inc, getMin(t2)))
            t2 = tansig(t2 * l2_inc)
            T3_temp = np.hstack((T3, t2))
            d = np.dot(beta, t2)
            c = t2 - np.dot(T3, d)
            if not c.all():
                q, w = np.shape(d)
                dd_inv = np.eye(w) - np.dot(d.T, d)
                dd = nlg.inv(dd_inv)
                d_beta = np.dot(d.T, beta)
                b = np.dot(dd, d_beta)
            else:
                cc_inv = np.dot(c.T, c) + np.eye(np.shape(c.T)[0]) * C
                cc = nlg.inv(cc_inv)
                b = np.dot(cc, c.T)
            beta_inc = np.vstack((beta - np.dot(d, b), b))
            beta2_inc = np.dot(beta_inc, train_y)
            T3_inc = T3_temp
            increment_end = datetime.datetime.now()
            time = (increment_end - increment_start).seconds
            train_time.append(time)
            xx_inc = np.dot(T3_inc, beta2_inc)
            yy_inc = result(xx_inc)
            train_yy = result(train_y)
            incre_total = 0
            for i in range(np.shape(yy_inc)[0]):
                if (yy_inc[i] == train_yy[i]):
                    incre_total += 1
            print('total:%d ' % incre_total)
            trainingAccuracy_incre = incre_total / 60000
            print(' increment training accuracy is:%d%%' %
                  (trainingAccuracy_incre * 100))

            TT2_inc = tansig(np.dot(HH2, wh_inc) * l2_inc)
            TT3_inc = np.hstack((TT3, TT2_inc))

            # incremental testing steps
            x_inc = np.dot(TT3_inc, beta2_inc)
            y_inc = result(x_inc)
            test_yy_inc = result(test_y)
            total_inc_test = 0
            for k in range(np.shape(y_inc)[0]):
                if (y_inc[k] == test_yy_inc[k]):
                    total_inc_test += 1
            print('inc_total:%d' % total_inc_test)
            testingAccuracy_inc = float(total_inc_test / np.shape(y_inc)[0])
            print("testing has been finished")
            print('testing Accuracy is : %d %%' % (testingAccuracy_inc * 100))
Example #42
 CF = CF.row_join(CADT)
 fx = fx.row_insert(0, FAP)
 fx = fx.col_join(sym.Matrix([FA[-4:]]).T)
 fx = np.array(fx).astype(np.float64)
 # -------------------------- Adjustment process --------------------------
 # -- Prepare the matrices to work as the numerical model
 f = -np.array(F - AT * V).astype(np.float64)
 g = -np.array(G - ACT * VC).astype(np.float64)
 AT = np.array(AT).astype(np.float64)
 Q = np.array(Q).astype(np.float64)
 QC = np.array(QC).astype(np.float64)
 WXX = np.array(WXX).astype(np.float64)
 BF = np.array(BF).astype(np.float64)
 CF = np.array(CF).astype(np.float64)
 ACT = np.array(ACT).astype(np.float64)
 WE = inv(AT.dot(Q.dot(np.transpose(AT))))
 WEC = inv(ACT.dot(QC.dot(np.transpose(ACT))))
 # -- CONTRIBUTION OF THE PARAMETERS --
 N = np.transpose(BF).dot(WE.dot(BF))
 T = np.transpose(BF).dot(WE.dot(f))
 # -- CONTRIBUTION OF THE CONSTRAINTS --
 NC = np.transpose(CF).dot(WEC.dot(CF))
 TC = np.transpose(CF).dot(WEC.dot(g))
 DELTA = inv(N + NC + WXX).dot(T + TC - WXX.dot(fx))
 V = Q.dot(np.transpose(AT).dot(WE.dot(f - BF.dot(DELTA))))
 VC = QC.dot(np.transpose(ACT).dot(WEC.dot(g - CF.dot(DELTA))))
 VX = fx + DELTA
 # -------------------------- Apply the corrections to the initial values --------------------------
 CCP = pd.DataFrame(
 )  # Vector to handle the corrections of the plane parameters
 for i in range(0, vetor.shape[0]):
Example #43
0
# Store the voltage and current data as column vectors.
I = np.mat([0.2, 0.3, 0.4, 0.5, 0.6]).T
V = np.mat([1.23, 1.38, 2.06, 2.47, 3.17]).T

# Now we can plot the measurements - can you see the linear relationship between current and voltage?
plt.scatter(np.asarray(I), np.asarray(V))

plt.xlabel('Current (A)')
plt.ylabel('Voltage (V)')
plt.grid(True)
plt.show()

# Define the H matrix, what does it contain?
H = np.mat([1, 1, 1, 1, 1]).T

# Now estimate the resistance parameter.
R = np.dot(np.dot(inv(np.dot(H.T, H)), H.T), (V / I))
print('The slope parameter (i.e., resistance) for the best-fit line is:')
print(R)

# Now let's plot our result. How do we relate our linear parameter fit to the resistance value in ohms?

I_line = np.arange(0, 0.8, 0.1)
V_line = R * I_line

plt.scatter(np.asarray(I), np.asarray(V))
plt.plot(I_line, np.asarray(V_line).reshape(-1))
plt.xlabel('current (A)')
plt.ylabel('voltage (V)')
plt.grid(True)
plt.show()
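As a hedged cross-check (self-contained, reusing the same numbers): the closed-form normal-equation estimate inv(H.T H) H.T y used above is exactly what numpy.linalg.lstsq computes, so the two should agree to numerical precision.

import numpy as np

I_arr = np.array([0.2, 0.3, 0.4, 0.5, 0.6])
V_arr = np.array([1.23, 1.38, 2.06, 2.47, 3.17])
H_arr = np.ones((5, 1))
y = (V_arr / I_arr).reshape(-1, 1)

R_ne = np.linalg.inv(H_arr.T @ H_arr) @ H_arr.T @ y   # normal equations
R_ls = np.linalg.lstsq(H_arr, y, rcond=None)[0]       # QR/SVD-based solver
assert np.allclose(R_ne, R_ls)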
Example #44
0
}
c_bath_vec = {
    s: matrix([[c(s, o)] for o in orb_bath_names])
    for s in spin_names
}

h_bath = sum(c_dag_bath_vec[s] * h_bath_mat * c_bath_vec[s]
             for s in spin_names)[0, 0]
h_coup = sum(c_dag_vec[s] * V_mat * c_bath_vec[s] +
             c_dag_bath_vec[s] * V_mat * c_vec[s]
             for s in spin_names)[0, 0]  # FIXME Adjoint

# ==== Total impurity hamiltonian ====
h_tot = h_imp + h_coup + h_bath

# ==== Green function structure ====
gf_struct = [[s, orb_names] for s in spin_names]

# ==== Non-Interacting Impurity Green function  ====
n_iw = int(10 * beta)
iw_mesh = MeshImFreq(beta, 'Fermion', n_iw)
G0_iw = BlockGf(mesh=iw_mesh, gf_struct=gf_struct)
h_tot_mat = block([[h_0_mat, V_mat], [V_mat.H, h_bath_mat]])
for bl, iw in product(spin_names, iw_mesh):
    G0_iw[bl][iw] = inv(iw.value * eye(2 * n_orb) - h_tot_mat)[:n_orb, :n_orb]

# ==== Hybridization Function ====
Delta = G0_iw.copy()
Delta['up'] << iOmega_n - h_0_mat - inverse(G0_iw['up'])
Delta['dn'] << iOmega_n - h_0_mat - inverse(G0_iw['dn'])
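The [:n_orb, :n_orb] slice above takes the impurity block of the full resolvent, and the last two lines recover the hybridization function Delta(iw) = iw - h_0 - G0(iw)^{-1}. By the Schur-complement (Dyson) identity this equals V (iw - h_bath)^{-1} V^dagger. A minimal NumPy check of that identity on a hypothetical one-orbital, one-bath-site toy model (values illustrative, independent of TRIQS):

import numpy as np
from numpy.linalg import inv

h0 = np.array([[0.5]])       # impurity level (toy value)
hb = np.array([[-0.3]])      # bath level (toy value)
V = np.array([[0.7]])        # hybridization amplitude (toy value)
iw = 1j * np.pi / 10.0       # one fermionic Matsubara frequency

H_tot = np.block([[h0, V], [V.conj().T, hb]])
G0_block = inv(iw * np.eye(2) - H_tot)[:1, :1]   # impurity block of the resolvent

Delta = V @ inv(iw * np.eye(1) - hb) @ V.conj().T
G0_dyson = inv(iw * np.eye(1) - h0 - Delta)

assert np.allclose(G0_block, G0_dyson)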
Example #45
0
def polyfitw(x, y, w, ndegree, return_fit=0):
    """
    Performs a weighted least-squares polynomial fit with optional error estimates.

    Inputs:
        x: 
            The independent variable vector.

        y: 
            The dependent variable vector.  This vector should be the same 
            length as X.

        w: 
            The vector of weights.  This vector should be same length as 
            X and Y.

        ndegree: 
            The degree of polynomial to fit.

    Outputs:
        If return_fit == 0 (the default), polyfitw returns only c, a vector of
        coefficients of length ndegree+1.

        If return_fit == 1, polyfitw returns the list [c, yfit, x_yfit], where
        yfit is the vector of calculated Y's evaluated on the grid x_yfit.

        For any other value of return_fit, polyfitw returns a tuple (c, yfit, yband, sigma, a)
            yfit:  
                The vector of calculated Y's.  Has an error of + or - Yband.

            yband: 
                Error estimate for each point = 1 sigma.

            sigma: 
                The standard deviation in Y units.

            a: 
                Correlation matrix of the coefficients.

    Written by:   George Lawrence, LASP, University of Colorado,
                    December, 1981 in IDL.
                    Weights added, April, 1987,  G. Lawrence
                    Fixed bug with checking number of params, November, 1998, 
                    Mark Rivers.  
                    Python version, May 2002, Mark Rivers
    """
    n = min(len(x), len(y)) # size = smaller of x,y
    m = ndegree + 1         # number of elements in coeff vector
    a = Numeric.zeros((m,m),Numeric.float)  # least square matrix, weighted matrix
    b = Numeric.zeros(m,Numeric.float)    # will contain sum w*y*x^j
    z = Numeric.ones(n,Numeric.float)     # basis vector for constant term

    a[0,0] = Numeric.sum(w)
    b[0] = Numeric.sum(w*y)

    for p in range(1, 2*ndegree+1):     # power loop
        z = z*x   # z is now x^p
        if (p < m):  b[p] = Numeric.sum(w*y*z)   # b is sum w*y*x^j
        sum = Numeric.sum(w*z)
        for j in range(max(0,(p-ndegree)), min(ndegree,p)+1):
            a[j,p-j] = sum

    a = LinearAlgebra.inv(a)
    c = Numeric.matmul(b, a)
    if (return_fit == 0):
        return c     # exit if only fit coefficients are wanted
    minx = min(x)
    maxx = max(x)
    
    x_yfit = Numeric.asarray(range(int(minx*4),int(maxx*4)+1))/4
    n_yfit = len(x_yfit)
    # compute optional output parameters.
    yfit = Numeric.zeros(n_yfit,Numeric.float)+c[0]   # initialize fit with the constant term
    
    for k in range(1, ndegree +1):
        yfit = yfit + c[k]*(x_yfit**k)  # sum basis vectors
        
    if (return_fit ==1):
        return [c, yfit, x_yfit]    
    # re-evaluate the fit at the data abscissae for the variance estimate
    # (yfit above lives on the x_yfit grid, not on x)
    yfit_data = Numeric.zeros(n,Numeric.float)+c[0]
    for k in range(1, ndegree+1):
        yfit_data = yfit_data + c[k]*(x**k)
    var = Numeric.sum((yfit_data-y)**2)/(n-m)  # variance estimate, unbiased
    sigma = Numeric.sqrt(var)
    yband = Numeric.zeros(n,Numeric.float) + a[0,0]
    z = Numeric.ones(n,Numeric.float)
    for p in range(1,2*ndegree+1):     # compute correlated error estimates on y
        z = z*x		# z is now x^p
        sum = 0.
        for j in range(max(0, (p - ndegree)), min(ndegree, p)+1):
            sum = sum + a[j,p-j]
        yband = yband + sum * z      # add in all the error sources
    yband = yband*var
    yband = Numeric.sqrt(yband)
    return c, yfit, yband, sigma, a
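For reference, a minimal modern-NumPy sketch of the same weighted normal equations; polyfitw_np is a hypothetical helper, and the assumption is that numpy stands in for the legacy Numeric/LinearAlgebra modules used above.

import numpy as np

def polyfitw_np(x, y, w, ndegree):
    V = np.vander(x, ndegree + 1, increasing=True)   # columns x^0 .. x^ndegree
    A = V.T @ (w[:, None] * V)                       # sum w * x^(j+k)
    b = V.T @ (w * y)                                # sum w * y * x^j
    return np.linalg.solve(A, b)

x = np.linspace(0.0, 1.0, 20)
y = 1.0 + 2.0 * x - 3.0 * x**2
c = polyfitw_np(x, y, np.ones_like(x), 2)
assert np.allclose(c, [1.0, 2.0, -3.0])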
Example #46
0
File: Kalman.py  Project: yanncalec/pyshm
def Kalman_filter(Y, A, B, G, Q, R, X0, P0):
    """Kalman filter.

    Consider the dynamical system (for t>=1):
        X_t = A_t X_{t-1} + G_t + U_t
        Y_t = B_t X_t + V_t
    Given the initial guess at t=0, Kalman filter computes the conditional mean
        X_{t|t}=Exp[X_t | Y_{1:t}].

    Args:
        Y (2d array): observation vectors, must be 2d array and each column corresponds to an observation (ie, the 2nd dimension corresponds to time).
        A (array): system state matrix, 2d or 3d. In case of 2d array the system transition matrix is time-independent, in case of 3d array it is time-dependent and the 1st dimension corresponds to time.
        B (array): observation matrix, 2d or 3d. In case of 3d array the 1st dimension corresponds to time.
        G (array): input control vector, 1d or 2d. In case of 2d array the 1st dimension corresponds to time. Set G to None if there is no control vector.
        Q (array): system noise covariance matrix, 2d or 3d. In case of 3d array the 1st dimension corresponds to time.
        R (array): observation noise covariance matrix, 2d or 3d. In case of 3d array the 1st dimension corresponds to time.
        X0 (array): guess for the initial state
        P0 (array): guess for the covariance matrix of the initial state

    Returns:
        ..
        - LXtt: X_{t|t} for t=1...{Nt}, Nt is the length of Y
        - LPtt: P_{t|t}
        - LXtm: X_{t|t-1}
        - LPtm: P_{t|t-1}
        - LEt: E{t}
        - LSt: S{t}
        - LKt: K{t}
        - LLLHt: Log-Likelihood_{t}
    """

    # dimX = B[0].shape[1] if B.ndim == 3 else B.shape[1]  # dimension of X
    dimY = B[0].shape[0] if B.ndim == 3 else B.shape[0]  # dimension of Y
    Nt = Y.shape[1]  # length of observation

    # Create iterators
    AL = A if A.ndim == 3 else itertools.repeat(A, Nt)
    BL = B if B.ndim == 3 else itertools.repeat(B, Nt)
    QL = Q if Q.ndim == 3 else itertools.repeat(Q, Nt)
    RL = R if R.ndim == 3 else itertools.repeat(R, Nt)
    GL = G if G is not None and G.ndim == 2 else itertools.repeat(G, Nt)

    # Lists for keeping the results
    LXtt = [X0]  # X_{0|0}
    LPtt = [P0]  # P_{0|0}
    LmXt = [np.zeros_like(X0)]  # mean(X)_{0}

    LXtm = []  # X_{0|-1}
    LPtm = []  # P_{0|-1}
    LSt = []  # S_0
    LKt = []  # K_0
    LEt = []  # E_0
    LLLHt = []  # Log-Likelihood_0

    for t, At, Bt, Qt, Rt, Gt in zip(range(Nt), AL, BL, QL, RL, GL):
        # Prediction at t from Y_{1:t-1}
        Xtm = At @ LXtt[-1] if Gt is None else At @ LXtt[-1] + Gt  # X_{t,t-1}
        Ptm = At @ LPtt[-1] @ At.conjugate().T + Qt                # P_{t,t-1}
        St = Bt @ Ptm @ Bt.conjugate().T + Rt                      # Cov(Epsilon_t)
        iSt = la.inv(St)
        Kt = Ptm @ Bt.conjugate().T @ iSt         # Gain

        # mX_{t}, this is optional and not used in KF
        mXt = At @ LmXt[-1] if Gt is None else At @ LmXt[-1] + Gt

        # Update
        if not np.isnan(Y[:,t]).any(): # if Y[t] is available
            Et = Y[:,[t]] - Bt @ Xtm    # Epsilon_{t}
            # assert(Et.shape[1]==1)
            Xtt = Xtm + Kt @ Et     # X_{t,t}
            Ptt = Ptm - Kt @ Bt @ Ptm    # P_{t,t}
            # Log(Proba_(Y_t|Y_{1..t-1}))
            LLHt = -1/2 * (np.log(la.det(St)) + Et.conjugate().T @ iSt @ Et + dimY * np.log(2*np.pi))
        else:
            Et = np.nan * np.zeros((dimY,1))     # Epsilon_{t}
            Xtt = Xtm.copy()    # X_{t,t}
            Ptt = Ptm.copy()    # P_{t,t}
            LLHt = 0

        # Save
        LXtt.append(Xtt)
        LPtt.append(Ptt)
        LmXt.append(mXt)
        LXtm.append(Xtm)
        LPtm.append(Ptm)
        LEt.append(Et)
        LSt.append(St)
        LKt.append(Kt)
        if t > 0:
            LLLHt.append(LLLHt[-1] + LLHt) # append the increment
        else:
            LLLHt.append(LLHt)

    # Pop the first element in these lists, such that all the outputs are for t=1..T
    LXtt.pop(0)
    LPtt.pop(0)
    LmXt.pop(0)

    return LXtt, LPtt, LXtm, LPtm, LEt, LSt, LKt, LmXt, LLLHt
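A minimal smoke test, assuming the function above is importable (together with its module's numpy/itertools imports) and using a hypothetical 1-D random-walk model with A = B = 1:

import numpy as np

rng = np.random.default_rng(0)
Nt = 100
A = np.eye(1); B = np.eye(1)
Q = 0.01 * np.eye(1); R = 0.1 * np.eye(1)

X_true = np.cumsum(np.sqrt(0.01) * rng.standard_normal(Nt))   # latent random walk
Y = (X_true + np.sqrt(0.1) * rng.standard_normal(Nt)).reshape(1, Nt)

X0 = np.zeros((1, 1)); P0 = np.eye(1)
LXtt, LPtt, LXtm, LPtm, LEt, LSt, LKt, LmXt, LLLHt = \
    Kalman_filter(Y, A, B, None, Q, R, X0, P0)
print(LXtt[-1], LPtt[-1])   # filtered state and covariance at the last step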
Example #47
0
                                                    reference_frame, end_time)

        # Create homogeneous transformation matrix based on the position and quaternion
        rot_mat_ins_start = listener.fromTranslationRotation(
            check_pos_start, check_quat_start)
        rot_mat_ins_end = listener.fromTranslationRotation(
            check_pos_end, check_quat_end)

        # Creation of array of acceleration values
        arr_est_acc = np.array([[twist_est_acc.x], [twist_est_acc.y],
                                [twist_est_acc.z]])
        arr_accel_val = np.array([[twist_acc.x], [twist_acc.y], [twist_acc.z]])
        # print " shap trial val ", trialval[0], trialval[1]

        # Create and transform the acceleration value into the desired frame
        mid_frame = np.dot(inv(rot_mat_ins_start[:3, :3]),
                           rot_mat_ins_end[:3, :3])
        rot_est_acc_val = np.dot(mid_frame, np.reshape(arr_est_acc, (3, 1)))
        print "rotated estimated accel val ", rot_est_acc_val[0][
            0], rot_est_acc_val[1][0], rot_est_acc_val[2][0]

        # Compute the norms of both acceleration vectors
        norm_val1 = np.linalg.norm(arr_accel_val)
        print("The norm of the actual acceleration is ", norm_val1)
        norm_val2 = np.linalg.norm(rot_est_acc_val)
        print("The norm of the estimated acceleration is ", norm_val2)

        # Publish the accelerometer acceleration value
        accel_pub = geometry_msgs.msg.WrenchStamped()
        accel_pub.header.frame_id = tracking_frame
        accel_pub.wrench.force.x = twist_acc.x
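mid_frame above is the relative rotation R_start^{-1} R_end between the two sampled poses. A small self-contained check of that construction, which also shows that for a proper rotation matrix the inverse is just the transpose:

import numpy as np
from numpy.linalg import inv

def rot_z(a):
    c, s = np.cos(a), np.sin(a)
    return np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])

R_start, R_end = rot_z(0.3), rot_z(1.0)
R_rel = inv(R_start) @ R_end          # relative rotation, as in mid_frame
assert np.allclose(R_rel, rot_z(0.7))
assert np.allclose(inv(R_start), R_start.T)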
Example #48
0
	for i in range(0,ndata):
	    for j in range(0,ndata):
	       if (cov[i,i]*cov[j,j] >0):
	         cor[i,j] = cov[i,j]/math.sqrt(cov[i,i]*cov[j,j])


	a = np.sort(LA.eigvals(cor[:,:]))
	print "min+max eigenvalues full cor:"
	print np.min(a), np.max(a)
	print "neg eigenvalues full cor:"
	for i in range(0,a.shape[0]):
		if (a[i]< 0.0): print a[i]


	# ############### invert shear covariance #################
	inv = LA.inv(cov[0:nshear*ncl,0:nshear*ncl])
	a = np.sort(LA.eigvals(cov[0:nshear*ncl,0:nshear*ncl]))
	print "min+max eigenvalues shear cov:"
	print np.min(a), np.max(a)
	outfile = "cov/"+outname[k]+"_shear_shear_inv"
	f = open(outfile, "w")
	for i in range(0,nshear*ncl):
		inv[i,i] = inv[i,i]*mask[i]
		for j in range(0,nshear*ncl):
			f.write("%d %d %e\n" % (i, j, inv[i,j]))
	f.close()

	
	# ############### invert clustering covariance #################
	inv = LA.inv(cov[(nshear+nggl)*ncl:(nshear+nggl+nlens)*ncl,(nshear+nggl)*ncl:(nshear+nggl+nlens)*ncl])
	a = np.sort(LA.eigvals(cov[(nshear+nggl)*ncl:(nshear+nggl+nlens)*ncl,(nshear+nggl)*ncl:(nshear+nggl+nlens)*ncl]))
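The eigenvalue printouts above act as a sanity check that a covariance block is positive definite before it is inverted. A compact standalone version of the same diagnostic on a toy matrix:

import numpy as np
from numpy import linalg as LA

cov = np.array([[1.0, 0.6], [0.6, 1.0]])    # toy covariance block
eig = np.sort(LA.eigvals(cov))
print("min+max eigenvalues:", eig.min(), eig.max())
if eig.min() <= 0:
    raise ValueError("covariance block is not positive definite")
inv_cov = LA.inv(cov)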
Example #49
0
def matrix_power(M, n):
    """
    Raise a square matrix to the (integer) power `n`.

    For positive integers `n`, the power is computed by repeated matrix
    squarings and matrix multiplications. If ``n == 0``, the identity matrix
    of the same shape as M is returned. If ``n < 0``, the inverse
    is computed and then raised to the ``abs(n)``.

    Parameters
    ----------
    M : ndarray or matrix object
        Matrix to be "powered."  Must be square, i.e. ``M.shape == (m, m)``,
        with `m` a positive integer.
    n : int
        The exponent can be any integer or long integer, positive,
        negative, or zero.

    Returns
    -------
    M**n : ndarray or matrix object
        The return value is the same shape and type as `M`;
        if the exponent is positive or zero then the type of the
        elements is the same as those of `M`. If the exponent is
        negative the elements are floating-point.

    Raises
    ------
    LinAlgError
        If the matrix is not numerically invertible.

    See Also
    --------
    matrix
        Provides an equivalent function as the exponentiation operator
        (``**``, not ``^``).

    Examples
    --------
    >>> from numpy import linalg as LA
    >>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit
    >>> LA.matrix_power(i, 3) # should = -i
    array([[ 0, -1],
           [ 1,  0]])
    >>> LA.matrix_power(np.matrix(i), 3) # matrix arg returns matrix
    matrix([[ 0, -1],
            [ 1,  0]])
    >>> LA.matrix_power(i, 0)
    array([[1, 0],
           [0, 1]])
    >>> LA.matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements
    array([[ 0.,  1.],
           [-1.,  0.]])

    Somewhat more sophisticated example

    >>> q = np.zeros((4, 4))
    >>> q[0:2, 0:2] = -i
    >>> q[2:4, 2:4] = i
    >>> q # one of the three quaternion units not equal to 1
    array([[ 0., -1.,  0.,  0.],
           [ 1.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  1.],
           [ 0.,  0., -1.,  0.]])
    >>> LA.matrix_power(q, 2) # = -np.eye(4)
    array([[-1.,  0.,  0.,  0.],
           [ 0., -1.,  0.,  0.],
           [ 0.,  0., -1.,  0.],
           [ 0.,  0.,  0., -1.]])

    """
    M = asanyarray(M)
    if len(M.shape) != 2 or M.shape[0] != M.shape[1]:
        raise ValueError("input must be a square array")
    if not issubdtype(type(n), int):
        raise TypeError("exponent must be an integer")

    from numpy.linalg import inv

    if n == 0:
        M = M.copy()
        M[:] = identity(M.shape[0])
        return M
    elif n < 0:
        M = inv(M)
        n *= -1

    result = M
    if n <= 3:
        for _ in range(n - 1):
            result = N.dot(result, M)
        return result

    # binary decomposition to reduce the number of Matrix
    # multiplications for n > 3.
    beta = binary_repr(n)
    Z, q, t = M, 0, len(beta)
    while beta[t - q - 1] == '0':
        Z = N.dot(Z, Z)
        q += 1
    result = Z
    for k in range(q + 1, t):
        Z = N.dot(Z, Z)
        if beta[t - k - 1] == '1':
            result = N.dot(result, Z)
    return result
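The binary-decomposition loop above is exponentiation by squaring. A compact iterative version of the same idea, checked against the library routine:

import numpy as np

def matpow_by_squaring(M, n):
    # textbook exponentiation by squaring (non-negative n), for comparison only
    result = np.eye(M.shape[0], dtype=M.dtype)
    Z = M.copy()
    while n > 0:
        if n & 1:
            result = result @ Z
        Z = Z @ Z
        n >>= 1
    return result

i = np.array([[0, 1], [-1, 0]])
assert np.array_equal(matpow_by_squaring(i, 3), np.linalg.matrix_power(i, 3))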
Example #50
0
l_pc = np.concatenate(l_pc, axis=0)


# 1. Transform lidar frame points to camera frame points using extrinsic matrices
# 1) Calculate rotation matrix using quaternion parameters
cqw, cqx, cqy, cqz = camera_pose_param[3:]
c_rotation = R.from_quat([cqx, cqy, cqz, cqw]).as_matrix()


# 2) Make transformation matrix using rotation and translation matrix
c_trans = rot_2_trans_mat(camera_pose_param[:3], c_rotation)


# 3) Transform world frame points to camera frame points using the inverse camera transformation matrix
c_l_pc = np.matmul(inv(c_trans), l_pc.T).T


# 4) Keep only the points with positive z values (z+)
pos_z = np.where(c_l_pc[:, 2] > 0)
c_l_pc = c_l_pc[pos_z]


# 2. Project the 3D camera frame points onto the image
# 1) Set all the intrinsic matrices such as projection and calibration
proj_mat = np.c_[np.identity(3), np.zeros([3, 1])]
calib_mat = calib_matrix_cal(fx, fy, cx, cy)


# 2) Calculate the camera calibration using intrinsic matrices
prj_pc = np.matmul(proj_mat, c_l_pc.T).T
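The last step above applies the intrinsics to the camera-frame points; in a full pipeline this is followed by a perspective divide by the third coordinate. A self-contained pinhole-projection sketch (fx, fy, cx, cy are illustrative values; calib_matrix_cal and rot_2_trans_mat above are project-specific helpers):

import numpy as np

fx = fy = 500.0                          # hypothetical focal lengths in pixels
cx, cy = 320.0, 240.0                    # hypothetical principal point
K = np.array([[fx, 0.0, cx], [0.0, fy, cy], [0.0, 0.0, 1.0]])

X_cam = np.array([0.2, -0.1, 2.0, 1.0])  # homogeneous camera-frame point
P = np.c_[np.identity(3), np.zeros(3)]   # [I | 0] projection matrix
uvw = K @ P @ X_cam
u, v = uvw[:2] / uvw[2]                  # perspective divide
print(u, v)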
Example #51
0
 def _create_tofractionalcoordsMatrix(self):
     return la.inv(self.base.T)
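A short check of why inv(base.T) maps Cartesian to fractional coordinates, under the assumption that the rows of self.base are the lattice vectors (a property of this class that the snippet does not show):

import numpy as np
from numpy import linalg as la

base = np.array([[2.0, 0.0, 0.0],       # toy lattice vectors as rows
                 [0.0, 3.0, 0.0],
                 [0.0, 0.0, 4.0]])
to_frac = la.inv(base.T)                 # Cartesian -> fractional
cart = np.array([1.0, 1.5, 2.0])
frac = to_frac @ cart                    # [0.5, 0.5, 0.5]
assert np.allclose(base.T @ frac, cart)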
Example #52
0
for i in range(0, Nd):
    sig2 = sig[i]**2
    ss += 1.0 / sig2
    sx += x[i] / sig2
    sy += y[i] / sig2
    rhl = x[i]**2
    sxx += rhl / sig2
    sxxy += rhl * y[i] / sig2
    sxy += x[i] * y[i] / sig2
    sxxx += rhl * x[i] / sig2
    sxxxx += (rhl**2) / sig2

A = array([[ss, sx, sxx], [sx, sxx, sxxx], [sxx, sxxx, sxxxx]])
bvec = array([sy, sxy, sxxy])

xvec = dot(inv(A), bvec)
ltest = dot(A, inv(A))
print('\n Matrix via inverse')
print(xvec)
print('A*inverse(A)')
print(ltest, '\n')
xvec = solve(A, bvec)
print('x Matrix via solve')
print(xvec)
print('FitParabola Final Results \n')
print('y(x) = a0+a1*x+a2*(x**2)')
print('a0 = ', xvec[0])
print('a1 = ', xvec[1])
print('a2 = ', xvec[2], '\n')
print('i   xi    yi    yfit')
Example #53
0
    def invKine(self, desired_pos, start_pos):  # T60
        des_pos = desired_pos
        des_pos = m3d.Transform(des_pos)
        des_pos = des_pos.matrix
        #print("desired pos: ", desired_pos)
        th = self.mat(np.zeros((6, 8)))
        P_05 = (des_pos * self.mat([0, 0, -self.d6, 1]).T -
                self.mat([0, 0, 0, 1]).T)
        #print(self.d4 / sqrt(P_05[2 - 1, 0] * P_05[2 - 1, 0] + P_05[1 - 1, 0] * P_05[1 - 1, 0]))
        # **** theta1 ****

        R = (P_05[2 - 1, 0] * P_05[2 - 1, 0] + P_05[1 - 1, 0] * P_05[1 - 1, 0])
        psi = atan2(P_05[2 - 1, 0], P_05[1 - 1, 0])
        phi = acos(self.d4 / sqrt(P_05[2 - 1, 0] * P_05[2 - 1, 0] +
                                  P_05[1 - 1, 0] * P_05[1 - 1, 0]))

        # The two solutions for theta1 correspond to the shoulder
        # being either left or right
        # pi / 2 = 90deg
        th[0, 0:4] = pi / 2 + psi + phi
        th[0, 4:8] = pi / 2 + psi - phi
        th = th.real
        #print("th___")
        #print(th)

        # **** theta5 ****

        cl = [0, 4]  # wrist up or down
        for i in range(0, len(cl)):
            c = cl[i]
            T_10 = linalg.inv(self.AH(1, th, c))
            T_16 = T_10 * des_pos
            th[4, c:c + 2] = +acos((T_16[2, 3] - self.d4) / self.d6)
            th[4, c + 2:c + 4] = -acos((T_16[2, 3] - self.d4) / self.d6)

        th = th.real

        # **** theta6 ****
        # theta6 is not well-defined when sin(theta5) = 0 or when T16(1,3), T16(2,3) = 0.

        cl = [0, 2, 4, 6]
        for i in range(0, len(cl)):
            c = cl[i]
            T_10 = linalg.inv(self.AH(1, th, c))
            T_16 = linalg.inv(T_10 * des_pos)
            th[5, c:c + 2] = atan2((-T_16[1, 2] / sin(th[4, c])),
                                   (T_16[0, 2] / sin(th[4, c])))

        th = th.real

        # **** theta3 ****
        cl = [0, 2, 4, 6]
        for i in range(0, len(cl)):
            c = cl[i]
            T_10 = linalg.inv(self.AH(1, th, c))
            T_65 = self.AH(6, th, c)
            T_54 = self.AH(5, th, c)
            T_14 = (T_10 * des_pos) * linalg.inv(T_54 * T_65)
            P_13 = T_14 * self.mat([0, -self.d4, 0, 1]).T - self.mat(
                [0, 0, 0, 1]).T
            t3 = cmath.acos((linalg.norm(P_13)**2 - self.a2**2 - self.a3**2) /
                            (2 * self.a2 * self.a3))  # norm ?
            th[2, c] = t3.real
            th[2, c + 1] = -t3.real

        # **** theta2 and theta 4 ****

        cl = [0, 1, 2, 3, 4, 5, 6, 7]
        for i in range(0, len(cl)):
            c = cl[i]
            T_10 = linalg.inv(self.AH(1, th, c))
            T_65 = linalg.inv(self.AH(6, th, c))
            T_54 = linalg.inv(self.AH(5, th, c))
            T_14 = (T_10 * des_pos) * T_65 * T_54
            P_13 = T_14 * self.mat([0, -self.d4, 0, 1]).T - self.mat(
                [0, 0, 0, 1]).T

            # theta 2
            th[1, c] = -atan2(P_13[1], -P_13[0]) + asin(
                self.a3 * sin(th[2, c]) / linalg.norm(P_13))
            # theta 4
            T_32 = linalg.inv(self.AH(3, th, c))
            T_21 = linalg.inv(self.AH(2, th, c))
            T_34 = T_32 * T_21 * T_14
            th[3, c] = atan2(T_34[1, 0], T_34[0, 0])
        th = th.real
        #print("___" * 30)
        #print(th)
        th = np.transpose(th)
        th = th.tolist()
        print("___" * 30)
        print(th)
        print("___" * 30)

        best_th = th
        #best_th = self.select(th, start_pos)
        best_th = self.get_closest_solution(th, start_pos)
        best_th = self.get_absolute_joints(best_th, start_pos)

        return best_th
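The repeated linalg.inv calls above invert homogeneous transforms. For a rigid transform the inverse also has the cheap closed form [[R.T, -R.T t], [0, 1]], as this standalone check illustrates:

import numpy as np
from numpy import linalg

R = np.array([[0.0, -1.0, 0.0],          # 90-degree rotation about z
              [1.0, 0.0, 0.0],
              [0.0, 0.0, 1.0]])
t = np.array([1.0, 2.0, 3.0])
T = np.eye(4); T[:3, :3] = R; T[:3, 3] = t

T_inv = np.eye(4); T_inv[:3, :3] = R.T; T_inv[:3, 3] = -R.T @ t
assert np.allclose(T_inv, linalg.inv(T))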
Example #54
0
def get_posterior(X, Y, cur_cov):
    """ Calculate the posterior covariance, mean and w values.

    The factor 1 / 0.3 is the noise precision 1/sigma_n**2 assumed by this model.
    """
    noise_precision = 1 / 0.3
    posterior_cov = inv(noise_precision * np.dot(X.T, X) + inv(cur_cov))
    posterior_mean = noise_precision * np.dot(posterior_cov, np.dot(X.T, Y))
    posterior_w = multivariate_normal(posterior_mean.flatten(), posterior_cov)
    return posterior_cov, posterior_mean, posterior_w
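A minimal usage sketch under the model this helper assumes (Bayesian linear regression with noise variance 0.3); the data are synthetic, and the function above is assumed importable along with its inv/np/multivariate_normal imports:

import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((50, 3))
w_true = np.array([[1.0], [-2.0], [0.5]])
Y = X @ w_true + np.sqrt(0.3) * rng.standard_normal((50, 1))

prior_cov = np.eye(3)
post_cov, post_mean, post_w = get_posterior(X, Y, prior_cov)
print(post_mean.flatten())   # should be close to w_true
print(post_w.rvs())          # one sample of w from the posterior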
Example #55
0
 def ddf(self, x):
     hessian = self.rosen_hess(x)
     part_deriv_rosen = self.df(x)
     part_deriv_rosen2 = np.dot(inv(hessian), part_deriv_rosen)
     return part_deriv_rosen2
Example #56
0
 def _get_beta(self, L, S):
     B = mul(L.T, inv(S + mul(L, L.T)))
     return B
Example #57
0
def consInvert(A, b, sigmad, ineq='yes', cond=1.0e-3, iter=2000, acc=1e-12):
    '''Solves the constrained inversion problem.

    Minimize:
    
    ||Ax-b||^2

    Subject to:
    mmin < m < mmax
    '''

    if A.shape[0] != len(b):
        raise ValueError('Incompatible dimensions for A and b')

    if ineq == 'no':
        print('ineq=no: SVD decomposition neglecting small eigenvalues inferior to {} (cond)'.format(cond))
        fsoln = invSVD(A, b, cond)
        print('SVD solution:', fsoln)

    else:
        print('ineq=yes: Iterative least-squares decomposition. Prior obtained with SVD.')
        if len(indexpo) > 0:
            # invert first without post-seismic
            Ain = np.delete(A, indexpo, 1)
            try:
                U, eignv, V = lst.svd(Ain, full_matrices=False)
                s = np.diag(eignv)
                print 'Eigenvalues:', eignv
                index = np.nonzero(s < cond)
                inv = lst.inv(s)
                inv[index] = 0.
                mtemp = np.dot(V.T, np.dot(inv, np.dot(U.T, b)))
            except:
                mtemp = lst.lstsq(Ain, b, rcond=cond)[0]
            print 'SVD solution:', mtemp

            # rebuild full vector
            for z in range(len(indexpo)):
                mtemp = np.insert(mtemp, indexpo[z], 0)
            minit = np.copy(mtemp)
            # # initialize bounds
            mmin, mmax = -np.ones(len(minit)) * np.inf, np.ones(
                len(minit)) * np.inf

            # We here define bounds so that the postseismic has the same sign as the
            # coseismic, and the coseismic is inferior or equal to its initial value
            print('ineq=yes: Impose postseismic to have the same sign as coseismic')
            for i in range(len(indexco)):
                if (pos[i] > 0.) and (minit[int(indexco[i])] > 0.):
                    mmin[int(indexpofull[i])], mmax[int(
                        indexpofull[i])] = 0, np.inf
                    mmin[int(indexco[i])], mmax[int(
                        indexco[i])] = 0, minit[int(indexco[i])]
                if (pos[i] > 0.) and (minit[int(indexco[i])] < 0.):
                    mmin[int(indexpofull[i])], mmax[int(
                        indexpofull[i])] = -np.inf, 0
                    mmin[int(indexco[i])], mmax[int(indexco[i])] = minit[int(
                        indexco[i])], 0
            bounds = list(zip(mmin, mmax))

        else:
            minit = invSVD(A, b, cond)
            print('SVD solution:', minit)
            bounds = None

        #### Objective function and derivative
        _func = lambda x: np.sum(((np.dot(A, x) - b) / sigmad)**2)
        _fprime = lambda x: 2 * np.dot(A.T / sigmad,
                                       (np.dot(A, x) - b) / sigmad)
        res = opt.fmin_slsqp(_func,minit,bounds=bounds,fprime=_fprime, \
            iter=iter,full_output=True,iprint=0,acc=acc)
        fsoln = res[0]
        print('Optimization:', fsoln)

    # Tarantola:
    # Cm = (G^T Cov G)^-1 --> problems if sigma = 1
    # sigma_m**2 = misfit**2 * diag([G^T G]^-1)
    try:
        varx = np.linalg.inv(np.dot(A.T, A))
        # res2 = np.sum(pow((b-np.dot(A,fsoln))/sigmad,2))
        res2 = np.sum(pow((b - np.dot(A, fsoln)), 2))
        scale = 1. / (A.shape[0] - A.shape[1])
        # scale = 1./A.shape[0]
        sigmam = np.sqrt(scale * res2 * np.diag(varx))
    except:
        sigmam = np.ones((A.shape[1])) * float('NaN')
    print('model errors:', sigmam)

    return fsoln, sigmam
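A toy reproduction of the bounded least-squares core (independent of the globals indexpo/invSVD/pos used above; the data and bounds are made up for illustration):

import numpy as np
import scipy.optimize as opt

rng = np.random.default_rng(0)
A = rng.standard_normal((30, 2))
b = A @ np.array([1.5, -0.5]) + 0.01 * rng.standard_normal(30)
sigmad = np.ones(30)

_func = lambda x: np.sum(((A @ x - b) / sigmad) ** 2)
_fprime = lambda x: 2 * (A.T / sigmad) @ ((A @ x - b) / sigmad)
bounds = [(0.0, 10.0), (-10.0, 0.0)]   # impose signs, as done for co-/post-seismic
fsoln = opt.fmin_slsqp(_func, np.zeros(2), bounds=bounds, fprime=_fprime, iprint=0)
print(fsoln)                            # approximately [1.5, -0.5]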
Example #58
0
dist_PC_10 = []

count = 0
r = 0

pbar = tqdm(total=1)

while c < 1:
    dims = 2
    true_mu = np.zeros((dims, ))

    x_ac, C_ac = generate_covariance(true_mu, dims)
    x_c, C_c = generate_covariance(true_mu, dims)
    x_bc, C_bc = generate_covariance(true_mu, dims)

    C_fus = LA.inv(LA.inv(C_ac) + LA.inv(C_bc) + LA.inv(C_c))
    x_fus = C_fus @ (LA.inv(C_ac) @ x_ac + LA.inv(C_bc) @ x_bc +
                     LA.inv(C_c) @ x_c)

    C_a = LA.inv(LA.inv(C_ac) + LA.inv(C_c))
    C_b = LA.inv(LA.inv(C_bc) + LA.inv(C_c))

    x_a = C_a @ (LA.inv(C_ac) @ x_ac + LA.inv(C_c) @ x_c)
    x_b = C_b @ (LA.inv(C_bc) @ x_bc + LA.inv(C_c) @ x_c)

    ei, eid, dist_ei, pc, pcd, dist_pc, d, f = fusion(C_a, C_b, x_a, x_b,
                                                      C_fus, x_fus, C_c)
    if d:
        count += 1
    if f:
        r += 1
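The fusion above works in information (inverse-covariance) form: precisions add, and the fused mean is the precision-weighted mean. A sanity check on hand-picked SPD matrices (generate_covariance and fusion are project-specific and not shown here):

import numpy as np
from numpy import linalg as LA

C1 = np.array([[2.0, 0.3], [0.3, 1.0]])
C2 = np.array([[1.5, -0.2], [-0.2, 2.5]])
x1 = np.array([[1.0], [0.0]])
x2 = np.array([[0.0], [1.0]])

C_fus = LA.inv(LA.inv(C1) + LA.inv(C2))
x_fus = C_fus @ (LA.inv(C1) @ x1 + LA.inv(C2) @ x2)

# the fused covariance is no larger than either input (Loewner order)
assert np.all(np.linalg.eigvalsh(C1 - C_fus) >= 0)
assert np.all(np.linalg.eigvalsh(C2 - C_fus) >= 0)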
Example #59
0
#- Imports:

import numpy as np
from numpy.linalg import inv
from numpy.linalg import solve

A = np.array([[1, 2, 3], [2, 1, 3], [3, 2, 1]])

x = np.array([1, 1, 1])

b = np.matmul(A, x)
print(b)

Ainv = inv(A)

X = np.matmul(Ainv, b)
print(X)

X = solve(A, b)
print(X)

#===== end file =====
Example #60
0
    def get_covar(self, coeffs, ctf_fb=None, ctf_idx=None, mean_coeff=None,
                  do_refl=True, noise_var=1, covar_est_opt=None):
        """
        Calculate the covariance matrix from the expansion coefficients and CTF information.

        :param coeffs: A coefficient vector (or an array of coefficient vectors) to be calculated.
        :param ctf_fb: The CTF functions in the FB expansion.
        :param ctf_idx: An array of the CTF function indices for all 2D images.
            If ctf_fb or ctf_idx is None, the identity filter will be applied.
        :param mean_coeff: The mean value vector from all images.
        :param noise_var: The estimated variance of noise. The value should be zero for `coeffs`
            from clean images of simulation data.
        :param covar_est_opt: The optimization parameter list for obtaining the Cov2D matrix.
        :return: The basis coefficients of the covariance matrix in
            the form of cell array representing a block diagonal matrix. These
            block diagonal matrices are implemented as BlkDiagMatrix instances.
            The covariance is calculated from the images represented by the coeffs array,
            along with all possible rotations and reflections. As a result, the computed covariance
            matrix is invariant to both reflection and rotation. The effect of the filters in ctf_fb
            are accounted for and inverted to yield a covariance estimate of the unfiltered images.
        """

        if coeffs.size == 0:
            raise RuntimeError('The coefficients need to be calculated!')

        if (ctf_fb is None) or (ctf_idx is None):
            ctf_idx = np.zeros(coeffs.shape[1], dtype=int)
            ctf_fb = [BlkDiagMatrix.eye_like(RadialCTFFilter().fb_mat(self.basis))]

        def identity(x):
            return x

        default_est_opt = {'shrinker': 'None', 'verbose': 0, 'max_iter': 250, 'iter_callback': [],
                           'store_iterates': False, 'rel_tolerance': 1e-12, 'precision': 'float64',
                           'preconditioner': identity}

        covar_est_opt = fill_struct(covar_est_opt, default_est_opt)

        if mean_coeff is None:
            mean_coeff = self.get_mean(coeffs, ctf_fb, ctf_idx)

        b_coeff = BlkDiagMatrix.zeros_like(ctf_fb[0])
        b_noise = BlkDiagMatrix.zeros_like(ctf_fb[0])
        A = []
        for k in range(0, len(ctf_fb)):
            A.append(BlkDiagMatrix.zeros_like(ctf_fb[0]))

        M = BlkDiagMatrix.zeros_like(ctf_fb[0])

        for k in np.unique(ctf_idx[:]):

            coeff_k = coeffs[:, ctf_idx == k]
            weight = np.size(coeff_k, 1)/np.size(coeffs, 1)

            ctf_fb_k = ctf_fb[k]
            ctf_fb_k_t = ctf_fb_k.T
            mean_coeff_k = ctf_fb_k.apply(mean_coeff)
            covar_coeff_k = self._get_covar(coeff_k, mean_coeff_k)

            b_coeff += weight * (ctf_fb_k_t @ covar_coeff_k @ ctf_fb_k)

            ctf_fb_k_sq = ctf_fb_k_t @ ctf_fb_k
            b_noise += weight * ctf_fb_k_sq

            A[k] = np.sqrt(weight) * ctf_fb_k_sq
            M += A[k]

        if covar_est_opt['shrinker'] == 'None':
            b = b_coeff - noise_var * b_noise
        else:
            b = self.shrink_covar_backward(b_coeff, b_noise, np.size(coeffs, 1),
                                           noise_var, covar_est_opt['shrinker'])

        cg_opt = covar_est_opt

        covar_coeff = BlkDiagMatrix.zeros_like(ctf_fb[0])

        def precond_fun(S, x):
            p = np.size(S, 0)
            ensure(np.size(x) == p*p, 'The sizes of S and x are not consistent.')
            x = m_reshape(x, (p, p))
            y = S @ x @ S
            y = m_reshape(y, (p ** 2,))
            return y

        def apply(A, x):
            p = np.size(A[0], 0)
            x = m_reshape(x, (p, p))
            y = np.zeros_like(x)
            for k in range(0, len(A)):
                y = y + A[k] @ x @ A[k].T
            y = m_reshape(y, (p ** 2,))
            return y

        for ell in range(0, len(b)):
            A_ell = []
            for k in range(0, len(A)):
                A_ell.append(A[k][ell])
            p = np.size(A_ell[0], 0)
            b_ell = m_reshape(b[ell], (p ** 2,))
            S = inv(M[ell])
            cg_opt["preconditioner"] = lambda x: precond_fun(S, x)
            covar_coeff_ell, _, _ = conj_grad(lambda x: apply(A_ell, x), b_ell, cg_opt)
            covar_coeff[ell] = m_reshape(covar_coeff_ell, (p, p))

        return covar_coeff
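The conjugate-gradient solve above inverts the linear map x -> sum_k A_k x A_k.T implicitly. In vectorized (row-major) form that map is sum_k kron(A_k, A_k), which the following standalone check confirms on toy block sizes:

import numpy as np

rng = np.random.default_rng(2)
A = [rng.standard_normal((3, 3)) for _ in range(2)]
X = rng.standard_normal((3, 3))

lhs = sum(Ak @ X @ Ak.T for Ak in A)
op = sum(np.kron(Ak, Ak) for Ak in A)   # row-major: vec(A X A.T) = (A kron A) vec(X)
assert np.allclose(op @ X.reshape(-1), lhs.reshape(-1))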