Example #1
import numpy as np
import sympy as sy

maxiteration = 100  # assumed iteration cap; not defined in the original snippet
epsilon = 1e-10     # assumed convergence tolerance; not defined in the original snippet

def main():
    n=10
    # prepare matrix a
    a=np.diag([5.]*n)
    a+=np.diagflat([2.]*(n-1),1)
    a+=np.diagflat([2.]*(n-1),-1)
    print(a)

    b=np.array([3,1,4,0,5,-1,6,-2,7,-15],dtype='f').T
    #initial value x
    x=np.ones(10).T

    D=np.diag(np.diag(a))
    L=np.tril(a,-1)
    U=np.triu(a,+1)
    M= -np.linalg.inv(D) @ (L+U)
    N=np.linalg.inv(D)
    for k in range(maxiteration):
        x_new=M @ x + N @ b
        if np.linalg.norm(x_new - x) < epsilon:
            break
        x=x_new
    else:
        print("Jacobi method failed to converge ...")
        exit(1)

    print("the sol of ax = b using Jacobi method is \n{}".format(x))
    print("iteration {} times".format(k))
    print("indeed ax-b is \n{}".format(a @x -b))
    print("you can check sol x using sympy...")
    sy_a = sy.Matrix(a)
    sy_x = sy_a.solve(sy.Matrix(b))  # Matrix.solve expects a sympy Matrix rhs
    print(sy_x)
Example #2
    def matrix_simulationnp(self, temp, dt, t, outtemps):
        size = len(self.npwam[0])
        time = 0
        I = np.diagflat(np.array([[1]*size]))
        neg_c = np.diagflat((self.npwam.dot(np.array([[-1]]*size))))
        M = self.npthermal_mass.dot(self.npwam + neg_c).dot(dt) + I
        print M, "Tdot"
        temp = np.array(temp)  # K
        #temps = [x for x in temp.matrix]
        temps = [[cell for cell in row] for row in temp]
        # finish matrix simulation
        hour = 60*60*3
        idx = 0
        qhour = 15*60
        while time < t:
            if hour == (60*60*3):
                temp[3][0] = outtemps[idx][0]
                hour = 0
                idx += 1
            tempn = M.dot(temp)
            temp = tempn
            if qhour >= 15*60:
                for i in range(len(tempn)):
                    temps[i].append(temp[i][0])
                qhour = 0
            time += dt
            hour += dt
            qhour += dt

        return temps
Example #3
import numpy as np

# getDesMat, alphaThreshold and convThreshold are assumed to be defined
# elsewhere in the original module.
def rvmtrain(xinp, yinp, r, beta = 100):
    dmat = getDesMat(xinp, r)
    N = len(yinp)
    target = yinp
    alphas = np.ones((len(xinp) +1, 1))
    Amat = np.diagflat(alphas)
    newAlphas = np.copy(alphas)

    converged = False
    idx =  np.ones(len(alphas)) == 1
    mMat = np.zeros(len(alphas))
    iterationCount = 0

    for t in range(5000):
        iterationCount = iterationCount + 1
        idx = np.abs(newAlphas) < alphaThreshold
        idx = np.squeeze(idx)
        
        sig = Amat[idx][:,idx] + beta * np.dot(dmat[:,idx].transpose(), dmat[:,idx])
        sigMat = np.linalg.inv(sig)
        mMat[idx] = beta * np.dot(sigMat, np.dot(dmat[:,idx].transpose(), target))
                
        oldAlphas = np.copy(newAlphas)

        gamma = 1 - np.transpose(newAlphas[idx]) * np.diag(sigMat)
        newAlphas[idx] = np.transpose(gamma / (mMat[idx] ** 2))

        beta = (N - np.sum(gamma)) / np.linalg.norm(yinp - np.dot(dmat[:, idx], mMat[idx])) ** 2  # squared norm, per the standard RVM update

        delta = sum(np.abs(newAlphas - oldAlphas))
#        if (sum(newAlphas<0)>0):
#            print(iterationCount)
        if delta < convThreshold:
            print("\n\n\n\n\n!!!!!CONVERGED!!!!!\n\n\n\n\n")
            converged = True
            print(newAlphas)
            break

        Amat = np.diagflat(newAlphas)

    relevant_vecs_ind = []
    x_rel =[]
    y_rel = []
    
    print "iterations: {}".format(iterationCount)
    # If we start from 0 then we check if the bias term of alpha is relevant.  If it is so we pick the last training point
    # x[-1] and y[-1] to be a relevancevector.  The bias term is problematic.
    for i in range(1,N+1):
        if newAlphas[i] < alphaThreshold:
            relevant_vecs_ind.append(i+1)
            x_rel.append(xinp[i-1])
            y_rel.append(yinp[i-1])

    print "number of relevancevectors (alpha < {0}): ".format(alphaThreshold) + str(np.sum(idx[1:]))
    muMat = mMat
    print "beta: " + str(beta)
#    for a in newAlphas:
#        print(a)
    
    return muMat, beta, converged, idx, x_rel, y_rel
Example #4
    def MStep(self, posterior, data, mix_pi=None):
        if isinstance(data, DataSet):
            x = data.internalData
        elif hasattr(data, "__iter__"):
            x = data
        else:
            raise TypeError, "Unknown/Invalid input to MStep."

        post = posterior.sum() # sum of posteriors
        self.mean = np.dot(posterior, x) / post

        # centered input values (with new mus)
        centered = np.subtract(x, np.repeat([self.mean], len(x), axis=0))


        # estimating correlation factor
        sigma = np.dot(np.transpose(np.dot(np.identity(len(posterior)) * posterior, centered)), centered) / post # sigma/covariance matrix

        diagsigma = np.diagflat(1.0 / np.diagonal(sigma)) # diagonal matrix with the reciprocals of sigma's diagonal entries
        correlation = np.dot(np.dot(diagsigma, np.multiply(sigma, sigma)), diagsigma) # correlation matrix with entries sigma_xy^2/(sigma^2_x * sigma^2_y)

        correlation = correlation - np.diagflat(np.diagonal(correlation)) # making diagonal entries = 0

        # XXX - check this
        parents = self.maximunSpanningTree(correlation) # returns the maximum spanning tree of the correlation matrix
        self.parents = self.directTree(parents, 0) # by default direct tree from 0


        # XXX note that computation time could be saved, as these functions share the same sufficient statistics
        ConditionalGaussDistribution.MStep(self, posterior, data, mix_pi)
Example #5
import numpy

def ARD(X, Y):
# X - (p x q) matrix with inputs in rows
# Y - (p, 1) matrix with measurements
# Implements ARD regression, adapted from:
# M. Sahani and J. F. Linden.
# Evidence optimization techniques for estimating stimulus-response functions.
# In S. Becker, S. Thrun, and K. Obermayer, eds., Advances in Neural Information
# Processing Systems, vol. 15, pp. 301-308, Cambridge, MA, 2003.
    
    (p,q) = numpy.shape(X)
    
    #initialize parameters
    sigma_sq = 0.1
    CC = X.T * X
    XY = X.T * Y
    start_flag = False
        
    alpha = numpy.mat(numpy.zeros((q,1)))+2.0
    
    for i in range(100):
        sigma = numpy.linalg.inv(CC/sigma_sq + numpy.diagflat(alpha)) 
        ni = sigma * (XY) /  (sigma_sq)
        sigma_sq = numpy.sum(numpy.power(Y - X*ni, 2)) / (p - numpy.sum(1 - numpy.multiply(numpy.mat(numpy.diagonal(sigma)).T, alpha)))
        print(numpy.min(numpy.abs(ni)))
        alpha = numpy.mat(numpy.divide((1 - numpy.multiply(alpha, numpy.mat(numpy.diagonal(sigma)).T)), numpy.power(ni, 2)))
        print(sigma_sq)
        
    w = numpy.linalg.inv(CC + sigma_sq * numpy.diagflat(alpha)) * (XY)
    
    print(alpha)
    print(sigma_sq)
    
    return w
Example #6
from numpy import array, zeros, eye, diagflat
from numpy.linalg import solve

def burgers_ei_bcp(M, N, xmin, xmax, tinz, tfin, ic):
    dx = (xmax - xmin) / M
    dt = (tfin - tinz) / N

    x = xmin + array(range(0, M + 1)) * dx
    t = array(range(0, N + 1)) * dt

    u = zeros([M + 1, N + 1])
    u[:, 0] = ic(x)

    uold = zeros([M + 1])
    uold[0:M + 1] = u[:, 0]

    for n in range(0, N + 1):
        # Courant number
        nu = max(abs(uold)) * dt / dx
        # CFL
        cfl = nu * dt / dx

        if cfl > 1:
            print("CFL " + str(cfl))
            raise ArithmeticError

        else:
            A = eye(M + 1) + 0.5 * dt * diagflat(uold[0:M], 1) / dx - 0.5 * dt * diagflat(uold[1:M + 1], -1) / dx
            A[0, M] = -0.5 * dt * uold[0] / dx
            A[M, 1] = 0.5 * dt * uold[M] / dx
            unew = solve(A, uold.transpose()).transpose()
            uold = unew
            u[0:M + 1, n] = unew

    return u, x, t
Example #7
import numpy as np
from numpy.linalg import eig

def gen_roots_and_weights(n, an_func, sqrt_bn_func, mu):
    """[x,w] = gen_roots_and_weights(n,an_func,sqrt_bn_func,mu)

    Returns the roots (x) of an nth order orthogonal polynomial,
    and weights (w) to use in appropriate Gaussian quadrature with that
    orthogonal polynomial.

    The polynomials have the recurrence relation
          P_n+1(x) = (x - A_n) P_n(x) - B_n P_n-1(x)

    an_func(n)          should return A_n
    sqrt_bn_func(n)     should return sqrt(B_n)
    mu ( = h_0 )        is the integral of the weight over the orthogonal interval
    """
    nn = np.arange(1.0,n)
    sqrt_bn = sqrt_bn_func(nn)
    an = an_func(np.concatenate(([0], nn)))
    x, v = eig((np.diagflat(an) +
                np.diagflat(sqrt_bn,1) +
                np.diagflat(sqrt_bn,-1)))
    answer = []
    sortind = x.real.argsort()
    answer.append(x[sortind])
    answer.append((mu*v[0]**2)[sortind])
    return answer
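A minimal sketch of driving this routine, assuming the physicists' Hermite weight exp(-x^2), for which A_n = 0, B_n = n/2, and mu = sqrt(pi); the two recurrence callbacks below are illustrative, not part of the original source:

import numpy as np

def hermite_an(n):
    return np.zeros_like(n)      # A_n = 0 for Hermite polynomials

def hermite_sqrt_bn(n):
    return np.sqrt(n / 2.0)      # sqrt(B_n) = sqrt(n/2)

x, w = gen_roots_and_weights(4, hermite_an, hermite_sqrt_bn, np.sqrt(np.pi))
print(np.sum(w * x**2))          # ~ sqrt(pi)/2, the integral of x^2 exp(-x^2)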
Example #8
from numpy import array, zeros, eye, ones, diagflat
from numpy.linalg import solve

def trasporto_ei_bcp(M, N, xmin, xmax, tinz, tfin, vel, ic):
    dx = (xmax - xmin) / M
    dt = (tfin - tinz) / N

    x = xmin + array(range(0, M + 1)) * dx
    t = array(range(0, N + 1)) * dt

    u = zeros([M + 1, N + 1])
    u[:, 0] = ic(x)

    uold = zeros([M + 1])
    uold[0:M + 1] = u[:, 0]

    a = vel
    # CFL
    cfl = a * dt / dx
    # Courant number
    nu = a * dt / dx

    A = eye(M + 1) + 0.5 * nu * diagflat(ones([1, M]), 1) - 0.5 * nu * diagflat(ones([1, M]), -1)
    A[0, M] = -0.5 * nu
    A[M, 1] = 0.5 * nu

    if cfl > 1:
        print("CFL " + str(cfl))
        raise ArithmeticError

    else:
        for n in range(0, N + 1):
            unew = solve(A, uold.transpose()).transpose()
            uold = unew
            u[0:M + 1, n] = unew

    return u, x, t
Example #9
File: blr.py Project: gniqia/pyblr
  def fit_map(self, X, y, full_posterior=True):
    """
      Fit a MAP estimate

      X: features, n_samples by n_features nd-array
      y: target values, n_samples array
    """
    if self.fit_intercept:
      X = self.add_intercept(X)
    #data setup
    f_dim = X.shape[1]
    if np.isscalar(self.inv_lamb):
      inv_lamb = np.diagflat(np.repeat(self.inv_lamb,f_dim))
    else:
      inv_lamb = np.diagflat(self.inv_lamb)
    if np.isscalar(self.beta_mu):
      beta_mu = np.repeat(self.beta_mu,f_dim)
    else:
      beta_mu = self.beta_mu
    sigma = self.sigma
    #let the actual calculation begin
    l = sigma**2 * inv_lamb
    s = LA.inv(X.T.dot(X)+l)
    # adding in the mean of the prior
    b0 = sigma**2 * inv_lamb.dot(beta_mu)

    self.beta = s.dot(X.T.dot(y) + b0)
    if full_posterior:
      self.Sigma = sigma * sigma * s
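With beta_mu = 0 this reduces to ridge regression; a standalone sketch on made-up data, mirroring the algebra above (unit prior precision assumed):

import numpy as np
import numpy.linalg as LA

rng = np.random.default_rng(0)
X = rng.normal(size=(20, 3))
y = X @ np.array([1.0, -2.0, 0.5]) + 0.1 * rng.normal(size=20)
sigma = 0.1
inv_lamb = np.diagflat(np.repeat(1.0, 3))   # as in the scalar branch above
beta = LA.inv(X.T @ X + sigma**2 * inv_lamb) @ (X.T @ y)
print(beta)                                  # close to [1.0, -2.0, 0.5]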
Example #10
def jac_dtwa(s, t, param):
    """
    Jacobian of the general case. First order.
    This is given by 9 NXN submatrices:
    J00=J11=J22=0
    Although Jacobian is NOT antisymmetric in general! See below
    J01 = +J_z diag(J|s^x>) + h(t) h_z - J_y (J#|s^z>)
    J10 = -J_z diag(J|s^x>) - h(t) h_z + J_x (J#|s^z>)
    J02 = -J_y diag(J|s^y>) - h(t) h_y + J_z (J#|s^y>)
    J20 = +J_y diag(J|s^y>) + h(t) h_y - J_x (J#|s^y>)
    J12 = +J_x diag(J|s^x>) + h(t) h_x - J_z (J#|s^x>)
    J21 = -J_x diag(J|s^x>) - h(t) h_x + J_y (J#|s^x>)
    Here, '#' (hash operator) means multiply each row of a matrix by the
    corresponding vector element. This is implemented by numpy.multiply()
    """
    N = param.latsize
    # s[0:N] = sx , s[N:2*N] = sy, s[2*N:3*N] = sz
    full_jacobian = np.zeros(shape=(3 * N, 3 * N))
    diag_jsx = np.diagflat((param.jmat.dot(s[0:N]))) / param.norm
    diag_jsy = np.diagflat((param.jmat.dot(s[N : 2 * N]))) / param.norm
    # diag_jsz = np.diagflat((param.jmat.dot(s[2*N:3*N])))/param.norm
    hash_jsx = (np.multiply(param.jmat.T, s[0:N]).T) / param.norm
    hash_jsy = (np.multiply(param.jmat.T, s[N : 2 * N]).T) / param.norm
    hash_jsz = (np.multiply(param.jmat.T, s[2 * N : 3 * N]).T) / param.norm
    # `drivemat` (the h(t) drive factor in the docstring) is assumed to be defined at module level
    full_jacobian[0:N, N : 2 * N] = param.jz * diag_jsx + drivemat * param.hz - param.jy * hash_jsz
    full_jacobian[N : 2 * N, 0:N] = -param.jz * diag_jsx - param.hz + param.jx * hash_jsz
    full_jacobian[0:N, 2 * N : 3 * N] = -param.jy * diag_jsy - param.hy + param.jz * hash_jsy
    full_jacobian[2 * N : 3 * N, 0:N] = param.jy * diag_jsy + param.hy - param.jx * hash_jsy
    full_jacobian[N : 2 * N, 2 * N : 3 * N] = param.jx * diag_jsx + param.hx - param.jz * hash_jsx
    full_jacobian[2 * N : 3 * N, N : 2 * N] = -param.jx * diag_jsx - param.hx + param.jy * hash_jsx
    return full_jacobian
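A quick illustration, with made-up values, of the '#' hash operator the docstring describes (scale row i of a matrix by vector element s_i via numpy.multiply on the transpose):

import numpy as np

J = np.arange(9.0).reshape(3, 3)
s = np.array([1.0, 10.0, 100.0])
hashed = np.multiply(J.T, s).T           # row i of the result is s[i] * J[i, :]
assert np.allclose(hashed, s[:, None] * J)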
Example #11
File: hamiltotian.py Project: alexfmsu/TCM
import numpy as np

def get_Hatoms(ph_count, at_count, wc, wa, g):
	#------------------------------------------------------------------------------------------------------------------
	sigmadiag = [1]

	sigmacross = np.diagflat(sigmadiag, -1)
	sigma = np.diagflat(sigmadiag, 1)
	sigmacrosssigma = np.dot(sigmacross, sigma)
	#------------------------------------------------------------------------------------------------------------------
	ph_dim = ph_count+1
	
	I_ph = np.identity(ph_dim)
	#------------------------------------------------------------------------------------------------------------------
	H_dim = (ph_count+1) * pow(2, at_count)
	
	H_atoms = np.zeros([H_dim, H_dim])
	#------------------------------------------------------------------------------------------------------------------
	for i in range(1, at_count+1):
		elem = sigmacrosssigma
		
		at_prev = np.identity(pow(2, i-1))
		elem = np.kron(at_prev, elem)
			
		at_next = np.identity( pow(2, at_count-i))
		elem = np.kron(elem, at_next)
		
		H_atoms += wa * np.kron(I_ph, elem)		
	#------------------------------------------------------------------------------------------------------------------
	return H_atoms
Example #12
File: hamiltotian.py Project: alexfmsu/TCM
import numpy as np

def get_Hint_EXACT(ph_count, at_count, wc, wa, g):
	#------------------------------------------------------------------------------------------------------------------
	adiag = np.sqrt(np.arange(1, ph_count+1))
	
	across = np.diagflat(adiag, -1)
	a = np.diagflat(adiag, 1)
	acrossa = np.dot(across, a)
	#------------------------------------------------------------------------------------------------------------------
	sigmadiag = [1]

	sigmacross = np.diagflat(sigmadiag, -1)
	sigma = np.diagflat(sigmadiag, 1)
	sigmacrosssigma = np.dot(sigmacross, sigma)
	#------------------------------------------------------------------------------------------------------------------
	H_dim = (ph_count+1) * pow(2, at_count)
	
	H_int = np.zeros([H_dim, H_dim], dtype=complex)
	#------------------------------------------------------------------------------------------------------------------
	for i in range(1, at_count+1):
		#------------------------------------------------
		elem = (across + a)
		
		before = np.identity(pow(2, i-1))
		elem = np.kron(elem, before)
		
		elem = np.kron(elem, sigmacross + sigma)
		
		after = np.identity(pow(2, at_count-i))
		elem = np.kron(elem, after)
		
		H_int += g * elem	
		#------------------------------------------------
	#------------------------------------------------------------------------------------------------------------------
	return H_int
Example #13
 def TestSetCovariances(self, hyperparameters, X, Y):  # compute test set covariances
     [n, D] = X.shape
     ell = np.exp(hyperparameters[0:D])   # characteristic length scales
     sf2 = np.exp(2*hyperparameters[D])   # signal variance
     A = sf2 * (1 + self.jitter) * np.ones([np.size(Y, 0), 1])
     B = sf2 * np.exp(-self.sq_dist2(np.dot(np.diagflat(1./ell), X.T), np.dot(np.diagflat(1./ell), Y.T)) / 2)  # TODO diagflat
     return [A, B]  # TODO [A,B]
Example #14
def test_project():
    X = np.diagflat([-2, -1, 0, 1, 2])
    # eigenvalues -2, -1, 0, 1, 2; eigenvectors are I

    Xproj = ProjectPSD().fit_transform(X)
    assert np.allclose(Xproj, np.diagflat([0, 0, 0, 1, 2]))

    Xproj2 = ProjectPSD().fit(X).transform(X)
    assert np.allclose(Xproj2, np.diagflat([0, 0, 0, 1, 2]))

    Xproj3 = ProjectPSD(negatives_likely=True).fit(X).transform(X[:3, :])
    assert np.allclose(Xproj3, np.zeros((3, 5)))

    Xproj4 = ProjectPSD(negatives_likely=False).fit(X).transform(X[:3, :])
    assert np.allclose(Xproj4, np.zeros((3, 5)))

    Xproj5 = ProjectPSD(negatives_likely=True, copy=False, min_eig=.5) \
            .fit_transform(X.copy())
    assert np.allclose(Xproj5, np.diagflat([.5, .5, .5, 1, 2]))

    Xproj6 = ProjectPSD(negatives_likely=True, copy=False, min_eig=.5) \
            .fit(X.copy()).transform(X.copy())
    assert np.allclose(Xproj6, np.diagflat([.5, .5, 0, 1, 2]))

    assert_raises(TypeError, lambda: ProjectPSD().fit(X[:2, :]))
    assert_raises(TypeError, lambda: ProjectPSD().fit_transform(X[:2, :]))
    assert_raises(TypeError, lambda: ProjectPSD().fit(X).transform(X[:, :2]))
Example #15
File: main.py Project: eflurin/pyHFSS
def eBBQ_participation2_H_params(s, cos_trunc = None, fock_trunc = None):
    '''
    returns the CHIs as MHz with anharmonicity alpha as the diagonal  (with - sign)
        f1: qubit dressed freq
        f0: qubit linear freq (eigenmode)
        and an overcomplete set of matrices
    '''
    import scipy.constants
    Planck = scipy.constants.Planck
    f0s        = s['freq'].values    
    Qs         = s.loc[:,'modeQ']
    LJs        = s.loc[0,s.keys().str.contains('LJs')] # LJ in nH
    EJs        = (bbq.fluxQ**2/LJs/Planck*10**-9).astype(np.float)        # EJs in GHz
    PJ_Jsu     = s.loc[:,s.keys().str.contains('pJ')]  # EPR from Jsurf avg
    PJ_Jsu_sum = PJ_Jsu.apply(sum, axis = 1)           # sum of participations as calculated by avg surf current 
    PJ_glb_sum = (s['U_E'] - s['U_H'])/(2*s['U_E'])    # sum of participations as calculated by global UH and UE  
    diff       = (PJ_Jsu_sum-PJ_glb_sum)/PJ_glb_sum*100# debug
    if 1:  # Renormalize: to sum to PJ_glb_sum; so that PJs.apply(sum, axis = 1) - PJ_glb_sum =0
           #TODO: figure out the systematic   # print '% diff b/w Jsurf_avg & global Pj:'; display(diff)
        PJs = PJ_Jsu.divide(PJ_Jsu_sum, axis=0).mul(PJ_glb_sum,axis=0)
    else: PJs = PJ_Jsu
    SIGN  = s.loc[:,s.keys().str.contains('sign_')]
    PJ    = np.mat(PJs.values)
    Om    = np.mat(np.diagflat(f0s)) 
    EJ    = np.mat(np.diagflat(EJs.values))
    CHI_O1= Om * PJ * EJ.I * PJ.T * Om * 1000       # MHz
    CHI_O1= divide_diagonal_by_2(CHI_O1)            # Make the diagonals alpha 
    f1s   = f0s - np.diag(CHI_O1)                   # 1st order PT expect freq to be dressed down by alpha 
    if cos_trunc is not None:
        f1s, CHI_ND, fzpfs, f0s = eBBQ_ND(f0s, PJ, Om, EJ, LJs, SIGN, cos_trunc = cos_trunc, fock_trunc = fock_trunc)                
    else: CHI_ND, fzpfs = None, None
    return CHI_O1, CHI_ND, PJ, Om, EJ, diff, LJs, SIGN, f0s, f1s, fzpfs, Qs
Example #16
 def computePD(self, bodyUniqueId, jointIndices, desiredPositions, desiredVelocities, kps, kds, maxForces, timeStep):
   numJoints = self._pb.getNumJoints(bodyUniqueId)
   jointStates = self._pb.getJointStates(bodyUniqueId, jointIndices)
   q1 = []
   qdot1 = []
   zeroAccelerations = []
   for i in range (numJoints):
     q1.append(jointStates[i][0])
     qdot1.append(jointStates[i][1])
     zeroAccelerations.append(0)
   q = np.array(q1)
   qdot=np.array(qdot1)
   qdes = np.array(desiredPositions)
   qdotdes = np.array(desiredVelocities)
   qError = qdes - q
   qdotError = qdotdes - qdot
   Kp = np.diagflat(kps)
   Kd = np.diagflat(kds)
   p =  Kp.dot(qError)
   d = Kd.dot(qdotError)
   forces = p + d
   
   M1 = self._pb.calculateMassMatrix(bodyUniqueId,q1)
   M2 = np.array(M1)
   M = (M2 + Kd * timeStep)
   c1 = self._pb.calculateInverseDynamics(bodyUniqueId, q1, qdot1, zeroAccelerations)
   c = np.array(c1)
   A = M
   b = -c + p + d
   qddot = np.linalg.solve(A, b)
   tau = p + d - Kd.dot(qddot) * timeStep
   maxF = np.array(maxForces)
   forces = np.clip(tau, -maxF , maxF )
   #print("c=",c)
   return tau
Example #17
 def attention_prob_conv_matrix(Q, l):
     assert l >= Q
     m = np.diagflat([1.0] * l)
     for i in range(1, Q):
         m += np.diagflat([1.0] * (l - i), k=i)
         m += np.diagflat([1.0] * (l - i), k=-i)
     m = m / np.sum(m, axis=0)
     return m
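For intuition, a small sketch of the matrix this builds for Q = 2 and l = 3 (a tridiagonal band of ones, normalized column-wise), assuming the function is callable as a plain function:

import numpy as np

m = attention_prob_conv_matrix(2, 3)
print(m)                                 # tridiagonal, columns normalized
assert np.allclose(m.sum(axis=0), 1.0)   # each column sums to 1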
Example #18
 def build_attention_convolution_matrix(self, Q, l):
     # straight up borrowed from Jencir's initial model
     assert l >= Q
     m = np.diagflat([1.] * l)
     for i in range(1, Q):
         m += np.diagflat([1.] * (l - i), k=i)
         m += np.diagflat([1.] * (l - i), k=-i)
     return m / np.sum(m, axis = 0)
Example #19
import numpy as np

def phi_2_sins(dim):
    basic_sin  = 1j*np.diagflat([-0.5]*(dim-1), 1) + 1j*np.diagflat([0.5]*(dim-1), -1)
    basic_sin2 = np.diagflat([0.5]*dim, 0) + np.diagflat([-0.25]*(dim-2), 2) + np.diagflat([-0.25]*(dim-2), -2)
    
    sin = np.tensordot(np.identity(dim), basic_sin, axes=0).swapaxes(1,2).reshape(dim*dim,-1)
    sin2 = np.tensordot(np.identity(dim), basic_sin2, axes=0).swapaxes(1,2).reshape(dim*dim,-1)

    return (sin, sin2)
Example #20
import numpy as N

def Smat(g):
    """
    Return overlap matrix using triangular FEM basis.
    """
    A=1.0/3.0*N.diagflat((g.Rx-g.Lx))
    tmp=1.0/6.0*(g.Rx-g.x)
    B=N.diagflat(tmp[0:g.NN-1],1)
    return A+B+B.transpose()    
Example #21
import numpy as np

def generate_laplacian_matrix(height, width):
    N = height*width
    a = np.diagflat(-4*np.ones(N), k=0)
    b = np.diagflat(np.ones(N-1), k=1)
    c = np.diagflat(np.ones(N-1), k=-1)
    d = np.diagflat(np.ones(N-width), k=-width)
    e = np.diagflat(np.ones(N-width), k=width)
    return (a+b+c+d+e)*(height*width)
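A hedged check of the stencil this produces: for a 3 x 3 grid the interior row is the classic 5-point Laplacian pattern, scaled by height*width:

import numpy as np

Lap = generate_laplacian_matrix(3, 3)
print(Lap[4] / 9)    # [ 0.  1.  0.  1. -4.  1.  0.  1.  0.]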
Example #22
 def __init__(self, skel, h):
     self.h = h
     self.skel = skel
     ndofs = self.skel.ndofs
     self.qhat = self.skel.q
     self.Kp = np.diagflat([0.0] * 6 + [400.0] * (ndofs - 6))
     self.Kd = np.diagflat([0.0] * 6 + [40.0] * (ndofs - 6))
     self.preoffset = 0.0
Example #23
def initialize_from_hdf5_file(file_name, structure, read_trajectory=True, initial_cut=1, final_cut=None, memmap=False):
    # os, np and dyn are imported at the module level in the original file
    import h5py

    print("Reading data from hdf5 file: " + file_name)

    trajectory = None
    velocity = None
    vc = None
    reduced_q_vector = None

    #Check file exists
    if not os.path.isfile(file_name):
        print(file_name + ' file does not exist!')
        exit()

    hdf5_file = h5py.File(file_name, "r")
    if "trajectory" in hdf5_file and read_trajectory is True:
        trajectory = hdf5_file['trajectory'][:]
        if final_cut is not None:
            trajectory = trajectory[initial_cut-1:final_cut]
        else:
            trajectory = trajectory[initial_cut-1:]

    if "velocity" in hdf5_file:
        velocity = hdf5_file['velocity'][:]
        if final_cut is not None:
            velocity = velocity[initial_cut-1:final_cut]
        else:
            velocity = velocity[initial_cut-1:]

    if "vc" in hdf5_file:
        vc = hdf5_file['vc'][:]
        if final_cut is not None:
            vc = vc[initial_cut-1:final_cut]
        else:
            vc = vc[initial_cut-1:]

    if "reduced_q_vector" in hdf5_file:
        reduced_q_vector = hdf5_file['reduced_q_vector'][:]
        print("Load trajectory projected onto {0}".format(reduced_q_vector))

    time = hdf5_file['time'][:]
    supercell = hdf5_file['super_cell'][:]
    hdf5_file.close()


    if vc is None:
        return dyn.Dynamics(structure=structure,
                            trajectory=trajectory,
                            velocity=velocity,
                            time=time,
                            supercell=np.dot(np.diagflat(supercell), structure.get_cell()),
                            memmap=memmap)
    else:
        return vc, reduced_q_vector, dyn.Dynamics(structure=structure,
                                                  time=time,
                                                  supercell=np.dot(np.diagflat(supercell), structure.get_cell()),
                                                  memmap=memmap)
Example #24
File: svd.py Project: vosen/Juiz
 def __init__(self, score_matrix, features):
     self.source = score_matrix
     u, e, vt = linalg.svds(self.source.raw_matrix.transpose().tocoo(), k = features)
     self.normalization = self.source.normalization
     e_inv = numpy.mat(numpy.diagflat(e)).getI()
     e_sqrt = numpy.sqrt(numpy.diagflat(e))
     u_mat = numpy.mat(u)
     self.terms = u_mat * e_sqrt
     self.documents = e_sqrt * e_inv * u_mat.transpose()
Example #25
import numpy as np

def build_Gamma(Gamma_diags):
    """Build Gamma (covariance) given the lists of diagonal values.
    """
    Gamma = np.zeros((4, 4))
    for k, diag in enumerate(Gamma_diags):
        Gamma += np.diagflat(diag, k)  # fill upper diagonals
        if k > 0:
            Gamma += np.diagflat(diag, -k)  # fill lower diagonals
    return Gamma
Example #26
File: cov.py Project: depet/scikit-learn
def linArd(hyp=None, x=None, z=None, hi=None, dg=None):
  """
  Linear covariance function with Automatic Relevance Determination (ARD). The
  covariance function is parameterized as:

  k(x^p,x^q) = x^p'*inv(P)*x^q

  where the P matrix is diagonal with ARD parameters ell_1^2,...,ell_D^2, where
  D is the dimension of the input space. The hyperparameters are:

  hyp = [ log(ell_1)
          log(ell_2)
           ..
          log(ell_D) ]

  Note that there is no bias term; use covConst to add a bias.
  """
  #report number of parameters
  if x is None:
    return 'D'

  if z is None:
    z = numpy.array([[]])

  if dg is None:
    dg = False

  xeqz = numpy.size(z) == 0

  ell = numpy.exp(hyp)
  n, D = numpy.shape(x)
  x = numpy.dot(x,numpy.diagflat(1./ell))

  # compute inner products
  if dg:
    K = numpy.sum(x*x,1)
  else:
    if xeqz:                                             # symmetric matrix Kxx
      K = numpy.dot(x,x.T)
    else:                                               # cross covariances Kxz
      z = numpy.dot(z,numpy.diagflat(1./ell))
      K = numpy.dot(x,z.T)

  if hi is not None:                                              # derivatives
    if hi > 0 and hi < D:
      if dg:
        K = -2*x[:,[hi]]*x[:,[hi]]
      else:
        if xeqz:
          K = -2*numpy.dot(x[:,[hi]],x[:,[hi]].T)
        else:
          K = -2*numpy.dot(x[:,[hi]],z[:,[hi]].T)
    else:
      raise AttributeError('Unknown hyperparameter')

  return K
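A small sanity check of the symmetric case (a sketch; hyp holds log length scales, so K should equal X @ diag(1/ell^2) @ X.T):

import numpy

X = numpy.random.randn(5, 3)
hyp = numpy.log(numpy.array([0.5, 1.0, 2.0]))
K = linArd(hyp=hyp, x=X)
assert numpy.allclose(K, numpy.dot(numpy.dot(X, numpy.diagflat(numpy.exp(-2*hyp))), X.T))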
Example #27
  def computePD(self, bodyUniqueId, jointIndices, desiredPositions, desiredVelocities, kps, kds,
                maxForces, timeStep):
    numBaseDofs = 0
    numPosBaseDofs = 0
    baseMass = self._pb.getDynamicsInfo(bodyUniqueId, -1)[0]
    curPos, curOrn = self._pb.getBasePositionAndOrientation(bodyUniqueId)
    q1 = []
    qdot1 = []
    zeroAccelerations = []
    qError = []
    if (baseMass > 0):
      numBaseDofs = 6
      numPosBaseDofs = 7
      q1 = [curPos[0], curPos[1], curPos[2], curOrn[0], curOrn[1], curOrn[2], curOrn[3]]
      qdot1 = [0] * numBaseDofs
      zeroAccelerations = [0] * numBaseDofs
      angDiff = [0, 0, 0]
      qError = [
          desiredPositions[0] - curPos[0], desiredPositions[1] - curPos[1],
          desiredPositions[2] - curPos[2], angDiff[0], angDiff[1], angDiff[2]
      ]
    numJoints = self._pb.getNumJoints(bodyUniqueId)
    jointStates = self._pb.getJointStates(bodyUniqueId, jointIndices)

    for i in range(numJoints):
      q1.append(jointStates[i][0])
      qdot1.append(jointStates[i][1])
      zeroAccelerations.append(0)
    q = np.array(q1)
    qdot = np.array(qdot1)
    qdes = np.array(desiredPositions)
    qdotdes = np.array(desiredVelocities)
    #qError = qdes - q
    for j in range(numJoints):
      qError.append(desiredPositions[j + numPosBaseDofs] - q1[j + numPosBaseDofs])
    #print("qError=",qError)
    qdotError = qdotdes - qdot
    Kp = np.diagflat(kps)
    Kd = np.diagflat(kds)
    p = Kp.dot(qError)
    d = Kd.dot(qdotError)
    forces = p + d

    M1 = self._pb.calculateMassMatrix(bodyUniqueId, q1)
    M2 = np.array(M1)
    M = (M2 + Kd * timeStep)
    c1 = self._pb.calculateInverseDynamics(bodyUniqueId, q1, qdot1, zeroAccelerations)
    c = np.array(c1)
    A = M
    b = -c + p + d
    qddot = np.linalg.solve(A, b)
    tau = p + d - Kd.dot(qddot) * timeStep
    maxF = np.array(maxForces)
    forces = np.clip(tau, -maxF, maxF)
    #print("c=",c)
    return tau
Example #28
import numpy as np
import scipy.sparse as sp
import scipy.linalg as sl

def KPMF(input_matrix, approx=50, iterations=30, learning_rate=.001, adjacency_width=5, adjacency_strength=.5):
    A = input_matrix
    Z = np.asarray(A > 0, dtype=int)
    A1d = np.ravel(A)
    mean = np.mean(A1d)
    A = A-mean
    K = approx
    R = itr = iterations
    l = learning_rate
    N = A.shape[0]
    M = A.shape[1]
    U = np.random.randn(N,K)
    V = np.random.randn(K,M)
    #KPMF using gradient descent as per paper
    #Kernelized Probabilistic Matrix Factorization: Exploiting Graphs and Side Information
    #T. Zhou, H. Shan, A. Banerjee, G. Sapiro
    #Using diffusion kernel
    #U are the rows, we use an adjacency matrix CU to reprent connectivity
    #This matrix connects rows +-adjacency_width
    #V are the columns, connected columns are CV
    #Operate on graph laplacian L, which is the degree matrix D - C
    #Applying the diffusion kernel to L, this forms a spatial smoothness graph
    bw = adjacency_width
    #Use scipy.sparse.diags to generate band matrix with bandwidth = 2*adjacency_width+1
    #Example of adjacency_width = 1, N = 4
    #[1 1 0 0]
    #[1 1 1 0]
    #[0 1 1 1]
    #[0 0 1 1]
    print "Running KPMF with:"
    print "learning rate=" + `l`
    print "bandwidth=" + `bw`
    print "beta=" + `b`
    print "approximation rank=" + `K`
    print "iterations=" + `R`
    print ""
    CU = sp.diags([1]*(2*bw+1),range(-bw,bw+1),shape=(N,N)).todense()
    DU = np.diagflat(np.sum(CU,1))
    CV = sp.diags([1]*(2*bw+1),range(-bw,bw+1),shape=(M,M)).todense()
    DV = np.diagflat(np.sum(CV,1))
    LU = DU - CU
    LV = DV - CV
    beta = adjacency_strength
    KU = sl.expm(beta*LU)
    KV = sl.expm(beta*LV)
    SU = np.linalg.pinv(KU)
    SV = np.linalg.pinv(KV)
    for r in range(R):
        for i in range(N):
            for j in range(M):
                if Z[i,j] > 0:
                    e = A[i,j] - np.dot(U[i,:],V[:,j])
                    U[i,:] = U[i,:] + l*(e*V[:,j] - np.dot(SU[i,:],U))
                    V[:,j] = V[:,j] + l*(e*U[i,:] - np.dot(V,SV[:,j]))
    A_ = np.dot(U,V)
    return A_+mean
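To confirm that the sp.diags call above reproduces the band matrix sketched in the comments (adjacency_width = 1, N = 4):

import scipy.sparse as sp

bw, N = 1, 4
CU = sp.diags([1] * (2*bw + 1), range(-bw, bw + 1), shape=(N, N)).todense()
print(CU)
# [[1. 1. 0. 0.]
#  [1. 1. 1. 0.]
#  [0. 1. 1. 1.]
#  [0. 0. 1. 1.]]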
Example #29
    def __init__( self, IC, fitness=5, dispersal=0, ndim=1, square=False,
                  verbose=False ):
        """
        IC -- initial condition. A scale, list, or matrix, depending
        on dimension of system (see ndim)

        fitness -- The fitness on each patch. If scalar, each patch
        has the same fitness. If numpy matrix of fitness values (same
        dimension as ndim), different patches can have different
        fitness values.

        dispersal -- Dispersal parameter. As with fitness, can be either
        scalar or numpy matrix.

        ndim -- dimension of the system.

        verbose -- output debugging and other chatter. (default=False)
        """
        self.dispersal = dispersal # possibly a vector or matrix of different values
        self.fitness = fitness # ditto
        self.ndim = ndim
        self.iter_number = 0
        self._square = square
        self._verbose = verbose
        self._IC = IC
        # change flag if creating dispersal matrix. 
        self._do_dispersal = False 

        if square:
            self.sysdim = ndim * ndim
        else:
            self.sysdim = ndim

        # now create patch population container
        if not hasattr( IC, '__iter__' ):
            # single initial population value will fill entire pop. matrix
            if self._square:
                self.population = np.empty( (ndim,ndim) )
                self.population[:] = IC
            # for single patch case
            else:
                self.population = IC
        else:
            # not much error catching here.
            self.population = np.matrix( IC )

        if self._square:
            # create matrix of off-diagonals for dispersal computation
            if not hasattr( self.dispersal, '__array__' ):
                d = np.ones( self.ndim - 1 ) # clip one entry to account for
                                             # off-diag position
                self.D = np.diagflat( d, 1 ) + np.diagflat( d, -1 )
                self.D = np.matrix( self.D ) # for matrix multiplication
                self._do_dispersal = True
            else:
                print "Multiple values for dispersal not implemented yet!"
Example #30
def make_quad_form(b1,b2,b3,b4,b5,b6,b7,b8,b9):
  """
  Returns the quadratic form for the Fung model with the appropriate convention
  for the material parameters.
  The order of the strain components is assumed to be Theta, Z, R.
  """
  normals = lin.symmetric(np.diagflat([b1,b2,b3]) + 
                          2*np.diagflat([b4,b5],1) +
                          2*np.diagflat([b6],2))
  return el.manual_stiffness(normals,np.diagflat([b7,b8,b9]))
Example #31
import numpy
from scipy.special import expit

# curr_model, input_size, f_bias and target_array are assumed to be defined at module level.
def train(f_dataset_train, h_prev, m_prev, t_index):
    inp_unit = f_dataset_train[:, 0].reshape(
        input_size, 1)  # input vector xt taken from image data
    inp_whole = numpy.vstack(
        (inp_unit, h_prev))  # input vector It combined from xt and ht-1
    hidden_whole = numpy.dot(curr_model.get_wm_hidden(),
                             inp_whole)  # raw hidden value for all gates zt
    a_gate = numpy.tanh(hidden_whole[:curr_model.get_hidden_unit_size(), :]
                        )  # split, squashed value from zt
    i_gate = expit(hidden_whole[curr_model.get_hidden_unit_size():2 *
                                curr_model.get_hidden_unit_size(), :])
    f_gate = expit(hidden_whole[2 * curr_model.get_hidden_unit_size():3 *
                                curr_model.get_hidden_unit_size(), :] + f_bias)
    o_gate = expit(hidden_whole[3 * curr_model.get_hidden_unit_size():, :])
    m_curr = i_gate * a_gate + f_gate * m_prev  # memory cell value ct calculated from at, it, ft, and ct-1
    h_curr = o_gate * numpy.tanh(
        m_curr)  # current timestep output value calculated from ot and ct
    if f_dataset_train[:,
                       1:].size == 0:  # on the last step, calculate score, errhc
        score = numpy.dot(curr_model.get_wm_score(),
                          h_curr)  # output layer producing 24 length vector s
        score_prob = numpy.exp(score) / numpy.sum(
            numpy.exp(score))  # softmax layer producing probability vector P
        loss = -numpy.log(
            score_prob[t_index])  # negative log loss error value E
        jac = numpy.diagflat(score_prob) - numpy.dot(
            score_prob, score_prob.T)  # matrix J for deriving softmax layer
        err_score = numpy.dot(
            jac, score_prob -
            target_array[t_index])  # error vector ds from J and audio target
        err_wm_score = numpy.dot(
            err_score, h_curr.T)  # matrix error dws calculated from ds
        curr_model.set_wm_score(
            curr_model.get_wm_score() -
            curr_model.get_learning_rate() * err_wm_score)  # ws update
        err_h_curr = numpy.dot(
            curr_model.get_wm_score().T,
            err_score)  # error vector dht for deriving previous steps
        err_m_curr = 0  # last step has no next memory cell, dct = 0
        err_wm_hidden = 0  # last step has 0 accumulated weight error, dw = 0
    else:  # if not on last step, send ht, ct for forward prop, wait for dht, dct, dw for backward prop
        err_h_curr, err_m_curr, err_wm_hidden, loss = train(
            f_dataset_train[:, 1:], h_curr, m_curr, t_index)
    err_m_curr = err_m_curr + (err_h_curr * o_gate *
                               (1 - numpy.power(numpy.tanh(m_curr), 2))
                               )  # accumulative dct
    err_a_gate = err_m_curr * i_gate  # error vector dat calculated from dct
    err_i_gate = err_m_curr * a_gate  # error vector dit calculated from dct
    err_f_gate = err_m_curr * m_prev  # error vector dft calculated from dct
    err_o_gate = err_h_curr * numpy.tanh(
        m_curr)  # error vector dot calculated from dht
    err_m_prev = err_m_curr * f_gate  # error vector dct-1 calculated from dct
    # calculate error vector for raw, unsplit, unsquashed hidden values for all at, it, ft, and ot
    err_a_inp = err_a_gate * (1 - numpy.power(
        numpy.tanh(hidden_whole[:curr_model.get_hidden_unit_size(), :]), 2))
    err_i_inp = err_i_gate * i_gate * (1 - i_gate)
    err_f_inp = err_f_gate * f_gate * (1 - f_gate)
    err_o_inp = err_o_gate * o_gate * (1 - o_gate)
    err_inp_whole = numpy.vstack(
        (err_a_inp, err_i_inp, err_f_inp,
         err_o_inp))  # dzt concated from hidden value errors
    err_wm_hidden = err_wm_hidden + numpy.dot(
        err_inp_whole, inp_whole.T)  # accumulative dw for weight update
    err_inp_unit = numpy.dot(curr_model.get_wm_hidden().T,
                             err_inp_whole)  # whole input error vector dIt
    err_h_prev = err_inp_unit[input_size:, 0].reshape(
        (curr_model.get_hidden_unit_size(), 1))  # dht-1 split from dIt
    return err_h_prev, err_m_prev, err_wm_hidden, loss  # returns dht-1, dct-1, dw for backprop
Example #32
    def compute_differentials(grey_level_matrix, diagonal_neighbors=True):
        """Computes differences in greylevels for neighboring grid points.

        First part of 'step 4' in the paper.

        Returns n x n x 8 rank 3 array for an n x n grid (if diagonal_neighbors == True)

        The n x nth coordinate corresponds to a grid point.  The eight values are
        the differences between neighboring grid points, in this order:

        upper left
        upper
        upper right
        left
        right
        lower left
        lower
        lower right

        Args:
            grey_level_matrix (numpy.ndarray): grid of values sampled from image
            diagonal_neighbors (Optional[boolean]): whether or not to use diagonal
                neighbors (default True)

        Returns:
            a n x n x 8 rank 3 numpy array for an n x n grid (if diagonal_neighbors == True)

        Examples:
            >>> img = gis.preprocess_image('https://pixabay.com/static/uploads/photo/2012/11/28/08/56/mona-lisa-67506_960_720.jpg')
            >>> window = gis.crop_image(img)
            >>> grid = gis.compute_grid_points(img, window=window)
            >>> grey_levels = gis.compute_mean_level(img, grid[0], grid[1])
            >>> gis.compute_differentials(grey_levels)
            array([[[  0.00000000e+00,   0.00000000e+00,   0.00000000e+00,
                       0.00000000e+00,   1.82683143e-03,  -0.00000000e+00,
                       2.74085276e-01,   1.24737821e-01],
                    [  0.00000000e+00,   0.00000000e+00,   0.00000000e+00,
                      -1.82683143e-03,   2.15563930e-03,   2.72258444e-01,
                       1.22910990e-01,   3.48522956e-01],
                    [  0.00000000e+00,   0.00000000e+00,   0.00000000e+00,
                      -2.15563930e-03,   1.16963917e-01,   1.20755351e-01,
                       3.46367317e-01,   1.99638513e-01],
                    [  0.00000000e+00,   0.00000000e+00,   0.00000000e+00,
                      -1.16963917e-01,   1.32128118e-01,   2.29403399e-01,
                       8.26745956e-02,   1.16455050e-01],
                    ...

        """
        right_neighbors = -np.concatenate(
            (np.diff(grey_level_matrix), np.zeros(
                grey_level_matrix.shape[0]).reshape(
                    (grey_level_matrix.shape[0], 1))),
            axis=1)
        left_neighbors = -np.concatenate(
            (right_neighbors[:, -1:], right_neighbors[:, :-1]), axis=1)

        down_neighbors = -np.concatenate(
            (np.diff(grey_level_matrix,
                     axis=0), np.zeros(grey_level_matrix.shape[1]).reshape(
                         (1, grey_level_matrix.shape[1]))))

        up_neighbors = -np.concatenate(
            (down_neighbors[-1:], down_neighbors[:-1]))

        if diagonal_neighbors:
            # this implementation will only work for a square (m x m) grid
            diagonals = np.arange(-grey_level_matrix.shape[0] + 1,
                                  grey_level_matrix.shape[0])

            upper_left_neighbors = sum([
                np.diagflat(
                    np.insert(np.diff(np.diag(grey_level_matrix, i)), 0, 0), i)
                for i in diagonals
            ])
            lower_right_neighbors = -np.pad(upper_left_neighbors[1:, 1:],
                                            (0, 1),
                                            mode='constant')

            # flip for anti-diagonal differences
            flipped = np.fliplr(grey_level_matrix)
            upper_right_neighbors = sum([
                np.diagflat(np.insert(np.diff(np.diag(flipped, i)), 0, 0), i)
                for i in diagonals
            ])
            lower_left_neighbors = -np.pad(upper_right_neighbors[1:, 1:],
                                           (0, 1),
                                           mode='constant')

            return np.dstack(
                np.array([
                    upper_left_neighbors, up_neighbors,
                    np.fliplr(upper_right_neighbors), left_neighbors,
                    right_neighbors,
                    np.fliplr(lower_left_neighbors), down_neighbors,
                    lower_right_neighbors
                ]))

        return np.dstack(
            np.array([
                up_neighbors, left_neighbors, right_neighbors, down_neighbors
            ]))
Example #33
def second_order_solver(FF, GG, HH):

    from scipy.linalg import qz
    from dolo.numeric.extern.qz import qzdiv

    from numpy import array, mat, c_, r_, eye, zeros, real_if_close, diag, allclose, where, diagflat
    from numpy.linalg import solve

    Psi_mat = array(FF)
    Gamma_mat = array(-GG)
    Theta_mat = array(-HH)
    m_states = FF.shape[0]

    Xi_mat = r_[c_[Gamma_mat, Theta_mat], c_[eye(m_states),
                                             zeros((m_states, m_states))]]

    Delta_mat = r_[c_[Psi_mat, zeros((m_states, m_states))], c_[zeros(
        (m_states, m_states)), eye(m_states)]]

    AAA, BBB, Q, Z = qz(Delta_mat, Xi_mat)

    Delta_up, Xi_up, UUU, VVV = [real_if_close(mm) for mm in (AAA, BBB, Q, Z)]

    Xi_eigval = diag(Xi_up) / where(diag(Delta_up) > TOL, diag(Delta_up), TOL)

    Xi_sortindex = abs(Xi_eigval).argsort()
    # (Xi_sortabs doesn't really seem to be needed)

    Xi_sortval = Xi_eigval[Xi_sortindex]

    Xi_select = slice(0, m_states)

    stake = (abs(Xi_sortval[Xi_select])).max() + TOL

    Delta_up, Xi_up, UUU, VVV = qzdiv(stake, Delta_up, Xi_up, UUU, VVV)

    # TOL and BKError are assumed to be defined at module level
    try:
        # check that all unused roots are unstable
        assert abs(Xi_sortval[m_states]) > (1 - TOL)
        # check that all used roots are stable
        assert abs(Xi_sortval[Xi_select]).max() < 1 + TOL
    except AssertionError:
        raise BKError('generic')

    # check for unit roots anywhere


#    assert (abs((abs(Xi_sortval) - 1)) > TOL).all()

    Lambda_mat = diagflat(Xi_sortval[Xi_select])
    VVVH = VVV.T
    VVV_2_1 = VVVH[m_states:2 * m_states, :m_states]
    VVV_2_2 = VVVH[m_states:2 * m_states, m_states:2 * m_states]
    UUU_2_1 = UUU[m_states:2 * m_states, :m_states]

    PP = -solve(VVV_2_1, VVV_2_2)

    # slightly different check than in the original toolkit:
    assert allclose(real_if_close(PP), PP.real)
    PP = PP.real
    ## end of solve_qz!

    return [Xi_sortval[Xi_select], PP]
Example #34
    # fragment of a longer script: states_count, n, s, rf_freq, eigenvalues,
    # e_rf, mat_2_combination, energies and energies_with_rf are defined
    # earlier in the original file
    detunings = np.zeros(states_count)
    for i in range(2 * n - 1):
        if i >= states_count:
            break
        if i < n:
            detunings[i] = (i - s) * rf_freq - (eigenvalues[i] -
                                                eigenvalues[s])
        else:
            detunings[i] = rf_freq - (eigenvalues[i - n + 1] -
                                      eigenvalues[i]) + detunings[i - n + 1]

    detunings *= -1
    energies.append(detunings)

    # Construct a new Hamiltonian using the no-RF detunings along the diagonal.
    hamiltonian_with_rf = e_rf * mat_2_combination + np.diagflat(detunings)

    eigenvalues, eigenvectors = np.linalg.eigh(hamiltonian_with_rf)
    energies_with_rf.append(eigenvalues)

energies = np.array(energies)
energies_with_rf = np.array(energies_with_rf)

fig, (ax1, ax2) = plt.subplots(
    1,
    2,
    sharex='all',
    sharey='all',
    figsize=(8, 5),
)
Example #35
import numpy as np

def decomposition(A):
    L = np.tril(A, -1)
    D = np.diagflat(np.diag(A))
    R = np.triu(A, 1)
    return (L, D, R)
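A one-line check (sketch) that the split is exact, i.e. the three parts always add back to A:

import numpy as np

A = np.arange(16.0).reshape(4, 4)
L, D, R = decomposition(A)
assert np.allclose(L + D + R, A)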
Example #36
# Realized marginals mapping into standard Student t realizations
u_stocks = zeros((i_, t_))
epsi_tilde_stocks = zeros((i_, t_))
for i in range(i_):
    # u_stocks([i,:])=min((t.cdf((epsi_stocks[i,:]-mu_marg[i])/sqrt(sig2_marg[i]),nu_marg[i]),0.999))
    u_stocks[i, :] = tstu.cdf(
        (epsi_stocks[i, :] - mu_marg[i]) / sqrt(sig2_marg[i]), nu_marg[i])
    epsi_tilde_stocks[i, :] = tstu.ppf(u_stocks[i, :],
                                       nu_copula)  # Student t realizations

# Correlation matrix characterizing the t copula estimation

# approximate the fit to normal in case of badly scaled warnings
_, sig2, _ = MaxLikelihoodFPLocDispT(epsi_tilde_stocks, p, 1e9, 1e-6, 1)
rho2 = np.diagflat(diag(sig2)**(-1 / 2)) @ sig2 @ np.diagflat(
    diag(sig2)**(-1 / 2))

# Shrink the correlation matrix towards a low-rank-diagonal structure
rho2, beta, *_ = FactorAnalysis(rho2, array([[0]]), k_)
rho2, beta = np.real(rho2), np.real(beta)

# Monte Carlo scenarios for each path node from the t copula
Epsi_tilde_hor = zeros((i_, m_, j_))
optionT = namedtuple('option', 'dim_red stoc_rep')
optionT.dim_red = 0
optionT.stoc_rep = 0
for m in range(m_):
    Epsi_tilde_hor[:, m, :] = Tscenarios(nu_copula, zeros(
        (i_,
         1)), rho2, j_, optionT)  # We simulate scenarios one node at a time
Example #37
 def hessian(self, w):
     self._num_calls += 1
     prob = self.y_pred(self._X @ w)
     return 1 / self.N * self._X.T @ np.diagflat(
         np.multiply(prob, (1 - prob))) @ self._X
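This is the usual logistic-regression Hessian, (1/N) * X.T @ diag(p*(1-p)) @ X. Since np.diagflat materializes a dense N x N matrix, an equivalent broadcasting form is lighter for large N; a sketch with made-up values:

import numpy as np

prob = np.array([0.2, 0.5, 0.9])
X = np.arange(6.0).reshape(3, 2)
dense = X.T @ np.diagflat(prob * (1 - prob)) @ X
light = X.T @ ((prob * (1 - prob))[:, None] * X)
assert np.allclose(dense, light)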
Example #38
 def backward(self, dinputs: np.ndarray):
     self.dinputs = np.empty_like(dinputs)
     for i, (output, dinput) in enumerate(zip(self.outputs, dinputs)):
         output = output.reshape(-1, 1)
         jacobian_matrix = np.diagflat(output) - np.dot(output, output.T)
         self.dinputs[i] = np.dot(jacobian_matrix, dinput)
Example #39
 def time_diagflat_l50_l50(self):
     np.diagflat([self.l50, self.l50])
Example #40
Vsector = data_sectors[:k_, :]  # values
Z = (Vsector[:, 1:] - Vsector[:, :-1]) / Vsector[:, :-1]

# ## Compute statistics of the joint distribution of X,Z

[m_XZ, s2_XZ] = FPmeancov(r_[X, Z], p)
s2_X = s2_XZ[:n_, :n_]
s_XZ = s2_XZ[:n_, n_:n_ + k_]
s2_Z = s2_XZ[n_:n_ + k_, n_:n_ + k_]

# ## Solve generalized regression LFM
# ## set inputs for quadratic programming problem

# +
d = np.diagflat(1 / diag(s2_X))
pos = d @ s_XZ
g = -pos.flatten()
q = kron(s2_Z, d)
q_, _ = q.shape

# set constraints
a_eq = ones((1, n_ * k_)) / (n_ * k_)
b_eq = array([[1]])
lb = 0.8 * ones((n_ * k_, 1))
ub = 1.2 * ones((n_ * k_, 1))

# compute optimal loadings
b = quadprog(q, g, a_eq, b_eq, lb, ub)
b = np.array(b)
Example #41
import numpy as np

def softmax_d(softmax):
    s = softmax.reshape(-1,1)
    return np.diagflat(s) - np.dot(s, s.T)
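A quick property check (sketch): because softmax outputs sum to one, every row and column of this Jacobian sums to zero:

s = np.array([0.2, 0.3, 0.5])
J = softmax_d(s)
assert np.allclose(J.sum(axis=0), 0.0)
assert np.allclose(J.sum(axis=1), 0.0)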
Example #42
def garch1f4(x, eps, df):
    ## Fit a GARCH(1,1) model with student-t errors
    #  INPUTS
    #   x     : [vector] (T x 1) data generated by a GARCH(1,1) process
    #  OPS
    #   q     : [vector] (4 x 1) parameters of the GARCH(1,1) process
    #   qerr  : [vector] (4 x 1) standard error of parameter estimates
    #   hf    : [scalar] current conditional heteroskedasticity estimate
    #   hferr : [scalar] standard error on hf
    #  NOTE
    #   o Uses a conditional t-distribution with fixed degrees of freedom
    #   o Originally written by Olivier Ledoit, 4/28/1997
    #   o Difference with garch1f: errors come from the score alone

    # Parameters
    gold = (1 + sqrt(5)) / 2  # step size increment
    tol1 = 1e-7  # for termination criterion
    tol2 = 1e-7  # for closeness to boundary
    big = 2  # for making the hessian negative definite
    maxiter = 50  # maximum number of iterations
    n = 30  # number of points on the grid

    # Rescale
    y = (x.flatten() - mean(x.flatten()))**2
    t = len(y)
    scale = sqrt(mean(y**2))
    y = y / scale
    s = mean(y)
    # Grid search

    [ag, bg] = meshgrid(linspace(0, 1 - eps, n), linspace(0, 1 - eps, n))
    cg = np.maximum(s * (1 - ag - bg), 0)
    likeg = -np.Inf * ones((n, n))
    for i in range(n):
        for j in range(n - i):
            h = filter(array([0, ag[i, j]]), array([1, -bg[i, j]]), y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
                + filter(array([0, cg[i, j]]), array([1, -bg[i, j]]), ones(t))
            likeg[i, j] = -npsum(log(h) + (df + 1) * log(1 + y / h / df))

    maxlikeg = npmax(likeg)
    maxima = where(likeg == maxlikeg)

    # Initialize optimization
    a = r_[cg[maxima], ag[maxima], bg[maxima]]
    best = 0
    da = 0
    # term   = 1
    # negdef = 0
    iter = 0

    # Begin optimization loop
    while iter < maxiter:
        iter = iter + 1

        # New parameter1
        a = a + gold**best * da

        # Conditional variance
        h = filter([0, a[1]], [1, -a[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
            + filter([0, a[0]], [1, -a[2]], ones(t))

        # Likelihood
        if (any(a < 0) or ((a[1] + a[2]) > 1 - eps)):
            like = -np.Inf
        else:
            like = -npsum(log(h) + (df + 1) * log(1 + y / h / df))

        # Gradient
        GG = r_['-1',
                filter([0, 1], [1, -a[2]], ones(t))[..., newaxis],
                filter([0, 1], [1, -a[2]],
                       y * (df - 2) / df)[..., newaxis],
                filter([0, 1], [1, -a[2]], h)[..., newaxis]]
        g1 = ((df + 1) * (y / (y + df * h)) - 1) / h
        G = GG * repeat(g1.reshape(-1, 1), 3, axis=1)
        gra = npsum(G, axis=0)

        # Hessian
        GG2 = GG[:,
                 [0, 1, 2, 0, 1, 2, 0, 1, 2]] * GG[:,
                                                   [0, 0, 0, 1, 1, 1, 2, 2, 2]]
        g2 = -((df + 1) *
               (y /
                (y + df * h)) - 1) / h**2 - (df *
                                             (df + 1)) * (y /
                                                          (y + df * h)**2 / h)
        HH = zeros((t, 9))
        HH[:, 2] = filter([0, 1], [1, -a[2]], GG[:, 0])
        HH[:, 6] = HH[:, 2]
        HH[:, 5] = filter([0, 1], [1, -a[2]], GG[:, 1])
        HH[:, 7] = HH[:, 5]
        HH[:, 8] = filter([0, 2], [1, -a[2]], GG[:, 2])
        H = GG2 * repeat(g2.reshape(-1, 1), 9, axis=1) + HH * repeat(
            g1.reshape(-1, 1), 9, axis=1)
        hes = reshape(npsum(H, axis=0), (3, 3), 'F')

        # Negative definite
        d, u = eig(hes)
        # d = diagflat(d)
        if any(d > 0):
            negdef = 0
            d = np.minimum(d, max(d[d < 0]) / big)  # elementwise clip, as in the original MATLAB
            hes = u @ diagflat(d) @ u.T
        else:
            negdef = 1

        # Direction
        da = -gra.dot(pinv(hes))

        # Termination criterion
        term = da @ gra.T
        if (term < tol1) and negdef:
            break

        # Step search
        best = 0
        newa = a + gold**(best - 1) * da
        if (any(newa < 0) or (newa[1] + newa[2] > 1 - eps)):
            left = -np.Inf
        else:
            h = filter([0, newa[1]], [1, -newa[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
                + filter([0, newa[0]], [1, -newa[2]], ones(t))
            left = -sum(log(h) + (df + 1) * log(1 + y / h / df))

        newa = a + gold**best * da
        if (any(newa < 0) or (newa[1] + newa[2] > 1 - eps)):
            center = -np.Inf
        else:
            h = filter([0, newa[1]], [1, -newa[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
                + filter([0, newa[0]], [1, -newa[2]], ones(t))
            center = -sum(log(h) + (df + 1) * log(1 + y / h / df))

        newa = a + gold**(best + 1) * da
        if (any(newa < 0) or (newa[1] + newa[2] > 1 - eps)):
            right = -np.Inf
        else:
            h = filter([0, newa[1]], [1, -newa[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
                + filter([0, newa[0]], [1, -newa[2]], ones(t))
            right = -sum(log(h) + (df + 1) * log(1 + y / h / df))

        if all(like > array([left, center, right])) or all(
                left > array([center, right])):
            while True:
                best = best - 1
                center = left
                newa = a + gold**(best - 1) * da
                if (any(newa < 0) or (newa[1] + newa[2] > 1 - eps)):
                    left = -np.Inf
                else:
                    h = filter([0, newa[1]], [1, -newa[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
                        + filter([0, newa[0]], [1, -newa[2]], ones(t))
                    left = -sum(log(h) + (df + 1) * log(1 + y / h / df))

                if all(center >= array([like, left])):
                    break

        elif all(right > array([left, center])):
            while True:
                best = best + 1
                center = right
                newa = a + gold**(best + 1) * da
                if (any(newa < 0) or (newa[1] + newa[2]) > 1 - eps):
                    right = -np.Inf
                else:
                    h = filter([0, newa[1]], [1, -newa[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
                        + filter([0, newa[0]], [1, -newa[2]], ones(t))
                    right = -npsum(log(h) + (df + 1) * log(1 + y / h / df))

                if center > right:
                    break

        # If stuck at boundary then stop
        if (center == like) and (any(a < tol2) or (a[1] + a[2]) > 1 - tol2):
            break

        # End of optimization loop

    a[a < tol2] = zeros(len(a[a < tol2]))
    if a[1] + a[2] > 1 - tol2:
        if a[1] < 1 - tol2:
            a[1] = a[1] + (1 - a[1] - a[2])
        else:
            a[2] = a[2] + (1 - a[1] - a[2])

    # Estimation error and volatility forecast
    # aerr = inv(G.T @ G)
    tmp = (G.T @ G)
    aerr = pinv(tmp)
    hf = a[0] + a[1] * y[t - 1] * (df - 2) / df + a[2] * h[t - 1]
    gf = r_[1, y[t - 1], h[t - 1]] + a[2] * GG[t - 1, :]
    hferr = gf @ aerr @ gf.T
    aerr = diagflat(aerr).T

    # Revert to original scale
    a[0] = a[0] * scale
    aerr[0] = aerr[0] * scale**2
    hf = hf * scale
    hferr = hferr * scale**2

    aerr = sqrt(aerr)
    hferr = sqrt(hferr)
    q = a
    qerr = aerr

    return q, qerr, hf, hferr
Example #43
    def __init__(self,
                 grid=None,
                 edges=None,
                 origin=None,
                 delta=None,
                 metadata=None,
                 **kwargs):
        """
        Create a Grid object from data.

        From a numpy.histogramdd()::
          grid,edges = numpy.histogramdd(...)
          g = Grid(grid,edges=edges)

        From an arbitrary grid::
          g = Grid(grid,origin=origin,delta=delta)

        From a saved file::
          g = Grid(filename)
        or
          g = Grid()
          g.load(filename)

        :Arguments:
          grid
            histogram or density, defined on numpy nD array
          edges
            list of arrays, the lower and upper bin edges along the axes
            (both are output by numpy.histogramdd())
          origin
            cartesian coordinates of the center of grid[0,0,...,0]
          delta
            Either n x n array containing the cell lengths in each dimension,
            or n x 1 array for rectangular arrays.
          metadata
            a user defined dictionary of arbitrary values
            associated with the density; the class does not touch
            metadata[] but stores it with save()
          interpolation_spline_order
            order of interpolation function for resampling; cubic splines = 3 [3]
        """
        # file formats are guessed from extension == lower case key
        self._exporters = {
            'DX': self._export_dx,
            'PICKLE': self._export_python,
            'PYTHON': self._export_python,  # compatibility
        }
        self._loaders = {
            'DX': self._load_dx,
            'PLT': self._load_plt,
            'PICKLE': self._load_python,
            'PYTHON': self._load_python,  # compatibility
        }

        if metadata is None:
            metadata = {}
        self.metadata = metadata  # use this to record arbitrary data
        self.__interpolated = None  # cache for interpolated grid
        self.__interpolation_spline_order = kwargs.pop(
            'interpolation_spline_order', 3)
        self.interpolation_cval = None  # default to using min(grid)

        if type(grid) is str:
            # read from a file
            self.load(grid)
        elif not (grid is None or edges is None):
            # set up from histogramdd-type data
            self.grid = numpy.asarray(grid)
            self.edges = edges
            self._update()
        elif not (grid is None or origin is None or delta is None):
            # setup from generic data
            origin = numpy.squeeze(origin)
            delta = numpy.squeeze(delta)
            N = grid.ndim
            assert (N == len(origin))
            if delta.shape == (N, N):
                if numpy.any(delta - numpy.diag(delta)):
                    raise NotImplementedError(
                        "Non-rectangular grids are not supported.")
            elif delta.shape == (N, ):
                delta = numpy.diagflat(delta)
            elif delta.shape == ():
                delta = numpy.diagflat(N * [delta])
            else:
                raise ValueError('delta = %r has the wrong shape' % delta)
            # note that origin is CENTER so edges must be shifted by -0.5*delta
            self.edges = [
                origin[dim] + (numpy.arange(m + 1) - 0.5) * delta[dim, dim]
                for dim, m in enumerate(grid.shape)
            ]
            self.grid = numpy.asarray(grid)
            self._update()
        else:
            # empty, must manually populate with load()
            #print "Setting up empty grid object. Use Grid.load(filename)."
            pass
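
A minimal usage sketch for the constructor above, following its docstring (the
random data and bin counts are made up for illustration):

import numpy
data = numpy.random.random((1000, 3))
histogram, edges = numpy.histogramdd(data, bins=(10, 10, 10))
g = Grid(histogram, edges=edges)  # histogramdd-style construction
# generic construction: origin is the CENTER of the first cell
g2 = Grid(histogram, origin=[0.05, 0.05, 0.05], delta=0.1)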
Example #44
def garch2f8(y, c1, a1, b1, y1, h1, c2, a2, b2, y2, h2, df):
    ## Off-diagonal parameter estimation in bivariate GARCH(1,1) when diagonal parameters are given.
    #  INPUTS
    #   y     : [vector] (T x 1) data generated by a GARCH(1,1) process
    #  OUTPUTS
    #   q     : [vector] (3 x 1) off-diagonal parameters [c, a, b] of the
    #           bivariate GARCH(1,1) process (the only value returned below)
    #  NOTE
    #   o Originally written by Olivier Ledoit, 4/28/1997
    #   o Uses a conditional t-distribution with fixed degrees of freedom
    #   o Steepest Ascent on boundary, Hessian off boundary, no grid search

    # Parameters
    gold = (1 + sqrt(5)) / 2  # step size increment
    tol1 = 1e-7  # for termination criterion
    tol2 = 1e-7  # for closeness to boundary
    big = 2  # for making the hessian negative definite
    maxiter = 50  # maximum number of iterations
    # n=30			# number of points on the grid

    # Prepare
    t = len(y)
    y1 = y1.flatten()
    y2 = y2.flatten()
    y = y.flatten()
    s = mean(y)
    # s1=mean((y1))
    # s2=mean((y2))
    h1 = h1.flatten()
    h2 = h2.flatten()

    # Bounds
    low = r_[-sqrt(c1 * c2), 0, 0] + tol2
    high = r_[sqrt(c1 * c2), sqrt(a1 * a2), sqrt(b1 * b2)] - tol2

    # Starting Point
    a0 = 0.9 * sqrt(a1 * a2)
    b0 = 0.9 * sqrt(b1 * b2)
    c0 = mean(y) * (1 - a0 - b0) * (df - 2) / df
    c0 = sign(c0) * min(abs(c0), 0.9 * sqrt(c1 * c2))

    # Initialize optimization
    a = r_[c0, a0, b0]
    best = 0
    da = 0
    # term=1
    # negdef=0
    iter = 0

    # Begin optimization loop
    while iter < maxiter:
        iter = iter + 1

        # New parameter
        # olda = a
        a = a + gold**best * da

        # Conditional variance
        h = filter([0, a[1]], [1, -a[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
            + filter([0, a[0]], [1, -a[2]], ones(t))
        d = h1 * h2 - h**2
        z = h2 * y1 + h1 * y2 - 2 * h * y

        # Likelihood
        if (any(a < low) or any(a > high)):
            like = -np.Inf
        else:
            # like=-sum(log(h)+y/h))
            # like=-sum(log(h)+(df+1)*log(1+y/h/df))
            if any(d <= 0) or any(1 + z / d / df <= 0):
                like = -np.Inf
            else:
                like = -sum(log(d) + (2 + df) * log(1 + z / d / df)) / 2

        # Gradient
        GG = r_['-1',
                filter([0, 1], [1, -a[2]], ones(t))[..., newaxis],
                filter([0, 1], [1, -a[2]],
                       y * (df - 2) / df)[..., newaxis],
                filter([0, 1], [1, -a[2]], h)[..., newaxis]]
        g1 = h / d + (2 + df) * y / (z + d * df) - (2 + df) * h * z / (
            z + d * df) / d
        G = GG * repeat(g1.reshape(-1, 1), 3, axis=1)
        gra = npsum(G, axis=0)

        # Hessian
        GG2 = GG[:,
                 [0, 1, 2, 0, 1, 2, 0, 1, 2]] * GG[:,
                                                   [0, 0, 0, 1, 1, 1, 2, 2, 2]]
        g2 = 1 / d + 2 * h ** 2 / d ** 2 - (2 + df) * y / (z + d * df) ** 2 * (-2 * y - 2 * df * h) \
             - (2 + df) * z / (z + d * df) / d + 2 * (2 + df) * h * y / (z + d * df) / d \
             + (2 + df) * h * z / (z + d * df) ** 2 / d * (-2 * y - 2 * df * h) \
             - 2 * (2 + df) * h ** 2 * z / (z + d * df) / d ** 2
        HH = zeros((t, 9))
        HH[:, 2] = filter([0, 1], [1, -a[2]], GG[:, 0])
        HH[:, 6] = HH[:, 2]
        HH[:, 5] = filter([0, 1], [1, -a[2]], GG[:, 1])
        HH[:, 7] = HH[:, 5]
        HH[:, 8] = filter([0, 2], [1, -a[2]], GG[:, 2])
        H = GG2 * repeat(g2.reshape(-1, 1), 9, axis=1) + HH * repeat(
            g1.reshape(-1, 1), 9, axis=1)
        hes = reshape(npsum(H, axis=0), (3, 3), 'F')

        # Negative definite
        val, u = eig(hes)
        if all(val > 0):
            hes = -eye(3)
            negdef = 0
        elif any(val > 0):
            negdef = 0
            val = minimum(val, max(val[val < 0]) / big)
            hes = u @ diagflat(val) @ u.T
        else:
            negdef = 1

        # Steepest Ascent or Newton
        if any(a == low) or any(a == high):
            da = -((gra @ gra.T) / (gra @ hes @ gra.T)) * gra
        else:
            da = -gra.dot(pinv(hes))

        # Termination criterion
        term = da @ gra.T
        if (term < tol1) and negdef:
            break

        # If you are on the boundary and want to get out, slide along
        da[(a == low) & (da < 0)] = zeros(da[(a == low) & (da < 0)].shape)
        da[(a == high) & (da > 0)] = zeros(da[(a == high) & (da > 0)].shape)

        # If you are stuck in a corner, terminate too
        if all(da == 0):
            break

        # Go no further than next boundary
        hit = r_[(low[da != 0] - a[da != 0]) / da[da != 0],
                 (high[da != 0] - a[da != 0]) / da[da != 0]]
        hit = hit[hit > 0]
        da = min(r_[hit, 1]) * da

        # Step search
        best = 0
        newa = a + gold**(best - 1) * da
        if (any(newa < low) or any(newa > high)):
            left = -np.Inf
        else:
            h = filter([0, newa[1]], [1, -newa[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
                + filter([0, newa[0]], [1, -newa[2]], ones(t))
            d = h1 * h2 - h**2
            z = h2 * y1 + h1 * y2 - 2 * h * y
            if any(d <= 0) or any(1 + z / d / df <= 0):
                left = -np.Inf
            else:
                left = -sum(log(d) + (2 + df) * log(1 + z / d / df)) / 2

        newa = a + gold**best * da
        if (any(newa < low) or any(newa > high)):
            center = -np.Inf
        else:
            h = filter([0, newa[1]], [1, -newa[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
                + filter([0, newa[0]], [1, -newa[2]], ones(t))
            d = h1 * h2 - h**2
            z = h2 * y1 + h1 * y2 - 2 * h * y
            if any(d <= 0) or any(1 + z / d / df <= 0):
                center = -np.Inf
            else:
                center = -sum(log(d) + (2 + df) * log(1 + z / d / df)) / 2

        newa = a + gold**(best + 1) * da
        if (any(newa < low) or any(newa > high)):
            right = -np.Inf
        else:
            h = filter([0, newa[1]], [1, -newa[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
                + filter([0, newa[0]], [1, -newa[2]], ones(t))
            d = h1 * h2 - h**2
            z = h2 * y1 + h1 * y2 - 2 * h * y
            if any(d <= 0) or any(1 + z / d / df <= 0):
                right = -np.Inf
            else:
                right = -sum(log(d) + (2 + df) * log(1 + z / d / df)) / 2

        if all(like > array([left, center, right])) or all(
                left > array([center, right])):
            while True:
                best = best - 1
                center = left
                newa = a + gold**(best - 1) * da
                if (any(newa < low) or any(newa > high)):
                    left = -np.Inf
                else:
                    h = filter([0, newa[1]], [1, -newa[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
                        + filter([0, newa[0]], [1, -newa[2]], ones(t))
                    d = h1 * h2 - h**2
                    z = h2 * y1 + h1 * y2 - 2 * h * y
                    if any(d <= 0) or any(1 + z / d / df <= 0):
                        left = -np.Inf
                    else:
                        left = -sum(log(d) +
                                    (2 + df) * log(1 + z / d / df)) / 2
                if all(center >= [like, left]):
                    break

        elif all(right > array([left, center])):
            while True:
                best = best + 1
                center = right
                newa = a + gold**(best + 1) * da
                if (any(newa < low) or any(newa > high)):
                    right = -np.Inf
                else:
                    h = filter([0, newa[1]], [1, -newa[2]], y * (df - 2) / df, zi=array([s * (df - 2) / df]))[0] \
                        + filter([0, newa[0]], [1, -newa[2]], ones(t))
                    d = h1 * h2 - h**2
                    z = h2 * y1 + h1 * y2 - 2 * h * y
                    if any(d <= 0) or any(1 + z / d / df <= 0):
                        right = -np.Inf
                    else:
                        right = -npsum(
                            log(d) + (2 + df) * log(1 + z / d / df)) / 2
                if center > right:
                    break
    q = a

    return q
Example #45
                plt.text(estimate_xs[-1],
                         estimate_ys[-1],
                         str(i + 5),
                         fontsize=10)
        plt.scatter(estimate_xs,
                    estimate_ys,
                    s=50,
                    c='k',
                    marker='.',
                    label='Landmark Estimate')

        plt.title('Graph SLAM with known correspondences')
        plt.legend()
        plt.xlim((-2.0, 5.5))
        plt.ylim((-7.0, 7.0))


if __name__ == "__main__":
    # Dataset 1
    dataset = "../0.Dataset1"
    start_frame = 800
    end_frame = 2000
    N_iterations = 1
    # State covariance matrix
    R = np.diagflat(np.array([5, 5, 20]))**2
    # Measurement covariance matrix
    Q = np.diagflat(np.array([100.0, 100.0, 1e16]))**2

    graph_slam = GraphSLAM(dataset, start_frame, end_frame, N_iterations, R, Q)
    plt.show()
Example #46
    def softmax_derivative(self, x):
        # Reshape the 1-d softmax to 2-d so that np.dot will do the matrix multiplication
        z = x.reshape(-1, 1)
        return np.diagflat(z) - np.dot(z, z.T)
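
A quick standalone check of the Jacobian this method returns (a hypothetical
sketch; the softmax helper here is assumed, not part of the original class):

import numpy as np

def softmax(v):
    e = np.exp(v - v.max())
    return e / e.sum()

s = softmax(np.array([1.0, 2.0, 3.0]))
z = s.reshape(-1, 1)
J = np.diagflat(z) - np.dot(z, z.T)
# every row of the softmax Jacobian sums to zero
print(np.allclose(J.sum(axis=1), 0.0))  # True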
Example #47
lam = (log(2)) / 120  # half life 4 months
flex_prob = exp(-lam * arange(t_, 1 + -1, -1)).reshape(1, -1)
flex_prob = flex_prob / npsum(flex_prob)
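# Sanity check on the decay rate: a scenario 120 days old carries half the
# weight of the most recent one, since exp(-lam * 120) = exp(-log(2)) = 0.5.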

typ = namedtuple('typ', 'Entropy')
typ.Entropy = 'Exp'
ens = EffectiveScenarios(flex_prob, typ)
# -

# ## Twist fix for non-synchroneity in HFP

# +
print('Performing the twist fix for non-synchroneity')
# (step 1-2) HFP MEAN/COVARIANCE/CORRELATION
HFPmu, HFPcov = FPmeancov(epsi, flex_prob)
HFPc2 = np.diagflat(diag(HFPcov)**(-1 / 2)) @ HFPcov @ np.diagflat(
    diag(HFPcov)**(-1 / 2))  # rescale the covariance to a correlation matrix

# (step 3) TARGET CORRELATIONS
l = 10  # number of lags

flex_prob_l = flex_prob[[0], l:]
flex_prob_l = flex_prob_l / npsum(flex_prob_l)

# concatenate the daily log-returns
y1, y2 = zeros(t_), zeros(t_)
for t in range(l, t_):
    y1[t] = sum(ret1[0, t - l:t])
    y2[t] = sum(ret2[0, t - l:t])

y1 = y1[l:]
Example #48
    def subsolve(self, m, n, epsimin, alfa, beta, p0, q0, P, Q):

        # init variables
        een = np.ones((n, 1))
        eem = np.ones((m, 1))
        epsi = 1
        epsvecn = epsi * np.ones((n, 1))
        epsvecm = epsi * np.ones((m, 1))
        x = 0.5 * (alfa + beta)
        y = np.ones((m, 1))
        z = np.ones((1, 1))
        lam = np.ones((m, 1))
        xsi = np.reciprocal(x - alfa)
        xsi = np.maximum(xsi, een)
        eta = np.reciprocal(beta - x)
        eta = np.maximum(eta, een)
        mu = np.maximum(eem, 0.5 * self.c)
        zet = np.ones((1, 1))
        s = np.ones((m, 1))
        itera = 0

        while epsi > epsimin:

            epsvecn.fill(epsi)
            epsvecm.fill(epsi)
            ux1 = self.upp - x
            xl1 = x - self.low
            ux2 = ux1 * ux1
            xl2 = xl1 * xl1
            uxinv1 = np.reciprocal(ux1)
            xlinv1 = np.reciprocal(xl1)

            plam = p0 + np.dot(np.transpose(P), lam)
            qlam = q0 + np.dot(np.transpose(Q), lam)
            gvec = np.dot(P, uxinv1) + np.dot(Q, xlinv1)
            dpsidx = plam / ux2 - qlam / xl2

            rex = dpsidx - xsi + eta
            rey = self.c + self.d * y - mu - lam
            rez = self.a0 - zet - np.dot(np.transpose(self.a), lam)
            relam = gvec - np.dot(self.a, z) - y + s - self.b
            rexsi = xsi * (x - alfa) - epsvecn
            reeta = eta * (beta - x) - epsvecn
            remu = mu * y - epsvecm
            rezet = zet * z - epsi
            res = lam * s - epsvecm

            residu1 = np.concatenate((rex, rey, rez))
            residu2 = np.concatenate((relam, rexsi, reeta, remu, rezet, res))

            residu = np.concatenate((residu1, residu2))

            # compute the norm and the max of the residual
            residunorm = np.sqrt((residu * residu).sum())
            residumax = np.max(np.absolute(residu))

            ittt = 0

            while residumax > 0.9 * epsi and ittt < 200:
                ittt = ittt + 1
                itera = itera + 1

                ux1 = self.upp - x
                xl1 = x - self.low
                ux2 = ux1 * ux1
                xl2 = xl1 * xl1
                ux3 = ux1 * ux2
                xl3 = xl1 * xl2
                uxinv1 = np.reciprocal(ux1)

                xlinv1 = np.reciprocal(xl1)
                uxinv2 = np.reciprocal(ux2)
                xlinv2 = np.reciprocal(xl2)

                plam = p0 + np.dot(np.transpose(P), lam)
                qlam = q0 + np.dot(np.transpose(Q), lam)
                gvec = np.dot(P, uxinv1) + np.dot(Q, xlinv1)

                # Note: NumPy broadcasting interprets these multiplications as if multiplying by a diagonal matrix!
                GG = P * uxinv2.flatten() - Q * xlinv2.flatten()
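                # A small illustration of that trick (hypothetical shapes):
                # for P of shape (m, n) and v of shape (n, 1),
                #     P * v.flatten()  ==  P @ np.diagflat(v)
                # i.e. column j is scaled by v[j], without materializing the
                # (n, n) diagonal matrix.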

                dpsidx = plam / ux2 - qlam / xl2
                delx = dpsidx - epsvecn / (x - alfa) + epsvecn / (beta - x)
                dely = self.c + self.d * y - lam - epsvecm / y
                delz = self.a0 - np.dot(np.transpose(self.a), lam) - epsi / z
                dellam = gvec - self.a * z - y - self.b + epsvecm / lam

                diagx = plam / ux3 + qlam / xl3
                diagx = 2 * diagx + xsi / (x - alfa) + eta / (beta - x)
                diagxinv = np.reciprocal(diagx)
                diagy = self.d + mu / y
                diagyinv = np.reciprocal(diagy)
                diaglam = s / lam
                diaglamyi = diaglam + diagyinv

                # fewer constraints than design variables
                if m < n:
                    blam = dellam + dely / diagy - np.dot(
                        GG, np.divide(delx, diagx))
                    Alam = np.diagflat(diaglamyi) + np.dot(
                        GG * diagxinv.flatten(), np.transpose(GG))

                    # create right hand side
                    bb = np.transpose(
                        np.concatenate((np.transpose(blam), delz), axis=1))

                    # create left hand side
                    AA = np.concatenate(
                        (np.concatenate((Alam, self.a), axis=1),
                         np.concatenate((np.transpose(self.a), -zet / z),
                                        axis=1)))

                    # solve system
                    solut = np.linalg.solve(AA, bb)

                    # store solution
                    dlam = solut[0:m]
                    dz = np.array([solut[m]])
                    dx = -delx / diagx - (np.dot(np.transpose(GG),
                                                 dlam)) / diagx

                # more constraints than design variables
                else:

                    diaglamyiinv = np.reciprocal(diaglamyi)
                    dellamyi = dellam + dely / diagy

                    # prepare LHS
                    Axx = np.diagflat(diagx) + np.dot(
                        np.transpose(GG), np.dot(np.diagflat(diaglamyiinv),
                                                 GG))
                    azz = zet / z + np.dot(np.transpose(self.a),
                                           self.a / diaglamyi)
                    axz = -np.dot(np.transpose(GG), self.a / diaglamyi)

                    # prepare RHS
                    bx = delx + np.dot(np.transpose(GG),
                                       dellamyi / diaglamyi)
                    bz = delz - np.dot(np.transpose(self.a),
                                       dellamyi / diaglamyi)

                    # create LHS
                    AA = np.vstack((np.hstack(
                        (Axx, axz)), np.hstack((np.transpose(axz), azz))))

                    # create RHS
                    bb = np.transpose(np.hstack((-np.transpose(bx), -bz)))

                    # solve system
                    solut = np.linalg.solve(AA, bb)

                    # store solution
                    dx = solut[0:n]
                    dz = np.array([solut[n]])
                    dlam = np.dot(GG, dx) / diaglamyi - dz * (
                        self.a / diaglamyi) + dellamyi / diaglamyi

                dy = -dely / diagy + dlam / diagy
                dxsi = -xsi + epsvecn / (x - alfa) - (xsi * dx) / (x - alfa)
                deta = -eta + epsvecn / (beta - x) + (eta * dx) / (beta - x)
                dmu = -mu + epsvecm / y - (mu * dy) / y
                dzet = -zet + epsi / z - zet * dz / z
                ds = -s + epsvecm / lam - (s * dlam) / lam

                # construct xx
                xx = np.concatenate((y, z, lam, xsi, eta, mu, zet, s))

                # construct dxx
                dxx = np.concatenate((dy, dz, dlam, dxsi, deta, dmu, dzet, ds))

                stepxx = -1.01 * dxx / xx
                stmxx = np.amax(stepxx)
                stepalfa = -1.01 * dx / (x - alfa)
                stmalfa = np.amax(stepalfa)
                stepbeta = 1.01 * dx / (beta - x)
                stmbeta = np.amax(stepbeta)
                stmalbe = np.maximum(stmalfa, stmbeta)
                stmalbexx = np.maximum(stmalbe, stmxx)
                stminv = np.maximum(stmalbexx, 1)
                steg = 1 / stminv

                xold = x
                yold = y
                zold = z
                lamold = lam
                xsiold = xsi
                etaold = eta
                muold = mu
                zetold = zet
                sold = s

                # init loop
                itto = 0
                resinew = 2 * residunorm

                while (resinew > residunorm and itto < 50):

                    # increment loop
                    itto += 1

                    x = xold + steg * dx
                    y = yold + steg * dy
                    z = zold + steg * dz
                    lam = lamold + steg * dlam
                    xsi = xsiold + steg * dxsi
                    eta = etaold + steg * deta
                    mu = muold + steg * dmu
                    zet = zetold + steg * dzet
                    s = sold + steg * ds

                    ux1 = self.upp - x
                    xl1 = x - self.low
                    ux2 = ux1 * ux1
                    xl2 = xl1 * xl1
                    uxinv1 = np.reciprocal(ux1)
                    xlinv1 = np.reciprocal(xl1)
                    plam = p0 + np.dot(np.transpose(P), lam)
                    qlam = q0 + np.dot(np.transpose(Q), lam)
                    gvec = np.dot(P, uxinv1) + np.dot(Q, xlinv1)
                    dpsidx = plam / ux2 - qlam / xl2

                    rex = dpsidx - xsi + eta
                    rey = self.c + self.d * y - mu - lam
                    rez = self.a0 - zet - np.dot(np.transpose(self.a), lam)
                    relam = gvec - self.a * z - y + s - self.b
                    rexsi = xsi * (x - alfa) - epsvecn
                    reeta = eta * (beta - x) - epsvecn
                    remu = mu * y - epsvecm
                    rezet = zet * z - epsi
                    res = lam * s - epsvecm

                    residu1 = np.concatenate((rex, rey, rez))
                    residu2 = np.concatenate(
                        (relam, rexsi, reeta, remu, rezet, res))

                    # complete residu
                    residu = np.concatenate((residu1, residu2))

                    #compute norm
                    resinew = np.sqrt((residu * residu).sum())

                    steg = steg / 2

                # after while loop add new residu's
                residunorm = resinew
                residumax = np.max(np.absolute(residu))
                steg = 2 * steg

#==============================================================================
#             if ittt > 99:
#                 print("Max iterations in subsolve reached: ittt:", ittt, ", epsi:", epsi)
#==============================================================================

            epsi = 0.1 * epsi

        # print("MMA: {0} inner iterations.".format(itera))

        # After all loops return values
        return x, y, z, lam, xsi, eta, mu, zet, s
Example #49
script_dir = os.path.dirname(__file__)
rel_path = "images/gray.png"
abs_file_path = os.path.join(script_dir, rel_path)

rawImage = PIL.Image.open(abs_file_path)
rawImage.load()

A = np.asarray(rawImage)
Aherm = A.transpose()  # Always real-valued, so transpose is fine
AstarA = np.matmul(Aherm, A)

AstarA_eigenValues, AstarA_eigenVectors = np.linalg.eig(AstarA)

#for evalue in AstarA_eigenValues:
#    if evalue < 0:
#        print(evalue)

U, s, V = np.linalg.svd(A)
# diagflat turns the singular-value vector into a diagonal matrix; this
# matches A's shape only for a square image.
S = np.diagflat(s)

Areconstructed = np.matmul(np.matmul(U, S), V).astype(int)

# Show comparison
print(A)
print(Areconstructed)

mse = np.mean((A - Areconstructed) ** 2)
print("MSE:")
print(mse)
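
For a rectangular image the diagonal matrix has to be embedded in a matrix of
A's shape before the product is formed; a minimal sketch reusing the names
above:

U, s, V = np.linalg.svd(A)
S = np.zeros(A.shape)
S[:len(s), :len(s)] = np.diagflat(s)
Areconstructed = (U @ S @ V).astype(int)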
Example #50
File: test_new.py  Project: wbj218/RL-GP
                         l_min=1e-5,
                         N_max=2000,
                         sigma=None,
                         layer=128,
                         discount_factor=0.99,
                         scale_reward=False,
                         mean=True)

render = True
scale = True

theta = np.random.uniform(low=0,
                          high=1,
                          size=state_dim + actions_dim + 1 + state_dim)
noise_prior = np.sqrt(theta[-state_dim:])
noise_prior = np.diagflat(noise_prior)
theta = theta[0:-state_dim]
keps = 1.5e-4
B = np.eye(state_dim)

max_episode = 6000
if alpha < 1:
    max_episode = 3000
max_step = 300
eval_step = 300
num_test_per_episode = 3
solved = 0
solve_ep = 0

reward_his = []
window_reward_his = deque(maxlen=100)
Example #51
File: fileio.py  Project: serbinsh/isofit
    def write_spectrum(self, row, col, states, meas, geom, flush_immediately=False):
        """Write data from a single inversion to all output buffers."""

        self.writes = self.writes + 1

        if len(states) == 0:

            # Write a bad data flag
            atm_bad = np.ones(self.fm.instrument.n_chan * 5) * -9999.0
            state_bad = np.ones(len(self.fm.statevec)) * -9999.0
            data_bad = np.ones(self.fm.instrument.n_chan) * -9999.0
            to_write = {
                'estimated_state_file': state_bad,
                'estimated_reflectance_file': data_bad,
                'estimated_emission_file': data_bad,
                'modeled_radiance_file': data_bad,
                'apparent_reflectance_file': data_bad,
                'path_radiance_file': data_bad,
                'simulated_measurement_file': data_bad,
                'algebraic_inverse_file': data_bad,
                'atmospheric_coefficients_file': atm_bad,
                'radiometry_correction_file': data_bad,
                'spectral_calibration_file': data_bad,
                'posterior_uncertainty_file': state_bad
            }

        else:

            # The inversion returns a list of states, which are
            # interpreted either as samples from the posterior (MCMC case)
            # or as a gradient descent trajectory (standard case). For
            # gradient descent the last spectrum is the converged solution.
            if self.iv.mode == 'mcmc':
                state_est = states.mean(axis=0)
            else:
                state_est = states[-1, :]

            # Spectral calibration
            wl, fwhm = self.fm.calibration(state_est)
            cal = np.column_stack(
                [np.arange(0, len(wl)), wl / 1000.0, fwhm / 1000.0])

            # If there is no actual measurement, we use the simulated version
            # in subsequent calculations.  Naturally in these cases we're
            # mostly just interested in the simulation result.
            if meas is None:
                meas = self.fm.calc_rdn(state_est, geom)

            # Rodgers diagnostics
            lamb_est, meas_est, path_est, S_hat, K, G = \
                self.iv.forward_uncertainty(state_est, meas, geom)

            # Simulation with noise
            meas_sim = self.fm.instrument.simulate_measurement(meas_est, geom)

            # Algebraic inverse and atmospheric optical coefficients
            x_surface, x_RT, x_instrument = self.fm.unpack(state_est)
            rfl_alg_opt, Ls, coeffs = invert_algebraic(self.fm.surface,
                                                       self.fm.RT, self.fm.instrument, x_surface, x_RT, x_instrument,
                                                       meas, geom)
            rhoatm, sphalb, transm, solar_irr, coszen, transup = coeffs

            L_atm = self.fm.RT.get_L_atm(x_RT, geom)
            L_down_transmitted = self.fm.RT.get_L_down_transmitted(x_RT, geom)

            atm = np.column_stack(list(coeffs[:4]) +
                                  [np.ones((len(wl), 1)) * coszen])
            atm = atm.T.reshape((len(wl)*5,))

            # Upward emission & glint and apparent reflectance
            Ls_est = self.fm.calc_Ls(state_est, geom)
            apparent_rfl_est = lamb_est + Ls_est

            # Radiometric calibration
            factors = np.ones(len(wl))
            if 'radiometry_correction_file' in self.outfiles:
                if 'reference_reflectance_file' in self.infiles:
                    reference_file = self.infiles['reference_reflectance_file']
                    self.rfl_ref = reference_file.read_spectrum(row, col)
                    self.wl_ref = reference_file.wl
                    w, fw = self.fm.instrument.calibration(x_instrument)
                    resamp = resample_spectrum(self.rfl_ref, self.wl_ref,
                                               w, fw, fill=True)
                    meas_est = self.fm.calc_meas(state_est, geom, rfl=resamp)
                    factors = meas_est / meas
                else:
                    logging.warning('No reflectance reference')

            # Assemble all output products
            to_write = {
                'estimated_state_file': state_est,
                'estimated_reflectance_file': np.column_stack((self.fm.surface.wl, lamb_est)),
                'estimated_emission_file': np.column_stack((self.fm.surface.wl, Ls_est)),
                'modeled_radiance_file': np.column_stack((wl, meas_est)),
                'apparent_reflectance_file': np.column_stack((self.fm.surface.wl, apparent_rfl_est)),
                'path_radiance_file': np.column_stack((wl, path_est)),
                'simulated_measurement_file': np.column_stack((wl, meas_sim)),
                'algebraic_inverse_file': np.column_stack((self.fm.surface.wl, rfl_alg_opt)),
                'atmospheric_coefficients_file': atm,
                'radiometry_correction_file': factors,
                'spectral_calibration_file': cal,
                'posterior_uncertainty_file': np.sqrt(np.diag(S_hat))
            }

        for product in self.outfiles:
            logging.debug('IO: Writing '+product)
            self.outfiles[product].write_spectrum(row, col, to_write[product])
            if (self.writes % flush_rate) == 0 or flush_immediately:
                self.outfiles[product].flush_buffers()

        # Special case! samples file is matlab format.
        if self.output.mcmc_samples_file is not None:
            logging.debug('IO: Writing mcmc_samples_file')
            mdict = {'samples': states}
            scipy.io.savemat(self.output.mcmc_samples_file, mdict)

        # Special case! Data dump file is matlab format.
        if self.output.data_dump_file is not None:

            logging.debug('IO: Writing data_dump_file')
            x = state_est
            xall = states
            Seps_inv, Seps_inv_sqrt = self.iv.calc_Seps(x, meas, geom)
            meas_est_window = meas_est[self.iv.winidx]
            meas_window = meas[self.iv.winidx]
            xa, Sa, Sa_inv, Sa_inv_sqrt = self.iv.calc_prior(x, geom)
            prior_resid = (x - xa).dot(Sa_inv_sqrt)
            rdn_est = self.fm.calc_rdn(x, geom)
            rdn_est_all = np.array([self.fm.calc_rdn(xtemp, geom)
                                    for xtemp in states])

            x_surface, x_RT, x_instrument = self.fm.unpack(x)
            Kb = self.fm.Kb(x, geom)
            xinit = invert_simple(self.fm, meas, geom)
            Sy = self.fm.instrument.Sy(meas, geom)
            cost_jac_prior = np.diagflat(x - xa).dot(Sa_inv_sqrt)
            cost_jac_meas = Seps_inv_sqrt.dot(K[self.iv.winidx, :])
            meas_Cov = self.fm.Seps(x, meas, geom)
            lamb_est, meas_est, path_est, S_hat, K, G = \
                self.iv.forward_uncertainty(state_est, meas, geom)
            A = np.matmul(K, G)

            # Form the MATLAB dictionary object and write to file
            mdict = {
                'K': K,
                'G': G,
                'S_hat': S_hat,
                'prior_mu': xa,
                'Ls': Ls,
                'prior_Cov': Sa,
                'meas': meas,
                'rdn_est': rdn_est,
                'rdn_est_all': rdn_est_all,
                'x': x,
                'xall': xall,
                'x_surface': x_surface,
                'x_RT': x_RT,
                'x_instrument': x_instrument,
                'meas_Cov': meas_Cov,
                'wl': wl,
                'fwhm': fwhm,
                'lamb_est': lamb_est,
                'coszen': coszen,
                'cost_jac_prior': cost_jac_prior,
                'Kb': Kb,
                'A': A,
                'cost_jac_meas': cost_jac_meas,
                'winidx': self.iv.winidx,
                'windows': self.iv.windows,
                'prior_resid': prior_resid,
                'noise_Cov': Sy,
                'xinit': xinit,
                'rhoatm': rhoatm,
                'sphalb': sphalb,
                'transm': transm,
                'transup': transup,
                'solar_irr': solar_irr,
                'L_atm': L_atm,
                'L_down_transmitted': L_down_transmitted
            }
            scipy.io.savemat(self.output.data_dump_file, mdict)

        # Write plots, if needed
        if len(states) > 0 and self.output.plot_directory is not None:

            if 'reference_reflectance_file' in self.infiles:
                reference_file = self.infiles['reference_reflectance_file']
                self.rfl_ref = reference_file.read_spectrum(row, col)
                self.wl_ref = reference_file.wl

            for i, x in enumerate(states):

                # Calculate intermediate solutions
                lamb_est, meas_est, path_est, S_hat, K, G = \
                    self.iv.forward_uncertainty(state_est, meas, geom)

                plt.cla()
                red = [0.7, 0.2, 0.2]
                wl, fwhm = self.fm.calibration(x)
                xmin, xmax = min(wl), max(wl)
                fig = plt.subplots(1, 2, figsize=(10, 5))
                plt.subplot(1, 2, 1)
                meas_est = self.fm.calc_meas(x, geom)
                for lo, hi in self.iv.windows:
                    idx = np.where(np.logical_and(wl > lo, wl < hi))[0]
                    p1 = plt.plot(wl[idx], meas[idx], color=red, linewidth=2)
                    p2 = plt.plot(wl, meas_est, color='k', linewidth=1)
                plt.title("Radiance")
                plt.title("Measurement (Scaled DN)")
                ymax = max(meas)*1.25
                ymax = max(meas)+0.01
                ymin = min(meas)-0.01
                plt.text(500, ymax*0.92, "Measured", color=red)
                plt.text(500, ymax*0.86, "Model", color='k')
                plt.ylabel(r"$\mu$W nm$^{-1}$ sr$^{-1}$ cm$^{-2}$")
                plt.ylabel("Intensity")
                plt.xlabel("Wavelength (nm)")
                plt.ylim([-0.001, ymax])
                plt.ylim([ymin, ymax])
                plt.xlim([xmin, xmax])

                plt.subplot(1, 2, 2)
                lamb_est = self.fm.calc_lamb(x, geom)
                ymax = min(max(lamb_est)*1.25, 0.10)
                for lo, hi in self.iv.windows:

                    # black line
                    idx = np.where(np.logical_and(wl > lo, wl < hi))[0]
                    p2 = plt.plot(wl[idx], lamb_est[idx], 'k', linewidth=2)
                    ymax = max(max(lamb_est[idx]*1.2), ymax)

                    # red line
                    if 'reference_reflectance_file' in self.infiles:
                        idx = np.where(np.logical_and(
                            self.wl_ref > lo, self.wl_ref < hi))[0]
                        p1 = plt.plot(self.wl_ref[idx], self.rfl_ref[idx],
                                      color=red, linewidth=2)
                        ymax = max(max(self.rfl_ref[idx]*1.2), ymax)

                    # green and blue lines - surface components
                    if hasattr(self.fm.surface, 'components') and \
                            self.output.plot_surface_components:
                        idx = np.where(np.logical_and(self.fm.surface.wl > lo,
                                                      self.fm.surface.wl < hi))[0]
                        p3 = plt.plot(self.fm.surface.wl[idx],
                                      self.fm.xa(x, geom)[idx], 'b', linewidth=2)
                        for j in range(len(self.fm.surface.components)):
                            z = self.fm.surface.norm(
                                lamb_est[self.fm.surface.idx_ref])
                            mu = self.fm.surface.components[j][0] * z
                            plt.plot(self.fm.surface.wl[idx], mu[idx], 'g:',
                                     linewidth=1)
                plt.text(500, ymax*0.86, "Remote estimate", color='k')
                if 'reference_reflectance_file' in self.infiles:
                    plt.text(500, ymax*0.92, "In situ reference", color=red)
                if hasattr(self.fm.surface, 'components') and \
                        self.output.plot_surface_components:
                    plt.text(500, ymax*0.80, "Prior mean state ",
                             color='b')
                    plt.text(500, ymax*0.74, "Surface components ",
                             color='g')
                plt.ylim([-0.0010, ymax])
                plt.xlim([xmin, xmax])
                plt.title("Reflectance")
                plt.title("Source Model")
                plt.xlabel("Wavelength (nm)")
                fn = os.path.join(self.output.plot_directory, ('frame_%i.png' % i))
                plt.savefig(fn)
                plt.close()
Example #52
def initialize_from_hdf5_file(file_name,
                              structure,
                              read_trajectory=True,
                              initial_cut=1,
                              final_cut=None,
                              memmap=False):
    import h5py

    print("Reading data from hdf5 file: " + file_name)

    trajectory = None
    velocity = None
    vc = None
    reduced_q_vector = None

    #Check file exists
    if not os.path.isfile(file_name):
        print(file_name + ' file does not exist!')
        exit()

    hdf5_file = h5py.File(file_name, "r")
    if "trajectory" in hdf5_file and read_trajectory is True:
        trajectory = hdf5_file['trajectory'][:]
        if final_cut is not None:
            trajectory = trajectory[initial_cut - 1:final_cut]
        else:
            trajectory = trajectory[initial_cut - 1:]

    if "velocity" in hdf5_file:
        velocity = hdf5_file['velocity'][:]
        if final_cut is not None:
            velocity = velocity[initial_cut - 1:final_cut]
        else:
            velocity = velocity[initial_cut - 1:]

    if "vc" in hdf5_file:
        vc = hdf5_file['vc'][:]
        if final_cut is not None:
            vc = vc[initial_cut - 1:final_cut]
        else:
            vc = vc[initial_cut - 1:]

    if "reduced_q_vector" in hdf5_file:
        reduced_q_vector = hdf5_file['reduced_q_vector'][:]
        print("Load trajectory projected onto {0}".format(reduced_q_vector))

    time = hdf5_file['time'][:]
    supercell = hdf5_file['super_cell'][:]
    hdf5_file.close()

    if vc is None:
        return dyn.Dynamics(structure=structure,
                            trajectory=trajectory,
                            velocity=velocity,
                            time=time,
                            supercell=np.dot(np.diagflat(supercell),
                                             structure.get_cell()),
                            memmap=memmap)
    else:
        return vc, reduced_q_vector, dyn.Dynamics(structure=structure,
                                                  time=time,
                                                  supercell=np.dot(
                                                      np.diagflat(supercell),
                                                      structure.get_cell()),
                                                  memmap=memmap)
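
A note on the supercell pattern used twice above: np.diagflat(supercell) is a
diagonal matrix, so np.dot(np.diagflat(supercell), structure.get_cell()) scales
the i-th lattice vector (the i-th row of the cell matrix) by the i-th
repetition count; e.g. supercell = (2, 2, 1) doubles the a and b lattice
vectors and leaves c unchanged.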
Example #53
def generate_test_trajectory(
        structure,
        supercell=(1, 1, 1),
        minimum_frequency=0.1,  # THz
        total_time=2,  # picoseconds
        time_step=0.002,  # picoseconds
        temperature=400,  # Kelvin
        silent=False,
        memmap=False,
        phase_0=0.0):

    import random
    from dynaphopy.power_spectrum import _progress_bar

    print('Generating ideal harmonic data for testing')
    kb_boltzmann = 0.831446  # u * A^2 / ( ps^2 * K )

    number_of_unit_cells_phonopy = np.prod(
        np.diag(structure.get_supercell_phonon()))
    number_of_unit_cells = np.prod(supercell)
    #    atoms_relation = float(number_of_unit_cells)/ number_of_unit_cells_phonopy

    #Recover dump trajectory from file (test only)
    import pickle
    if False:

        dump_file = open("trajectory.save", "r")
        trajectory = pickle.load(dump_file)
        return trajectory

    number_of_atoms = structure.get_number_of_cell_atoms()
    number_of_primitive_atoms = structure.get_number_of_primitive_atoms()
    number_of_dimensions = structure.get_number_of_dimensions()

    positions = structure.get_positions(supercell=supercell)
    masses = structure.get_masses(supercell=supercell)

    number_of_atoms = number_of_atoms * number_of_unit_cells

    number_of_primitive_cells = number_of_atoms / number_of_primitive_atoms

    atom_type = structure.get_atom_type_index(supercell=supercell)

    #Generate additional wave vectors sample
    #    structure.set_supercell_phonon_renormalized(np.diag(supercell))

    q_vector_list = pho_interface.get_commensurate_points(
        structure, np.diag(supercell))

    q_vector_list_cart = [
        np.dot(q_vector,
               2 * np.pi * np.linalg.inv(structure.get_primitive_cell()).T)
        for q_vector in q_vector_list
    ]

    atoms_relation = float(
        len(q_vector_list) * number_of_primitive_atoms) / number_of_atoms

    #Generate frequencies and eigenvectors for the testing wave vector samples
    print('Wave vectors included in test (commensurate points)')
    eigenvectors_r = []
    frequencies_r = []
    for i in range(len(q_vector_list)):
        print(q_vector_list[i])
        eigenvectors, frequencies = pho_interface.obtain_eigenvectors_and_frequencies(
            structure, q_vector_list[i])
        eigenvectors_r.append(eigenvectors)
        frequencies_r.append(frequencies)
    number_of_frequencies = len(frequencies_r[0])

    #Generating trajectory
    if not silent:
        _progress_bar(0, 'generating')

    #Generating trajectory
    trajectory = []
    for time in np.arange(total_time, step=time_step):
        coordinates = np.array(positions[:, :], dtype=complex)

        for i_freq in range(number_of_frequencies):
            for i_long, q_vector in enumerate(q_vector_list_cart):

                if abs(
                        frequencies_r[i_long][i_freq]
                ) > minimum_frequency:  # Prevent error due to small frequencies
                    amplitude = np.sqrt(
                        number_of_dimensions * kb_boltzmann * temperature /
                        number_of_primitive_cells * atoms_relation) / (
                            frequencies_r[i_long][i_freq] * 2 * np.pi
                        )  # + random.uniform(-1,1)*0.05
                    normal_mode = amplitude * np.exp(
                        -1j * frequencies_r[i_long][i_freq] *
                        2.0 * np.pi * time)
                    phase = np.exp(
                        1j * np.dot(q_vector, positions.T) +
                        phase_0)

                    coordinates += (1.0 / np.sqrt(masses)[None].T *
                                    eigenvectors_r[i_long][i_freq, atom_type] *
                                    phase[None].T * normal_mode).real

        trajectory.append(coordinates)
        if not silent:
            _progress_bar(
                float(time + time_step) / total_time,
                'generating',
            )

    trajectory = np.array(trajectory)

    time = np.array([i * time_step for i in range(trajectory.shape[0])],
                    dtype=float)
    energy = np.array([
        number_of_atoms * number_of_dimensions * kb_boltzmann * temperature
        for i in range(trajectory.shape[0])
    ],
                      dtype=float)

    #Save a trajectory object to file for later recovery (test only)
    if False:
        dump_file = open("trajectory.save", "w")
        pickle.dump(
            dyn.Dynamics(structure=structure,
                         trajectory=np.array(trajectory, dtype=complex),
                         energy=np.array(energy),
                         time=time,
                         supercell=np.dot(np.diagflat(supercell),
                                          structure.get_cell())), dump_file)

        dump_file.close()

    # structure.set_supercell_phonon_renormalized(None)

    return dyn.Dynamics(structure=structure,
                        trajectory=np.array(trajectory, dtype=complex),
                        energy=np.array(energy),
                        time=time,
                        supercell=np.dot(np.diagflat(supercell),
                                         structure.get_cell()),
                        memmap=memmap)
Example #54
        ])
        A3 = np.array([
            [1.0, 0, DiscTimePeriod, 0],
            [0, 1.0, 0, DiscTimePeriod],
            [
                0, 0,
                np.cos(RotRate * DiscTimePeriod),
                1 * np.sin(RotRate * DiscTimePeriod)
            ],
            [
                0, 0, -1 * np.sin(RotRate * DiscTimePeriod),
                np.cos(RotRate * DiscTimePeriod)
            ],
        ])

        V1 = np.diagflat([0.0025, 0.0025, 0.0005, 0.0005])

        # W1 = np.diagflat([0.000025, 0.000025])
        W1 = np.diagflat([0.0005, 0.0005])
        WL1 = np.diagflat([0.32, 0.32])

        # From https://docs.px4.io/master/en/advanced_config/tuning_the_ecl_ekf.html
        # and https://dewesoft.com/products/interfaces-and-sensors/gps-and-imu-devices/tech-specs

        C1 = np.array([[0, 0, 1, 0], [0, 0, 0, 1]])
        CL1 = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])

        D = np.array([[1, 0], [0, 1]])

        A = TimeVaryingValue([A1, A2, A3], modes)
        V = TimeVaryingValue([V1, V1, V1], modes)
Example #55
def Dsoftmax(x):
    s = x.reshape(-1, 1)
    return np.diagflat(s) - np.dot(s, s.T)
Example #56
    def time_diagflat_l100(self):
        np.diagflat(self.l100)
Example #57
def fractionalDifferenciatorWeights(p,
                                    alpha,
                                    NbPoles=20,
                                    PolesMinMax=(-5, 10),
                                    NbFreqPoints=200,
                                    FreqsMinMax=(1, 48e3),
                                    DoPlot=True):

    # Definition of the frequency grid
    fmin, fmax = FreqsMinMax
    wmin, wmax = 2 * np.pi * fmin, 2 * np.pi * fmax
    w = np.exp(
        np.log(wmin) +
        np.linspace(0, 1, NbFreqPoints + 1) * np.log(wmax / wmin))
    w12 = np.sqrt(w[1:] * w[:-1])

    # Unpack min and max exponents to define the list of poles
    emin, emax = PolesMinMax
    Xi = np.logspace(emin, emax, NbPoles)  # xi_0 -> xi_{N+1}

    # Input to Output transfer function of the fractional integrator of
    # order 1-alpha
    beta = 1. - alpha

    def transferFunctionFracInt(s):
        return s**-beta

    # Target transfer function evaluated on the frequency grid
    T = transferFunctionFracInt(1j * w12)

    # Return the basis vector of elementary damping with poles Xi
    def Basis(s, Xi):
        return (s + Xi)**-1

    # Matrix of basis transfer function for each poles on the frequency grid
    M = np.zeros((NbFreqPoints, NbPoles), dtype=np.complex64)
    for k in np.arange(NbFreqPoints):
        M[k, :] = Basis(1j * w12[k], Xi)

    # Perceptual weights
    WBuildingVector = (np.log(w[1:]) - np.log(w[:-1])) / (np.abs(T)**2)
    W = np.diagflat(WBuildingVector)
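    # A note on these weights (inferred from CostFunction below): W turns the
    # fit into a weighted least-squares problem,
    #     cost(mu) = (M @ mu - T)^H  W  (M @ mu - T),
    # weighting each band by its log-frequency width and by 1/|T|^2, i.e. a
    # relative-error criterion.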

    # Definition of the cost function
    def CostFunction(mu):
        mat = np.dot(M, mu) - T
        cost = np.dot(np.conjugate(mat.T), np.dot(W, mat))
        return cost.real

    # Optimization constraints
    bnds = [(0, None) for n in range(NbPoles)]

    # Optimization
    from scipy.optimize import minimize
    MuOpt = minimize(CostFunction, np.ones(NbPoles), bounds=bnds, tol=EPS)
    Mu = MuOpt.x  # Get the solution

    # Conversion to phs parameters
    diagQ = []
    diagR = []

    # Eliminate 0 valued weights
    for n in np.arange(NbPoles):
        if Mu[n] > 0:
            diagR.append(p * Mu[n])
            diagQ.append(p * Mu[n] * Xi[n])

    if DoPlot:
        from matplotlib.pyplot import (figure, subplot, plot, semilogx, ylabel,
                                       legend, grid, xlabel)
        TOpt = np.dot(M, Mu)  # avoids the deprecated np.matrix
        wmin, wmax = 2 * np.pi * fmin, 2 * np.pi * fmax
        figure()
        subplot(2, 1, 1)
        faxis = w12[(wmin < w12) & (w12 < wmax)] / (2 * np.pi)
        v1 = 20 * np.log10(np.abs(T[(wmin < w12) & (w12 < wmax)]))
        v2 = 20 * np.log10(np.abs(TOpt[(wmin < w12) & (w12 < wmax)]))
        v3 = v1 - v2  # approximation error in dB
        semilogx(faxis, v1, label='Target')
        semilogx(faxis, v2, label='Approx')
        ylabel('Transfer (dB)')
        legend(loc=0)
        grid()
        subplot(2, 1, 2)
        plot(faxis, v3, label='Error')
        xlabel('Log-frequencies (log Hz)')
        ylabel('Error (dB)')
        legend(loc=0)
        grid()

    return diagR, diagQ
Example #58
def fit_firth(logit_model,
              start_vec,
              X,
              y,
              step_limit=1000,
              convergence_limit=0.0001):
    """Do firth regression

    Args:
        logit (statsmodels.discrete.discrete_model.Logit)
            Logistic model
        start_vec (numpy.array)
            Pre-initialized vector to speed-up convergence (n, 1)
        X (numpy.array)
            (n, m)
        y (numpy.array)
            (n, )
        step_limit (int)
            Maximum number of iterations
        convergence_limit (float)
            Convergence tolerance

    Returns:
        intercept (float)
           Intercept
        kbeta (float)
            Variant beta
        beta (iterable)
            Covariates betas (n-2)
        bse (float)
            Beta std-err
        fitll (float or None)
            Likelihood of fit or None if could not fit
    """

    beta_iterations = []
    beta_iterations.append(start_vec)
    for i in range(0, step_limit):
        pi = logit_model.predict(beta_iterations[i])
        W = np.diagflat(np.multiply(pi, 1 - pi))
        var_covar_mat = np.linalg.pinv(
            -logit_model.hessian(beta_iterations[i]))

        # build hat matrix
        rootW = np.sqrt(W)
        H = np.dot(np.transpose(X), np.transpose(rootW))
        H = np.matmul(var_covar_mat, H)
        H = np.matmul(np.dot(rootW, X), H)

        # penalised score
        U = np.matmul(np.transpose(X),
                      y - pi + np.multiply(np.diagonal(H), 0.5 - pi))
        new_beta = beta_iterations[i] + np.matmul(var_covar_mat, U)

        # step halving
        j = 0
        while firth_likelihood(new_beta, logit_model) > firth_likelihood(
                beta_iterations[i], logit_model):
            new_beta = beta_iterations[i] + 0.5 * (new_beta -
                                                   beta_iterations[i])
            j = j + 1
            if (j > step_limit):
                return None

        beta_iterations.append(new_beta)
        if i > 0 and (
                np.linalg.norm(beta_iterations[i] - beta_iterations[i - 1]) <
                convergence_limit):
            break

    return_fit = None
    if np.linalg.norm(beta_iterations[i] -
                      beta_iterations[i - 1]) < convergence_limit:
        # Calculate stats
        fitll = -firth_likelihood(beta_iterations[-1], logit_model)
        intercept = beta_iterations[-1][0]
        if len(beta_iterations[-1]) > 1:
            kbeta = beta_iterations[-1][1]
            bse = math.sqrt(-logit_model.hessian(beta_iterations[-1])[1, 1])
        else:
            # Encountered when fitting null without any distances/covariates
            kbeta = None
            bse = None

        if len(beta_iterations[-1]) > 2:
            beta = beta_iterations[-1][2:].tolist()
        else:
            beta = None

        return_fit = intercept, kbeta, beta, bse, fitll

    return return_fit
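
A hypothetical invocation sketch (it assumes statsmodels is importable and that
firth_likelihood is defined elsewhere in this module; X and y below are made-up
placeholders):

import numpy as np
import statsmodels.api as sm

X = np.column_stack([np.ones(100), np.random.randn(100, 2)])  # intercept + 2 covariates
y = (np.random.rand(100) < 0.5).astype(float)  # binary outcome

logit_model = sm.Logit(y, X)
result = fit_firth(logit_model, np.zeros(X.shape[1]), X, y)
if result is not None:
    intercept, kbeta, beta, bse, fitll = result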
Example #59
                                            lensmodel,
                                            intrinsics_true[i]) \
                            for i in range(len(intrinsics_true))]),
                 0,1)



# Let's define the observation-time pixel noise. The noise vector
# q_true_sampled_noise has the same shape as q_true for each sample. so
# q_true_sampled_noise.shape = (Nsamples,Npoints,Ncameras,2). The covariance is
# a square matrix with each dimension of length Npoints*Ncameras*2
N_q_true_noise = Npoints*args.Ncameras*2
sigma_qt_sq = \
    args.pixel_uncertainty_stdev_triangulation * \
    args.pixel_uncertainty_stdev_triangulation
var_qt = np.diagflat( (sigma_qt_sq,) * N_q_true_noise )
var_qt_reshaped = var_qt.reshape( Npoints, args.Ncameras, 2,
                                  Npoints, args.Ncameras, 2 )

if args.Ncameras != 2:
    raise Exception("Ncameras == 2 is assumed here")
for ipt in range(Npoints):
    var_qt_reshaped[ipt,0,0, ipt,1,0] = sigma_qt_sq*args.pixel_uncertainty_triangulation_correlation
    var_qt_reshaped[ipt,1,0, ipt,0,0] = sigma_qt_sq*args.pixel_uncertainty_triangulation_correlation
    var_qt_reshaped[ipt,0,1, ipt,1,1] = sigma_qt_sq*args.pixel_uncertainty_triangulation_correlation
    var_qt_reshaped[ipt,1,1, ipt,0,1] = sigma_qt_sq*args.pixel_uncertainty_triangulation_correlation

# Let's actually apply the noise to compute var(distancep) empirically to compare
# against the var(distancep) prediction I just computed
# shape (Nsamples,Npoints,Ncameras,2)
qt_noise = \
Example #60
File: npt.py  Project: yfyh2013/yaff
    def baro(self, iterative, chainvel0):
        def update_baro_vel():
            # updates the barostat velocity tensor
            if chainvel0 is not None:
                # iL v_{xi} v_g h/8
                self.vel_press *= np.exp(-self.timestep_press*chainvel0/8)
            # definition of P_intV and G
            ptens_vol = np.dot(iterative.vel.T*iterative.masses, iterative.vel) - iterative.vtens
            ptens_vol = 0.5*(ptens_vol.T + ptens_vol)
            G = (ptens_vol+(2.0*iterative.ekin/iterative.ndof-self.press*iterative.ff.system.cell.volume)*np.eye(3))/self.mass_press
            if not self.anisotropic:
                G = np.trace(G)
            if self.vol_constraint:
                G -= np.trace(G)/self.dim*np.eye(self.dim)
            # iL G_g h/4
            self.vel_press += G*self.timestep_press/4
            if chainvel0 is not None:
                # iL v_{xi} v_g h/8
                self.vel_press *= np.exp(-self.timestep_press*chainvel0/8)

        # first part of the barostat velocity tensor update
        update_baro_vel()

        # iL v_g h/2
        if self.anisotropic:
            Dr, Qg = np.linalg.eigh(self.vel_press)
            Daccr = np.diagflat(np.exp(Dr*self.timestep_press/2))
            rot_mat = np.dot(np.dot(Qg, Daccr), Qg.T)
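            # Qg @ diagflat(exp(Dr*h/2)) @ Qg.T is the matrix exponential
            # exp(vel_press * h/2) of the symmetric barostat velocity tensor,
            # evaluated through its eigendecomposition.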
            pos_new = np.dot(iterative.pos, rot_mat)
            rvecs_new = np.dot(iterative.rvecs, rot_mat)
        else:
            c = np.exp(self.vel_press*self.timestep_press/2)
            pos_new = c*iterative.pos
            rvecs_new = c*iterative.rvecs

        # update the positions and cell vectors
        iterative.ff.update_pos(pos_new)
        iterative.pos[:] = pos_new
        iterative.ff.update_rvecs(rvecs_new)
        iterative.rvecs[:] = rvecs_new

        # update the potential energy
        iterative.gpos[:] = 0.0
        iterative.vtens[:] = 0.0
        iterative.epot = iterative.ff.compute(iterative.gpos,iterative.vtens)

        # -iL (v_g + Tr(v_g)/ndof) h/2
        if self.anisotropic:
            if self.vol_constraint:
                Dg, Eg = np.linalg.eigh(self.vel_press)
            else:
                Dg, Eg = np.linalg.eigh(self.vel_press+(np.trace(self.vel_press)/iterative.ndof)*np.eye(3))
            Daccg = np.diagflat(np.exp(-Dg*self.timestep_press/2))
            rot_mat = np.dot(np.dot(Eg, Daccg), Eg.T)
            vel_new = np.dot(iterative.vel, rot_mat)
        else:
            vel_new = np.exp(-((1.0+3.0/iterative.ndof)*self.vel_press)*self.timestep_press/2) * iterative.vel

        # update the velocities and the kinetic energy
        iterative.vel[:] = vel_new
        iterative.ekin = iterative._compute_ekin()

        # second part of the barostat velocity tensor update
        update_baro_vel()