Example #1
    def _learnStep(self):
        """ Main part of the algorithm. """
        I = eye(self.numParameters)
        self._produceSamples()
        utilities = self.shapingFunction(self._currentEvaluations)
        utilities /= sum(utilities)  # make the utilities sum to 1
        if self.uniformBaseline:
            utilities -= 1./self.batchSize
        samples = array(map(self._base2sample, self._population))

        dCenter = dot(samples.T, utilities)
        covGradient = dot(array([outer(s,s) - I for s in samples]).T, utilities)
        covTrace = trace(covGradient)
        covGradient -= covTrace/self.numParameters * I
        dA = 0.5 * (self.scaleLearningRate * covTrace/self.numParameters * I
                    +self.covLearningRate * covGradient)

        self._lastLogDetA = self._logDetA
        self._lastInvA = self._invA

        self._center += self.centerLearningRate * dot(self._A, dCenter)
        self._A = dot(self._A, expm2(dA))
        self._invA = dot(expm2(-dA), self._invA)
        self._logDetA += 0.5 * self.scaleLearningRate * covTrace
        if self.storeAllDistributions:
            self._allDistributions.append((self._center.copy(), self._A.copy()))
Example #2
    def _learnStep(self):
        """ Main part of the algorithm. """
        I = eye(self.numParameters)
        self._produceSamples()
        utilities = self.shapingFunction(self._currentEvaluations)
        utilities /= sum(utilities)  # make the utilities sum to 1
        if self.uniformBaseline:
            utilities -= 1. / self.batchSize
        samples = array(map(self._base2sample, self._population))

        dCenter = dot(samples.T, utilities)
        covGradient = dot(
            array([outer(s, s) - I for s in samples]).T, utilities)
        covTrace = trace(covGradient)
        covGradient -= covTrace / self.numParameters * I
        dA = 0.5 * (self.scaleLearningRate * covTrace / self.numParameters * I
                    + self.covLearningRate * covGradient)

        self._lastLogDetA = self._logDetA
        self._lastInvA = self._invA

        self._center += self.centerLearningRate * dot(self._A, dCenter)
        self._A = dot(self._A, expm2(dA))
        self._invA = dot(expm2(-dA), self._invA)
        self._logDetA += 0.5 * self.scaleLearningRate * covTrace
        if self.storeAllDistributions:
            self._allDistributions.append(
                (self._center.copy(), self._A.copy()))
Example #3
    def test_consistency(self):
        a = array([[0., 1], [-1, 0]])
        assert_array_almost_equal(expm(a), expm2(a))
        assert_array_almost_equal(expm(a), expm3(a))

        a = array([[1j, 1], [-1, -2j]])
        assert_array_almost_equal(expm(a), expm2(a))
        assert_array_almost_equal(expm(a), expm3(a))
Example #4
    def test_consistency(self):
        a = array([[0.,1],[-1,0]])
        assert_array_almost_equal(expm(a), expm2(a))
        assert_array_almost_equal(expm(a), expm3(a))

        a = array([[1j,1],[-1,-2j]])
        assert_array_almost_equal(expm(a), expm2(a))
        assert_array_almost_equal(expm(a), expm3(a))
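The two tests above simply assert that SciPy's three matrix-exponential implementations (expm, expm2, expm3) agree; expm2 and expm3 were deprecated and later removed. A standalone sketch of the same idea, checking expm against the closed-form rotation generated by the first test matrix (assumes only NumPy/SciPy):

import numpy as np
from scipy.linalg import expm

t = 0.7
a = np.array([[0., 1.], [-1., 0.]])
# exp(t * [[0, 1], [-1, 0]]) is a plane rotation by angle t
rotation = np.array([[np.cos(t), np.sin(t)],
                     [-np.sin(t), np.cos(t)]])
np.testing.assert_array_almost_equal(expm(a * t), rotation)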
Example #5
def rho(t, rho0, W, index=None):
    """ Calculate the probability vector at time t.

    :param t: The time
    :param rho0: Initial condition
    :param W: Transition matrix
    :param index: Optional component index; if given, only that entry of the
        probability vector is returned
    :type rho0: numpy 1d array
    :type W: numpy 2d array
    """
    p = scipy.dot(linalg.expm2(W * t), rho0)
    if index is None:
        return p
    return p[index]
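A usage sketch for rho() with a hypothetical two-state master equation d/dt rho = W*rho, using scipy.linalg.expm in place of the removed expm2; the columns of W sum to zero, so total probability is conserved:

import numpy as np
from scipy import linalg

W = np.array([[-1.0, 2.0],
              [1.0, -2.0]])   # column sums are zero
rho0 = np.array([1.0, 0.0])   # start fully in state 0
p = np.dot(linalg.expm(W * 3.0), rho0)
print(p, p.sum())             # approaches [2/3, 1/3]; total stays 1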
Example #6
def discretized_exponentials(dtau,k,c,m,values): #{{{
    """
    Returns a dictionary of the discretized exponentials of the constants
    Hamiltonian terms, as well as the exponentials of all values the decoupled
    potential terms can take.
    """

    exp_dictionary['k'] = expm2(-dtau * k)
    exp_dictionary['c'] = expm2(-dtau * c)
    exp_dictionary['m'] = expm2(-dtau * m)

    exp_dictionary['values'] = exp(values)

    return exp_dictionary #}}}
Example #7
def discretized_exponentials(dtau, k, c, m, values):  #{{{
    """
    Returns a dictionary of the discretized exponentials of the constants
    Hamiltonian terms, as well as the exponentials of all values the decoupled
    potential terms can take.
    """

    exp_dictionary['k'] = expm2(-dtau * k)
    exp_dictionary['c'] = expm2(-dtau * c)
    exp_dictionary['m'] = expm2(-dtau * m)

    exp_dictionary['values'] = exp(values)

    return exp_dictionary  #}}}
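A short sketch of why the constant terms above are worth exponentiating once and reusing, presumably for a split-operator / Trotter scheme: for a small step dtau, expm(-dtau * (k + c)) is reproduced to O(dtau**2) by the product of the individual exponentials. scipy.linalg.expm stands in for the removed expm2 and the 2x2 matrices are hypothetical:

import numpy as np
from scipy.linalg import expm

dtau = 1e-3
k = np.array([[0.0, 1.0], [1.0, 0.0]])
c = np.diag([1.0, 2.0])

exact = expm(-dtau * (k + c))
split = expm(-dtau * k).dot(expm(-dtau * c))
print(np.max(np.abs(exact - split)))   # O(dtau**2) splitting error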
Example #8
def xNES(f, x0, maxEvals=1e6, verbose=False, targetFitness= -1e-10):
    """ Exponential NES (xNES), as described in 
    Glasmachers, Schaul, Sun, Wierstra and Schmidhuber (GECCO'10).
    Maximizes a function f. 
    Returns (best solution found, corresponding fitness).
    """
    dim = len(x0)  
    I = eye(dim)
    learningRate = 0.6 * (3 + log(dim)) / dim / sqrt(dim)
    batchSize = 4 + int(floor(3 * log(dim)))    
    center = x0.copy()
    A = eye(dim)  # sqrt of the covariance matrix
    numEvals = 0
    bestFound = None
    bestFitness = -Inf
    while numEvals + batchSize <= maxEvals and bestFitness < targetFitness:
        # produce and evaluate samples
        samples = [randn(dim) for _ in range(batchSize)]
        fitnesses = [f(dot(A, s) + center) for s in samples]
        if max(fitnesses) > bestFitness:
            bestFitness = max(fitnesses)
            bestFound = samples[argmax(fitnesses)]
        numEvals += batchSize 
        if verbose: print "Step", numEvals / batchSize, ":", max(fitnesses), "best:", bestFitness
        #print A
        # update center and variances
        utilities = computeUtilities(fitnesses)
        center += dot(A, dot(utilities, samples))
        covGradient = sum([u * (outer(s, s) - I) for (s, u) in zip(samples, utilities)])
        A = dot(A, expm2(0.5 * learningRate * covGradient))                      

    return bestFound, bestFitness
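xNES() relies on a computeUtilities() helper (rank-based fitness shaping) that is not shown here. Below is a minimal sketch of one common choice, the shaping used in the xNES paper normalized to a zero-sum baseline; it is an assumption, not necessarily the helper used by the original project:

from numpy import argsort, array, log, maximum

def computeUtilities(fitnesses):
    n = len(fitnesses)
    ranks = argsort(argsort(fitnesses))            # 0 = worst, n-1 = best
    raw = maximum(0.0, log(n / 2.0 + 1.0) - log(n - ranks))
    return raw / raw.sum() - 1.0 / n               # utilities sum to zero

print(computeUtilities(array([3.0, 1.0, 2.0, 5.0])))

With such a helper in scope, a call like xNES(lambda x: -dot(x, x), ones(5)) should drive the best fitness toward its maximum of 0.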
Example #9
def cohenpid(A,B,C,kp):
    m_t = 0.2
    Astate = [[B,A,1],[-1,0,0],[0,-1,0]]
    Avar = [[-C,0,0],[0,-1,0],[0,0,-1]]
    forcingfunc = [[kp],[0],[0]]
    Amat = np.dot(linalg.inv(Astate),Avar)
    Bmat = np.dot(linalg.inv(Astate),forcingfunc)
    Xo = -1*np.dot(linalg.inv(Amat),Bmat*m_t)
    tfinal = 10  # simulation period
    dt = 0.005
    t = np.arange(0, tfinal, dt)
    entries = len(t)
    x = np.zeros((entries,1))
    Astep = m_t
    Bv = kp*Astep
    for i in range(0,entries):
        X = np.dot(1 - linalg.expm2(Amat*t[i]), Xo)
        x[i] = X[0]
    
    t0 = 0
    for j in range(0,entries-1):
        if np.sign(0-x[j])==0:
            t0 = t[j]            
        if np.sign((0.5*Bv) - x[j]) != np.sign((0.5*Bv) -x[j+1]):
            t2 = np.interp(0.5*Bv,[x[j][0],x[j+1][0]],[t[j],t[j+1]])
        if np.sign((0.632*Bv) - x[j]) != np.sign((0.632*Bv) -x[j+1]):
            t3 = np.interp(0.632*Bv,[x[j][0],x[j+1][0]],[t[j],t[j+1]])
    t1 = (t2 - (np.log(2))*t3)/(1 - np.log(2))
    tau = t3-t1
    tdel = t1- t0
    K= Bv/Astep
    r = tdel/tau
    kccc = (1/(r*K))*(0.9 + (r/12))
    ticc = tdel*(30 + (3*r))/(9 + (20*r))
    return kccc,ticc
Example #10
def IntervalPdfFromPH(alpha, A, intBounds):
    """
    Returns the approximate probability density function of a
    continuous phase-type distribution, based on the 
    probability of falling into intervals.
    
    Parameters
    ----------
    alpha : vector, shape (1,M)
        The initial probability vector of the phase-type
        distribution.
    A : matrix, shape (M,M)
        The transient generator matrix of the phase-type
        distribution.
    intBounds : vector, shape (K)
        The array of interval boundaries. The pdf is the
        probability of falling into an interval divided by
        the interval length. 
        If the size of intBounds is K, the size of the result is K-1.
    prec : double, optional
        Numerical precision to check if the input is a valid
        phase-type distribution. The default value is 1e-14
    
    Returns
    -------
    x : matrix of doubles, shape(K-1,1)
        The points at which the pdf is computed. It holds the center of the 
        intervals defined by intBounds.
    y : matrix of doubles, shape(K-1,1)
        The values of the density function at the corresponding "x" values
    
    Notes
    -----
    This method is more suitable for comparisons with empirical
    density functions than the exact one (given by PdfFromPH).
    """

    if butools.checkInput and not CheckPHRepresentation(alpha, A):
        raise Exception(
            "IntervalPdfFromPH: Input is not a valid PH representation!")

    steps = len(intBounds)
    x = [(intBounds[i + 1] + intBounds[i]) / 2.0 for i in range(steps - 1)]
    y = [(np.sum(alpha * expm2((A * intBounds[i]).A)) - np.sum(alpha * expm2(
        (A * intBounds[i + 1]).A))) / (intBounds[i + 1] - intBounds[i])
         for i in range(steps - 1)]
    return (np.array(x), np.array(y))
Example #11
def IntervalPdfFromPH (alpha, A, intBounds):
    """
    Returns the approximate probability density function of a
    continuous phase-type distribution, based on the 
    probability of falling into intervals.
    
    Parameters
    ----------
    alpha : vector, shape (1,M)
        The initial probability vector of the phase-type
        distribution.
    A : matrix, shape (M,M)
        The transient generator matrix of the phase-type
        distribution.
    intBounds : vector, shape (K)
        The array of interval boundaries. The pdf is the
        probability of falling into an interval divided by
        the interval length. 
        If the size of intBounds is K, the size of the result is K-1.
    prec : double, optional
        Numerical precision to check if the input is a valid
        phase-type distribution. The default value is 1e-14
    
    Returns
    -------
    x : matrix of doubles, shape(K-1,1)
        The points at which the pdf is computed. It holds the center of the 
        intervals defined by intBounds.
    y : matrix of doubles, shape(K-1,1)
        The values of the density function at the corresponding "x" values
    
    Notes
    -----
    This method is more suitable for comparisons with empirical
    density functions than the exact one (given by PdfFromPH).
    """

    if butools.checkInput and not CheckPHRepresentation (alpha, A):
        raise Exception("IntervalPdfFromPH: Input is not a valid PH representation!")

    steps = len(intBounds)
    x = [(intBounds[i+1]+intBounds[i])/2.0 for i in range(steps-1)]
    y = [(np.sum(alpha*expm2((A*intBounds[i]).A)) - np.sum(alpha*expm2((A*intBounds[i+1]).A)))/(intBounds[i+1]-intBounds[i]) for i in range(steps-1)]
    return (np.array(x), np.array(y))
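A usage sketch with a hypothetical two-phase PH representation (alpha, A), skipping the butools input check and using scipy.linalg.expm in place of the removed expm2; the interval densities are compared with the exact pdf f(x) = alpha * expm(A*x) * a, where a = -A*1:

import numpy as np
from scipy.linalg import expm

alpha = np.array([0.6, 0.4])
A = np.array([[-3.0, 1.0],
              [0.5, -2.0]])
a = -A.dot(np.ones(2))                       # exit rate vector

bounds = np.linspace(0.0, 2.0, 21)
x = (bounds[1:] + bounds[:-1]) / 2.0
surv = np.array([alpha.dot(expm(A * b)).sum() for b in bounds])
y = (surv[:-1] - surv[1:]) / np.diff(bounds)

exact = np.array([alpha.dot(expm(A * xi)).dot(a) for xi in x])
print(np.max(np.abs(y - exact)))             # small for fine intervals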
Example #12
def get_weights(parent_params, ratemat):
    mapper = (parent_params > ones(len(parent_params)))
    weights = expm2(ratemat)
    m1 = weights[1, 1]
    m2 = weights[0, 1]
    if m1 == 0.0:
        m1 = boundprob(m1)
    if m2 == 0.0:
        m2 = boundprob(m2)
    return m1 * mapper + ~mapper * m2
Example #13
 def tell(self, samples, fitnesses):
     print fitnesses
     if max(fitnesses) > self.bestFitness:
         self.bestFitness = max(fitnesses)
         self.bestFound = samples[argmax(fitnesses)]
     self.numEvals += self.batchSize 
     if self.verbose: print "Step", self.numEvals / self.batchSize, ":", max(fitnesses), "best:", self.bestFitness
     #print A
      # update center and variances
     utilities = self.computeUtilities(fitnesses)
     self.center += dot(self.A, dot(utilities, samples))
     covGradient = sum([u * (outer(s, s) - self.I) for (s, u) in zip(samples, utilities)])
     self.A = dot(self.A, expm2(0.5 * self.learningRate * covGradient))                      
Example #14
def get_prob_t(pi, rates, edges_dict, edges):
    p_t = defaultdict()
    for parent, child in edges[::-1]:
        if args.model == "F81":
            p_t[parent, child] = subst_models.ptF81(pi, edges_dict[parent,
                                                                   child])
        elif args.model == "JC":
            p_t[parent, child] = subst_models.ptJC(n_chars, edges_dict[parent,
                                                                       child])
        elif args.model == "GTR":
            Q = subst_models.fnGTR(rates, pi)
            p_t[parent, child] = linalg.expm2(Q * edges_dict[parent, child])
    return p_t
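For the GTR branch above, the transition-probability matrix over a branch of length t is P(t) = expm(Q*t); when Q is a proper rate matrix (rows summing to zero), every row of P(t) sums to one. A hedged sketch with a hypothetical 2-state Q, using scipy.linalg.expm in place of the removed expm2:

import numpy as np
from scipy import linalg

Q = np.array([[-0.3, 0.3],
              [0.7, -0.7]])   # rows sum to zero
t = 0.8
P = linalg.expm(Q * t)
print(P, P.sum(axis=1))       # each row sums to 1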
Example #15
    def Hamiltonian_timedep_complete(self, targetion, omrabi, phase, detuning, wsec, eta, hspace, LDApprox = False):
        """
        everything in the full interaction frame of the ions including the
        harmonic oscillation (opposing to the original version from hartmut
        that still had the phonon energy nonzero)

        in addition, this version won't omit the exp(+- i omega_z terms t) in
        attached to creation/annihilation operators

        this is eqn 3.6 in Christian Roo's thesis.

        """

        # H = exp(i delta_c t+phase) omega(t) (sum_i address_error_i sigma_plus_i) (exp(i eta (a exp(-i omegaz t) + adag exp(i omegaz t)))) + h.c.
        # !!! note !!!
        # Here eta is assumed to be the same for all qubits, which is
        # not necessarily the case. A more correct version would allow for
        # individual etas for each qubit; that is not really required here
        # and would only slow down the code.

        targetion = targetion[::-1]

        # prefactor including the timedepending omega and the detuning+phase
        prefac = lambda t: np.exp(1j * (detuning * t + phase)) * omrabi(t) * 1/2.

        # coupling to the sideband. argument of the exponent/ld approximation
        LDpart = lambda t:  1j * eta[-1] * (hspace.operator_dict['a'] * np.exp(-1j * wsec * t ) + hspace.operator_dict['a_dag'] * np.exp(1j * wsec * t ) )

        # calculate the coupling based on the addressing errors
        sys = np.zeros((hspace.levels**hspace.nuions,hspace.levels**hspace.nuions))
        for k in xrange(hspace.nuions):
            sys += targetion[k]*hspace.operator_dict['raising'][:,:,k]

        # Lamb-Dicke approximation yes/no?
        # kron with qubits to get the hamiltonian
        if LDApprox:
            sys_ho = lambda t: npml.kron( sys, np.diag(np.ones(hspace.maxphonons+1)) + LDpart(t) )
        else:
            sys_ho = lambda t: npml.kron( sys, splg.expm2( LDpart(t)) )

        # multiply with rabi-frequency part
        H = lambda t: prefac(t) * sys_ho(t)

        # add h.c. part
        HT = lambda t: H(t)+H(t).transpose().conjugate()

        return HT
Example #16
 def tell(self, samples, fitnesses):
     print fitnesses
     if max(fitnesses) > self.bestFitness:
         self.bestFitness = max(fitnesses)
         self.bestFound = samples[argmax(fitnesses)]
     self.numEvals += self.batchSize
     if self.verbose:
         print "Step", self.numEvals / self.batchSize, ":", max(
             fitnesses), "best:", self.bestFitness
     #print A
     # update center and variances
     utilities = self.computeUtilities(fitnesses)
     self.center += dot(self.A, dot(utilities, samples))
     covGradient = sum(
         [u * (outer(s, s) - self.I) for (s, u) in zip(samples, utilities)])
     self.A = dot(self.A, expm2(0.5 * self.learningRate * covGradient))
Example #17
    def compute_matrix_exp(self, rate_matrix, dwell_time):
        """
        Computes ``exp(Qt)``

        Parameters
        ----------
        rate_matrix : RateMatrix
        dwell_time : float

        Returns
        -------
        expQt_matrix : RateMatrix
        """
        Q = rate_matrix.as_npy_array()
        expQt = expm2(Q * dwell_time)
        expQt_matrix = rate_matrix.copy()
        expQt_matrix.data_frame.values[:, :] = expQt
        return expQt_matrix
Example #18
def U_spinchain(xyz,chainlength,J,J1):
    #function called to get exp(-iH) where H is the spin chain hamiltonian
    H = 0
    expH = 0
    if xyz == 'z':
        H = J1*sigma_n('z',0,chainlength)*sigma_n('z',1,chainlength)
        for i in range(1,chainlength-1):
            H = H + J*sigma_n('z',i,chainlength)*sigma_n('z',i+1,chainlength)
    elif xyz == 'xy':
        H = J1*(sigma_n('x',0,chainlength)*sigma_n('x',1,chainlength) + sigma_n('y',0,chainlength)*sigma_n('y',1,chainlength))
        for i in range(1,chainlength-1):
            H = H + J*(sigma_n('x',i,chainlength)*sigma_n('x',i+1,chainlength) + sigma_n('y',i,chainlength)*sigma_n('y',i+1,chainlength))
        #for closed system where nth qubit interacts with first qubit
        #H = H + J*(sigma_n('x',chainlength-1,chainlength)*sigma_n('x',0,chainlength) + sigma_n('y',chainlength-1,chainlength)*sigma_n('y',0,chainlength))
    else:
        print('H_spinchain: invalid input')
    expH = linalg.expm2(-1j*H)
    return expH
Example #19
    def compute_matrix_exp(self, rate_matrix, dwell_time):
        """
        Computes ``exp(Qt)``

        Parameters
        ----------
        rate_matrix : RateMatrix
        dwell_time : float

        Returns
        -------
        expQt_matrix : RateMatrix
        """
        Q = rate_matrix.as_npy_array()
        expQt = expm2(Q * dwell_time)
        expQt_matrix = rate_matrix.copy()
        expQt_matrix.data_frame.values[:,:] = expQt
        return expQt_matrix
Example #20
def system(k_c,t_i):
    import numpy as np
   
    tfinal = 200
    dt = 0.2
    t = np.arange(0, tfinal, dt)
    entries = len(t)
    x = np.zeros(entries)
#    k_c = np.zeros(num)
#    t_i = np.zeros(num)
    # coefficients of the transfer function Gp = kp/(s^3 + A*s^2 + B*s + C)
    A = 3
    B = 3
    C = 1
    kp = 0.125
    SP = 1
    kcst = np.arange(0, 100, dt)
    # Relationship between kc and Ti obtained through the direct substitution method
    tist = kp*kcst*A**2/(((A*B) - C - (kp*kcst))*(C + (kp*kcst)))
#    kczn ,tizn  =ZN(A,B,C,kp) # Ziegler-Nichols settings via function ZN
    kc =k_c
    ti = t_i
    firstrow =[(ti/(kp*kc)*(C + (kp*kc))), (ti/(kp*kc))*B, (ti/(kp*kc))*A, (ti/(kp*kc))] 
    mat = [firstrow,[-1 ,0,0,0],[0,-1,0,0],[0, 0 ,-1,0]]
    Amat = -1*linalg.inv(mat)
    rootsA = np.array(linalg.eigvals(Amat))
    Bmat = np.dot(linalg.inv(mat),[[1],[0] ,[0],[0]])
    Xo = -1*np.dot(linalg.inv(Amat),Bmat*SP)
    print rootsA
    R = np.sign(rootsA.real) 
    I = np.sign(rootsA.imag) 
    if ((R==-1).all()== True): 
        print rootsA
        for i in range(0,entries):
            X = np.dot(1 - linalg.expm2(Amat*t[i]), Xo)
            x[i] = X[0]
    else:
        x = None
        
    kc = k_c
    ti = t_i
    x = np.transpose(x)
    
    return {'x':x ,'kcst':kcst,'tist':tist}
Example #21
 def evolution(self,t=[],**karg):
     '''
     This method returns the matrix representation of the time evolution operator.
     Parameters:
         t: 1D array-like
             The time mesh.
         karg: dict, optional
             Other parameters.
     Returns:
         result: 2D ndarray
             The matrix representation of the time evolution operator.
     '''
     nmatrix=len(self.generators['h'].table)
     result=eye(nmatrix,dtype=complex128)
     nt=len(t)
     for i,time in enumerate(t):
         if i<nt-1:
             result=dot(expm2(-1j*self.matrix(t=time,**karg)*(t[i+1]-time)),result)
     return result
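A standalone sketch of the same time-ordered product, U = expm(-1j*H(t_{n-1})*dt) ... expm(-1j*H(t_0)*dt), for a hypothetical driven two-level system; scipy.linalg.expm replaces the removed expm2, and the result stays unitary because each factor is unitary:

import numpy as np
from scipy.linalg import expm

sx = np.array([[0.0, 1.0], [1.0, 0.0]])
sz = np.array([[1.0, 0.0], [0.0, -1.0]])

def H(t):
    return 0.5 * sz + 0.3 * np.cos(2.0 * t) * sx

t = np.linspace(0.0, 5.0, 501)
U = np.eye(2, dtype=np.complex128)
for i in range(len(t) - 1):
    dt = t[i + 1] - t[i]
    U = expm(-1j * H(t[i]) * dt).dot(U)   # left-multiply, as in the loop above

print(np.allclose(U.conj().T.dot(U), np.eye(2)))   # True: U is unitary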
Example #22
    def discretize(self, A, B, mthd):

        if mthd == 1:  # Matrix Exponential
            Ad = linalg.expm2(A * self.dt)
            eye = np.eye(4)
            Asinv = linalg.solve(A, eye)
            Bd = np.dot(np.dot(Asinv, (Ad - eye)), B)

        elif mthd == 2:  # Zero order hold, allows A to be singular, needed for model 3
            C = np.eye(self.njts * 4)
            D = 0
            Ad, Bd, Cd, Dd, dt = spicycigs.cont2discrete((A, B, C, D),
                                                         self.dt,
                                                         method='bilinear')
        else:
            rospy.signal_shutdown('No discretization method selected')

        Ad = np.matrix(Ad)
        Bd = np.matrix(Bd)

        return Ad, Bd
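A sketch of the matrix-exponential branch (mthd == 1) above: for invertible A, the exact zero-order-hold discretization is Ad = expm(A*dt) and Bd = inv(A) * (Ad - I) * B. scipy.linalg.expm replaces the removed expm2, and the 2x2 system is hypothetical (damped, so A is invertible):

import numpy as np
from scipy import linalg

A = np.array([[0.0, 1.0],
              [-2.0, -3.0]])
B = np.array([[0.0],
              [1.0]])
dt = 0.05

Ad = linalg.expm(A * dt)
Bd = linalg.solve(A, Ad - np.eye(2)).dot(B)   # inv(A) * (Ad - I) * B
print(Ad)
print(Bd)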
Example #23
def simulate(kc, ti):
    tfinal = 100
    dt = 0.2
    t = np.arange(0, tfinal, dt)
    entries = len(t)
    x = np.zeros(entries)
    Amat, Bmat = simulate_matrices(A, B, C, kp, kc, ti)
    
    Xo = -1*np.dot(linalg.inv(Amat), Bmat*SP)
  
    if stable(Amat): 
       
        for i in range(0,entries):
            X = np.dot(1 - linalg.expm2(Amat*t[i]), Xo)
            x[i] = X[0]
    else:
        x = np.NaN
        
    x = np.transpose(x)
    ik = kcfunction(kc,ti,x,entries,t,tfinal,dt,SP)   
    return x, ik
Example #24
def U_spinchain(xyz, chainlength, J, J1):
    #function called to get exp(-iH) where H is the spin chain hamiltonian
    H = 0
    expH = 0
    if xyz == 'z':
        H = J1 * sigma_n('z', 0, chainlength) * sigma_n('z', 1, chainlength)
        for i in range(1, chainlength - 1):
            H = H + J * sigma_n('z', i, chainlength) * sigma_n(
                'z', i + 1, chainlength)
    elif xyz == 'xy':
        H = J1 * (sigma_n('x', 0, chainlength) * sigma_n('x', 1, chainlength) +
                  sigma_n('y', 0, chainlength) * sigma_n('y', 1, chainlength))
        for i in range(1, chainlength - 1):
            H = H + J * (sigma_n('x', i, chainlength) * sigma_n(
                'x', i + 1, chainlength) + sigma_n('y', i, chainlength) *
                         sigma_n('y', i + 1, chainlength))
        #for closed system where nth qubit interacts with first qubit
        #H = H + J*(sigma_n('x',chainlength-1,chainlength)*sigma_n('x',0,chainlength) + sigma_n('y',chainlength-1,chainlength)*sigma_n('y',0,chainlength))
    else:
        print('H_spinchain: invalid input')
    expH = linalg.expm2(-1j * H)
    return expH
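U_spinchain() relies on a sigma_n(axis, i, chainlength) helper that is not shown. Below is a minimal sketch of one plausible implementation: the chosen Pauli matrix embedded on site i via Kronecker products, returned as np.matrix so that '*' acts as operator multiplication, matching how the snippet uses it. This is an assumption, not necessarily the original helper.

import numpy as np

_PAULI = {'x': np.array([[0, 1], [1, 0]], dtype=complex),
          'y': np.array([[0, -1j], [1j, 0]], dtype=complex),
          'z': np.array([[1, 0], [0, -1]], dtype=complex)}

def sigma_n(axis, i, chainlength):
    op = np.array([[1.0 + 0j]])
    for site in range(chainlength):
        factor = _PAULI[axis] if site == i else np.eye(2, dtype=complex)
        op = np.kron(op, factor)
    return np.matrix(op)

# e.g. a ZZ coupling between the first two sites of a 3-spin chain:
print(sigma_n('z', 0, 3) * sigma_n('z', 1, 3))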
Example #25
File: xnes.py Project: NASCENCE/alg
    def start(self, maxEvals=1e6, verbose=False, target_fitness= -1e-10, sigma = 1.0, pre_iteration_hook=dummy, post_iteration_hook=dummy_x, best_found_hook=dummy_x):
        """ Exponential NES (xNES), as described in
        Glasmachers, Schaul, Sun, Wierstra and Schmidhuber (GECCO'10).
        Maximizes a function f.
        Returns (best solution found, corresponding fitness).
        """

        I = eye(self.dim)
        n_iterations = 0
        while self.numEvals + self.population <= maxEvals and self.bestFitness < target_fitness:
            pre_iteration_hook(self, self.center)
            # produce and evaluate samples
            print "n evals:", self.numEvals, self.bestFitness
            if verbose: print "Current mean:", self.center, "max min cov:", np.max(self.A), np.min(self.A)
            samples = [randn(self.dim) for _ in range(self.population / 2)]
            samples.extend([-s for s in samples])

            realSamples = [dot(self.A, s) + self.center for s in samples]
            # realSamples.extend([-dot(self.A, s) + self.center for s in samples])
            fitnesses = [self.f(s) for s in realSamples]
            if max(fitnesses) > self.bestFitness:
                self.bestFitness = max(fitnesses)
                self.bestFound = realSamples[argmax(fitnesses)].copy()
                best_found_hook(self.center, self.A, self.bestFound)

            self.numEvals += self.population
            if verbose: print "Step", self.numEvals / self.population, ":", max(fitnesses), "best:", self.bestFitness, realSamples[argmax(fitnesses)]
            #print A
            # update self.center and variances
            utilities = computeUtilities(fitnesses)
            self.center += dot(self.A, dot(utilities, samples)) * self.center_learning_rate
            covGradient = sum([u * (outer(s, s) - I) for (s, u) in zip(samples, utilities)])
            self.A = dot(self.A, expm2(0.5 * self.learningRate * covGradient))
            post_iteration_hook(self.center, self.A, n_iterations)
            n_iterations += 1

        print "Step", self.numEvals / self.population, "best:", self.bestFitness
        return self.bestFound, self.bestFitness
Example #26
    def update_amplitudes(self, dt, update_ham=True, H=None, Ct=None):
        """Updates the amplitudes of the trajectory in the bundle.
        Solves d/dt C = -i H C via the computation of
        exp(-i H(t) dt) C(t)."""
        if update_ham:
            self.update_matrices()

        # if no Hamiltonian is passed, use the current effective
        # Hamiltonian
        if H is None:
            Hmat = self.Heff
        else:
            Hmat = H

        # if no vector of amplitudes are supplied (to propagate),
        # propagate the current amplitudes
        if Ct is None:
            old_amp = self.amplitudes()
        else:
            old_amp = Ct

        new_amp = np.zeros(self.nalive, dtype=complex)

        B = -1j * Hmat * dt

        if self.nalive < 150:
            # Eigen-decomposition
            umat = sp_linalg.expm2(B)
        else:
            # Pade approximation
            umat = sp_linalg.expm(B)

        new_amp = np.dot(umat, old_amp)

        for i in range(len(self.alive)):
            self.traj[self.alive[i]].update_amplitude(new_amp[i])
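A standalone sketch of the propagation step described in the docstring, C(t+dt) = exp(-1j*H*dt) C(t), for a hypothetical Hermitian 2x2 Hmat. It uses scipy.linalg.expm (the branch labelled Pade above), since expm2 has been removed from recent SciPy releases; the norm of the amplitude vector is preserved because H is Hermitian:

import numpy as np
from scipy import linalg as sp_linalg

Hmat = np.array([[1.0, 0.2 - 0.1j],
                 [0.2 + 0.1j, -0.5]])
dt = 0.01
old_amp = np.array([1.0, 0.0], dtype=complex)

umat = sp_linalg.expm(-1j * Hmat * dt)
new_amp = umat.dot(old_amp)
print(np.linalg.norm(old_amp), np.linalg.norm(new_amp))   # both 1.0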
Example #27
def Ugate(A):
     return Qobj(expm2(2*np.pi*i*A))
Example #28
for k in range(0,num):
    if k==num-1:
        kc = kczn
        ti = tizn
    else:    
        kc =k_c[k]
        ti = t_i[k]
    firstrow =[(ti/(kp*kc)*(C + (kp*kc))), (ti/(kp*kc))*B, (ti/(kp*kc))*A, (ti/(kp*kc))] 
    mat = [firstrow,[-1 ,0,0,0],[0,-1,0,0],[0, 0 ,-1,0]]
    Amat = -1*linalg.inv(mat)
    rootsA = np.array(linalg.eigvals(Amat))
    Bmat = np.dot(linalg.inv(mat),[[1],[0] ,[0],[0]])
    Xo = -1*np.dot(linalg.inv(Amat),Bmat*SP)
   
    if (rootsA.real < 0).all():
        for i in range(0,entries):
            X = np.dot(1 - linalg.expm2(Amat*t[i]), Xo)
            x[i,k] = X[0]
    else:
        x[:,k] = np.NaN
        
    
    k_c[k] = kc
    t_i[k] = ti
kc = k_c
ti = t_i
x = x.T
fig = plotgraphs(kc,ti,x,num,entries,t,tfinal,dt,SP,kcst,tist)
Example #29
def Ugate(A, N):
    return Qobj(expm2(2 * np.pi * i * N * A))
Example #30
 def test_zero(self):
     a = array([[0.,0],[0,0]])
     assert_array_almost_equal(expm(a),[[1,0],[0,1]])
     assert_array_almost_equal(expm2(a),[[1,0],[0,1]])
     assert_array_almost_equal(expm3(a),[[1,0],[0,1]])
Example #31
 def getRandomElement(self):
     #exp(alpha_i * A_i) should be randomly distributed
     #over SU(3) for random alpha_i, A_i generators of SU(3)
     return linalg.expm2(1j *
                         self.GELLMANN_MATRICES.T.dot(np.random.rand(8)))
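getRandomElement() exponentiates 1j times a random real combination of the eight Gell-Mann matrices, the Hermitian traceless generators of SU(3). An analogous sketch for SU(2) with the Pauli matrices, using scipy.linalg.expm in place of the removed expm2; the result is unitary with determinant 1:

import numpy as np
from scipy import linalg

paulis = np.array([[[0, 1], [1, 0]],
                   [[0, -1j], [1j, 0]],
                   [[1, 0], [0, -1]]], dtype=complex)

alpha = np.random.rand(3)
g = linalg.expm(1j * np.tensordot(alpha, paulis, axes=1))
print(np.allclose(g.conj().T.dot(g), np.eye(2)),   # unitary
      np.isclose(np.linalg.det(g), 1.0))           # determinant 1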
Example #32
    def Hamiltonian(self, pulse, params, LDApprox=True):
        ''' Hamiltonian definition using hspace.operator_dict '''
        opdict = params.hspace.operator_dict

        targetion = pulse.targetion[::-1]
        omrabi = pulse.omrabi
        phase = pulse.phase
        detuning = pulse.detuning
        wsec = params.omz
        eta = params.eta
        hspace = params.hspace

        # prefactor including the timedepending omega and the detuning+phase
        prefac = np.exp(1j * phase) * omrabi * 1 / 2.

        # coupling to the sideband. argument of the exponent/ld approximation
        LDpart = 1j * eta[-1] * (opdict['a'] + opdict['a_dag'])
        # calculate the coupling based on the addressing errors

        # Lamb-Dicke approximation yes/no?
        # kron with qubits to get the hamiltonian
        # here we'll use the fact that eta may be different for each ion
        sys_ho = np.zeros(((hspace.maxphonons+1)*hspace.levels**hspace.nuions, \
                          (hspace.maxphonons+1)*hspace.levels**hspace.nuions ), \
                              np.complex128)
        if LDApprox:
            for k in xrange(hspace.nuions):
                #etak = eta[-1] if pulse.ion == -1 else eta[k]
                sys_ho += npml.kron( targetion[k]*opdict['raising'][:,:,k],\
                               np.diag(np.ones(hspace.maxphonons+1)) + \
                               1j * eta[k] * (opdict['a']+opdict['a_dag']) )
        else:
            for k in xrange(hspace.nuions):
                #etak = eta[-1] if pulse.ion == -1 else eta[k]
                sys_ho += npml.kron( targetion[k]*opdict['raising'][:,:,k],\
                               splg.expm2(1j * eta[k] * (opdict['a']+opdict['a_dag'])) )

        # multiply with rabi-frequency part
        H = prefac * sys_ho

        # diagonal terms
        sysz = np.zeros(
            (hspace.levels**hspace.nuions, hspace.levels**hspace.nuions))
        for k in xrange(hspace.nuions):
            sysz += opdict['sigz'][:, :, k] * 1 / 2.
        energies = -detuning * npml.kron(sysz, opdict['id_a']) + \
                   wsec * npml.kron(np.diag(np.ones(hspace.levels**hspace.nuions)), \
                                        np.dot(opdict['a_dag'], opdict['a']) )
        # subtract a zero offset
        energies -= np.diag(energies[0, 0] * np.ones_like(np.diag(energies)))

        # add h.c. part
        HT = H + H.transpose().conjugate() + energies

        # diagonal elements of matrix for basis transformation (lasertoqc)
        lqc = np.diag(HT)

        if pulse.type == 'Z':
            HT = HT + np.diag(self.ACshift_corr(pulse, params))
            # to fix discrepancy between lab/Volkmar and here
            if pulse.theta < 0:
                HT = -HT
                lqc = -lqc

        return HT, lqc
Example #33
            COMPONENT1 = np.kron(COMPONENT1, pauli_x)
            COMPONENT2 = np.kron(COMPONENT2, pauli_y)
        elif u == v + 1:
            COMPONENT1 = np.kron(COMPONENT1, pauli_x)
            COMPONENT2 = np.kron(COMPONENT2, pauli_y)
        else:
            COMPONENT1 = np.kron(COMPONENT1, pauli_i)
            COMPONENT2 = np.kron(COMPONENT2, pauli_i)

    if v == 0:
        CHAIN1 = COMPONENT1 + COMPONENT2
    else:
        CHAIN2 = CHAIN2 + COMPONENT1 + COMPONENT2
A = (J1 * CHAIN1 + J * CHAIN2)
A = 1j * A
EXP1 = linalg.expm2(-1 * A)
EXP2 = linalg.expm2(A)
EXP1 = np.kron(I_P, EXP1)
EXP2 = np.kron(I_P, EXP2)
######################################
#INITIAL STATE
INITIALCOIN = (1 / np.sqrt(2)) * np.matrix([[1], [1]])
INITIALCOIN = np.outer(INITIALCOIN, INITIALCOIN)
INITIALCHAIN = np.matrix([[1, 0], [0, 0]])
SYSTEM = np.zeros((2 * STEPS + 1))
SYSTEM[STEPS + 1] = 1
SYSTEM = np.outer(SYSTEM, SYSTEM)
A = np.kron(INITIALCOIN, INITIALCHAIN)
for z in range(CHAINLENGTH - 2):
    A = np.kron(A, INITIALCHAIN)
SYSTEM = np.kron(SYSTEM, A)
Example #34
def Generate_Chetans_Prethermal(L, T2, J, epsilon, hz, hy, hx):
    print("\nUSING CHETAN's FUNCTION\n")
    # Hamiltonian
    ########## First Half of evolution
    # Precession term:
    Zprecession = np.zeros((2**L, 2**L), dtype='complex')
    for i in range(L):
        mat = iden
        if i == 0:
            mat = sigmaz
        for k in range(1, L):
            if i == k:
                mat = outer(sigmaz, mat)
            else:
                mat = outer(iden, mat)
        Zprecession += mat * (-hz)

    Yprecession = np.zeros((2**L, 2**L), dtype='complex')
    for i in range(L):
        mat = iden
        if i == 0:
            mat = sigmay
        for k in range(1, L):
            if i == k:
                mat = outer(sigmay, mat)
            else:
                mat = outer(iden, mat)
        Yprecession += mat * (-hy)

    #Interaction term

    interations = np.zeros((2**L, 2**L), dtype='complex')
    #np.identity(2**L) * wei[0]

    for i in range(L):
        j = (i + 1) % L

        mat = iden
        if i == 0 or j == 0:
            mat = sigmaz

        for k in range(1, L):
            if j == k or i == k:
                mat = outer(sigmaz, mat)
            else:
                mat = outer(iden, mat)
        interations += mat * (-J)

    ########## Second Half of evolutions

    Xprecession = np.zeros((2**L, 2**L), dtype='complex')
    for i in range(L):
        mat = iden
        if i == 0:
            mat = sigmax
        for k in range(1, L):
            if i == k:
                mat = outer(sigmax, mat)
            else:
                mat = outer(iden, mat)
        Xprecession += mat * np.pi / 2 * (1 + epsilon)

    Uf1 = slinalg.expm2(
        -1j * (Zprecession + interations + Yprecession + Xprecession *
               (1 + hx)))
    Uf2 = slinalg.expm2(-1j * T2 *
                        (interations + Zprecession + hx * Xprecession))
    ##########

    #(U, s, V) = np.linalg.svd( np.dot(Uf2, Uf1))
    #print( "SVD: ", np.allclose( U.dot(np.diag(s).dot( V) ) , np.dot(Uf2, Uf1) ) )

    #print( s)
    #print( w)

    #print( "SVD: ", np.allclose( U.dot(np.diag(w).dot( V_prime) ) , np.dot(Uf2, Uf1) ) )

    return (v, w, np.conj(v).T, np.dot(Uf2, Uf1), res)
Example #35
def Generate_Francisco(args):

    L = args['L']
    Omega = args['Omega']

    T = 2 * np.pi / Omega

    J = args['J']
    U = args['U']

    A = args['A'] * Omega
    dA = args['dA'] * Omega

    fil = args['fil']

    print("\n Francisco\n")
    # Hamiltonian
    ########## First Half of evolution
    # Precession term:

    StaggeredZprecession = np.zeros((2**L, 2**L), dtype='complex64')
    for i in range(L):
        StaggeredZprecession += SigmaTerms(sigmaz, L, [i]) * (-1)**(i + 1)

    #print( StaggeredZprecession)
    #print( ""    )

    XXprec = np.zeros((2**L, 2**L), dtype='complex64')
    for i in range(L - 1):
        j = (i + 1)
        XXprec += SigmaTerms(sigmax, L, [i, j])

    YYprec = np.zeros((2**L, 2**L), dtype='complex64')
    for i in range(L - 1):
        j = (i + 1)
        YYprec += SigmaTerms(sigmay, L, [i, j])

    #print( XXprec + YYprec)
    #print( "")

    ZZprec = np.zeros((2**L, 2**L), dtype='complex64')
    for i in range(L - 1):
        j = (i + 1)
        ZZprec += SigmaTerms(sigmaz, L, [i, j])

    #print( ZZprec)
    #print( ""    )
    LinearTerm = np.zeros((2**L, 2**L), dtype='complex64')
    for i in range(L):
        LinearTerm += SigmaTerms(sigmaz, L, [i]) * (i + 1)
    #print( LinearTerm)
    #print( "")

    H1 = -(J / 2) * (XXprec + YYprec) + (U / 4) * ZZprec - (
        Omega - A) / 4 * StaggeredZprecession - (dA / 2) * LinearTerm
    H2 = -(J / 2) * (XXprec + YYprec) + (U / 4) * ZZprec - (
        Omega + A) / 4 * StaggeredZprecession + (dA / 2) * LinearTerm

    print("H1")
    #print( H1)
    #print( "")

    #plt.imshow( np.abs(H1) > 1e-10)
    #plt.show()

    #plt.imshow( np.abs(H2) > 1e-10)
    #plt.show()
    st = []
    for i in range(2**L):
        counter = 0
        temp = i
        while i > 0:
            counter += i % 2
            i = i / 2
        if counter == L / 2:
            st.append(temp)

    print(st)

    print("ST: ", len(st))
    for i in st:
        for o in range(2**L):
            if np.abs(H1[i][o]) > 1e-10 and (not o in st):
                print("PROBLEM", i, o)
    H1 = H1[st, :]
    H1 = H1[:, st]

    H2 = H2[st, :]
    H2 = H2[:, st]
    ## Only the columns of which are the sum of n different powers of two matter in the half-filling system.

    Uf1 = slinalg.expm2(-1j * T / 2 * H1)
    Uf2 = slinalg.expm2(-1j * T / 2 * H2)
    Floquet = Uf2.dot(Uf1)

    def xi(x):
        return 2 * x / np.pi * np.cos(np.pi * x / 2) / (1 - x**2)

    print("##########\n")
    print(xi((A - dA) / Omega))
    print(xi((A + dA) / Omega))
    print("")

    XXYYOdd = np.zeros((2**L, 2**L), dtype='complex64')
    XXYYEven = np.zeros((2**L, 2**L), dtype='complex64')

    for i in range(0, L - 1):
        j = i + 1
        if (i + 1) % 2 == 0:
            XXYYEven += SigmaTerms(sigmax, L, [i, j]) + SigmaTerms(
                sigmay, L, [i, j])
        else:
            XXYYOdd += SigmaTerms(sigmax, L, [i, j]) + SigmaTerms(
                sigmay, L, [i, j])

    HPrethermal = -xi((A + dA) / Omega) * (XXYYEven / 2) - xi(
        (A - dA) / Omega) * (XXYYOdd / 2) + (U / J) * (ZZprec / 4)
    HPrethermal = HPrethermal / L
    HPrethermal = HPrethermal[:, st]
    HPrethermal = HPrethermal[st, :]

    InfTemp = np.trace(HPrethermal)

    (w, v) = np.linalg.eigh(HPrethermal)
    eigHPrethermal = w
    HPrethermalEigenVectors = v
    groundstate = np.argmin(w)
    print("Range of HPrethermal Hamiltonian: ", np.min(w), np.max(w))
    states = []
    states.append(v[:, groundstate])

    rang = np.max(w) - np.min(w)
    delta = 0.1

    for i in range(30):
        k = randint(0, len(st) - 1)
        states.append(v[:, k])

    #print( len(states))

    (w, v) = EigenVectorsUnitary(Floquet)
    w = w / np.abs(w)
    eigFloquet = w

    U = v
    Udag = np.conj(U).T
    Diag = np.array(w)

    # for i in range(2**L):
    #     if np.abs(np.conj(v[:,i]).T.dot(Floquet).dot(v[:,i]) - w[i]) > 2e-6:
    #         print( np.abs(np.conj(v[:,i]).T.dot(Floquet).dot(v[:,i]) - w[i]))
    #         print( "PROBLEM")
    # for i in range(2**L):
    #     for o in range(2**L):
    #         if i==o: continue
    #         else:
    #             if np.abs(np.conj(v[:,o]).T.dot(Floquet).dot(v[:,i]) ) > 1e-6:
    #                 print( np.conj(v[:,o]).T.dot(Floquet).dot(v[:,i]) )
    #                 print( "prob")

    #plt.imshow(np.log( np.abs(Udag.dot(Floquet).dot(U)) - np.eye(2**L) ) )
    #print( np.max(np.abs(Udag.dot(Floquet).dot(U)) - np.eye(2**L)))
    #plt.show()
    #wa = np.angle(w)
    #wa.sort()
    #wa_ = np.angle(w_test)
    #wa_.sort()

    #print( wa)
    #print( "Major Difference: ", np.max(np.abs(wa-wa_)))

    #print( "Testing Decomposition")

    res = True
    #fig = plt.figure()
    #plt.imshow( np.real(Udag.dot(U) - np.eye(np.shape(Floquet)[0])))
    #fig2 = plt.figure()
    #plt.imshow( np.real(Udag.dot(U) - np.eye(np.shape(Floquet)[0])))
    print(np.max(np.abs(Udag.dot(U) - np.eye(np.shape(Floquet)[0]))))
    #plt.show()
    test1 = np.allclose(Udag.dot(U), np.eye(np.shape(Floquet)[0]))
    test2 = np.allclose(U.dot(Udag), np.eye(np.shape(Floquet)[0]))
    print(test1, test2)

    temp_big = np.max(np.abs(U.dot(np.diag(Diag)).dot(Udag) - Floquet))
    print("Big Diff Floquet:", temp_big)

    Obs_0 = []
    tempObs = SigmaTerms(sigmaz, L, [0])
    tempObs = tempObs[:, st]
    tempObs = tempObs[st, :]
    Obs_0.append(tempObs)

    tempObs = SigmaTerms(sigmaz, L, [L / 4])
    tempObs = tempObs[:, st]
    tempObs = tempObs[st, :]
    Obs_0.append(tempObs)

    tempObs = SigmaTerms(sigmaz, L, [L / 2])
    tempObs = tempObs[:, st]
    tempObs = tempObs[st, :]
    Obs_0.append(tempObs)

    tempObs = SigmaTerms(sigmaz, L, [L - 1 - L / 4])
    tempObs = tempObs[:, st]
    tempObs = tempObs[st, :]
    Obs_0.append(tempObs)

    tempObs = SigmaTerms(sigmaz, L, [L - 1])
    tempObs = tempObs[:, st]
    tempObs = tempObs[st, :]
    Obs_0.append(tempObs)

    Obs_0.append(HPrethermal)

    HPrethermalEvo = HPrethermalEigenVectors.dot(
        np.diag(np.exp(-1j * T * eigHPrethermal))).dot(
            np.conj(HPrethermalEigenVectors).T)

    print("ASDASD")

    preComputation = {
        'HPrethermal': HPrethermal,
        'Floquet': Floquet,
        'U': U,
        'Udag': Udag,
        'Diag': Diag,
        'Obs': Obs_0,
        'ErrorDiag': temp_big,
        'res': res,
        'L': L,
        'InfTemp': InfTemp,
        'args': args,
        'HPrethermalEvo': HPrethermalEvo,
        'eigHPrethermal': eigHPrethermal,
        'D_U': HPrethermalEvo,
        'D_Udag': np.conj(HPrethermalEvo),
        'D_Diag': np.exp(-1j * T * eigHPrethermal),
        'states': states
    }

    np.save(args['dir'] + 'PreComp_' + fil, preComputation)
    print("Finished Precomputation")
    print("Saved to:")
    print('PreComp_' + fil + '.npy')

    return (states, 'PreComp_' + fil + '.npy')
Example #36
def Generate_Floquet_Operator(L, T, W, epsilon, eta):
    print("\nUSING THE FLOQUET_OPERATOR FUNCTION\n")
    # Hamiltonian
    ########## First Half of evolution
    # Precession term:
    prec = np.zeros((2**L, 2**L))
    for i in range(2**L):
        n_ones = 0
        t = i
        while t > 0:
            n_ones += t % 2
            t = int(t / 2)
        prec[i][i] = L - 2 * n_ones
    prec = eta * prec
    #Interaction term

    wei = [complex(0)]
    for i in range(1, L):
        wei.append(complex(Ewald(L, i, 1)))

    interations = np.zeros((2**L, 2**L), dtype='complex')
    #np.identity(2**L) * wei[0]

    for i in range(L):
        for j in range(i + 1, L):
            mat = iden
            if i == 0:
                mat = sigmaz

            for k in range(1, L):
                if j == k or i == k:
                    mat = outer(sigmaz, mat)
                else:
                    mat = outer(iden, mat)
            interations += mat * wei[j - i] * W

    Uf1 = slinalg.expm2(-1j * T * (interations + prec))

    ########## Second Half of evolutions

    SingleSpin = iden * np.cos(
        np.pi / 2 * (1 + epsilon)) - 1j * sigmax * np.sin(np.pi / 2 *
                                                          (1 + epsilon))
    Uf2 = SingleSpin
    for i in range(1, L):
        Uf2 = outer(SingleSpin, Uf2)

    ##########
    Floquet = np.dot(Uf2, Uf1)
    (w_t, v_t) = EigenVectorsUnitary(Floquet)
    (w, v) = np.linalg.eig(Floquet)

    res = True
    test = np.allclose(np.conj(v).T.dot(v), np.eye(2**L))
    res = res and test
    print("Unitary: ", test)
    test = np.allclose(v.dot(np.diag(w).dot(np.conj(v).T)), np.dot(Uf2, Uf1))
    res = res and test
    print("Decomp: ", test)
    test = np.allclose(v.dot(np.diag(w**2).dot(np.conj(v).T)),
                       np.dot(Uf2, Uf1).dot(Uf2.dot(Uf1)))
    res = res and test
    print("Decomp2: ", test)
    test = np.allclose(v.dot(np.diag(w**3).dot(np.conj(v).T)),
                       np.dot(Uf2, Uf1).dot(Uf2.dot(Uf1)).dot(Uf2.dot(Uf1)))
    res = res and test
    print("Decomp3: ", test)

    return (v, w, np.conj(v).T, np.dot(Uf2, Uf1), res)
Example #37
pbeta = 0.0
Phi = (np.pi / 2.0 / alpha) ** 0.25 * np.exp(-alpha * (x - xbeta) ** 2 + 1j * pbeta * (x - xbeta) / hbar)

xsorted = np.sort(x)
xmin = -3
xmax = 3
xminindex = np.where(xsorted > xmin)[0][0]
xmaxindex = np.where(xsorted > xmax)[0][0]

p = x.argsort()

fig = plt.figure(1)
plt.cla()
plt.ion()
plt.plot(xsorted[xminindex:xmaxindex], np.real(Phi[p])[xminindex:xmaxindex], "b-")
plt.draw()

raw_input("")

dt = 1.0 / float(N * 10)
timesteps = 100

for i in range(timesteps):
    plt.cla()
    Phi = np.dot(U, np.dot(sclinalg.expm2(-1j * HDVR * dt / hbar), np.dot(Ucc, Phi)))
    plt.plot(xsorted[xminindex:xmaxindex], np.real(Phi[p])[xminindex:xmaxindex], "b-")
    plt.draw()
    time.sleep(0.1)

plt.show()
Example #38
def simulationCore(pulseseq, params, dec):
    """ heart of the computation process. 
    input: pulse sequence, parameters (includes hilbert space def), and decoherence object. 
    output: a database object containing the time evolution of states. 
    """

    np = numpy
    splg = scipy.linalg
    pi = np.pi

    qmtools = PyTIQC.core.qmtools
    simtools = PyTIQC.core.simtools

    # make list of times and state vector
    totaltime = pulseseq.totaltime
    T = np.append(np.arange(0, totaltime, params.stepsize), totaltime)
    Y = np.zeros((len(T), len(params.y0)), np.complex128)
    Y[0,:] = params.y0

    # initialize indices and temp vars
    p0 = 0  # p0 is index to current pulse
    t0 = 0  # t0 is index to current time (in data list T)
            # tlen is amount of time to compute evolution
    ycur = np.mat(Y[0,:]).T  # convert to matrix to use *
    tcur = 0
    pcur = -1
    Ucur = 1

    # construct the time-dependent omrabi factors
    for pulse in pulseseq.seqqc:
        pulse.maketimedep(params.shape, params.doMSshapingCorr)

    # initialize the hamiltonian and noise objects
    ht = qmtools.Hamilton()
    ns = qmtools.Noise()
    

    # pre-fetch the noise dictionary
    if dec.doRandNtimes > 0:
        noise_dict = ns.Noise(params, dec)
        noise_total = [[noise_dict['none'][0]], [noise_dict['none'][1]], [noise_dict['none'][2]]]

        for key, [mult, add, uni] in noise_dict.iteritems():
            if (dec.dict['all'] or dec.dict[key]): # and not pulse.use_ideal:
                noise_total[0].append(mult)
                noise_total[1].append(add)
                noise_total[2].append(uni)

        noise_mult = ns.prodFunctions(noise_total[0])
        noise_add = ns.sumFunctions(noise_total[1])
        noise_uni = ns.sumFunctions(noise_total[2])

        projtimes = np.union1d(T[np.nonzero(dec.heatingV)], T[np.nonzero(dec.spontdecayV)])
    else:
        noise_mult = lambda t: 1
        noise_add = lambda t: params.hspace.operator_dict['zero']
        noise_uni = lambda t: params.hspace.operator_dict['zero']
        projtimes = np.array([])

    # storage of hidden ions
    hiddenions = np.zeros_like(params.addressing[-1])
    hiddenionsErr = np.ones_like(params.addressing[-1])
    hiddenionsCount = 0
    # a classical register to store intermediate measurements
    classical_reg = []

    if totaltime == 0:
        #set Y=y0 if no time for evolution
        Y[0,:] = params.y0
    else:
      ### time evolution starts here  
      while(tcur < totaltime):
        ### new pulse starts here  
        pulse = pulseseq.seqqc[p0]
        if pcur != p0:
            pcur = p0
            HTsaved = None
            if params.printpulse:
                print "Pulse ", p0, ":",  pulse
            if params.progbar and pulseseq.seqqc[p0].type != 'M':
                widgets = [progressbar.Percentage(), ' ', progressbar.Bar(),' ', progressbar.ETA()]
                pbar = progressbar.ProgressBar(widgets=widgets).start()
            #################
            # check for hiding. modify hiding matrix, then treat as delay
            nuions = params.hspace.nuions
            if pulse.type == "H":
                hiddenionsCount += 1
                if pulse.hide:
                    hiddenions[nuions-pulse.ion-1] = params.addressing[-1][pulse.ion]
                else:
                    hiddenions[nuions-pulse.ion-1] = 0
                    hiddenionsErr[nuions-pulse.ion-1] *= params.hidingerr
            else:
                pulse.targetion[np.nonzero(hiddenions)] = 0
                if dec.dict['all'] or dec.dict['hiding']:
                    # with some probability some ions are "lost" after unhiding
                    rn = np.random.uniform( size= \
                             len(np.nonzero(hiddenionsErr-1)[0]) )
                    for ith, ind in enumerate(np.nonzero(hiddenionsErr-1)[0]):
                        if rn[ith] > hiddenionsErr[np.nonzero(hiddenionsErr-1)[0][ith]]:
                            pulse.targetion[ind] = 0
                        else:
                            pulse.targetion = \
                                np.copy(params.addressing[pulse.ion])
                pulse.calculateIdealUnitary(params, np.nonzero(hiddenions[::-1])[0])
            # check for meas-init. measure and do projection, then treat as delay.
            if pulse.type == "I":
                if (dec.dict['all'] or dec.dict['hiding']) \
                        and pulse.incl_hidingerr:
                    reg = pulse.measure(np.asarray(ycur))
                    reg[1] += hiddenionsCount*params.hidingMeaserr
                    reg[0] -= hiddenionsCount*params.hidingMeaserr
                    classical_reg.append( reg )
                else:
                    classical_reg.append( pulse.measure(np.asarray(ycur)) )
                Uproj = pulse.Uinit()
                U = np.mat(Uproj)
                ynew = U * ycur
                ynew = ynew / np.sqrt(np.sum(np.power(abs(ynew),2)))
                ycur = ynew                

        #################
        # check for Uideal and skip time evolution if yes
        if pulse.use_ideal:
            ynew = np.dot(pulse.Uid, ycur)
            ycur = ynew
            tcur = pulse.endtime
            # save data and advance pulse
            t0 = np.searchsorted(T, tcur)
            if tcur == T[t0]:
                Y[t0,:] = np.asarray(ynew.T)
            else:
                T = np.insert(T, t0, tcur)
                [Yn1, Yn2] = np.array_split(Y, [t0])
                if len(Yn2) != 0:
                    Y = np.concatenate([Yn1, np.asarray(ynew.T), Yn2], axis=0)
                else:
                    Y = np.concatenate([Yn1, np.asarray(ynew.T)], axis=0)
            t0 = t0 + 1
            p0 = p0 + 1

        #################
        #### Unitary evolutions
        elif not pulse.dotimedepPulse and not params.dotimedep:

            assert tcur >= pulse.starttime \
                and tcur <= pulse.endtime \
                and abs(pulse.starttime + pulse.duration - pulse.endtime) < 0.001 \
                and pulse.duration >= 0, \
                "Pulse timing not consistent; missing copy.deepcopy?"

            tlen = min([pulseseq.seqqc[p0].endtime - tcur, T[t0+1]-tcur])

            [HT, lqc] = ht.Hamiltonian(pulse, params, LDApprox = params.LDapproximation)

            if not pulse.use_ideal:
                HT = noise_mult(tcur) * HT + noise_add(tcur)
                lqc = noise_mult(tcur) * lqc + np.diag(noise_add(tcur))


            Ugate = splg.expm2(-1j * tlen * HT)
            Ulqc1 = np.diag(np.exp(-1j * tcur * lqc))
            Ulqc2 = np.diag(np.exp(-1j * (-tlen-tcur) * lqc))

            U = np.mat(Ulqc2) * np.mat(Ugate) * np.mat(Ulqc1)
            ynew = U * ycur

            # normalize in case of jump operators for spontdecay and heating
            ynew = ynew / np.sqrt(np.sum(np.power(abs(ynew),2)))

            Ucur = U * Ucur

            # extra check for projective unitary (spontdecay, heating)
            if dec.doRandNtimes > 0 and np.sum(noise_uni(tcur)) != 0 \
                    and not pulse.use_ideal:
                U = np.mat(noise_uni(tcur))
                ynew = U * ycur
                ynew = ynew / np.sqrt(np.sum(np.power(abs(ynew),2)))
                ycur = ynew
                if abs(ynew[-1])**2 > 0.25:
                    print "Warning: further heating will exceed phonon space"

            ycur = ynew
            tcur = tcur+tlen

            # if reached data-point, store data and advance time
            datasaved = False
            if tcur == T[t0+1]:
                Y[t0+1,:] = np.asarray(ynew.T)
                t0 = t0 + 1
                datasaved = True

            if params.printpulse and params.progbar and pulseseq.seqqc[p0].type != 'M':
                pbar.update(int(1.*(tcur-pulseseq.seqqc[p0].starttime)*100 \
                                    /(pulseseq.seqqc[p0].duration)))

            # if pulse ended, advance to next pulse (if both then do both)
            if tcur == pulseseq.seqqc[p0].endtime:
                pulseseq.seqqc[p0].U = np.copy(np.asarray(Ucur))
                # save current data if not already saved
                if not datasaved:
                    T = np.insert(T, t0+1, tcur)
                    [Yn1, Yn2] = np.array_split(Y, [t0+1])
                    Y = np.concatenate([Yn1, np.asarray(ynew.T), Yn2], axis=0)
                    t0 = t0 + 1
                # advance to next pulse
                p0 = p0 + 1
                Ucur = 1

        #################
        #### Time-dependent HT: ODE solving
        else:
            # choose time step depending on detuning
            if pulseseq.seqqc[p0].detuning > 2*pi*2:
                stepduration = min(2/ (pulseseq.seqqc[p0].detuning/(2*pi)), 1)
            else:
                stepduration = params.ODEtimestep

            # for MS pulse, check and modify omrabi due to hiding
            if pulse.type == "M" and params.doMShidecorr:
                nuions = len(pulse.targetion)
                activeions = len(np.nonzero(pulse.targetion)[0])
                omc_fac = params.MShidecorr[activeions, nuions]
                if omc_fac == -1:
                    print "MS w/ hiding correction factor invalid, ignoring"
                else:
                    pulse.omrabi_b = params.omc_ms * omc_fac
                    pulse.omrabi_r = params.omc_ms * omc_fac

            # set up time-dep Hamiltonian
            if pulse.dobichro:
                HTblue = ht.Hamiltonian_timedep_complete(pulse.targetion, pulse.omrabi_bt, pulse.phase_light + pulse.phase_rb, pulse.detuning_b, params.omz, params.eta, params.hspace, LDApprox = params.LDapproximation)
                HTred =  ht.Hamiltonian_timedep_complete(pulse.targetion, pulse.omrabi_rt, pulse.phase_light - pulse.phase_rb, pulse.detuning_r, params.omz, params.eta, params.hspace, LDApprox = params.LDapproximation)
                HTorig = lambda t: HTblue(t) + HTred(t)
            else:
                HTorig = ht.Hamiltonian_timedep_complete(pulse.targetion, pulse.omrabi_t , pulse.phase, pulse.detuning, params.omz, params.eta, params.hspace, LDApprox = params.LDapproximation)
            HT = lambda t: noise_mult(t) * HTorig(t) + noise_add(t)

            psidot = lambda t,psi: -1j * np.dot(HT(t), psi)
            # ycur needs to be cast as a 1d array
            solver = qmtools.SEsolver(params.solver)

            Tloc = np.array([0.])
            Yloc = np.zeros([1,len(np.asarray(ycur))]) # this is to get the dimensions right, but make sure to remove the first row of 0's

            if params.progbar:
                widgets = [progressbar.Percentage(), ' ', progressbar.Bar(),' ', progressbar.ETA()]
                pbar = progressbar.ProgressBar(widgets=widgets).start()
            else:
                pbar = None

            # ODE solver
            # variable aliases for convenience
            pstarttime = pulseseq.seqqc[p0].starttime
            pendtime = pulseseq.seqqc[p0].endtime
            # first calculate the expected number of datapoints
            testtime = np.arange(tcur, pendtime, stepduration)
            testtime = np.append(np.delete(testtime, 0), pendtime)
            # extra check for projective unitary (spontdecay, heating)
            projtimes_cur = projtimes[np.intersect1d( \
                    np.nonzero(projtimes<pendtime)[0], \
                    np.nonzero(projtimes>pstarttime)[0]) ]
            for tproj in projtimes_cur:
                ms_ode = solver(HT, tcur, tproj, stepduration, np.ravel(ycur), \
                                pbar, pstarttime, pendtime)
                Tloc = np.append(Tloc, np.delete(ms_ode.time, 0))
                Yloc = np.append(Yloc, np.delete(ms_ode.y.transpose(), 0, axis=0), axis=0)
                tcur = tproj
                ycur = Yloc[-1,:].T
                if dec.doRandNtimes > 0 and np.sum(noise_uni(tcur)) != 0 \
                        and not pulse.use_ideal:
                    U = noise_uni(tcur)
                    ynew = np.dot(U, ycur)
                    ynew = ynew / np.sqrt(np.sum(np.power(abs(ynew),2)))
                    ycur = ynew
                    if abs(ynew[-1])**2 > 0.25:
                        print "Warning: further heating will exceed phonon space"
                    Yloc[-1,:] = np.asarray(ynew.T)
                
            ms_ode = solver(HT, tcur, pendtime, stepduration, np.ravel(ycur), \
                                pbar, pstarttime, pendtime)
            Tloc = np.append(Tloc, np.delete(ms_ode.time, 0))
            Yloc = np.append(Yloc, np.delete(ms_ode.y.transpose(), 0, axis=0), axis=0)

            Tloc = np.delete(Tloc, 0)
            Yloc = np.delete(Yloc, 0, axis=0)

            # check the length w.r.t. expected length of T vector and remove extras
            while len(Tloc) > len(testtime):
                for i in range(len(testtime)):
                    if testtime[i] != Tloc[i]:
                        Tloc = np.delete(Tloc, i)
                        Yloc = np.delete(Yloc, i, axis=0)
                        break

            if not params.saveallpoints:
                Tloc = [Tloc[-1]]
                Yloc = np.array([Yloc[-1]])

            # now we put the result into original result array
            # first, update the current time and state
            tcur = Tloc[-1]
            ycur = np.mat(Yloc[-1,:]).T
            # find the end of the pulse in the original T list
            t1 = np.nonzero(T >= tcur)[0][0]
            # only replace point if it's already been calculated
            if T[t1] == tcur:
                tend = t1+1
            else: # T[t1] > tcur
                tend = t1
            # replace the overlapping times in T with Tloc
            [Tnew1, Tnew2] = np.array_split( np.delete(T, range(t0+1, tend)) , [t0+1])
            Tnew = np.concatenate([Tnew1, Tloc, Tnew2])
            # mirror with Y and Yloc
            [Ynew1, Ynew2] = np.array_split( np.delete(Y, range(t0+1, tend), axis=0) , [t0+1])
            # seems that concatenate doesn't work on 2d arrays if they're empty
            if len(Ynew2) == 0:
                Ynew = np.concatenate([Ynew1, Yloc], axis=0)
            else:
                Ynew = np.concatenate([Ynew1, Yloc, Ynew2], axis=0)

            T = Tnew
            Y = Ynew

            p0 = p0 + 1
            t0 = np.nonzero(T >= tcur)[0][0]

    data = simtools.database(T,Y, params.hspace, pulseseq, register=classical_reg)

    data.creationtime = params.savedataname # timestamp the data

    if dec.doSQL:
        sequel.insertJobToDB(data)

    # get rid of lambda functions in order to send results back through pp
    for pulse in pulseseq.seqqc:
        pulse.omrabi_t = 0
        pulse.omrabi_bt = 0
        pulse.omrabi_rt = 0

    return data
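The ODE branch above stitches the locally computed times Tloc (and the matching rows of Yloc) into the global T and Y arrays by deleting the overlapping entries and re-joining the pieces with np.array_split / np.concatenate. A minimal sketch of that splice pattern on plain 1-D data, with T, Tloc, t0 and tend reused here purely for illustration:

import numpy as np

# global time grid and a locally refined segment that should replace T[t0+1:tend]
T = np.array([0., 1., 2., 3., 4., 5.])
Tloc = np.array([1.5, 2.0, 2.5, 3.0])
t0, tend = 1, 4                     # the overlap covers the original points 2. and 3.

# drop the overlapping points, split at the junction, and re-concatenate
Tnew1, Tnew2 = np.array_split(np.delete(T, range(t0 + 1, tend)), [t0 + 1])
Tnew = np.concatenate([Tnew1, Tloc, Tnew2])
print(Tnew)                         # -> [0., 1., 1.5, 2., 2.5, 3., 4., 5.]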
Exemplo n.º 39
0
def simulationCore(pulseseq, params, dec):
    """ heart of the computation process. 
    input: pulse sequence, parameters (includes hilbert space def), and decoherence object. 
    output: a database object containing the time evolution of states. 
    """

    np = numpy
    splg = scipy.linalg
    pi = np.pi

    qmtools = PyTIQC.core.qmtools
    simtools = PyTIQC.core.simtools

    # make list of times and state vector
    totaltime = pulseseq.totaltime
    T = np.append(np.arange(0, totaltime, params.stepsize), totaltime)
    Y = np.zeros((len(T), len(params.y0)), np.complex128)
    Y[0, :] = params.y0

    # initialize indices and temp vars
    p0 = 0  # p0 is index to current pulse
    t0 = 0  # t0 is index to current time (in data list T)
    # tlen is amount of time to compute evolution
    ycur = np.mat(Y[0, :]).T  # convert to matrix to use *
    tcur = 0
    pcur = -1
    Ucur = 1

    # construct the time-dependent omrabi factors
    for pulse in pulseseq.seqqc:
        pulse.maketimedep(params.shape, params.doMSshapingCorr)

    # initialize the hamiltonian and noise objects
    ht = qmtools.Hamilton()
    ns = qmtools.Noise()

    # pre-fetch the noise dictionary
    if dec.doRandNtimes > 0:
        noise_dict = ns.Noise(params, dec)
        noise_total = [[noise_dict['none'][0]], [noise_dict['none'][1]],
                       [noise_dict['none'][2]]]

        for key, [mult, add, uni] in noise_dict.iteritems():
            if (dec.dict['all'] or dec.dict[key]):  # and not pulse.use_ideal:
                noise_total[0].append(mult)
                noise_total[1].append(add)
                noise_total[2].append(uni)

        noise_mult = ns.prodFunctions(noise_total[0])
        noise_add = ns.sumFunctions(noise_total[1])
        noise_uni = ns.sumFunctions(noise_total[2])

        projtimes = np.union1d(T[np.nonzero(dec.heatingV)],
                               T[np.nonzero(dec.spontdecayV)])
    else:
        noise_mult = lambda t: 1
        noise_add = lambda t: params.hspace.operator_dict['zero']
        noise_uni = lambda t: params.hspace.operator_dict['zero']
        projtimes = np.array([])

    # storage of hidden ions
    hiddenions = np.zeros_like(params.addressing[-1])
    hiddenionsErr = np.ones_like(params.addressing[-1])
    hiddenionsCount = 0
    # a classical register to store intermediate measurements
    classical_reg = []

    if totaltime == 0:
        #set Y=y0 if no time for evolution
        Y[0, :] = params.y0
    else:
        ### time evolution starts here
        while (tcur < totaltime):
            ### new pulse starts here
            pulse = pulseseq.seqqc[p0]
            if pcur != p0:
                pcur = p0
                HTsaved = None
                if params.printpulse:
                    print "Pulse ", p0, ":", pulse
                if params.progbar and pulseseq.seqqc[p0].type != 'M':
                    widgets = [
                        progressbar.Percentage(), ' ',
                        progressbar.Bar(), ' ',
                        progressbar.ETA()
                    ]
                    pbar = progressbar.ProgressBar(widgets=widgets).start()
                #################
                # check for hiding. modify hiding matrix, then treat as delay
                nuions = params.hspace.nuions
                if pulse.type == "H":
                    hiddenionsCount += 1
                    if pulse.hide:
                        hiddenions[nuions - pulse.ion -
                                   1] = params.addressing[-1][pulse.ion]
                    else:
                        hiddenions[nuions - pulse.ion - 1] = 0
                        hiddenionsErr[nuions - pulse.ion -
                                      1] *= params.hidingerr
                else:
                    pulse.targetion[np.nonzero(hiddenions)] = 0
                    if dec.dict['all'] or dec.dict['hiding']:
                        # with some probability some ions are "lost" after unhiding
                        rn = np.random.uniform( size= \
                                 len(np.nonzero(hiddenionsErr-1)[0]) )
                        for ith, ind in enumerate(
                                np.nonzero(hiddenionsErr - 1)[0]):
                            if rn[ith] > hiddenionsErr[np.nonzero(
                                    hiddenionsErr - 1)[0][ith]]:
                                pulse.targetion[ind] = 0
                            else:
                                pulse.targetion = \
                                    np.copy(params.addressing[pulse.ion])
                    pulse.calculateIdealUnitary(
                        params,
                        np.nonzero(hiddenions[::-1])[0])
                # check for meas-init. measure and do projection, then treat as delay.
                if pulse.type == "I":
                    if (dec.dict['all'] or dec.dict['hiding']) \
                            and pulse.incl_hidingerr:
                        reg = pulse.measure(np.asarray(ycur))
                        reg[1] += hiddenionsCount * params.hidingMeaserr
                        reg[0] -= hiddenionsCount * params.hidingMeaserr
                        classical_reg.append(reg)
                    else:
                        classical_reg.append(pulse.measure(np.asarray(ycur)))
                    Uproj = pulse.Uinit()
                    U = np.mat(Uproj)
                    ynew = U * ycur
                    ynew = ynew / np.sqrt(np.sum(np.power(abs(ynew), 2)))
                    ycur = ynew

            #################
            # check for Uideal and skip time evolution if yes
            if pulse.use_ideal:
                ynew = np.dot(pulse.Uid, ycur)
                ycur = ynew
                tcur = pulse.endtime
                # save data and advance pulse
                t0 = np.searchsorted(T, tcur)
                if tcur == T[t0]:
                    Y[t0, :] = np.asarray(ynew.T)
                else:
                    T = np.insert(T, t0, tcur)
                    [Yn1, Yn2] = np.array_split(Y, [t0])
                    if len(Yn2) != 0:
                        Y = np.concatenate([Yn1, np.asarray(ynew.T), Yn2],
                                           axis=0)
                    else:
                        Y = np.concatenate([Yn1, np.asarray(ynew.T)], axis=0)
                t0 = t0 + 1
                p0 = p0 + 1

            #################
            #### Unitary evolutions
            elif not pulse.dotimedepPulse and not params.dotimedep:

                assert tcur >= pulse.starttime \
                    and tcur <= pulse.endtime \
                    and abs(pulse.starttime + pulse.duration - pulse.endtime) < 0.001 \
                    and pulse.duration >= 0, \
                    "Pulse timing not consistent; missing copy.deepcopy?"

                tlen = min(
                    [pulseseq.seqqc[p0].endtime - tcur, T[t0 + 1] - tcur])

                [HT, lqc] = ht.Hamiltonian(pulse,
                                           params,
                                           LDApprox=params.LDapproximation)

                if not pulse.use_ideal:
                    HT = noise_mult(tcur) * HT + noise_add(tcur)
                    lqc = noise_mult(tcur) * lqc + np.diag(noise_add(tcur))

                Ugate = splg.expm2(-1j * tlen * HT)
                Ulqc1 = np.diag(np.exp(-1j * tcur * lqc))
                Ulqc2 = np.diag(np.exp(-1j * (-tlen - tcur) * lqc))

                U = np.mat(Ulqc2) * np.mat(Ugate) * np.mat(Ulqc1)
                ynew = U * ycur

                # normalize in case of jump operators for spontdecay and heating
                ynew = ynew / np.sqrt(np.sum(np.power(abs(ynew), 2)))

                Ucur = U * Ucur

                # extra check for projective unitary (spontdecay, heating)
                if dec.doRandNtimes > 0 and np.sum(noise_uni(tcur)) != 0 \
                        and not pulse.use_ideal:
                    U = np.mat(noise_uni(tcur))
                    ynew = U * ycur
                    ynew = ynew / np.sqrt(np.sum(np.power(abs(ynew), 2)))
                    ycur = ynew
                    if abs(ynew[-1])**2 > 0.25:
                        print "Warning: further heating will exceed phonon space"

                ycur = ynew
                tcur = tcur + tlen

                # if reached data-point, store data and advance time
                datasaved = False
                if tcur == T[t0 + 1]:
                    Y[t0 + 1, :] = np.asarray(ynew.T)
                    t0 = t0 + 1
                    datasaved = True

                if params.printpulse and params.progbar and pulseseq.seqqc[
                        p0].type != 'M':
                    pbar.update(int(1.*(tcur-pulseseq.seqqc[p0].starttime)*100 \
                                        /(pulseseq.seqqc[p0].duration)))

                # if pulse ended, advance to next pulse (if both then do both)
                if tcur == pulseseq.seqqc[p0].endtime:
                    pulseseq.seqqc[p0].U = np.copy(np.asarray(Ucur))
                    # save current data if not already saved
                    if not datasaved:
                        T = np.insert(T, t0 + 1, tcur)
                        [Yn1, Yn2] = np.array_split(Y, [t0 + 1])
                        Y = np.concatenate([Yn1, np.asarray(ynew.T), Yn2],
                                           axis=0)
                        t0 = t0 + 1
                    # advance to next pulse
                    p0 = p0 + 1
                    Ucur = 1

            #################
            #### Time-dependent HT: ODE solving
            else:
                # choose time step depending on detuning
                if pulseseq.seqqc[p0].detuning > 2 * pi * 2:
                    stepduration = min(
                        2 / (pulseseq.seqqc[p0].detuning / (2 * pi)), 1)
                else:
                    stepduration = params.ODEtimestep

                # for MS pulse, check and modify omrabi due to hiding
                if pulse.type == "M" and params.doMShidecorr:
                    nuions = len(pulse.targetion)
                    activeions = len(np.nonzero(pulse.targetion)[0])
                    omc_fac = params.MShidecorr[activeions, nuions]
                    if omc_fac == -1:
                        print "MS w/ hiding correction factor invalid, ignoring"
                    else:
                        pulse.omrabi_b = params.omc_ms * omc_fac
                        pulse.omrabi_r = params.omc_ms * omc_fac

                # set up time-dep Hamiltonian
                if pulse.dobichro:
                    HTblue = ht.Hamiltonian_timedep_complete(
                        pulse.targetion,
                        pulse.omrabi_bt,
                        pulse.phase_light + pulse.phase_rb,
                        pulse.detuning_b,
                        params.omz,
                        params.eta,
                        params.hspace,
                        LDApprox=params.LDapproximation)
                    HTred = ht.Hamiltonian_timedep_complete(
                        pulse.targetion,
                        pulse.omrabi_rt,
                        pulse.phase_light - pulse.phase_rb,
                        pulse.detuning_r,
                        params.omz,
                        params.eta,
                        params.hspace,
                        LDApprox=params.LDapproximation)
                    HTorig = lambda t: HTblue(t) + HTred(t)
                else:
                    HTorig = ht.Hamiltonian_timedep_complete(
                        pulse.targetion,
                        pulse.omrabi_t,
                        pulse.phase,
                        pulse.detuning,
                        params.omz,
                        params.eta,
                        params.hspace,
                        LDApprox=params.LDapproximation)
                HT = lambda t: noise_mult(t) * HTorig(t) + noise_add(t)

                psidot = lambda t, psi: -1j * np.dot(HT(t), psi)
                # ycur needs to be cast as a 1d array
                solver = qmtools.SEsolver(params.solver)

                Tloc = np.array([0.])
                Yloc = np.zeros(
                    [1, len(np.asarray(ycur))]
                )  # this is to get the dimensions right, but make sure to remove the first row of 0's

                if params.progbar:
                    widgets = [
                        progressbar.Percentage(), ' ',
                        progressbar.Bar(), ' ',
                        progressbar.ETA()
                    ]
                    pbar = progressbar.ProgressBar(widgets=widgets).start()
                else:
                    pbar = None

                # ODE solver
                # variable aliases for convenience
                pstarttime = pulseseq.seqqc[p0].starttime
                pendtime = pulseseq.seqqc[p0].endtime
                # first calculate the expected number of datapoints
                testtime = np.arange(tcur, pendtime, stepduration)
                testtime = np.append(np.delete(testtime, 0), pendtime)
                # extra check for projective unitary (spontdecay, heating)
                projtimes_cur = projtimes[np.intersect1d( \
                        np.nonzero(projtimes<pendtime)[0], \
                        np.nonzero(projtimes>pstarttime)[0]) ]
                for tproj in projtimes_cur:
                    ms_ode = solver(HT, tcur, tproj, stepduration, np.ravel(ycur), \
                                    pbar, pstarttime, pendtime)
                    Tloc = np.append(Tloc, np.delete(ms_ode.time, 0))
                    Yloc = np.append(Yloc,
                                     np.delete(ms_ode.y.transpose(), 0,
                                               axis=0),
                                     axis=0)
                    tcur = tproj
                    ycur = Yloc[-1, :].T
                    if dec.doRandNtimes > 0 and np.sum(noise_uni(tcur)) != 0 \
                            and not pulse.use_ideal:
                        U = noise_uni(tcur)
                        ynew = np.dot(U, ycur)
                        ynew = ynew / np.sqrt(np.sum(np.power(abs(ynew), 2)))
                        ycur = ynew
                        if abs(ynew[-1])**2 > 0.25:
                            print "Warning: further heating will exceed phonon space"
                        Yloc[-1, :] = np.asarray(ynew.T)

                ms_ode = solver(HT, tcur, pendtime, stepduration, np.ravel(ycur), \
                                    pbar, pstarttime, pendtime)
                Tloc = np.append(Tloc, np.delete(ms_ode.time, 0))
                Yloc = np.append(Yloc,
                                 np.delete(ms_ode.y.transpose(), 0, axis=0),
                                 axis=0)

                Tloc = np.delete(Tloc, 0)
                Yloc = np.delete(Yloc, 0, axis=0)

                # check the length w.r.t. expected length of T vector and remove extras
                while len(Tloc) > len(testtime):
                    for i in range(len(testtime)):
                        if testtime[i] != Tloc[i]:
                            Tloc = np.delete(Tloc, i)
                            Yloc = np.delete(Yloc, i, axis=0)
                            break

                if not params.saveallpoints:
                    Tloc = [Tloc[-1]]
                    Yloc = np.array([Yloc[-1]])

                # now we put the result into original result array
                # first, update the current time and state
                tcur = Tloc[-1]
                ycur = np.mat(Yloc[-1, :]).T
                # find the end of the pulse in the original T list
                t1 = np.nonzero(T >= tcur)[0][0]
                # only replace point if it's already been calculated
                if T[t1] == tcur:
                    tend = t1 + 1
                else:  # T[t1] > tcur
                    tend = t1
                # replace the overlapping times in T with Tloc
                [Tnew1,
                 Tnew2] = np.array_split(np.delete(T, range(t0 + 1, tend)),
                                         [t0 + 1])
                Tnew = np.concatenate([Tnew1, Tloc, Tnew2])
                # mirror with Y and Yloc
                [Ynew1, Ynew2
                 ] = np.array_split(np.delete(Y, range(t0 + 1, tend), axis=0),
                                    [t0 + 1])
                # seems that concatenate doesn't work on 2d arrays if they're empty
                if len(Ynew2) == 0:
                    Ynew = np.concatenate([Ynew1, Yloc], axis=0)
                else:
                    Ynew = np.concatenate([Ynew1, Yloc, Ynew2], axis=0)

                T = Tnew
                Y = Ynew

                p0 = p0 + 1
                t0 = np.nonzero(T >= tcur)[0][0]

    data = simtools.database(T,
                             Y,
                             params.hspace,
                             pulseseq,
                             register=classical_reg)

    data.creationtime = params.savedataname  # timestamp the data

    if dec.doSQL:
        sequel.insertJobToDB(data)

    # get rid of lambda functions in order to send results back through pp
    for pulse in pulseseq.seqqc:
        pulse.omrabi_t = 0
        pulse.omrabi_bt = 0
        pulse.omrabi_rt = 0

    return data
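For a pulse whose Hamiltonian is constant over a step, the unitary branch above applies U = expm2(-1j*tlen*HT) to the state. A minimal sketch of that single propagation step on a toy two-level Hamiltonian; HT, tlen and ycur are illustrative stand-ins rather than the simulator's objects, and scipy.linalg.expm is used because expm2 is no longer available in current SciPy releases:

import numpy as np
from scipy.linalg import expm                           # replacement for the removed expm2

# toy two-level "pulse" Hamiltonian and step length (illustrative values only)
HT = 2 * np.pi * 0.5 * np.array([[0., 1.], [1., 0.]])   # resonant Rabi coupling
tlen = 0.25
ycur = np.array([1., 0.], dtype=complex)                # start in the ground state

U = expm(-1j * tlen * HT)                               # piecewise-constant propagator
ynew = U.dot(ycur)
ynew = ynew / np.sqrt(np.sum(np.abs(ynew) ** 2))        # renormalize, as in the simulator
print(np.abs(ynew) ** 2)                                # ground / excited populations after the step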
Exemplo n.º 40
0
 INITIALCHAIN1 = np.outer(INITIALCHAIN,INITIALCHAIN)
 INITIALCHAIN = np.kron(I_P,I_C)        
 for ad in range(CHAINLENGTH-1):
     INITIALCHAIN = np.kron(INITIALCHAIN,INITIALCHAIN1)   
 
 SYSTEM = np.matrix(SYSTEM)*np.matrix(INITIALCOIN)*np.matrix(INITIALCHAIN)
 del INITIALCHAIN
 del INITIALCOIN
 del INITIALCOIN_nochain
 del INITIALCHAIN1
 ##################################
 ############################################################ 
 #LAST BIT OF CHAIN CODE
 CHAIN = J1*CHAIN1 + J*CHAIN2
 CHAIN = np.kron(I_P,CHAIN)
 EXP1 = linalg.expm2(-1j*CHAIN)
 EXP2 = linalg.expm2(1j*CHAIN)
 #EXP1 = np.kron(I_P,EXP1)
 #EXP2 = np.kron(I_P,EXP2)
 ###############################################################
 #QUANTUM WALK
 p = np.zeros((POSITIONS,STEPS),dtype=complex)
 for r in range(STEPS):
     SYSTEM = np.matrix(EXP1)*np.matrix(SYSTEM)*np.matrix(EXP2)    
     SYSTEM = np.matrix(H)*np.matrix(SYSTEM)*np.matrix(np.matrix.getH(H))
     SYSTEM = np.matrix(SHIFT)*np.matrix(SYSTEM)*np.matrix.getH(SHIFT)
 ##################################
     #MEASUREMENT
     
     for i in range(POSITIONS):
         ket_i = np.zeros((POSITIONS,1))
Exemplo n.º 41
0
def Generate_Floquet_NearestNeighbour(args):
    print("\nUSING THE NEAREST NEIGHBOUR FUNCTION\n")
    # Hamiltonian
    ########## First Half of evolution
    # Precession term:

    L = args['L']
    T = args['T']
    J = args['J']
    epsilon = args['epsilon']
    hx = args['hx']
    hy = args['hy']
    hz = args['hz']

    Zprecession = np.zeros((2**L, 2**L), dtype='complex')
    for i in range(L):
        mat = iden
        if i == 0:
            mat = sigmaz
        for k in range(1, L):
            if i == k:
                mat = outer(sigmaz, mat)
            else:
                mat = outer(iden, mat)
        Zprecession += mat

    Xprecession = np.zeros((2**L, 2**L), dtype='complex')
    for i in range(L):
        mat = iden
        if i == 0:
            mat = sigmax
        for k in range(1, L):
            if i == k:
                mat = outer(sigmax, mat)
            else:
                mat = outer(iden, mat)
        Xprecession += mat

    # sigma_y precession term (sigmay is assumed to be defined alongside sigmax / sigmaz)
    Yprecession = np.zeros((2**L, 2**L), dtype='complex')
    for i in range(L):
        mat = iden
        if i == 0:
            mat = sigmay
        for k in range(1, L):
            if i == k:
                mat = outer(sigmay, mat)
            else:
                mat = outer(iden, mat)
        Yprecession += mat

    #Interaction term

    interactions = np.zeros((2**L, 2**L), dtype='complex')
    #np.identity(2**L) * wei[0]

    for i in range(L):
        j = (i + 1) % L

        mat = iden
        if i == 0 or j == 0:
            mat = sigmaz

        for k in range(1, L):
            if j == k or i == k:
                mat = outer(sigmaz, mat)
            else:
                mat = outer(iden, mat)
        interactions += mat * J

    Uf1 = slinalg.expm2(-1j * T * (interactions + hz * Zprecession +
                                   hy * Yprecession + hx * Xprecession))

    ########## Second Half of evolution

    SingleSpin = iden * np.cos(
        np.pi / 2 * (1 + epsilon)) - 1j * sigmax * np.sin(np.pi / 2 *
                                                          (1 + epsilon))
    Uf2 = SingleSpin
    for i in range(1, L):
        Uf2 = outer(SingleSpin, Uf2)

    ##########
    HPrethermal = interactions + hx * Xprecession

    return (HPrethermal, Uf2.dot(Uf1))
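A minimal usage sketch for the function above. The module-level names it relies on (iden, sigmax, sigmay, sigmaz, an outer that acts as a Kronecker product, and slinalg) are assumed to be defined roughly as below; the parameter values are purely illustrative, and a shim maps slinalg.expm2 to slinalg.expm on current SciPy, where expm2 no longer exists:

import numpy as np
import scipy.linalg as slinalg

if not hasattr(slinalg, 'expm2'):   # expm2 was removed from SciPy; shim it for this sketch
    slinalg.expm2 = slinalg.expm

# assumed module-level definitions (the original file presumably provides these)
iden = np.eye(2, dtype=complex)
sigmax = np.array([[0., 1.], [1., 0.]], dtype=complex)
sigmay = np.array([[0., -1j], [1j, 0.]], dtype=complex)
sigmaz = np.array([[1., 0.], [0., -1.]], dtype=complex)
outer = np.kron                     # the loops above build tensor products site by site

args = {'L': 4, 'T': 1.0, 'J': 0.2, 'epsilon': 0.05,
        'hx': 0.1, 'hy': 0.1, 'hz': 0.3}
HPrethermal, UF = Generate_Floquet_NearestNeighbour(args)
print(UF.shape)                     # (2**L, 2**L) Floquet unitary for one driving period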
Exemplo n.º 42
0
Arquivo: Seu.py Projeto: Aanax/Univer
def y(t, i):
    # return np.dot(la.expm(A*t), y0)[i]    # can be expm3(A, order-n Taylor)
    # return np.dot(la.expm3(A*t, n), y0)[i]
    return np.dot(la.expm2(A*t), y0)[i]
Exemplo n.º 43
0
def sqrtm(M):
    """ Returns the symmetric semi-definite positive square root of a matrix. """
    r = real_if_close(expm2(0.5 * logm(M)), 1e-8)
    return (r + r.T) / 2
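A quick check of the snippet above; scipy.linalg.expm is used in place of the removed expm2, and the test matrix is illustrative:

import numpy as np
from numpy import real_if_close
from scipy.linalg import expm, logm

def sqrtm(M):
    # symmetric positive semi-definite square root, as in the example above
    r = real_if_close(expm(0.5 * logm(M)), 1e-8)
    return (r + r.T) / 2

A = np.array([[2., 1.], [1., 2.]])      # symmetric positive definite test matrix
R = sqrtm(A)
print(np.allclose(R.dot(R), A))         # True: R is the matrix square root of A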
Exemplo n.º 44
0
    def Hamiltonian_timedep_complete(self,
                                     targetion,
                                     omrabi,
                                     phase,
                                     detuning,
                                     wsec,
                                     eta,
                                     hspace,
                                     LDApprox=False):
        """
        Everything is in the full interaction frame of the ions, including the
        harmonic oscillation (as opposed to the original version from Hartmut,
        which still had the phonon energy nonzero).

        In addition, this version does not omit the exp(+- i omega_z t) terms
        attached to the creation/annihilation operators.

        This is Eq. 3.6 in Christian Roos's thesis.

        """

        # H = exp(i delta_c t+phase) omega(t) (sum_i address_error_i sigma_plus_i) (exp(i eta (a exp(-i omegaz t) + adag exp(i omegaz t)))) + h.c.
        # !!! note !!!
        # here eta is assumed to be the same for all qubits. this is not
        # necessarily the case. a more correct version would allow for
        # individual etas for each qubit ... maybe later. it is not really
        # required here and would only slow down the code.

        targetion = targetion[::-1]

        # prefactor including the timedepending omega and the detuning+phase
        prefac = lambda t: np.exp(1j *
                                  (detuning * t + phase)) * omrabi(t) * 1 / 2.

        # coupling to the sideband. argument of the exponent/ld approximation
        LDpart = lambda t: 1j * eta[-1] * (hspace.operator_dict['a'] * np.exp(
            -1j * wsec * t) + hspace.operator_dict['a_dag'] * np.exp(1j * wsec
                                                                     * t))

        # calculate the coupling based on the addressing errors
        sys = np.zeros(
            (hspace.levels**hspace.nuions, hspace.levels**hspace.nuions))
        for k in xrange(hspace.nuions):
            sys += targetion[k] * hspace.operator_dict['raising'][:, :, k]

        # Lamb-Dicke approximation yes/no?
        # kron with qubits to get the hamiltonian
        if LDApprox:
            sys_ho = lambda t: npml.kron(
                sys,
                np.diag(np.ones(hspace.maxphonons + 1)) + LDpart(t))
        else:
            sys_ho = lambda t: npml.kron(sys, splg.expm2(LDpart(t)))

        # multiply with rabi-frequency part
        H = lambda t: prefac(t) * sys_ho(t)

        # add h.c. part
        HT = lambda t: H(t) + H(t).transpose().conjugate()

        return HT
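When LDApprox is False, the coupling above exponentiates LDpart(t) = 1j*eta*(a*exp(-1j*wsec*t) + a_dag*exp(1j*wsec*t)) at each time. A minimal sketch of that factor on a small truncated phonon space, checking that it is unitary at a fixed time; maxphonons, eta, wsec and t are illustrative stand-ins, and expm replaces the removed expm2:

import numpy as np
from scipy.linalg import expm

maxphonons = 4                                           # size of the truncated phonon space
a = np.diag(np.sqrt(np.arange(1., maxphonons + 1)), 1)   # annihilation operator
a_dag = a.conj().T                                       # creation operator

eta, wsec, t = 0.06, 2 * np.pi * 1.2, 0.3                # illustrative Lamb-Dicke factor, trap frequency, time
LDpart = 1j * eta * (a * np.exp(-1j * wsec * t) + a_dag * np.exp(1j * wsec * t))

U = expm(LDpart)                                         # motional part of the coupling at time t
print(np.allclose(U.conj().T.dot(U), np.eye(maxphonons + 1)))   # True: LDpart is anti-Hermitian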
Exemplo n.º 45
0
    def Hamiltonian(self, pulse, params, LDApprox=True):
        ''' Hamiltonian definition using hspace.operator_dict '''
        opdict = params.hspace.operator_dict

        targetion = pulse.targetion[::-1]
        omrabi = pulse.omrabi
        phase = pulse.phase
        detuning = pulse.detuning
        wsec = params.omz
        eta = params.eta
        hspace = params.hspace

        # prefactor including the timedepending omega and the detuning+phase
        prefac = np.exp(1j * phase) * omrabi * 1/2.

        # coupling to the sideband. argument of the exponent/ld approximation
        LDpart = 1j * eta[-1] * (opdict['a']+opdict['a_dag']) 
        # calculate the coupling based on the addressing errors

        # Lamb-Dicke approximation yes/no?
        # kron with qubits to get the hamiltonian
        # here we'll use the fact that eta may be different for each ion
        sys_ho = np.zeros(((hspace.maxphonons+1)*hspace.levels**hspace.nuions, \
                          (hspace.maxphonons+1)*hspace.levels**hspace.nuions ), \
                              np.complex128)
        if LDApprox:
            for k in xrange(hspace.nuions):
                #etak = eta[-1] if pulse.ion == -1 else eta[k]
                sys_ho += npml.kron( targetion[k]*opdict['raising'][:,:,k],\
                               np.diag(np.ones(hspace.maxphonons+1)) + \
                               1j * eta[k] * (opdict['a']+opdict['a_dag']) )
        else:
            for k in xrange(hspace.nuions):
                #etak = eta[-1] if pulse.ion == -1 else eta[k]
                sys_ho += npml.kron( targetion[k]*opdict['raising'][:,:,k],\
                               splg.expm2(1j * eta[k] * (opdict['a']+opdict['a_dag'])) )

        # multiply with rabi-frequency part
        H = prefac * sys_ho

        # diagonal terms
        sysz = np.zeros((hspace.levels**hspace.nuions,hspace.levels**hspace.nuions))
        for k in xrange(hspace.nuions):
            sysz += opdict['sigz'][:,:,k] * 1/2.
        energies = -detuning * npml.kron(sysz, opdict['id_a']) + \
                   wsec * npml.kron(np.diag(np.ones(hspace.levels**hspace.nuions)), \
                                        np.dot(opdict['a_dag'], opdict['a']) )
        # subtract a zero offset
        energies -= np.diag(energies[0,0]*np.ones_like(np.diag(energies)))

        # add h.c. part
        HT = H + H.transpose().conjugate() + energies

        # diagonal elements of matrix for basis transformation (lasertoqc)
        lqc = np.diag(HT)

        if pulse.type == 'Z':
            HT = HT+np.diag(self.ACshift_corr(pulse, params))
            # to fix discrepancy between lab/Volkmar and here
            if pulse.theta < 0:
                HT = -HT
                lqc = -lqc

        return HT, lqc
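For the time-independent Hamiltonian above, the LDApprox flag replaces splg.expm2(1j*eta[k]*(a+a_dag)) by its first-order expansion. A minimal sketch of the size of that approximation for a small eta; the operators and eta are illustrative, and expm replaces the removed expm2:

import numpy as np
from scipy.linalg import expm

maxphonons = 6
a = np.diag(np.sqrt(np.arange(1., maxphonons + 1)), 1)    # annihilation operator
a_dag = a.conj().T                                        # creation operator

eta = 0.05                                                # illustrative Lamb-Dicke parameter
full = expm(1j * eta * (a + a_dag))                       # exact coupling (LDApprox=False)
approx = np.eye(maxphonons + 1) + 1j * eta * (a + a_dag)  # LDApprox=True branch

print(np.max(np.abs(full - approx)))                      # second order in eta, small for eta << 1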
Exemplo n.º 46
0
    def test_zero(self):
        a = array([[0., 0], [0, 0]])
        assert_array_almost_equal(expm(a), [[1, 0], [0, 1]])
        assert_array_almost_equal(expm2(a), [[1, 0], [0, 1]])
        assert_array_almost_equal(expm3(a), [[1, 0], [0, 1]])
Exemplo n.º 47
0
            COMPONENT1 = np.kron(COMPONENT1,pauli_x)
            COMPONENT2 = np.kron(COMPONENT2,pauli_y)
        elif u==v+1:
            COMPONENT1 = np.kron(COMPONENT1,pauli_x)
            COMPONENT2 = np.kron(COMPONENT2,pauli_y)
        else:
            COMPONENT1 = np.kron(COMPONENT1,pauli_i)
            COMPONENT2 = np.kron(COMPONENT2,pauli_i)

    if v==0:
        CHAIN1 = COMPONENT1 + COMPONENT2
    else:
        CHAIN2 = CHAIN2 + COMPONENT1 + COMPONENT2
A = (J1*CHAIN1 + J*CHAIN2)
A = 1j*A
EXP1 = linalg.expm2(-1*A)
EXP2 = linalg.expm2(A)
EXP1 = np.kron(I_P,EXP1)
EXP2 = np.kron(I_P,EXP2)
######################################
#INITIAL STATE   
INITIALCOIN =               (1/np.sqrt(2))*np.matrix([[1],
                                       [1]])
INITIALCOIN = np.outer(INITIALCOIN,INITIALCOIN)
INITIALCHAIN = np.matrix([[1,  0],
                          [0,  0]])  
SYSTEM = np.zeros((2*STEPS+1))
SYSTEM[STEPS+1] = 1
SYSTEM = np.outer(SYSTEM,SYSTEM)
A = np.kron(INITIALCOIN,INITIALCHAIN)
for z in range(CHAINLENGTH-2):
Exemplo n.º 48
0
INITIALCHAIN = (1 / np.sqrt(2)) * np.matrix([[1], [1]])
INITIALCHAIN1 = np.outer(INITIALCHAIN, INITIALCHAIN)
INITIALCHAIN = np.kron(I_P, I_C)
for ad in range(CHAINLENGTH - 1):
    INITIALCHAIN = np.kron(INITIALCHAIN, INITIALCHAIN1)

SYSTEM = np.matrix(SYSTEM) * np.matrix(INITIALCOIN) * np.matrix(INITIALCHAIN)

del INITIALCHAIN
del INITIALCOIN
del INITIALCHAIN1
##################################
############################################################
#LAST BIT OF CHAIN CODE
CHAIN = J * CHAIN1 + J * CHAIN2
EXP1 = linalg.expm2(-1j * CHAIN)
EXP2 = linalg.expm2(1j * CHAIN)
EXP1 = np.kron(I_P, EXP1)
EXP2 = np.kron(I_P, EXP2)
###############################################################
#QUANTUM WALK
for r in range(STEPS):
    SYSTEM = np.matrix(EXP1) * np.matrix(SYSTEM) * np.matrix(EXP2)
    SYSTEM = np.matrix(H) * np.matrix(SYSTEM) * np.matrix(np.matrix.getH(H))
    SYSTEM = np.matrix(SHIFT) * np.matrix(SYSTEM) * np.matrix(
        np.matrix.getH(SHIFT))

##################################
#MEASUREMENT
p = np.zeros((POSITIONS, 1), dtype=complex)
for i in range(POSITIONS):
Exemplo n.º 49
0
        v = mat(random.randn(nTest, 1))
        t = 3.0

        start_time = time.time()
        (w, error, hump) = expv(t, lambda x: M * x, v, normM)
        end_time = time.time()
        print "Krylov Time: %f" % (end_time - start_time)

        start_time = time.time()
        testExp = expm(t * M)
        testV = testExp * v
        end_time = time.time()
        print "Dense Pade Exp Time: %f" % (end_time - start_time)

        start_time = time.time()
        testExp2 = expm2(t * M)
        testV2 = testExp2 * v
        end_time = time.time()
        print "Dense Eig Decomp Exp Time: %f" % (end_time - start_time)

        start_time = time.time()
        testExp3 = expm3(t * M)
        testV3 = testExp3 * v
        end_time = time.time()
        print "Dense Taylor Exp Time: %f" % (end_time - start_time)

        relativeErr = norm(testV - w) / norm(testV)

        print "Relative Difference Between exp(M)*v using Krylov and Dense Pade Approx: %f" % relativeErr
        if abs(relativeErr) > 1e-6:
            print "Tolerance Exceeded:"
Exemplo n.º 50
0
def Ugate(A, N):
    return Qobj(expm2(2*np.pi*i*N*A))
Exemplo n.º 51
0
        v = mat(random.randn(nTest,1))
        t = 3.0

        start_time = time.time()
        (w, error, hump) = expv(t, lambda x: M*x, v, normM)
        end_time = time.time()
        print "Krylov Time: %f" % (end_time-start_time)

        start_time = time.time()
        testExp = expm(t*M)
        testV = testExp*v
        end_time = time.time()
        print "Dense Pade Exp Time: %f" % (end_time - start_time)

        start_time = time.time()
        testExp2 = expm2(t*M)
        testV2 = testExp2*v
        end_time = time.time()
        print "Dense Eig Decomp Exp Time: %f" % (end_time - start_time)

        start_time = time.time()
        testExp3 = expm3(t*M)
        testV3 = testExp3*v
        end_time = time.time()
        print "Dense Taylor Exp Time: %f" % (end_time - start_time)
        
        relativeErr = norm(testV - w)/norm(testV)
        
        print "Relative Difference Between exp(M)*v using Krylov and Dense Pade Approx: %f" % relativeErr 
        if abs(relativeErr) > 1e-6:
            print "Tolerance Exceeded:"
Exemplo n.º 52
0
def sqrtm(M):
    """ Returns the symmetric semi-definite positive square root of a matrix.
    """
    r = real_if_close(expm2(0.5 * logm(M)), 1e-8)
    return (r + r.T) / 2
Exemplo n.º 53
0
t4 = zeros(size(s))
tt = 0
for i in s:
    print "Matrix size %d" %(i)
    m = zeros((i,i), float64)
    half = int(round(i*i/2))+1
    r = random.randint(0,i, (half*2, ))
    n = random.random((half, ))
    for z in range(0, half*2, 2):
        m[r[z], r[z+1]] = n[z/2]
    start = time()
    expm(m)
    t[tt] = time() - start

    start = time()
    expm2(m)
    t2[tt] = time() - start

    start = time()
    expm3(m)
    t3[tt] = time() - start

    start = time()
    expf(i, 1,  m, i)
    t4[tt] = time() - start
    tt += 1

print s
print t
plot(s, t, label="Pade")
plot(s, t2, label="Eigenvalue")