Example #1
    def _propagate_SExp_RTOp_ReSymK_Re_pytorch(self,
                                               rhoi,
                                               Ham,
                                               RT,
                                               dt,
                                               use_gpu=False,
                                               L=4):
        """Integration by short exponentional expansion
        
        Integration by expanding exponential (_SExp_) to Lth order. 
        This is a PyTorch (_pytorch) implementation with real (_Re_) matrices
        for  a system part of the system-bath interaction operator  ``K``
        in a form of real symmetric operator (ReSymK). The relaxation tensor
        is assumed in form of a set of operators (_RTOp_)
              
            
        """

        Nref = self.Nref
        Nt = self.Nt
        verbose = self.verbose
        timea = self.TimeAxis
        prop_name = self.propagation_name

        try:
            import torch
        except ImportError:
            raise ImportError("PyTorch not installed")

        # no self beyond this point

        qr.log_detail("PROPAGATION (short exponential with " +
                      "relaxation in operator form): order ",
                      L,
                      verbose=verbose)
        qr.log_detail("Using pytorch implementation")
        qr.log_detail("Using GPU: ", use_gpu & torch.cuda.is_available())

        pr = ReducedDensityMatrixEvolution(timea, rhoi, name=prop_name)

        # copies: numpy.real/imag return views of rhoi.data, and the
        # buffers below are updated in place
        rho1_r = torch.from_numpy(numpy.real(rhoi.data).copy())
        rho2_r = torch.from_numpy(numpy.real(rhoi.data).copy())
        rho1_i = torch.from_numpy(numpy.imag(rhoi.data).copy())
        rho2_i = torch.from_numpy(numpy.imag(rhoi.data).copy())

        HH = torch.from_numpy(Ham.data)

        try:
            Km = torch.from_numpy(RT.Km)                # K_m operators (real)
            Lm_r = torch.from_numpy(numpy.real(RT.Lm))  # Re of \Lambda_m
            Lm_i = torch.from_numpy(numpy.imag(RT.Lm))  # Im of \Lambda_m
            Nm = RT.Km.shape[0]
        except AttributeError:
            raise Exception("Tensor is not in operator form")

        if use_gpu & torch.cuda.is_available():
            rho1_r = rho1_r.cuda()
            rho2_r = rho1_r
            rho1_i = rho1_i.cuda()
            rho2_i = rho1_i
            HH = HH.cuda()
            Km = Km.cuda()
            Lm_r = Lm_r.cuda()
            Lm_i = Lm_i.cuda()

        indx = 1

        # verbosity inside loops
        levs = [qr.LOG_QUICK]
        verb = qr.loglevels2bool(levs)

        # loop over time
        for ii in range(1, Nt):
            qr.printlog(" time step ",
                        ii,
                        "of",
                        Nt,
                        verbose=verb[0],
                        loglevel=levs[0])

            # steps in between saving the results
            for jj in range(Nref):

                # L iterations to get the short exponential expansion
                for ll in range(1, L + 1):

                    A = torch.matmul(HH, rho1_i)
                    B = torch.matmul(HH, rho1_r)
                    rhoY_r = torch.mul(A + torch.transpose(A, 0, 1), dt / ll)
                    rhoY_i = torch.mul(B - torch.transpose(B, 0, 1), -dt / ll)

                    for mm in range(Nm):

                        a = torch.matmul(Lm_r[mm, :, :], rho1_r)
                        A = a - torch.transpose(a, 0, 1)
                        b = torch.matmul(Lm_i[mm, :, :], rho1_i)
                        B = b - torch.transpose(b, 0, 1)
                        c = torch.matmul(Lm_r[mm, :, :], rho1_i)
                        C = -(c + torch.transpose(c, 0, 1))
                        d = torch.matmul(Lm_i[mm, :, :], rho1_r)
                        D = d + torch.transpose(d, 0, 1)

                        E = B - A
                        F = C - D

                        A = torch.matmul(Km[mm, :, :], E)
                        B = torch.matmul(Km[mm, :, :], F)
                        rhoY_r += torch.mul(A + torch.transpose(A, 0, 1),
                                            dt / ll)
                        rhoY_i += torch.mul(B - torch.transpose(B, 0, 1),
                                            dt / ll)

                    rho1_r = rhoY_r
                    rho1_i = rhoY_i

                    rho2_r += rho1_r
                    rho2_i += rho1_i

                rho1_r = rho2_r
                rho1_i = rho2_i

            if use_gpu & torch.cuda.is_available():
                rho2_sr = rho2_r.cpu()
                rho2_si = rho2_i.cpu()
            else:
                rho2_sr = rho2_r
                rho2_si = rho2_i

            pr.data[indx, :, :] = rho2_sr.numpy() + 1j * rho2_si.numpy()
            indx += 1

        qr.log_detail("...DONE")
        return pr
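
The scheme implemented above (and in all the variants below) is a truncated
Taylor expansion of the short-time propagator: rho(t+dt) = exp(dt*Liou) rho(t)
is approximated by sum_{l=0}^{L} (dt^l / l!) Liou^l rho(t), built iteratively
by applying (dt/l)*Liou to a running term and accumulating the partial sums.
A minimal self-contained sketch of the idea for a bare Hamiltonian Liouvillian
(no relaxation part, complex numpy, hbar = 1; illustrative only, not the
library's code):

import numpy

def short_exp_step(rho, HH, dt, L=4):
    """One short-exponential step rho(t) -> rho(t + dt), expanded to order L."""
    rho1 = rho.copy()   # running term: (dt*Liou)^l / l! applied to rho
    rho2 = rho.copy()   # accumulated sum, starting with the l = 0 term
    for ll in range(1, L + 1):
        # multiplying by dt/ll at order ll builds up the dt^l / l! factor
        rho1 = -(1j * dt / ll) * (numpy.dot(HH, rho1) - numpy.dot(rho1, HH))
        rho2 += rho1
    return rho2

In the methods of this listing, the inner loop over mm adds the relaxation
contribution of the operators K_m and \Lambda_m to each order in the same way.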
Example #2
    def _implementation(self, ham, sbi):
        """ Reference implementation, completely in Python
        
        Implementation of the Redfield relaxation tensor according to

        V. May and O. Kuehn, Charge and Energy Transfer Dynamics in Molecular
        Systems, Wiley-VCH, Berlin, 2000, 1st edition, Section 3.8.2.
        In particular we refer to Eq. (3.8.13) on page 132.

        We assume the system-bath interaction operator in the form of
        Eq. (3.5.30), with the bath part specified through two-point
        correlation functions. We construct the operators K_{m} introduced
        in Eq. (3.5.30) and the operators \Lambda_{m} of Eq. (3.8.11).

        We do not delete the imaginary part of the tensor (as is done later
        in Section 3.8.3 to obtain the so-called "Multi-level Redfield
        Equations"); such a deletion can be performed manually afterwards.

        """
        
        qr.log_detail("Reference time-independent Redfield tensor calculation")
        #print("Reference Redfield implementation ...")
        #
        # dimension of the Hamiltonian (includes excitons
        # with all multiplicities specified at its creation)
        #
        Na = ham.dim
        
        # time axis
        ta = sbi.TimeAxis
        
        #
        # is this beyond single excitation band?
        #
        multi_ex = False
        
        # figure out if the aggregate specifies more than one exciton band
        if sbi.aggregate is not None:
            agg = sbi.aggregate
            if agg.mult > 1:
                multi_ex = True 
        
        #
        # shorten the interval of integration if a cut-off time is set
        #
        if self._has_cutoff_time:
            # index of the cut-off time on the time axis
            tcut = ta.nearest(self.cutoff_time)
            # select the section of the time axis up to the cut-off time
            tm = ta.data[0:tcut]
            # length of the section corresponds to the index of cut-off time
            length = tcut
        else:
            # if cut-off time is not set then we take the whole time axis
            tm = ta.data
            # and the length corresponds to the length of the time axis
            length = ta.length

        #
        # Get eigenenergies and transformation matrix of the Hamiltonian
        #
        if True:
            # FIXME: here we need to access ham._data (we want to protect basis)
            #
            # THIS ASSUMES WE ARE IN SITE BASIS
            # FIXME: devise a mechanism to ensure this!!!!
            #
            hD, SS = numpy.linalg.eigh(ham.data)   
               
        #
        #  Find all transition frequencies
        # 
        Om = numpy.zeros((Na, Na))
        for a in range(Na):
            for b in range(Na):
                Om[a,b] = hD[a] - hD[b]
                
        # number of baths - one per monomer            
        Nb = sbi.N

        #
        # Site K_m operators 
        #

        Km = numpy.zeros((Nb, Na, Na), dtype=numpy.float64) 
        # Transform site operators       
        S1 = scipy.linalg.inv(SS)
        #FIXME: SBI should also be basis controlled
        for ns in range(Nb): 
            Km[ns,:,:] = numpy.dot(S1, numpy.dot(sbi.KK[ns,:,:],SS))
        
        #
        # \Lambda_m operator
        #
        
        # Integrals of correlation functions from the set      
        Lm = numpy.zeros((Nb, Na, Na), dtype=numpy.complex128)
        
        #######################################################################
        # PARALLELIZATION
        #######################################################################

        start_parallel_region()
        for ms in block_distributed_range(0, Nb):
            qr.log_quick("Calculating bath component", ms, "of", Nb, end="\r")

            if not multi_ex:
                ns = ms
                
                # correlation function of site ns (if ns == ms)
                # or a cross-correlation function of sites ns and ms
                
                #FIXME: reaching correct correlation function is a nightmare!!!
                rc1 = sbi.CC.get_coft(ms, ns)  
                        
                self._guts_Cmplx_Splines(ms, Lm, Km, Na, Om, length, rc1, tm)
             
        # perform reduction of Lm
        qr.log_quick()
        distributed_configuration().allreduce(Lm, operation="sum")
        close_parallel_region()

        #######################################################################
        #  END PARALLELIZATION
        #######################################################################
                        
        # create the Hermitian conjugate of \Lambda_m
        Ld = numpy.zeros((Nb, Na, Na), dtype=numpy.complex128)
        for ms in range(Nb):
            Ld[ms, :, :] += numpy.conj(numpy.transpose(Lm[ms,:,:]))        
            
        self._post_implementation(Km, Lm, Ld)
        
        qr.log_detail("... Redfield done")
Example #3
    def __propagate_short_exp_with_rel_operators(self, rhoi, L=4):
        """Integration by short exponentional expansion
        
        Integration by expanding exponential to Lth order. 
              
            
        """
        mana = Manager()
        save_pytorch = None

        legacy = mana.gen_conf.legacy_relaxation

        if mana.num_conf.gpu_acceleration:
            save_pytorch = mana.num_conf.enable_pytorch
            mana.num_conf.enable_pytorch = True

        if mana.num_conf.enable_pytorch and (not legacy):
            ret = self._propagate_SExp_RTOp_ReSymK_Re_pytorch(
                rhoi,
                self.Hamiltonian,
                self.RelaxationTensor,
                self.dt,
                use_gpu=mana.num_conf.gpu_acceleration,
                L=L)

            if save_pytorch is not None:
                mana.num_conf.enable_pytorch = save_pytorch

            return ret

        elif not legacy:
            return self._propagate_SExp_RTOp_ReSymK_Re_numpy(
                rhoi, self.Hamiltonian, self.RelaxationTensor, self.dt, L=L)

        #
        # legacy version
        #

        pr = ReducedDensityMatrixEvolution(self.TimeAxis,
                                           rhoi,
                                           name=self.propagation_name)

        rho1 = rhoi.data
        rho2 = rhoi.data

        #
        # RWA is applied here
        #
        if self.Hamiltonian.has_rwa:
            HH = self.Hamiltonian.get_RWA_data()
        else:
            HH = self.Hamiltonian.data

        qr.log_detail("PROPAGATION (short exponential with " +
                      "relaxation in operator form): order ",
                      L,
                      verbose=self.verbose)
        qr.log_detail("Using complex numpy implementation")

        try:
            Km = self.RelaxationTensor.Km  # real
            Lm = self.RelaxationTensor.Lm  # complex
            Ld = self.RelaxationTensor.Ld  # complex - obtained by transposition
            Kd = numpy.zeros(Km.shape, dtype=numpy.float64)
            Nm = Km.shape[0]
            for m in range(Nm):
                Kd[m, :, :] = numpy.transpose(Km[m, :, :])
        except AttributeError:
            raise Exception("Tensor is not in operator form")

        indx = 1

        levs = [qr.LOG_QUICK]
        verb = qr.loglevels2bool(levs)

        # after each step we apply pure dephasing (if present)
        if self.has_PDeph:

            if self.PDeph.dtype == "Lorentzian":
                expo = numpy.exp(-self.PDeph.data * self.dt)
                t0 = 0.0
            elif self.PDeph.dtype == "Gaussian":
                expo = numpy.exp(-self.PDeph.data * (self.dt**2) / 2.0)
                t0 = self.PDeph.data * self.dt
            else:
                raise Exception("Unknown dephasing type: "
                                + str(self.PDeph.dtype))

            # loop over time
            for ii in range(1, self.Nt):
                qr.printlog(" time step ",
                            ii,
                            "of",
                            self.Nt,
                            verbose=verb[0],
                            loglevel=levs[0])

                # time at the beginning of the step
                tNt = self.TimeAxis.data[indx - 1]
                #print("tNt = ", tNt)

                # steps in between saving the results
                for jj in range(0, self.Nref):

                    tt = tNt + jj * self.dt  # time right now

                    # L iterations to get the short exponential expansion
                    for ll in range(1, L + 1):

                        rhoY = -(1j * self.dt / ll) * (numpy.dot(HH, rho1) -
                                                       numpy.dot(rho1, HH))

                        for mm in range(Nm):

                            rhoY += (self.dt / ll) * (
                                numpy.dot(Km[mm, :, :],
                                          numpy.dot(rho1, Ld[mm, :, :])) +
                                numpy.dot(Lm[mm, :, :],
                                          numpy.dot(rho1, Kd[mm, :, :])) -
                                numpy.dot(
                                    numpy.dot(Kd[mm, :, :], Lm[mm, :, :]),
                                    rho1) - numpy.dot(
                                        rho1,
                                        numpy.dot(Ld[mm, :, :], Km[mm, :, :])))

                        rho1 = rhoY

                        rho2 = rho2 + rho1

                    # pure dephasing is added here
                    rho2 = rho2 * expo * numpy.exp(-t0 * tt)

                    rho1 = rho2

                pr.data[indx, :, :] = rho2
                indx += 1

        # no extra dephasing
        else:

            # loop over time
            for ii in range(1, self.Nt):
                qr.printlog(" time step ",
                            ii,
                            "of",
                            self.Nt,
                            verbose=verb[0],
                            loglevel=levs[0])

                # steps in between saving the results
                for jj in range(0, self.Nref):

                    # L iterations to get the short exponential expansion
                    for ll in range(1, L + 1):

                        rhoY = -(1j * self.dt / ll) * (numpy.dot(HH, rho1) -
                                                       numpy.dot(rho1, HH))

                        for mm in range(Nm):

                            rhoY += (self.dt / ll) * (
                                numpy.dot(Km[mm, :, :],
                                          numpy.dot(rho1, Ld[mm, :, :])) +
                                numpy.dot(Lm[mm, :, :],
                                          numpy.dot(rho1, Kd[mm, :, :])) -
                                numpy.dot(
                                    numpy.dot(Kd[mm, :, :], Lm[mm, :, :]),
                                    rho1) - numpy.dot(
                                        rho1,
                                        numpy.dot(Ld[mm, :, :], Km[mm, :, :])))

                        rho1 = rhoY

                        rho2 = rho2 + rho1

                    rho1 = rho2

                pr.data[indx, :, :] = rho2
                indx += 1

        qr.log_detail("...DONE")

        if self.Hamiltonian.has_rwa:
            pr.is_in_rwa = True

        return pr
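
The two dephasing prefactors computed before the time loop follow from
applying the cumulative dephasing envelope step by step. Writing gamma for
self.PDeph.data and dt for the step, a Gaussian envelope exp(-gamma t^2 / 2)
changes between t and t + dt by the factor

    \frac{e^{-\gamma (t + dt)^2 / 2}}{e^{-\gamma t^2 / 2}}
        = e^{-\gamma \, dt^2 / 2} \; e^{-\gamma \, t \, dt},

which is exactly expo * numpy.exp(-t0 * tt) with expo = exp(-gamma dt^2 / 2)
and t0 = gamma * dt; for a Lorentzian envelope exp(-gamma t) the per-step
factor is the constant exp(-gamma dt), hence t0 = 0.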
Example #4
    def _propagate_SExp_RTOp_ReSymK_Re_numpy(self, rhoi, Ham, RT, dt, L=4):
        """Integration by short exponentional expansion
        
        Integration by expanding exponential (_SExp_) to Lth order. 
        This is a numpy (_numpy) implementation with real (_Re_) matrices
        for  a system part of the system-bath interaction operator  ``K``
        in a form of real symmetric operator (ReSymK). The relaxation tensor
        is assumed in form of a set of operators (_RTOp_)
              
            
        """

        Nref = self.Nref
        Nt = self.Nt
        verbose = self.verbose
        timea = self.TimeAxis
        prop_name = self.propagation_name

        # no self beyond this point

        qr.log_detail("PROPAGATION (short exponential with " +
                      "relaxation in operator form): order ",
                      L,
                      verbose=verbose)
        qr.log_detail("Using real valued numpy implementation")

        pr = ReducedDensityMatrixEvolution(timea, rhoi, name=prop_name)

        # copies: numpy.real/imag return views of rhoi.data, and the
        # buffers below are updated in place
        rho1_r = numpy.real(rhoi.data).copy()
        rho2_r = numpy.real(rhoi.data).copy()
        rho1_i = numpy.imag(rhoi.data).copy()
        rho2_i = numpy.imag(rhoi.data).copy()

        HH = Ham.data

        try:
            Km = RT.Km                # K_m operators (real)
            Lm_r = numpy.real(RT.Lm)  # Re of \Lambda_m
            Lm_i = numpy.imag(RT.Lm)  # Im of \Lambda_m
            Nm = Km.shape[0]
        except AttributeError:
            raise Exception("Tensor is not in operator form")

        indx = 1

        # verbosity inside loops
        levs = [qr.LOG_QUICK]
        verb = qr.loglevels2bool(levs, verbose=self.verbose)

        # after each step we apply pure dephasing (if present)
        if self.has_PDeph:

            # loop over time
            for ii in range(1, Nt):
                qr.printlog("time step ",
                            ii,
                            "of",
                            Nt,
                            verbose=verb[0],
                            loglevel=levs[0],
                            end="\r")

                # steps in between saving the results
                for jj in range(Nref):

                    # L iterations to get the short exponential expansion
                    for ll in range(1, L + 1):

                        A = numpy.dot(HH, rho1_i)
                        B = numpy.dot(HH, rho1_r)
                        rhoY_r = (dt / ll) * (A + numpy.transpose(A))
                        rhoY_i = -(dt / ll) * (B - numpy.transpose(B))

                        for mm in range(Nm):

                            a = numpy.dot(Lm_r[mm, :, :], rho1_r)
                            A = a - numpy.transpose(a)
                            b = numpy.dot(Lm_i[mm, :, :], rho1_i)
                            B = b - numpy.transpose(b)
                            c = numpy.dot(Lm_r[mm, :, :], rho1_i)
                            C = -(c + numpy.transpose(c))
                            d = numpy.dot(Lm_i[mm, :, :], rho1_r)
                            D = d + numpy.transpose(d)

                            E = B - A
                            F = C - D

                            A = numpy.dot(Km[mm, :, :], E)
                            B = numpy.dot(Km[mm, :, :], F)
                            rhoY_r += (dt / ll) * (A + numpy.transpose(A))
                            rhoY_i += (dt / ll) * (B - numpy.transpose(B))

                        rho1_r = rhoY_r
                        rho1_i = rhoY_i

                        rho2_r += rho1_r
                        rho2_i += rho1_i

                        rho2_r = rho2_r * numpy.exp(-self.PDeph.data * dt)
                        rho2_i = rho2_i * numpy.exp(-self.PDeph.data * dt)

                    rho1_r = rho2_r
                    rho1_i = rho2_i

                pr.data[indx, :, :] = rho2_r + 1j * rho2_i
                indx += 1

        # propagation with no extra dephasing
        else:

            # loop over time
            for ii in range(1, Nt):
                qr.printlog("time step ",
                            ii,
                            "of",
                            Nt,
                            verbose=verb[0],
                            loglevel=levs[0],
                            end="\r")

                # steps in between saving the results
                for jj in range(Nref):

                    # L iterations to get the short exponential expansion
                    for ll in range(1, L + 1):

                        A = numpy.dot(HH, rho1_i)
                        B = numpy.dot(HH, rho1_r)
                        rhoY_r = (dt / ll) * (A + numpy.transpose(A))
                        rhoY_i = -(dt / ll) * (B - numpy.transpose(B))

                        for mm in range(Nm):

                            a = numpy.dot(Lm_r[mm, :, :], rho1_r)
                            A = a - numpy.transpose(a)
                            b = numpy.dot(Lm_i[mm, :, :], rho1_i)
                            B = b - numpy.transpose(b)
                            c = numpy.dot(Lm_r[mm, :, :], rho1_i)
                            C = -(c + numpy.transpose(c))
                            d = numpy.dot(Lm_i[mm, :, :], rho1_r)
                            D = d + numpy.transpose(d)

                            E = B - A
                            F = C - D

                            A = numpy.dot(Km[mm, :, :], E)
                            B = numpy.dot(Km[mm, :, :], F)
                            rhoY_r += (dt / ll) * (A + numpy.transpose(A))
                            rhoY_i += (dt / ll) * (B - numpy.transpose(B))

                        rho1_r = rhoY_r
                        rho1_i = rhoY_i

                        rho2_r += rho1_r
                        rho2_i += rho1_i

                    rho1_r = rho2_r
                    rho1_i = rho2_i

                pr.data[indx, :, :] = rho2_r + 1j * rho2_i
                indx += 1

        qr.log_detail()
        qr.log_detail("...DONE")

        return pr
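
The real-valued update rules used twice above follow from splitting
rho = rho_r + i rho_i, where Hermiticity of rho makes rho_r symmetric and
rho_i antisymmetric, while H (and each K_m) is real and symmetric. With
A = H rho_i, B = H rho_r and hbar = 1,

    \mathrm{Re}\big(-i[H, \rho]\big) = H \rho_i - \rho_i H = A + A^{T},
    \qquad
    \mathrm{Im}\big(-i[H, \rho]\big) = -(H \rho_r - \rho_r H) = -(B - B^{T}),

which is precisely rhoY_r = (dt/ll)*(A + A.T) and
rhoY_i = -(dt/ll)*(B - B.T); the relaxation terms are split into the E and F
combinations of real and imaginary parts in the same way.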
Example #5
# set bath correlation functions to the molecules
for i_m in range(N_molecules):
    mols[i_m].set_transition_environment((0, 1), cf)

# aggregate of molecules
agg = qr.Aggregate(mols)

agg.set_coupling_by_dipole_dipole()

# Building the aggregate
qr.log_report("Building aggregate")
agg.build()
qr.log_report("...done")

qr.log_detail("Resonance coupling matrix: ")
qr.log_detail(qr.convert(agg.resonance_coupling, "int", "1/cm"),
              use_indent=False)

# Dimension of the problem
HH = agg.get_Hamiltonian()
Nr = HH.dim
qr.log_detail("Hamiltonian has a rank:", Nr)

benchmark_report["Dimension"] = Nr

qr.log_report("Calculating Relaxation tensor:")
t1 = time.time()
(RT, ham) = agg.get_RelaxationTensor(timea,
                                     relaxation_theory="standard_Redfield",
                                     as_operators=True)
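
The relaxation tensor returned in operator form (as_operators=True) is what
the propagator methods of Examples #1-#4 consume. A hedged sketch of the next
step, assuming quantarhei's ReducedDensityMatrixPropagator and
ReducedDensityMatrix API (the timing lines follow the benchmark pattern above;
the initial condition chosen here is purely illustrative):

t2 = time.time()
benchmark_report["Tensor calculation"] = t2 - t1
qr.log_report("...done in", t2 - t1, "s")

# propagate an initial density matrix with the tensor calculated above
rhoi = qr.ReducedDensityMatrix(dim=Nr)   # illustrative initial condition
rhoi.data[Nr - 1, Nr - 1] = 1.0          # populate the highest exciton state
prop = qr.ReducedDensityMatrixPropagator(timea, ham, RT)
rhot = prop.propagate(rhoi)              # dispatches to the methods above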