Code example #1
File: bse.py  Project: TRIQS/tprf
def get_chi0_nk_at_specific_w(g_wk, nw_index=1, nwf=None):
    r""" Compute the generalized bare lattice susceptibility 
    :math:`\chi^{0}_{\bar{a}b\bar{c}d}(i\omega_{n=\mathrm{nw\_index}}, i\nu_n, \mathbf{k})` from the single-particle
    Green's function :math:`G_{a\bar{b}}(i\nu_n, \mathbf{k})` for a specific :math:`i\omega_{n=\mathrm{nw\_index}}`.

    Parameters
    ----------

    g_wk : Gf,
           Single-particle Green's function :math:`G_{a\bar{b}}(i\nu_n, \mathbf{k})`.
    nw_index : int,
               The bosonic Matsubara frequency index :math:`i\omega_{n=\mathrm{nw\_index}}`
               at which :math:`\chi^0` is calculated.
    nwf : int,
          Number of fermionic frequencies in :math:`\chi^0`.    

    Returns
    -------

    chi0_nk : Gf,
               Generalized bare lattice susceptibility
               :math:`\chi^{0}_{\bar{a}b\bar{c}d}(i\omega_{n=\mathrm{nw\_index}}, i\nu_n, \mathbf{k})`.
    """

    fmesh = g_wk.mesh.components[0]
    kmesh = g_wk.mesh.components[1]

    if nwf is None:
        nwf = len(fmesh) // 2

    mpi.barrier()
    mpi.report('g_wk ' + str(g_wk[Idx(2), Idx(0, 1, 2)][0, 0]))
    n = np.sum(g_wk.data) / len(kmesh)
    mpi.report('n ' + str(n))
    mpi.barrier()

    mpi.report('--> g_wr from g_wk')
    g_wr = fourier_wk_to_wr(g_wk)

    mpi.report('--> chi0_nr from g_wr')
    chi0_nr = chi0_nr_from_gr_PH_at_specific_w(nw_index=nw_index,
                                               nn=nwf,
                                               g_nr=g_wr)
    del g_wr

    mpi.report('--> chi0_wnk from chi0_wnr')
    # Create a 'fake' bosonic mesh to be able to use 'chi0q_from_chi0r'
    chi0_wnr = add_fake_bosonic_mesh(chi0_nr)
    del chi0_nr

    chi0_wnk = chi0q_from_chi0r(chi0_wnr)
    del chi0_wnr

    chi0_nk = chi0_wnk[Idx(0), :, :]
    del chi0_wnk

    return chi0_nk
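
A minimal usage sketch (hedged: the square-lattice model, the parameter values, and the TBLattice/get_kmesh calls assume a recent triqs_tprf API and are illustrative, not part of the source):

import numpy as np
from triqs.gf import MeshImFreq
from triqs_tprf.tight_binding import TBLattice
from triqs_tprf.lattice import lattice_dyson_g0_wk

# Nearest-neighbour square lattice with a single orbital
t = 1.0
H = TBLattice(units=[(1, 0, 0), (0, 1, 0)],
              hopping={(+1, 0): -t * np.eye(1), (-1, 0): -t * np.eye(1),
                       (0, +1): -t * np.eye(1), (0, -1): -t * np.eye(1)},
              orbital_positions=[(0, 0, 0)])

e_k = H.fourier(H.get_kmesh(n_k=(8, 8, 1)))
wmesh = MeshImFreq(beta=10.0, S='Fermion', n_iw=64)
g0_wk = lattice_dyson_g0_wk(mu=0.0, e_k=e_k, mesh=wmesh)

# chi0 at bosonic frequency index 0, keeping 32 fermionic frequencies
chi0_nk = get_chi0_nk_at_specific_w(g0_wk, nw_index=0, nwf=32)
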
Code example #2
        def HT(res):
            import triqs.utility.mpi as mpi
            # First compute the eps_hat array
            eps_hat = epsilon_hat(
                self.dos.eps) if epsilon_hat else numpy.array(
                    [x * numpy.identity(N1) for x in self.dos.eps])
            assert eps_hat.shape[0] == self.dos.eps.shape[
                0], "epsilon_hat function behaves incorrectly"
            assert eps_hat.shape[1] == eps_hat.shape[
                2], "epsilon_hat function behaves incorrectly (result not a square matrix)"
            assert N1 == eps_hat.shape[
                1], "Size of Sigma and of epsilon_hat mismatch"

            res.zero()

            # Perform the sum over eps[i]
            tmp, tmp2 = res.copy(), res.copy()
            tmp << iOmega_n + mu + eta * 1j
            if not Sigma_fnt:
                tmp -= Sigma
            if field is not None: tmp -= field

            # I slice all the arrays on the node. Cf reduce operation below.
            for d, e_h, e in zip(*[
                    mpi.slice_array(A)
                    for A in [self.rho_for_sum, eps_hat, self.dos.eps]
            ]):
                tmp2.copy_from(tmp)
                tmp2 -= e_h
                if Sigma_fnt: tmp2 -= Sigma(e)
                tmp2.invert()
                tmp2 *= d
                res += tmp2
            # Sum the res GF over all nodes and return the result on all nodes.
            # Cf. Boost.mpi.python, collective communicator, for documentation.
            # The point is that res is picklable, hence can be transmitted between nodes without further code.
            res << mpi.all_reduce(mpi.world, res, lambda x, y: x + y)
            mpi.barrier()
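
The slice-and-reduce pattern above is the core of this routine, so here it is isolated in a self-contained sketch (plain numpy; the toy DOS and the names are illustrative only):

import numpy as np
import triqs.utility.mpi as mpi

# Toy Hilbert transform: sum w_i / (z - eps_i) over a discretized DOS,
# each MPI rank handling only its slice of the arrays.
eps = np.linspace(-1.0, 1.0, 1001)
weights = np.full_like(eps, 1.0 / len(eps))
z = 0.0 + 0.1j

acc = 0.0 + 0.0j
for w, e in zip(mpi.slice_array(weights), mpi.slice_array(eps)):
    acc += w / (z - e)  # per-rank partial sum

# Combine the partial sums; every rank ends up with the full result.
acc = mpi.all_reduce(mpi.world, acc, lambda x, y: x + y)
mpi.barrier()
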
Code example #3
    def __call__(self,
                 Sigma,
                 mu=0,
                 eta=0,
                 field=None,
                 epsilon_hat=None,
                 result=None,
                 selected_blocks=()):
        """
        - Computes:
           result <- \sum_k (\omega + \mu - field - t(k) - \Sigma(k, \omega))^{-1}
           If result is None, it returns a new GF with the result;
           otherwise, result must be a GF, into which the calculation is done, and which is then returned.
           (This allows chained calls: SK(mu=mu, Sigma=Sigma, result=G).total_density(),
           which computes the k-sum into G and returns the density of G.)

        - Sigma can be an X, a function k -> X, or a function k, eps -> X, where:
            - k is expected to be a 1d numpy array of floats of size self.dim,
              containing the k vector in the basis of the RBZ (i.e. -0.5 < k_i < 0.5)
            - eps is t(k)
            - X is anything such that X[BlockName] can be added to/subtracted from a GFBloc, for BlockName in selected_blocks;
              e.g. X can be a BlockGf (with at least the selected_blocks), or a dictionary BlockName -> array,
              if the array has the same dimension as the GF blocks (for example to add a static Sigma).

        - field: any k-independent object to be added to the GF

        - epsilon_hat: a function of eps_k returning a matrix with the same dimensions as Sigma

        - selected_blocks: The calculation is done with the SAME t(k) for all blocks. If this tuple is not empty,
          only the blocks in this list are calculated.
          e.g. G and Sigma have block indices 'up' and 'down':
               if selected_blocks == (): both 'up' and 'down' are calculated;
               if selected_blocks == ('up',): only 'up' is calculated, and 'down' is 0.


        """

        assert selected_blocks == (), "selected_blocks not supported for now"
        #S = Sigma.view_selected_blocks(selected_blocks) if selected_blocks else Sigma
        #Gres = result if result else Sigma.copy()
        #G = Gres.view_selected_blocks(selected_blocks) if selected_blocks else Gres

        # check Sigma
        # case 1) Sigma is a BlockGf
        if isinstance(Sigma, BlockGf):
            model = Sigma
            Sigma_fnt = False
        # case 2) Sigma is a function returning a BlockGf
        else:
            assert callable(
                Sigma), "If Sigma is not a BlockGf it must be a function"
            Sigma_Nargs = len(inspect.getfullargspec(Sigma)[0])
            assert Sigma_Nargs <= 2, "Sigma must be a function of k, or of k and epsilon"
            if Sigma_Nargs == 1:
                model = Sigma(self.bz_points[0])
            elif Sigma_Nargs == 2:
                model = Sigma(self.bz_points[0], self.hopping[0])
            Sigma_fnt = True

        G = result if result else model.copy()
        assert isinstance(G, BlockGf), "G must be a BlockGf"

        # check input
        assert self.orthogonal_basis, "Local_G: must be orthogonal. non ortho cases not checked."
        assert len(list(set([g.target_shape[0] for i, g in G]))) == 1
        assert self.bz_weights.shape[0] == self.n_kpts(), "Internal Error"
        no = list(set([g.target_shape[0] for i, g in G]))[0]

        # Initialize
        G.zero()
        tmp, tmp2 = G.copy(), G.copy()
        mupat = mu * numpy.identity(no, numpy.complex128)
        tmp << iOmega_n
        if field is not None: tmp -= field
        if not Sigma_fnt: tmp -= Sigma  # subtract Sigma once and for all

        # Loop on k points...
        for w, k, eps_k in zip(*[
                mpi.slice_array(A)
                for A in [self.bz_weights, self.bz_points, self.hopping]
        ]):

            eps_hat = epsilon_hat(eps_k) if epsilon_hat else eps_k
            tmp2 << tmp
            tmp2 -= tmp2.n_blocks * [eps_hat - mupat]

            if Sigma_fnt:
                if Sigma_Nargs == 1: tmp2 -= Sigma(k)
                elif Sigma_Nargs == 2: tmp2 -= Sigma(k, eps_k)

            tmp2.invert()
            tmp2 *= w
            G += tmp2

        G << mpi.all_reduce(mpi.world, G, lambda x, y: x + y)
        mpi.barrier()

        return G
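
A hedged usage sketch of the two call paths the docstring describes (SK is assumed to be an instance of this sum-over-k class and Sigma a compatible BlockGf; neither is constructed here):

# Fresh result GF:
G = SK(Sigma=Sigma, mu=0.2)

# Chained form from the docstring: accumulate into an existing G,
# then immediately evaluate its density.
n = SK(Sigma=Sigma, mu=0.2, result=G).total_density()
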
Code example #4
    def run(self):
        """
        """
        mpi.barrier()
        if mpi.size == 1:  # single machine. Avoid the fork
            while not self.finished():
                n = next(self)
                if n is not None:
                    self.treate(self.the_function(n), 0)
            return

        # Code for multiprocessor machines
        RequestList, pid = [], 0  # the pid of the child on the master
        node_running, node_stopped = mpi.size * [False], mpi.size * [False]

        if mpi.rank == 0:
            while not self.finished() or pid or any(node_running):
                # Handle the requests that have finished
                def keep_request(r):
                    T = r.test()
                    if T is None: return True
                    value = T[0]
                    if value is not None: self.treate(*value)
                    node_running[T[1].source] = False
                    return False

                RequestList = list(filter(keep_request, RequestList))
                # send new calculation to the nodes or "stop" them
                for node in [
                        n for n in range(1, mpi.size)
                        if not (node_running[n] or node_stopped[n])
                ]:
                    mpi.send(self.finished(), node)
                    if not self.finished():
                        mpi.send(next(self),
                                 node)  # send the data for the computation
                        node_running[node] = True
                        RequestList.append(mpi.irecv(node))  #Post the receive
                    else:
                        node_stopped[node] = True

                # Look if the child process on the master has self.finished.
                if not pid or os.waitpid(pid, os.WNOHANG)[0]:
                    if pid:
                        RR = pickle.load(open("res_master", 'rb'))
                        if RR is not None: self.treate(*RR)
                    if not self.finished():
                        pid = os.fork()
                        currently_calculated_by_master = next(self)
                        if pid == 0:  # we are on the child
                            if currently_calculated_by_master:
                                res = self.the_function(
                                    currently_calculated_by_master)
                            else:
                                res = None
                            pickle.dump((res, mpi.rank),
                                        open('res_master', 'wb'))
                            os._exit(0)  # Cf python doc. Used for child only.
                    else:
                        pid = 0
                if pid:
                    time.sleep(
                        self.SleepTime
                    )  # so that most of the time goes to the actual calculation on the master

        else:  # not master
            while not mpi.recv(0):  # master first sends a "finished" flag
                omega = mpi.recv(0)
                if omega is None:
                    res = None
                else:
                    res = self.the_function(omega)
                mpi.send((res, mpi.rank), 0)

        mpi.barrier()
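
For comparison, a statically sliced alternative to this dynamic master/worker scheduler, using the same triqs mpi helpers (a sketch assuming each task returns a scalar; parallel_map_static is an illustrative name, not part of the source):

import numpy as np
import triqs.utility.mpi as mpi

def parallel_map_static(f, tasks):
    # Each rank fills its slice of the result array; the entries left
    # at zero by the other ranks are supplied by the sum-reduction.
    res = np.zeros(len(tasks))
    for i in mpi.slice_array(np.arange(len(tasks))):
        res[i] = f(tasks[i])
    res = mpi.all_reduce(mpi.world, res, lambda x, y: x + y)
    mpi.barrier()
    return res
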
Code example #5
def run_all(vasp_pid, dmft_cycle, cfg_file, n_iter, n_iter_dft, vasp_version):
    """
    """
    mpi.report("  Waiting for VASP lock to appear...")
    while not is_vasp_lock_present():
        time.sleep(1)

    vasp_running = True

    iteration = 0
    while vasp_running:
        if debug: print(bcolors.RED + "rank %s" % (mpi.rank) + bcolors.ENDC)
        mpi.report("  Waiting for VASP lock to disappear...")
        mpi.barrier()
        while is_vasp_lock_present():
            time.sleep(1)
            if not is_vasp_running(vasp_pid):
                mpi.report("  VASP stopped")
                vasp_running = False
                break


        if debug:
            print(bcolors.MAGENTA + "rank %s" % (mpi.rank) + bcolors.ENDC)
        if debug:
            print(bcolors.BLUE + "plovasp: rank %s" % (mpi.rank) +
                  bcolors.ENDC)
        if mpi.is_master_node():
            converter.generate_and_output_as_text(cfg_file, vasp_dir='./')
            # Read energy from OSZICAR
            dft_energy = get_dft_energy()
        mpi.barrier()

        if debug: print(bcolors.GREEN + "rank %s" % (mpi.rank) + bcolors.ENDC)
        corr_energy, dft_dc = dmft_cycle()
        mpi.barrier()

        if mpi.is_master_node():
            total_energy = dft_energy + corr_energy - dft_dc
            print()
            print("=" * 80)
            print("  Total energy: ", total_energy)
            print("  DFT energy: ", dft_energy)
            print("  Corr. energy: ", corr_energy)
            print("  DFT DC: ", dft_dc)
            print("=" * 80)
            print()

        # check if we should do additional VASP calculations
        # in the standard VASP version, VASP writes out GAMMA itself
        # so that if we want to keep GAMMA fixed we have to copy it to
        # GAMMA_recent and copy it back after VASP has completed an iteration
        # if we are using a hacked Version of VASP where the write out is
        # disabled we can skip this step.
        # the hack consists of removing the call of LPRJ_LDApU in VASP src file
        # electron.F around line 644
        iter_dft = 0

        if vasp_version == 'standard':
            copyfile(src='GAMMA', dst='GAMMA_recent')
        while iter_dft < n_iter_dft:
            if mpi.is_master_node():
                open('./vasp.lock', 'a').close()
            while is_vasp_lock_present():
                time.sleep(1)
                if not is_vasp_running(vasp_pid):
                    mpi.report("  VASP stopped")
                    vasp_running = False
                    break
            iter_dft += 1
            if vasp_version == 'standard':
                copyfile(src='GAMMA_recent', dst='GAMMA')
        iteration += 1
        # Tell VASP to stop if the maximum number of iterations is reached
        if iteration == n_iter:
            print("\n  Maximum number of iterations reached.")
            print("  Aborting VASP iterations...\n")
            with open('STOPCAR', 'wt') as f_stop:
                f_stop.write("LABORT = .TRUE.\n")
    if mpi.is_master_node():
        total_energy = dft_energy + corr_energy - dft_dc
        with open('TOTENERGY', 'w') as f:
            f.write("  Total energy: %s\n" % (total_energy))
            f.write("  DFT energy: %s\n" % (dft_energy))
            f.write("  Corr. energy: %s\n" % (corr_energy))
            f.write("  DFT DC: %s\n" % (dft_dc))
            f.write("  Energy correction: %s\n" % (corr_energy - dft_dc))

    mpi.report("***Done")
Code example #6
File: maxent_util.py  Project: TRIQS/maxent
def get_G_w_from_A_w(A_w,
                     w_points,
                     np_interp_A=None,
                     np_omega=2000,
                     w_min=-10,
                     w_max=10,
                     broadening_factor=1.0):
    r""" Use Kramers-Kronig to determine the retarded Green function :math:`G(\omega)`

    This calculates :math:`G(\omega)` from the spectral function :math:`A(\omega)`.
    A numerical broadening of :math:`bf * i\Delta\omega`
    is used, with an adjustable broadening factor bf (default 1).
    This function normalizes :math:`A(\omega)`.
    MPI is used to parallelize the calculation over frequency points.

    Parameters
    ----------
    A_w : array
        Real-frequency spectral function.
    w_points : array
        Real-frequency grid points.
    np_interp_A : int
        Number of grid points A_w should be interpolated on before
        G_w is calculated. The interpolation is performed on a linear
        grid with np_interp_A points from min(w_points) to max(w_points).
    np_omega : int
        Number of equidistant grid points of the output Green function.
    w_min : float
        Start point of output Green function.
    w_max : float
        End point of output Green function.
    broadening_factor : float
        Factor multiplying the broadening :math:`i\Delta\omega`

    Returns
    -------
    G_w : GfReFreq
        TRIQS retarded Green function.
    """

    shape_A = np.shape(A_w)

    if len(shape_A) == 1:
        indices = [0]
        matrix_valued = False
    elif (len(shape_A) == 3) and (shape_A[0] == shape_A[1]):
        indices = list(range(0, shape_A[0]))
        matrix_valued = True
    else:
        raise ValueError('A_w has wrong shape, must be (n_w,) or (n, n, n_w)')

    if w_min > w_max:
        raise ValueError('w_min must be smaller than w_max')

    if np_interp_A:
        w_points_interp = np.linspace(np.min(w_points),
                                      np.max(w_points), np_interp_A)
        if matrix_valued:
            A_temp = np.zeros((shape_A[0], shape_A[1], np_interp_A), dtype=complex)
            for i in range(shape_A[0]):
                for j in range(shape_A[1]):
                    A_temp[i, j, :] = np.interp(w_points_interp,
                                                w_points, A_w[i, j, :])
            A_w = A_temp
        else:
            A_w = np.interp(w_points_interp, w_points, A_w)
        w_points = w_points_interp

    G_w = GfReFreq(indices=indices, window=(w_min, w_max), n_points=np_omega)
    G_w.zero()

    iw_points = np.array(list(range(len(w_points))))

    for iw in mpi.slice_array(iw_points):
        domega = (w_points[min(len(w_points) - 1, iw + 1)] -
                  w_points[max(0, iw - 1)]) * 0.5
        if matrix_valued:
            for i in range(shape_A[0]):
                for j in range(shape_A[1]):
                    G_w[i, j] << G_w[i, j] + A_w[i, j, iw] * \
                        inverse(Omega - w_points[iw] + 1j * domega * broadening_factor) * domega
        else:
            G_w << G_w + A_w[iw] * \
                inverse(Omega - w_points[iw] + 1j * domega * broadening_factor) * domega

    G_w << mpi.all_reduce(mpi.world, G_w, lambda x, y: x + y)
    mpi.barrier()

    return G_w
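
A minimal usage sketch with a normalized Gaussian spectral function (all values illustrative):

import numpy as np

w = np.linspace(-10.0, 10.0, 501)
A = np.exp(-w**2) / np.sqrt(np.pi)  # normalized Gaussian A(w)

G_w = get_G_w_from_A_w(A, w, np_interp_A=1000)

# Consistency check: -Im G(w) / pi should reproduce a broadened A(w)
# on the output mesh of G_w.
A_back = -G_w.data[:, 0, 0].imag / np.pi
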
Code example #7
    h = Histogram(0, 10)
    h << x

else:
    h, h_ref = None, None
    
h = mpi.bcast(h)
h_ref = mpi.bcast(h_ref)

for rank in range(mpi.size):
    if rank == mpi.rank:

        print('-'*72)
        print('rank =', mpi.rank)
        print('h =\n', h)
        print('h_ref =\n', h_ref)

        # -- Compare h and h_ref
        pts = np.array([ int(h.mesh_point(idx)) for idx in range(len(h))])

        for pt, val in zip(pts, h.data):
            val = int(val)
            if val > 0:
                assert val == h_ref[pt]
            else:
                assert pt not in h_ref
        
    mpi.barrier()
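
The broadcast idiom this test exercises, reduced to its simplest form (a sketch with an illustrative payload):

import triqs.utility.mpi as mpi

data = {'payload': 42} if mpi.is_master_node() else None
data = mpi.bcast(data)  # every rank now holds the master's object
assert data['payload'] == 42
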

Code example #8
}

sol_param = {          # Impurity solver parameters:
  "Lambda": 2.0,       #  logarithmic discretization parameter (2.0 is a good choice)
  "Nz": 4,             #  number of interleaved discretization meshes (4 is a good choice for Lambda=2.0)
  "Tmin": 1e-5,        #  lowest temperature/energy scale considered (controls the length of the Wilson chain, choose Tmin<T)
  "keep": 10000,       #  maximum number of states kept in NRG truncation (ensure this number is high enough!)
  "keepenergy": 10.0,  #  maximum energy of states kept in NRG truncation (10.0 is a good choice)
}

solver = dmft.Hubbard_solver(param, dmft_param)
solver.setup_impurity_solver(mesh_param, sol_param)

try:
  solver.solve()

except dmft.Converged as c:
  if mpi.is_master_node(): 
    print("\nConverged:\n%s" % c.message)
    dmft.save_BlockA("A", solver.Gloc)  # converged spectral function for quick plotting

except dmft.FailedToConverge as c:
  if mpi.is_master_node():
    print("\nFailed to converge:\n%s" % c.message)  # ... but restart is possible from the checkpoint file

except dmft.ForcedStop:
  if mpi.is_master_node():
    print("\nStopped by user request")

mpi.barrier()  # Synchronized exit
Code example #9
File: bse.py  Project: TRIQS/tprf
def solve_lattice_bse_at_specific_w(g_wk, gamma_wnn, nw_index):
    r""" Compute the generalized lattice susceptibility 
    :math:`\chi_{\bar{a}b\bar{c}d}(i\omega_{n=\mathrm{nw\_index}}, \mathbf{k})` using the Bethe-Salpeter 
    equation (BSE) for a specific :math:`i\omega_{n=\mathrm{nw\_index}}`.


    Parameters
    ----------

    g_wk : Gf,
           Single-particle Green's function :math:`G_{a\bar{b}}(i\nu_n, \mathbf{k})`.
    gamma_wnn : Gf,
                Local particle-hole vertex function 
                :math:`\Gamma_{a\bar{b}c\bar{d}}(i\omega_n, i\nu_n, i\nu_n')`.
    nw_index : int,
               The bosonic Matsubara frequency index :math:`i\omega_{n=\mathrm{nw\_index}}`
               at which the BSE is solved.

    Returns
    -------
    chi_k : Gf,
            Generalized lattice susceptibility 
            :math:`\chi_{\bar{a}b\bar{c}d}(i\omega_{n=\mathrm{nw\_index}}, \mathbf{k})`.

    chi0_k : Gf,
             Generalized bare lattice susceptibility 
             :math:`\chi^0_{\bar{a}b\bar{c}d}(i\omega_{n=\mathrm{nw\_index}}, \mathbf{k})`.
    """

    # Only use \Gamma at the specific \omega
    gamma_nn = gamma_wnn[Idx(nw_index), :, :]
    # Keep fake bosonic mesh for usability with other functions
    gamma_wnn = add_fake_bosonic_mesh(gamma_nn)

    fmesh_g = g_wk.mesh.components[0]
    kmesh = g_wk.mesh.components[1]

    bmesh = gamma_wnn.mesh.components[0]
    fmesh = gamma_wnn.mesh.components[1]

    nk = len(kmesh)
    nwf = len(fmesh) // 2
    nwf_g = len(fmesh_g) // 2

    if mpi.is_master_node():
        print(tprf_banner(), "\n")
        print('Lattice BSE with local vertex approximation at specific omega.\n')
        print('nk    =', nk)
        print('nw_index    =', nw_index)
        print('nwf   =', nwf)
        print('nwf_g =', nwf_g)
        print()

    mpi.report('--> chi0_wk_tail_corr')
    # Calculate chi0_wk up to the specific \omega
    chi0_wk_tail_corr = imtime_bubble_chi0_wk(g_wk,
                                              nw=np.abs(nw_index) + 1,
                                              save_memory=True)
    # Only use specific \omega, but put back on fake bosonic mesh
    chi0_k_tail_corr = chi0_wk_tail_corr[Idx(nw_index), :]
    chi0_wk_tail_corr = add_fake_bosonic_mesh(chi0_k_tail_corr,
                                              beta=bmesh.beta)

    chi0_nk = get_chi0_nk_at_specific_w(g_wk, nw_index=nw_index, nwf=nwf)
    # Keep fake bosonic mesh for usability with other functions
    chi0_wnk = add_fake_bosonic_mesh(chi0_nk)

    mpi.report('--> trace chi0_wnk')
    chi0_wk = chi0q_sum_nu(chi0_wnk)

    dchi_wk = chi0_wk_tail_corr - chi0_wk

    chi0_kw = Gf(mesh=MeshProduct(kmesh, bmesh),
                 target_shape=chi0_wk_tail_corr.target_shape)
    chi0_kw.data[:] = chi0_wk_tail_corr.data.swapaxes(0, 1)

    del chi0_wk
    del chi0_wk_tail_corr

    assert (chi0_wnk.mesh.components[0] == bmesh)
    assert (chi0_wnk.mesh.components[1] == fmesh)
    assert (chi0_wnk.mesh.components[2] == kmesh)

    # -- Lattice BSE calculation with built-in trace
    mpi.report('--> chi_kw from BSE')
    chi_kw = chiq_sum_nu_from_chi0q_and_gamma_PH(chi0_wnk, gamma_wnn)

    mpi.barrier()
    mpi.report('--> chi_kw from BSE (done)')

    del chi0_wnk

    mpi.report('--> chi_kw tail corrected (using chi0_wnk)')
    for k in kmesh:
        # -- account for the high-frequency tail of chi_0 (better than nothing)
        chi_kw[k, :] += dchi_wk[:, k]

    del dchi_wk

    mpi.report('--> solve_lattice_bse, done.')

    chi_k = chi_kw[:, Idx(0)]
    del chi_kw

    chi0_k = chi0_kw[:, Idx(0)]
    del chi0_kw

    return chi_k, chi0_k
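
A hedged usage sketch (gamma_wnn is assumed to come from a separate local-vertex calculation, e.g. from impurity two-particle data, and is not constructed here):

# Static (zero bosonic frequency) lattice susceptibility from the BSE:
chi_k, chi0_k = solve_lattice_bse_at_specific_w(g_wk, gamma_wnn, nw_index=0)

Solving one bosonic frequency at a time keeps the memory footprint of the fermionic-frequency-resolved chi0 bounded, which is the point of this variant.
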
Code example #10
File: bse.py  Project: TRIQS/tprf
def solve_lattice_bse(g_wk, gamma_wnn):
    r""" Compute the generalized lattice susceptibility 
    :math:`\chi_{\bar{a}b\bar{c}d}(\mathbf{k}, \omega_n)` using the Bethe-Salpeter 
    equation (BSE).

    Parameters
    ----------

    g_wk : Gf,
           Single-particle Green's function :math:`G_{a\bar{b}}(i\nu_n, \mathbf{k})`.
    gamma_wnn : Gf,
                Local particle-hole vertex function 
                :math:`\Gamma_{a\bar{b}c\bar{d}}(i\omega_n, i\nu_n, i\nu_n')`.

    Returns
    -------
    chi_kw : Gf,
             Generalized lattice susceptibility 
             :math:`\chi_{\bar{a}b\bar{c}d}(\mathbf{k}, i\omega_n)`.

    chi0_kw : Gf,
              Generalized bare lattice susceptibility 
              :math:`\chi^0_{\bar{a}b\bar{c}d}(\mathbf{k}, i\omega_n)`.
    """

    fmesh_g = g_wk.mesh.components[0]
    kmesh = g_wk.mesh.components[1]

    bmesh = gamma_wnn.mesh.components[0]
    fmesh = gamma_wnn.mesh.components[1]

    nk = len(kmesh)
    nw = (len(bmesh) + 1) // 2
    nwf = len(fmesh) // 2
    nwf_g = len(fmesh_g) // 2

    if mpi.is_master_node():
        print(tprf_banner(), "\n")
        print('Lattice BSE with local vertex approximation.\n')
        print('nk    =', nk)
        print('nw    =', nw)
        print('nwf   =', nwf)
        print('nwf_g =', nwf_g)
        print()

    mpi.report('--> chi0_wk_tail_corr')
    chi0_wk_tail_corr = imtime_bubble_chi0_wk(g_wk, nw=nw)

    mpi.barrier()
    mpi.report('B1 ' +
               str(chi0_wk_tail_corr[Idx(0), Idx(0, 0, 0)][0, 0, 0, 0]))
    mpi.barrier()

    chi0_wnk = get_chi0_wnk(g_wk, nw=nw, nwf=nwf)

    mpi.barrier()
    mpi.report('C ' + str(chi0_wnk[Idx(0), Idx(0), Idx(0, 0, 0)][0, 0, 0, 0]))
    mpi.barrier()

    mpi.report('--> trace chi0_wnk')
    chi0_wk = chi0q_sum_nu(chi0_wnk)

    mpi.barrier()
    mpi.report('D ' + str(chi0_wk[Idx(0), Idx(0, 0, 0)][0, 0, 0, 0]))
    mpi.barrier()

    dchi_wk = chi0_wk_tail_corr - chi0_wk

    chi0_kw = Gf(mesh=MeshProduct(kmesh, bmesh),
                 target_shape=chi0_wk_tail_corr.target_shape)
    chi0_kw.data[:] = chi0_wk_tail_corr.data.swapaxes(0, 1)

    del chi0_wk
    del chi0_wk_tail_corr

    assert (chi0_wnk.mesh.components[0] == bmesh)
    assert (chi0_wnk.mesh.components[1] == fmesh)
    assert (chi0_wnk.mesh.components[2] == kmesh)

    # -- Lattice BSE calculation with built-in trace
    mpi.report('--> chi_kw from BSE')
    chi_kw = chiq_sum_nu_from_chi0q_and_gamma_PH(chi0_wnk, gamma_wnn)

    mpi.barrier()
    mpi.report('--> chi_kw from BSE (done)')

    del chi0_wnk

    mpi.report('--> chi_kw tail corrected (using chi0_wnk)')
    for k in kmesh:
        # -- account for the high-frequency tail of chi_0 (better than nothing)
        chi_kw[k, :] += dchi_wk[:, k]

    del dchi_wk

    mpi.report('--> solve_lattice_bse, done.')

    return chi_kw, chi0_kw
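
A hedged usage sketch; extracting the static component follows the same Idx(0) slicing used in solve_lattice_bse_at_specific_w above:

from triqs.gf import Idx

chi_kw, chi0_kw = solve_lattice_bse(g_wk, gamma_wnn)

# Static lattice susceptibility chi(k, i w_n = 0):
chi_k_static = chi_kw[:, Idx(0)]
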
Code example #11
File: bse.py  Project: TRIQS/tprf
def get_chi0_wnk(g_wk, nw=1, nwf=None):
    r""" Compute the generalized bare lattice susceptibility 
    :math:`\chi^{0}_{\bar{a}b\bar{c}d}(i\omega_n, i\nu_n, \mathbf{k})` from the single-particle
    Green's function :math:`G_{a\bar{b}}(i\nu_n, \mathbf{k})`.

    Parameters
    ----------

    g_wk : Gf,
           Single-particle Green's function :math:`G_{a\bar{b}}(i\nu_n, \mathbf{k})`.
    nw : int,
         Number of bosonic frequencies in :math:`\chi^0`.
    nwf : int,
          Number of fermionic frequencies in :math:`\chi^0`.    

    Returns
    -------

    chi0_wnk : Gf,
               Generalized bare lattice susceptibility
               :math:`\chi^{0}_{\bar{a}b\bar{c}d}(i\omega_n, i\nu_n, \mathbf{k})`.
    """

    fmesh = g_wk.mesh.components[0]
    kmesh = g_wk.mesh.components[1]

    if nwf is None:
        nwf = len(fmesh) // 2

    mpi.barrier()
    mpi.report('g_wk ' + str(g_wk[Idx(2), Idx(0, 0, 0)][0, 0]))
    n = np.sum(g_wk.data) / len(kmesh)
    mpi.report('n ' + str(n))
    mpi.barrier()

    mpi.report('--> g_wr from g_wk')
    g_wr = fourier_wk_to_wr(g_wk)

    mpi.barrier()
    mpi.report('g_wr ' + str(g_wr[Idx(2), Idx(0, 0, 0)][0, 0]))
    n_r = np.sum(g_wr.data, axis=0)[0]
    mpi.report('n_r=0 ' + str(n_r[0, 0]))
    mpi.barrier()

    mpi.report('--> chi0_wnr from g_wr')
    chi0_wnr = chi0r_from_gr_PH(nw=nw, nn=nwf, g_nr=g_wr)

    del g_wr

    mpi.barrier()
    mpi.report('chi0_wnr ' +
               str(chi0_wnr[Idx(0), Idx(0), Idx(0, 0, 0)][0, 0, 0, 0]))
    chi0_r0 = np.sum(chi0_wnr[:, :, Idx(0, 0, 0)].data)
    mpi.report('chi0_r0 ' + str(chi0_r0))
    mpi.barrier()

    mpi.report('--> chi0_wnk from chi0_wnr')
    chi0_wnk = chi0q_from_chi0r(chi0_wnr)

    del chi0_wnr

    mpi.barrier()
    mpi.report('chi0_wnk ' +
               str(chi0_wnk[Idx(0), Idx(0), Idx(0, 0, 0)][0, 0, 0, 0]))
    chi0 = np.sum(chi0_wnk.data) / len(kmesh)
    mpi.report('chi0 = ' + str(chi0))
    mpi.barrier()

    mpi.barrier()

    return chi0_wnk
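
A hedged usage sketch, assuming the same imports as the rest of bse.py (chi0q_sum_nu is the trace helper already used in solve_lattice_bse above):

# Bare generalized susceptibility with 10 bosonic and 20 fermionic frequencies:
chi0_wnk = get_chi0_wnk(g_wk, nw=10, nwf=20)

# Trace out the fermionic frequency to obtain chi0(i w_n, k):
chi0_wk = chi0q_sum_nu(chi0_wnk)
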
Code example #12
File: sumkdft_opt.py  Project: FermiQ/DCore
    def extract_G_loc(self,
                      mu=None,
                      iw_or_w='iw',
                      with_Sigma=True,
                      with_dc=True,
                      broadening=None):
        r"""
        """

        # +++ADDED
        # print_time calls are inserted in several places for benchmarking
        import time
        start = [time.time(), time.time()]

        def print_time(txt):
            # Disabled benchmark hook; uncomment to print elapsed times.
            # now = time.time()
            # print("time {:>8.5f}sec {:>8.5f}sec @ {}".format(now - start[0], now - start[1], txt))
            # start[0] = now
            pass

        # print("\n=====================")
        # print("Start extract_G_loc")
        print_time("start")

        if mu is None:
            mu = self.chemical_potential

        if iw_or_w == "iw":
            G_loc = [
                self.Sigma_imp_iw[icrsh].copy()
                for icrsh in range(self.n_corr_shells)
            ]  # this list will be returned
            beta = G_loc[0].mesh.beta
            G_loc_inequiv = [
                BlockGf(name_block_generator=[
                    (block, GfImFreq(indices=inner, mesh=G_loc[0].mesh))
                    for block, inner in self.gf_struct_solver[ish].items()
                ],
                        make_copies=False)
                for ish in range(self.n_inequiv_shells)
            ]
        elif iw_or_w == "w":
            G_loc = [
                self.Sigma_imp_w[icrsh].copy()
                for icrsh in range(self.n_corr_shells)
            ]  # this list will be returned
            mesh = G_loc[0].mesh
            G_loc_inequiv = [
                BlockGf(name_block_generator=[
                    (block, GfReFreq(indices=inner, mesh=mesh))
                    for block, inner in self.gf_struct_solver[ish].items()
                ],
                        make_copies=False)
                for ish in range(self.n_inequiv_shells)
            ]
        else:
            raise ValueError("iw_or_w must be 'iw' or 'w'")

        for icrsh in range(self.n_corr_shells):
            G_loc[icrsh].zero()  # initialize to zero

        print_time("k-sum start")

        ikarray = numpy.array(list(range(self.n_k)))
        for ik in mpi.slice_array(ikarray):
            print_time("in k-loop: k-sum")
            if iw_or_w == 'iw':
                G_latt = self.lattice_gf(ik=ik,
                                         mu=mu,
                                         iw_or_w=iw_or_w,
                                         with_Sigma=with_Sigma,
                                         with_dc=with_dc,
                                         beta=beta)
            elif iw_or_w == 'w':
                mesh_parameters = (G_loc[0].mesh.omega_min,
                                   G_loc[0].mesh.omega_max, len(G_loc[0].mesh))
                G_latt = self.lattice_gf(ik=ik,
                                         mu=mu,
                                         iw_or_w=iw_or_w,
                                         with_Sigma=with_Sigma,
                                         with_dc=with_dc,
                                         broadening=broadening,
                                         mesh=mesh_parameters)
            print_time("in k-loop: lattice_gf")
            G_latt *= self.bz_weights[ik]

            print_time("in k-loop: *bz_weights")
            for icrsh in range(self.n_corr_shells):
                # +++MODIFIED: sum directly into G_loc through downfold's
                # overwrite_gf_inp/fac arguments (no temporary storage needed)
                for bname, gf in G_loc[icrsh]:
                    self.downfold(ik,
                                  icrsh,
                                  bname,
                                  G_latt[bname],
                                  gf,
                                  overwrite_gf_inp=True,
                                  fac=+1)
            print_time("in k-loop: downfold")
        print_time("k-sum end")

        # Collect data from mpi
        for icrsh in range(self.n_corr_shells):
            G_loc[icrsh] << mpi.all_reduce(mpi.world, G_loc[icrsh],
                                           lambda x, y: x + y)
        mpi.barrier()
        print_time("mpi.all_reduce")

        # G_loc[:] is now the sum over k projected to the local orbitals.
        # here comes the symmetrisation, if needed:
        if self.symm_op != 0:
            G_loc = self.symmcorr.symmetrize(G_loc)

        # G_loc is rotated to the local coordinate system:
        if self.use_rotations:
            for icrsh in range(self.n_corr_shells):
                for bname, gf in G_loc[icrsh]:
                    G_loc[icrsh][bname] << self.rotloc(
                        icrsh, gf, direction='toLocal')

        # transform to CTQMC blocks:
        for ish in range(self.n_inequiv_shells):
            for block, inner in self.gf_struct_solver[ish].items():
                for ind1 in inner:
                    for ind2 in inner:
                        block_sumk, ind1_sumk = self.solver_to_sumk[ish][(
                            block, ind1)]
                        block_sumk, ind2_sumk = self.solver_to_sumk[ish][(
                            block, ind2)]
                        G_loc_inequiv[ish][block][ind1, ind2] << G_loc[
                            self.inequiv_to_corr[ish]][block_sumk][ind1_sumk,
                                                                   ind2_sumk]

        print_time("symm, rotations, solver_to_sumk")
        # print("End extract_G_loc")
        # print("=====================\n")

        # return only the inequivalent shells:
        return G_loc_inequiv
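
A hedged usage sketch (SK is assumed to be an initialized SumkDFT-like object with its chemical potential and impurity self-energies already set):

G_loc_list = SK.extract_G_loc(iw_or_w='iw')

for ish, G in enumerate(G_loc_list):
    mpi.report('inequivalent shell %d: density = %s' % (ish, G.total_density()))
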