Code Example #1
File: Toto.py Project: Wentzell/app4triqs
    def test_mpi(self):

        a = Toto(0)

        if mpi.is_master_node():
            a = Toto(1)
        # mpi.bcast returns the broadcast object; assign it so that
        # non-master ranks also receive Toto(1)
        a = mpi.bcast(a)

        self.assertEqual(a, Toto(1))
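
For reference, the same broadcast idiom outside of a test case; a minimal sketch assuming only pytriqs.utility.mpi (the dictionary payload is illustrative, any picklable object works):

import pytriqs.utility.mpi as mpi

value = None
if mpi.is_master_node():
    value = {'beta': 10.0, 'n_iw': 1025}  # prepared on the master only
value = mpi.bcast(value)                  # collective: every rank now holds the same dict
print "[Node", mpi.rank, "] value:", value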
Code Example #2
def mpi_parallel_get_G(solver_data_package, dt):
    if mpi.is_master_node():
        print 'master node about to broadcast get_G_parameters...'
        solver_data_package['tag'] = 'get_G'
        solver_data_package['get_G_parameters'] = {}
        solver_data_package['get_G_parameters'][
            'Sigma_IaJb_imp_iw'] = dt.Sigma_IaJb_imp_iw
        solver_data_package['get_G_parameters']['H0_k'] = dt.H0_k
        print "master node sending solver_data_package: ", solver_data_package.keys(
        )
        solver_data_package = mpi.bcast(solver_data_package)

    if not (dt.master_rank is None):
        print "[ master_rank", dt.master_rank, "]: received solver_data_package: ", solver_data_package.keys(
        )
        print "[ master_rank", dt.master_rank, "]: about to do get_G"
        for iwii, iwi in enumerate(dt.iwis_per_master):
            dt.Sigma_imp_data[iwii, :, :] = solver_data_package[
                'get_G_parameters']['Sigma_IaJb_imp_iw'].data[iwi, :, :]
        H0_k = solver_data_package['get_G_parameters']['H0_k']
        parallel_get_Nambu_G_for_cellular(numpy.array(dt.iws_per_master), H0_k,
                                          dt.Sigma_imp_data, dt.G_IaJb_k_iw)
        print "[ master_rank", dt.master_rank, "]: done doing get_G"
    if mpi.is_master_node():
        del solver_data_package['get_G_parameters']
Code Example #3
def mpi_parallel_get_G_loc(solver_data_package, dt):
    if mpi.is_master_node():
        print '[ master node ] [ master_rank', dt.master_rank, '] about to broadcast get_G_loc_parameters...'
        dt.G_IaJb_loc_iw << 0.0
        solver_data_package['tag'] = 'get_G_loc'
        solver_data_package['get_G_loc_parameters'] = {}
        solver_data_package['get_G_loc_parameters'][
            'G_IaJb_loc_iw'] = dt.G_IaJb_loc_iw
        solver_data_package = mpi.bcast(solver_data_package)
    G_IaJb_loc_iw = solver_data_package['get_G_loc_parameters'][
        'G_IaJb_loc_iw']
    if not (dt.master_rank is None):
        print "[ master_rank", dt.master_rank, "]: about to do get_G_loc"
        half_niw, nk, nk, nIa, nJb = numpy.shape(dt.G_IaJb_k_iw)
        G_IaJb_loc_iw.data[dt.iwis_per_master[0]:dt.iwis_per_master[-1] +
                           1, :, :] = numpy.sum(dt.G_IaJb_k_iw,
                                                axis=(1, 2)) / nk**2
    #print "[rank ", mpi.rank,"[ master_rank",dt.master_rank,"]: about to reduce G_IaJb_loc_iw"
    G_IaJb_loc_iw = mpi.all_reduce(None, G_IaJb_loc_iw, None)
    if not (dt.master_rank is None):
        print "[ master_rank", dt.master_rank, "]: done doing get_G_loc"
    if mpi.is_master_node():
        dt.G_IaJb_loc_iw << G_IaJb_loc_iw
        fit_fermionic_gf_tail(dt.G_IaJb_loc_iw,
                              starting_iw=14.0,
                              no_loc=False,
                              overwrite_tail=True,
                              max_order=5)
        dt.G_IaJb_loc_tau << InverseFourier(dt.G_IaJb_loc_iw)
        del solver_data_package['get_G_loc_parameters']
        print '[ master node ] [ master_rank', dt.master_rank, '] done doing get_G_loc'
Code Example #4
        def initialize_solver(
            nambu=False,
            solver_data_package=None,
            beta=None,
            nsites=None,
            niw=None,
            ntau=100000,
        ):
            if solver_data_package is None: solver_data_package = {}

            if nambu:
                gf_struct = {'nambu': range(2 * nsites)}
            else:
                gf_struct = {'up': range(nsites), 'dn': range(nsites)}

            assert ntau > 2 * niw, "solvers.ctint.initialize_solvers: ERROR! ntau too small!!"

            solver_data_package['constructor_parameters'] = {}
            solver_data_package['constructor_parameters']['beta'] = beta
            solver_data_package['constructor_parameters']['n_iw'] = niw
            solver_data_package['constructor_parameters']['n_tau'] = ntau
            solver_data_package['constructor_parameters'][
                'gf_struct'] = gf_struct
            solver_data_package['tag'] = 'construct'

            if mpi.is_master_node():
                print "solver_data_package:", solver_data_package

            if mpi.size > 1:
                solver_data_package = mpi.bcast(solver_data_package)

            return Solver(**solver_data_package['constructor_parameters'])
Code Example #5
        def initialize_solver(
            Q_IaJb_iw_template,
            solver_data_package=None,
            ntau=100000,
        ):
            if solver_data_package is None: solver_data_package = {}

            niw = len(Q_IaJb_iw_template.data[:, 0, 0]) / 2
            beta = Q_IaJb_iw_template.beta

            get_K_container, get_gf_struct, get_h_int, convert_to_K_space, convert_to_IJ_space = Kspace_plaquette(
                Q_IaJb_iw_template)

            gf_struct = get_gf_struct()

            assert ntau > 2 * niw, "solvers.ctint.initialize_solvers: ERROR! ntau too small!!"

            solver_data_package['constructor_parameters'] = {}
            solver_data_package['constructor_parameters']['beta'] = beta
            solver_data_package['constructor_parameters']['n_iw'] = niw
            solver_data_package['constructor_parameters']['n_tau'] = ntau
            solver_data_package['constructor_parameters'][
                'gf_struct'] = gf_struct
            solver_data_package['tag'] = 'construct'

            if mpi.is_master_node():
                print "solver_data_package:", solver_data_package

            if mpi.size > 1:
                solver_data_package = mpi.bcast(solver_data_package)

            return CthybSolver(**solver_data_package['constructor_parameters'])
Code Example #6
        def initialize_solvers(data, solver_data_package=None):
            if solver_data_package is None: solver_data_package = {}

            n_tau = 2000
            for C in data.impurity_struct.keys():
                if len(data.impurity_struct[C]) > 16:
                    n_tau = 1000
            assert n_tau > 2 * data.n_iw, "solvers.ctint.initialize_solvers: ERROR! n_tau too small!!"

            solver_data_package['impurity_struct'] = data.impurity_struct
            solver_data_package['constructor_parameters'] = {}
            solver_data_package['constructor_parameters']['beta'] = data.beta
            solver_data_package['constructor_parameters']['n_iw'] = data.n_iw
            solver_data_package['constructor_parameters']['n_tau'] = n_tau
            solver_data_package['construct|run|exit'] = 0

            if MASTER_SLAVE_ARCHITECTURE and (mpi.size > 1):
                solver_data_package = mpi.bcast(solver_data_package)

            for C in data.impurity_struct.keys():
                solver_struct = {
                    'up': data.impurity_struct[C],
                    'dn': data.impurity_struct[C]
                }
                solver_data_package['constructor_parameters'][
                    'gf_struct'] = solver_struct
                data.solvers[C] = cthybSolver(
                    **solver_data_package['constructor_parameters'])
Code Example #7
def is_vasp_lock_present():
    """
    small function to check if vasp is still running
    """
    res_bool = False
    if mpi.is_master_node():
        res_bool = os.path.isfile('./vasp.lock')
    res_bool = mpi.bcast(res_bool)
    return res_bool
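
A helper like this is typically used to poll for the end of a VASP cycle in charge self-consistent runs. A hypothetical polling loop built on it (the sleep interval and the surrounding logic are illustrative, not taken from dft_tools):

import time

while is_vasp_lock_present():
    # every rank sees the same broadcast result, so all ranks leave the loop together
    time.sleep(1)
# ... proceed with the next DMFT step once vasp.lock has disappeared ...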
Code Example #8
        def run(solver,
                U,
                G0_IaJb_iw,
                n_cycles=20000,
                max_time=5 * 60,
                solver_data_package=None,
                only_sign=False):

            if solver_data_package is None: solver_data_package = {}

            solver_data_package['solve_parameters'] = {}
            solver_data_package['solve_parameters']['U'] = U
            solver_data_package['solve_parameters']['max_time'] = max_time
            solver_data_package['solve_parameters']["random_name"] = ""
            solver_data_package['solve_parameters']["length_cycle"] = 50
            solver_data_package['solve_parameters']["n_warmup_cycles"] = 50  #0
            solver_data_package['solve_parameters']["n_cycles"] = 100000000
            solver_data_package['solve_parameters']["measure_G_l"] = True
            solver_data_package['solve_parameters']["move_double"] = True
            solver_data_package['solve_parameters']["perform_tail_fit"] = True
            solver_data_package['solve_parameters']["fit_max_moment"] = 2

            print solver_data_package['solve_parameters']

            solver_data_package['G0_IaJb_iw'] = G0_IaJb_iw

            solver_data_package['tag'] = 'run'

            if mpi.size > 1:
                if mpi.is_master_node():
                    print "broadcasting solver_data_package!!"
                solver_data_package = mpi.bcast(solver_data_package)

            if mpi.is_master_node(): print "about to run "
            dct = deepcopy(solver_data_package['solve_parameters'])
            del dct['U']

            get_K_container, get_gf_struct, get_h_int, convert_to_K_space, convert_to_IJ_space = Kspace_plaquette(
                G0_IaJb_iw)
            convert_to_K_space(solver.G0_iw, G0_IaJb_iw)
            h_int = get_h_int(U)
            try:
                solver.solve(h_int=h_int, **dct)
                if mpi.is_master_node():
                    print "average sign: ", solver.average_sign
                Sigma_IaJb_iw = G0_IaJb_iw.copy()
                convert_to_IJ_space(Sigma_IaJb_iw, solver.Sigma_iw)
                return Sigma_IaJb_iw
            except Exception as e:
                # dump the solver for post-mortem inspection, then re-raise
                A = HDFArchive('black_box', 'w')
                A['solver'] = solver
                del A
                raise e
Code Example #9
    def __init__(self, hdf_file, subgroup=None):
        """
        Initialises the class.

        Parameters
        ----------
        hdf_file : string
                   Base name of the hdf5 archive with the symmetry data.
        subgroup : string, optional
                   Name of subgroup storing correlated-shell symmetry data. If not given, it is assumed that
                   the data is stored at the root of the hdf5 archive.
        """

        assert type(
            hdf_file) == StringType, "Symmetry: hdf_file must be a filename."
        self.hdf_file = hdf_file
        things_to_read = [
            'n_symm', 'n_atoms', 'perm', 'orbits', 'SO', 'SP', 'time_inv',
            'mat', 'mat_tinv'
        ]
        for it in things_to_read:
            setattr(self, it, 0)

        if mpi.is_master_node():
            # Read the stuff on master:
            ar = HDFArchive(hdf_file, 'r')
            if subgroup is None:
                ar2 = ar
            else:
                ar2 = ar[subgroup]

            for it in things_to_read:
                setattr(self, it, ar2[it])
            del ar2
            del ar

        # Broadcasting
        for it in things_to_read:
            setattr(self, it, mpi.bcast(getattr(self, it)))

        # now define the mapping of orbitals:
        # self.orb_map[iorb] = jorb gives the permutation of the orbitals as given in the list, when the
        # permutation of the atoms is done:
        self.n_orbits = len(self.orbits)
        self.orb_map = [[0 for iorb in range(self.n_orbits)]
                        for i_symm in range(self.n_symm)]
        for i_symm in range(self.n_symm):
            for iorb in range(self.n_orbits):
                srch = copy.deepcopy(self.orbits[iorb])
                srch['atom'] = self.perm[i_symm][self.orbits[iorb]['atom'] - 1]
                self.orb_map[i_symm][iorb] = self.orbits.index(srch)
Code Example #10
File: symmetry.py Project: krivenko/dft_tools
    def __init__(self, hdf_file, subgroup=None):
        """
        Initialises the class.

        Parameters
        ----------
        hdf_file : string
                   Base name of the hdf5 archive with the symmetry data.
        subgroup : string, optional
                   Name of subgroup storing correlated-shell symmetry data. If not given, it is assumed that
                   the data is stored at the root of the hdf5 archive.
        """

        assert type(
            hdf_file) == StringType, "Symmetry: hdf_file must be a filename."
        self.hdf_file = hdf_file
        things_to_read = ['n_symm', 'n_atoms', 'perm',
                          'orbits', 'SO', 'SP', 'time_inv', 'mat', 'mat_tinv']
        for it in things_to_read:
            setattr(self, it, 0)

        if mpi.is_master_node():
            # Read the stuff on master:
            ar = HDFArchive(hdf_file, 'r')
            if subgroup is None:
                ar2 = ar
            else:
                ar2 = ar[subgroup]

            for it in things_to_read:
                setattr(self, it, ar2[it])
            del ar2
            del ar

        # Broadcasting
        for it in things_to_read:
            setattr(self, it, mpi.bcast(getattr(self, it)))

        # now define the mapping of orbitals:
        # self.orb_map[iorb] = jorb gives the permutation of the orbitals as given in the list, when the
        # permutation of the atoms is done:
        self.n_orbits = len(self.orbits)
        self.orb_map = [[0 for iorb in range(
            self.n_orbits)] for i_symm in range(self.n_symm)]
        for i_symm in range(self.n_symm):
            for iorb in range(self.n_orbits):
                srch = copy.deepcopy(self.orbits[iorb])
                srch['atom'] = self.perm[i_symm][self.orbits[iorb]['atom'] - 1]
                self.orb_map[i_symm][iorb] = self.orbits.index(srch)
Code Example #11
File: archive.py Project: henhans/ClusterDMFT
 def load(self, function_name, loop_nr = -1):
     """
     returns a calculated function from archive
     function_name: 'Sigma_c_iw', 'G_c_iw', ...
     loop_nr: int, -1 gives the last loop nr.
     """
     function = None
     if mpi.is_master_node():
         a = HDFArchive(self.archive, 'r')
         if loop_nr < 0:
             function = a['results'][str(self.next_loop() + loop_nr)][function_name]
         else:
             function = a['results'][str(loop_nr)][function_name]
         del a
     function = mpi.bcast(function)
     return function
Code Example #12
        def initialize_solvers(data,
                               solver_data_package=None,
                               bosonic_measures=False):
            if solver_data_package is None: solver_data_package = {}

            n_tau = 2000
            for C in data.impurity_struct.keys():
                if len(data.impurity_struct[C]) > 16:
                    n_tau = 1000
            assert n_tau > 2 * data.n_iw, "solvers.ctint.initialize_solvers: ERROR! n_tau too small!!"

            n_tau_b = (5 if (not bosonic_measures) else data.n_iw * 4)
            n_iw_b = (1 if (not bosonic_measures) else data.n_iw)
            solver_data_package['impurity_struct'] = data.impurity_struct
            solver_data_package['constructor_parameters'] = {}
            solver_data_package['constructor_parameters']['beta'] = data.beta
            solver_data_package['constructor_parameters']['n_iw'] = data.n_iw
            solver_data_package['constructor_parameters']['n_tau_g0'] = n_tau
            solver_data_package['constructor_parameters']['n_tau_f'] = n_tau
            solver_data_package['constructor_parameters'][
                'n_tau_dynamical_interactions'] = n_tau_b
            solver_data_package['constructor_parameters'][
                'n_iw_dynamical_interactions'] = n_iw_b
            solver_data_package['constructor_parameters'][
                'n_tau_nnt'] = n_tau_b
            solver_data_package['constructor_parameters']['n_tau_g2t'] = 5
            solver_data_package['constructor_parameters']['n_w_f_g2w'] = 2
            solver_data_package['constructor_parameters']['n_w_b_g2w'] = 2
            solver_data_package['constructor_parameters']['n_tau_M4t'] = 5
            solver_data_package['constructor_parameters']['n_w_f_M4w'] = 2
            solver_data_package['constructor_parameters']['n_w_b_M4w'] = 2
            solver_data_package['construct|run|exit'] = 0

            if MASTER_SLAVE_ARCHITECTURE and (mpi.size > 1):
                solver_data_package = mpi.bcast(solver_data_package)

            for C in data.impurity_struct.keys():
                solver_struct = {
                    'up': data.impurity_struct[C],
                    'dn': data.impurity_struct[C]
                }
                solver_data_package['constructor_parameters'][
                    'gf_struct'] = solver_struct
                data.solvers[C] = Solver(
                    **solver_data_package['constructor_parameters'])
Code Example #13
File: sumk_lda.py Project: boujnah-mourad/dft_tools
    def read_input_from_hdf(self, subgrp, things_to_read, optional_things=[]):
        """
        Reads data from the HDF file
        """
        
        retval = True
        # init variables on all nodes:
        for it in things_to_read: exec "self.%s = 0"%it
        for it in optional_things: exec "self.%s = 0"%it
        
        if (mpi.is_master_node()):
            ar=HDFArchive(self.hdf_file,'a')
            if (subgrp in ar):
                # first read the necessary things:
                for it in things_to_read:
                    if (it in ar[subgrp]):
                        exec "self.%s = ar['%s']['%s']"%(it,subgrp,it)
                    else:
                        mpi.report("Loading %s failed!"%it)
                        retval = False
                   
                if ((retval) and (len(optional_things)>0)):
                    # if necessary things worked, now read optional things:
                    retval = {}
                    for it in optional_things:
                        if (it in ar[subgrp]):
                            exec "self.%s = ar['%s']['%s']"%(it,subgrp,it)
                            retval['%s'%it] = True
                        else:
                            retval['%s'%it] = False
            else:
                mpi.report("Loading failed: No %s subgroup in HDF5!"%subgrp)
                retval = False

            del ar

        # now do the broadcasting:
        for it in things_to_read: exec "self.%s = mpi.bcast(self.%s)"%(it,it)
        for it in optional_things: exec "self.%s = mpi.bcast(self.%s)"%(it,it)
        

        retval = mpi.bcast(retval)
               
        return retval
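
The exec-based read-and-broadcast above can be expressed more directly with setattr/getattr, in the style of the Symmetry class in Code Examples #9 and #10. A minimal sketch of the equivalent broadcast loop (same attribute names assumed; this is not the dft_tools implementation itself):

        # after reading on the master, broadcast attribute by attribute
        for it in things_to_read + optional_things:
            setattr(self, it, mpi.bcast(getattr(self, it)))
        retval = mpi.bcast(retval)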
Code Example #14
def slave_run(data_package, printout=True, tasks={}):
    while True:
        if printout: print "[Node ", mpi.rank, "] waiting for instructions..."

        data_package = mpi.bcast(data_package)

        if printout: print "[Node ", mpi.rank, "] received instructions!!!"

        if data_package is None:
            if printout:
                print "[Node ", mpi.rank, "] data_package is None, will exit now. Goodbye."
            break

        if data_package['tag'] in tasks.keys():
            tasks[data_package['tag']](data_package)
        elif data_package['tag'] == 'exit':
            break
        else:
            print "[Node ", mpi.rank, "] ERROR: unknown task tag!!!!"
Code Example #15
def optimized_mpi_parallel_get_G_loc(solver_data_package, dt):
    if mpi.is_master_node():
        print '[ master node ] [ master_rank', dt.master_rank, '] about to broadcast get_G_loc_parameters...'
        dt.G_IaJb_loc_iw << 0.0
        solver_data_package['tag'] = 'optimized_get_G_loc'
        solver_data_package['optimized_get_G_loc_parameters'] = {}
        solver_data_package['optimized_get_G_loc_parameters'][
            'G_IaJb_loc_iw'] = dt.G_IaJb_loc_iw
        solver_data_package['optimized_get_G_loc_parameters'][
            'Sigma_IaJb_imp_iw'] = dt.Sigma_IaJb_imp_iw
        solver_data_package['optimized_get_G_loc_parameters']['H0_k'] = dt.H0_k
        solver_data_package = mpi.bcast(solver_data_package)
    G_IaJb_loc_iw = solver_data_package['optimized_get_G_loc_parameters'][
        'G_IaJb_loc_iw']
    Sigma_IaJb_imp_iw = solver_data_package['optimized_get_G_loc_parameters'][
        'Sigma_IaJb_imp_iw']
    H0_k = solver_data_package['optimized_get_G_loc_parameters']['H0_k']
    if not (dt.master_rank is None):
        print "[ master_rank", dt.master_rank, "]: about to do get_G_loc"
        optimized_parallel_get_Nambu_G_for_cellular(
            dt.iws_per_master, H0_k, Sigma_IaJb_imp_iw.data[
                dt.iwis_per_master[0]:dt.iwis_per_master[-1] + 1, :, :],
            G_IaJb_loc_iw.data[dt.iwis_per_master[0]:dt.iwis_per_master[-1] +
                               1, :, :])
    #print "[rank ", mpi.rank,"[ master_rank",dt.master_rank,"]: about to reduce G_IaJb_loc_iw"
    G_IaJb_loc_iw = mpi.all_reduce(None, G_IaJb_loc_iw, None)
    if not (dt.master_rank is None):
        print "[ master_rank", dt.master_rank, "]: done doing get_G_loc"
    if mpi.is_master_node():
        dt.G_IaJb_loc_iw << G_IaJb_loc_iw
        fit_fermionic_gf_tail(dt.G_IaJb_loc_iw,
                              starting_iw=14.0,
                              no_loc=False,
                              overwrite_tail=True,
                              max_order=5)
        dt.G_IaJb_loc_tau << InverseFourier(dt.G_IaJb_loc_iw)
        del solver_data_package['optimized_get_G_loc_parameters']
        print '[ master node ] [ master_rank', dt.master_rank, '] done doing optimized_get_G_loc'
Code Example #16
import numpy as np

# assumed module-level imports for this excerpt (TRIQS 2.x-style pytriqs paths)
from pytriqs.archive import HDFArchive
from pytriqs.gf import GfImTime
import pytriqs.utility.mpi as mpi

from pytriqs.operators import n, c, c_dag, Operator, dagger

# ----------------------------------------------------------------------

from pyed.ParameterCollection import ParameterCollection
from pyed.TriqsExactDiagonalization import TriqsExactDiagonalization

# ----------------------------------------------------------------------
if __name__ == '__main__':

    if mpi.is_master_node():
        with HDFArchive('data_model.h5','r') as A: m = A["p"]
        with HDFArchive('data_pyed.h5','r') as A: p = A["p"]
    else:
        m, p = None, None
    m, p = mpi.bcast(m), mpi.bcast(p)

    p.chi_field = np.zeros((4, 4, 4, 4), dtype=np.complex)
    p.g_tau_field = {}

    g_tau = GfImTime(name=r'$g$', beta=m.beta,
                     statistic='Fermion', n_points=50,
                     target_shape=(4, 4))

    # -- The field is symmetric in (i1, i2)
    # -- only calculate upper triangle
    
    index_list = []
    for i1 in xrange(4):
        for i2 in xrange(i1, 4):
            index_list.append((i1, i2))
Code Example #17
File: histogram_bcast.py Project: TRIQS/triqs
       4, 5, 1, 9, 8, 0, 4, 5, 3, 2, 4, 8, 8, 6, 4, 3, 5, 3, 9, 8, 1, 0,
       4, 2, 2, 0, 5, 8, 6, 6, 2, 3, 5, 3, 8, 5, 2, 3, 4, 3, 4, 1, 0, 4,
       0, 0, 8, 4, 3, 5, 8, 4, 5, 3, 5, 4, 7, 7, 0, 2, 5, 9, 0, 9, 2, 8,
       0, 4, 2, 2, 0, 2, 3, 0, 9, 6, 9, 7, 2, 4, 9, 7, 0, 3, 8, 0, 0, 7,
       8, 7, 6, 4, 0, 3, 7, 1, 9, 2, 8, 1, 2, 6, 4, 8, 9, 6, 2, 9, 9, 2,
       8, 4, 1, 0, 7, 5, 9, 0, 4, 8, 0, 0, 4, 3, 7, 5, 7, 7, 5, 0, 5, 8,
       8, 7, 2, 7])

    h_ref = py_histogram(x)
    h = Histogram(0, 10)
    h << x

else:
    h, h_ref = None, None
    
h = mpi.bcast(h)
h_ref = mpi.bcast(h_ref)

for rank in xrange(mpi.size):
    if rank == mpi.rank:

        print '-'*72
        print 'rank =', mpi.rank
        print 'h =\n', h
        print 'h_ref =\n', h_ref

        # -- Compare h and h_ref
        pts = np.array([ int(h.mesh_point(idx)) for idx in xrange(len(h))])

        for pt, val in zip(pts, h.data):
            val = int(val)
Code Example #18
def dca_calculation(dca_scheme,
                    ph_symmetry=False,
                    Us=[1.0],
                    Ts=[0.125],
                    ns=[0.5],
                    fixed_n=True,
                    mutildes=[0.0],
                    w_cutoff=20.0,
                    min_its=5,
                    max_its=25,
                    mix_Sigma=False,
                    rules=[[0, 0.5], [6, 0.2], [12, 0.65]],
                    do_dmft_first=False,
                    alpha=0.5,
                    delta=0.1,
                    automatic_alpha_and_delta=False,
                    n_cycles=10000000,
                    max_time_rules=[[1, 5 * 60], [2, 20 * 60], [4, 80 * 60],
                                    [8, 200 * 60], [16, 400 * 60]],
                    time_rules_automatic=False,
                    exponent=0.7,
                    overall_prefactor=1.0,
                    no_timing=False,
                    accuracy=1e-4,
                    solver_data_package=None,
                    print_current=1,
                    initial_guess_archive_name='',
                    suffix=''):

    if mpi.is_master_node():
        print "WELCOME TO dca calculation!"

    solver_class = solvers.ctint

    impurity_struct = dca_scheme.get_impurity_struct()
    fermionic_struct = dca_scheme.get_fermionic_struct()

    if mpi.is_master_node(): print "impurity_struct: ", impurity_struct
    if mpi.is_master_node(): print "fermionic_struct: ", fermionic_struct

    if not time_rules_automatic:
        max_times = {}
        for C in impurity_struct:
            for r in max_time_rules:
                if r[0] <= len(impurity_struct[C]):
                    max_times[C] = r[1]
        if mpi.is_master_node(): print "max_times from rules: ", max_times

    beta = 1.0 / Ts[0]

    n_iw = int(((w_cutoff * beta) / math.pi - 1.0) / 2.0)
    if mpi.is_master_node():
        print "PM HUBBARD GW: n_iw: ", n_iw

    dt = dca_data(n_iw=n_iw,
                  beta=beta,
                  impurity_struct=impurity_struct,
                  fermionic_struct=fermionic_struct,
                  archive_name="so_far_nothing_you_shouldnt_see_this_file")

    if fixed_n:
        ps = itertools.product(ns, Us, Ts)
    else:
        ps = itertools.product(mutildes, Us, Ts)

    counter = 0
    old_beta = beta
    for p in ps:
        #name stuff to avoid confusion
        if fixed_n:
            n = p[0]
        else:
            mutilde = p[0]
            n = None
        U = p[1]
        T = p[2]
        beta = 1.0 / T

        if beta != old_beta:
            n_iw = int(((w_cutoff * beta) / math.pi - 1.0) / 2.0)
            dt.change_beta(beta, n_iw)

        old_beta = beta

        filename = "result"
        if len(ns) > 1 and fixed_n:
            filename += ".n%s" % n
        if len(mutildes) > 1 and not fixed_n:
            filename += ".mutilde%s" % mutilde
        if len(Us) > 1: filename += ".U%s" % U
        if len(Ts) > 1: filename += ".T%.4f" % T
        filename += ".h5"
        dt.archive_name = filename

        if mpi.is_master_node():
            if fixed_n:
                print "Working: U: %s T %s n: %s " % (U, T, n)
            else:
                print "Working: U: %s T %s mutilde: %s " % (U, T, mutilde)

        prepare_dca(dt, dca_scheme, solver_class)

        solver_class.initialize_solvers(dt, solver_data_package)

        if no_timing:
            for C in dt.impurity_struct.keys():
                max_times[C] = -1
            if mpi.is_master_node():
                print "no_timing! solvers will run until they perform all the mc steps", max_times

        if time_rules_automatic and (not no_timing):
            max_times = {}
            for C in dt.impurity_struct.keys():
                Nc = len(dt.impurity_struct[C])
                pref = ((dt.beta / 8.0) * U * Nc)**exponent  #**1.2
                print C
                print "Nc: ", Nc,
                print "U: ", U,
                print "beta: ", dt.beta,
                print "pref: ", pref
                max_times[C] = int(overall_prefactor * pref * 5 * 60)
            if mpi.is_master_node(): print "max times automatic: ", max_times

        identical_pairs = dca_scheme.get_identical_pairs()
        if identical_pairs is None:
            print "identical pairs not known, there will be no symmetrization"
        else:
            identical_pairs = {'x': identical_pairs}

        actions = [
            generic_action(name="lattice",
                           main=lambda data: nested_mains.lattice(
                               data,
                               n=n,
                               ph_symmetry=ph_symmetry,
                               accepted_mu_range=[-2.0, 2.0]),
                           mixers=[],
                           cautionaries=[],
                           allowed_errors=[],
                           printout=lambda data, it: ([
                               data.dump_general(quantities=['GK_iw', 'GR_iw'],
                                                 suffix='-current'),
                               data.dump_scalar(suffix='-current')
                           ] if ((it + 1) % print_current == 0) else None)),
            generic_action(
                name="pre_impurity",
                main=lambda data: nested_mains.pre_impurity(data),
                mixers=[],
                cautionaries=[],
                allowed_errors=[],
                printout=lambda data, it: (data.dump_general(
                    quantities=['GweissK_iw', 'GweissR_iw', 'Gweiss_iw'],
                    suffix='-current') if (
                        (it + 1) % print_current == 0) else None)),
            generic_action(
                name="impurity",
                main=(lambda data: nested_mains.impurity(
                    data,
                    U,
                    symmetrize_quantities=True,
                    alpha=alpha,
                    delta=delta,
                    automatic_alpha_and_delta=automatic_alpha_and_delta,
                    n_cycles=n_cycles,
                    max_times=max_times,
                    solver_data_package=solver_data_package)),
                mixers=[],
                cautionaries=[
                    lambda data, it: local_nan_cautionary(data,
                                                          data.impurity_struct,
                                                          Qs=['Sigma_imp_iw'],
                                                          raise_exception=True
                                                          ), lambda data, it:
                    (symmetric_G_and_self_energy_on_impurity(
                        data.G_imp_iw, data.Sigma_imp_iw, data.solvers,
                        identical_pairs, identical_pairs)
                     if it >= 100 else symmetrize_cluster_impurity(
                         data.Sigma_imp_iw, identical_pairs))
                ],
                allowed_errors=[1],
                printout=lambda data, it: ([
                    data.dump_general(quantities=['Sigma_imp_iw', 'G_imp_iw'],
                                      suffix='-current'),
                    data.dump_solvers(suffix='-current')
                ] if ((it + 1) % print_current == 0) else None)),
            generic_action(
                name="selfenergy",
                main=lambda data: dca_mains.selfenergy(data),
                mixers=[],
                cautionaries=[],
                allowed_errors=[],
                printout=lambda data, it:
                (data.dump_general(quantities=['SigmaR_iw', 'SigmaK_iw'],
                                   suffix='-current')
                 if ((it + 1) % print_current == 0) else None))
        ]

        if mix_Sigma:
            actions[3].mixers.append(
                mixer(mixed_quantity=lambda: dt.SigmaK_iw,
                      rules=rules,
                      func=mixer.mix_block_gf,
                      initialize=True))

        monitors = [
            monitor(monitored_quantity=lambda: dt.ns['00'],
                    h5key='n_vs_it',
                    archive_name=dt.archive_name),
            monitor(monitored_quantity=lambda: dt.mus['00'],
                    h5key='mu_vs_it',
                    archive_name=dt.archive_name),
            monitor(monitored_quantity=lambda: dt.SigmaR_iw['00'].data[
                dt.nw / 2, 0, 0].imag,
                    h5key='ImSigma00_vs_it',
                    archive_name=dt.archive_name),
            monitor(monitored_quantity=lambda: dt.SigmaR_iw['00'].data[
                dt.nw / 2, 0, 0].real,
                    h5key='ReSigma00_vs_it',
                    archive_name=dt.archive_name),
            monitor(monitored_quantity=lambda: dt.GR_iw['00'].data[dt.nw / 2,
                                                                   0, 0].imag,
                    h5key='ImG00_vs_it',
                    archive_name=dt.archive_name),
            monitor(monitored_quantity=lambda: dt.GR_iw['00'].data[dt.nw / 2,
                                                                   0, 0].real,
                    h5key='ReG00_vs_it',
                    archive_name=dt.archive_name),
            monitor(monitored_quantity=lambda: dt.err,
                    h5key='err_vs_it',
                    archive_name=dt.archive_name)
        ]  #,
        #                 monitor( monitored_quantity = lambda: actions[3].errs[0],
        #                          h5key = 'sign_err_vs_it',
        #                          archive_name = dt.archive_name) ]

        convergers = [
            converger(monitored_quantity=lambda: dt.GR_iw,
                      accuracy=accuracy,
                      struct=fermionic_struct,
                      archive_name=dt.archive_name,
                      h5key='diffs_GR'),
            converger(monitored_quantity=lambda: dt.SigmaR_iw,
                      accuracy=accuracy,
                      struct=fermionic_struct,
                      archive_name=dt.archive_name,
                      h5key='diffs_SigmaR')
        ]

        dmft = generic_loop(name="DCA",
                            actions=actions,
                            convergers=convergers,
                            monitors=monitors)

        if (counter == 0):  #do the initial guess only once!
            if initial_guess_archive_name != '':
                if mpi.is_master_node():
                    print "constructing dt from initial guess in a file: ", initial_guess_archive_name, "suffix: ", suffix
                dt.construct_from_file(
                    initial_guess_archive_name,
                    suffix)  #make sure it is the right dca_scheme
                if dt.beta != beta:
                    dt.change_beta(beta, n_iw)
            else:
                if not fixed_n:
                    dt.set_mu(mutilde)
                else:
                    dt.set_mu(U / 2.0)
                for C in dt.impurity_struct.keys():
                    for l in dt.impurity_struct[
                            C]:  #just the local components (but on each site!)
                        dt.Sigma_imp_iw[C][l, l] << U / 2.0
                for K, sig in dt.SigmaK_iw:
                    sig[0, 0] << U / 2.0
            dt.dump_general(quantities=['SigmaK_iw', 'Sigma_imp_iw'],
                            suffix='-initial')

        if (counter == 0) and do_dmft_first:
            assert False, "not implemented"

        #run dca!-------------
        dt.dump_parameters()
        dt.dump_non_interacting()

        err = dmft.run(dt,
                       max_its=max_its,
                       min_its=min_its,
                       max_it_err_is_allowed=7,
                       print_final=True,
                       print_current=1)
        if mpi.is_master_node():
            cmd = 'mv %s %s' % (filename, filename.replace("result", "dca"))
            print cmd
            os.system(cmd)

        if (err == 2):
            print "Cautionary error!!! exiting..."
            solver_data_package['construct|run|exit'] = 2
            if MASTER_SLAVE_ARCHITECTURE and (mpi.size > 1):
                solver_data_package = mpi.bcast(solver_data_package)

            break

        if not MASTER_SLAVE_ARCHITECTURE: mpi.barrier()
        counter += 1
    if not (solver_data_package is None):
        solver_data_package['construct|run|exit'] = 2
    if MASTER_SLAVE_ARCHITECTURE and (mpi.size > 1):
        solver_data_package = mpi.bcast(solver_data_package)

    return dt, monitors, convergers
Code Example #19
def cellular_calculation(
        Lx=2,
        Ly=1,
        periodized=False,
        triangular=False,
        Us=[1.0],
        Ts=[0.125],
        ns=[0.5],
        fixed_n=True,
        mutildes=[0.0],
        dispersion=lambda kx, ky: matrix_dispersion(2, -0.25, 0.0, kx, ky),
        ph_symmetry=False,
        dispersion_automatic=True,
        scalar_dispersion=lambda kx, ky: epsilonk_square(kx, ky, -0.25),
        n_ks=[24],
        n_k_automatic=False,
        n_k_rules=[[0.06, 32], [0.03, 48], [0.005, 64], [0.00, 96]],
        w_cutoff=20.0,
        min_its=5,
        max_its=25,
        mix_Sigma=False,
        rules=[[0, 0.5], [6, 0.2], [12, 0.65]],
        do_dmft_first=False,
        use_cthyb=False,
        alpha=0.5,
        delta=0.1,
        n_cycles=100000,
        max_time_rules=[[1, 5 * 60], [2, 20 * 60], [4, 80 * 60], [8, 200 * 60],
                        [16, 400 * 60]],
        time_rules_automatic=False,
        exponent=0.7,
        overall_prefactor=1.0,
        no_timing=False,
        accuracy=1e-4,
        solver_data_package=None,
        print_current=1,
        insulating_initial=False,
        initial_guess_archive_name='',
        suffix='',
        start_from_Gweiss=False):

    if mpi.is_master_node():
        print "WELCOME TO cellular calculation!"
        if n_k_automatic: print "n_k automatic!!!"
    if len(n_ks) == 0 and n_k_automatic: n_ks = [0]

    if use_cthyb:
        solver_class = solvers.cthyb
    else:
        solver_class = solvers.ctint

    fermionic_struct = {'up': [0]}

    Nc = Lx * Ly
    impurity_struct = {'%sx%s' % (Lx, Ly): range(Nc)}

    if not time_rules_automatic:
        max_times = {}
        for C in impurity_struct:
            for r in max_time_rules:
                if r[0] <= len(impurity_struct[C]):
                    max_times[C] = r[1]
        if mpi.is_master_node(): print "max_times from rules: ", max_times

    beta = 1.0 / Ts[0]

    n_iw = int(((w_cutoff * beta) / math.pi - 1.0) / 2.0)
    if mpi.is_master_node():
        print "PM HUBBARD GW: n_iw: ", n_iw

    if not n_k_automatic:
        n_k = n_ks[0]
        print "n_k = ", n_k
    else:
        n_k = n_k_from_rules(Ts[0], n_k_rules)
        #if mpi.is_master_node(): print "n_k automatic!!!"

    dt = cellular_data(
        n_iw=n_iw,
        n_k=n_k,
        beta=beta,
        impurity_struct=impurity_struct,
        fermionic_struct=fermionic_struct,
        archive_name="so_far_nothing_you_shouldnt_see_this_file")

    if fixed_n:
        ps = itertools.product(n_ks, ns, Us, Ts)
    else:
        ps = itertools.product(n_ks, mutildes, Us, Ts)

    counter = 0
    old_nk = n_k
    old_beta = beta

    for p in ps:
        #name stuff to avoid confusion
        nk = (p[0] if (not n_k_automatic) else n_k_from_rules(p[3], n_k_rules))  # p[3] is T; T itself is assigned only below
        if fixed_n:
            n = p[1]
        else:
            mutilde = p[1]
            n = None
        U = p[2]
        T = p[3]
        beta = 1.0 / T

        if nk != old_nk and (not n_k_automatic):
            dt.change_ks(IBZ.k_grid(nk))

        if beta != old_beta:
            n_iw = int(((w_cutoff * beta) / math.pi - 1.0) / 2.0)
            if n_k_automatic:
                nk = n_k_from_rules(T, n_k_rules)
                if nk != old_nk:
                    dt.change_ks(IBZ.k_grid(nk))
            dt.change_beta(beta, n_iw)

        old_beta = beta
        old_nk = nk

        filename = "result"
        if len(n_ks) > 1 and (not n_k_automatic):
            filename += ".nk%s" % nk
        if len(ns) > 1 and fixed_n:
            filename += ".n%s" % n
        if len(mutildes) > 1 and not fixed_n:
            filename += ".mutilde%s" % mutilde
        if len(Us) > 1: filename += ".U%s" % U
        if len(Ts) > 1: filename += ".T%.4f" % T
        filename += ".h5"
        dt.archive_name = filename

        if mpi.is_master_node():
            if fixed_n:
                print "Working: U: %s T %s n: %s n_k: %s n_iw: %s" % (U, n, T,
                                                                      nk, n_iw)
            else:
                print "Working: U: %s T %s mutilde: %s n_k: %s n_iw: %s" % (
                    U, mutilde, T, nk, n_iw)

        if mpi.is_master_node():
            print "about to fill dispersion. ph-symmetry: ", ph_symmetry
        for key in dt.fermionic_struct.keys():
            for kxi in range(dt.n_k):
                for kyi in range(dt.n_k):
                    dt.epsilonijk[key][:, :, kxi, kyi] = dispersion(
                        dt.ks[kxi], dt.ks[kyi])

        if not triangular:
            prepare_cellular(dt, Lx, Ly, solver_class, periodized)
            identical_pairs = {
                dt.impurity_struct.keys()[0]: get_identical_pair_sets(Lx, Ly)
            }
        else:
            prepare_cellular_triangular(dt, Lx, Ly, solver_class, periodized)
            identical_pairs = {
                dt.impurity_struct.keys()[0]:
                triangular_identical_pair_sets(Lx, Ly)
            }

        solver_class.initialize_solvers(dt, solver_data_package)

        max_times = {}

        if no_timing:
            for C in dt.impurity_struct.keys():
                max_times[C] = -1
            if mpi.is_master_node():
                print "no_timing! solvers will run until they perform all the mc steps", max_times

        if time_rules_automatic and (not no_timing):
            for C in dt.impurity_struct.keys():
                Nc = len(dt.impurity_struct[C])
                pref = ((dt.beta / 8.0) * U * Nc)**exponent  #**1.2
                print C
                print "Nc: ", Nc,
                print "U: ", U,
                print "beta: ", dt.beta,
                print "pref: ", pref
                max_times[C] = int(overall_prefactor * pref * 5 * 60)
            if mpi.is_master_node(): print "max times automatic: ", max_times

        actions = [
            generic_action(
                name="lattice",
                main=lambda data: [
                    data.get_Sigmaijkw(),
                    nested_mains.lattice(data,
                                         n=n,
                                         ph_symmetry=ph_symmetry,
                                         accepted_mu_range=[-2.0, 2.0])
                ],
                mixers=[],
                cautionaries=[],
                allowed_errors=[],
                printout=lambda data, it: ([
                    data.dump_general(quantities=
                                      ['Sigmaijkw', 'Gijkw', 'G_ij_iw'],
                                      suffix='-current'),
                    data.dump_scalar(suffix='-current')
                ] if ((it + 1) % print_current == 0) else None)),
            generic_action(
                name="pre_impurity",
                main=lambda data: nested_mains.pre_impurity(data),
                mixers=[],
                cautionaries=[],
                allowed_errors=[],
                printout=lambda data, it:
                (data.dump_general(quantities=['Gweiss_iw'], suffix='-current')
                 if ((it + 1) % print_current == 0) else None)),
            generic_action(
                name="impurity",
                main=(lambda data: nested_mains.impurity(
                    data,
                    U,
                    symmetrize_quantities=True,
                    alpha=alpha,
                    delta=delta,
                    n_cycles=n_cycles,
                    max_times=max_times,
                    solver_data_package=solver_data_package)) if
                (not use_cthyb) else (lambda data: nested_mains.impurity_cthyb(
                    data,
                    U,
                    symmetrize_quantities=True,
                    n_cycles=n_cycles,
                    max_times=max_times,
                    solver_data_package=solver_data_package)),
                mixers=[],
                cautionaries=[
                    lambda data, it: local_nan_cautionary(data,
                                                          data.impurity_struct,
                                                          Qs=['Sigma_imp_iw'],
                                                          raise_exception=True
                                                          ),
                    lambda data, it: symmetrize_cluster_impurity(
                        data.Sigma_imp_iw, identical_pairs)
                ],
                allowed_errors=[1],
                printout=lambda data, it: ([
                    data.dump_general(quantities=['Sigma_imp_iw', 'G_imp_iw'],
                                      suffix='-current'),
                    data.dump_solvers(suffix='-current')
                ] if ((it + 1) % print_current == 0) else None))
        ]

        monitors = [
            monitor(monitored_quantity=lambda: dt.ns['up'],
                    h5key='n_vs_it',
                    archive_name=dt.archive_name),
            monitor(monitored_quantity=lambda: dt.mus['up'],
                    h5key='mu_vs_it',
                    archive_name=dt.archive_name),
            monitor(monitored_quantity=lambda: dt.err,
                    h5key='err_vs_it',
                    archive_name=dt.archive_name)
        ]  #,
        #                 monitor( monitored_quantity = lambda: actions[3].errs[0],
        #                          h5key = 'sign_err_vs_it',
        #                          archive_name = dt.archive_name) ]

        convergers = [
            converger(monitored_quantity=lambda: dt.G_ij_iw,
                      accuracy=accuracy,
                      struct=impurity_struct,
                      archive_name=dt.archive_name,
                      h5key='diffs_G_loc')
        ]

        convergers.append(
            converger(monitored_quantity=lambda: dt.G_imp_iw,
                      accuracy=accuracy,
                      struct=impurity_struct,
                      archive_name=dt.archive_name,
                      h5key='diffs_G_imp'))

        dmft = generic_loop(name="cellular DMFT",
                            actions=actions,
                            convergers=convergers,
                            monitors=monitors)

        if (counter == 0):  #do the initial guess only once!
            if initial_guess_archive_name != '':
                if start_from_Gweiss:
                    dt.mus['up'] = U / 2.0
                    A = HDFArchive(initial_guess_archive_name, "r")
                    dt.Gweiss_iw << A['Gweiss_iw%s' % suffix]
                    del A
                    dt.dump_general(quantities=['Gweiss_iw'],
                                    suffix='-initial')
                else:
                    if mpi.is_master_node():
                        print "constructing dt from initial guess in a file: ", initial_guess_archive_name, "suffix: ", suffix
                    old_epsilonk = dt.epsilonk
                    dt.construct_from_file(initial_guess_archive_name, suffix)
                    if dt.beta != beta:
                        dt.change_beta(beta, n_iw)
                    if dt.n_k != nk:
                        dt.change_ks(IBZ.k_grid(nk))
                    if mpi.is_master_node():
                        print "putting back the old Jq and epsilonk"
                    dt.epsilonk = old_epsilonk
            else:
                if not fixed_n:
                    dt.mus['up'] = U / 2.0 + mutilde
                else:
                    dt.mus['up'] = U / 2.0
                if 'down' in dt.fermionic_struct.keys():
                    dt.mus['down'] = dt.mus[
                        'up']  #this is not necessary at the moment, but may become
                for C in dt.impurity_struct.keys():
                    for l in dt.impurity_struct[
                            C]:  #just the local components (but on each site!)
                        dt.Sigma_imp_iw[C].data[:, l, l] = U / 2.0 - int(
                            insulating_initial) * 1j / numpy.array(dt.ws)
                for key in fermionic_struct.keys():
                    for l in range(dt.Nc):
                        numpy.transpose(
                            dt.Sigmaijkw[key])[:, :, l, l, :] = U / 2.0 - int(
                                insulating_initial) * 1j / numpy.array(dt.ws)
                dt.dump_general(quantities=['Sigmaijkw', 'Sigma_imp_iw'],
                                suffix='-initial')

        if (counter == 0) and do_dmft_first:
            assert False, "not implemented"

        #run cellular!-------------

        if mix_Sigma:
            actions[3].mixers.append(
                mixer(mixed_quantity=lambda: dt.Sigmaijkw,
                      rules=rules,
                      func=mixer.mix_matrix_lattice_gf,
                      initialize=True))

        dt.dump_parameters()
        dt.dump_non_interacting()

        err = dmft.run(dt,
                       max_its=max_its,
                       min_its=min_its,
                       max_it_err_is_allowed=7,
                       print_final=True,
                       print_current=1,
                       start_from_action_index=(2 if start_from_Gweiss else 0))
        if mpi.is_master_node():
            print "periodizing result..."
            print "filling scalar dispersion..."
            for key in dt.fermionic_struct.keys():
                for kxi in range(dt.n_k):
                    for kyi in range(dt.n_k):
                        dt.epsilonk[key][kxi, kyi] = scalar_dispersion(
                            dt.ks[kxi], dt.ks[kyi])
            dt.dump_general(['epsilonk'], suffix='')
            dt.periodize_cumul()
            dt.dump_general(['Gkw', 'Sigmakw', 'gkw', 'gijw', 'g_imp_iw'],
                            suffix='-periodized_cumul')
            dt.periodize_selfenergy()
            dt.dump_general(['Gkw', 'Sigmakw', 'Sigmaijw'],
                            suffix='-periodized_selfenergy')
            cmd = 'mv %s %s' % (filename, filename.replace(
                "result", "cellular"))
            print cmd
            os.system(cmd)

        if (err == 2):
            print "Cautionary error!!! exiting..."
            solver_data_package['construct|run|exit'] = 2
            if MASTER_SLAVE_ARCHITECTURE and (mpi.size > 1):
                solver_data_package = mpi.bcast(solver_data_package)
            break

        if not MASTER_SLAVE_ARCHITECTURE: mpi.barrier()
        counter += 1
    if not (solver_data_package is None):
        solver_data_package['construct|run|exit'] = 2
    if MASTER_SLAVE_ARCHITECTURE and (mpi.size > 1):
        solver_data_package = mpi.bcast(solver_data_package)
    return dt, monitors, convergers
Code Example #20
File: schemes.py Project: JaksaVucicevic/sc_scripts
    def check_and_fix(self, data, keep_P_negative = True):
      #operates directly on data.P_loc_iw as this is the one that will be used in chiqnu calculation
      clipped = edmft.cautionary.check_and_fix(self, data, finalize=False, keep_P_negative=keep_P_negative)
      if clipped and mpi.is_master_node(): print "GW.cautionary.check_and_fix: edmft.cautionary clipped "
      prefactor = 1.0 - self.ms0 / (self.clip_counter**self.ccpower + 1.0)

      for A in data.bosonic_struct.keys():
        res = numpy.less_equal(data.Pqnu[A][:,:,:].real, (data.Jq[A][:,:])**(-1.0) ) * numpy.less_equal( data.Jq[A][:,:], numpy.zeros((data.n_q, data.n_q)))
        data.Pqnu[A][:,:,:] = (1-res[:,:,:])*data.Pqnu[A][:,:,:] + res[:,:,:]*(data.Jq[A][:,:])**(-1.0)*prefactor
        if not (numpy.sum(res) == 0): 
          clipped = True                     
          #if mpi.is_master_node():
          if mpi.is_master_node(): print "GW.cautionary.check_and_fix: Too negative Polarization!!! Clipping to large value in block ",A

      #for A in data.bosonic_struct.keys():
      #  for nui in range(data.m_to_nui(-3),data.m_to_nui(3)): #careful with the range
      #    for qxi in range(data.n_q):
      #      for qyi in range(data.n_q):
      #        if  ( data.Pqnu[A][nui,qxi,qyi].real < (data.Jq[A][qxi,qyi])**(-1.0) ) and (data.Jq[A][qxi,qyi]<0.0) : #here we assume P is negative
      #          data.Pqnu[A][nui,qxi,qyi] = prefactor*(data.Jq[A][qxi,qyi])**(-1.0) + 1j*data.Pqnu[A][nui,qxi,qyi].imag
      #          clipped = True        
        if keep_P_negative:
          res2 = numpy.less_equal(data.Pqnu[A][:,:,:].real, 0.0 )
          if not numpy.all(res2):
            if mpi.is_master_node(): print "GW.cautionary.check_and_fix: Positive Polarization!!! Clipping to zero in block ",A
            data.Pqnu[A][:,:,:] = data.Pqnu[A][:,:,:]*res2[:,:,:]
            clipped = True 

      nan_found = False
      for U in data.fermionic_struct.keys():
        if numpy.any(numpy.isnan(data.Sigmakw[U])):
          nan_found=True
          if mpi.is_master_node(): print "GW.cautionary.check_and_fix: nan in Sigmakw[",U,"]"
        if numpy.any(numpy.isnan(data.Sigma_loc_iw[U].data[:,0,0])):
          nan_found=True
          if mpi.is_master_node(): print "GW.cautionary.check_and_fix: nan in Sigma_loc_iw[",U,"]"
      for A in data.bosonic_struct.keys():
        if numpy.any(numpy.isnan(data.Pqnu[A])):
          nan_found=True
          if mpi.is_master_node(): print "GW.cautionary.check_and_fix: nan in Pqnu[",A,"]"
        if numpy.any(numpy.isnan(data.P_loc_iw[A].data[:,0,0])):
          nan_found=True
          if mpi.is_master_node(): print "GW.cautionary.check_and_fix: nan in P_loc_iw[",A,"]"
      if nan_found: 
        #if mpi.is_master_node():
        print "[Node",mpi.rank,"]","exiting to system..."
        if mpi.is_master_node():
          data.dump_all(archive_name="black_box_nan", suffix='')          
        #if not MASTER_SLAVE_ARCHITECTURE: mpi.barrier()
        mpi.bcast({'construct|run|exit': 2}) 
        quit()      

      #print ">>>>>>> [Node",mpi.rank,"] Sigmakw", data.Sigmakw['up'][data.nw/2,0,0]
      #print ">>>>>>> [Node",mpi.rank,"] Pqnu 0", data.Pqnu['0'][data.nnu/2,0,0]
      #print ">>>>>>> [Node",mpi.rank,"] Pqnu 1", data.Pqnu['1'][data.nnu/2,0,0]

      if clipped: 
        if mpi.is_master_node(): print "GW.cautionary.check_and_fix: CLIPPED!!"
        self.clip_counter += 1 
      else: 
        self.clip_counter = self.clip_counter/self.ccrelax 

      return clipped
コード例 #21
0
ファイル: gf_bcast.py プロジェクト: JaksaVucicevic/triqs
# Import the Green's functions
from pytriqs.gf.local import GfImFreq, iOmega_n, inverse

# Create the Matsubara-frequency Green's function and initialize it
g = GfImFreq(indices = [1], beta = 50, n_points = 1000, name = "imp")
g << inverse( iOmega_n + 0.5 )

import pytriqs.utility.mpi as mpi

mpi.bcast(g)



#Block

from pytriqs.gf.local import *
g1 = GfImFreq(indices = ['eg1','eg2'], beta = 50, n_points = 1000, name = "egBlock")
g2 = GfImFreq(indices = ['t2g1','t2g2','t2g3'], beta = 50, n_points = 1000, name = "t2gBlock")
G = BlockGf(name_list = ('eg','t2g'), block_list = (g1,g2), make_copies = False)


mpi.bcast(G)


#imtime 
from pytriqs.gf.local import *

# A Green's function on the Matsubara axis set to a semicircular
gw = GfImFreq(indices = [1], beta = 50)
gw << SemiCircular(half_bandwidth = 1)
Code Example #22
def is_vasp_lock_present():
    res_bool = False
    if mpi.is_master_node():
        res_bool = os.path.isfile('./vasp.lock')
    res_bool = mpi.bcast(res_bool)
    return res_bool
Code Example #23
File: sumk_lda_tools.py Project: tayral/dft_tools
    def constr_Sigma_real_axis(self, filename, hdf=True, hdf_dataset='SigmaReFreq',n_om=0,orb=0, tol_mesh=1e-6):
        """Uses Data from files to construct Sigma (or GF)  on the real axis."""

        if not hdf:
            # read sigma from text files
            #first get the mesh out of one of the files:
            if (len(self.gf_struct_solver[orb][0][1])==1):
                Fname = filename+'_'+self.gf_struct_solver[orb][0][0]+'.dat'
            else:
                Fname = filename+'_'+self.gf_struct_solver[orb][0][0]+'/'+str(self.gf_struct_solver[orb][0][1][0])+'_'+str(self.gf_struct_solver[orb][0][1][0])+'.dat'

            R = read_fortran_file(Fname)
            mesh = numpy.zeros([n_om],numpy.float_)
            try:
                for i in xrange(n_om):
                    mesh[i] = R.next()
                    sk = R.next()
                    sk = R.next()

            except StopIteration: # a more explicit error if the file is corrupted.
                raise IOError("SumkLDA.read_Sigma_ME : reading mesh failed!")
            R.close()

            # check whether the mesh is uniform
            bin = (mesh[n_om-1]-mesh[0])/(n_om-1)
            for i in xrange(n_om):
                assert abs(i*bin+mesh[0]-mesh[i]) < tol_mesh, 'constr_Sigma_ME: real-axis mesh is non-uniform!'

            # construct Sigma
            a_list = [a for a,al in self.gf_struct_solver[orb]]
            glist = lambda : [ GfReFreq(indices = al, window=(mesh[0],mesh[n_om-1]),n_points=n_om) for a,al in self.gf_struct_solver[orb]]
            SigmaME = BlockGf(name_list = a_list, block_list = glist(),make_copies=False)

            #read Sigma
        
            for i,g in SigmaME:
                mesh=[w for w in g.mesh]
                for iL in g.indices:
                    for iR in g.indices:
                        if (len(g.indices) == 1):
                            Fname = filename+'_%s'%(i)+'.dat'
                        else:
                            Fname = 'SigmaME_'+'%s'%(i)+'_%s'%(iL)+'_%s'%(iR)+'.dat'
                        R = read_fortran_file(Fname)
                        try:
                            for iom in xrange(n_om):
                                sk = R.next()
                                rsig = R.next()
                                isig = R.next()
                                g.data[iom,iL,iR]=rsig+1j*isig
                        except StopIteration: # a more explicit error if the file is corrupted.
                            raise IOError("SumkLDA.constr_Sigma_real_axis: reading Sigma from file failed!")
                        R.close()


        else:

            # read sigma from hdf
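            # only the master node opens the archive; the mesh parameters are broadcast so that the
            # other nodes can build an empty SigmaME of the right shape before receiving the full object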
            omega_min=0.0
            omega_max=0.0
            n_om=0
            if (mpi.is_master_node()):
                ar = HDFArchive(filename)
                SigmaME = ar[hdf_dataset]
                del ar
                # we need some parameters to construct Sigma on other nodes
                omega_min=SigmaME.mesh.omega_min
                omega_max=SigmaME.mesh.omega_max
                n_om=len(SigmaME.mesh)
            omega_min=mpi.bcast(omega_min)
            omega_max=mpi.bcast(omega_max)
            n_om=mpi.bcast(n_om)
            mpi.barrier()
            # construct Sigma on other nodes
            if (not mpi.is_master_node()):
                a_list = [a for a,al in self.gf_struct_solver[orb]]
                glist = lambda : [ GfReFreq(indices = al, window=(omega_min,omega_max),n_points=n_om) for a,al in self.gf_struct_solver[orb]]
                SigmaME = BlockGf(name_list = a_list, block_list = glist(),make_copies=False)
            # pass SigmaME to other nodes
            SigmaME = mpi.bcast(SigmaME)
            mpi.barrier()

        SigmaME.note='ReFreq'

        return SigmaME
Code example #24
File: gf_tensor_valued.py   Project: TRIQS/triqs
del A

A2=HDFArchive("Tv3.h5",'r')
G3=A2["G"]
del A2
assert G3.data.shape==G.data.shape,"not ok:%s vs %s"%(G3.data.shape, G.data.shape)

#mpi bcast
import pytriqs.utility.mpi as mpi
G4=GfImFreq(beta=1.,statistic="Fermion",n_points=100, indices=[['a'],['b1','b2'],['c1', 'c2']])
if mpi.is_master_node():
   G4.data[:,:,:,:] = 5
   assert G4.data[0,0,0,0]==5, "not ok :%s"%(G4.data[0,0,0,0])
if not mpi.is_master_node():
   assert G4.data[0,0,0,0]==0, "not ok"
G4 = mpi.bcast(G4)
if not mpi.is_master_node():
   assert G4.data[0,0,0,0]==5, "not ok :%s"%(G4.data[0,0,0,0])


##Tv4
print "#############################"
G5=GfImFreq(mesh=m, indices=[['a'],['b1','b2'],['c1', 'c2'], ['c']])
print G5.data.shape

assert G5.data.shape==(20,1,2,2,1),"not ok"
assert G5['a','b1','c2', 'c'].data.shape==(20,), "not ok"

# ImTime
print "#############################"
G6=GfImTime(beta=1.,statistic="Fermion",n_points=100, indices=[['a'],['b1','b2'],['c1', 'c2']])
Code example #25
        def run(data,
                C,
                U,
                symmetrize_quantities=True,
                n_cycles=20000,
                max_time=5 * 60,
                solver_data_package=None):
            solver = data.solvers[C]

            block_names = [name for name, g in solver.G0_iw]

            N_states = len(solver.G0_iw[block_names[0]].data[0, 0, :])
            gf_struct = {
                block_names[0]: range(N_states),
                block_names[1]: range(N_states)
            }

            h_int = U * n(block_names[0], 0) * n(block_names[1], 0)
            for i in range(1, N_states):
                h_int += U * n(block_names[0], i) * n(block_names[1], i)

            QN = [
                sum([n(bl, i) for i in range(N_states)], Operator())
                for bl in block_names
            ]

            if solver_data_package is None: solver_data_package = {}

            solver_data_package['which_solver'] = C
            solver_data_package['solve_parameters'] = {}
            solver_data_package['solve_parameters']['U'] = U
            solver_data_package['solve_parameters']["max_time"] = max_time
            solver_data_package['solve_parameters']["random_name"] = ""
            solver_data_package['solve_parameters'][
                "random_seed"] = 123 * mpi.rank + 567
            solver_data_package['solve_parameters']["length_cycle"] = 50
            solver_data_package['solve_parameters']["n_warmup_cycles"] = 5000
            solver_data_package['solve_parameters']["n_cycles"] = n_cycles
            solver_data_package['solve_parameters'][
                "measure_density_matrix"] = True
            solver_data_package['solve_parameters'][
                "use_norm_as_weight"] = True
            solver_data_package['solve_parameters'][
                "partition_method"] = "quantum_numbers"

            print solver_data_package['solve_parameters']

            solver_data_package['G0_iw'] = solver.G0_iw

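            # tag 1 = "run" in the master-slave protocol (0 = construct the solver, 2 = exit)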
            solver_data_package['construct|run|exit'] = 1

            if MASTER_SLAVE_ARCHITECTURE and (mpi.size > 1):
                if mpi.is_master_node():
                    print "broadcasting solver_data_package!!"
                solver_data_package = mpi.bcast(solver_data_package)

            if mpi.is_master_node(): print "about to run "
            dct = deepcopy(solver_data_package['solve_parameters'])
            del dct['U']
            solver.solve(h_int=h_int, quantum_numbers=QN, **dct)

            G_tau = deepcopy(solver.G_tau['up'])
            if symmetrize_quantities:
                G_tau.data[:, :, :] = 0.0
                for bl in block_names:
                    G_tau.data[:, :, :] += solver.G_tau[bl].data[:, :, :]
                G_tau.data[:, :, :] /= len(block_names)

            data.G_imp_iw[C] << Fourier(G_tau)
            fit_and_overwrite_tails_on_G(data.G_imp_iw[C], starting_iw=7.0)
            G0 = data.Gweiss_iw[C].copy()
            fit_and_overwrite_tails_on_G(G0, starting_iw=7.0)
            for wi in range(data.nw):
                data.Sigma_imp_iw[C].data[wi, :, :] = inv(
                    G0.data[wi, :, :]) - inv(data.G_imp_iw[C].data[wi, :, :])
            fit_and_overwrite_tails_on_Sigma(data.Sigma_imp_iw[C],
                                             starting_iw=7.0)
Code example #26
File: sc_dmft.py   Project: TRIQS/dft_tools
    return os.path.isfile('./vasp.lock')

def is_vasp_running(vasp_pid):
    """
    Tests if VASP initial process is still alive.
    """
    pid_exists = False
    if mpi.is_master_node():
        try:
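            # signal 0 does not kill the process, it only probes whether the PID exists
            # (EPERM means the process exists but belongs to another user)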
            os.kill(vasp_pid, 0)
        except OSError, e:
            pid_exists = e.errno == errno.EPERM
        else:
            pid_exists = True

    pid_exists = mpi.bcast(pid_exists)
    return pid_exists

def get_dft_energy():
    """
    Reads energy from the last line of OSZICAR.
    """
    with open('OSZICAR', 'r') as f:
        nextline = f.readline()
        while nextline.strip():
            line = nextline
            nextline = f.readline()
#            print "OSZICAR: ", line[:-1]

    try:
        dft_energy = float(line.split()[2])
Code example #27
File: triangles.py   Project: krivenko/som
g_tau = GfImTime(beta = beta, n_points = n_tau, indices = indices)
g_w = GfReFreq(window = run_params['energy_window'], n_points = n_w, indices = indices)
S_tau = g_tau.copy()
g_tau_rec = g_tau.copy()

if mpi.is_master_node():
    arch = HDFArchive('triangles.h5','w')
    arch['abs_errors'] = abs_error

for s in abs_error:
    if mpi.is_master_node():
        make_g_tau(g_tau)
        g_tau.data[:] += s * 2*(np.random.rand(*g_tau.data.shape) - 0.5)

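    # the noisy input exists only on the master node, so broadcast it before every rank runs SOM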
    g_tau = mpi.bcast(g_tau)
    S_tau.data[:] = 1.0

    if mpi.is_master_node():
        gr_name = 'abs_error_%.4f' % s
        arch.create_group(gr_name)
        abs_err_gr = arch[gr_name]

    for name, g, S, g_rec in (('g_tau',g_tau,S_tau,g_tau_rec),):

        start = time.clock()
        cont = Som(g, S)
        cont.run(**run_params)
        exec_time = time.clock() - start

        g_rec << cont
Code example #28
def nested_calculation(clusters,
                       nested_struct_archive_name=None,
                       flexible_Gweiss=False,
                       sign=-1,
                       sign_up_to=2,
                       use_Gweiss_causal_cautionary=False,
                       use_G_proj=False,
                       mix_G_proj=False,
                       G_proj_mixing_rules=[[0, 0.0]],
                       Us=[1.0],
                       Ts=[0.125],
                       ns=[0.5],
                       fixed_n=True,
                       mutildes=[0.0],
                       dispersion=lambda kx, ky: epsilonk_square(kx, ky, 0.25),
                       ph_symmetry=True,
                       use_cumulant=False,
                       n_ks=[24],
                       n_k_automatic=False,
                       n_k_rules=[[0.06, 32], [0.03, 48], [0.005, 64],
                                  [0.00, 96]],
                       w_cutoff=20.0,
                       min_its=5,
                       max_its=25,
                       mix_Sigma=False,
                       rules=[[0, 0.5], [6, 0.2], [12, 0.65]],
                       do_dmft_first=False,
                       use_cthyb=False,
                       alpha=0.5,
                       delta=0.1,
                       automatic_alpha_and_delta=False,
                       n_cycles=10000000,
                       max_time_rules=[[1, 5 * 60], [2, 20 * 60], [4, 80 * 60],
                                       [8, 200 * 60], [16, 400 * 60]],
                       time_rules_automatic=False,
                       exponent=0.7,
                       overall_prefactor=1.0,
                       no_timing=False,
                       accuracy=1e-4,
                       solver_data_package=None,
                       print_current=1,
                       insulating_initial=False,
                       initial_guess_archive_name='',
                       suffix=''):

    if mpi.is_master_node():
        print "WELCOME TO %snested calculation!" % ("cumul_"
                                                    if use_cumulant else "")
        if n_k_automatic: print "n_k automatic!!!"
    if len(n_ks) == 0 and n_k_automatic: n_ks = [0]

    if use_cthyb:
        solver_class = solvers.cthyb
    else:
        solver_class = solvers.ctint

    fermionic_struct = {'up': [0]}

    if mpi.is_master_node(): print "nested structure: "
    if not (nested_struct_archive_name is None):
        try:
            nested_scheme = nested_struct.from_file(nested_struct_archive_name)
            if mpi.is_master_node():
                print "nested structure loaded from file", nested_struct_archive_name
        except:
            nested_scheme = nested_struct(clusters)
            nested_scheme.print_to_file(nested_struct_archive_name)
            if mpi.is_master_node():
                print "nested structure printed to file", nested_struct_archive_name
    else:
        nested_scheme = nested_struct(clusters)
    if mpi.is_master_node(): print nested_scheme.get_tex()

    impurity_struct = nested_scheme.get_impurity_struct()

    if not time_rules_automatic:
        max_times = {}
        for C in impurity_struct:
            for r in max_time_rules:
                if r[0] <= len(impurity_struct[C]):
                    max_times[C] = r[1]
        if mpi.is_master_node(): print "max_times from rules: ", max_times

    beta = 1.0 / Ts[0]

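    # choose n_iw so that the largest Matsubara frequency (2*n_iw+1)*pi/beta stays below w_cutoff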
    n_iw = int(((w_cutoff * beta) / math.pi - 1.0) / 2.0)
    if mpi.is_master_node():
        print "PM HUBBARD GW: n_iw: ", n_iw

    if not n_k_automatic:
        n_k = n_ks[0]
        print "n_k = ", n_k
    else:
        n_k = n_k_from_rules(Ts[0], n_k_rules)
        #if mpi.is_master_node(): print "n_k automatic!!!"

    dt = nested_data(n_iw=n_iw,
                     n_k=n_k,
                     beta=beta,
                     impurity_struct=impurity_struct,
                     fermionic_struct=fermionic_struct,
                     archive_name="so_far_nothing_you_shouldnt_see_this_file")
    if use_cumulant:
        dt.__class__ = cumul_nested_data
        dt.promote()

    if fixed_n:
        ps = itertools.product(n_ks, ns, Us, Ts)
    else:
        ps = itertools.product(n_ks, mutildes, Us, Ts)

    counter = 0
    old_nk = n_k
    old_beta = beta

    for p in ps:
        #name stuff to avoid confusion
        nk = (p[0] if (not n_k_automatic) else n_k_from_rules(p[3], n_k_rules))  # p[3] is T, which is only unpacked below
        if fixed_n:
            n = p[1]
        else:
            mutilde = p[1]
            n = None
        U = p[2]
        T = p[3]
        beta = 1.0 / T

        if nk != old_nk and (not n_k_automatic):
            dt.change_ks(IBZ.k_grid(nk))

        if beta != old_beta:
            n_iw = int(((w_cutoff * beta) / math.pi - 1.0) / 2.0)
            if n_k_automatic:
                nk = n_k_from_rules(T, n_k_rules)
                if nk != old_nk:
                    dt.change_ks(IBZ.k_grid(nk))
            dt.change_beta(beta, n_iw)

        old_beta = beta
        old_nk = nk
        nested_scheme.set_nk(nk)  #don't forget this part

        filename = "result"
        if len(n_ks) > 1 and (not n_k_automatic):
            filename += ".nk%s" % nk
        if len(ns) > 1 and fixed_n:
            filename += ".n%s" % n
        if len(mutildes) > 1 and not fixed_n:
            filename += ".mutilde%s" % mutilde
        if len(Us) > 1: filename += ".U%s" % U
        if len(Ts) > 1: filename += ".T%.4f" % T
        filename += ".h5"
        dt.archive_name = filename

        if mpi.is_master_node():
            if fixed_n:
                print "Working: U: %s T %s n: %s n_k: %s n_iw: %s" % (U, n, T,
                                                                      nk, n_iw)
            else:
                print "Working: U: %s T %s mutilde: %s n_k: %s n_iw: %s" % (
                    U, mutilde, T, nk, n_iw)

        if mpi.is_master_node():
            print "about to fill dispersion. ph-symmetry: ", ph_symmetry
        for key in dt.fermionic_struct.keys():
            for kxi in range(dt.n_k):
                for kyi in range(dt.n_k):
                    dt.epsilonk[key][kxi,
                                     kyi] = dispersion(dt.ks[kxi], dt.ks[kyi])

        if not use_cumulant:
            prepare = prepare_nested
        else:
            prepare = prepare_cumul_nested
        if flexible_Gweiss:
            prepare(dt, nested_scheme, solver_class, flexible_Gweiss, sign,
                    sign_up_to)
        else:
            prepare(dt, nested_scheme, solver_class, use_G_proj=use_G_proj)

        solver_class.initialize_solvers(dt, solver_data_package)

        max_times = {}

        if no_timing:
            for C in dt.impurity_struct.keys():
                max_times[C] = -1
            if mpi.is_master_node():
                print "no_timing! solvers will run until they perform all the mc steps", max_times

        if time_rules_automatic and (not no_timing):
            for C in dt.impurity_struct.keys():
                Nc = len(dt.impurity_struct[C])
                pref = ((dt.beta / 8.0) * U * Nc)**exponent  #**1.2
                print C
                print "Nc: ", Nc,
                print "U: ", U,
                print "beta: ", dt.beta,
                print "pref: ", pref
                max_times[C] = int(overall_prefactor * pref * 5 * 60)
            if mpi.is_master_node(): print "max times automatic: ", max_times

        identical_pairs_Sigma = nested_scheme.get_identical_pairs()
        identical_pairs_G = nested_scheme.get_identical_pairs_for_G()
        identical_pairs_G_ai = nested_scheme.get_identical_pairs_for_G(
            across_imps=True)

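        # the DMFT loop is assembled from four generic actions: lattice, pre_impurity, impurity and selfenergy
        # (when use_cumulant is True the selfenergy action is replaced by a cumulant action below)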
        actions = [
            generic_action(name="lattice",
                           main=lambda data: nested_mains.lattice(
                               data,
                               n=n,
                               ph_symmetry=ph_symmetry,
                               accepted_mu_range=[-2.0, 2.0]),
                           mixers=[],
                           cautionaries=[],
                           allowed_errors=[],
                           printout=lambda data, it: ([
                               data.dump_general(quantities=['Gkw', 'Gijw'] +
                                                 (['G_proj_iw']
                                                  if use_G_proj else []),
                                                 suffix='-current'),
                               data.dump_scalar(suffix='-current')
                           ] if ((it + 1) % print_current == 0) else None)),
            generic_action(
                name="pre_impurity",
                main=lambda data: nested_mains.pre_impurity(data),
                mixers=[],
                cautionaries=([
                    lambda data, it: ph_symmetric_Gweiss_causal_cautionary(
                        data, ntau=5000)
                ] if use_Gweiss_causal_cautionary else []),
                allowed_errors=([0] if use_Gweiss_causal_cautionary else []),
                printout=lambda data, it: (
                    (data.dump_general(quantities=[
                        'Gweiss_iw_unfit', 'Gweiss_iw', 'Delta_iw',
                        'Delta_iw_fit', 'Delta_tau', 'Delta_tau_fit'
                    ],
                                       suffix='-%s' % it))
                    if use_Gweiss_causal_cautionary else (data.dump_general(
                        quantities=['Gweiss_iw'], suffix='-current')))),
            generic_action(
                name="impurity",
                main=(lambda data: nested_mains.impurity(
                    data,
                    U,
                    symmetrize_quantities=True,
                    alpha=alpha,
                    delta=delta,
                    automatic_alpha_and_delta=automatic_alpha_and_delta,
                    n_cycles=n_cycles,
                    max_times=max_times,
                    solver_data_package=solver_data_package)) if
                (not use_cthyb) else (lambda data: nested_mains.impurity_cthyb(
                    data,
                    U,
                    symmetrize_quantities=True,
                    n_cycles=n_cycles,
                    max_times=max_times,
                    solver_data_package=solver_data_package)),
                mixers=[],
                cautionaries=[
                    lambda data, it: local_nan_cautionary(data,
                                                          data.impurity_struct,
                                                          Qs=['Sigma_imp_iw'],
                                                          raise_exception=True
                                                          ), lambda data, it:
                    (symmetric_G_and_self_energy_on_impurity(
                        data.G_imp_iw,
                        data.Sigma_imp_iw,
                        data.solvers,
                        identical_pairs_Sigma,
                        identical_pairs_G,
                        across_imps=True,
                        identical_pairs_G_ai=identical_pairs_G_ai)
                     if it >= 0 else symmetrize_cluster_impurity(
                         data.Sigma_imp_iw, identical_pairs_Sigma))
                ],
                allowed_errors=[1],
                printout=lambda data, it: ([
                    data.dump_general(quantities=['Sigma_imp_iw', 'G_imp_iw'],
                                      suffix='-current'),
                    data.dump_solvers(suffix='-current')
                ] if ((it + 1) % print_current == 0) else None)),
            generic_action(
                name="selfenergy",
                main=lambda data: nested_mains.selfenergy(data),
                mixers=[],
                cautionaries=[
                    lambda data, it: nonloc_sign_cautionary(data.Sigmakw['up'],
                                                            desired_sign=-1,
                                                            clip_off=False,
                                                            real_or_imag='imag'
                                                            )
                ],
                allowed_errors=[0],
                printout=lambda data, it:
                (data.dump_general(quantities=['Sigmakw', 'Sigmaijw'],
                                   suffix='-current')
                 if ((it + 1) % print_current == 0) else None))
        ]

        if use_cumulant:
            del actions[3]
            actions.append(
                generic_action(
                    name="cumulant",
                    main=lambda data: cumul_nested_mains.cumulant(data),
                    mixers=[],
                    cautionaries=[
                        lambda data, it: nonloc_sign_cautionary(
                            data.gkw['up'],
                            desired_sign=-1,
                            clip_off=False,
                            real_or_imag='imag')
                    ],
                    allowed_errors=[0],
                    printout=lambda data, it:
                    (data.dump_general(quantities=['gijw', 'gkw'],
                                       suffix='-current')
                     if ((it + 1) % print_current == 0) else None)))

        monitors = [
            monitor(monitored_quantity=lambda: dt.ns['up'],
                    h5key='n_vs_it',
                    archive_name=dt.archive_name),
            monitor(monitored_quantity=lambda: dt.mus['up'],
                    h5key='mu_vs_it',
                    archive_name=dt.archive_name),
            monitor(monitored_quantity=lambda: dt.err,
                    h5key='err_vs_it',
                    archive_name=dt.archive_name)
        ]  #,
        #                 monitor( monitored_quantity = lambda: actions[3].errs[0],
        #                          h5key = 'sign_err_vs_it',
        #                          archive_name = dt.archive_name) ]

        if use_cumulant:
            monitors += [
                monitor(monitored_quantity=lambda: dt.gijw['up'][dt.nw / 2, 0,
                                                                 0].imag,
                        h5key='Img_00_w0_vs_it',
                        archive_name=dt.archive_name),
                monitor(monitored_quantity=lambda: dt.gijw['up'][dt.nw / 2, 0,
                                                                 0].real,
                        h5key='Reg_00_iw0_vs_it',
                        archive_name=dt.archive_name),
                monitor(monitored_quantity=lambda: dt.gkw['up'][
                    dt.nw / 2, dt.n_k / 2, dt.n_k / 2].imag,
                        h5key='Img_pipi_iw0_vs_it',
                        archive_name=dt.archive_name),
                monitor(monitored_quantity=lambda: dt.gkw['up'][
                    dt.nw / 2, dt.n_k / 2, dt.n_k / 2].real,
                        h5key='Reg_pipi_iw0_vs_it',
                        archive_name=dt.archive_name)
            ]
        else:
            monitors += [
                monitor(monitored_quantity=lambda: dt.Sigma_loc_iw['up'].data[
                    dt.nw / 2, 0, 0].imag,
                        h5key='ImSigma_loc_iw0_vs_it',
                        archive_name=dt.archive_name),
                monitor(monitored_quantity=lambda: dt.Sigma_loc_iw['up'].data[
                    dt.nw / 2, 0, 0].real,
                        h5key='ReSigma_loc_iw0_vs_it',
                        archive_name=dt.archive_name),
                monitor(monitored_quantity=lambda: dt.Sigmakw['up'][
                    dt.nw / 2, dt.n_k / 2, dt.n_k / 2].imag,
                        h5key='ImSigmakw_pipi_vs_it',
                        archive_name=dt.archive_name),
                monitor(monitored_quantity=lambda: dt.Sigmakw['up'][
                    dt.nw / 2, dt.n_k / 2, dt.n_k / 2].real,
                        h5key='ReSigmakw_pipi_vs_it',
                        archive_name=dt.archive_name)
            ]

        convergers = [
            converger(monitored_quantity=lambda: dt.G_loc_iw,
                      accuracy=accuracy,
                      struct=fermionic_struct,
                      archive_name=dt.archive_name,
                      h5key='diffs_G_loc'),
            converger(monitored_quantity=lambda: dt.Sigma_loc_iw,
                      accuracy=accuracy,
                      struct=fermionic_struct,
                      archive_name=dt.archive_name,
                      h5key='diffs_Sigma_loc'),
            converger(monitored_quantity=lambda: dt.G_imp_iw,
                      accuracy=accuracy,
                      struct=impurity_struct,
                      archive_name=dt.archive_name,
                      h5key='diffs_G_imp'),
            converger(monitored_quantity=lambda: dt.Gweiss_iw,
                      accuracy=accuracy,
                      struct=impurity_struct,
                      archive_name=dt.archive_name,
                      h5key='diffs_Gweiss')
        ]
        max_dist = 3
        for i in range(max_dist + 1):
            for j in range(0, i + 1):
                convergers.append(
                    converger(monitored_quantity=lambda i=i, j=j: dt.Gijw['up']
                              [:, i, j],
                              accuracy=accuracy,
                              func=converger.check_numpy_array,
                              archive_name=dt.archive_name,
                              h5key='diffs_G_%s%s' % (i, j)))
        dmft = generic_loop(name="nested-cluster DMFT",
                            actions=actions,
                            convergers=convergers,
                            monitors=monitors)

        if (counter == 0):  #do the initial guess only once!
            if initial_guess_archive_name != '':
                if mpi.is_master_node():
                    print "constructing dt from initial guess in a file: ", initial_guess_archive_name, "suffix: ", suffix
                old_epsilonk = dt.epsilonk
                dt.construct_from_file(initial_guess_archive_name, suffix)
                if dt.beta != beta:
                    dt.change_beta(beta, n_iw)
                if dt.n_k != nk:
                    dt.change_ks(IBZ.k_grid(nk))
                if mpi.is_master_node():
                    print "putting back the old Jq and epsilonk"
                dt.epsilonk = old_epsilonk
            else:
                if not fixed_n:
                    dt.mus['up'] = mutilde
                else:
                    dt.mus['up'] = U / 2.0
                if 'down' in dt.fermionic_struct.keys():
                    dt.mus['down'] = dt.mus['up']  # not necessary at the moment, but may become necessary
                for C in dt.impurity_struct.keys():
                    for l in dt.impurity_struct[
                            C]:  #just the local components (but on each site!)
                        dt.Sigma_imp_iw[C].data[:, l, l] = U / 2.0 - int(
                            insulating_initial) * 1j / numpy.array(dt.ws)
                if not use_cumulant:
                    for key in fermionic_struct.keys():
                        dt.Sigmakw[key][:, :, :] = U / 2.0
                        numpy.transpose(dt.Sigmakw[key])[:] -= int(
                            insulating_initial) * 1j / numpy.array(dt.ws)
                else:
                    for key in fermionic_struct.keys():
                        numpy.transpose(dt.gkw[key])[:] = numpy.array(
                            dt.iws[:])**(-1.0)
            if not use_cumulant:
                dt.dump_general(quantities=['Sigmakw', 'Sigma_imp_iw'],
                                suffix='-initial')
            else:
                dt.dump_general(quantities=['gkw'], suffix='-initial')

        if (counter == 0) and do_dmft_first:
            assert False, "this part of code needs to be adjusted"
            #do one short run of dmft before starting nested
            if mpi.is_master_node():
                print "================= 20 iterations of DMFT!!!! ================="
            #save the old stuff
            old_impurity_struct = dt.impurity_struct
            old_name = dmft.name
            #dmft_scheme
            dmft_scheme = nested_scheme([cluster(0, 0, 1, 1)])
            dmft_scheme.set_nk(dt.n_k)
            dt.impurity_struct = dmft_scheme.get_impurity_struct()
            prepare_nested(dt, dmft_scheme, solvers.ctint)
            dmft.name = "dmft"
            #run dmft
            dmft.run(dt,
                     max_its=20,
                     min_its=15,
                     max_it_err_is_allowed=7,
                     print_final=True,
                     print_current=10000)
            #move the result
            if mpi.is_master_node():
                cmd = 'mv %s %s' % (filename, filename.replace(
                    "result", "dmft"))
                print cmd
                os.system(cmd)
            #put everything back the way it was
            dmft.name = old_name
            dt.impurity_struct = old_impurity_struct
            prepare(dt, nested_scheme, solver_class)

        #run nested!-------------

        if mix_Sigma:
            actions[3].mixers.extend([
                mixer(mixed_quantity=lambda: (dt.Sigmakw if
                                              (not use_cumulant) else dt.gkw),
                      rules=rules,
                      func=mixer.mix_lattice_gf,
                      initialize=True)
            ])
            actions[2].mixers.extend([
                mixer(mixed_quantity=lambda: dt.Sigma_imp_iw,
                      rules=rules,
                      func=mixer.mix_block_gf,
                      initialize=True)
            ])

        if mix_G_proj:
            actions[0].mixers.append(
                mixer(mixed_quantity=lambda: dt.G_proj_iw,
                      rules=G_proj_mixing_rules,
                      func=mixer.mix_block_gf,
                      initialize=True))

        dt.dump_parameters()
        dt.dump_non_interacting()

        err = dmft.run(dt,
                       max_its=max_its,
                       min_its=min_its,
                       max_it_err_is_allowed=7,
                       print_final=True,
                       print_current=1)
        if mpi.is_master_node():
            if use_cumulant:
                print "calculating Sigma"
                dt.get_Sigmakw()
                dt.get_Sigma_loc()
                dt.dump_general(['Sigmakw', 'Sigma_loc_iw'], suffix='-final')
            cmd = 'mv %s %s' % (filename, filename.replace("result", "nested"))
            print cmd
            os.system(cmd)

        if (err == 2):
            print "Cautionary error!!! exiting..."
            solver_data_package['construct|run|exit'] = 2
            if MASTER_SLAVE_ARCHITECTURE and (mpi.size > 1):
                solver_data_package = mpi.bcast(solver_data_package)
            break

        if not MASTER_SLAVE_ARCHITECTURE: mpi.barrier()
        counter += 1
    if not (solver_data_package is None):
        solver_data_package['construct|run|exit'] = 2
    if MASTER_SLAVE_ARCHITECTURE and (mpi.size > 1):
        solver_data_package = mpi.bcast(solver_data_package)
    return dt, monitors, convergers
Code example #29
File: block_matrix.py   Project: aeantipov/triqs
from pytriqs.arrays.block_matrix import *
from pytriqs.archive import *
import pytriqs.utility.mpi as mpi
from numpy import matrix

A=BlockMatrix(['up','down'],[matrix([[0]]), matrix([[1.0]])])
A0=mpi.bcast(A)

assert A["up"]==matrix([[0.0]]), "not ok"
assert A(0)==matrix([[0.0]]), "not ok"
assert A.size() ==2, "not ok"

B=A+0.5*A+A*0.5
assert B(1)==matrix([[2.0]]), "not ok"

#HDF5
R=HDFArchive("block_matrix.output.h5",'w')
R["A"]=A
del R

R2=HDFArchive("block_matrix.output.h5",'r')
A2=R2["A"]
del R2

assert A2.matrix_vec==A.matrix_vec, "not ok" 



A3=BlockMatrixComplex(['up','down'],[matrix([[0]]), matrix([[1.0*1j]])])
Code example #30
    return dens.real


#check if there are previous runs in the outfile and if so restart from there
previous_runs = 0
previous_present = False
mu = 0.
if mpi.is_master_node():
    ar = HDFArchive(outfile + '.h5', 'a')
    if 'iterations' in ar:
        previous_present = True
        previous_runs = ar['iterations']
        S.Sigma_iw = ar['Sigma_iw']
        mu = ar['mu-%d' % previous_runs]
    del ar
previous_runs = mpi.bcast(previous_runs)
previous_present = mpi.bcast(previous_present)
S.Sigma_iw = mpi.bcast(S.Sigma_iw)
mu = mpi.bcast(mu)

for iteration_number in range(1, nloops + 1):
    it = iteration_number + previous_runs
    if mpi.is_master_node():
        print('-----------------------------------------------')
        print("Iteration = %s" % it)
        print('-----------------------------------------------')

    if it > 1:
        #set the lattice self energy from the impurity self energy
        Sigma_lat['up'].data[:, 0, 0] = S.Sigma_iw['up'].data[:, 0, 0]
        Sigma_lat['down'].data[:, 0, 0] = S.Sigma_iw['down'].data[:, 0, 0]
Code example #31
File: dft_dmft_cthyb.py   Project: hschnait/dft_tools
Converter.convert_dft_input()
mpi.barrier()

previous_runs = 0
previous_present = False
if mpi.is_master_node():
    f = HDFArchive(dft_filename+'.h5','a')
    if 'dmft_output' in f:
        ar = f['dmft_output']
        if 'iterations' in ar:
            previous_present = True
            previous_runs = ar['iterations']
    else:
        f.create_group('dmft_output')
    del f
previous_runs    = mpi.bcast(previous_runs)
previous_present = mpi.bcast(previous_present)

SK=SumkDFT(hdf_file=dft_filename+'.h5',use_dft_blocks=use_blocks,h_field=h_field)

n_orb = SK.corr_shells[0]['dim']
l = SK.corr_shells[0]['l']
spin_names = ["up","down"]
orb_names = [i for i in range(n_orb)]

# Use GF structure determined by DFT blocks
gf_struct = [(block, indices) for block, indices in SK.gf_struct_solver[0].iteritems()]
# Construct U matrix for density-density calculations
Umat, Upmat = U_matrix_kanamori(n_orb=n_orb, U_int=U, J_hund=J)
# Construct Hamiltonian and solver
h_int = h_int_density(spin_names, orb_names, map_operator_structure=SK.sumk_to_solver[0], U=Umat, Uprime=Upmat, H_dump="H.txt")
Code example #32
def supercond_hubbard_calculation( Ts = [0.12,0.08,0.04,0.02,0.01], 
                            mutildes=[0.0, 0.2, 0.4, 0.6, 0.8],
                            ns = [0.5,0.53,0.55,0.57,0.6], fixed_n = False,   
                            ts=[0.25], t_dispersion = epsilonk_square, ph_symmetry = True,
                            Us = [1.0,2.0,3.0,4.0], alpha=2.0/3.0, ising = False,
                            hs = [0],  
                            frozen_boson = False, 
                            refresh_X = True, strength = 5.0, max_it = 10,
                            n_ks = [24], n_k_automatic = False, n_k_rules = [[0.06, 32],[0.03, 48],[0.005, 64],[0.00, 96]],
                            w_cutoff = 20.0,
                            n_loops_min = 5, n_loops_max=25, rules = [[0, 0.5], [6, 0.2], [12, 0.65]], mix_Sigma = True,
                            trilex = False, edmft = False, local_bubble_for_charge_P = False,charge_boson_GW_style = False, imtime = True, use_optimized = True, N_cores = 1, 
                            do_dmft_first = True, do_normal = True, do_eigenvalue = False, do_superconducting = False, initial_X_prefactor = 2.0,
                            use_cthyb=True, n_cycles=100000, max_time=10*60, accuracy = 1e-4, supercond_accr = 1e-8, solver_data_package = None,
                            print_local_frequency=5, print_non_local_frequency = 5, total_debug=False,
                            initial_guess_archive_name = '', suffix='', clean_up_polarization = True):
  if mpi.is_master_node():
     print "WELCOME TO supercond hubbard calculation!"
     if n_k_automatic: print "n_k automatic!!!"
  if len(n_ks)==0 and n_k_automatic: n_ks=[0]

  loc_from_imp = trilex or edmft    

  bosonic_struct = {'0': [0], '1': [0]}    
  if not ising:
    if alpha==2.0/3.0:
      del bosonic_struct['1']
    if alpha==1.0/3.0:
      del bosonic_struct['0']
  else:
    if alpha==1.0:
      del bosonic_struct['1']
    if alpha==0.0:
      del bosonic_struct['0']

  fermionic_struct = {'up': [0], 'down': [0]}
  if not loc_from_imp:
    del fermionic_struct['down']
  beta = 1.0/Ts[0] 
  
  n_iw = int(((w_cutoff*beta)/math.pi-1.0)/2.0)
  if mpi.is_master_node():
    print "PM HUBBARD GW: n_iw: ",n_iw
  n_tau = int(n_iw*pi)


  if not n_k_automatic:
    n_q = n_ks[0]
    n_k = n_q
  else:
    n_k = n_q = n_k_from_rules(Ts[0], n_k_rules)
    if mpi.is_master_node(): print "n_k automatic!!!"

  #init solver
  if use_cthyb and loc_from_imp:
    if solver_data_package is None: solver_data_package = {}
    solver_data_package['solver'] = 'cthyb'
    solver_data_package['constructor_parameters']={}
    solver_data_package['constructor_parameters']['beta'] = beta
    solver_data_package['constructor_parameters']['gf_struct'] = fermionic_struct
    solver_data_package['constructor_parameters']['n_tau_k'] = n_tau
    solver_data_package['constructor_parameters']['n_tau_g'] = 10000
    solver_data_package['constructor_parameters']['n_tau_delta'] = 10000
    solver_data_package['constructor_parameters']['n_tau_nn'] = 4*n_tau
    solver_data_package['constructor_parameters']['n_w_b_nn'] = n_iw
    solver_data_package['constructor_parameters']['n_w'] = n_iw
    solver_data_package['construct|run|exit'] = 0

    if MASTER_SLAVE_ARCHITECTURE and (mpi.size>1): solver_data_package = mpi.bcast(solver_data_package)
     
    solver = Solver( **solver_data_package['constructor_parameters'] )
  else:
    solver = None


  assert not( imtime and trilex ), "imtime bubbles inapplicable in trilex"
  #init data, assign the solver to it
  dt = supercond_data( n_iw = n_iw, 
                       ntau = (None if (imtime) else 3 ), #no need to waste memory on tau-dependent quantities unless we're going to use them (None means ntau=n_iw*5)
                       n_k = n_k,
                       n_q = n_q, 
                       beta = beta, 
                       solver = solver,
                       bosonic_struct = bosonic_struct,
                       fermionic_struct = fermionic_struct,
                       archive_name="so_far_nothing_you_shouldnt_see_this_file" )
  if trilex or (edmft and local_bubble_for_charge_P and not charge_boson_GW_style): #if emdft, nothing to add
    dt.__class__=supercond_trilex_data
    dt.promote(dt.n_iw/2, dt.n_iw/2)

  if use_optimized:
    dt.patch_optimized()

  #init convergence and cautionary measures
  convergers = [ converger( monitored_quantity = lambda: dt.P_loc_iw,
                            accuracy=accuracy, 
                            struct=bosonic_struct, 
                            archive_name="not_yet_you_shouldnt_see_this_file",
                            h5key = 'diffs_P_loc' ),
                 converger( monitored_quantity = lambda: dt.G_loc_iw,
                            accuracy=accuracy, 
                            struct=fermionic_struct, 
                            archive_name="not_yet_you_shouldnt_see_this_file",
                            h5key = 'diffs_G_loc'     ) ]

  #initial guess
  
  #assert not(trilex and fixed_n), "trilex doesn't yet work"

  if fixed_n:
    ps = itertools.product(n_ks,ts,ns,Us,Ts,hs)
  else:
    ps = itertools.product(n_ks,ts,mutildes,Us,Ts,hs)

  counter = 0
  old_nk = n_k
  old_beta = beta

  old_get_Xkw = None
  #-------------------------------------------------------------------------------------------------------------------------------#
  for p in ps:    
    #name stuff to avoid confusion   
    t = p[1]
    if fixed_n:
      n = p[2]
    else:
      mutilde = p[2]
      n = None
    U = p[3]
    T = p[4] 
    beta = 1.0/T
    h = p[5]

    nk = (p[0] if (not n_k_automatic) else n_k_from_rules(T, n_k_rules) )

    if nk!=old_nk and (not n_k_automatic):
      dt.change_ks(IBZ.k_grid(nk))
      old_nk = nk

    if beta!=old_beta:
      n_iw = int(((w_cutoff*beta)/math.pi-1.0)/2.0)
      n_tau = int(n_iw*pi)

      if n_k_automatic:
        nk = n_k_from_rules(T, n_k_rules)
        if nk != old_nk: 
          dt.change_ks(IBZ.k_grid(nk))
          old_nk = nk

      dt.change_beta(beta, n_iw)

      if loc_from_imp:
        if solver_data_package is None: solver_data_package = {}
        solver_data_package['constructor_parameters']={}
        solver_data_package['constructor_parameters']['beta'] = beta
        solver_data_package['constructor_parameters']['gf_struct'] = fermionic_struct
        solver_data_package['constructor_parameters']['n_tau_k'] = n_tau
        solver_data_package['constructor_parameters']['n_tau_g'] = 10000
        solver_data_package['constructor_parameters']['n_tau_delta'] = 10000
        solver_data_package['constructor_parameters']['n_tau_nn'] = 4*n_tau
        solver_data_package['constructor_parameters']['n_w_b_nn'] = n_iw
        solver_data_package['constructor_parameters']['n_w'] = n_iw
        solver_data_package['construct|run|exit'] = 0

        if MASTER_SLAVE_ARCHITECTURE and (mpi.size>1): solver_data_package = mpi.bcast(solver_data_package)
        dt.solver = Solver( **solver_data_package['constructor_parameters'] )
      old_beta = beta

    filename = "result"
    if len(n_ks)>1 and (not n_k_automatic):
      filename += ".nk%s"%nk
    if len(ts)>1: filename += ".t%s"%t
    if len(ns)>1 and fixed_n: 
      filename += ".n%s"%n
    if len(mutildes)>1 and not fixed_n:
      filename += ".mutilde%s"%mutilde      
    if len(Us)>1: filename += ".U%s"%U
    if len(Ts)>1: filename += ".T%.4f"%T
    if len(hs)>1: filename += ".h%s"%h
    filename += ".h5"
    dt.archive_name = filename
    for conv in convergers:
      conv.archive_name = dt.archive_name

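    # split the interaction between the charge ('0') and spin ('1') channels according to the decoupling parameter alpha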
    if not ising:
      Uch = (3.0*alpha-1.0)*U
      Usp = (alpha-2.0/3.0)*U
    else:
      Uch = alpha*U
      Usp = (alpha-1.0)*U

    vks = {'0': lambda kx,ky: Uch, '1': lambda kx,ky: Usp}
    if not ising:
      if alpha==2.0/3.0:
        del vks['1']
      if alpha==1.0/3.0:
        del vks['0']
    else:
      if alpha==1.0:
        del vks['1']
      if alpha==0.0:
        del vks['0']
    
    dt.fill_in_Jq( vks )  
    dt.fill_in_epsilonk(dict.fromkeys(fermionic_struct.keys(), partial(t_dispersion, t=t)))

    #assert not(use_optimized and trilex), "don't have optimized freq summation from trilex"
    if trilex and charge_boson_GW_style:
      dt.Lambda_wrapper = lambda A,wi,nui: ( (dt.__class__.Lambda_wrapper(dt,A,wi,nui)) if (A=='1') else 1.0 )

    Lam = ( dt.Lambda_wrapper if trilex else ( lambda A, wi, nui: 1.0 )  )
    if (not use_optimized) or (not imtime): #automatically if trilex because imtime = False is asserted
      dt.get_Sigmakw = lambda: dt.__class__.get_Sigmakw(dt, ising_decoupling = ising, imtime = imtime, Lambda = Lam)
      dt.get_Xkw = lambda: dt.__class__.get_Xkw(dt, ising_decoupling = ising, imtime = imtime, Lambda = Lam) 
      dt.get_Pqnu = lambda: dt.__class__.get_Pqnu(dt, imtime = imtime, Lambda = Lam) 
    else:
      dt.get_Sigmakw =  lambda: GW_data.optimized_get_Sigmakw(dt, ising_decoupling = ising, N_cores=N_cores)
      dt.get_Xkw =  lambda: supercond_data.optimized_get_Xkw(dt, ising_decoupling = ising, N_cores=N_cores) 
      dt.get_Pqnu =  lambda: supercond_data.optimized_get_Pqnu(dt, N_cores=N_cores) 



    dt.get_Sigma_loc_from_local_bubble = lambda: GW_data.get_Sigma_loc_from_local_bubble(dt, ising_decoupling = ising, imtime = imtime, Lambda = Lam)   
    if  local_bubble_for_charge_P:
      dt.get_P_loc_from_local_bubble = lambda: GW_data.get_P_loc_from_local_bubble(dt, imtime = False, Lambda = dt.Lambda_wrapper)
    if charge_boson_GW_style:
      dt.get_P_loc_from_local_bubble = lambda: GW_data.get_P_loc_from_local_bubble(dt, imtime = imtime, Lambda = lambda A,wi,nui: 1.0)

    dt.optimized_get_P_imp = lambda: GW_data.optimized_get_P_imp(dt, use_caution=not local_bubble_for_charge_P and not charge_boson_GW_style, prefactor=0.99, 
                                                                     use_local_bubble_for_charge = local_bubble_for_charge_P or charge_boson_GW_style )
    #dt.optimized_get_P_imp = lambda: GW_data.optimized_get_P_imp(dt, use_caution=False, prefactor=0.99, use_local_bubble_for_charge = local_bubble_for_charge_P)
    if not loc_from_imp:
      dt.get_P_loc_from_local_bubble = lambda: dt.__class__.get_P_loc_from_local_bubble(dt, imtime = imtime, Lambda = Lam)

    if ((h==0.0)or(h==0))and (not refresh_X):
      if mpi.is_master_node(): print "assigning GW_data.Pqnu because no h, no imposed X"
      old_get_Xkw = dt.get_Xkw #save the old one and put it back before returning data   
      old_get_Pqnu = dt.get_Pqnu
      dt.get_Xkw = lambda: None
      if (not use_optimized) or (not imtime):
        dt.get_Pqnu = lambda: GW_data.get_Pqnu(dt, imtime = imtime, Lambda = Lam) 
      else: 
        dt.get_Pqnu = lambda: GW_data.optimized_get_Pqnu(dt, N_cores=N_cores) 

    if trilex or (edmft and local_bubble_for_charge_P and not charge_boson_GW_style): 
      preset = supercond_trilex_hubbard(U=U, alpha=alpha, ising = ising, frozen_boson=(frozen_boson if (T!=Ts[0]) else False), refresh_X = refresh_X, n = n, ph_symmetry = ph_symmetry)
    elif edmft:
      preset = supercond_EDMFTGW_hubbard(U=U, alpha=alpha, ising = ising, frozen_boson=(frozen_boson if (T!=Ts[0]) else False), refresh_X = refresh_X, n = n, ph_symmetry = ph_symmetry)
    else:
      preset = supercond_hubbard(frozen_boson=(frozen_boson if (T!=Ts[0]) else False), refresh_X=refresh_X, n = n, ph_symmetry=ph_symmetry)

    if refresh_X:
      preset.cautionary.refresh_X = partial(preset.cautionary.refresh_X, strength=strength, max_it=max_it)

    if mpi.is_master_node():
      if fixed_n:
        print "U = ",U," alpha= ",alpha, "Uch= ",Uch," Usp=",Usp," n= ",n
      else:
        print "U = ",U," alpha= ",alpha, "Uch= ",Uch," Usp=",Usp," mutilde= ",mutilde
      #print "cautionary safe values: ",preset.cautionary.safe_value    

    if loc_from_imp:
      if trilex or (local_bubble_for_charge_P and not charge_boson_GW_style):
        n_w_f=dt.n_iw_f
        n_w_b=dt.n_iw_b
      else:
        n_w_f=4
        n_w_b=4

      if use_cthyb:
        impurity = partial( solvers.cthyb.run, no_fermionic_bath=False, 
                                           trilex=trilex or (local_bubble_for_charge_P and not charge_boson_GW_style), n_w_f=n_w_f, n_w_b=n_w_b,
                                           n_cycles=n_cycles, max_time=max_time,
                                           solver_data_package = solver_data_package )
        dt.dump_solver = partial(solvers.cthyb.dump, solver = dt.solver, archive_name = dt.archive_name)
      else:
        impurity = partial( solvers.ctint.run, n_cycles=n_cycles)
        dt.dump_solver = partial(solvers.cthyb.dump, solver = dt.solver, archive_name = dt.archive_name)
    else:
      impurity = lambda data: None

    mixers = [mixer( mixed_quantity = lambda: dt.Pqnu,
                      rules=rules,
                      func=mixer.mix_lattice_gf ),
              mixer( mixed_quantity = lambda: dt.P_loc_iw,
                     rules=rules,
                     func=mixer.mix_block_gf ) ]
    if mix_Sigma:
      mixers.extend([mixer( mixed_quantity = lambda: dt.Sigmakw,
                     rules=rules,
                     func=mixer.mix_lattice_gf),
                     mixer( mixed_quantity = lambda: dt.Sigma_loc_iw,
                     rules=rules,
                     func=mixer.mix_block_gf)])

    monitors = [ monitor( monitored_quantity = lambda: dt.ns['up'], 
                          h5key = 'n_vs_it', 
                          archive_name = dt.archive_name),
                 monitor( monitored_quantity = lambda: dt.mus['up'], 
                          h5key = 'mu_vs_it', 
                          archive_name = dt.archive_name),
                 monitor( monitored_quantity = lambda: numpy.amax(dt.Pqnu['1'][dt.m_to_nui(0),:,:]*Usp), 
                          h5key = 'maxPspUsp_vs_it', 
                          archive_name = dt.archive_name),
                 monitor( monitored_quantity = lambda: dt.err, 
                          h5key = 'err_vs_it', 
                          archive_name = dt.archive_name) ]

    if loc_from_imp:
      monitors.extend([ monitor( monitored_quantity = lambda: dt.P_imp_iw['0'].data[dt.m_to_nui(0),0,0], 
                                 h5key = 'Pimp_vs_it', 
                                 archive_name = dt.archive_name), 
                        monitor( monitored_quantity = lambda: dt.W_loc_iw['0'].data[dt.m_to_nui(0),0,0], 
                                 h5key = 'Wloc_vs_it', 
                                 archive_name = dt.archive_name),
                        monitor( monitored_quantity = lambda: dt.Uweiss_iw['0'].data[dt.m_to_nui(0),0,0], 
                                 h5key = 'Uweiss_vs_it', 
                                 archive_name = dt.archive_name),
                        monitor( monitored_quantity = lambda: dt.chi_imp_iw['0'].data[dt.m_to_nui(0),0,0], 
                                 h5key = 'chimp_vs_it',
                                archive_name = dt.archive_name) ])
      #mixers.extend([ mixer( mixed_quantity = lambda: dt.chi_imp_iw['0'],
      #                  rules=[[0,0.9]],
      #                  func=mixer.mix_gf) ] )
 
    #init the dmft_loop 
    dmft = dmft_loop(  cautionary       = preset.cautionary, 
                       lattice          = preset.lattice,
                       pre_impurity     = preset.pre_impurity, 
                       impurity         = impurity, 
                       post_impurity    = preset.post_impurity,
                       selfenergy       = preset.selfenergy, 
                       convergers       = convergers,
                       mixers           = mixers,
                       monitors		= monitors, 
                       after_it_is_done = preset.after_it_is_done )

    #dt.get_G0kw( func = dict.fromkeys(['up', 'down'], dyson.scalar.G_from_w_mu_epsilon_and_Sigma) )  
    #if (T==Ts[0]) and trilex: #do this only once!         
    #  dt.mus['up'] = dt.mus['down'] = mutilde+U/2.0
    #  dt.P_imp_iw << 0.0    
    #  dt.Sigma_imp_iw << U/2.0 + mutilde #making sure that in the first iteration the impurity problem is half-filled. if not solving impurity problem, not needed
    #  for U in fermionic_struct.keys(): dt.Sigmakw[U].fill(0)
    #  for U in fermionic_struct.keys(): dt.Xkw[U].fill(0)
    if (T==Ts[0]): #do the initial guess only once!         
      if initial_guess_archive_name!='':
        if mpi.is_master_node(): print "constructing dt from initial guess in a file: ",initial_guess_archive_name, "suffix: ",suffix
        old_Jq = dt.Jq #this thing is the parameter of the calculation, we don't want to load it from the initial guess file
        old_epsilonk = dt.epsilonk
        dt.construct_from_file(initial_guess_archive_name, suffix) 
        if dt.beta != beta:
          dt.change_beta(beta, n_iw)
        if dt.n_k != nk:
          dt.change_ks(IBZ.k_grid(nk))
        if mpi.is_master_node(): print "putting back the old Jq and epsilonk"
        dt.epsilonk = old_epsilonk
        dt.Jq = old_Jq 
        if clean_up_polarization:
          if mpi.is_master_node(): print "now emptying polarization to be sure"
          for A in dt.bosonic_struct.keys(): #empty the Polarization!!!!!!!!!!!!
            dt.Pqnu[A][:,:,:] = 0.0
            dt.P_loc_iw[A] << 0.0
            dt.chi_imp_iw[A] << 0.0
      else:
        if not fixed_n:  
          dt.mus['up'] = mutilde
        else:
          dt.mus['up'] = 0.0
        if 'down' in dt.fermionic_struct.keys(): dt.mus['down'] = dt.mus['up']   # not necessary at the moment, but may become necessary
        dt.P_imp_iw << 0.0
        if loc_from_imp: #making sure that in the first iteration the impurity problem is half-filled. if not solving impurity problem, not needed
          dt.Sigma_loc_iw << U/2.0
        else:
          dt.Sigma_loc_iw << 0.0
        for U in fermionic_struct.keys(): dt.Sigmakw[U].fill(0)
        for U in fermionic_struct.keys(): dt.Xkw[U].fill(0)
      #note that below from here U is no longer U because of the above for loops     

    if not do_superconducting:
      for U in fermionic_struct.keys(): dt.Xkw[U].fill(0)
 
    if loc_from_imp and (T==Ts[0]) and do_dmft_first:
      #do one short run of dmft before starting emdft+gw
      if mpi.is_master_node(): print "================= 20 iterations of DMFT!!!! ================="
      Jqcopy = deepcopy(dt.Jq) #copy the old Jq
      for A in dt.bosonic_struct.keys():
        dt.Jq[A][:,:] = 0.0 #setting bare bosonic propagators to zero reduces the calculation to dmft.      

      #but we also don't want to do the calculation of Sigmakw and Pqnu 
      get_Sigmakw = dt.get_Sigmakw
      get_Pqnu = dt.get_Pqnu
      get_Xkw = dt.get_Xkw       
 
      def copy_Sigma_loc_to_Sigmakw():
        if mpi.is_master_node(): print ">>>>> just copying Sigma_loc to Sigma_kw" 
        for U in dt.fermionic_struct.keys():
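          # assigning through the transposed view broadcasts the frequency-dependent local self-energy over all k-points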
          numpy.transpose(dt.Sigmakw[U])[:] = dt.Sigma_loc_iw[U].data[:,0,0]
     
      dt.get_Sigmakw = copy_Sigma_loc_to_Sigmakw
      dt.get_Pqnu = lambda: None
      dt.get_Xkw = lambda: None

      if trilex: #get rid of vertex related stuff
        get_chi3_imp = dt.get_chi3_imp
        get_chi3tilde_imp = dt.get_chi3tilde_imp
        get_Lambda_imp = dt.get_Lambda_imp

        dt.get_chi3_imp = lambda: None
        dt.get_chi3tilde_imp = lambda: None
        dt.get_Lambda_imp = lambda: None
        old_impurity = dmft.impurity
        dmft.impurity = partial( solvers.cthyb.run, no_fermionic_bath=False, 
                                           trilex=False, n_w_f=n_w_f, n_w_b=n_w_b,
                                           n_cycles=n_cycles, max_time=( max_time if (not trilex) else (max_time/2)), 
                                           solver_data_package = solver_data_package )
      
      dmft.mixers = [] # no mixing
      dmft.cautionary = None # nothing to be cautious about 
       
      # DO THE CALC   
      dmft.run( dt, calculation_name = 'dmft', total_debug = total_debug,
                n_loops_max=20, 
                n_loops_min=15,
                print_local=1, print_impurity_input=1, print_three_leg=100000, print_non_local=10000, print_impurity_output=1,
                skip_self_energy_on_first_iteration=True,
                mix_after_selfenergy = True, 
                last_iteration_err_is_allowed = 100 )
      #move the result
      if mpi.is_master_node():
        cmd = 'mv %s %s'%(filename, filename.replace("result", "dmft")) 
        print cmd
        os.system(cmd)
      # put everything back to normal
      dmft.mixers = mixers
      dmft.cautionary = preset.cautionary

      dt.Jq = Jqcopy #put back the old Jq now for the actual calculation
      for A in dt.bosonic_struct.keys(): #empty the Polarization!!!!!!!!!!!!
        dt.Pqnu[A][:,:,:] = 0.0
        dt.P_loc_iw[A] << 0.0
        dt.chi_imp_iw[A] << 0.0
      dt.get_Sigmakw = get_Sigmakw 
      dt.get_Pqnu = get_Pqnu 
      dt.get_Xkw = get_Xkw       
      if trilex: #put back in the vertex related stuff
        dt.get_chi3_imp = get_chi3_imp
        dt.get_chi3tilde_imp = get_chi3tilde_imp
        dt.get_Lambda_imp = get_Lambda_imp
        dmft.impurity = old_impurity     

    if refresh_X:  
      preset.cautionary.reset()
      preset.cautionary.refresh_X(dt)

    if h!=0:
      for kxi in range(dt.n_k):
        for kyi in range(dt.n_k):
          for wi in range(dt.nw):
            for U in fermionic_struct.keys():
              dt.hsck[U][kxi, kyi] = X_dwave(dt.ks[kxi],dt.ks[kyi], h)
   
    if not MASTER_SLAVE_ARCHITECTURE: mpi.barrier()
    #run dmft!-------------
    if do_normal and not (do_superconducting and T!=Ts[0]):
      err = dmft.run( dt,  calculation_name = 'normal', total_debug = total_debug,
                      n_loops_max=n_loops_max, 
                      n_loops_min=n_loops_min,
                      print_local=print_local_frequency, print_impurity_input=( 1 if loc_from_imp else 1000 ), print_three_leg=1, print_non_local=print_non_local_frequency,
                      skip_self_energy_on_first_iteration=True,
                      mix_after_selfenergy = True, 
                      last_iteration_err_is_allowed = n_loops_max+5 ) #n_loops_max/2 )
      if (err==2): 
        print "Cautionary error!!! exiting..."
        solver_data_package['construct|run|exit'] = 2
        if MASTER_SLAVE_ARCHITECTURE and (mpi.size>1): solver_data_package = mpi.bcast(solver_data_package)
        break

    if not MASTER_SLAVE_ARCHITECTURE: mpi.barrier()
    if do_eigenvalue:
      #get the leading eigenvalue and the corresponding eigenvector to be used as the initial guess for Xkw
      if imtime and use_optimized:
        dt.optimized_get_leading_eigenvalue(max_it = 60, accr = 5e-4, 
                                          ising_decoupling = ising, 
                                          N_cores = N_cores, symmetry = 'd')  #for now let's stick to plain d-wave
      else:
        dt.get_Xkw = lambda: supercond_data.get_Xkw( dt, imtime = False, 
                                            simple = False, 
                                            use_IBZ_symmetry = False,
                                            ising_decoupling=ising,
                                            su2_symmetry=True, wi_list = [], Lambda = dt.Lambda_wrapper)

        dt.get_leading_eigenvalue(max_it = 60, accr = 5e-4, 
                                          ising_decoupling = ising, symmetry = 'd')  #for now let's stick to plain d-wave
      if mpi.is_master_node():
        if dt.eig_ratio < 1.0:
          print ">>>>>>>>>>> eig_ratio<1.0... better luck next time"
        else: 
          print ">>>>>>>>>>> eig_ratio>=1.0!!"
        dt.dump_all(suffix='-final') 
        cmd = 'mv %s %s'%(filename, filename.replace("result", "result_normal")) 
        print cmd
        os.system(cmd)

    if not MASTER_SLAVE_ARCHITECTURE: mpi.barrier()
    if do_superconducting and (dt.eig_ratio >= 1.0):          
      if mpi.is_master_node(): print "--------------------------------- will now do superconducting calculation!!! ------------------------------"
      # start from a small gap
      for U in dt.fermionic_struct.keys():
        dt.Xkw[U] *= initial_X_prefactor

      #put back in the supercond functions for Sigma and P 
      dt.get_Xkw = old_get_Xkw
      dt.get_Pqnu = old_get_Pqnu 

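      # note: the monitored_quantity lambdas close over dt, so each monitor re-reads the current Xkw/Fkw at every iteration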
      monitors.extend( [ monitor( monitored_quantity = lambda: dt.Xkw['up'][dt.nw/2,0,dt.n_k/2].real, 
                          h5key = 'Xkw_vs_it', 
                          archive_name = dt.archive_name),
                         monitor( monitored_quantity = lambda: numpy.amax(dt.Fkw['up'][:,:,:].real), 
                          h5key = 'Fkw_vs_it', 
                          archive_name = dt.archive_name) ]   )

      for conv in convergers:
        conv.accuracy = supercond_accr
      #run the calculation again
      err = dmft.run( dt, calculation_name = 'supercond', total_debug = total_debug,
                    n_loops_max=100, 
                    n_loops_min=25,
                    print_local=print_local_frequency, print_impurity_input=( 1 if loc_from_imp else 1000 ), print_three_leg=1, print_non_local=5,
                    skip_self_energy_on_first_iteration=True,
                    mix_after_selfenergy = True, 
                    last_iteration_err_is_allowed = n_loops_max+5 ) #n_loops_max/2 )

    elif do_superconducting:
      if mpi.is_master_node(): print "--------------------------------- no point in doing superconducting calculation: eig_ratio<1"
    counter += 1
  if not (old_get_Xkw is None):
    dt.get_Xkw  = old_get_Xkw #putting back the function for later use
  solver_data_package['construct|run|exit'] = 2
  if MASTER_SLAVE_ARCHITECTURE and (mpi.size>1): solver_data_package = mpi.bcast(solver_data_package)
  return dt, monitors, convergers
Code example #33
File: sc_dmft.py  Project: TRIQS/dft_tools
def run_all(vasp_pid):
    """
    """
    mpi.report("  Waiting for VASP lock to appear...")
    while not is_vasp_lock_present():
        time.sleep(1)

    vasp_running = True

    while vasp_running:
        if debug: print bcolors.RED + "rank %s"%(mpi.rank) + bcolors.ENDC
        mpi.report("  Waiting for VASP lock to disappear...")
        mpi.barrier()
        while is_vasp_lock_present():
            time.sleep(1)
#            if debug: print bcolors.YELLOW + " waiting: rank %s"%(mpi.rank) + bcolors.ENDC
            if not is_vasp_running(vasp_pid):
                mpi.report("  VASP stopped")
                vasp_running = False
                break

        if debug: print bcolors.MAGENTA + "rank %s"%(mpi.rank) + bcolors.ENDC
        err = 0
        exc = None
        try:
            if debug: print bcolors.BLUE + "plovasp: rank %s"%(mpi.rank) + bcolors.ENDC
            if mpi.is_master_node():
                plovasp.generate_and_output_as_text('plo.cfg', vasp_dir='./')
# Read energy from OSZICAR
                dft_energy = get_dft_energy()
        except Exception, exc:
            err = 1

        err = mpi.bcast(err)
        if err:
            if mpi.is_master_node():
                raise exc
            else:
                raise SystemExit(1)

        mpi.barrier()

        try:
            if debug: print bcolors.GREEN + "rank %s"%(mpi.rank) + bcolors.ENDC
            corr_energy, dft_dc = dmft_cycle()
        except:
            if mpi.is_master_node():
                print "  master forwarding the exception..."
                raise
            else:
                print "  rank %i exiting..."%(mpi.rank)
                raise SystemExit(1)
        mpi.barrier()

        if mpi.is_master_node():
            total_energy = dft_energy + corr_energy - dft_dc
            print
            print "="*80
            print "  Total energy: ", total_energy
            print "  DFT energy: ", dft_energy
            print "  Corr. energy: ", corr_energy
            print "  DFT DC: ", dft_dc
            print "="*80
            print

        if mpi.is_master_node() and vasp_running:
            open('./vasp.lock', 'a').close()
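
The helper is_vasp_lock_present used by this loop is not shown in the excerpt. A minimal sketch consistent with how it is called here, assuming the same './vasp.lock' path that the last line of the loop re-creates, could be:

import os.path

def is_vasp_lock_present():
    # assumed helper: the loop above only needs to know whether ./vasp.lock currently exists
    return os.path.isfile('./vasp.lock')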
Code example #34
File: block_matrix.py  Project: TRIQS/triqs
assert (A2["up"] == -m0).all() and (A2["dn"] == -m1).all(), "In-place subtraction failed"
A2 = A * A
assert (A2["up"] == m0 * m0).all() and (A2["dn"] == m1 * m1).all(), "Multiplication failed"
A2 *= A
assert (A2["up"] == m0 * m0 * m0).all() and (A2["dn"] == m1 * m1 * m1).all(), "In-place multiplication failed"
A2 = 2 * A
assert (A2["up"] == 2 * m0).all() and (A2["dn"] == 2 * m1).all(), "Multiplication by constant failed"
A2 = A * 2
assert (A2["up"] == 2 * m0).all() and (A2["dn"] == 2 * m1).all(), "Multiplication by constant failed"
A2 = A / 2
assert (A2["up"] == 0.5 * m0).all() and (A2["dn"] == 0.5 * m1).all(), "Division by constant failed"
A2 = -A
assert (A2["up"] == -m0).all() and (A2["dn"] == -m1).all(), "Unary minus failed"

# MPI
Abcast = mpi.bcast(A)
assert (A["up"] == Abcast["up"]).all() and (A["dn"] == Abcast["dn"]).all(), "mpi.bcast failed"

# HDF5
with HDFArchive("block_matrix.output.h5", "w") as R:
    R["A"] = A
with HDFArchive("block_matrix.output.h5", "r") as R:
    A2 = R["A"]
assert (A["up"] == A2["up"]).all() and (A["dn"] == A2["dn"]).all(), "HDF5 write/read failed"


########################
# Complex block matrix #
########################

m0 = matrix([[1, 0], [0, 2j]])
Code example #35
        def slave_run(solver_data_package, printout=True):
            while True:
                if printout:
                    print "[Node ", mpi.rank, "] waiting for instructions..."

                solver_data_package = mpi.bcast(solver_data_package)

                if printout:
                    print "[Node ", mpi.rank, "] received instructions!!!"

                try:
                    if solver_data_package['construct|run|exit'] == 0:
                        if printout:
                            print "[Node ", mpi.rank, "] constructing solvers!!!"
                        solvers = {}
                        impurity_struct = solver_data_package[
                            'impurity_struct']
                        for C in impurity_struct.keys():
                            solver_struct = {
                                'up': impurity_struct[C],
                                'dn': impurity_struct[C]
                            }
                            solver_data_package['constructor_parameters'][
                                'gf_struct'] = solver_struct
                            solvers[C] = cthybSolver(**(
                                solver_data_package['constructor_parameters']))

                    if solver_data_package['construct|run|exit'] == 1:
                        if printout:
                            print "[Node ", mpi.rank, "] about to run..."
                        solver = solvers[solver_data_package['which_solver']]
                        solver.G0_iw << solver_data_package['G0_iw']

                        block_names = [name for name, g in solver.G0_iw]
                        N_states = len(solver.G0_iw[block_names[0]].data[0,
                                                                         0, :])
                        gf_struct = {
                            block_names[0]: range(N_states),
                            block_names[1]: range(N_states)
                        }
                        U = solver_data_package['solve_parameters']['U']
                        h_int = U * n(block_names[0], 0) * n(block_names[1], 0)
                        for i in range(1, N_states):
                            h_int += U * n(block_names[0], i) * n(
                                block_names[1], i)
                        QN = [
                            sum([n(bl, i)
                                 for i in range(N_states)], Operator())
                            for bl in block_names
                        ]
                        try:
                            dct = deepcopy(
                                solver_data_package['solve_parameters'])
                            del dct['U']
                            solver.solve(h_int=h_int,
                                         quantum_numbers=QN,
                                         **dct)

                            if printout:
                                print "[Node ", mpi.rank, "] finished running successfully!"
                        except Exception as e:
                            print "[Node ", mpi.rank, "] ERROR: crash during running solver"

                    if solver_data_package['construct|run|exit'] == 2:
                        if printout:
                            print "[Node ", mpi.rank, "] received exit signal, will exit now. Goodbye."
                        break
                except:
                    if printout:
                        print "[Node ", mpi.rank, "] something went wrong. Will exit now! Goodbye."
                    break
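
The slave loop above only reacts to whatever the master rank broadcasts into solver_data_package. A condensed sketch of that handshake (the name master_drive is illustrative and not part of the original code; the real driver routines elsewhere in this collection also fill payload keys such as 'impurity_struct', 'constructor_parameters', 'which_solver', 'G0_iw' and 'solve_parameters' before each broadcast) could look like:

import pytriqs.utility.mpi as mpi

def master_drive(solver_data_package):
    # illustrative only: executed on the master rank while all other ranks sit in slave_run
    solver_data_package['construct|run|exit'] = 0  # ask every rank to construct its solvers
    solver_data_package = mpi.bcast(solver_data_package)

    solver_data_package['construct|run|exit'] = 1  # ship the impurity input and run
    solver_data_package = mpi.bcast(solver_data_package)

    solver_data_package['construct|run|exit'] = 2  # release the slaves from their while-True loop
    solver_data_package = mpi.bcast(solver_data_package)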
Code example #36
def slave_calculation(solver_data_package, printout=False):
    while True:
        if printout: print "[Node ", mpi.rank, "] waiting for instructions..."
        solver_data_package = mpi.bcast(solver_data_package)
        if printout: print "[Node ", mpi.rank, "] received instructions!!!"
        if solver_data_package['construct|run|exit'] == 0:
            if printout: print "[Node ", mpi.rank, "] constructing solver!!!"
            if solver_data_package['solver'] != 'cthyb':
                print "[Node ", mpi.rank, "] ERROR: CTINT NOT IMPLEMENTED"
                quit()
            solver = Solver(**(solver_data_package['constructor_parameters']))
        if solver_data_package['construct|run|exit'] == 1:
            if printout: print "[Node ", mpi.rank, "] about to run..."
            solver.G0_iw << solver_data_package['G0_iw']
            solver.D0_iw << solver_data_package['D0_iw']
            solver.Jperp_iw << solver_data_package['Jperp_iw']
            #solver.solve( **(solver_data_package['solve_parameters'])  )
            try:
                solver.solve(
                    h_int=solver_data_package['solve_parameters']['U_inf'] *
                    n('up', 0) * n('down', 0),
                    hartree_shift=solver_data_package['solve_parameters']
                    ['hartree_shift'],
                    n_cycles=solver_data_package['solve_parameters']
                    ['n_cycles'],
                    length_cycle=solver_data_package['solve_parameters']
                    ['length_cycle'],
                    n_warmup_cycles=solver_data_package['solve_parameters']
                    ['n_warmup_cycles'],
                    max_time=solver_data_package['solve_parameters']
                    ['max_time'],
                    measure_nn=solver_data_package['solve_parameters']
                    ['measure_nn'],
                    measure_nnw=solver_data_package['solve_parameters']
                    ['measure_nnw'],
                    measure_chipmt=solver_data_package['solve_parameters']
                    ['measure_chipmt'],
                    measure_gt=solver_data_package['solve_parameters']
                    ['measure_gt'],
                    measure_ft=solver_data_package['solve_parameters']
                    ['measure_ft'],
                    measure_gw=solver_data_package['solve_parameters']
                    ['measure_gw'],
                    measure_fw=solver_data_package['solve_parameters']
                    ['measure_fw'],
                    measure_g2w=solver_data_package['solve_parameters']
                    ['measure_g2w'],
                    measure_f2w=solver_data_package['solve_parameters']
                    ['measure_f2w'],
                    measure_hist=solver_data_package['solve_parameters']
                    ['measure_hist'],
                    measure_hist_composite=solver_data_package[
                        'solve_parameters']['measure_hist_composite'],
                    measure_nnt=solver_data_package['solve_parameters']
                    ['measure_nnt'],
                    move_group_into_spin_segment=solver_data_package[
                        'solve_parameters']['move_group_into_spin_segment'],
                    move_split_spin_segment=solver_data_package[
                        'solve_parameters']['move_split_spin_segment'],
                    move_swap_empty_lines=solver_data_package[
                        'solve_parameters']['move_swap_empty_lines'],
                    move_move=solver_data_package['solve_parameters']
                    ['move_move'],
                    move_insert_segment=solver_data_package['solve_parameters']
                    ['move_insert_segment'],
                    move_remove_segment=solver_data_package['solve_parameters']
                    ['move_remove_segment'],
                    n_w_f_vertex=solver_data_package['solve_parameters']
                    ['n_w_f_vertex'],
                    n_w_b_vertex=solver_data_package['solve_parameters']
                    ['n_w_b_vertex'],
                    keep_Jperp_negative=solver_data_package['solve_parameters']
                    ['keep_Jperp_negative'])
                if printout:
                    print "[Node ", mpi.rank, "] finished running successfully!"
            except:
                print "[Node ", mpi.rank, "] ERROR: crash during running"
        if solver_data_package['construct|run|exit'] == 2:
            if printout:
                print "[Node ", mpi.rank, "] received exit signal, will exit now. Goodbye."
            break
Code example #37
        def run(data,
                C,
                U,
                symmetrize_quantities=True,
                alpha=0.5,
                delta=0.1,
                n_cycles=20000,
                max_time=5 * 60,
                solver_data_package=None,
                only_sign=False,
                bosonic_measures=False):
            solver = data.solvers[C]

            block_names = [name for name, g in solver.G0_iw]
            N_states = len(solver.G0_iw[block_names[0]].data[0, 0, :])
            gf_struct = {
                block_names[0]: range(N_states),
                block_names[1]: range(N_states)
            }

            h_int = U * n(block_names[0], 0) * n(block_names[1], 0)
            for i in range(1, N_states):
                h_int += U * n(block_names[0], i) * n(block_names[1], i)

            N_s = 2
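            # the usual CT-INT alpha shift: alpha +/- delta, staggered in the auxiliary index s and the spin block sig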
            ALPHA = [[[alpha + delta * (-1)**(s + sig) for s in range(N_s)]
                      for i in range(N_states)] for sig in range(2)]

            if solver_data_package is None: solver_data_package = {}

            solver_data_package['which_solver'] = C
            solver_data_package['solve_parameters'] = {}
            solver_data_package['solve_parameters']['U'] = U
            solver_data_package['solve_parameters']['alpha'] = ALPHA
            solver_data_package['solve_parameters']['n_cycles'] = n_cycles
            solver_data_package['solve_parameters']['max_time'] = max_time
            solver_data_package['solve_parameters']['length_cycle'] = 200
            solver_data_package['solve_parameters']['n_warmup_cycles'] = 20000
            solver_data_package['solve_parameters']['only_sign'] = only_sign
            solver_data_package['solve_parameters']['measure_nn'] = True
            solver_data_package['solve_parameters'][
                'measure_nnt'] = bosonic_measures
            solver_data_package['solve_parameters']['measure_chipmt'] = False
            solver_data_package['solve_parameters']['measure_gw'] = False
            solver_data_package['solve_parameters']['measure_Mt'] = True
            solver_data_package['solve_parameters']['measure_ft'] = False
            solver_data_package['solve_parameters']['measure_g2t'] = False
            solver_data_package['solve_parameters']['measure_M4t'] = False
            solver_data_package['solve_parameters']['measure_hist'] = True
            solver_data_package['solve_parameters']['g2t_indep'] = []
            solver_data_package['solve_parameters']['post_process'] = True

            print solver_data_package['solve_parameters']

            solver_data_package['G0_iw'] = solver.G0_iw
            solver_data_package['D0_iw'] = solver.D0_iw
            solver_data_package['Jperp_iw'] = solver.Jperp_iw

            solver_data_package['construct|run|exit'] = 1

            if MASTER_SLAVE_ARCHITECTURE and (mpi.size > 1):
                if mpi.is_master_node():
                    print "broadcasting solver_data_package!!"
                solver_data_package = mpi.bcast(solver_data_package)

            if mpi.is_master_node(): print "about to run "
            dct = deepcopy(solver_data_package['solve_parameters'])
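            # U is not a solve() argument; it only enters through h_int, so strip it from the kwargs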
            del dct['U']
            solver.solve(h_int=h_int, **dct)
            if mpi.is_master_node():
                print "average sign: ", solver.average_sign
            if not only_sign:
                G_iw = deepcopy(solver.G_iw)
                Sigma_iw = deepcopy(solver.Sigma_iw)
                if symmetrize_quantities:
                    symmetrize_blockgf(G_iw)
                    symmetrize_blockgf(Sigma_iw)
                    selective_symmetrize_blockmatrix(solver.nn,
                                                     ['up|up', 'dn|dn'])
                    selective_symmetrize_blockmatrix(solver.nn,
                                                     ['up|dn', 'dn|up'])
                    if bosonic_measures:
                        selective_symmetrize_blockgf(solver.nn_iw,
                                                     ['up|up', 'dn|dn'])
                        selective_symmetrize_blockgf(solver.nn_iw,
                                                     ['up|dn', 'dn|up'])

                data.G_imp_iw[C] << G_iw['up']
                data.Sigma_imp_iw[C] << Sigma_iw['up']
            else:
                return solver.average_sign
Code example #38
        def run(data,
                no_fermionic_bath,
                symmetrize_quantities=True,
                trilex=False,
                n_w_f=2,
                n_w_b=2,
                n_cycles=20000,
                max_time=10 * 60,
                hartree_shift=0.0,
                solver_data_package=None):
            #------- run solver
            try:
                if solver_data_package is None:
                    solver_data_package = {}
                solver_data_package['solve_parameters'] = {}
                #solver_data_package['solve_parameters']['h_int'] = lambda: data.U_inf * n('up',0) * n('down',0)
                solver_data_package['solve_parameters']['U_inf'] = data.U_inf
                solver_data_package['solve_parameters']['hartree_shift'] = [
                    hartree_shift, hartree_shift
                ]
                solver_data_package['solve_parameters']['n_cycles'] = n_cycles
                solver_data_package['solve_parameters']['length_cycle'] = 1000
                solver_data_package['solve_parameters'][
                    'n_warmup_cycles'] = 1000
                solver_data_package['solve_parameters']['max_time'] = max_time
                solver_data_package['solve_parameters']['measure_nn'] = True
                solver_data_package['solve_parameters']['measure_nnw'] = True
                solver_data_package['solve_parameters'][
                    'measure_chipmt'] = True
                solver_data_package['solve_parameters']['measure_gt'] = False
                solver_data_package['solve_parameters']['measure_ft'] = False
                solver_data_package['solve_parameters'][
                    'measure_gw'] = not no_fermionic_bath
                solver_data_package['solve_parameters'][
                    'measure_fw'] = not no_fermionic_bath
                solver_data_package['solve_parameters']['measure_g2w'] = trilex
                solver_data_package['solve_parameters']['measure_f2w'] = False
                solver_data_package['solve_parameters']['measure_hist'] = True
                solver_data_package['solve_parameters'][
                    'measure_hist_composite'] = True
                solver_data_package['solve_parameters']['measure_nnt'] = False
                solver_data_package['solve_parameters'][
                    'move_group_into_spin_segment'] = not no_fermionic_bath
                solver_data_package['solve_parameters'][
                    'move_split_spin_segment'] = not no_fermionic_bath
                solver_data_package['solve_parameters'][
                    'move_swap_empty_lines'] = True
                solver_data_package['solve_parameters'][
                    'move_move'] = not no_fermionic_bath
                solver_data_package['solve_parameters'][
                    'move_insert_segment'] = not no_fermionic_bath
                solver_data_package['solve_parameters'][
                    'move_remove_segment'] = not no_fermionic_bath

                solver_data_package['solve_parameters']['n_w_f_vertex'] = n_w_f
                solver_data_package['solve_parameters']['n_w_b_vertex'] = n_w_b
                solver_data_package['solve_parameters'][
                    'keep_Jperp_negative'] = True
                print solver_data_package['solve_parameters']

                solver_data_package['G0_iw'] = data.solver.G0_iw
                solver_data_package['D0_iw'] = data.solver.D0_iw
                solver_data_package['Jperp_iw'] = data.solver.Jperp_iw

                solver_data_package['construct|run|exit'] = 1

                if MASTER_SLAVE_ARCHITECTURE and (mpi.size > 1):
                    solver_data_package = mpi.bcast(solver_data_package)
                print "about to run "
                #data.solver.solve( **(solver_data_package['solve_parameters'] + )
                data.solver.solve(
                    h_int=solver_data_package['solve_parameters']['U_inf'] *
                    n('up', 0) * n('down', 0),
                    hartree_shift=solver_data_package['solve_parameters']
                    ['hartree_shift'],
                    n_cycles=solver_data_package['solve_parameters']
                    ['n_cycles'],
                    length_cycle=solver_data_package['solve_parameters']
                    ['length_cycle'],
                    n_warmup_cycles=solver_data_package['solve_parameters']
                    ['n_warmup_cycles'],
                    max_time=solver_data_package['solve_parameters']
                    ['max_time'],
                    measure_nn=solver_data_package['solve_parameters']
                    ['measure_nn'],
                    measure_nnw=solver_data_package['solve_parameters']
                    ['measure_nnw'],
                    measure_chipmt=solver_data_package['solve_parameters']
                    ['measure_chipmt'],
                    measure_gt=solver_data_package['solve_parameters']
                    ['measure_gt'],
                    measure_ft=solver_data_package['solve_parameters']
                    ['measure_ft'],
                    measure_gw=solver_data_package['solve_parameters']
                    ['measure_gw'],
                    measure_fw=solver_data_package['solve_parameters']
                    ['measure_fw'],
                    measure_g2w=solver_data_package['solve_parameters']
                    ['measure_g2w'],
                    measure_f2w=solver_data_package['solve_parameters']
                    ['measure_f2w'],
                    measure_hist=solver_data_package['solve_parameters']
                    ['measure_hist'],
                    measure_hist_composite=solver_data_package[
                        'solve_parameters']['measure_hist_composite'],
                    measure_nnt=solver_data_package['solve_parameters']
                    ['measure_nnt'],
                    move_group_into_spin_segment=solver_data_package[
                        'solve_parameters']['move_group_into_spin_segment'],
                    move_split_spin_segment=solver_data_package[
                        'solve_parameters']['move_split_spin_segment'],
                    move_swap_empty_lines=solver_data_package[
                        'solve_parameters']['move_swap_empty_lines'],
                    move_move=solver_data_package['solve_parameters']
                    ['move_move'],
                    move_insert_segment=solver_data_package['solve_parameters']
                    ['move_insert_segment'],
                    move_remove_segment=solver_data_package['solve_parameters']
                    ['move_remove_segment'],
                    n_w_f_vertex=solver_data_package['solve_parameters']
                    ['n_w_f_vertex'],
                    n_w_b_vertex=solver_data_package['solve_parameters']
                    ['n_w_b_vertex'],
                    keep_Jperp_negative=solver_data_package['solve_parameters']
                    ['keep_Jperp_negative'])

                data.G_imp_iw << data.solver.G_iw

                get_Sigma_from_G_and_G0 = False
                for U in data.fermionic_struct.keys():
                    if numpy.any(numpy.isnan(
                            data.G_imp_iw[U].data[:, 0, 0])) or numpy.any(
                                numpy.isnan(data.solver.F_iw[U].data[:, 0,
                                                                     0])):
                        if numpy.any(
                                numpy.isnan(data.G_imp_iw[U].data[:, 0, 0])):
                            print "[Node", mpi.rank, "]", " nan in F_imp and G_imp!!! exiting to system..."
                            if mpi.is_master_node():
                                data.dump_all(archive_name="black_box_nan",
                                              suffix='')
                                cthyb.dump(data.solver,
                                           archive_name="black_box_nan",
                                           suffix='')
                            if not MASTER_SLAVE_ARCHITECTURE: mpi.barrier()
                            solver_data_package['construct|run|exit'] = 2
                            if MASTER_SLAVE_ARCHITECTURE and (mpi.size > 1):
                                solver_data_package = mpi.bcast(
                                    solver_data_package)
                            quit()
                        else:
                            print "[Node", mpi.rank, "]", " nan in F but not in G!! will be calculating Sigma from G and G0"
                            get_Sigma_from_G_and_G0 = True

                if symmetrize_quantities:
                    symmetrize_blockgf(data.G_imp_iw, data.fermionic_struct)
                    symmetrize_blockgf(data.solver.F_iw, data.fermionic_struct)

                if not get_Sigma_from_G_and_G0:
                    extract_Sigma_from_F_and_G(
                        data.Sigma_imp_iw, data.solver.F_iw, data.G_imp_iw
                    )  #!!!! this thing fails when the S S boson is not SU(2) symmetric
                else:
                    extract_Sigma_from_G0_and_G(data.Sigma_imp_iw,
                                                data.solver.G0_iw,
                                                data.G_imp_iw)
                data.get_Sz(
                )  #moved these in impurity!!!!! maybe not the best idea
                data.get_chi_imp()

            except Exception as e:
                import traceback, os.path, sys
                top = traceback.extract_stack()[-1]
                if mpi.is_master_node():
                    data.dump_impurity_input('black_box', '')
                raise Exception(
                    '%s, %s, %s \t %s ' %
                    (type(e).__name__, os.path.basename(top[0]), top[1], e))
Code example #39

def is_vasp_running(vasp_pid):
    """
    Tests if VASP initial process is still alive.
    """
    pid_exists = False
    if mpi.is_master_node():
        try:
            os.kill(vasp_pid, 0)
        except OSError, e:
            pid_exists = e.errno == errno.EPERM
        else:
            pid_exists = True

    pid_exists = mpi.bcast(pid_exists)
    return pid_exists


def get_dft_energy():
    """
    Reads energy from the last line of OSZICAR.
    """
    with open('OSZICAR', 'r') as f:
        nextline = f.readline()
        while nextline.strip():
            line = nextline
            nextline = f.readline()


#            print "OSZICAR: ", line[:-1]
Code example #40
import pytriqs.utility.mpi as mpi
from pytriqs.archive import HDFArchive

from pyed.ParameterCollection import ParameterCollection

from pomerol2triqs import PomerolED

# ----------------------------------------------------------------------
if __name__ == '__main__':

    if mpi.is_master_node():
        with HDFArchive('data_model.h5', 'r') as A:
            p = A["p"]
    else:
        p = None
    p = mpi.bcast(p)

    p.convert_keys_from_string_to_python('index_converter')

    pom = PomerolED(p.index_converter, verbose=True)
    pom.diagonalize(p.H)

    p.g_iw = pom.G_iw(p.gf_struct, p.beta, n_iw=400)
    p.g_tau = pom.G_tau(p.gf_struct, p.beta, n_tau=200)['0']
    p.tau = np.array([float(tau) for tau in p.g_tau.mesh])

    opt = dict(block_order='AABB',
               beta=p.beta,
               gf_struct=p.gf_struct,
               blocks=set([('0', '0')]),
               n_iw=1,
Code example #41
File: multivar.py  Project: richtma93/triqs
G=BlockGf(name_list = ["ch","sp"],block_list = [g,g])
assert G['ch'].data.shape==(200,199,1,1,1)

A=HDFArchive("g_multivar_3.h5",'a')
A["G"] = G
del A

#mpi bcast
import pytriqs.utility.mpi as mpi
g2=Gf(mesh = mprod, target_shape = [1,1,1])
if mpi.is_master_node():
 g2.data[:,:,:,:,:] = 5
 assert g2.data[0,0,0,0,0] == 5, "not ok : %s"%(g2.data[0,0,0,0,0])
if not mpi.is_master_node():
 assert g2.data[0,0,0,0,0] == 0, "not ok : %s"%(g2.data[0,0,0,0,0])
g2 = mpi.bcast(g2)
if not mpi.is_master_node():
 assert g2.data[0,0,0,0,0] == 5, "not ok : %s"%(g2.data[0,0,0,0,0])

#ImTime
##construct product mesh
m1=MeshImTime(beta=1., S="Fermion", n_max=100)
m2=MeshImTime(beta=1., S="Boson", n_max=100)
mprod=MeshProduct(m1,m2)
g=Gf(mesh = mprod, target_shape = [1,1,1])
f=Gf(mesh = mprod, target_shape =[1,1,1])
g.data[:]=2.5
f.data[:]=2.5
#operations
#inplace
f+=g
Code example #42
File: block_matrix.py  Project: JaksaVucicevic/triqs
from pytriqs.arrays.block_matrix import *
from pytriqs.archive import *
import pytriqs.utility.mpi as mpi
from numpy import matrix

A = BlockMatrix(['up'], matrix([[0]]))
A0 = mpi.bcast(A)

R = HDFArchive("block_matrix.output.h5", 'w')
R["A"] = A
del R
Code example #43
#Converter.convert_dft_input()
#mpi.barrier()

previous_runs = 0
previous_present = False
if mpi.is_master_node():
    f = HDFArchive(dft_filename+'.h5','a')
    if 'dmft_output' in f:
        ar = f['dmft_output']
        if 'iterations' in ar:
            previous_present = True
            previous_runs = ar['iterations']
    else:
        f.create_group('dmft_output')
    del f
previous_runs    = mpi.bcast(previous_runs)
previous_present = mpi.bcast(previous_present)

SK=SumkDFT(hdf_file=dft_filename+'.h5',use_dft_blocks=use_blocks,h_field=h_field)

n_orb = SK.corr_shells[0]['dim']
l = SK.corr_shells[0]['l']
spin_names = ["up","down"]
orb_names = [i for i in range(n_orb)]

# Use GF structure determined by DFT blocks
gf_struct = SK.gf_struct_solver[0]

# Construct Slater U matrix 
Umat = U_matrix(n_orb=n_orb, U_int=U, J_hund=J, basis='cubic',)
Code example #44
File: gf_bcast.py  Project: aherrmann/triqs1.1
# Import the Green's functions
from pytriqs.gf.local import GfImFreq, iOmega_n, inverse

# Create the Matsubara-frequency Green's function and initialize it
g = GfImFreq(indices=[1], beta=50, n_points=1000, name="imp")
g <<= inverse(iOmega_n + 0.5)

import pytriqs.utility.mpi as mpi

mpi.bcast(g)

#Block

from pytriqs.gf.local import *
g1 = GfImFreq(indices=['eg1', 'eg2'], beta=50, n_points=1000, name="egBlock")
g2 = GfImFreq(indices=['t2g1', 't2g2', 't2g3'],
              beta=50,
              n_points=1000,
              name="t2gBlock")
G = BlockGf(name_list=('eg', 't2g'), block_list=(g1, g2), make_copies=False)

mpi.bcast(G)

#imtime
from pytriqs.gf.local import *

# A Green's function on the Matsubara axis set to a semicircular
gw = GfImFreq(indices=[1], beta=50)
gw <<= SemiCircular(half_bandwidth=1)

# Create an imaginary-time Green's function and plot it
Code example #45
def nested_edmft_calculation( clusters, nested_struct_archive_name = None, flexible_Gweiss=False, sign=-1, sign_up_to=2, use_Gweiss_causal_cautionary = False,
                              freeze_Uweiss = False, no_lattice = False,
                              Us = [1.0], decoupling = 'ising', decoupling_alpha = 0.5,
                              Ts = [0.125], 
                              ns = [0.5], fixed_n = True,
                              mutildes = [0.0],
                              dispersion = lambda kx, ky: epsilonk_square(kx,ky, 0.25), ph_symmetry = True,                              
                              n_ks = [64], n_k_automatic = False, n_k_rules = [[0.06, 32],[0.03, 48],[0.005, 64],[0.00, 96]],
                              w_cutoff = 50.0,
                              min_its = 5, max_its=25, 
                              mix_GWlatt = False, rules = [[0, 0.5], [6, 0.2], [12, 0.65]],              
                              mix_Uweiss = False, Uweiss_mix_rules = [[0, 0.5], [6, 0.2], [12, 0.65]],                       
                              use_cthyb = False,
                              alpha = 0.5, delta = 0.1,  automatic_alpha_and_delta = False,
                              n_cycles=10000000, 
                              max_time_rules= [ [1, 5*60], [2, 20*60], [4, 80*60], [8, 200*60], [16,400*60] ], time_rules_automatic=False, exponent = 0.7, overall_prefactor=4.0, no_timing = False,
                              accuracy = 1e-4, 
                              solver_data_package = None,
                              print_current = 1,
                              insulating_initial = False,
                              Wilson_bath_initial = False,
                              bath_initial = False,
                              selfenergy_initial = False,  
                              initial_guess_archive_name = '', suffix=''):

  if mpi.is_master_node():
    print "WELCOME TO nested edmft calculation!"
    if n_k_automatic: print "n_k automatic!!!"
  if len(n_ks)==0 and n_k_automatic: n_ks=[0]

  if use_cthyb:
    assert False, "cthyb usage not implemented"
    solver_class = solvers.cthyb
  else:
    solver_class = solvers.ctint

  fermionic_struct = {'up': [0]}
  bosonic_struct = {'0': [0], '1': [0]}   
  if decoupling=='ising':
    if decoupling_alpha==1.0:
      del bosonic_struct['1']
    if decoupling_alpha==0.0:
      del bosonic_struct['0']
  elif decoupling=='heisenberg':
    if decoupling_alpha==2.0/3.0:
      del bosonic_struct['1']
    if decoupling_alpha==1.0/3.0:
      del bosonic_struct['0']


  if mpi.is_master_node(): print "nested structure: "
  if not (nested_struct_archive_name is None):
    try:
      nested_scheme = nested_struct.from_file(nested_struct_archive_name)
      if mpi.is_master_node(): print "nested structure loaded from file",nested_struct_archive_name 
    except:  
      nested_scheme = nested_struct(clusters)
      nested_scheme.print_to_file(nested_struct_archive_name) 
      if mpi.is_master_node(): print "nested structure printed to file",nested_struct_archive_name 
  else:
    nested_scheme = nested_struct(clusters)
  if mpi.is_master_node(): print nested_scheme.get_tex()

  impurity_struct = nested_scheme.get_impurity_struct()

  if not time_rules_automatic:
    max_times = {}
    for C in impurity_struct:
      for r in max_time_rules:
        if r[0]<=len(impurity_struct[C]):
          max_times[C] = r[1]
    if mpi.is_master_node(): print "max_times from rules: ",max_times


  beta = 1.0/Ts[0] 
  
  n_iw = int(((w_cutoff*beta)/math.pi-1.0)/2.0)
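  # chosen so that the largest fermionic Matsubara frequency (2*n_iw+1)*pi/beta stays just below w_cutoff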
  if mpi.is_master_node():
    print "PM HUBBARD GW: n_iw: ",n_iw

  if not n_k_automatic:
    n_k = n_ks[0]
    print "n_k = ", n_k
  else:
    n_k = n_k_from_rules(Ts[0], n_k_rules)
    #if mpi.is_master_node(): print "n_k automatic!!!"

  dt = nested_edmft_data(  n_iw = n_iw, 
                     n_k = n_k, 
                     beta = beta, 
                     impurity_struct = impurity_struct,
                     fermionic_struct = fermionic_struct,
                     bosonic_struct = bosonic_struct,
                     archive_name="so_far_nothing_you_shouldnt_see_this_file"  )

  if fixed_n:
    ps = itertools.product(n_ks,ns,Us,Ts)
  else:
    ps = itertools.product(n_ks,mutildes,Us,Ts)

  counter = 0
  old_nk = n_k
  old_beta = beta

  for p in ps:    
    #name stuff to avoid confusion
    U = p[2]
    T = p[3]
    beta = 1.0/T
    nk = (p[0] if (not n_k_automatic) else n_k_from_rules(T, n_k_rules) ) #T has to be known before the rule lookup
    if fixed_n:
      n = p[1]
    else:
      mutilde = p[1]
      n = None

    if nk!=old_nk and (not n_k_automatic):
      assert False, "changing n_k not implemented" 
      dt.change_ks(IBZ.k_grid(nk))

    if beta!=old_beta:
      assert False, "changing beta not implemented"
      n_iw = int(((w_cutoff*beta)/math.pi-1.0)/2.0)
      if n_k_automatic:
        nk = n_k_from_rules(T, n_k_rules)
        if nk != old_nk: 
          dt.change_ks(IBZ.k_grid(nk))
      dt.change_beta(beta, n_iw)
 
    old_beta = beta
    old_nk = nk
    nested_scheme.set_nk(nk) #don't forget this part

    filename = "result"
    if len(n_ks)>1 and (not n_k_automatic):
      filename += ".nk%s"%nk
    if len(ns)>1 and fixed_n: 
      filename += ".n%s"%n
    if len(mutildes)>1 and not fixed_n:
      filename += ".mutilde%s"%mutilde      
    if len(Us)>1: filename += ".U%s"%U
    if len(Ts)>1: filename += ".T%.4f"%T
    filename += ".h5"
    dt.archive_name = filename

    if mpi.is_master_node():
      if fixed_n:
        print "Working: U: %s T %s n: %s n_k: %s n_iw: %s"%(U,n,T,nk,n_iw)
      else:
        print "Working: U: %s T %s mutilde: %s n_k: %s n_iw: %s"%(U,mutilde,T,nk,n_iw)

    if mpi.is_master_node():
      print "about to fill dispersion. ph-symmetry: ",ph_symmetry 
    for key in dt.fermionic_struct.keys():
      for kxi in range(dt.n_k):
        for kyi in range(dt.n_k):
          dt.epsilonk[key][kxi,kyi] = dispersion(dt.ks[kxi], dt.ks[kyi])
    if decoupling=='ising':
      for key in dt.bosonic_struct.keys():
        if key=='0': dt.Jq[key][:,:]=decoupling_alpha*U
        if key=='1': dt.Jq[key][:,:]=(decoupling_alpha-1)*U
    elif decoupling=='heisenberg':
      for key in dt.bosonic_struct.keys(): #loop over the bosonic channels, as in the ising branch
        if key=='0': dt.Jq[key][:,:]=(3.0*decoupling_alpha-1.0)*U
        if key=='1': dt.Jq[key][:,:]=(decoupling_alpha-2.0/3.0)*U
    else: assert False, "unknown decoupling scheme"

    prepare_nested_edmft( dt, nested_scheme, solver_class )

    solver_class.initialize_solvers( dt, solver_data_package, bosonic_measures=True )
 

    if no_timing:
      max_times = {}
      for C in dt.impurity_struct.keys():
        max_times[C] = -1
      if mpi.is_master_node(): print "no_timing! solvers will run until they perform all the mc steps",max_times        

    if time_rules_automatic and (not no_timing): 
      max_times = {}
      for C in dt.impurity_struct.keys():
        Nc = len(dt.impurity_struct[C])
        pref = ((dt.beta/8.0)*U*Nc)**exponent #**1.2
        print C
        print "Nc: ",Nc,
        print "U: ", U,  
        print "beta: ",dt.beta,
        print "pref: ",pref 
        max_times[C] = int(overall_prefactor*pref*5*60)
      if mpi.is_master_node(): print "max times automatic: ",max_times        

    identical_pairs_Sigma = nested_scheme.get_identical_pairs()
    identical_pairs_G = nested_scheme.get_identical_pairs_for_G()
    identical_pairs_G_ai = nested_scheme.get_identical_pairs_for_G(across_imps=True)
 
    def do_print(*args):
      for x in args: print x,
      print "" 

    used_Cs = []

    actions =[  generic_action(  name = "lattice",
                    main = ( (lambda data: nested_edmft_mains.lattice(data, n=n, ph_symmetry=ph_symmetry, accepted_mu_range=[-2.0,2.0]))
                             if (not no_lattice) else
                             (lambda data: [ data.copy_imp_to_latt(used_Cs[0]),
                                             do_print("just copying imp",used_Cs[0],"->latt, no_lattice! Gijw000:",data.Gijw['up'][0,0,0],
                                                      [ "Wijnu_"+A+ "000:"+str(data.Wijnu['0'][0,0,0]) for A in data.Wijnu.keys()] ) ]) #TODO generalize for any size cluster
                           ),
                           
                    mixers = [], cautionaries = [], allowed_errors = [],    
                    printout = lambda data, it: ( [data.dump_general( quantities = ['Gkw','Gijw','Wqnu','Wijnu'], suffix='-current' ), data.dump_scalar(suffix='-current')
                                                  ] if ((it+1) % print_current==0) else None 
                                                )
                              ),
                generic_action(  name = "pre_impurity",
                    main = lambda data: nested_edmft_mains.pre_impurity(data, freeze_Uweiss = freeze_Uweiss, Cs= used_Cs),                       
                    mixers = [], cautionaries = [], allowed_errors = [],        
                    printout = lambda data, it: ( data.dump_general( quantities = ['Gweiss_iw','Uweiss_iw','Uweiss_dyn_iw'], suffix='-current' ) )
                              ),
                generic_action(  name = "impurity",
                    main = (lambda data: nested_mains.impurity(data, U, symmetrize_quantities = True, alpha=alpha, delta=delta, automatic_alpha_and_delta = automatic_alpha_and_delta, 
                                                               n_cycles=n_cycles, max_times = max_times, solver_data_package = solver_data_package, bosonic_measures=not freeze_Uweiss, Cs= used_Cs ))
                           if (not use_cthyb) else
                           (lambda data: nested_mains.impurity_cthyb(data, U, symmetrize_quantities = True, n_cycles=n_cycles, max_times = max_times, solver_data_package = solver_data_package )),
                    mixers = [], cautionaries = [lambda data,it: local_nan_cautionary(data, data.impurity_struct, Qs = ['Sigma_imp_iw'], raise_exception = True),                                                 
                                                 lambda data,it: ( symmetric_G_and_self_energy_on_impurity(data.G_imp_iw, data.Sigma_imp_iw, data.solvers, 
                                                                                                           identical_pairs_Sigma, identical_pairs_G,
                                                                                                           across_imps=True, identical_pairs_G_ai=identical_pairs_G_ai  )
                                                                   if it>=10000 else  
                                                                   symmetrize_cluster_impurity(data.Sigma_imp_iw, identical_pairs_Sigma) )
                                                ], allowed_errors = [1],    
                    printout = lambda data, it: ( [ data.dump_general( quantities = ['Sigma_imp_iw','G_imp_iw'], suffix='-current' ),
                                                    data.dump_solvers(suffix='-current')
                                                  ] if ((it+1) % print_current==0) else None)  ),
                generic_action(  name = "post_impurity",
                    main = lambda data: nested_edmft_mains.post_impurity(data, identical_pairs = identical_pairs_Sigma, Cs= used_Cs),#, homogeneous_pairs = identical_pairs_G), 
                    mixers = [], cautionaries = [], allowed_errors = [],    
                    printout = lambda data, it: (data.dump_general( quantities = ['chi_imp_iw','P_imp_iw','W_imp_iw'], suffix='-current' ) if ((it+1) % print_current==0) else None) ),
                generic_action(  name = "selfenergy",
                    main = lambda data: nested_edmft_mains.selfenergy(data), 
                    mixers = [], cautionaries = [lambda data,it: nonloc_sign_cautionary(data.Sigmakw['up'], desired_sign = -1, clip_off = False, real_or_imag = 'imag')], allowed_errors = [0],    
                    printout = lambda data, it: (data.dump_general( quantities = ['Sigmakw','Sigmaijw','Sigma_loc_iw','Pqnu','Pijnu','P_loc_iw'], suffix='-current' ) if ((it+1) % print_current==0) else None) ) ]



    monitors = [ monitor( monitored_quantity = lambda: dt.ns['up'], 
                          h5key = 'n_vs_it', 
                          archive_name = dt.archive_name),
                 monitor( monitored_quantity = lambda: dt.mus['up'], 
                          h5key = 'mu_vs_it', 
                          archive_name = dt.archive_name),
                 monitor( monitored_quantity = lambda: dt.err, 
                          h5key = 'err_vs_it', 
                          archive_name = dt.archive_name) ]

    monitors+= [ monitor( monitored_quantity = lambda: dt.Sigma_loc_iw['up'].data[dt.nw/2,0,0].imag, 
                          h5key = 'ImSigma_loc_iw0_vs_it', 
                          archive_name = dt.archive_name),
                 monitor( monitored_quantity = lambda: dt.Sigma_loc_iw['up'].data[dt.nw/2,0,0].real, 
                          h5key = 'ReSigma_loc_iw0_vs_it', 
                          archive_name = dt.archive_name),
                 monitor( monitored_quantity = lambda: dt.Sigmakw['up'][dt.nw/2, dt.n_k/2, dt.n_k/2].imag, 
                          h5key = 'ImSigmakw_pipi_vs_it', 
                          archive_name = dt.archive_name),
                 monitor( monitored_quantity = lambda: dt.Sigmakw['up'][dt.nw/2, dt.n_k/2, dt.n_k/2].real, 
                          h5key = 'ReSigmakw_pipi_vs_it', 
                          archive_name = dt.archive_name)]

    
    convergers = [ converger( monitored_quantity = lambda: dt.G_loc_iw,
                            accuracy=accuracy, 
                            struct=fermionic_struct, 
                            archive_name= dt.archive_name,
                            h5key = 'diffs_G_loc' ),
                   converger( monitored_quantity = lambda: dt.W_loc_iw,
                            accuracy=accuracy, 
                            struct=bosonic_struct, 
                            archive_name= dt.archive_name,
                            h5key = 'diffs_W_loc' ),
                   converger( monitored_quantity = lambda: dt.P_loc_iw,
                            accuracy=accuracy, 
                            struct=bosonic_struct, 
                            archive_name= dt.archive_name,
                            h5key = 'diffs_P_loc' ),
                   converger( monitored_quantity = lambda: dt.Sigma_loc_iw,
                            accuracy=accuracy, 
                            struct=fermionic_struct, 
                            archive_name= dt.archive_name,
                            h5key = 'diffs_Sigma_loc') ]
    max_dist = 3
    for i in range(max_dist+1):
      for j in range(0,i+1):
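        # i=i, j=j default arguments freeze the current loop values, so each converger watches its own (i,j) component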
        convergers.append( converger( monitored_quantity = lambda i=i, j=j: dt.Gijw['up'][:,i,j],
                                      accuracy=accuracy,
                                      func = converger.check_numpy_array,  
                                      archive_name= dt.archive_name,
                                      h5key = 'diffs_G_%s%s'%(i,j) ) )
    convergers.append( converger( monitored_quantity = lambda: dt.G_imp_iw,
                                  accuracy=accuracy, 
                                  struct=impurity_struct, 
                                  archive_name= dt.archive_name,
                                  h5key = 'diffs_G_imp' ) )
    convergers.append( converger( monitored_quantity = lambda: dt.Gweiss_iw,
                                  accuracy=accuracy, 
                                  struct=impurity_struct, 
                                  archive_name= dt.archive_name,
                                  h5key = 'diffs_Gweiss' ) )
    combo_imp_struct = {}   
    for CA,Uw in dt.Uweiss_iw:
      combo_imp_struct[CA] = impurity_struct[CA.split('|')[0]] #the cluster label is the part of the combined 'C|A' key before '|'
    convergers.append( converger( monitored_quantity = lambda: dt.Uweiss_iw,
                                  accuracy=accuracy, 
                                  struct=combo_imp_struct, 
                                  archive_name= dt.archive_name,
                                  h5key = 'diffs_Uweiss' ) )


    dmft = generic_loop(
                name = "nested-cluster EDMFT", 
                actions = actions,
                convergers = convergers,  
                monitors = monitors )

    start_from_action_index = 0

    if freeze_Uweiss:
      if mpi.is_master_node(): print "Uweiss frozen! Equivalent to nested DMFT calculation"

    if (counter==0): #do the initial guess only once!         
      if initial_guess_archive_name!='':
        if selfenergy_initial:
          start_from_action_index = 0
          if mpi.is_master_node(): print "Taking Sigma from initial guess in:",initial_guess_archive_name, "suffix: ",suffix
          HDFA = HDFArchive(initial_guess_archive_name,'r')                
          dt.Sigmakw = deepcopy(HDFA['Sigmakw%s'%suffix])
          dt.Sigma_imp_iw << HDFA['Sigma_imp_iw%s'%suffix]
          dt.mus = HDFA['mus%s'%suffix]
          del HDFA
          dt.dump_general( quantities = ['Sigmakw','Sigma_imp_iw'], suffix='-initial' )  
        elif bath_initial:
          start_from_action_index = 2
          if mpi.is_master_node(): print "Taking Gweiss from initial guess in:",initial_guess_archive_name, "suffix: ",suffix
          HDFA = HDFArchive(initial_guess_archive_name,'r')                
          input_blocks = [C for C,gw in HDFA['Gweiss_iw%s'%suffix]]  
          if set(input_blocks)!=set(impurity_struct.keys()): 
            used_Cs[:]=input_blocks[:]
            print "WARNING: input block structure does not correspond to the nested scheme block structure. Running only the impurities in the input block set."
          for C,gw in HDFA['Gweiss_iw%s'%suffix]: 
            dt.Gweiss_iw[C] << gw 
          dt.mus = HDFA['mus%s'%suffix]
          for C in impurity_struct.keys(): #in any case fill Uweiss for P_imp calculation not to crash
            for A in bosonic_struct:
              print "dt.Jq[",A,"][0,0]:",dt.Jq[A][0,0]
              dt.Uweiss_iw[C+'|'+A] << dt.Jq[A][0,0]
              dt.Uweiss_dyn_iw[C+'|'+A] << 0.0
          del HDFA
          dt.dump_general( quantities = ['Gweiss_iw','Uweiss_iw','Uweiss_dyn_iw'], suffix='-initial' )       
      else:
        if not fixed_n:  
          dt.mus['up'] = mutilde
        else:
          dt.mus['up'] = U/2.0
        if 'down' in dt.fermionic_struct.keys(): dt.mus['down'] = dt.mus['up']   #this is not necessary at the moment, but may become         
        if Wilson_bath_initial:
          start_from_action_index = 2
          if impurity_struct.keys()!=["1x1"]: assert False, "Wilson initializer inapplicable!"
          for A in bosonic_struct:
            print "dt.Jq[",A,"][0,0]:",dt.Jq[A][0,0]
            dt.Uweiss_iw['1x1|'+A] << dt.Jq[A][0,0]
            dt.Uweiss_dyn_iw['1x1|'+A] << 0.0
          dt.Gweiss_iw['1x1'] << inverse(iOmega_n+U/2.0-Wilson(0.25))
          dt.dump_general( quantities = ['Gweiss_iw','Uweiss_iw','Uweiss_dyn_iw'], suffix='-initial' )  
        else: 
          for C in dt.impurity_struct.keys():
            for l in dt.impurity_struct[C]: #just the local components (but on each site!)         
              dt.Sigma_imp_iw[C].data[:,l,l] = U/2.0-int(insulating_initial)*1j/numpy.array(dt.ws)
            for A in bosonic_struct:
              CA = C+"|"+A
              dt.Uweiss_iw[CA] << dt.Jq[A][0,0]
              dt.Uweiss_dyn_iw[CA] << 0.0
              dt.P_imp_iw[CA] << 0.0
          for A in bosonic_struct:
            dt.Pqnu[A][:,:,:] = 0.0
            dt.P_loc_iw[A]<< 0.0
            dt.Pijnu[A][:,:,:]= 0.0        
          for key in fermionic_struct.keys(): 
            dt.Sigmakw[key][:,:,:] = U/2.0    
            numpy.transpose(dt.Sigmakw[key])[:] -= int(insulating_initial)*1j/numpy.array(dt.ws)
          dt.dump_general( quantities = ['Sigmakw','Sigma_imp_iw'], suffix='-initial' )  

 

    #run nested!-------------

    if mix_GWlatt:
      actions[0].mixers.extend([ mixer( mixed_quantity = lambda: dt.Gijw,
                                      rules=rules,
                                      func=mixer.mix_lattice_gf,
                                      initialize = True ) ])
      actions[0].mixers.extend([ mixer( mixed_quantity = lambda: dt.Wijnu,
                                      rules=rules,
                                      func=mixer.mix_lattice_gf,
                                      initialize = True ) ])
    if mix_Uweiss:
      print "mixing Uweiss, rules:", Uweiss_mix_rules
      actions[1].mixers.extend([ mixer( mixed_quantity = lambda: dt.Uweiss_iw,
                                      rules=Uweiss_mix_rules,
                                      func=mixer.mix_block_gf,
                                      initialize = True ) ])
      actions[1].mixers.extend([ mixer( mixed_quantity = lambda: dt.Uweiss_dyn_iw,
                                      rules=Uweiss_mix_rules,
                                      func=mixer.mix_block_gf,
                                      initialize = True ) ])


    dt.dump_parameters()
    dt.dump_non_interacting() 

    err = dmft.run( dt, 
              max_its=max_its, 
              min_its=min_its,
              max_it_err_is_allowed = 7,
              print_final=True, 
              print_current = 1,
              start_from_action_index = start_from_action_index  )
    if mpi.is_master_node():
      cmd = 'mv %s %s'%(filename, filename.replace("result", "nested_edmft")) 
      print cmd
      os.system(cmd)

    if (err==2): 
      print "Cautionary error!!! exiting..."
      solver_data_package['construct|run|exit'] = 2
      if MASTER_SLAVE_ARCHITECTURE and (mpi.size>1): solver_data_package = mpi.bcast(solver_data_package)
      break

    if not MASTER_SLAVE_ARCHITECTURE: mpi.barrier()
    counter += 1
  if not (solver_data_package is None): solver_data_package['construct|run|exit'] = 2
  if MASTER_SLAVE_ARCHITECTURE and (mpi.size>1): solver_data_package = mpi.bcast(solver_data_package)
  return dt, monitors, convergers
Code example #46
File: semicircle.py  Project: krivenko/som
    arch = HDFArchive('semicircle.h5','w')
    arch['abs_errors'] = abs_error
    arch['D'] = D

for s in abs_error:
    if mpi.is_master_node():
        g_iw << SemiCircular(D)
        g_tau << InverseFourier(g_iw)
        g_l << MatsubaraToLegendre(g_iw)

        g_iw.data[:] += s * 2*(np.random.rand(*g_iw.data.shape) - 0.5)
        g_iw.data[:] = 0.5*(g_iw.data[:,:,:] + np.conj(g_iw.data[::-1,:,:]))
        g_tau.data[:] += s * 2*(np.random.rand(*g_tau.data.shape) - 0.5)
        g_l.data[:] += s * 2*(np.random.rand(*g_l.data.shape) - 0.5)

    g_iw = mpi.bcast(g_iw)
    g_tau = mpi.bcast(g_tau)
    g_l = mpi.bcast(g_l)

    S_iw.data[:] = 1.0
    S_tau.data[:] = 1.0
    S_l.data[:] = 1.0

    if mpi.is_master_node():
        gr_name = 'abs_error_%.4f' % s
        arch.create_group(gr_name)
        abs_err_gr = arch[gr_name]

    for name, g, S, g_rec in (('g_iw', g_iw, S_iw, g_iw_rec),
                              ('g_tau',g_tau,S_tau,g_tau_rec),
                              ('g_l',  g_l,  S_l,  g_l_rec)):