def optimize_alpha_and_delta(cls, data, C, U, max_time,
                              solver_data_package):
     if mpi.is_master_node(): print "nested_mains.optimize_alpha_and_delta"
     alphas = [0.5, 0.4, 0.6, 0.3,
               0.7]  #[0.3,0.4,0.45,0.48, 0.5, 0.52, 0.55,0.6,0.7]
     deltas = [0.1, 0.5, 0.8]
     signs = numpy.zeros((len(alphas), len(deltas)))
     breaker = False
     for alpha in alphas:
         for delta in deltas:
             if mpi.is_master_node():
                 print "trying: alpha: %s delta: %s" % (alpha, delta)
             signs[alphas.index(alpha),
                   deltas.index(delta)] = sign = solvers.ctint.run(
                       data,
                       C,
                       U,
                       symmetrize_quantities=False,
                       alpha=alpha,
                       delta=delta,
                       n_cycles=1000000,
                       max_time=max_time,
                       solver_data_package=solver_data_package,
                       only_sign=True)
             if mpi.is_master_node():
                 print "%s >>>> (alpha: %s, delta: %s): sign=%s" % (
                     C, alpha, delta, sign)
             if sign > 0.8:
                 breaker = True
                 break
         if breaker: break
     ai, di = numpy.unravel_index(numpy.argmax(signs),
                                  (len(alphas), len(deltas)))
     max_sign = numpy.amax(signs)
     return alphas[ai], deltas[di], max_sign
def temporal_FT_single_core(Qktau,
                            beta,
                            ntau,
                            n_iw,
                            nk,
                            statistic='Fermion',
                            use_IBZ_symmetry=True):
    if mpi.is_master_node(): print "temporal_FT_single_core"
    if statistic == 'Fermion':
        nw = 2 * n_iw
    elif statistic == 'Boson':
        nw = 2 * n_iw - 1
    else:
        if mpi.is_master_node():
            print "statistic not implemented"
        quit()

    Qkw = numpy.zeros((nw, nk, nk), dtype=numpy.complex_)

    if use_IBZ_symmetry: max_kxi = nk / 2 + 1
    else: max_kxi = nk
    for kxi in range(max_kxi):
        if use_IBZ_symmetry: max_kyi = nk / 2 + 1
        else: max_kyi = nk
        numpy.transpose(Qkw[:,kxi,:])[0:max_kyi,:] = [f( deepcopy(Qktau[:,kxi,kyi]), beta, ntau, n_iw, statistic )\
                                                           for kyi in range(max_kyi)]
    if use_IBZ_symmetry:
        for wi in range(nw):
            IBZ.copy_by_weak_symmetry(Qkw[wi, :, :], nk)
    return Qkw
def full_fill_Gweiss_iw_from_Gijw_and_Sigma_imp_iw(Gweiss_iw,Gijw,Sigma_imp_iw, mapping = lambda C,i,j: [0,0]):      
  if mpi.is_master_node(): print "full_fill_Gweiss_iw_from_Gijw_and_Sigma_imp_iw"
  assert 'up' in Gijw.keys(), "this assumes there is only one block in lattice functions. should be generalized for magnetized calculations" 
  block_names = [name for name,g in Gweiss_iw]   
  for C in block_names:
    blockwise_fill_Gweiss_iw_from_Gijw_and_Sigma_imp_iw(Gweiss_iw[C],Gijw['up'],Sigma_imp_iw[C], mapping= lambda i,j: mapping(C,i,j) ) 
  if mpi.is_master_node(): print "done!"
Example #4
def mpi_parallel_get_G_loc(solver_data_package, dt):
    if mpi.is_master_node():
        print '[ master node ] [ master_rank', dt.master_rank, '] about to broadcast get_G_loc_parameters...'
        dt.G_IaJb_loc_iw << 0.0
        solver_data_package['tag'] = 'get_G_loc'
        solver_data_package['get_G_loc_parameters'] = {}
        solver_data_package['get_G_loc_parameters'][
            'G_IaJb_loc_iw'] = dt.G_IaJb_loc_iw
        solver_data_package = mpi.bcast(solver_data_package)
    G_IaJb_loc_iw = solver_data_package['get_G_loc_parameters'][
        'G_IaJb_loc_iw']
    if not (dt.master_rank is None):
        print "[ master_rank", dt.master_rank, "]: about to do get_G_loc"
        half_niw, nk, nk, nIa, nJb = numpy.shape(dt.G_IaJb_k_iw)
        G_IaJb_loc_iw.data[dt.iwis_per_master[0]:dt.iwis_per_master[-1] +
                           1, :, :] = numpy.sum(dt.G_IaJb_k_iw,
                                                axis=(1, 2)) / nk**2
    #print "[rank ", mpi.rank,"[ master_rank",dt.master_rank,"]: about to reduce G_IaJb_loc_iw"
    G_IaJb_loc_iw = mpi.all_reduce(None, G_IaJb_loc_iw, None)
    if not (dt.master_rank is None):
        print "[ master_rank", dt.master_rank, "]: done doing get_G_loc"
    if mpi.is_master_node():
        dt.G_IaJb_loc_iw << G_IaJb_loc_iw
        fit_fermionic_gf_tail(dt.G_IaJb_loc_iw,
                              starting_iw=14.0,
                              no_loc=False,
                              overwrite_tail=True,
                              max_order=5)
        dt.G_IaJb_loc_tau << InverseFourier(dt.G_IaJb_loc_iw)
        del solver_data_package['get_G_loc_parameters']
        print '[ master node ] [ master_rank', dt.master_rank, '] done doing get_G_loc'
Example #5
    def check_and_fix(self, data, finalize = True, keep_P_negative = True):
      #safe_values = self.get_safe_values(data.Jq, data.bosonic_struct, data.n_q, data.n_q)
      safe_values = {}
      for A in data.bosonic_struct.keys():
        safe_values[A] = 1.0/numpy.amin(data.Jq[A])     
      if mpi.is_master_node(): print "edmft.cautionary: safe_values: ", safe_values
      #print "[Node",mpi.rank,"]","edmft.cautionary: actual safe values: (0,1) = ", 1.0/numpy.amin(data.Jq['0']),1.0/numpy.amin(data.Jq['1'])
      #operates directly on data.P_loc_iw as this is the one that will be used in chiqnu calculation
      clipped = False
  
      prefactor = 1.0 - self.ms0 / (self.clip_counter**self.ccpower + 1.0)
      for A in data.bosonic_struct.keys():
        for i in range(data.nnu):
          if keep_P_negative:
            if (data.P_loc_iw[A].data[i,0,0].real > 0):      
              data.P_loc_iw[A].data[i,0,0] = 0.0
            #clipped = True        
          if (data.P_loc_iw[A].data[i,0,0].real < safe_values[A]) and (safe_values[A]<0.0):      
            data.P_loc_iw[A].data[i,0,0] = prefactor*safe_values[A] + 1j*data.P_loc_iw[A].data[i,0,0].imag
            clipped = True        
            if mpi.is_master_node(): print "edmft.cautionary: clipping P_loc in block ",A
      if clipped and finalize: 
        self.clip_counter += 1 
      else: 
        self.clip_counter = self.clip_counter/self.ccrelax 

      return clipped
Example #6
def mpi_parallel_get_G(solver_data_package, dt):
    if mpi.is_master_node():
        print 'master node about to broadcast get_G_parameters...'
        solver_data_package['tag'] = 'get_G'
        solver_data_package['get_G_parameters'] = {}
        solver_data_package['get_G_parameters'][
            'Sigma_IaJb_imp_iw'] = dt.Sigma_IaJb_imp_iw
        solver_data_package['get_G_parameters']['H0_k'] = dt.H0_k
        print "master node sending solver_data_package: ", solver_data_package.keys(
        )
        solver_data_package = mpi.bcast(solver_data_package)

    if not (dt.master_rank is None):
        print "[ master_rank", dt.master_rank, "]: received solver_data_package: ", solver_data_package.keys(
        )
        print "[ master_rank", dt.master_rank, "]: about to do get_G"
        for iwii, iwi in enumerate(dt.iwis_per_master):
            dt.Sigma_imp_data[iwii, :, :] = solver_data_package[
                'get_G_parameters']['Sigma_IaJb_imp_iw'].data[iwi, :, :]
        H0_k = solver_data_package['get_G_parameters']['H0_k']
        parallel_get_Nambu_G_for_cellular(numpy.array(dt.iws_per_master), H0_k,
                                          dt.Sigma_imp_data, dt.G_IaJb_k_iw)
        print "[ master_rank", dt.master_rank, "]: done doing get_G"
    if mpi.is_master_node():
        del solver_data_package['get_G_parameters']
    def change_beta(self, beta_new, n_iw_new=None, finalize=True):
        if mpi.is_master_node(): print ">>>>>>>> CHANGING BETA!!!!"
        if n_iw_new is None: n_iw_new = self.n_iw
        nw_new = n_iw_new * 2
        # The next two definitions are assumptions added to keep this truncated snippet
        # self-contained: a fresh fermionic Matsubara grid for beta_new, and an unchanged ntau.
        ws_new = [(2 * n + 1) * numpy.pi / beta_new for n in range(-n_iw_new, n_iw_new)]
        ntau_new = self.ntau

        #---lattice gfs
        for key in self.non_local_fermionic_gfs:
            for U in self.fermionic_struct.keys():
                try:
                    if mpi.is_master_node():
                        print "  doing: ", key, "[", U, "]", " keys: ", vars(
                            self)[key].keys()
                    if not (U in vars(self)[key].keys()):
                        if mpi.is_master_node():
                            print "WARNING: skipping block", U
                        continue
                except:
                    print "WARNING: could not change temperature for ", key
                    continue
                g = numpy.zeros((nw_new, self.n_k, self.n_k),
                                dtype=numpy.complex_)
                for kxi in range(self.n_k):
                    for kyi in range(self.n_k):
                        mats_freq.change_temperature(
                            vars(self)[key][U][:, kxi, kyi], g[:, kxi, kyi],
                            self.ws, ws_new)
                vars(self)[key][U] = copy.deepcopy(g)
        self.nw = nw_new
        self.ws = copy.deepcopy(ws_new)
        self.iws = [1j * w for w in self.ws]
        if finalize:
            self.beta = beta_new
            self.n_iw = n_iw_new
            self.ntau = ntau_new
def blockwise_get_Wqnu_from_Jq_and_Pqnu(Jq,Pqnu):
  if mpi.is_master_node(): print "blockwise_get_Wqnu_from_Jq_and_Pqnu...",
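  # What follows computes the screened interaction in place, reusing the -P array:
  # W_q(nu) = [ 1/J_q - P_q(nu) ]^{-1}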
  Wqnu = -Pqnu[:,:,:]
  Wqnu[:,:,:] += 1.0/Jq[:,:]
  Wqnu **= -1.0
  if mpi.is_master_node(): print "done!"
  return Wqnu
def temporal_FT(Qktau, beta, ntau, n_iw, nk, statistic='Fermion', use_IBZ_symmetry = True, N_cores=1):        
  if N_cores==1: return temporal_FT_single_core(Qktau, beta, ntau, n_iw, nk, statistic, use_IBZ_symmetry)
  if mpi.is_master_node(): print "temporal_FT, N_cores: ",N_cores
  if statistic=='Fermion':
    nw = 2*n_iw
  elif statistic=='Boson':
    nw = 2*n_iw-1
  else:
    if mpi.is_master_node(): 
      print "statistic not implemented"
    quit()

  Qkw = numpy.zeros((nw,nk,nk), dtype=numpy.complex_)
        
  if use_IBZ_symmetry: max_kxi = nk/2+1
  else: max_kxi = nk
  pool = Pool(processes=N_cores)              # start worker processes             
  for kxi in range(max_kxi):                    
    if use_IBZ_symmetry: max_kyi = nk/2+1
    else: max_kyi = nk
    numpy.transpose(Qkw[:,kxi,:])[0:max_kyi,:] = pool.map(f_,
                                                      [( deepcopy(Qktau[:,kxi,kyi]),
                                                         beta, ntau, n_iw, statistic
                                                       )\
                                                       for kyi in range(max_kyi)])  
  pool.close()      

  if use_IBZ_symmetry: 
    for wi in range(nw):
      IBZ.copy_by_weak_symmetry(Qkw[wi,:,:], nk)
  return Qkw
def flexible_Gweiss_iw_from_Gweiss_iw_Gijw_and_G_imp_iw(Gweiss_iw, Gijw, G_imp_iw, mapping = lambda C,i,j: [0,0], 
                                                        sign=1, sign_up_to=-1):      
  if mpi.is_master_node(): print "flexible_Gweiss_iw_from_Gweiss_iw_Gijw_and_G_imp_iw"
  assert 'up' in Gijw.keys(), "this assumes there is only one block in lattice functions. should be generalized for magnetized calculations" 
  block_names = [name for name,g in Gweiss_iw]   
  for C in block_names:
    blockwise_flexible_Gweiss_iw_from_Gweiss_iw_Gijw_and_G_imp_iw(
        Gweiss_iw[C],Gijw['up'],G_imp_iw[C], mapping= lambda i,j: mapping(C,i,j), sign=sign, sign_up_to=sign_up_to ) 
  if mpi.is_master_node(): print "done!"
def blockwise_get_Gkw_from_iws_mu_epsiolonk_and_Sigmakw(iws,mu,epsilonk,Sigmakw):
  if mpi.is_master_node(): print "blockwise_get_Gkw_from_iws_mu_epsiolonk_and_Sigmakw...",
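  # What follows builds the lattice Green's function in place:
  # G_k(iw) = [ iw + mu - epsilon_k - Sigma_k(iw) ]^{-1}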
  Gkw = -Sigmakw[:,:,:]
  numpy.transpose(Gkw)[:] += iws[:]
  Gkw[:,:,:] += mu
  Gkw[:,:,:] -= epsilonk[:,:]
  Gkw **= -1.0
  if mpi.is_master_node(): print "done!"
  return Gkw 
Example #12
    def convert_gf(self,G,G_struct,ish=0,show_warnings=True,**kwargs):
        """ Convert BlockGf from its structure to this structure.

        .. warning::

            Elements that are zero in the new structure due to
            the new block structure will be just ignored, thus
            approximated to zero.

        Parameters
        ----------
        G : BlockGf
            the Gf that should be converted
        G_struct : GfStructure
            the structure of that G
        ish : int
            shell index
        show_warnings : bool or float
            whether to show warnings when elements of the Green's
            function get thrown away.
            If a float is given, it sets the threshold for the magnitude
            of an element about to be thrown away to trigger a warning
            (default: 1.e-10).
        **kwargs :
            options passed to the constructor for the new Gf
        """

        warning_threshold = 1.e-10
        if isinstance(show_warnings, float):
            warning_threshold = show_warnings
            show_warnings = True

        G_new = self.create_gf(ish=ish,**kwargs)
        for block in G_struct.gf_struct_solver[ish].keys():
            for i1 in G_struct.gf_struct_solver[ish][block]:
                for i2 in G_struct.gf_struct_solver[ish][block]:
                    i1_sumk = G_struct.solver_to_sumk[ish][(block,i1)]
                    i2_sumk = G_struct.solver_to_sumk[ish][(block,i2)]
                    i1_sol = self.sumk_to_solver[ish][i1_sumk]
                    i2_sol = self.sumk_to_solver[ish][i2_sumk]
                    if i1_sol[0] is None or i2_sol[0] is None:
                        if show_warnings:
                            if mpi.is_master_node():
                                warn(('Element {},{} of block {} of G is not present '+
                                    'in the new structure').format(i1,i2,block))
                        continue
                    if i1_sol[0]!=i2_sol[0]:
                        if show_warnings and np.max(np.abs(G[block][i1,i2].data)) > warning_threshold:
                            if mpi.is_master_node():
                                warn(('Element {},{} of block {} of G is approximated '+
                                    'to zero to match the new structure. Max abs value: {}').format(
                                        i1,i2,block,np.max(np.abs(G[block][i1,i2].data))))
                        continue
                    G_new[i1_sol[0]][i1_sol[1],i2_sol[1]] = \
                            G[block][i1,i2]
        return G_new
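# Hedged usage sketch (not from the source): 'block_structure' stands for an instance of this
# class and 'old_struct' for the GfStructure that G currently has; both names are placeholders.
# Passing a float as show_warnings sets the warning threshold for dropped elements.
G_new = block_structure.convert_gf(G, old_struct, ish=0, show_warnings=1.e-8)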
def triangular_full_fill_Sigmaijkw_periodized(Sigmaijkw, Sigma_imp_iw, ks):
    if mpi.is_master_node(): print "triangular_full_fill_Sigmaijkw_periodized"
    assert len(Sigmaijkw.keys())==1, "must be only one block in Sigmaijkw"
    impkeys = [name for name,g in Sigma_imp_iw]    
    assert len(impkeys)==1, "must be only one block in Sigma_imp_iw"      
    impkey = impkeys[0]
    numpy.transpose(Sigmaijkw[Sigmaijkw.keys()[0]])[:,:,:] = numpy.transpose(Sigma_imp_iw[impkey].data)[:,:,:]    
    Nc = numpy.shape(Sigmaijkw[Sigmaijkw.keys()[0]])[1]
    if mpi.is_master_node(): print "Nc =",Nc
    if Nc==2:
      s = Sigma_imp_iw[impkey].data[:,0,1]
      z = deepcopy(s)
      z[:] = 0    
      for kxi, kx in enumerate(ks):
        for kyi, ky in enumerate(ks):
          skAB = s*(exp(1j*(ky-kx))+exp(-1j*kx)+exp(1j*ky))
          skBA = numpy.conj(skAB)

          skAA = s*cos(ky)
          skBB = skAA

          B =       [[skAA,  skAB],
                     [skBA,  skBB]]

          B = numpy.array(B)
          numpy.transpose(Sigmaijkw[Sigmaijkw.keys()[0]])[kyi,kxi,:,:,:] += B

    elif Nc==4:  
      #ipss = triangular_identical_pair_sets(Lx,Ly)  # unused here; Lx, Ly are not defined in this snippet
      s = (4*Sigma_imp_iw[impkey].data[:,0,1]+Sigma_imp_iw[impkey].data[:,1,2])/5.0
      sp = Sigma_imp_iw[impkey].data[:,0,3]    
      z = deepcopy(s)
      z[:] = 0    
      for kxi, kx in enumerate(ks):
        for kyi, ky in enumerate(ks):
          skx = s*exp(1j*kx)
          sky = s*exp(1j*ky)
          cskx = s*exp(-1j*kx)
          csky = s*exp(-1j*ky)

          B =       [[z,    skx,  sky,  z  ],
                     [cskx, z,    z,    sky],
                     [csky, z,    z,    skx],
                     [z,    csky, cskx, z  ]]
          spkAD = sp*numpy.conj(  exp(-1j*kx)+exp(-1j*ky) + exp(-1j*(kx+ky))  )
          spkDA = sp*(  exp(-1j*kx)+exp(-1j*ky) + exp(-1j*(kx+ky))  )
          skBC = s*numpy.conj(  exp(1j*kx)+exp(-1j*ky) + exp(1j*(kx-ky))  )
          skCB = s*(  exp(1j*kx)+exp(-1j*ky) + exp(1j*(kx-ky))  )

          C =       [[z,     z,     z,     spkAD ],
                     [z,     z,     skBC,  z     ],
                     [z,     skCB,  z,     z     ],
                     [spkDA, z,     z,     z     ]]

          BC = numpy.array(B)+numpy.array(C)
          numpy.transpose(Sigmaijkw[Sigmaijkw.keys()[0]])[kyi,kxi,:,:,:] += BC
Example #14
 def selfenergy(data, frozen_boson):
   if mpi.is_master_node():
     print "selfenergy: frozen_bozon: ",frozen_boson
   data.get_Sigma_loc_from_local_bubble()
   if not frozen_boson: data.get_P_loc_from_local_bubble()
   data.get_Sigmakw()
   data.get_Xkw() #if using optimized scheme make sure this is the order of calls (Sigmakw, Xkw then Pqnu)
   if not frozen_boson: data.get_Pqnu()
   if mpi.is_master_node():
     print "done with selfenergy"
Example #15
 def monitor(self):
   try:  
     self.values.append(copy.deepcopy(self.mq()))    
   except:
     if mpi.is_master_node(): print "monitor: ",self.h5key," could not read the value. appending nan..."
     self.values.append(float('nan'))    
   if mpi.is_master_node() and (not (self.archive_name is None)):
     A = HDFArchive(self.archive_name)
     A[self.h5key] = self.values
     del A
     print "monitor: ",self.h5key," : ", self.values[-1]
        def run(solver,
                U,
                G0_IaJb_iw,
                n_cycles=20000,
                max_time=5 * 60,
                solver_data_package=None,
                only_sign=False):

            if solver_data_package is None: solver_data_package = {}

            solver_data_package['solve_parameters'] = {}
            solver_data_package['solve_parameters']['U'] = U
            solver_data_package['solve_parameters']['max_time'] = max_time
            solver_data_package['solve_parameters']["random_name"] = ""
            solver_data_package['solve_parameters']["length_cycle"] = 50
            solver_data_package['solve_parameters']["n_warmup_cycles"] = 50  #0
            solver_data_package['solve_parameters']["n_cycles"] = 100000000
            solver_data_package['solve_parameters']["measure_G_l"] = True
            solver_data_package['solve_parameters']["move_double"] = True
            solver_data_package['solve_parameters']["perform_tail_fit"] = True
            solver_data_package['solve_parameters']["fit_max_moment"] = 2

            print solver_data_package['solve_parameters']

            solver_data_package['G0_IaJb_iw'] = G0_IaJb_iw

            solver_data_package['tag'] = 'run'

            if mpi.size > 1:
                if mpi.is_master_node():
                    print "broadcasting solver_data_package!!"
                solver_data_package = mpi.bcast(solver_data_package)

            if mpi.is_master_node(): print "about to run "
            dct = deepcopy(solver_data_package['solve_parameters'])
            del dct['U']

            get_K_container, get_gf_struct, get_h_int, convert_to_K_space, convert_to_IJ_space = Kspace_plaquette(
                G0_IaJb_iw)
            convert_to_K_space(solver.G0_iw, G0_IaJb_iw)
            h_int = get_h_int(U)
            try:
                solver.solve(h_int=h_int, **dct)
                if mpi.is_master_node():
                    print "average sign: ", solver.average_sign
                Sigma_IaJb_iw = G0_IaJb_iw.copy()
                convert_to_IJ_space(Sigma_IaJb_iw, solver.Sigma_iw)
                return Sigma_IaJb_iw
            except Exception as e:
                A = HDFArchive('black_box', 'w')
                A['solver'] = solver
                del A
                raise e
Example #17
 def monitor(self):
     try:
         self.values.append(copy.deepcopy(self.mq()))
     except:
         if mpi.is_master_node():
             print "monitor: ", h5key, " cound not read the value. appending nan..."
         self.values.append(float('nan'))
     if mpi.is_master_node() and (not (self.archive_name is None)):
         A = HDFArchive(self.archive_name)
         A[self.h5key] = self.values
         del A
         print "monitor: ", self.h5key, " : ", self.values[-1]
Example #18
 def selfenergy(data, frozen_boson):
   if mpi.is_master_node():
     print "selfenergy: frozen_bozon: ",frozen_boson
   data.Sigma_loc_iw << data.Sigma_imp_iw 
   #for U in data.fermionic_struct.keys(): 
     #fit_and_remove_constant_tail(data.Sigma_loc_iw[U], max_order=3) #Sigma_loc doesn't contain Hartree shift
   data.P_loc_iw << data.P_imp_iw  
   data.get_Sigmakw()
   data.get_Xkw() #if using optimized scheme make sure this is the order of calls (Sigmakw, Xkw then Pqnu)
   if not frozen_boson: data.get_Pqnu()
   if mpi.is_master_node():
     print "done with selfenergy"
Example #19
    def __init__(self, beta, gf_struct, n_iw, spin_orbit=False, verbose=True):

        self.beta = beta
        self.gf_struct = gf_struct
        self.n_iw = n_iw
        self.spin_orbit = spin_orbit
        self.verbose = verbose

        if self.verbose and mpi.is_master_node():
            print "\n*** Hubbard I solver using Pomerol library"
            print "*** gf_struct =", gf_struct
            print "*** n_iw =", n_iw

        # get spin_name and orb_names from gf_struct
        self.__analyze_gf_struct(gf_struct)

        if self.verbose and mpi.is_master_node():
            print "*** spin_names =", self.spin_names
            print "*** orb_names  =", self.orb_names

        # check spin_names
        for sn in self.spin_names:
            if not spin_orbit:
                assert sn in ('down', 'dn', 'up')  # become either 'down' or 'up'
            if spin_orbit:
                assert sn in ('down', 'dn', 'ud')  # all become 'down'

        # Conversion from TRIQS to Pomerol notation for operator indices
        # TRIQS: ('dn', '-2') --> Pomerol: ('atom', 0, 'down')
        # NOTE: When spin_orbit is true, only spin 'down' is used.
        mkind = get_mkind(True, None)
        index_converter = {
            mkind(sn, bn):
            ("atom", bi, "down" if sn == "dn" or sn == "ud" else sn)
            for sn, (bi,
                     bn) in product(self.spin_names, enumerate(self.orb_names))
        }

        self.__ed = PomerolED(index_converter, verbose, spin_orbit)

        # init G_iw
        glist = lambda: [
            GfImFreq(indices=self.orb_names, beta=beta, n_points=n_iw)
            for block, inner in gf_struct.items()
        ]
        self.G_iw = BlockGf(name_list=self.spin_names,
                            block_list=glist(),
                            make_copies=False)
        self.G_iw.zero()
        self.G0_iw = self.G_iw.copy()
        self.Sigma_iw = self.G_iw.copy()
 def impurity(data,
              U,
              symmetrize_quantities=True,
              alpha=0.5,
              delta=0.1,
              automatic_alpha_and_delta=False,
              n_cycles=20000,
              max_times={'1x1': 5 * 60},
              solver_data_package=None,
              Cs=[],
              bosonic_measures=False,
              static_int_only=False):
     if mpi.is_master_node():
         print "nested_mains.impurity. max_times", max_times
     data.Sigma_imp_iw << 0
     for C in (data.impurity_struct.keys() if Cs == [] else Cs):
         solver_struct = {
             'up': data.impurity_struct[C],
             'dn': data.impurity_struct[C]
         }
         for key in solver_struct.keys():
             data.solvers[C].G0_iw[key] << data.Gweiss_iw[C]
         if not hasattr(data, "Uweiss_dyn_iw") or static_int_only:
             data.solvers[C].D0_iw << 0.0
             data.solvers[C].Jperp_iw << 0.0
         else:
             nested_edmft_mains.prepare_D0_iw(data, C)
             nested_edmft_mains.prepare_Jperp_iw(data, C)
         if automatic_alpha_and_delta:
             if mpi.is_master_node():
                 print "about to optimize alpha and delta for impurity", C
             shorttime = min(600, max(30, int(max_times[C] / 100)))
             if mpi.is_master_node():
                 print "time per alpha,delta", shorttime
             alpha, delta, max_sign = nested_mains.optimize_alpha_and_delta(
                 data, C, U, shorttime, solver_data_package)
             if mpi.is_master_node():
                 print "%s >>>> best (alpha=%s,delta=%s): max_sign=%s" % (
                     C, alpha, delta, max_sign)
         if mpi.is_master_node():
             print "nested_mains.impurity: launching impurity", C
         solvers.ctint.run(data,
                           C,
                           U,
                           symmetrize_quantities,
                           alpha,
                           delta,
                           n_cycles,
                           max_times[C],
                           solver_data_package,
                           bosonic_measures=bosonic_measures)
Example #21
def solve_lattice_bse_e_k_sigma_w(mu,
                                  e_k,
                                  sigma_w,
                                  gamma_wnn,
                                  tail_corr_nwf=-1):

    kmesh = e_k.mesh
    fmesh_huge = sigma_w.mesh
    bmesh = gamma_wnn.mesh.components[0]
    fmesh = gamma_wnn.mesh.components[1]

    nk = len(kmesh)
    nw = (len(bmesh) + 1) / 2
    nwf = len(fmesh) / 2
    nwf_sigma = len(fmesh_huge) / 2

    if mpi.is_master_node():
        print tprf_banner(), "\n"

        print 'Lattice BSE with local vertex approximation.\n'
        print 'nk  =', nk
        print 'nw  =', nw
        print 'nwf           =', nwf
        print 'nwf_sigma     =', nwf_sigma
        print 'nwf_chi0_tail =', tail_corr_nwf
        print

    # -- Lattice BSE calc with built in trace using g_wk
    from triqs_tprf.lattice import chiq_sum_nu_from_e_k_sigma_w_and_gamma_PH

    chi_kw = chiq_sum_nu_from_e_k_sigma_w_and_gamma_PH(
        mu, e_k, sigma_w, gamma_wnn, tail_corr_nwf=tail_corr_nwf)

    return chi_kw
def SIAM(U, e_f, V, D, beta, filename="qmc_results.h5"):
    Delta = V**2 * Flat(D)
    N_MC = 1e5
    l_max = 10
    independent_samples = 16
    for l in range(l_max + 1):
        for i in range(independent_samples):
            S = Solver(beta=beta, gf_struct={'up': [0], 'down': [0]})
            # Initialize the non-interacting Green's function S.G0_iw
            for name, g0 in S.G0_iw:
                g0 << inverse(iOmega_n - e_f - Delta)
            # Run the solver. The results will be in S.G_tau, S.G_iw and S.G_l
            S.solve(
                h_int=U * n('up', 0) * n('down', 0),  # Local Hamiltonian
                n_cycles=int(N_MC / 2**l),  # Number of QMC cycles
                length_cycle=2**l,  # Length of one cycle
                n_warmup_cycles=int(N_MC / 2**l / 100),  #  Warmup cycles
                measure_g_tau=False,  #  Don't measure G_tau
                measure_g_l=False,  #  Don't measure G_l
                perform_post_proc=False,  #  Don't measure G_iw
                use_norm_as_weight=True,  # Necessary option for the measurement of the density matrix
                measure_density_matrix=True,  # Measure reduced impurity density matrix
                random_seed=i * 8521 + l * 14187 + mpi.rank * 7472)  # Random seed, very important!
            # Save the results in an HDF5 file (only on the master node)
            if mpi.is_master_node():
                with HDFArchive(filename) as Results:
                    Results["rho_l{}_i{}".format(l, i)] = S.density_matrix
Example #23
 def f(G1,G2) :
     #print abs(G1.data - G2.data)
     dS = max(abs(G1.data - G2.data).flatten())
     aS = max(abs(G1.data).flatten())
     if mpi.is_master_node():
         print "  Distances:", dS, " vs ", aS * dist
     return dS <= aS*dist
Example #24
  def lattice(data, funcG, funcW, n): #the only option - we need Gkw and Wqnu for self energy in the next iteration
    #data.get_Gkw(funcG) #gets Gkw from G0 and Sigma
    def func(var, data):
      mu = var[0]
      dt = data[0]
      #print "func call! mu: ", mu, " n: ",dt.ns['up']
      n= data[1] 
      dt.mus['up'] = mu
      dt.mus['down'] = dt.mus['up']
      dt.get_Gkw_direct(funcG) #gets Gkw from w, mu, epsilon and Sigma and X
      dt.get_G_loc() #gets G_loc from Gkw
      dt.get_n_from_G_loc()     
      #print "funcvalue: ",-abs(n - dt.ns['up'])  
      return 1.0-abs(n - dt.ns['up'])  
    if not MASTER_SLAVE_ARCHITECTURE: mpi.barrier()
    varbest, funcvalue, iterations = amoeba(var=[data.mus['up']],
                                              scale=[0.01],
                                              func=func, 
                                              data = [data, n],
                                              itmax=30,
                                              ftolerance=1e-2,
                                              xtolerance=1e-2)
    if mpi.is_master_node():
      print "mu best: ", varbest
      print "-abs(diff n - data.n): ", funcvalue
      print "iterations used: ", iterations

    data.get_Gtildekw() #gets Gkw-G_loc

    data.get_Wqnu_from_func(funcW) #gets Wqnu from P and J 
    data.get_W_loc() #gets W_loc from Wqnu
    data.get_Wtildeqnu() #gets Wqnu-W_loc, 
        def initialize_solver(
            Q_IaJb_iw_template,
            solver_data_package=None,
            ntau=100000,
        ):
            if solver_data_package is None: solver_data_package = {}

            niw = len(Q_IaJb_iw_template.data[:, 0, 0]) / 2
            beta = Q_IaJb_iw_template.beta

            get_K_container, get_gf_struct, get_h_int, convert_to_K_space, convert_to_IJ_space = Kspace_plaquette(
                Q_IaJb_iw_template)

            gf_struct = get_gf_struct()

            assert ntau > 2 * niw, "solvers.ctint.initialize_solvers: ERROR! ntau too small!!"

            solver_data_package['constructor_parameters'] = {}
            solver_data_package['constructor_parameters']['beta'] = beta
            solver_data_package['constructor_parameters']['n_iw'] = niw
            solver_data_package['constructor_parameters']['n_tau'] = ntau
            solver_data_package['constructor_parameters'][
                'gf_struct'] = gf_struct
            solver_data_package['tag'] = 'construct'

            if mpi.is_master_node():
                print "solver_data_package:", solver_data_package

            if mpi.size > 1:
                solver_data_package = mpi.bcast(solver_data_package)

            return CthybSolver(**solver_data_package['constructor_parameters'])
Example #26
def print_block_sym(SK, shell_multiplicity):
    # Summary of block structure finder and determination of shell_multiplicity
    shell_multiplicity = [0 for icrsh in range(SK.n_inequiv_shells)]
    if mpi.is_master_node():
        print "\n number of ineq. correlated shells: %d" % (
            SK.n_inequiv_shells)
        # correlated shells and their structure
        print "\n block structure summary"
        for icrsh in range(SK.n_inequiv_shells):
            shlst = []
            for ish in range(SK.n_corr_shells):
                if SK.corr_to_inequiv[ish] == icrsh: shlst.append(ish)
            shell_multiplicity[icrsh] = len(shlst)
            print " -- Shell type #%3d : " % icrsh + format(shlst)
            print "  | shell multiplicity " + str(shell_multiplicity[icrsh])
            print "  | block struct. : " + format(SK.gf_struct_solver[icrsh])
            print "  | deg. orbitals : " + format(SK.deg_shells[icrsh])

        print "\n rotation matrices "
        # rotation matrices
        for icrsh in range(SK.n_corr_shells):
            n_orb = SK.corr_shells[icrsh]['dim']
            print 'rot_mat[%2d] ' % (icrsh) + 'real part'.center(
                9 * n_orb) + '  ' + 'imaginary part'.center(9 * n_orb)
            rot = np.matrix(SK.rot_mat[icrsh])
            for irow in range(n_orb):
                fmt = '{:9.5f}' * n_orb
                row = np.real(rot[irow, :]).tolist()[0] + np.imag(
                    rot[irow, :]).tolist()[0]
                print('           ' + fmt + '  ' + fmt).format(*row)

        print '\n'
Example #27
    def repack(self):
        """
        Calls the h5repack routine in order to reduce the file size of the hdf5 archive.

        Note
        ----
        Should only be used before the first invocation of HDFArchive in the program,
        otherwise the hdf5 linking will be broken.

        """

        import subprocess

        if not (mpi.is_master_node()):
            return
        mpi.report("Repacking the file %s" % self.hdf_file)

        retcode = subprocess.call([
            hdf5_command_path + "/h5repack",
            "-i%s" % self.hdf_file, "-otemphgfrt.h5"
        ])
        if retcode != 0:
            mpi.report("h5repack failed!")
        else:
            subprocess.call(["mv", "-f", "temphgfrt.h5", "%s" % self.hdf_file])
Example #28
def SIAM(U, e_f, V, D, beta, filename="qmc_results.h5"):
    # Create hybridization function
    Delta = V**2 * Flat(D)

    # Construct the impurity solver with the inverse temperature
    # and the structure of the Green's functions
    S = Solver(beta=beta, gf_struct={'up': [0], 'down': [0]}, n_l=50)

    # Initialize the non-interacting Green's function S.G0_iw
    for name, g0 in S.G0_iw:
        g0 << inverse(iOmega_n - e_f - Delta)

    # Run the solver. The results will be in S.G_tau, S.G_iw and S.G_l
    S.solve(
        h_int=U * n('up', 0) * n('down', 0),  # Local Hamiltonian
        n_cycles=2000000,  # Number of QMC cycles
        length_cycle=50,  # Length of one cycle
        n_warmup_cycles=20000,  # Warmup cycles
        measure_g_l=True,  # Measure G_l (representation of G in terms of Legendre polynomials)
        use_norm_as_weight=True,  # Necessary option for the measurement of the density matrix
        measure_density_matrix=True,  # Measure reduced impurity density matrix
        measure_pert_order=True)  # Measure histogram of k

    # Save the results in an HDF5 file (only on the master node)
    if mpi.is_master_node():
        with HDFArchive(filename, 'w') as Results:
            Results["G_tau"] = S.G_tau
            Results["G_iw"] = S.G_iw
            Results["G_l"] = S.G_l
            Results["rho"] = S.density_matrix
            Results["k_histogram"] = S.perturbation_order_total
            Results["average_sign"] = S.average_sign
Example #29
 def f(G1, G2):
     #print abs(G1.data - G2.data)
     dS = max(abs(G1.data - G2.data).flatten())
     aS = max(abs(G1.data).flatten())
     if mpi.is_master_node():
         print "  Distances:", dS, " vs ", aS * dist
     return dS <= aS * dist
def full_fill_Sigmaijkw_periodized(Sigmaijkw, Sigma_imp_iw, ks):
    if mpi.is_master_node(): print "full_fill_Sigmaijkw_periodized"
    assert len(Sigmaijkw.keys())==1, "must be only one block in Sigmaijkw"
    impkeys = [name for name,g in Sigma_imp_iw]    
    assert len(impkeys)==1, "must be only one block in Sigma_imp_iw"      
    impkey = impkeys[0]
    numpy.transpose(Sigmaijkw[Sigmaijkw.keys()[0]])[:,:,:] = numpy.transpose(Sigma_imp_iw[impkey].data)[:,:,:]    
       
    s = Sigma_imp_iw[impkey].data[:,0,1]
    sp = Sigma_imp_iw[impkey].data[:,0,3]    
    z = deepcopy(s)
    z[:] = 0    
    for kxi, kx in enumerate(ks):
      for kyi, ky in enumerate(ks):
        skx = s*exp(1j*kx)
        sky = s*exp(1j*ky)
        cskx = s*exp(-1j*kx)
        csky = s*exp(-1j*ky)

        B =       [[z,    skx,  sky,  z  ],
                   [cskx, z,    z,    sky],
                   [csky, z,    z,    skx],
                   [z,    csky, cskx, z  ]]
        spkAD = sp*numpy.conj(  exp(-1j*kx)+exp(-1j*ky) + exp(-1j*(kx+ky))  )
        spkBC = sp*numpy.conj(  exp(1j*kx)+exp(-1j*ky) + exp(1j*(kx-ky))  )
        spkDA = sp*(  exp(-1j*kx)+exp(-1j*ky) + exp(-1j*(kx+ky))  )
        spkCB = sp*(  exp(1j*kx)+exp(-1j*ky) + exp(1j*(kx-ky))  )

        C =       [[z,     z,     z,     spkAD ],
                   [z,     z,     spkBC, z     ],
                   [z,     spkCB, z,     z     ],
                   [spkDA, z,     z,     z     ]]

        BC = numpy.array(B)+numpy.array(C)
        numpy.transpose(Sigmaijkw[Sigmaijkw.keys()[0]])[kyi,kxi,:,:,:] += BC
def full_fill_Gweiss_iw(Gweiss_iw, G_ij_iw, Sigma_imp_iw):
  if mpi.is_master_node(): print "full_fill_Gweiss_iw"
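  # Inverse Dyson equation, frequency by frequency: Gweiss^{-1} = G_ij^{-1} + Sigma_imp,
  # followed by a tail fit of each block.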
  for name,g in Gweiss_iw:
    nw = len(g.data[:,0,0])
    for wi in range(nw):
      g.data[wi,:,:] = inv( inv(G_ij_iw[name].data[wi,:,:]) + Sigma_imp_iw[name].data[wi,:,:] )
    fit_fermionic_gf_tail(g)
Example #32
 def P(fermionic_struct,
       bosonic_struct,
       P,
       G,
       Lambda,
       func,
       su2_symmetry,
       G2=None,
       p={
           '0': 1.0,
           '1': 1.0
       }):
     if G2 is None: G2 = G
     for A in bosonic_struct.keys():
         if not (A in p.keys()):
             if mpi.is_master_node():
                 print "P WARNING: skipping block ", A
             continue
         P[A].fill(0.0)
         for U in fermionic_struct.keys():
             if su2_symmetry and (U != 'up'): continue
             for V in fermionic_struct.keys():
                 if (U != V and A != '+-') or ((U == V) and
                                               (A == '+-')):
                     continue
                 P[A] += p[A] * func(
                     G1=partial(G, key=V),
                     G2=partial(G2, key=U),
                     Lambda=lambda wi1, wi2: Lambda(A, wi2, wi1))
         if su2_symmetry: P[A] *= 2.0
def temporal_inverse_FT(Qkw,
                        beta,
                        ntau,
                        n_iw,
                        nk,
                        statistic='Fermion',
                        use_IBZ_symmetry=True,
                        fit_tail=False,
                        N_cores=1):
    if N_cores == 1:
        return temporal_inverse_FT_single_core(Qkw, beta, ntau, n_iw, nk,
                                               statistic, use_IBZ_symmetry,
                                               fit_tail)
    if mpi.is_master_node(): print "temporal_inverse_FT, N_cores: ", N_cores
    Qktau = numpy.zeros((ntau, nk, nk), dtype=numpy.complex_)

    if use_IBZ_symmetry: max_kxi = nk / 2 + 1
    else: max_kxi = nk
    pool = Pool(processes=N_cores)  # start worker processes
    for kxi in range(max_kxi):
        if use_IBZ_symmetry: max_kyi = nk / 2 + 1
        else: max_kyi = nk
        numpy.transpose(Qktau[:,kxi,:])[0:max_kyi,:] = pool.map(invf_,
                                                          [( Qkw[:,kxi,kyi],
                                                             beta, ntau, n_iw, statistic, fit_tail
                                                            )\
                                                            for kyi in range(max_kyi)])
    pool.close()

    if use_IBZ_symmetry:
        for taui in range(ntau):
            IBZ.copy_by_weak_symmetry(Qktau[taui, :, :], nk)
    return Qktau
        def initialize_solver(
            nambu=False,
            solver_data_package=None,
            beta=None,
            nsites=None,
            niw=None,
            ntau=100000,
        ):
            if solver_data_package is None: solver_data_package = {}

            if nambu:
                gf_struct = {'nambu': range(2 * nsites)}
            else:
                gf_struct = {'up': range(nsites), 'dn': range(nsites)}

            assert ntau > 2 * niw, "solvers.ctint.initialize_solvers: ERROR! ntau too small!!"

            solver_data_package['constructor_parameters'] = {}
            solver_data_package['constructor_parameters']['beta'] = beta
            solver_data_package['constructor_parameters']['n_iw'] = niw
            solver_data_package['constructor_parameters']['n_tau'] = ntau
            solver_data_package['constructor_parameters'][
                'gf_struct'] = gf_struct
            solver_data_package['tag'] = 'construct'

            if mpi.is_master_node():
                print "solver_data_package:", solver_data_package

            if mpi.size > 1:
                solver_data_package = mpi.bcast(solver_data_package)

            return Solver(**solver_data_package['constructor_parameters'])
Example #35
 def __init__(self, archive, *args, **kwargs):
     self.archive = archive
     if not os.path.exists(archive) and mpi.is_master_node():
         archive = HDFArchive(archive, 'w')
         archive.create_group('results')
         archive['results']['n_dmft_loops'] = 0
         del archive
     mpi.barrier()
Example #36
    def transport_coefficient(self, direction, iq, n, beta, method=None):
        r"""
        Calculates the transport coefficient A_n in a given direction for a given :math:`\Omega`. The required members (Gamma_w, directions, Om_mesh) have to be obtained first
        by calling the function :meth:`transport_distribution <pytriqs.applications.dft.sumk_dft_tools.SumkDFTTools.transport_distribution>`. For n>0 A is set to NaN if :math:`\Omega` is not 0.0. 
        
        Parameters
        ----------
        direction : string
           :math:`\alpha\beta` e.g.: 'xx','yy','zz','xy','xz','yz'.
        iq : integer
            Index of :math:`\Omega` point in the member Om_mesh.
        n : integer
            Number of the desired moment of the transport distribution.
        beta : double
            Inverse temperature :math:`\beta`.
        method : string
            Integration method: cubic spline and scipy.integrate.quad ('quad'), simpson rule ('simps'), trapezoidal rule ('trapz'), rectangular integration (otherwise)
            Note that the sampling points of the self-energy are used!
        
        Returns
        -------
        A : double
            Transport coefficient.
        """

        if not (mpi.is_master_node()): return
        
        assert hasattr(self,'Gamma_w'), "transport_coefficient: Run transport_distribution first or load data from h5!"
        
        if (self.Om_mesh[iq] == 0.0 or  n == 0.0):
            A = 0.0
            # setup the integrand
            if (self.Om_mesh[iq] == 0.0):
                A_int = self.Gamma_w[direction][iq] * (self.fermi_dis(self.omega,beta) * self.fermi_dis(-self.omega,beta)) * (self.omega*beta)**n
            elif (n == 0.0):
                A_int = self.Gamma_w[direction][iq] * (self.fermi_dis(self.omega,beta) - self.fermi_dis(self.omega+self.Om_mesh[iq],beta))/(self.Om_mesh[iq]*beta)
            
            # w-integration
            if method == 'quad':
                # quad on interpolated w-points with cubic spline
                A_int_interp = interp1d(self.omega,A_int,kind='cubic')
                A = quad(A_int_interp, min(self.omega), max(self.omega), epsabs=1.0e-12,epsrel=1.0e-12,limit = 500)
                A = A[0]
            elif method == 'simps':
                # simpson rule for w-grid
                A = simps(A_int,self.omega)
            elif method == 'trapz':
                # trapezoidal rule for w-grid
                A = numpy.trapz(A_int,self.omega)
            else:
                # rectangular integration for w-grid (original implementation)
                d_w = self.omega[1] - self.omega[0]
                for iw in xrange(self.Gamma_w[direction].shape[1]):
                    A += A_int[iw]*d_w
            A = A * numpy.pi * (2.0-self.SP)
        else:
            A = numpy.nan
        return A
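# Hedged usage sketch (not from the source): 'SK' stands for a SumkDFTTools instance on which
# transport_distribution has already been run; beta and method are illustrative choices.
A0_xx = SK.transport_coefficient('xx', iq=0, n=0, beta=40.0, method='trapz')
A1_xx = SK.transport_coefficient('xx', iq=0, n=1, beta=40.0, method='trapz')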
 def __save_eal(self,Filename,it):
     if mpi.is_master_node():
         f=open(Filename,'a')
         f.write('\neff. atomic levels, Iteration %s\n'%it)
         for i in range(self.Nlm*self.Nspin):
             for j in range(self.Nlm*self.Nspin):
                 f.write("%10.6f %10.6f   "%(self.ealmat[i,j].real,self.ealmat[i,j].imag))
             f.write("\n")
         f.close()
def spatial_FT_single_core(Qij):        
  if mpi.is_master_node(): print "spatial_FT_single_core"
  n = len(Qij[:,0,0]) 
  nk = len(Qij[0,:,0])    
  Qk = numpy.zeros((n,nk,nk), dtype=numpy.complex_)
        
  Qk[:,:,:] = [ spf(Qij[l,:,:]) for l in range(n)]

  return Qk
Example #39
    def conductivity_and_seebeck(self, beta, method=None):
        r"""
        Calculates the Seebeck coefficient and the optical conductivity by calling 
        :meth:`transport_coefficient <pytriqs.applications.dft.sumk_dft_tools.SumkDFTTools.transport_coefficient>`. 
        The required members (Gamma_w, directions, Om_mesh) have to be obtained first by calling the function 
        :meth:`transport_distribution <pytriqs.applications.dft.sumk_dft_tools.SumkDFTTools.transport_distribution>`. 

        Parameters
        ----------
        beta : double
            Inverse temperature :math:`\beta`.

        Returns
        -------
        optic_cond : dictionary of double vectors
            Optical conductivity in each direction and frequency given by Om_mesh.

        seebeck : dictionary of double
            Seebeck coefficient in each direction. If zero is not present in Om_mesh the Seebeck coefficient is set to NaN.
        """

        if not (mpi.is_master_node()):
            return

        assert hasattr(
            self, 'Gamma_w'), "conductivity_and_seebeck: Run transport_distribution first or load data from h5!"
        n_q = self.Gamma_w[self.directions[0]].shape[0]

        A0 = {direction: numpy.full((n_q,), numpy.nan)
              for direction in self.directions}
        A1 = {direction: numpy.full((n_q,), numpy.nan)
              for direction in self.directions}
        self.seebeck = {direction: numpy.nan for direction in self.directions}
        self.optic_cond = {direction: numpy.full(
            (n_q,), numpy.nan) for direction in self.directions}

        for direction in self.directions:
            for iq in xrange(n_q):
                A0[direction][iq] = self.transport_coefficient(
                    direction, iq=iq, n=0, beta=beta, method=method)
                A1[direction][iq] = self.transport_coefficient(
                    direction, iq=iq, n=1, beta=beta, method=method)
                print "A_0 in direction %s for Omega = %.2f    %e a.u." % (direction, self.Om_mesh[iq], A0[direction][iq])
                print "A_1 in direction %s for Omega = %.2f    %e a.u." % (direction, self.Om_mesh[iq], A1[direction][iq])
                if ~numpy.isnan(A1[direction][iq]):
                    # Seebeck is overwritten if there is more than one Omega =
                    # 0 in Om_mesh
                    self.seebeck[direction] = - \
                        A1[direction][iq] / A0[direction][iq] * 86.17
            self.optic_cond[direction] = beta * \
                A0[direction] * 10700.0 / numpy.pi
            for iq in xrange(n_q):
                print "Conductivity in direction %s for Omega = %.2f       %f  x 10^4 Ohm^-1 cm^-1" % (direction, self.Om_mesh[iq], self.optic_cond[direction][iq])
                if not (numpy.isnan(A1[direction][iq])):
                    print "Seebeck in direction      %s for Omega = 0.00      %f  x 10^(-6) V/K" % (direction, self.seebeck[direction])

        return self.optic_cond, self.seebeck
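# Hedged usage sketch (not from the source): as above, 'SK' is a SumkDFTTools instance with
# Gamma_w, directions and Om_mesh already set up by transport_distribution.
optic_cond, seebeck = SK.conductivity_and_seebeck(beta=40.0, method='trapz')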
Example #40
 def __init__(self, **kwargs):
     self.parameters = dict()
     for i in kwargs:
         self.parameters[i] = kwargs[i]
     super(CDmft, self).__init__(**self.parameters)
     if mpi.is_master_node():
         archive = HDFArchive(self.parameters['archive'], 'a')
         archive['CDmft_version'] = CDmft._version
         del archive
Example #41
    def save(self):
        """Saves some quantities into an HDF5 arxiv"""

        if not (mpi.is_master_node()): return # do nothing on nodes

        ar=HDFArchive(self.hdf_file,'a')
        ar[self.lda_data]['chemical_potential'] = self.chemical_potential
        ar[self.lda_data]['dc_energ'] = self.dc_energ
        ar[self.lda_data]['dc_imp'] = self.dc_imp
        del ar      
def spatial_FT(Qij, N_cores=1):   
  if N_cores == 1: return spatial_FT_single_core(Qij)     
  if mpi.is_master_node(): print "spatial_FT, N_cores: ",N_cores
  n = len(Qij[:,0,0]) 
  nk = len(Qij[0,:,0])    
  Qk = numpy.zeros((n,nk,nk), dtype=numpy.complex_)
        
  pool = Pool(processes=N_cores)              # start worker processes             
  Qk[:,:,:] = pool.map(spf, [ Qij[l,:,:] for l in range(n)])  
  pool.close()      
  return Qk
Example #43
def is_vasp_running(vasp_pid):
    """
    Tests if VASP initial process is still alive.
    """
    pid_exists = False
    if mpi.is_master_node():
        try:
            os.kill(vasp_pid, 0)
        except OSError, e:
            pid_exists = e.errno == errno.EPERM
        else:
            pid_exists = True
    # Broadcast the result so every node gets the same answer (assumed missing tail of this snippet).
    pid_exists = mpi.bcast(pid_exists)
    return pid_exists
Example #44
  def __init__(self, monitored_quantity, accuracy=3e-5, func=None, struct=None, archive_name=None, h5key='diffs'):
    #monitored quantity needs to be a function returning an object in case the object is rewritten (changed address)
    self.mq = monitored_quantity

    self.accuracy = accuracy
    self.diffs = []
    self.func = func

    self.archive_name = archive_name
    self.struct = struct
    self.h5key = h5key

    if mpi.is_master_node(): print "converger initialized: archive_name: %s h5key: %s accuracy: %s"%(archive_name,h5key,accuracy)
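# Hedged usage sketch: the enclosing class name is not shown in this snippet, so 'Converger'
# and the monitored object 'data.G_loc_iw' are placeholders. The monitored quantity is passed
# as a callable so the converger always reads the current object, even if it gets reallocated.
conv = Converger(monitored_quantity=lambda: data.G_loc_iw,
                 accuracy=3e-5, archive_name="results.h5", h5key='diffs')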
Example #45
 def test_trilex(data):
   data.__class__ = trilex_data
   data.promote(data.n_iw/2, data.n_iw/2)
   solvers.cthyb.run(data, no_fermionic_bath=False, symmetrize_quantities=True, 
                           trilex=True, n_w_f=data.n_iw_f, n_w_b=data.n_iw_b,
                           n_cycles=20000, max_time=10*60, hartree_shift = 0.0 )
   data.get_chi3_imp()
   data.get_chi3tilde_imp()
   data.get_Lambda_imp()
   data.get_Sigma_test()
   data.get_P_test()
   if mpi.is_master_node():
     data.dump_test(suffix='-final')
Example #46
      def func(var, data):
        mu = var[0]
        dt = data[0]
        #print "func call! mu: ", mu, " n: ",dt.ns['up']
        n= data[1] 
        dt.mus['up'] = mu
        if 'down' in dt.fermionic_struct.keys(): dt.mus['down'] = dt.mus['up']
        get_n(dt)        #print "funcvalue: ",-abs(n - dt.ns['up'])  

        val = 1.0-abs(n - dt.ns['up'])  
        if mpi.is_master_node(): print "amoeba func call: val = ",val
        if val != val: return -1e+6
        else: return val
Example #47
 def Sigma( fermionic_struct, bosonic_struct, 
            Sigma, G, W, Lambda, 
            func,
            su2_symmetry, ising_decoupling, 
            p = {'0': 1.0, '1': 1.0, 'z': 1.0, 'c': 2.0},
            overwrite = True ):
   if mpi.is_master_node(): print "MSA: ",MASTER_SLAVE_ARCHITECTURE 
   for U in fermionic_struct.keys():
     if su2_symmetry and U!='up': continue      
     if overwrite: Sigma[U].fill(0.0) 
     for V in fermionic_struct.keys():            
       for A in bosonic_struct.keys():     
         if not (A in p.keys()):
           if mpi.is_master_node(): print "Sigma WARNING: skipping block ",A
           continue 
         if (U!=V and A!='+-')or((U==V)and(A=='+-')): continue
         m = -1.0
         if (A=='1' or A=='z') and (not ising_decoupling): m*=3.0
         #print "p[",A,"]: ", p[A], " m: ",m
         Sigma[U] += p[A] * m * ( func( G1 = partial(G, key=V),   G2 = partial(W, key=A),  Lambda = lambda wi1, wi2: Lambda(A, wi1, wi2)  ) )
   if su2_symmetry and ('down' in fermionic_struct.keys()): 
     Sigma['down'] = copy.deepcopy(Sigma['up'])
Example #48
    def __init__(self, hdf_file, subgroup=None):
        """
        Initialises the class.

        Parameters
        ----------
        hdf_file : string
                   Base name of the hdf5 archive with the symmetry data.
        subgroup : string, optional
                   Name of subgroup storing correlated-shell symmetry data. If not given, it is assumed that
                   the data is stored at the root of the hdf5 archive.
        """

        assert type(
            hdf_file) == StringType, "Symmetry: hdf_file must be a filename."
        self.hdf_file = hdf_file
        things_to_read = ['n_symm', 'n_atoms', 'perm',
                          'orbits', 'SO', 'SP', 'time_inv', 'mat', 'mat_tinv']
        for it in things_to_read:
            setattr(self, it, 0)

        if mpi.is_master_node():
            # Read the stuff on master:
            ar = HDFArchive(hdf_file, 'r')
            if subgroup is None:
                ar2 = ar
            else:
                ar2 = ar[subgroup]

            for it in things_to_read:
                setattr(self, it, ar2[it])
            del ar2
            del ar

        # Broadcasting
        for it in things_to_read:
            setattr(self, it, mpi.bcast(getattr(self, it)))

        # now define the mapping of orbitals:
        # self.orb_map[iorb] = jorb gives the permutation of the orbitals as given in the list, when the
        # permutation of the atoms is done:
        self.n_orbits = len(self.orbits)
        self.orb_map = [[0 for iorb in range(
            self.n_orbits)] for i_symm in range(self.n_symm)]
        for i_symm in range(self.n_symm):
            for iorb in range(self.n_orbits):
                srch = copy.deepcopy(self.orbits[iorb])
                srch['atom'] = self.perm[i_symm][self.orbits[iorb]['atom'] - 1]
                self.orb_map[i_symm][iorb] = self.orbits.index(srch)
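# Hedged usage sketch (not from the source): 'case.h5' is a placeholder filename; the symmetry
# data is assumed to live at the root of the archive, so subgroup is left at its default.
sym = Symmetry('case.h5')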
def temporal_inverse_FT_single_core(Qkw, beta, ntau, n_iw, nk, statistic='Fermion', use_IBZ_symmetry = True, fit_tail = False):        
  if mpi.is_master_node(): print "temporal_inverse_FT_single_core"
  Qktau = numpy.zeros((ntau,nk,nk), dtype=numpy.complex_)
        
  if use_IBZ_symmetry: max_kxi = nk/2+1
  else: max_kxi = nk
  for kxi in range(max_kxi):                    
    if use_IBZ_symmetry: max_kyi = nk/2+1
    else: max_kyi = nk
    numpy.transpose(Qktau[:,kxi,:])[0:max_kyi,:] =  [ invf( Qkw[:,kxi,kyi], beta, ntau, n_iw, statistic, fit_tail )\
                                                        for kyi in range(max_kyi)] 
  if use_IBZ_symmetry: 
    for taui in range(ntau):
      IBZ.copy_by_weak_symmetry(Qktau[taui,:,:], nk)
  return Qktau
def temporal_FT_single_core(Qktau, beta, ntau, n_iw, nk, statistic='Fermion', use_IBZ_symmetry = True):        
  if mpi.is_master_node(): print "temporal_FT_single_core"
  if statistic=='Fermion':
    nw = 2*n_iw
  elif statistic=='Boson':
    nw = 2*n_iw-1
  else:
    if mpi.is_master_node(): 
      print "statistic not implemented"
    quit()

  Qkw = numpy.zeros((nw,nk,nk), dtype=numpy.complex_)
        
  if use_IBZ_symmetry: max_kxi = nk/2+1
  else: max_kxi = nk
  for kxi in range(max_kxi):                    
    if use_IBZ_symmetry: max_kyi = nk/2+1
    else: max_kyi = nk
    numpy.transpose(Qkw[:,kxi,:])[0:max_kyi,:] = [f( deepcopy(Qktau[:,kxi,kyi]), beta, ntau, n_iw, statistic )\
                                                       for kyi in range(max_kyi)] 
  if use_IBZ_symmetry: 
    for wi in range(nw):
      IBZ.copy_by_weak_symmetry(Qkw[wi,:,:], nk)
  return Qkw
Example #51
 def load(self, function_name, loop_nr = -1):
     """
     returns a calculated function from archive
     function_name: 'Sigma_c_iw', 'G_c_iw', ...
     loop_nr: int, -1 gives the last loop nr.
     """
     function = None
     if mpi.is_master_node():
         a = HDFArchive(self.archive, 'r')
         if loop_nr < 0:
             function = a['results'][str(self.next_loop() + loop_nr)][function_name]
         else:
             function = a['results'][str(loop_nr)][function_name]
         del a
     function = mpi.bcast(function)
     return function
    def __repack(self):
        """Calls the h5repack routine, in order to reduce the file size of the hdf5 archive.
           Should only be used BEFORE the first invocation of HDFArchive in the program, otherwise
           the hdf5 linking is broken!!!"""

        import subprocess

        if not (mpi.is_master_node()): return

        mpi.report("Repacking the file %s"%self.hdf_file)

        retcode = subprocess.call(["h5repack","-i%s"%self.hdf_file, "-otemphgfrt.h5"])
        if (retcode!=0):
            mpi.report("h5repack failed!")
        else:
            subprocess.call(["mv","-f","temphgfrt.h5","%s"%self.hdf_file])
Example #53
 def P(     fermionic_struct, bosonic_struct, 
            P, G, Lambda, 
            func, 
            su2_symmetry,
            G2 = None,  p = {'0': 1.0, '1': 1.0} ):
   if G2 is None: G2 = G
   for A in bosonic_struct.keys(): 
     if not (A in p.keys()):
       if mpi.is_master_node(): print "P WARNING: skipping block ",A
       continue
     P[A].fill(0.0)
     for U in fermionic_struct.keys():
       if su2_symmetry and (U!='up'): continue
       for V in fermionic_struct.keys():            
         if (U!=V and A!='+-')or((U==V)and(A=='+-')): continue
         P[A] +=  p[A] * func( G1 = partial(G, key=V),   G2 = partial(G2, key=U),  Lambda = lambda wi1, wi2: Lambda(A, wi2, wi1) )
     if su2_symmetry: P[A]*=2.0
Example #54
 def __init__(self, mutilde, U, alpha, bosonic_struct, ising=False, n=None, ph_symmetry=True): #mutilde is the difference from the half-filled mu, which is not known in advance because it is determined by Uweiss['0']
   #self.lattice = partial(GW.lattice, funcG = dyson.scalar.W_from_P_and_J, funcW = dyson.scalar.W_from_P_and_J)
   if (n is None) or ((n==0.5) and ph_symmetry):
     self.lattice = partial(GW.lattice, funcG =  dict.fromkeys(['up', 'down'], dyson.scalar.G_from_w_mu_epsilon_and_Sigma), 
                                        funcW =  dict.fromkeys(bosonic_struct.keys(), dyson.scalar.W_from_P_and_J) )
   else:
     self.lattice = partial(self.lattice, n = n,  
                                          funcG =  dict.fromkeys(['up', 'down'], dyson.scalar.G_from_w_mu_epsilon_and_Sigma), 
                                          funcW =  dict.fromkeys(bosonic_struct.keys(), dyson.scalar.W_from_P_and_J) )
   if n==0.5 and ph_symmetry: 
     mutilde = 0.0  
     n = None
   self.selfenergy = partial(self.selfenergy, mutilde=mutilde, U=U)
   self.pre_impurity = partial(self.pre_impurity, mutilde=mutilde, U=U, alpha=alpha, ising = ising, n=n)
   self.cautionary = GW.cautionary()    
   self.post_impurity = edmft_tUVJ_pm.post_impurity
   if mpi.is_master_node():
     print "INITIALIZED GW"
Example #55
    def read_input_from_hdf(self, subgrp, things_to_read, optional_things=[]):
        """
        Reads data from the HDF file
        """
        
        retval = True
        # init variables on all nodes:
        for it in things_to_read: exec "self.%s = 0"%it
        for it in optional_things: exec "self.%s = 0"%it
        
        if (mpi.is_master_node()):
            ar=HDFArchive(self.hdf_file,'a')
            if (subgrp in ar):
                # first read the necessary things:
                for it in things_to_read:
                    if (it in ar[subgrp]):
                        exec "self.%s = ar['%s']['%s']"%(it,subgrp,it)
                    else:
                        mpi.report("Loading %s failed!"%it)
                        retval = False
                   
                if ((retval) and (len(optional_things)>0)):
                    # if necessary things worked, now read optional things:
                    retval = {}
                    for it in optional_things:
                        if (it in ar[subgrp]):
                            exec "self.%s = ar['%s']['%s']"%(it,subgrp,it)
                            retval['%s'%it] = True
                        else:
                            retval['%s'%it] = False
            else:
                mpi.report("Loading failed: No %s subgroup in HDF5!"%subgrp)
                retval = False

            del ar

        # now do the broadcasting:
        for it in things_to_read: exec "self.%s = mpi.bcast(self.%s)"%(it,it)
        for it in optional_things: exec "self.%s = mpi.bcast(self.%s)"%(it,it)
        

        retval = mpi.bcast(retval)
               
        return retval
Example #56
  def check(self):
    if self.func is None:
      self.check_gf()
    else:
      self.func(self)

    del self.mq_old
    self.get_initial()
    
    if mpi.is_master_node(): 
      print "converger: ",self.h5key," : ", self.diffs[-1]
      if (not (self.archive_name is None)):
        A = HDFArchive(self.archive_name)
        A[self.h5key] = self.diffs
        del A

    if self.diffs[-1]<self.accuracy:
      return True

    return False