def build_gate(tau=1.0, H=j1j2.J1J2_C4V_BIPARTITE(j1=args.j1, j2=args.j2)):
    """Part of j1j2.py code. H is a class with j1=1.0 and j2=0 by default.
    A simple pi/2 rotation of this gate gives the Hamiltonian on the other bond."""
    s2 = su2.SU2(H.phys_dim, dtype=H.dtype, device=H.device)
    expr_kron = 'ij,ab->iajb'

    # Spin-spin operator
    #
    #  s1|   |s2
    #    |   |
    #   [ S.S ]
    #    |   |
    # s1'|   |s2'
    SS = torch.einsum(expr_kron, s2.SZ(), s2.SZ()) \
        + 0.5 * (torch.einsum(expr_kron, s2.SP(), s2.SM())
                 + torch.einsum(expr_kron, s2.SM(), s2.SP()))
    SS = SS.view(4, 4).contiguous()

    # Diagonalize SS and build the imaginary-time gate Ga = exp(-tau/2 * j1 * SS)
    eig_va, eig_vec = torch.linalg.eigh(SS)
    U = eig_vec
    D = torch.diag(torch.exp(-tau / 2 * args.j1 * eig_va))
    # Ga = U D U^{\dagger}
    Ga = torch.einsum('ij,jk,lk->il', U, D, U)
    Ga = Ga.view(2, 2, 2, 2).contiguous()
    return Ga
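# Standalone sanity check of the gate construction above (a minimal sketch:
# the spin-1/2 operators are written out explicitly instead of using the su2
# helper, and tau, j1 are fixed by hand). The gate from the eigendecomposition
# should agree with a direct matrix exponential:
import torch

SZ = torch.tensor([[0.5, 0.0], [0.0, -0.5]], dtype=torch.float64)
SP = torch.tensor([[0.0, 1.0], [0.0, 0.0]], dtype=torch.float64)  # S^+
SM = SP.t()                                                       # S^-

tau, j1 = 1.0, 1.0
SS = torch.einsum('ij,ab->iajb', SZ, SZ) \
    + 0.5 * (torch.einsum('ij,ab->iajb', SP, SM)
             + torch.einsum('ij,ab->iajb', SM, SP))
SS = SS.reshape(4, 4)

eig_va, eig_vec = torch.linalg.eigh(SS)
Ga = eig_vec @ torch.diag(torch.exp(-tau / 2 * j1 * eig_va)) @ eig_vec.t()

assert torch.allclose(Ga, torch.matrix_exp(-tau / 2 * j1 * SS))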
def test_ctmrg_RVB(self):
    cfg.configure(args)
    torch.set_num_threads(args.omp_cores)

    model = j1j2.J1J2_C4V_BIPARTITE(j1=args.j1, j2=args.j2)
    energy_f = model.energy_1x1_lowmem
    state = read_ipeps_c4v(args.instate)

    def ctmrg_conv_energy(state, env, history, ctm_args=cfg.ctm_args):
        with torch.no_grad():
            if not history:
                history = []
            e_curr = energy_f(state, env, force_cpu=ctm_args.conv_check_cpu)
            history.append([e_curr.item()])

            if len(history) > 1 and \
                    abs(history[-1][0] - history[-2][0]) < ctm_args.ctm_conv_tol:
                return True, history
        return False, history

    ctm_env_init = ENV_C4V(args.chi, state)
    init_env(state, ctm_env_init)

    ctm_env_init, *ctm_log = ctmrg_c4v.run(state, ctm_env_init,
                                           conv_check=ctmrg_conv_energy)

    e_curr0 = energy_f(state, ctm_env_init)
    obs_values0, obs_labels = model.eval_obs(state, ctm_env_init)
    obs_dict = dict(zip(obs_labels, obs_values0))

    eps_e = 1.0e-8
    eps_m = 1.0e-14
    self.assertTrue(abs(e_curr0 - (-0.47684229)) < eps_e)
    self.assertTrue(obs_dict["m"] < eps_m)
    for l in ["sz", "sp", "sm"]:
        self.assertTrue(abs(obs_dict[l]) < eps_m)
def main():
    cfg.configure(args)
    cfg.print_config()
    torch.set_num_threads(args.omp_cores)
    torch.manual_seed(args.seed)

    model = j1j2.J1J2_C4V_BIPARTITE(j1=args.j1, j2=args.j2, hz_stag=args.hz_stag,
                                    delta_zz=args.delta_zz)
    energy_f = model.energy_1x1_lowmem

    # initialize the ipeps
    if args.instate is not None:
        state = read_ipeps_c4v(args.instate)
        if args.bond_dim > max(state.get_aux_bond_dims()):
            # extend the auxiliary dimensions
            state = extend_bond_dim(state, args.bond_dim)
        state.add_noise(args.instate_noise)
        # state.sites[(0,0)]= state.sites[(0,0)]/torch.max(torch.abs(state.sites[(0,0)]))
        state.sites[(0, 0)] = state.site() / state.site().norm()
    elif args.opt_resume is not None:
        state = IPEPS_C4V(torch.tensor(0.))
        state.load_checkpoint(args.opt_resume)
    elif args.ipeps_init_type == 'RANDOM':
        bond_dim = args.bond_dim
        A = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
                       dtype=cfg.global_args.dtype, device=cfg.global_args.device)
        # A= make_c4v_symm(A)
        # A= A/torch.max(torch.abs(A))
        A = A / A.norm()
        state = IPEPS_C4V(A)
    else:
        raise ValueError("Missing trial state: -instate=None and -ipeps_init_type= "
                         + str(args.ipeps_init_type) + " is not supported")

    print(state)

    @torch.no_grad()
    def ctmrg_conv_f(state, env, history, ctm_args=cfg.ctm_args):
        if not history:
            history = dict({"log": []})
        rdm2x1 = rdm2x1_sl(state, env, force_cpu=ctm_args.conv_check_cpu)
        dist = float('inf')
        if len(history["log"]) > 0:
            dist = torch.dist(rdm2x1, history["rdm"], p=2).item()
        history["rdm"] = rdm2x1
        history["log"].append(dist)
        if dist < ctm_args.ctm_conv_tol or len(history["log"]) >= ctm_args.ctm_max_iter:
            log.info({"history_length": len(history['log']), "history": history['log']})
            return True, history
        return False, history

    state_sym = to_ipeps_c4v(state)
    ctm_env = ENV_C4V(args.chi, state_sym)
    init_env(state_sym, ctm_env)
    ctm_env, *ctm_log = ctmrg_c4v.run(state_sym, ctm_env, conv_check=ctmrg_conv_f)

    loss = energy_f(state_sym, ctm_env)
    obs_values, obs_labels = model.eval_obs(state_sym, ctm_env)
    print(", ".join(["epoch", "energy"] + obs_labels))
    print(", ".join([f"{-1}", f"{loss}"] + [f"{v}" for v in obs_values]))

    def loss_fn(state, ctm_env_in, opt_context):
        ctm_args = opt_context["ctm_args"]
        opt_args = opt_context["opt_args"]

        # 0) preprocess: create a copy of the state, symmetrize and normalize it,
        # keeping all operations tracked by autograd. This does not "overwrite"
        # the parameter tensors, which live outside the scope of loss_fn
        state_sym = to_ipeps_c4v(state, normalize=True)

        # possibly re-initialize the environment
        if opt_args.opt_ctm_reinit:
            init_env(state_sym, ctm_env_in)

        # 1) compute environment by CTMRG
        ctm_env_out, *ctm_log = ctmrg_c4v.run(state_sym, ctm_env_in,
                                              conv_check=ctmrg_conv_f,
                                              ctm_args=ctm_args)

        # 2) evaluate loss with the converged environment
        loss = energy_f(state_sym, ctm_env_out, force_cpu=args.force_cpu)

        return (loss, ctm_env_out, *ctm_log)

    def _to_json(l):
        re = [l[i, 0].item() for i in range(l.size()[0])]
        im = [l[i, 1].item() for i in range(l.size()[0])]
        return dict({"re": re, "im": im})

    @torch.no_grad()
    def obs_fn(state, ctm_env, opt_context):
        if not opt_context.get("line_search", False):
            state_sym = to_ipeps_c4v(state, normalize=True)
            epoch = len(opt_context["loss_history"]["loss"])
            loss = opt_context["loss_history"]["loss"][-1]
            obs_values, obs_labels = model.eval_obs(state_sym, ctm_env)
            print(", ".join([f"{epoch}", f"{loss}"] + [f"{v}" for v in obs_values]
                            + [f"{torch.max(torch.abs(state.site((0,0))))}"]))

            if args.top_freq > 0 and epoch % args.top_freq == 0:
                coord_dir_pairs = [((0, 0), (1, 0))]
                for c, d in coord_dir_pairs:
                    # transfer operator spectrum
                    print(f"TOP spectrum(T)[{c},{d}] ", end="")
                    l = transferops_c4v.get_Top_spec_c4v(args.top_n, state_sym, ctm_env)
                    print("TOP " + json.dumps(_to_json(l)))

    def post_proc(state, ctm_env, opt_context):
        symm, max_err = verify_c4v_symm_A1(state.site())
        # print(f"post_proc {symm} {max_err}")
        if not symm:
            # force symmetrization outside of autograd
            with torch.no_grad():
                symm_site = make_c4v_symm(state.site())
                # we **cannot** simply normalize the on-site tensor, since LBFGS
                # takes the overall scale into account
                # symm_site= symm_site/torch.max(torch.abs(symm_site))
                state.sites[(0, 0)].copy_(symm_site)

    # optimize
    # optimize_state(state, ctm_env, loss_fn, obs_fn=obs_fn, post_proc=post_proc)
    optimize_state(state, ctm_env, loss_fn, obs_fn=obs_fn)

    # compute final observables for the best variational state
    outputstatefile = args.out_prefix + "_state.json"
    state = read_ipeps_c4v(outputstatefile)
    ctm_env = ENV_C4V(args.chi, state)
    init_env(state, ctm_env)
    ctm_env, *ctm_log = ctmrg_c4v.run(state, ctm_env, conv_check=ctmrg_conv_f)
    opt_energy = energy_f(state, ctm_env)
    obs_values, obs_labels = model.eval_obs(state, ctm_env)
    print(", ".join([f"{args.opt_max_iter}", f"{opt_energy}"]
                    + [f"{v}" for v in obs_values]))
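# All convergence checks handed to ctmrg_c4v.run follow the same contract:
# they receive (state, env, history) and return (converged, history). A
# minimal sketch of that contract on a toy power iteration (converged_by_dist
# and the power-iteration names are illustrative, not part of the codebase):
import torch

def converged_by_dist(x, history, tol=1.0e-8, max_iter=100):
    # same convention as ctmrg_conv_f: measure the 2-norm distance between
    # successive iterates and stop once it drops below tol
    if not history:
        history = {"log": []}
    dist = float('inf')
    if "prev" in history:
        dist = torch.dist(x, history["prev"], p=2).item()
    history["prev"] = x
    history["log"].append(dist)
    return (dist < tol or len(history["log"]) >= max_iter), history

# toy usage: iterate a random symmetric matrix to its leading eigenvector
M = torch.rand(10, 10)
M = 0.5 * (M + M.t())
v, history = torch.rand(10), None
for _ in range(100):
    v = M @ v
    v = v / v.norm()
    done, history = converged_by_dist(v, history)
    if done:
        break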
def main():
    cfg.configure(args)
    cfg.print_config()
    torch.set_num_threads(args.omp_cores)
    torch.manual_seed(args.seed)

    model = j1j2.J1J2_C4V_BIPARTITE(j1=args.j1, j2=args.j2, j3=args.j3,
                                    hz_stag=args.hz_stag, delta_zz=args.delta_zz)
    energy_f = model.energy_1x1_lowmem

    # initialize an ipeps
    if args.instate is not None:
        state = read_ipeps_u1(args.instate, vertexToSite=None)
        assert len(state.coeffs) == 1, "Not a 1-site ipeps"
        # TODO extending from smaller bond-dim to higher bond-dim is
        # currently not possible
        state.add_noise(args.instate_noise)
    elif args.opt_resume is not None:
        if args.bond_dim in [2, 3, 4, 5, 6, 7, 8, 9]:
            u1sym_t = tenU1.import_sym_tensors(2, args.bond_dim, "A_1",
                infile=f"u1sym/D{args.bond_dim}_U1_{args.u1_class}.txt",
                dtype=cfg.global_args.torch_dtype, device=cfg.global_args.device)
        else:
            raise ValueError("Unsupported --bond_dim= " + str(args.bond_dim))
        A = torch.zeros(len(u1sym_t), dtype=cfg.global_args.torch_dtype,
                        device=cfg.global_args.device)
        coeffs = {(0, 0): A}
        state = IPEPS_U1SYM(u1sym_t, coeffs)
        state.load_checkpoint(args.opt_resume)
    elif args.ipeps_init_type == 'RANDOM':
        if args.bond_dim in [2, 3, 4, 5, 6, 7, 8, 9]:
            u1sym_t = tenU1.import_sym_tensors(2, args.bond_dim, "A_1",
                infile=f"u1sym/D{args.bond_dim}_U1_{args.u1_class}.txt",
                dtype=cfg.global_args.torch_dtype, device=cfg.global_args.device)
        else:
            raise ValueError("Unsupported --bond_dim= " + str(args.bond_dim))
        A = torch.rand(len(u1sym_t), dtype=cfg.global_args.torch_dtype,
                       device=cfg.global_args.device)
        A = A / torch.max(torch.abs(A))
        coeffs = {(0, 0): A}
        state = IPEPS_U1SYM(u1sym_t, coeffs)
    else:
        raise ValueError("Missing trial state: -instate=None and -ipeps_init_type= "
                         + str(args.ipeps_init_type) + " is not supported")

    print(state)

    @torch.no_grad()
    def ctmrg_conv_f(state, env, history, ctm_args=cfg.ctm_args):
        if not history:
            history = dict({"log": []})
        rdm2x1 = rdm2x1_sl(state, env, force_cpu=ctm_args.conv_check_cpu)
        dist = float('inf')
        if len(history["log"]) > 1:
            dist = torch.dist(rdm2x1, history["rdm"], p=2).item()
        history["rdm"] = rdm2x1
        history["log"].append(dist)
        if dist < ctm_args.ctm_conv_tol:
            log.info({"history_length": len(history['log']), "history": history['log'],
                      "final_multiplets": compute_multiplets(env)})
            return True, history
        elif len(history['log']) >= ctm_args.ctm_max_iter:
            log.info({"history_length": len(history['log']), "history": history['log'],
                      "final_multiplets": compute_multiplets(env)})
            return False, history
        return False, history

    ctm_env = ENV_C4V(args.chi, state)
    init_env(state, ctm_env)

    loss0 = energy_f(state, ctm_env, force_cpu=True)
    obs_values, obs_labels = model.eval_obs(state, ctm_env, force_cpu=True)
    print(", ".join(["epoch", "energy"] + obs_labels))
    print(", ".join([f"{-1}", f"{loss0}"] + [f"{v}" for v in obs_values]))

    def loss_fn(state, ctm_env_in, opt_context):
        ctm_args = opt_context["ctm_args"]
        opt_args = opt_context["opt_args"]

        # build on-site tensors from the u1sym components
        state.sites = state.build_onsite_tensors()

        # possibly re-initialize the environment
        if opt_args.opt_ctm_reinit:
            init_env(state, ctm_env_in)

        # 1) compute environment by CTMRG
        ctm_env_out, history, t_ctm, t_obs = ctmrg_c4v.run(state, ctm_env_in,
            conv_check=ctmrg_conv_f, ctm_args=ctm_args)
        loss0 = energy_f(state, ctm_env_out, force_cpu=True)

        # 2) take one extra CTM step and evaluate the energy again; use the
        # larger of the two estimates as the loss
        loc_ctm_args = copy.deepcopy(ctm_args)
        loc_ctm_args.ctm_max_iter = 1
        ctm_env_out, history1, t_ctm1, t_obs1 = ctmrg_c4v.run(state, ctm_env_out,
            ctm_args=loc_ctm_args)
        loss1 = energy_f(state, ctm_env_out, force_cpu=True)

        # loss= (loss0+loss1)/2
        loss = torch.max(loss0, loss1)

        return loss, ctm_env_out, history, t_ctm, t_obs

    def _to_json(l):
        re = [l[i, 0].item() for i in range(l.size()[0])]
        im = [l[i, 1].item() for i in range(l.size()[0])]
        return dict({"re": re, "im": im})

    @torch.no_grad()
    def obs_fn(state, ctm_env, opt_context):
        if opt_context["line_search"]:
            epoch = len(opt_context["loss_history"]["loss_ls"])
            loss = opt_context["loss_history"]["loss_ls"][-1]
            print("LS", end=" ")
        else:
            epoch = len(opt_context["loss_history"]["loss"])
            loss = opt_context["loss_history"]["loss"][-1]
        obs_values, obs_labels = model.eval_obs(state, ctm_env, force_cpu=True)
        print(", ".join([f"{epoch}", f"{loss}"] + [f"{v}" for v in obs_values]))

        if (not opt_context["line_search"]) and args.top_freq > 0 \
                and epoch % args.top_freq == 0:
            coord_dir_pairs = [((0, 0), (1, 0))]
            for c, d in coord_dir_pairs:
                # transfer operator spectrum
                print(f"TOP spectrum(T)[{c},{d}] ", end="")
                l = transferops_c4v.get_Top_spec_c4v(args.top_n, state, ctm_env)
                print("TOP " + json.dumps(_to_json(l)))

    # optimize
    optimize_state(state, ctm_env, loss_fn, obs_fn=obs_fn)

    # compute final observables for the best variational state
    outputstatefile = args.out_prefix + "_state.json"
    state = read_ipeps_u1(outputstatefile)
    ctm_env = ENV_C4V(args.chi, state)
    init_env(state, ctm_env)
    ctm_env, *ctm_log = ctmrg_c4v.run(state, ctm_env, conv_check=ctmrg_conv_f)
    opt_energy = energy_f(state, ctm_env, force_cpu=True)
    obs_values, obs_labels = model.eval_obs(state, ctm_env, force_cpu=True)
    print(", ".join([f"{args.opt_max_iter}", f"{opt_energy}"]
                    + [f"{v}" for v in obs_values]))
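# Note on the loss above: rather than returning the converged energy alone,
# loss_fn runs one extra CTM step and takes the maximum of the two energy
# estimates, penalizing states whose energy looks good only with an
# under-converged environment. A stripped-down sketch of that pattern
# (energy_f and ctm_step stand in for the calls used above):
import torch

def robust_loss(energy_f, ctm_step, state, env):
    e0 = energy_f(state, env)       # energy with the converged environment
    env1 = ctm_step(state, env)     # one additional CTM iteration
    e1 = energy_f(state, env1)      # energy after the extra step
    # lowering the max requires lowering both estimates, which makes the
    # loss robust to residual CTM convergence noise
    return torch.max(e0, e1), env1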
def main():
    # 0) parse command line arguments and configure simulation parameters
    cfg.configure(args)
    cfg.print_config()
    torch.set_num_threads(args.omp_cores)
    torch.manual_seed(args.seed)

    model = j1j2.J1J2_C4V_BIPARTITE(j1=args.j1, j2=args.j2)
    energy_f = model.energy_1x1_lowmem
    # energy_f= model.energy_1x1

    # 1) initialize an ipeps - read from file or create a random one
    if args.instate is not None:
        state = read_ipeps_c4v(args.instate)
        if args.bond_dim > max(state.get_aux_bond_dims()):
            # extend the auxiliary dimensions
            state = extend_bond_dim(state, args.bond_dim)
        state.add_noise(args.instate_noise)
        state.sites[(0, 0)] = state.sites[(0, 0)] / torch.max(
            torch.abs(state.sites[(0, 0)]))
    elif args.ipeps_init_type == 'RANDOM':
        bond_dim = args.bond_dim
        A = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
                       dtype=cfg.global_args.dtype, device=cfg.global_args.device)
        A = make_c4v_symm(A)
        A = A / torch.max(torch.abs(A))
        state = IPEPS_C4V(A)
    else:
        raise ValueError("Missing trial state: -instate=None and -ipeps_init_type= "
                         + str(args.ipeps_init_type) + " is not supported")

    print(state)

    # 2) define the convergence criterion for the CTM algorithm. This function
    # is invoked at every CTM step. We also use it to evaluate observables of
    # interest during the course of CTM.
    # 2a) convergence criterion based on the on-site energy
    def ctmrg_conv_energy(state, env, history, ctm_args=cfg.ctm_args):
        with torch.no_grad():
            if not history:
                history = []
            e_curr = energy_f(state, env, force_cpu=ctm_args.conv_check_cpu)
            history.append(e_curr.item())

            if args.obs_freq > 0 and \
                    (len(history) % args.obs_freq == 0
                     or (len(history) - 1) % args.obs_freq == 0):
                obs_values, obs_labels = model.eval_obs(state, env)
                print(", ".join([f"{len(history)}", f"{e_curr}"]
                                + [f"{v}" for v in obs_values]))
            else:
                print(", ".join([f"{len(history)}", f"{e_curr}"]))

            if len(history) > 1 and \
                    abs(history[-1] - history[-2]) < ctm_args.ctm_conv_tol:
                log.info({"history_length": len(history), "history": history,
                          "final_multiplets": compute_multiplets(env)})
                return True, history
            elif len(history) >= ctm_args.ctm_max_iter:
                log.info({"history_length": len(history), "history": history,
                          "final_multiplets": compute_multiplets(env)})
                return False, history
        return False, history

    # 2b) convergence criterion based on the 2-site reduced density matrix
    # of nearest neighbours
    def ctmrg_conv_rdm2x1(state, env, history, ctm_args=cfg.ctm_args):
        with torch.no_grad():
            if not history:
                history = dict({"log": []})
            rdm2x1 = rdm2x1_sl(state, env, force_cpu=ctm_args.conv_check_cpu)
            dist = float('inf')
            if len(history["log"]) > 1:
                dist = torch.dist(rdm2x1, history["rdm"], p=2).item()

            # log dist and observables
            if args.obs_freq > 0 and \
                    (len(history["log"]) % args.obs_freq == 0
                     or (len(history["log"]) - 1) % args.obs_freq == 0):
                e_curr = energy_f(state, env, force_cpu=ctm_args.conv_check_cpu)
                obs_values, obs_labels = model.eval_obs(state, env, force_cpu=True)
                print(", ".join([f"{len(history['log'])}", f"{dist}", f"{e_curr}"]
                                + [f"{v}" for v in obs_values]))
            else:
                print(f"{len(history['log'])}, {dist}")

            # update history
            history["rdm"] = rdm2x1
            history["log"].append(dist)

            if dist < ctm_args.ctm_conv_tol:
                log.info({"history_length": len(history['log']),
                          "history": history['log'],
                          "final_multiplets": compute_multiplets(env)})
                return True, history
            elif len(history['log']) >= ctm_args.ctm_max_iter:
                log.info({"history_length": len(history['log']),
                          "history": history['log'],
                          "final_multiplets": compute_multiplets(env)})
                return False, history
        return False, history

    # 3) initialize environment
    ctm_env_init = ENV_C4V(args.chi, state)
    init_env(state, ctm_env_init)

    # 4) (optional) compute observables as given by the initial environment
    e_curr0 = energy_f(state, ctm_env_init)
    obs_values0, obs_labels = model.eval_obs(state, ctm_env_init)
    print(", ".join(["epoch", "energy"] + obs_labels))
    print(", ".join([f"{-1}", f"{e_curr0}"] + [f"{v}" for v in obs_values0]))

    # 5) (main) execute CTM algorithm
    ctm_env_init, *ctm_log = ctmrg_c4v.run(state, ctm_env_init,
                                           conv_check=ctmrg_conv_rdm2x1)

    # 6) compute final observables
    e_curr0 = energy_f(state, ctm_env_init, force_cpu=True)
    obs_values0, obs_labels = model.eval_obs(state, ctm_env_init, force_cpu=True)
    history, t_ctm, t_obs = ctm_log
    print("\n")
    print(", ".join(["epoch", "energy"] + obs_labels))
    print("FINAL " + ", ".join([f"{e_curr0}"] + [f"{v}" for v in obs_values0]))
    print(f"TIMINGS ctm: {t_ctm} conv_check: {t_obs}")

    # 7) ----- additional observables ---------------------------------------
    corrSS = model.eval_corrf_SS(state, ctm_env_init, args.corrf_r,
                                 canonical=args.corrf_canonical)
    print("\n\nSS r " + " ".join([label for label in corrSS.keys()])
          + f" canonical {args.corrf_canonical}")
    for i in range(args.corrf_r):
        print(f"{i} " + " ".join([f"{corrSS[label][i]}" for label in corrSS.keys()]))

    corrDD = model.eval_corrf_DD_H(state, ctm_env_init, args.corrf_r)
    print("\n\nDD r " + " ".join([label for label in corrDD.keys()]))
    for i in range(args.corrf_r):
        print(f"{i} " + " ".join([f"{corrDD[label][i]}" for label in corrDD.keys()]))

    # environment diagnostics
    print("\n\nspectrum(C)")
    u, s, v = torch.svd(ctm_env_init.C[ctm_env_init.keyC], compute_uv=False)
    for i in range(args.chi):
        print(f"{i} {s[i]}")

    # transfer operator spectrum
    print("\n\nspectrum(T)")
    l = transferops_c4v.get_Top_spec_c4v(args.top_n, state, ctm_env_init)
    for i in range(l.size()[0]):
        print(f"{i} {l[i,0]} {l[i,1]}")
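# The transfer-operator spectrum printed above determines the correlation
# length as xi = -1/ln|lambda_1/lambda_0|. A small helper illustrating this
# (corr_length is hypothetical, not part of the codebase; it assumes the
# (n, 2) real/imaginary layout returned by get_Top_spec_c4v, with eigenvalues
# sorted by magnitude in descending order):
import torch

def corr_length(top_spec):
    # |lambda_i| from the (real, imag) columns
    lam = torch.complex(top_spec[:, 0], top_spec[:, 1]).abs()
    # xi = -1 / ln|lambda_1 / lambda_0|
    return -1.0 / torch.log(lam[1] / lam[0]).item()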
def main():
    cfg.configure(args)
    cfg.print_config()
    torch.set_num_threads(args.omp_cores)
    torch.manual_seed(args.seed)

    model = j1j2.J1J2_C4V_BIPARTITE(j1=args.j1, j2=args.j2, hz_stag=args.hz_stag,
                                    delta_zz=args.delta_zz)
    energy_f = model.energy_1x1_lowmem

    # initialize an ipeps
    if args.instate is not None:
        state = read_ipeps_u1(args.instate, vertexToSite=None)
        assert len(state.coeffs) == 1, "Not a 1-site ipeps"
        state.add_noise(args.instate_noise)
    elif args.opt_resume is not None:
        if args.bond_dim in [2, 3, 4, 5, 6, 7, 8]:
            u1sym_t = tenU1.import_sym_tensors(2, args.bond_dim, "A_1",
                infile=f"u1sym/D{args.bond_dim}_U1_{args.u1_class}.txt",
                dtype=cfg.global_args.dtype, device=cfg.global_args.device)
        else:
            raise ValueError("Unsupported --bond_dim= " + str(args.bond_dim))
        A = torch.zeros(len(u1sym_t), dtype=cfg.global_args.dtype,
                        device=cfg.global_args.device)
        coeffs = {(0, 0): A}
        state = IPEPS_U1SYM(u1sym_t, coeffs)
        state.load_checkpoint(args.opt_resume)
    elif args.ipeps_init_type == 'RANDOM':
        if args.bond_dim in [2, 3, 4, 5, 6, 7, 8]:
            u1sym_t = tenU1.import_sym_tensors(2, args.bond_dim, "A_1",
                infile=f"u1sym/D{args.bond_dim}_U1_{args.u1_class}.txt",
                dtype=cfg.global_args.dtype, device=cfg.global_args.device)
        else:
            raise ValueError("Unsupported --bond_dim= " + str(args.bond_dim))
        A = torch.rand(len(u1sym_t), dtype=cfg.global_args.dtype,
                       device=cfg.global_args.device)
        A = A / torch.max(torch.abs(A))
        coeffs = {(0, 0): A}
        state = IPEPS_U1SYM(u1sym_t, coeffs)
    else:
        raise ValueError("Missing trial state: -instate=None and -ipeps_init_type= "
                         + str(args.ipeps_init_type) + " is not supported")

    print(state)

    # convergence criterion based on the 2-site reduced density matrix
    # of nearest neighbours
    @torch.no_grad()
    def ctmrg_conv_rdm2x1(state, env, history, ctm_args=cfg.ctm_args):
        if not history:
            history = dict({"log": []})
        rdm2x1 = rdm2x1_sl(state, env, force_cpu=ctm_args.conv_check_cpu)
        dist = float('inf')
        if len(history["log"]) > 1:
            dist = torch.dist(rdm2x1, history["rdm"], p=2).item()

        # log dist and observables
        if args.obs_freq > 0 and \
                (len(history["log"]) % args.obs_freq == 0
                 or (len(history["log"]) - 1) % args.obs_freq == 0):
            e_curr = energy_f(state, env, force_cpu=ctm_args.conv_check_cpu)
            obs_values, obs_labels = model.eval_obs(state, env, force_cpu=True)
            print(", ".join([f"{len(history['log'])}", f"{dist}", f"{e_curr}"]
                            + [f"{v}" for v in obs_values]))
        else:
            print(f"{len(history['log'])}, {dist}")

        # update history
        history["rdm"] = rdm2x1
        history["log"].append(dist)

        if dist < ctm_args.ctm_conv_tol:
            log.info({"history_length": len(history['log']), "history": history['log'],
                      "final_multiplets": compute_multiplets(env)})
            return True, history
        elif len(history['log']) >= ctm_args.ctm_max_iter:
            log.info({"history_length": len(history['log']), "history": history['log'],
                      "final_multiplets": compute_multiplets(env)})
            return False, history
        return False, history

    ctm_env_init = ENV_C4V(args.chi, state)
    init_env(state, ctm_env_init)

    e_curr0 = energy_f(state, ctm_env_init, force_cpu=True)
    obs_values0, obs_labels = model.eval_obs(state, ctm_env_init, force_cpu=True)
    print(", ".join(["epoch", "energy"] + obs_labels))
    print(", ".join([f"{-1}", f"{e_curr0}"] + [f"{v}" for v in obs_values0]))

    ctm_env_init, *ctm_log = ctmrg_c4v.run(state, ctm_env_init,
                                           conv_check=ctmrg_conv_rdm2x1)

    e_curr0 = energy_f(state, ctm_env_init, force_cpu=True)
    obs_values0, obs_labels = model.eval_obs(state, ctm_env_init, force_cpu=True)
    history, t_ctm, t_obs = ctm_log
    print("\n")
    print(", ".join(["epoch", "energy"] + obs_labels))
    print("FINAL " + ", ".join([f"{e_curr0}"] + [f"{v}" for v in obs_values0]))
    print(f"TIMINGS ctm: {t_ctm} conv_check: {t_obs}")

    # ----- additional observables ------------------------------------------
    corrSS = model.eval_corrf_SS(state, ctm_env_init, args.corrf_r,
                                 canonical=args.corrf_canonical)
    print("\n\nSS r " + " ".join([label for label in corrSS.keys()])
          + f" canonical {args.corrf_canonical}")
    for i in range(args.corrf_r):
        print(f"{i} " + " ".join([f"{corrSS[label][i]}" for label in corrSS.keys()]))

    corrDD = model.eval_corrf_DD_H(state, ctm_env_init, args.corrf_r)
    print("\n\nDD r " + " ".join([label for label in corrDD.keys()]))
    for i in range(args.corrf_r):
        print(f"{i} " + " ".join([f"{corrDD[label][i]}" for label in corrDD.keys()]))

    if args.corrf_dd_v:
        corrDD_V = model.eval_corrf_DD_V(state, ctm_env_init, args.corrf_r)
        print("\n\nDD_v r " + " ".join([label for label in corrDD_V.keys()]))
        for i in range(args.corrf_r):
            print(f"{i} " + " ".join([f"{corrDD_V[label][i]}"
                                      for label in corrDD_V.keys()]))

    # environment diagnostics
    print("\n\nspectrum(C)")
    u, s, v = torch.svd(ctm_env_init.C[ctm_env_init.keyC], compute_uv=False)
    for i in range(args.chi):
        print(f"{i} {s[i]}")

    # transfer operator spectrum
    print("\n\nspectrum(T)")
    l = transferops_c4v.get_Top_spec_c4v(args.top_n, state, ctm_env_init)
    for i in range(l.size()[0]):
        print(f"{i} {l[i,0]} {l[i,1]}")

    # transfer operator spectrum
    if args.top2:
        print("\n\nspectrum(T2)")
        l = transferops_c4v.get_Top2_spec_c4v(args.top_n, state, ctm_env_init)
        for i in range(l.size()[0]):
            print(f"{i} {l[i,0]} {l[i,1]}")