def to_ipeps_c4v(state, normalize=False):
    """Project a generic single-site iPEPS onto the C4v-symmetric ansatz.

    The real part of the on-site tensor is symmetrized into the A1 irrep;
    for complex tensors the imaginary part is symmetrized into the A2 irrep.
    Other irrep choices (B1, B2, ...) are possible in principle.
    TODO other classes of C4v-symmetric ansatz ?

    :param state: wavefunction carrying exactly one on-site tensor
    :param normalize: if True, divide the symmetrized tensor by its norm
    :type normalize: bool
    :return: C4v-symmetric state built from the symmetrized tensor
    :rtype: IPEPS_C4V
    """
    assert len(state.sites.items()) == 1, "state has more than a single on-site tensor"
    site_t = next(iter(state.sites.values()))
    if site_t.is_complex():
        site_t = make_c4v_symm(site_t.real) \
            + 1.0j * make_c4v_symm(site_t.imag, irreps=["A2"])
    else:
        site_t = make_c4v_symm(site_t)
    if normalize:
        site_t = site_t / site_t.norm()
    return IPEPS_C4V(site_t)
def to_ipeps_c4v(state, normalize=False):
    """Symmetrize the single on-site tensor of ``state`` under C4v and wrap
    the result in a new IPEPS_C4V.

    :param state: wavefunction carrying exactly one on-site tensor
    :param normalize: if True, divide the symmetrized tensor by its norm
    :type normalize: bool
    :rtype: IPEPS_C4V
    """
    assert len(state.sites.items()) == 1, "state has more than a single on-site tensor"
    site_t = next(iter(state.sites.values()))
    site_t = make_c4v_symm(site_t)
    if normalize:
        site_t = site_t / site_t.norm()
    return IPEPS_C4V(site_t)
def add_noise(self, noise, symmetrize=False):
    r"""
    :param noise: magnitude of the noise
    :type noise: float
    :param symmetrize: if True, C4v-symmetrize the perturbed tensor
    :type symmetrize: bool

    Take IPEPS and add random uniform noise with magnitude ``noise``
    to the on-site tensor. Optionally re-symmetrize afterwards: the real
    part goes into the A1 irrep and, for complex tensors, the imaginary
    part into the A2 irrep.
    """
    rand_t = torch.rand(self.site().size(), dtype=self.dtype, device=self.device)
    self.sites[(0, 0)] = self.site() + noise * rand_t
    if not symmetrize:
        return
    if self.sites[(0, 0)].is_complex():
        self.sites[(0, 0)] = make_c4v_symm(self.site().real) \
            + 1.0j * make_c4v_symm(self.site().imag, irreps=["A2"])
    else:
        self.sites[(0, 0)] = make_c4v_symm(self.site())
def add_noise(self, noise, symmetrize=False):
    r"""
    :param noise: magnitude of the noise
    :type noise: float
    :param symmetrize: if True, C4v-symmetrize the perturbed tensor
    :type symmetrize: bool

    Take IPEPS and add random uniform noise with magnitude ``noise``
    to the on-site tensor, optionally re-symmetrizing afterwards.
    """
    perturbation = noise * torch.rand(self.site().size(), dtype=self.dtype,
                                      device=self.device)
    self.sites[(0, 0)] = self.site() + perturbation
    if symmetrize:
        self.sites[(0, 0)] = make_c4v_symm(self.site())
def loss_fn(state, ctm_env_in, opt_context):
    """Evaluate the loss (energy) of the C4v-symmetrized state with a
    CTMRG-converged environment.

    :param state: current variational iPEPS
    :param ctm_env_in: environment used as the CTMRG starting point
    :param opt_context: optimizer bookkeeping (unused here)
    :return: (loss, converged environment, *ctm diagnostics)
    """
    # symmetrize on-site tensor and normalize by its largest element.
    # BUGFIX: normalize by the largest element in MAGNITUDE — dividing by the
    # signed maximum (torch.max(t)) flips the overall sign when the dominant
    # element is negative; every other normalization in this file uses abs.
    state = IPEPS_C4V(state.sites[(0, 0)])
    state.sites[(0, 0)] = make_c4v_symm(state.sites[(0, 0)])
    state.sites[(0, 0)] = state.sites[(0, 0)] \
        / torch.max(torch.abs(state.sites[(0, 0)]))

    # possibly re-initialize the environment
    if cfg.opt_args.opt_ctm_reinit:
        init_env(state, ctm_env_in)

    # 1) compute environment by CTMRG
    ctm_env_out, *ctm_log = ctmrg_c4v.run(state, ctm_env_in,
                                          conv_check=ctmrg_conv_rho2x1dist)
    # 2) evaluate loss with the converged environment
    loss = model.energy_1x1(state, ctm_env_out)
    return (loss, ctm_env_out, *ctm_log)
def write_to_file(self, outputfile, symmetrize=True, **kwargs):
    """Serialize this state to ``outputfile``.

    :param outputfile: destination file
    :param symmetrize: if True (default), C4v-symmetrize the on-site
        tensor before writing
    :type symmetrize: bool
    :param kwargs: forwarded to ``ipeps.write_ipeps``
    """
    # symmetrize before writing out
    if symmetrize:
        out_t = make_c4v_symm(self.site())
    else:
        out_t = self.site()
    ipeps.write_ipeps(IPEPS_C4V(out_t), outputfile, **kwargs)
def main():
    """Optimize a C4v-symmetric iPEPS for the J-Q model and report observables."""
    # 0) parse command line arguments and configure simulation parameters
    cfg.configure(args)
    cfg.print_config()
    torch.set_num_threads(args.omp_cores)
    torch.manual_seed(args.seed)

    # select the requested C4v ansatz of the J-Q model
    if args.c4v_type == "TI":
        model = jq.JQ_C4V(j1=args.j1, q=args.q)
    elif args.c4v_type == "BIPARTITE":
        model = jq.JQ_C4V_BIPARTITE(j1=args.j1, q=args.q)
    elif args.c4v_type == "PLAQUETTE":
        q_inter = args.q if args.q_inter is None else args.q_inter
        model = jq.JQ_C4V_PLAQUETTE(j1=args.j1, q=args.q, q_inter=q_inter)
    else:
        # BUGFIX: report the offending -c4v_type value (message previously
        # interpolated args.ipeps_init_type by mistake)
        raise ValueError("Unsupported C4v ansatz: -c4v_type= "
                         + str(args.c4v_type) + " is not supported")

    # initialize an ipeps
    # 1) define lattice-tiling function, that maps arbitrary vertex of square lattice
    # coord into one of coordinates within unit-cell of iPEPS ansatz
    if args.instate != None:
        state = read_ipeps_c4v(args.instate)
        if args.bond_dim > max(state.get_aux_bond_dims()):
            # extend the auxiliary dimensions
            state = extend_bond_dim(state, args.bond_dim)
        state.add_noise(args.instate_noise)
        state.sites[(0, 0)] = state.sites[(0, 0)] / torch.max(
            torch.abs(state.sites[(0, 0)]))
    elif args.opt_resume is not None:
        # placeholder on-site tensor; actual data is loaded from the checkpoint
        state = IPEPS_C4V(torch.tensor(0.))
        state.load_checkpoint(args.opt_resume)
    elif args.ipeps_init_type == 'RANDOM':
        bond_dim = args.bond_dim
        # NOTE(review): other entry points in this file use
        # cfg.global_args.torch_dtype here — confirm which attribute the
        # global config actually defines
        A = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
                       dtype=cfg.global_args.dtype, device=cfg.global_args.device)
        A = make_c4v_symm(A)
        A = A / torch.max(torch.abs(A))
        state = IPEPS_C4V(A)
    else:
        raise ValueError("Missing trial state: -instate=None and -ipeps_init_type= "
                         + str(args.ipeps_init_type) + " is not supported")
    print(state)

    # convergence criterion for CTMRG based on the on-site energy
    @torch.no_grad()
    def ctmrg_conv_energy(state, env, history, ctm_args=cfg.ctm_args):
        if not history:
            history = []
        e_curr = model.energy_1x1(state, env)
        history.append(e_curr.item())
        if (len(history) > 1 and abs(history[-1] - history[-2]) < ctm_args.ctm_conv_tol) \
                or len(history) >= ctm_args.ctm_max_iter:
            log.info({"history_length": len(history), "history": history})
            return True, history
        return False, history

    # compute environment and observables of the initial state
    ctm_env = ENV_C4V(args.chi, state)
    init_env(state, ctm_env)
    ctm_env, *ctm_log = ctmrg_c4v.run(state, ctm_env, conv_check=ctmrg_conv_energy)
    loss = model.energy_1x1(state, ctm_env)
    obs_values, obs_labels = model.eval_obs(state, ctm_env)
    print(", ".join(["epoch", "energy"] + obs_labels))
    print(", ".join([f"{-1}", f"{loss}"] + [f"{v}" for v in obs_values]))

    def loss_fn(state, ctm_env_in, opt_context):
        # loss = energy of the C4v-symmetrized state in its converged environment
        ctm_args = opt_context["ctm_args"]
        opt_args = opt_context["opt_args"]

        # symmetrize on-site tensor
        state_sym = to_ipeps_c4v(state, normalize=True)

        # possibly re-initialize the environment
        if opt_args.opt_ctm_reinit:
            init_env(state_sym, ctm_env_in)

        # 1) compute environment by CTMRG
        ctm_env_out, *ctm_log = ctmrg_c4v.run(state_sym, ctm_env_in,
                                              conv_check=ctmrg_conv_energy,
                                              ctm_args=ctm_args)

        # 2) evaluate loss with converged environment
        loss = model.energy_1x1(state_sym, ctm_env_out)
        return (loss, ctm_env_out, *ctm_log)

    @torch.no_grad()
    def obs_fn(state, ctm_env, opt_context):
        # print observables, but not during line-search evaluations
        if ("line_search" in opt_context.keys() and not opt_context["line_search"]) \
                or not "line_search" in opt_context.keys():
            epoch = len(opt_context["loss_history"]["loss"])
            loss = opt_context["loss_history"]["loss"][-1]
            obs_values, obs_labels = model.eval_obs(state, ctm_env)
            print(", ".join([f"{epoch}", f"{loss}"] + [f"{v}" for v in obs_values]))

    # optimize
    optimize_state(state, ctm_env, loss_fn, obs_fn=obs_fn)

    # compute final observables for the best variational state
    outputstatefile = args.out_prefix + "_state.json"
    state = read_ipeps_c4v(outputstatefile)
    ctm_env = ENV_C4V(args.chi, state)
    init_env(state, ctm_env)
    ctm_env, *ctm_log = ctmrg_c4v.run(state, ctm_env, conv_check=ctmrg_conv_energy)
    opt_energy = model.energy_1x1(state, ctm_env)
    obs_values, obs_labels = model.eval_obs(state, ctm_env)
    print(", ".join([f"{args.opt_max_iter}", f"{opt_energy}"]
                    + [f"{v}" for v in obs_values]))
def main():
    """Run CTMRG for a C4v-symmetric iPEPS of the J1-J2 model; evaluate
    energy, observables, correlation functions and environment spectra."""
    # 0) parse command line arguments and configure simulation parameters
    cfg.configure(args)
    cfg.print_config()
    torch.set_num_threads(args.omp_cores)
    torch.manual_seed(args.seed)

    model = j1j2.J1J2_C4V_BIPARTITE(j1=args.j1, j2=args.j2)
    # use the low-memory variant of the energy evaluation
    energy_f = model.energy_1x1_lowmem
    # energy_f= model.energy_1x1

    # 1) initialize an ipeps - read from file or create a random one
    if args.instate != None:
        state = read_ipeps_c4v(args.instate)
        if args.bond_dim > max(state.get_aux_bond_dims()):
            # extend the auxiliary dimensions
            state = extend_bond_dim(state, args.bond_dim)
        state.add_noise(args.instate_noise)
        # normalize by the element largest in magnitude
        state.sites[(0, 0)] = state.sites[(0, 0)] / torch.max(
            torch.abs(state.sites[(0, 0)]))
    elif args.ipeps_init_type == 'RANDOM':
        bond_dim = args.bond_dim
        # NOTE(review): other entry points use cfg.global_args.torch_dtype here
        # — confirm which attribute the global config actually defines
        A = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
                       dtype=cfg.global_args.dtype, device=cfg.global_args.device)
        A = make_c4v_symm(A)
        A = A / torch.max(torch.abs(A))
        state = IPEPS_C4V(A)
    else:
        raise ValueError("Missing trial state: -instate=None and -ipeps_init_type= "
                         + str(args.ipeps_init_type) + " is not supported")
    print(state)

    # 2) define convergence criterion for CTM algorithm. This function is to be
    #    invoked at every CTM step. We also use it to evaluate observables of
    #    interest during the course of CTM
    # 2a) convergence criterion based on on-site energy
    def ctmrg_conv_energy(state, env, history, ctm_args=cfg.ctm_args):
        with torch.no_grad():
            if not history:
                history = []
            e_curr = energy_f(state, env, force_cpu=ctm_args.conv_check_cpu)
            history.append(e_curr.item())
            # every obs_freq steps also evaluate and print observables
            if args.obs_freq > 0 and \
                    (len(history) % args.obs_freq == 0 or (len(history) - 1) % args.obs_freq == 0):
                obs_values, obs_labels = model.eval_obs(state, env)
                print(", ".join([f"{len(history)}", f"{e_curr}"]
                                + [f"{v}" for v in obs_values]))
            else:
                print(", ".join([f"{len(history)}", f"{e_curr}"]))
            # converged when the energy change drops below ctm_conv_tol
            if len(history) > 1 and abs(history[-1] - history[-2]) < ctm_args.ctm_conv_tol:
                log.info({
                    "history_length": len(history),
                    "history": history,
                    "final_multiplets": compute_multiplets(env)
                })
                return True, history
            elif len(history) >= ctm_args.ctm_max_iter:
                # iteration budget exhausted without convergence
                log.info({
                    "history_length": len(history),
                    "history": history,
                    "final_multiplets": compute_multiplets(env)
                })
                return False, history
            return False, history

    # 2b) convergence criterion based on 2-site reduced density matrix
    #     of nearest-neighbours
    def ctmrg_conv_rdm2x1(state, env, history, ctm_args=cfg.ctm_args):
        with torch.no_grad():
            if not history:
                history = dict({"log": []})
            rdm2x1 = rdm2x1_sl(state, env, force_cpu=ctm_args.conv_check_cpu)
            dist = float('inf')
            if len(history["log"]) > 1:
                dist = torch.dist(rdm2x1, history["rdm"], p=2).item()
            # log dist and observables
            if args.obs_freq > 0 and \
                    (len(history["log"]) % args.obs_freq == 0 or (len(history["log"]) - 1) % args.obs_freq == 0):
                e_curr = energy_f(state, env, force_cpu=ctm_args.conv_check_cpu)
                obs_values, obs_labels = model.eval_obs(state, env, force_cpu=True)
                print(", ".join(
                    [f"{len(history['log'])}", f"{dist}", f"{e_curr}"]
                    + [f"{v}" for v in obs_values]))
            else:
                print(f"{len(history['log'])}, {dist}")
            # update history
            history["rdm"] = rdm2x1
            history["log"].append(dist)
            if dist < ctm_args.ctm_conv_tol:
                log.info({
                    "history_length": len(history['log']),
                    "history": history['log'],
                    "final_multiplets": compute_multiplets(env)
                })
                return True, history
            elif len(history['log']) >= ctm_args.ctm_max_iter:
                # iteration budget exhausted without convergence
                log.info({
                    "history_length": len(history['log']),
                    "history": history['log'],
                    "final_multiplets": compute_multiplets(env)
                })
                return False, history
            return False, history

    # 3) initialize environment
    ctm_env_init = ENV_C4V(args.chi, state)
    init_env(state, ctm_env_init)

    # 4) (optional) compute observables as given by initial environment
    e_curr0 = energy_f(state, ctm_env_init)
    obs_values0, obs_labels = model.eval_obs(state, ctm_env_init)
    print(", ".join(["epoch", "energy"] + obs_labels))
    print(", ".join([f"{-1}", f"{e_curr0}"] + [f"{v}" for v in obs_values0]))

    # 5) (main) execute CTM algorithm
    ctm_env_init, *ctm_log = ctmrg_c4v.run(state, ctm_env_init,
                                           conv_check=ctmrg_conv_rdm2x1)

    # 6) compute final observables
    e_curr0 = energy_f(state, ctm_env_init, force_cpu=True)
    obs_values0, obs_labels = model.eval_obs(state, ctm_env_init, force_cpu=True)
    history, t_ctm, t_obs = ctm_log
    print("\n")
    print(", ".join(["epoch", "energy"] + obs_labels))
    print("FINAL " + ", ".join([f"{e_curr0}"] + [f"{v}" for v in obs_values0]))
    print(f"TIMINGS ctm: {t_ctm} conv_check: {t_obs}")

    # 7) ----- additional observables ---------------------------------------------
    # spin-spin correlation function along a row
    corrSS = model.eval_corrf_SS(state, ctm_env_init, args.corrf_r,
                                 canonical=args.corrf_canonical)
    print("\n\nSS r " + " ".join([label for label in corrSS.keys()])
          + f" canonical {args.corrf_canonical}")
    for i in range(args.corrf_r):
        print(f"{i} " + " ".join([f"{corrSS[label][i]}" for label in corrSS.keys()]))

    # dimer-dimer correlation function along a row
    corrDD = model.eval_corrf_DD_H(state, ctm_env_init, args.corrf_r)
    print("\n\nDD r " + " ".join([label for label in corrDD.keys()]))
    for i in range(args.corrf_r):
        print(f"{i} " + " ".join([f"{corrDD[label][i]}" for label in corrDD.keys()]))

    # environment diagnostics: corner-matrix singular values
    print("\n\nspectrum(C)")
    u, s, v = torch.svd(ctm_env_init.C[ctm_env_init.keyC], compute_uv=False)
    for i in range(args.chi):
        print(f"{i} {s[i]}")

    # transfer operator spectrum (leading args.top_n eigenvalues)
    print("\n\nspectrum(T)")
    l = transferops_c4v.get_Top_spec_c4v(args.top_n, state, ctm_env_init)
    for i in range(l.size()[0]):
        print(f"{i} {l[i,0]} {l[i,1]}")
def main():
    """Optimize a C4v-symmetric iPEPS for the AKLT S=2 model and report
    observables of the optimized state."""
    # 0) parse command line arguments and configure simulation parameters
    cfg.configure(args)
    cfg.print_config()
    torch.set_num_threads(args.omp_cores)
    torch.manual_seed(args.seed)

    model = akltS2.AKLTS2_C4V_BIPARTITE()

    # initialize an ipeps
    if args.instate != None:
        state = read_ipeps_c4v(args.instate)
        if args.bond_dim > max(state.get_aux_bond_dims()):
            # extend the auxiliary dimensions
            state = extend_bond_dim(state, args.bond_dim)
        state.add_noise(args.instate_noise)
        state.sites[(0, 0)] = state.sites[(0, 0)] / torch.max(
            torch.abs(state.sites[(0, 0)]))
    elif args.opt_resume is not None:
        # empty state; actual data is loaded from the checkpoint
        state = IPEPS_C4V()
        state.load_checkpoint(args.opt_resume)
    elif args.ipeps_init_type == 'RANDOM':
        bond_dim = args.bond_dim
        A = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
                       dtype=cfg.global_args.torch_dtype,
                       device=cfg.global_args.device)
        A = make_c4v_symm(A)
        A = A / torch.max(torch.abs(A))
        state = IPEPS_C4V(A)
    else:
        raise ValueError("Missing trial state: -instate=None and -ipeps_init_type= "
                         + str(args.ipeps_init_type) + " is not supported")
    print(state)

    # convergence criterion based on the distance between successive 2x1
    # reduced density matrices
    @torch.no_grad()
    def ctmrg_conv_rho2x1dist(state, env, history, ctm_args=cfg.ctm_args):
        if not history:
            history = dict({"log": []})
        rdm2x1 = rdm2x1_sl(state, env, force_cpu=ctm_args.conv_check_cpu)
        dist = float('inf')
        if len(history["log"]) > 0:
            dist = torch.dist(rdm2x1, history["rdm"], p=2).item()
        history["rdm"] = rdm2x1
        history["log"].append(dist)
        if dist < ctm_args.ctm_conv_tol or len(
                history["log"]) >= ctm_args.ctm_max_iter:
            log.info({
                "history_length": len(history['log']),
                "history": history['log']
            })
            return True, history
        return False, history

    # compute environment and observables of the initial state
    ctm_env = ENV_C4V(args.chi, state)
    init_env(state, ctm_env)
    ctm_env, *ctm_log = ctmrg_c4v.run(state, ctm_env,
                                      conv_check=ctmrg_conv_rho2x1dist)
    loss = model.energy_1x1(state, ctm_env)
    obs_values, obs_labels = model.eval_obs(state, ctm_env)
    print(", ".join(["epoch", "energy"] + obs_labels))
    print(", ".join([f"{-1}", f"{loss}"] + [f"{v}" for v in obs_values]))

    def loss_fn(state, ctm_env_in, opt_context):
        # symmetrize on-site tensor and normalize by its largest element.
        # BUGFIX: normalize by the largest element in MAGNITUDE — dividing by
        # the signed maximum (torch.max(t)) flips the overall sign when the
        # dominant element is negative; the rest of this file uses abs.
        state = IPEPS_C4V(state.sites[(0, 0)])
        state.sites[(0, 0)] = make_c4v_symm(state.sites[(0, 0)])
        state.sites[(0, 0)] = state.sites[(0, 0)] \
            / torch.max(torch.abs(state.sites[(0, 0)]))

        # possibly re-initialize the environment
        if cfg.opt_args.opt_ctm_reinit:
            init_env(state, ctm_env_in)

        # 1) compute environment by CTMRG
        ctm_env_out, *ctm_log = ctmrg_c4v.run(state, ctm_env_in,
                                              conv_check=ctmrg_conv_rho2x1dist)
        loss = model.energy_1x1(state, ctm_env_out)
        return (loss, ctm_env_out, *ctm_log)

    @torch.no_grad()
    def obs_fn(state, ctm_env, opt_context):
        # print the current loss and observables once per optimization epoch
        epoch = len(opt_context["loss_history"]["loss"])
        loss = opt_context["loss_history"]["loss"][-1]
        obs_values, obs_labels = model.eval_obs(state, ctm_env)
        print(", ".join([f"{epoch}", f"{loss}"] + [f"{v}" for v in obs_values]))

    # optimize
    optimize_state(state, ctm_env, loss_fn, obs_fn=obs_fn)

    # compute final observables for the best variational state
    outputstatefile = args.out_prefix + "_state.json"
    state = read_ipeps_c4v(outputstatefile)
    ctm_env = ENV_C4V(args.chi, state)
    init_env(state, ctm_env)
    ctm_env, *ctm_log = ctmrg_c4v.run(state, ctm_env,
                                      conv_check=ctmrg_conv_rho2x1dist)
    opt_energy = model.energy_1x1(state, ctm_env)
    obs_values, obs_labels = model.eval_obs(state, ctm_env)
    print(", ".join([f"{args.opt_max_iter}", f"{opt_energy}"]
                    + [f"{v}" for v in obs_values]))
def main():
    """Run CTMRG for the C4v-symmetric AKLT S=2 iPEPS and evaluate energy,
    observables, correlation functions and the corner-matrix spectrum."""
    # 0) parse command line arguments and configure simulation parameters
    cfg.configure(args)
    cfg.print_config()
    torch.set_num_threads(args.omp_cores)
    torch.manual_seed(args.seed)

    model = akltS2.AKLTS2_C4V_BIPARTITE()

    # initialize an ipeps
    if args.instate!=None:
        state = read_ipeps_c4v(args.instate)
        if args.bond_dim > max(state.get_aux_bond_dims()):
            # extend the auxiliary dimensions
            state = extend_bond_dim(state, args.bond_dim)
        state.add_noise(args.instate_noise)
        # normalize by the element largest in magnitude
        state.sites[(0,0)]= state.sites[(0,0)]/torch.max(torch.abs(state.sites[(0,0)]))
    elif args.ipeps_init_type=='RANDOM':
        bond_dim = args.bond_dim
        A= torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),\
            dtype=cfg.global_args.torch_dtype,device=cfg.global_args.device)
        A= make_c4v_symm(A)
        A= A/torch.max(torch.abs(A))
        state = IPEPS_C4V(A)
    else:
        raise ValueError("Missing trial state: -instate=None and -ipeps_init_type= "\
            +str(args.ipeps_init_type)+" is not supported")
    print(state)

    # convergence by 2x1 subsystem reduced density matrix
    def ctmrg_conv_f(state, env, history, ctm_args=cfg.ctm_args):
        with torch.no_grad():
            if not history:
                history=dict({"log": []})
            rdm2x1= rdm2x1_sl(state, env, force_cpu=ctm_args.conv_check_cpu)
            dist= float('inf')
            # compute observables (printed at every CTM step)
            e_curr = model.energy_1x1(state, env)
            obs_values, obs_labels = model.eval_obs(state, env)
            print(", ".join([f"{len(history['log'])}",f"{e_curr}"]
                            +[f"{v}" for v in obs_values]))
            if len(history["log"]) > 1:
                dist= torch.dist(rdm2x1, history["rdm"], p=2).item()
            history["rdm"]=rdm2x1
            history["log"].append(dist)
            if dist<ctm_args.ctm_conv_tol:
                log.info({"history_length": len(history['log']),
                          "history": history['log']})
                return True, history
            return False, history

    # convergence by spectrum of the corner matrix (alternative, unused)
    # def ctmrg_conv_f(state, env, history, ctm_args=cfg.ctm_args):
    #     with torch.no_grad():
    #         if not history:
    #             history=[]
    #         e_curr = model.energy_1x1(state, env)
    #         obs_values, obs_labels = model.eval_obs(state, env)
    #         print(", ".join([f"{len(history)}",f"{e_curr}"]+[f"{v}" for v in obs_values]))
    #         u,s,v= torch.svd(env.C[env.keyC], compute_uv=False)
    #         history.append([s]+[e_curr.item()]+obs_values)
    #         if len(history) > 1 and torch.dist(history[-1][0],history[-2][0]) < ctm_args.ctm_conv_tol:
    #             return True
    #         return False

    # initialize environment and show observables of the initial state
    ctm_env_init = ENV_C4V(args.chi, state)
    init_env(state, ctm_env_init)
    print(ctm_env_init)

    e_curr0 = model.energy_1x1(state, ctm_env_init)
    obs_values0, obs_labels = model.eval_obs(state,ctm_env_init)
    print(", ".join(["epoch","energy"]+obs_labels))
    print(", ".join([f"{-1}",f"{e_curr0}"]+[f"{v}" for v in obs_values0]))

    # execute CTM algorithm
    ctm_env_init, *ctm_log = ctmrg_c4v.run(state, ctm_env_init,
        conv_check=ctmrg_conv_f)

    # spin-spin correlation function along a row
    corrSS= model.eval_corrf_SS(state, ctm_env_init, args.corrf_r)
    print("\nr "+" ".join([label for label in corrSS.keys()]))
    for i in range(args.corrf_r):
        print(f"{i} "+" ".join([f"{corrSS[label][i]}" for label in corrSS.keys()]))

    # dimer-dimer correlation function along a row
    corrDD= model.eval_corrf_DD_H(state, ctm_env_init, args.corrf_r)
    print("\nr "+" ".join([label for label in corrDD.keys()]))
    for i in range(args.corrf_r):
        print(f"{i} "+" ".join([f"{corrDD[label][i]}" for label in corrDD.keys()]))

    # environment diagnostics: corner-matrix singular values
    print("\nspectrum(C)")
    u,s,v= torch.svd(ctm_env_init.C[ctm_env_init.keyC], compute_uv=False)
    for i in range(args.chi):
        print(f"{i} {s[i]}")
def main():
    """Run CTMRG for a C4v-symmetric iPEPS of the J-Q model; evaluate energy,
    observables, correlation functions and environment spectra."""
    # 0) parse command line arguments and configure simulation parameters
    cfg.configure(args)
    cfg.print_config()
    torch.set_num_threads(args.omp_cores)
    torch.manual_seed(args.seed)

    # select the requested C4v ansatz of the J-Q model
    if args.c4v_type == "TI":
        model = jq.JQ_C4V(j1=args.j1, q=args.q)
    elif args.c4v_type == "BIPARTITE":
        model = jq.JQ_C4V_BIPARTITE(j1=args.j1, q=args.q)
    elif args.c4v_type == "PLAQUETTE":
        q_inter = args.q if args.q_inter is None else args.q_inter
        model = jq.JQ_C4V_PLAQUETTE(j1=args.j1, q=args.q, q_inter=q_inter)
    else:
        # BUGFIX: report the offending -c4v_type value (message previously
        # interpolated args.ipeps_init_type by mistake)
        raise ValueError("Unsupported C4v ansatz: -c4v_type= "
                         + str(args.c4v_type) + " is not supported")

    # initialize an ipeps
    # 1) define lattice-tiling function, that maps arbitrary vertex of square lattice
    # coord into one of coordinates within unit-cell of iPEPS ansatz
    if args.instate != None:
        state = read_ipeps_c4v(args.instate)
        if args.bond_dim > max(state.get_aux_bond_dims()):
            # extend the auxiliary dimensions
            state = extend_bond_dim(state, args.bond_dim)
        state.add_noise(args.instate_noise)
        state.sites[(0, 0)] = state.sites[(0, 0)] / torch.max(
            torch.abs(state.sites[(0, 0)]))
    elif args.ipeps_init_type == 'RANDOM':
        bond_dim = args.bond_dim
        A = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
                       dtype=cfg.global_args.torch_dtype,
                       device=cfg.global_args.device)
        A = make_c4v_symm(A)
        A = A / torch.max(torch.abs(A))
        state = IPEPS_C4V(A)
    else:
        raise ValueError("Missing trial state: -instate=None and -ipeps_init_type= "
                         + str(args.ipeps_init_type) + " is not supported")
    print(state)

    # convergence criterion based on the on-site energy; also records
    # observables at every CTM step
    def ctmrg_conv_energy(state, env, history, ctm_args=cfg.ctm_args):
        with torch.no_grad():
            if not history:
                history = []
            e_curr = model.energy_1x1(state, env)
            obs_values, obs_labels = model.eval_obs(state, env)
            history.append([e_curr.item()] + obs_values)
            print(", ".join([f"{len(history)}", f"{e_curr}"]
                            + [f"{v}" for v in obs_values]))
            if len(history) > 1 and abs(
                    history[-1][0] - history[-2][0]) < ctm_args.ctm_conv_tol:
                return True, history
            return False, history

    # initialize environment and show observables of the initial state
    ctm_env_init = ENV_C4V(args.chi, state)
    init_env(state, ctm_env_init)
    print(ctm_env_init)

    e_curr0 = model.energy_1x1(state, ctm_env_init)
    obs_values0, obs_labels = model.eval_obs(state, ctm_env_init)
    print(", ".join(["epoch", "energy"] + obs_labels))
    print(", ".join([f"{-1}", f"{e_curr0}"] + [f"{v}" for v in obs_values0]))

    # execute CTM algorithm
    ctm_env_init, *ctm_log = ctmrg_c4v.run(state, ctm_env_init,
                                           conv_check=ctmrg_conv_energy)

    # spin-spin correlation function along a row
    corrSS = model.eval_corrf_SS(state, ctm_env_init, args.corrf_r)
    print("\n\nSS r " + " ".join([label for label in corrSS.keys()]))
    for i in range(args.corrf_r):
        print(f"{i} " + " ".join([f"{corrSS[label][i]}" for label in corrSS.keys()]))

    # dimer-dimer correlation functions, horizontal and vertical
    corrDD = model.eval_corrf_DD_H(state, ctm_env_init, args.corrf_r)
    print("\n\nDD r " + " ".join([label for label in corrDD.keys()]))
    for i in range(args.corrf_r):
        print(f"{i} " + " ".join([f"{corrDD[label][i]}" for label in corrDD.keys()]))

    corrDD_V = model.eval_corrf_DD_V(state, ctm_env_init, args.corrf_r)
    print("\n\nDD_V r " + " ".join([label for label in corrDD_V.keys()]))
    for i in range(args.corrf_r):
        print(f"{i} " + " ".join([f"{corrDD_V[label][i]}" for label in corrDD_V.keys()]))

    # environment diagnostics: corner-matrix singular values
    print("\n\nspectrum(C)")
    u, s, v = torch.svd(ctm_env_init.C[ctm_env_init.keyC], compute_uv=False)
    for i in range(args.chi):
        print(f"{i} {s[i]}")

    # transfer operator spectrum (leading args.top_n eigenvalues)
    print("\n\nspectrum(T)")
    l = transferops_c4v.get_Top_spec_c4v(args.top_n, state, ctm_env_init)
    for i in range(l.size()[0]):
        print(f"{i} {l[i,0]} {l[i,1]}")