def main():
    cfg.configure(args)
    cfg.print_config()
    torch.set_num_threads(args.omp_cores)
    torch.manual_seed(args.seed)

    model = coupledLadders.COUPLEDLADDERS(alpha=args.alpha)

    # initialize an ipeps
    # 1) define lattice-tiling function, that maps arbitrary vertex of square lattice
    #    coord into one of coordinates within unit-cell of iPEPS ansatz
    if args.instate != None:
        state = read_ipeps(args.instate)
        if args.bond_dim > max(state.get_aux_bond_dims()):
            # extend the auxiliary dimensions
            state = extend_bond_dim(state, args.bond_dim)
        state.add_noise(args.instate_noise)
    elif args.opt_resume is not None:
        state = IPEPS(dict(), lX=2, lY=2)
        state.load_checkpoint(args.opt_resume)
    elif args.ipeps_init_type == 'RANDOM':
        bond_dim = args.bond_dim
        A = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
            dtype=cfg.global_args.torch_dtype, device=cfg.global_args.device)
        B = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
            dtype=cfg.global_args.torch_dtype, device=cfg.global_args.device)
        C = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
            dtype=cfg.global_args.torch_dtype, device=cfg.global_args.device)
        D = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
            dtype=cfg.global_args.torch_dtype, device=cfg.global_args.device)
        sites = {(0, 0): A, (1, 0): B, (0, 1): C, (1, 1): D}
        for k in sites.keys():
            sites[k] = sites[k] / torch.max(torch.abs(sites[k]))
        state = IPEPS(sites, lX=2, lY=2)
    else:
        raise ValueError("Missing trial state: --instate=None and --ipeps_init_type= "
            + str(args.ipeps_init_type) + " is not supported")

    if not state.dtype == model.dtype:
        cfg.global_args.torch_dtype = state.dtype
        print(f"dtype of initial state {state.dtype} and model {model.dtype} do not match.")
        print(f"Setting default dtype to {cfg.global_args.torch_dtype} and reinitializing "
            + "the model")
        model = coupledLadders.COUPLEDLADDERS(alpha=args.alpha)

    print(state)

    @torch.no_grad()
    def ctmrg_conv_energy(state, env, history, ctm_args=cfg.ctm_args):
        if not history:
            history = []
        e_curr = model.energy_2x1_1x2(state, env)
        e_curr = e_curr.real if e_curr.is_complex() else e_curr
        history.append(e_curr.item())
        if (len(history) > 1 and abs(history[-1] - history[-2]) < ctm_args.ctm_conv_tol) \
                or len(history) >= ctm_args.ctm_max_iter:
            log.info({"history_length": len(history), "history": history})
            return True, history
        return False, history

    ctm_env = ENV(args.chi, state)
    init_env(state, ctm_env)
    ctm_env, *ctm_log = ctmrg.run(state, ctm_env, conv_check=ctmrg_conv_energy)

    loss0 = model.energy_2x1_1x2(state, ctm_env)
    obs_values, obs_labels = model.eval_obs(state, ctm_env)
    print(", ".join(["epoch", "energy"] + obs_labels))
    print(", ".join([f"{-1}", f"{loss0}"] + [f"{v}" for v in obs_values]))

    def loss_fn(state, ctm_env_in, opt_context):
        ctm_args = opt_context["ctm_args"]
        opt_args = opt_context["opt_args"]

        # possibly re-initialize the environment
        if opt_args.opt_ctm_reinit:
            init_env(state, ctm_env_in)

        # 1) compute environment by CTMRG
        ctm_env_out, *ctm_log = ctmrg.run(state, ctm_env_in,
            conv_check=ctmrg_conv_energy, ctm_args=ctm_args)

        # 2) evaluate loss with converged environment
        loss = model.energy_2x1_1x2(state, ctm_env_out)

        return (loss, ctm_env_out, *ctm_log)

    def _to_json(l):
        re = [l[i, 0].item() for i in range(l.size()[0])]
        im = [l[i, 1].item() for i in range(l.size()[0])]
        return dict({"re": re, "im": im})

    @torch.no_grad()
    def obs_fn(state, ctm_env, opt_context):
        if ("line_search" in opt_context.keys() and not opt_context["line_search"]) \
                or not "line_search" in opt_context.keys():
            epoch = len(opt_context["loss_history"]["loss"])
            loss = opt_context["loss_history"]["loss"][-1]
            obs_values, obs_labels = model.eval_obs(state, ctm_env)
            print(", ".join([f"{epoch}", f"{loss}"] + [f"{v}" for v in obs_values]))

            with torch.no_grad():
                if args.top_freq > 0 and epoch % args.top_freq == 0:
                    coord_dir_pairs = [((0, 0), (1, 0)), ((0, 0), (0, 1)),
                                       ((1, 1), (1, 0)), ((1, 1), (0, 1))]
                    for c, d in coord_dir_pairs:
                        # transfer operator spectrum
                        print(f"TOP spectrum(T)[{c},{d}] ", end="")
                        l = transferops.get_Top_spec(args.top_n, c, d, state, ctm_env)
                        print("TOP " + json.dumps(_to_json(l)))

    # optimize
    optimize_state(state, ctm_env, loss_fn, obs_fn=obs_fn)

    # compute final observables for the best variational state
    outputstatefile = args.out_prefix + "_state.json"
    state = read_ipeps(outputstatefile)
    ctm_env = ENV(args.chi, state)
    init_env(state, ctm_env)
    ctm_env, *ctm_log = ctmrg.run(state, ctm_env, conv_check=ctmrg_conv_energy)
    loss0 = model.energy_2x1_1x2(state, ctm_env)
    obs_values, obs_labels = model.eval_obs(state, ctm_env)
    print(", ".join([f"{args.opt_max_iter}", f"{loss0}"] + [f"{v}" for v in obs_values]))
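The comment at the top of this script refers to a lattice-tiling function, but the script itself relies on the default coordinate mapping that IPEPS installs for its 2x2 unit cell. A minimal sketch of an explicit vertexToSite for that cell, mirroring the 4SITE mapping used in the J1J2 script further below (the function and its use here are illustrative, not part of this script):

# Hypothetical explicit tiling for the 2x2 unit cell used above; the script itself
# relies on the default mapping supplied by IPEPS(lX=2, lY=2).
def lattice_to_site(coord):
    # fold an arbitrary square-lattice vertex (x, y) onto the 2x2 unit cell
    vx = (coord[0] + abs(coord[0]) * 2) % 2
    vy = (coord[1] + abs(coord[1]) * 2) % 2
    return (vx, vy)

# e.g. state = IPEPS(sites, vertexToSite=lattice_to_site, lX=2, lY=2)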
def main():
    cfg.configure(args)
    cfg.print_config()
    torch.set_num_threads(args.omp_cores)
    torch.manual_seed(args.seed)

    model = j1j2.J1J2_C4V_BIPARTITE(j1=args.j1, j2=args.j2, hz_stag=args.hz_stag,
        delta_zz=args.delta_zz)
    energy_f = model.energy_1x1_lowmem

    # initialize the ipeps
    if args.instate != None:
        state = read_ipeps_c4v(args.instate)
        if args.bond_dim > max(state.get_aux_bond_dims()):
            # extend the auxiliary dimensions
            state = extend_bond_dim(state, args.bond_dim)
        state.add_noise(args.instate_noise)
        # state.sites[(0,0)]= state.sites[(0,0)]/torch.max(torch.abs(state.sites[(0,0)]))
        state.sites[(0, 0)] = state.site() / state.site().norm()
    elif args.opt_resume is not None:
        state = IPEPS_C4V(torch.tensor(0.))
        state.load_checkpoint(args.opt_resume)
    elif args.ipeps_init_type == 'RANDOM':
        bond_dim = args.bond_dim
        A = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
            dtype=cfg.global_args.dtype, device=cfg.global_args.device)
        # A= make_c4v_symm(A)
        # A= A/torch.max(torch.abs(A))
        A = A / A.norm()
        state = IPEPS_C4V(A)
    else:
        raise ValueError("Missing trial state: -instate=None and -ipeps_init_type= "
            + str(args.ipeps_init_type) + " is not supported")

    print(state)

    @torch.no_grad()
    def ctmrg_conv_f(state, env, history, ctm_args=cfg.ctm_args):
        if not history:
            history = dict({"log": []})
        rdm2x1 = rdm2x1_sl(state, env, force_cpu=ctm_args.conv_check_cpu)
        dist = float('inf')
        if len(history["log"]) > 0:
            dist = torch.dist(rdm2x1, history["rdm"], p=2).item()
        history["rdm"] = rdm2x1
        history["log"].append(dist)
        if dist < ctm_args.ctm_conv_tol or len(history["log"]) >= ctm_args.ctm_max_iter:
            log.info({"history_length": len(history['log']), "history": history['log']})
            return True, history
        return False, history

    state_sym = to_ipeps_c4v(state)
    ctm_env = ENV_C4V(args.chi, state_sym)
    init_env(state_sym, ctm_env)
    ctm_env, *ctm_log = ctmrg_c4v.run(state_sym, ctm_env, conv_check=ctmrg_conv_f)

    loss = energy_f(state_sym, ctm_env)
    obs_values, obs_labels = model.eval_obs(state_sym, ctm_env)
    print(", ".join(["epoch", "energy"] + obs_labels))
    print(", ".join([f"{-1}", f"{loss}"] + [f"{v}" for v in obs_values]))

    def loss_fn(state, ctm_env_in, opt_context):
        ctm_args = opt_context["ctm_args"]
        opt_args = opt_context["opt_args"]

        # 0) preprocess
        # create a copy of state, symmetrize and normalize making all operations
        # tracked. This does not "overwrite" the parameters tensors, living outside
        # the scope of loss_fn
        state_sym = to_ipeps_c4v(state, normalize=True)

        # possibly re-initialize the environment
        if opt_args.opt_ctm_reinit:
            init_env(state_sym, ctm_env_in)

        # 1) compute environment by CTMRG
        ctm_env_out, *ctm_log = ctmrg_c4v.run(state_sym, ctm_env_in,
            conv_check=ctmrg_conv_f, ctm_args=ctm_args)

        # 2) evaluate loss with converged environment
        loss = energy_f(state_sym, ctm_env_out, force_cpu=args.force_cpu)

        return (loss, ctm_env_out, *ctm_log)

    def _to_json(l):
        re = [l[i, 0].item() for i in range(l.size()[0])]
        im = [l[i, 1].item() for i in range(l.size()[0])]
        return dict({"re": re, "im": im})

    @torch.no_grad()
    def obs_fn(state, ctm_env, opt_context):
        if ("line_search" in opt_context.keys() and not opt_context["line_search"]) \
                or not "line_search" in opt_context.keys():
            state_sym = to_ipeps_c4v(state, normalize=True)
            epoch = len(opt_context["loss_history"]["loss"])
            loss = opt_context["loss_history"]["loss"][-1]
            obs_values, obs_labels = model.eval_obs(state_sym, ctm_env)
            print(", ".join([f"{epoch}", f"{loss}"] + [f"{v}" for v in obs_values]
                + [f"{torch.max(torch.abs(state.site((0,0))))}"]))

            if args.top_freq > 0 and epoch % args.top_freq == 0:
                coord_dir_pairs = [((0, 0), (1, 0))]
                for c, d in coord_dir_pairs:
                    # transfer operator spectrum
                    print(f"TOP spectrum(T)[{c},{d}] ", end="")
                    l = transferops_c4v.get_Top_spec_c4v(args.top_n, state_sym, ctm_env)
                    print("TOP " + json.dumps(_to_json(l)))

    def post_proc(state, ctm_env, opt_context):
        symm, max_err = verify_c4v_symm_A1(state.site())
        # print(f"post_proc {symm} {max_err}")
        if not symm:
            # force symmetrization outside of autograd
            with torch.no_grad():
                symm_site = make_c4v_symm(state.site())
                # we **cannot** simply normalize the on-site tensors, as the LBFGS
                # takes into account the scale
                # symm_site= symm_site/torch.max(torch.abs(symm_site))
                state.sites[(0, 0)].copy_(symm_site)

    # optimize
    # optimize_state(state, ctm_env, loss_fn, obs_fn=obs_fn, post_proc=post_proc)
    optimize_state(state, ctm_env, loss_fn, obs_fn=obs_fn)

    # compute final observables for the best variational state
    outputstatefile = args.out_prefix + "_state.json"
    state = read_ipeps_c4v(outputstatefile)
    ctm_env = ENV_C4V(args.chi, state)
    init_env(state, ctm_env)
    ctm_env, *ctm_log = ctmrg_c4v.run(state, ctm_env, conv_check=ctmrg_conv_f)
    opt_energy = energy_f(state, ctm_env)
    obs_values, obs_labels = model.eval_obs(state, ctm_env)
    print(", ".join([f"{args.opt_max_iter}", f"{opt_energy}"] + [f"{v}" for v in obs_values]))
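The obs_fn above (and its counterparts in the other scripts) prints the leading transfer-operator eigenvalues as a JSON payload of the form {"re": [...], "im": [...]} at the end of lines marked with "TOP ". A small post-processing sketch for reading such a line back into complex numbers; parse_top_line is a hypothetical helper, not part of these scripts:

import json

def parse_top_line(line):
    # take the JSON payload after the last "TOP " marker and rebuild complex eigenvalues
    data = json.loads(line.rsplit("TOP ", 1)[1])
    return [complex(r, i) for r, i in zip(data["re"], data["im"])]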
def main():
    cfg.configure(args)
    cfg.print_config()
    torch.set_num_threads(args.omp_cores)
    torch.manual_seed(args.seed)

    model = ising.ISING_C4V(hx=args.hx, q=args.q)
    energy_f = model.energy_1x1_nn if args.q == 0 else model.energy_1x1_plaqette

    # initialize an ipeps
    if args.instate != None:
        state = read_ipeps_c4v(args.instate)
        if args.bond_dim > max(state.get_aux_bond_dims()):
            # extend the auxiliary dimensions
            state = extend_bond_dim(state, args.bond_dim)
        state.add_noise(args.instate_noise)
        state.sites[(0, 0)] = state.sites[(0, 0)] / torch.max(torch.abs(state.sites[(0, 0)]))
    elif args.opt_resume is not None:
        state = IPEPS_C4V(torch.tensor(0.))
        state.load_checkpoint(args.opt_resume)
    elif args.ipeps_init_type == 'RANDOM':
        bond_dim = args.bond_dim
        A = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
            dtype=cfg.global_args.dtype, device=cfg.global_args.device)
        # A= make_c4v_symm(A)
        # A= A/torch.max(torch.abs(A))
        A = A / A.norm()
        state = IPEPS_C4V(A)
    else:
        raise ValueError("Missing trial state: -instate=None and -ipeps_init_type= "
            + str(args.ipeps_init_type) + " is not supported")

    print(state)

    @torch.no_grad()
    def ctmrg_conv_energy(state, env, history, ctm_args=cfg.ctm_args):
        if not history:
            history = []
        e_curr = energy_f(state, env)
        history.append(e_curr.item())
        if (len(history) > 1 and abs(history[-1] - history[-2]) < ctm_args.ctm_conv_tol) \
                or len(history) >= ctm_args.ctm_max_iter:
            log.info({"history_length": len(history), "history": history})
            return True, history
        return False, history

    @torch.no_grad()
    def ctmrg_conv_rdm2x1(state, env, history, ctm_args=cfg.ctm_args):
        if not history:
            history = dict({"log": []})
        rdm2x1 = rdm2x1_sl(state, env, force_cpu=ctm_args.conv_check_cpu)
        dist = float('inf')
        if len(history["log"]) > 0:
            dist = torch.dist(rdm2x1, history["rdm"], p=2).item()
        history["rdm"] = rdm2x1
        history["log"].append(dist)
        if dist < ctm_args.ctm_conv_tol or len(history["log"]) >= ctm_args.ctm_max_iter:
            log.info({"history_length": len(history['log']), "history": history['log']})
            return True, history
        return False, history

    state_sym = to_ipeps_c4v(state)
    ctm_env = ENV_C4V(args.chi, state_sym)
    init_env(state_sym, ctm_env)
    ctm_env, *ctm_log = ctmrg_c4v.run(state_sym, ctm_env, conv_check=ctmrg_conv_rdm2x1)

    loss = energy_f(state_sym, ctm_env)
    obs_values, obs_labels = model.eval_obs(state_sym, ctm_env)
    print(", ".join(["epoch", "energy"] + obs_labels))
    print(", ".join([f"{-1}", f"{loss}"] + [f"{v}" for v in obs_values]))

    def loss_fn(state, ctm_env_in, opt_context):
        ctm_args = opt_context["ctm_args"]
        opt_args = opt_context["opt_args"]

        # create a copy of state, symmetrize and normalize making all operations
        # tracked. This does not "overwrite" the parameters tensors, living outside
        # the scope of loss_fn
        state_sym = to_ipeps_c4v(state, normalize=True)

        # possibly re-initialize the environment
        if cfg.opt_args.opt_ctm_reinit:
            init_env(state_sym, ctm_env_in)

        # 1) compute environment by CTMRG
        ctm_env_out, *ctm_log = ctmrg_c4v.run(state_sym, ctm_env_in,
            conv_check=ctmrg_conv_rdm2x1, ctm_args=ctm_args)

        loss = energy_f(state_sym, ctm_env_out)

        return (loss, ctm_env_out, *ctm_log)

    def _to_json(l):
        re = [l[i, 0].item() for i in range(l.size()[0])]
        im = [l[i, 1].item() for i in range(l.size()[0])]
        return dict({"re": re, "im": im})

    @torch.no_grad()
    def obs_fn(state, ctm_env, opt_context):
        if ("line_search" in opt_context.keys() and not opt_context["line_search"]) \
                or not "line_search" in opt_context.keys():
            state_sym = to_ipeps_c4v(state, normalize=True)
            epoch = len(opt_context["loss_history"]["loss"])
            loss = opt_context["loss_history"]["loss"][-1]
            obs_values, obs_labels = model.eval_obs(state_sym, ctm_env)
            print(", ".join([f"{epoch}", f"{loss}"] + [f"{v}" for v in obs_values]
                + [f"{state.site().norm()}"]))

            if args.top_freq > 0 and epoch % args.top_freq == 0:
                coord_dir_pairs = [((0, 0), (1, 0))]
                for c, d in coord_dir_pairs:
                    # transfer operator spectrum
                    print(f"TOP spectrum(T)[{c},{d}] ", end="")
                    l = transferops_c4v.get_Top_spec_c4v(args.top_n, state_sym, ctm_env)
                    print("TOP " + json.dumps(_to_json(l)))

    # optimize
    optimize_state(state, ctm_env, loss_fn, obs_fn=obs_fn)

    # compute final observables for the best variational state
    outputstatefile = args.out_prefix + "_state.json"
    state = read_ipeps_c4v(outputstatefile)
    ctm_env = ENV_C4V(args.chi, state)
    init_env(state, ctm_env)
    ctm_env, *ctm_log = ctmrg_c4v.run(state, ctm_env, conv_check=ctmrg_conv_energy)
    opt_energy = energy_f(state, ctm_env)
    obs_values, obs_labels = model.eval_obs(state, ctm_env)
    print(", ".join([f"{args.opt_max_iter}", f"{opt_energy}"] + [f"{v}" for v in obs_values]))
def main():
    cfg.configure(args)
    cfg.print_config()
    torch.set_num_threads(args.omp_cores)
    torch.manual_seed(args.seed)

    model = ising.ISING(hx=args.hx, q=args.q)

    # initialize an ipeps
    # 1) define lattice-tiling function, that maps arbitrary vertex of square lattice
    #    coord into one of coordinates within unit-cell of iPEPS ansatz
    if args.instate != None:
        state = read_ipeps(args.instate, vertexToSite=None)
        if args.bond_dim > max(state.get_aux_bond_dims()):
            # extend the auxiliary dimensions
            state = extend_bond_dim(state, args.bond_dim)
        state.add_noise(args.instate_noise)
    elif args.opt_resume is not None:
        state = IPEPS(dict(), lX=1, lY=1)
        state.load_checkpoint(args.opt_resume)
    elif args.ipeps_init_type == 'RANDOM':
        bond_dim = args.bond_dim
        A = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
            dtype=cfg.global_args.torch_dtype, device=cfg.global_args.device)
        # normalization of initial random tensors
        A = A / torch.max(torch.abs(A))
        sites = {(0, 0): A}
        state = IPEPS(sites)
    else:
        raise ValueError("Missing trial state: -instate=None and -ipeps_init_type= "
            + str(args.ipeps_init_type) + " is not supported")

    print(state)

    @torch.no_grad()
    def ctmrg_conv_energy(state, env, history, ctm_args=cfg.ctm_args):
        if not history:
            history = []
        e_curr = model.energy_1x1(state, env)
        history.append(e_curr.item())
        if (len(history) > 1 and abs(history[-1] - history[-2]) < ctm_args.ctm_conv_tol) \
                or len(history) >= ctm_args.ctm_max_iter:
            log.info({"history_length": len(history), "history": history})
            return True, history
        return False, history

    ctm_env = ENV(args.chi, state)
    init_env(state, ctm_env)
    ctm_env, *ctm_log = ctmrg.run(state, ctm_env, conv_check=ctmrg_conv_energy)

    loss0 = model.energy_1x1(state, ctm_env)
    obs_values, obs_labels = model.eval_obs(state, ctm_env)
    print(", ".join(["epoch", "energy"] + obs_labels))
    print(", ".join([f"{-1}", f"{loss0}"] + [f"{v}" for v in obs_values]))

    def loss_fn(state, ctm_env_in, opt_context):
        # possibly re-initialize the environment
        if cfg.opt_args.opt_ctm_reinit:
            init_env(state, ctm_env_in)

        # 1) compute environment by CTMRG
        ctm_env_out, *ctm_log = ctmrg.run(state, ctm_env_in, conv_check=ctmrg_conv_energy)

        loss = model.energy_1x1(state, ctm_env_out)

        return (loss, ctm_env_out, *ctm_log)

    @torch.no_grad()
    def obs_fn(state, ctm_env, opt_context):
        epoch = len(opt_context["loss_history"]["loss"])
        loss = opt_context["loss_history"]["loss"][-1]
        obs_values, obs_labels = model.eval_obs(state, ctm_env)
        print(", ".join([f"{epoch}", f"{loss}"] + [f"{v}" for v in obs_values]))

    # optimize
    optimize_state(state, ctm_env, loss_fn, obs_fn=obs_fn)

    # compute final observables for the best variational state
    outputstatefile = args.out_prefix + "_state.json"
    state = read_ipeps(outputstatefile)
    ctm_env = ENV(args.chi, state)
    init_env(state, ctm_env)
    ctm_env, *ctm_log = ctmrg.run(state, ctm_env, conv_check=ctmrg_conv_energy)
    loss0 = model.energy_1x1(state, ctm_env)
    obs_values, obs_labels = model.eval_obs(state, ctm_env)
    print(", ".join([f"{args.opt_max_iter}", f"{loss0}"] + [f"{v}" for v in obs_values]))
def main():
    cfg.configure(args)
    cfg.print_config()
    torch.set_num_threads(args.omp_cores)
    torch.manual_seed(args.seed)

    if args.c4v_type == "TI":
        model = jq.JQ_C4V(j1=args.j1, q=args.q)
    elif args.c4v_type == "BIPARTITE":
        model = jq.JQ_C4V_BIPARTITE(j1=args.j1, q=args.q)
    elif args.c4v_type == "PLAQUETTE":
        q_inter = args.q if args.q_inter is None else args.q_inter
        model = jq.JQ_C4V_PLAQUETTE(j1=args.j1, q=args.q, q_inter=q_inter)
    else:
        raise ValueError("Unsupported C4v ansatz: -c4v_type= "
            + str(args.c4v_type) + " is not supported")

    # initialize an ipeps
    # 1) define lattice-tiling function, that maps arbitrary vertex of square lattice
    #    coord into one of coordinates within unit-cell of iPEPS ansatz
    if args.instate != None:
        state = read_ipeps_c4v(args.instate)
        if args.bond_dim > max(state.get_aux_bond_dims()):
            # extend the auxiliary dimensions
            state = extend_bond_dim(state, args.bond_dim)
        state.add_noise(args.instate_noise)
        state.sites[(0, 0)] = state.sites[(0, 0)] / torch.max(torch.abs(state.sites[(0, 0)]))
    elif args.opt_resume is not None:
        state = IPEPS_C4V(torch.tensor(0.))
        state.load_checkpoint(args.opt_resume)
    elif args.ipeps_init_type == 'RANDOM':
        bond_dim = args.bond_dim
        A = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
            dtype=cfg.global_args.dtype, device=cfg.global_args.device)
        A = make_c4v_symm(A)
        A = A / torch.max(torch.abs(A))
        state = IPEPS_C4V(A)
    else:
        raise ValueError("Missing trial state: -instate=None and -ipeps_init_type= "
            + str(args.ipeps_init_type) + " is not supported")

    print(state)

    @torch.no_grad()
    def ctmrg_conv_energy(state, env, history, ctm_args=cfg.ctm_args):
        if not history:
            history = []
        e_curr = model.energy_1x1(state, env)
        history.append(e_curr.item())
        if (len(history) > 1 and abs(history[-1] - history[-2]) < ctm_args.ctm_conv_tol) \
                or len(history) >= ctm_args.ctm_max_iter:
            log.info({"history_length": len(history), "history": history})
            return True, history
        return False, history

    ctm_env = ENV_C4V(args.chi, state)
    init_env(state, ctm_env)
    ctm_env, *ctm_log = ctmrg_c4v.run(state, ctm_env, conv_check=ctmrg_conv_energy)

    loss = model.energy_1x1(state, ctm_env)
    obs_values, obs_labels = model.eval_obs(state, ctm_env)
    print(", ".join(["epoch", "energy"] + obs_labels))
    print(", ".join([f"{-1}", f"{loss}"] + [f"{v}" for v in obs_values]))

    def loss_fn(state, ctm_env_in, opt_context):
        ctm_args = opt_context["ctm_args"]
        opt_args = opt_context["opt_args"]

        # symmetrize on-site tensor
        state_sym = to_ipeps_c4v(state, normalize=True)

        # possibly re-initialize the environment
        if opt_args.opt_ctm_reinit:
            init_env(state_sym, ctm_env_in)

        # 1) compute environment by CTMRG
        ctm_env_out, *ctm_log = ctmrg_c4v.run(state_sym, ctm_env_in,
            conv_check=ctmrg_conv_energy, ctm_args=ctm_args)

        # 2) evaluate loss with converged environment
        loss = model.energy_1x1(state_sym, ctm_env_out)

        return (loss, ctm_env_out, *ctm_log)

    @torch.no_grad()
    def obs_fn(state, ctm_env, opt_context):
        if ("line_search" in opt_context.keys() and not opt_context["line_search"]) \
                or not "line_search" in opt_context.keys():
            epoch = len(opt_context["loss_history"]["loss"])
            loss = opt_context["loss_history"]["loss"][-1]
            obs_values, obs_labels = model.eval_obs(state, ctm_env)
            print(", ".join([f"{epoch}", f"{loss}"] + [f"{v}" for v in obs_values]))

    # optimize
    optimize_state(state, ctm_env, loss_fn, obs_fn=obs_fn)

    # compute final observables for the best variational state
    outputstatefile = args.out_prefix + "_state.json"
    state = read_ipeps_c4v(outputstatefile)
    ctm_env = ENV_C4V(args.chi, state)
    init_env(state, ctm_env)
    ctm_env, *ctm_log = ctmrg_c4v.run(state, ctm_env, conv_check=ctmrg_conv_energy)
    opt_energy = model.energy_1x1(state, ctm_env)
    obs_values, obs_labels = model.eval_obs(state, ctm_env)
    print(", ".join([f"{args.opt_max_iter}", f"{opt_energy}"] + [f"{v}" for v in obs_values]))
def main():
    cfg.configure(args)
    cfg.print_config()
    torch.set_num_threads(args.omp_cores)
    torch.manual_seed(args.seed)

    model = j1j2.J1J2_C4V_BIPARTITE(j1=args.j1, j2=args.j2, j3=args.j3,
        hz_stag=args.hz_stag, delta_zz=args.delta_zz)
    energy_f = model.energy_1x1_lowmem

    # initialize an ipeps
    if args.instate != None:
        state = read_ipeps_u1(args.instate, vertexToSite=None)
        assert len(state.coeffs) == 1, "Not a 1-site ipeps"
        # TODO extending from smaller bond-dim to higher bond-dim is
        # currently not possible
        state.add_noise(args.instate_noise)
    elif args.opt_resume is not None:
        if args.bond_dim in [2, 3, 4, 5, 6, 7, 8, 9]:
            u1sym_t = tenU1.import_sym_tensors(2, args.bond_dim, "A_1",
                infile=f"u1sym/D{args.bond_dim}_U1_{args.u1_class}.txt",
                dtype=cfg.global_args.torch_dtype, device=cfg.global_args.device)
        else:
            raise ValueError("Unsupported --bond_dim= " + str(args.bond_dim))
        A = torch.zeros(len(u1sym_t), dtype=cfg.global_args.torch_dtype,
            device=cfg.global_args.device)
        coeffs = {(0, 0): A}
        state = IPEPS_U1SYM(u1sym_t, coeffs)
        state.load_checkpoint(args.opt_resume)
    elif args.ipeps_init_type == 'RANDOM':
        if args.bond_dim in [2, 3, 4, 5, 6, 7, 8, 9]:
            u1sym_t = tenU1.import_sym_tensors(2, args.bond_dim, "A_1",
                infile=f"u1sym/D{args.bond_dim}_U1_{args.u1_class}.txt",
                dtype=cfg.global_args.torch_dtype, device=cfg.global_args.device)
        else:
            raise ValueError("Unsupported --bond_dim= " + str(args.bond_dim))
        A = torch.rand(len(u1sym_t), dtype=cfg.global_args.torch_dtype,
            device=cfg.global_args.device)
        A = A / torch.max(torch.abs(A))
        coeffs = {(0, 0): A}
        state = IPEPS_U1SYM(u1sym_t, coeffs)
    else:
        raise ValueError("Missing trial state: -instate=None and -ipeps_init_type= "
            + str(args.ipeps_init_type) + " is not supported")

    print(state)

    @torch.no_grad()
    def ctmrg_conv_f(state, env, history, ctm_args=cfg.ctm_args):
        if not history:
            history = dict({"log": []})
        rdm2x1 = rdm2x1_sl(state, env, force_cpu=ctm_args.conv_check_cpu)
        dist = float('inf')
        if len(history["log"]) > 1:
            dist = torch.dist(rdm2x1, history["rdm"], p=2).item()
        history["rdm"] = rdm2x1
        history["log"].append(dist)
        if dist < ctm_args.ctm_conv_tol:
            log.info({"history_length": len(history['log']), "history": history['log'],
                "final_multiplets": compute_multiplets(ctm_env)})
            return True, history
        elif len(history['log']) >= ctm_args.ctm_max_iter:
            log.info({"history_length": len(history['log']), "history": history['log'],
                "final_multiplets": compute_multiplets(ctm_env)})
            return False, history
        return False, history

    ctm_env = ENV_C4V(args.chi, state)
    init_env(state, ctm_env)

    loss0 = energy_f(state, ctm_env, force_cpu=True)
    obs_values, obs_labels = model.eval_obs(state, ctm_env, force_cpu=True)
    print(", ".join(["epoch", "energy"] + obs_labels))
    print(", ".join([f"{-1}", f"{loss0}"] + [f"{v}" for v in obs_values]))

    def loss_fn(state, ctm_env_in, opt_context):
        ctm_args = opt_context["ctm_args"]
        opt_args = opt_context["opt_args"]

        # build on-site tensors from u1sym components
        state.sites = state.build_onsite_tensors()

        # possibly re-initialize the environment
        if opt_args.opt_ctm_reinit:
            init_env(state, ctm_env_in)

        # 1) compute environment by CTMRG
        ctm_env_out, history, t_ctm, t_obs = ctmrg_c4v.run(state, ctm_env_in,
            conv_check=ctmrg_conv_f, ctm_args=ctm_args)
        loss0 = energy_f(state, ctm_env_out, force_cpu=True)

        # 2) perform a single extra CTMRG step and take the larger (less favourable)
        #    of the two energies as the loss
        loc_ctm_args = copy.deepcopy(ctm_args)
        loc_ctm_args.ctm_max_iter = 1
        ctm_env_out, history1, t_ctm1, t_obs1 = ctmrg_c4v.run(state, ctm_env_out,
            ctm_args=loc_ctm_args)
        loss1 = energy_f(state, ctm_env_out, force_cpu=True)

        # loss=(loss0+loss1)/2
        loss = torch.max(loss0, loss1)

        return loss, ctm_env_out, history, t_ctm, t_obs

    def _to_json(l):
        re = [l[i, 0].item() for i in range(l.size()[0])]
        im = [l[i, 1].item() for i in range(l.size()[0])]
        return dict({"re": re, "im": im})

    @torch.no_grad()
    def obs_fn(state, ctm_env, opt_context):
        if opt_context["line_search"]:
            epoch = len(opt_context["loss_history"]["loss_ls"])
            loss = opt_context["loss_history"]["loss_ls"][-1]
            print("LS", end=" ")
        else:
            epoch = len(opt_context["loss_history"]["loss"])
            loss = opt_context["loss_history"]["loss"][-1]
        obs_values, obs_labels = model.eval_obs(state, ctm_env, force_cpu=True)
        print(", ".join([f"{epoch}", f"{loss}"] + [f"{v}" for v in obs_values]))

        if (not opt_context["line_search"]) and args.top_freq > 0 and epoch % args.top_freq == 0:
            coord_dir_pairs = [((0, 0), (1, 0))]
            for c, d in coord_dir_pairs:
                # transfer operator spectrum
                print(f"TOP spectrum(T)[{c},{d}] ", end="")
                l = transferops_c4v.get_Top_spec_c4v(args.top_n, state, ctm_env)
                print("TOP " + json.dumps(_to_json(l)))

    # optimize
    optimize_state(state, ctm_env, loss_fn, obs_fn=obs_fn)

    # compute final observables for the best variational state
    outputstatefile = args.out_prefix + "_state.json"
    state = read_ipeps_u1(outputstatefile)
    ctm_env = ENV_C4V(args.chi, state)
    init_env(state, ctm_env)
    ctm_env, *ctm_log = ctmrg_c4v.run(state, ctm_env, conv_check=ctmrg_conv_f)
    opt_energy = energy_f(state, ctm_env, force_cpu=True)
    obs_values, obs_labels = model.eval_obs(state, ctm_env, force_cpu=True)
    print(", ".join([f"{args.opt_max_iter}", f"{opt_energy}"] + [f"{v}" for v in obs_values]))
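In this script the variational parameters are not the on-site tensor entries themselves but the coefficients stored in state.coeffs; state.build_onsite_tensors() assembles the on-site tensor from them before each CTMRG run. Conceptually this amounts to a linear combination of fixed symmetric basis tensors, as in the sketch below (onsite_from_coeffs, basis_tensors and coeffs are illustrative placeholders, not the IPEPS_U1SYM internals):

def onsite_from_coeffs(basis_tensors, coeffs):
    # weighted sum of fixed U(1)-symmetric basis tensors; the weights are the
    # variational parameters exposed to the optimizer
    return sum(c * t for c, t in zip(coeffs, basis_tensors))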
def main():
    cfg.configure(args)
    cfg.print_config()
    torch.set_num_threads(args.omp_cores)
    torch.manual_seed(args.seed)

    model = j1j2.J1J2(j1=args.j1, j2=args.j2)

    # initialize an ipeps
    # 1) define lattice-tiling function, that maps arbitrary vertex of square lattice
    #    coord into one of coordinates within unit-cell of iPEPS ansatz
    if args.tiling == "BIPARTITE":
        def lattice_to_site(coord):
            vx = (coord[0] + abs(coord[0]) * 2) % 2
            vy = abs(coord[1])
            return ((vx + vy) % 2, 0)
    elif args.tiling == "1SITE":
        def lattice_to_site(coord):
            return (0, 0)
    elif args.tiling == "2SITE":
        def lattice_to_site(coord):
            vx = (coord[0] + abs(coord[0]) * 2) % 2
            vy = (coord[1] + abs(coord[1]) * 1) % 1
            return (vx, vy)
    elif args.tiling == "4SITE":
        def lattice_to_site(coord):
            vx = (coord[0] + abs(coord[0]) * 2) % 2
            vy = (coord[1] + abs(coord[1]) * 2) % 2
            return (vx, vy)
    elif args.tiling == "8SITE":
        def lattice_to_site(coord):
            shift_x = coord[0] + 2 * (coord[1] // 2)
            vx = shift_x % 4
            vy = coord[1] % 2
            return (vx, vy)
    else:
        raise ValueError("Invalid tiling: " + str(args.tiling) + " Supported options: "
            + "BIPARTITE, 1SITE, 2SITE, 4SITE, 8SITE")

    if args.instate != None:
        state = read_ipeps(args.instate, vertexToSite=lattice_to_site)
        if args.bond_dim > max(state.get_aux_bond_dims()):
            # extend the auxiliary dimensions
            state = extend_bond_dim(state, args.bond_dim)
        state.add_noise(args.instate_noise)
    elif args.opt_resume is not None:
        if args.tiling == "BIPARTITE" or args.tiling == "2SITE":
            state = IPEPS(dict(), lX=2, lY=1)
        elif args.tiling == "1SITE":
            state = IPEPS(dict(), lX=1, lY=1)
        elif args.tiling == "4SITE":
            state = IPEPS(dict(), lX=2, lY=2)
        elif args.tiling == "8SITE":
            state = IPEPS(dict(), lX=4, lY=2)
        state.load_checkpoint(args.opt_resume)
    elif args.ipeps_init_type == 'RANDOM':
        bond_dim = args.bond_dim
        A = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
            dtype=cfg.global_args.torch_dtype, device=cfg.global_args.device)
        # normalization of initial random tensors
        A = A / torch.max(torch.abs(A))
        sites = {(0, 0): A}

        if args.tiling in ["BIPARTITE", "2SITE", "4SITE", "8SITE"]:
            B = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
                dtype=cfg.global_args.torch_dtype, device=cfg.global_args.device)
            sites[(1, 0)] = B / torch.max(torch.abs(B))

        if args.tiling in ["4SITE", "8SITE"]:
            C = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
                dtype=cfg.global_args.torch_dtype, device=cfg.global_args.device)
            D = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
                dtype=cfg.global_args.torch_dtype, device=cfg.global_args.device)
            sites[(0, 1)] = C / torch.max(torch.abs(C))
            sites[(1, 1)] = D / torch.max(torch.abs(D))

        if args.tiling == "8SITE":
            E = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
                dtype=cfg.global_args.torch_dtype, device=cfg.global_args.device)
            F = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
                dtype=cfg.global_args.torch_dtype, device=cfg.global_args.device)
            G = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
                dtype=cfg.global_args.torch_dtype, device=cfg.global_args.device)
            H = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
                dtype=cfg.global_args.torch_dtype, device=cfg.global_args.device)
            sites[(2, 0)] = E / torch.max(torch.abs(E))
            sites[(3, 0)] = F / torch.max(torch.abs(F))
            sites[(2, 1)] = G / torch.max(torch.abs(G))
            sites[(3, 1)] = H / torch.max(torch.abs(H))

        state = IPEPS(sites, vertexToSite=lattice_to_site)
    else:
        raise ValueError("Missing trial state: -instate=None and -ipeps_init_type= "
            + str(args.ipeps_init_type) + " is not supported")

    if not state.dtype == model.dtype:
        cfg.global_args.torch_dtype = state.dtype
        print(f"dtype of initial state {state.dtype} and model {model.dtype} do not match.")
        print(f"Setting default dtype to {cfg.global_args.torch_dtype} and reinitializing "
            + "the model")
        model = j1j2.J1J2(j1=args.j1, j2=args.j2)

    print(state)

    # 2) select the "energy" function
    if args.tiling == "BIPARTITE" or args.tiling == "2SITE":
        energy_f = model.energy_2x2_2site
        eval_obs_f = model.eval_obs
    elif args.tiling == "1SITE":
        energy_f = model.energy_2x2_1site_BP
        # TODO include eval_obs with rotation on B-sublattice
        eval_obs_f = model.eval_obs
    elif args.tiling == "4SITE":
        energy_f = model.energy_2x2_4site
        eval_obs_f = model.eval_obs
    elif args.tiling == "8SITE":
        energy_f = model.energy_2x2_8site
        eval_obs_f = model.eval_obs
    else:
        raise ValueError("Invalid tiling: " + str(args.tiling) + " Supported options: "
            + "BIPARTITE, 2SITE, 4SITE, 8SITE")

    @torch.no_grad()
    def ctmrg_conv_energy(state, env, history, ctm_args=cfg.ctm_args):
        if not history:
            history = []
        e_curr = energy_f(state, env)
        history.append(e_curr.item())
        if (len(history) > 1 and abs(history[-1] - history[-2]) < ctm_args.ctm_conv_tol) \
                or len(history) >= ctm_args.ctm_max_iter:
            log.info({"history_length": len(history), "history": history})
            return True, history
        return False, history

    ctm_env = ENV(args.chi, state)
    init_env(state, ctm_env)
    ctm_env, *ctm_log = ctmrg.run(state, ctm_env, conv_check=ctmrg_conv_energy)

    loss0 = energy_f(state, ctm_env)
    obs_values, obs_labels = eval_obs_f(state, ctm_env)
    print(", ".join(["epoch", "energy"] + obs_labels))
    print(", ".join([f"{-1}", f"{loss0}"] + [f"{v}" for v in obs_values]))

    def loss_fn(state, ctm_env_in, opt_context):
        ctm_args = opt_context["ctm_args"]
        opt_args = opt_context["opt_args"]

        # possibly re-initialize the environment
        if opt_args.opt_ctm_reinit:
            init_env(state, ctm_env_in)

        # 1) compute environment by CTMRG
        ctm_env_out, *ctm_log = ctmrg.run(state, ctm_env_in,
            conv_check=ctmrg_conv_energy, ctm_args=ctm_args)

        # 2) evaluate loss with the converged environment
        loss = energy_f(state, ctm_env_out)

        return (loss, ctm_env_out, *ctm_log)

    def _to_json(l):
        re = [l[i, 0].item() for i in range(l.size()[0])]
        im = [l[i, 1].item() for i in range(l.size()[0])]
        return dict({"re": re, "im": im})

    @torch.no_grad()
    def obs_fn(state, ctm_env, opt_context):
        if ("line_search" in opt_context.keys() and not opt_context["line_search"]) \
                or not "line_search" in opt_context.keys():
            epoch = len(opt_context["loss_history"]["loss"])
            loss = opt_context["loss_history"]["loss"][-1]
            obs_values, obs_labels = eval_obs_f(state, ctm_env)
            print(", ".join([f"{epoch}", f"{loss}"] + [f"{v}" for v in obs_values]))
            log.info("Norm(sites): "
                + ", ".join([f"{t.norm()}" for c, t in state.sites.items()]))

            with torch.no_grad():
                if args.top_freq > 0 and epoch % args.top_freq == 0:
                    coord_dir_pairs = [((0, 0), (1, 0)), ((0, 0), (0, 1)),
                                       ((1, 1), (1, 0)), ((1, 1), (0, 1))]
                    for c, d in coord_dir_pairs:
                        # transfer operator spectrum
                        print(f"TOP spectrum(T)[{c},{d}] ", end="")
                        l = transferops.get_Top_spec(args.top_n, c, d, state, ctm_env)
                        print("TOP " + json.dumps(_to_json(l)))

    # optimize
    optimize_state(state, ctm_env, loss_fn, obs_fn=obs_fn)

    # compute final observables for the best variational state
    outputstatefile = args.out_prefix + "_state.json"
    state = read_ipeps(outputstatefile, vertexToSite=state.vertexToSite)
    ctm_env = ENV(args.chi, state)
    init_env(state, ctm_env)
    ctm_env, *ctm_log = ctmrg.run(state, ctm_env, conv_check=ctmrg_conv_energy)
    loss0 = energy_f(state, ctm_env)
    obs_values, obs_labels = eval_obs_f(state, ctm_env)
    print(", ".join([f"{args.opt_max_iter}", f"{loss0}"] + [f"{v}" for v in obs_values]))
def main():
    cfg.configure(args)
    cfg.print_config()
    torch.set_num_threads(args.omp_cores)
    torch.manual_seed(args.seed)

    model = triangle.triangle(j1=args.j1, j2=args.j2)

    # initialize an ipeps
    # 1) define lattice-tiling function, that maps arbitrary vertex of square lattice
    #    coord into one of coordinates within unit-cell of iPEPS ansatz
    if args.tiling == "3x3":
        def lattice_to_site(coord):
            vx = (-coord[0] + abs(coord[0]) * 3) % 3
            vy = (-coord[1] + abs(coord[1]) * 3) % 3
            # print(vx,vy)
            return ((vx + vy) % 3, 0)
            # return (vx,vy)
    elif args.tiling == "9SITE":
        def lattice_to_site(coord):
            vx = (coord[0] + abs(coord[0]) * 3) % 3
            vy = (coord[1] + abs(coord[1]) * 3) % 3
            return (vx, vy)
    else:
        raise ValueError("Invalid tiling: " + str(args.tiling) + " Supported options: "
            + "3x3, 9SITE")

    if args.instate != None:
        state = read_ipeps(args.instate, vertexToSite=lattice_to_site)
        if args.bond_dim > max(state.get_aux_bond_dims()):
            # extend the auxiliary dimensions
            state = extend_bond_dim(state, args.bond_dim)
        state.add_noise(args.instate_noise)
    elif args.opt_resume is not None:
        if args.tiling == "3x3":
            state = IPEPS(dict(), lX=3, lY=1)
        state.load_checkpoint(args.opt_resume)
    elif args.ipeps_init_type == 'RANDOM':
        bond_dim = args.bond_dim
        A = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
            dtype=cfg.global_args.dtype, device=cfg.global_args.device)
        B = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
            dtype=cfg.global_args.dtype, device=cfg.global_args.device)
        # normalization of initial random tensors
        A = A / (torch.max(torch.abs(A)))
        B = B / (torch.max(torch.abs(B)))
        sites = {(0, 0): A, (1, 0): B}

        if args.tiling == "3x3":
            C = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
                dtype=cfg.global_args.dtype, device=cfg.global_args.device)
            sites[(2, 0)] = C / torch.max(torch.abs(C))
            state = IPEPS(sites, vertexToSite=lattice_to_site)

        if args.tiling == "9SITE":
            C = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
                dtype=cfg.global_args.dtype, device=cfg.global_args.device)
            D = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
                dtype=cfg.global_args.dtype, device=cfg.global_args.device)
            E = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
                dtype=cfg.global_args.dtype, device=cfg.global_args.device)
            F = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
                dtype=cfg.global_args.dtype, device=cfg.global_args.device)
            G = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
                dtype=cfg.global_args.dtype, device=cfg.global_args.device)
            H = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
                dtype=cfg.global_args.dtype, device=cfg.global_args.device)
            I = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
                dtype=cfg.global_args.dtype, device=cfg.global_args.device)
            sites[(2, 0)] = C / torch.max(torch.abs(C))
            sites[(0, 1)] = D / torch.max(torch.abs(D))
            sites[(1, 1)] = E / torch.max(torch.abs(E))
            sites[(2, 1)] = F / torch.max(torch.abs(F))
            sites[(0, 2)] = G / torch.max(torch.abs(G))
            sites[(1, 2)] = H / torch.max(torch.abs(H))
            sites[(2, 2)] = I / torch.max(torch.abs(I))
            state = IPEPS(sites, vertexToSite=lattice_to_site)
    else:
        raise ValueError("Missing trial state: -instate=None and -ipeps_init_type= "
            + str(args.ipeps_init_type) + " is not supported")

    # 2) select the "energy" function
    if args.tiling == "3x3":
        energy_f = model.energy_2x2_9site
    elif args.tiling == "9SITE":
        energy_f = model.energy_2x2_9site
    else:
        raise ValueError("Invalid tiling: " + str(args.tiling) + " Supported options: "
            + "3x3, 9SITE")

    @torch.no_grad()
    def ctmrg_conv_energy(state, env, history, ctm_args=cfg.ctm_args):
        if not history:
            history = []
        e_curr = energy_f(state, env)
        history.append(e_curr.item())
        if (len(history) > 1 and abs(history[-1] - history[-2]) < ctm_args.ctm_conv_tol) \
                or len(history) >= ctm_args.ctm_max_iter:
            log.info({"history_length": len(history), "history": history})
            return True, history
        return False, history

    ctm_env = ENV(args.chi, state)
    init_env(state, ctm_env)
    ctm_env, *ctm_log = ctmrg.run(state, ctm_env, conv_check=ctmrg_conv_energy)

    loss0 = energy_f(state, ctm_env)
    obs_values, obs_labels = model.eval_obs(state, ctm_env)
    print(", ".join(["epoch", "energy"] + obs_labels))
    print(", ".join([f"{-1}", f"{loss0}"] + [f"{v}" for v in obs_values]))

    def loss_fn(state, ctm_env_in, opt_context):
        ctm_args = opt_context["ctm_args"]
        opt_args = opt_context["opt_args"]

        # possibly re-initialize the environment
        if opt_args.opt_ctm_reinit:
            init_env(state, ctm_env_in)

        # 1) compute environment by CTMRG
        ctm_env_out, *ctm_log = ctmrg.run(state, ctm_env_in,
            conv_check=ctmrg_conv_energy, ctm_args=ctm_args)

        # 2) evaluate loss with the converged environment
        loss = energy_f(state, ctm_env_out)

        return (loss, ctm_env_out, *ctm_log)

    @torch.no_grad()
    def obs_fn(state, ctm_env, opt_context):
        if ("line_search" in opt_context.keys() and not opt_context["line_search"]) \
                or not "line_search" in opt_context.keys():
            epoch = len(opt_context["loss_history"]["loss"])
            loss = opt_context["loss_history"]["loss"][-1]
            # obs_values, obs_labels = model.eval_obs(state,ctm_env)
            # print(", ".join([f"{epoch}",f"{loss}"]+[f"{v}" for v in obs_values]))
            print(", ".join([f"{epoch}", f"{loss}"]))
            log.info("Norm(sites): "
                + ", ".join([f"{t.norm()}" for c, t in state.sites.items()]))

    # optimize
    optimize_state(state, ctm_env, loss_fn, obs_fn=obs_fn)

    # compute final observables for the best variational state
    outputstatefile = args.out_prefix + "_state.json"
    state = read_ipeps(outputstatefile, vertexToSite=state.vertexToSite)
    ctm_env = ENV(args.chi, state)
    init_env(state, ctm_env)
    ctm_env, *ctm_log = ctmrg.run(state, ctm_env, conv_check=ctmrg_conv_energy)
    opt_energy = energy_f(state, ctm_env)
    obs_values, obs_labels = model.eval_obs(state, ctm_env)
    print(", ".join([f"{args.opt_max_iter}", f"{opt_energy}"] + [f"{v}" for v in obs_values]))
def main():
    cfg.configure(args)
    cfg.print_config()
    torch.set_num_threads(args.omp_cores)
    torch.manual_seed(args.seed)

    model = coupledLadders.COUPLEDLADDERS_D2_BIPARTITE(alpha=args.alpha)

    # initialize an ipeps
    if args.instate != None:
        state = read_ipeps_d2(args.instate)
        if args.bond_dim > max(state.get_aux_bond_dims()):
            # extend the auxiliary dimensions
            state = extend_bond_dim(state, args.bond_dim)
        state.add_noise(args.instate_noise)
    elif args.opt_resume is not None:
        state = IPEPS_D2SYM()
        state.load_checkpoint(args.opt_resume)
    elif args.ipeps_init_type == 'RANDOM':
        bond_dim = args.bond_dim
        A = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
            dtype=cfg.global_args.torch_dtype, device=cfg.global_args.device)
        # A= make_d2_symm(A)
        A = A / torch.max(torch.abs(A))
        state = IPEPS_D2SYM(A)
    else:
        raise ValueError("Missing trial state: -instate=None and -ipeps_init_type= "
            + str(args.ipeps_init_type) + " is not supported")

    state.sites = state.build_onsite_tensors()
    print(state)

    @torch.no_grad()
    def ctmrg_conv_energy(state, env, history, ctm_args=cfg.ctm_args):
        if not history:
            history = []
        e_curr = model.energy_2x1_1x2(state, env)
        history.append(e_curr.item())
        if (len(history) > 1 and abs(history[-1] - history[-2]) < ctm_args.ctm_conv_tol) \
                or len(history) >= ctm_args.ctm_max_iter:
            log.info({"history_length": len(history), "history": history})
            return True, history
        return False, history

    ctm_env = ENV(args.chi, state)
    init_env(state, ctm_env)
    ctm_env, *ctm_log = ctmrg.run(state, ctm_env, conv_check=ctmrg_conv_energy)

    loss0 = model.energy_2x1_1x2(state, ctm_env)
    obs_values, obs_labels = model.eval_obs(state, ctm_env)
    print(", ".join(["epoch", "energy"] + obs_labels))
    print(", ".join([f"{-1}", f"{loss0}"] + [f"{v}" for v in obs_values]))

    def loss_fn(state, ctm_env_in, opt_context):
        ctm_args = opt_context["ctm_args"]
        opt_args = opt_context["opt_args"]

        # symmetrize and normalize
        # symm_site= make_d2_symm(state.parent_site)
        # symm_site= symm_site/torch.max(torch.abs(symm_site))
        symm_site = state.parent_site / torch.max(torch.abs(state.parent_site))
        state = IPEPS_D2SYM(symm_site)
        # state.parent_tensors[c]+= state.parent_tensors[c].permute(0,1,4,3,2)
        # state.parent_tensors[c]*= 1.0/torch.max(torch.abs(state.parent_tensors[c]))

        # possibly re-initialize the environment
        if opt_args.opt_ctm_reinit:
            init_env(state, ctm_env_in)

        # 1) compute environment by CTMRG
        ctm_env_out, *ctm_log = ctmrg.run(state, ctm_env_in,
            conv_check=ctmrg_conv_energy, ctm_args=ctm_args)

        # 2) evaluate loss with converged environment
        loss = model.energy_2x1_1x2(state, ctm_env_out)

        return (loss, ctm_env_out, *ctm_log)

    @torch.no_grad()
    def obs_fn(state, ctm_env, opt_context):
        if ("line_search" in opt_context.keys() and not opt_context["line_search"]) \
                or not "line_search" in opt_context.keys():
            epoch = len(opt_context["loss_history"]["loss"])
            loss = opt_context["loss_history"]["loss"][-1]
            obs_values, obs_labels = model.eval_obs(state, ctm_env)
            print(", ".join([f"{epoch}", f"{loss}"] + [f"{v}" for v in obs_values]))

    # optimize
    optimize_state(state, ctm_env, loss_fn, obs_fn=obs_fn)

    # compute final observables for the best variational state
    outputstatefile = args.out_prefix + "_state.json"
    state = read_ipeps_d2(outputstatefile)
    ctm_env = ENV(args.chi, state)
    init_env(state, ctm_env)
    ctm_env, *ctm_log = ctmrg.run(state, ctm_env, conv_check=ctmrg_conv_energy)
    loss0 = model.energy_2x1_1x2(state, ctm_env)
    obs_values, obs_labels = model.eval_obs(state, ctm_env)
    print(", ".join([f"{args.opt_max_iter}", f"{loss0}"] + [f"{v}" for v in obs_values]))
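make_d2_symm appears in this script only in commented-out lines. Based on the permute(0,1,4,3,2) used there, a plausible sketch of such a reflection symmetrization is given below; the index convention (physical index first, followed by the four auxiliary indices) is an assumption, and this is not the library's actual implementation:

def d2_symmetrize(A):
    # average the tensor with its reflection obtained by swapping one pair of
    # opposite auxiliary indices, as suggested by the commented-out permute above
    return 0.5 * (A + A.permute(0, 1, 4, 3, 2))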
def main():
    cfg.configure(args)
    cfg.print_config()
    torch.set_num_threads(args.omp_cores)
    torch.manual_seed(args.seed)

    model = j1j2.J1J2(j1=args.j1, j2=args.j2)

    # initialize an ipeps
    # 1) define lattice-tiling function, that maps arbitrary vertex of square lattice
    #    coord into one of coordinates within unit-cell of iPEPS ansatz
    def lattice_to_site(coord):
        return (0, 0)

    if args.instate != None:
        state = read_ipeps(args.instate, vertexToSite=lattice_to_site)
        if args.bond_dim > max(state.get_aux_bond_dims()):
            # extend the auxiliary dimensions
            state = extend_bond_dim(state, args.bond_dim)
        state.add_noise(args.instate_noise)
    elif args.opt_resume is not None:
        state = IPEPS(dict(), lX=1, lY=1, vertexToSite=lattice_to_site)
        state.load_checkpoint(args.opt_resume)
    elif args.ipeps_init_type == 'RANDOM':
        bond_dim = args.bond_dim
        A = torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),
            dtype=cfg.global_args.dtype, device=cfg.global_args.device)
        # normalization of initial random tensors
        A = A / torch.max(torch.abs(A))
        sites = {(0, 0): A}
        state = IPEPS(sites, vertexToSite=lattice_to_site)
    else:
        raise ValueError("Missing trial state: -instate=None and -ipeps_init_type= "
            + str(args.ipeps_init_type) + " is not supported")

    print(state)

    # 2) select the "energy" function
    energy_f = model.energy_2x2_1site_BP

    @torch.no_grad()
    def ctmrg_conv_energy(state, env, history, ctm_args=cfg.ctm_args):
        if not history:
            history = []
        e_curr = energy_f(state, env)
        history.append(e_curr.item())
        if (len(history) > 1 and abs(history[-1] - history[-2]) < ctm_args.ctm_conv_tol) \
                or len(history) >= ctm_args.ctm_max_iter:
            log.info({"history_length": len(history), "history": history})
            return True, history
        return False, history

    # 3) choose C4v irrep (or their mix)
    def symmetrize(state):
        A = state.site((0, 0))
        A_symm = make_c4v_symm_A1(A)
        symm_state = IPEPS({(0, 0): A_symm}, vertexToSite=state.vertexToSite)
        return symm_state

    symm_state = symmetrize(state)
    ctm_env = ENV(args.chi, symm_state)
    init_env(symm_state, ctm_env)
    ctm_env, *ctm_log = ctmrg.run(symm_state, ctm_env, conv_check=ctmrg_conv_energy)

    loss0 = energy_f(symm_state, ctm_env)
    obs_values, obs_labels = model.eval_obs(symm_state, ctm_env)
    print(", ".join(["epoch", "energy"] + obs_labels))
    print(", ".join([f"{-1}", f"{loss0}"] + [f"{v}" for v in obs_values]))

    def loss_fn(state, ctm_env_in, opt_context):
        ctm_args = opt_context["ctm_args"]
        opt_args = opt_context["opt_args"]

        symm_state = symmetrize(state)

        # possibly re-initialize the environment
        if opt_args.opt_ctm_reinit:
            init_env(symm_state, ctm_env_in)

        # 1) compute environment by CTMRG
        ctm_env_out, *ctm_log = ctmrg.run(symm_state, ctm_env_in,
            conv_check=ctmrg_conv_energy, ctm_args=ctm_args)

        # 2) evaluate loss with the converged environment
        loss = energy_f(symm_state, ctm_env_out)

        return (loss, ctm_env_out, *ctm_log)

    @torch.no_grad()
    def obs_fn(state, ctm_env, opt_context):
        if ("line_search" in opt_context.keys() and not opt_context["line_search"]) \
                or not "line_search" in opt_context.keys():
            symm_state = symmetrize(state)
            epoch = len(opt_context["loss_history"]["loss"])
            loss = opt_context["loss_history"]["loss"][-1]
            obs_values, obs_labels = model.eval_obs(symm_state, ctm_env)
            print(", ".join([f"{epoch}", f"{loss}"] + [f"{v}" for v in obs_values]
                + [f"{torch.max(torch.abs(symm_state.site((0,0))))}"]))

    # optimize
    optimize_state(state, ctm_env, loss_fn, obs_fn=obs_fn)

    # compute final observables for the best variational state
    outputstatefile = args.out_prefix + "_state.json"
    state = read_ipeps(outputstatefile, vertexToSite=state.vertexToSite)
    symm_state = symmetrize(state)
    ctm_env = ENV(args.chi, symm_state)
    init_env(symm_state, ctm_env)
    ctm_env, *ctm_log = ctmrg.run(symm_state, ctm_env, conv_check=ctmrg_conv_energy)
    opt_energy = energy_f(symm_state, ctm_env)
    obs_values, obs_labels = model.eval_obs(symm_state, ctm_env)
    print(", ".join([f"{args.opt_max_iter}", f"{opt_energy}"] + [f"{v}" for v in obs_values]))