Example #1
    def obs_fn(state, ctm_env, opt_context):
        if ("line_search" in opt_context.keys() and not opt_context["line_search"]) \
            or not "line_search" in opt_context.keys():
            state_sym= to_ipeps_c4v(state, normalize=True)
            epoch= len(opt_context["loss_history"]["loss"])
            loss= opt_context["loss_history"]["loss"][-1]
            obs_values, obs_labels = model.eval_obs(state_sym, ctm_env)
            print(", ".join([f"{epoch}",f"{loss}"]+[f"{v}" for v in obs_values]+\
                [f"{torch.max(torch.abs(state.site((0,0))))}"]))

            if args.top_freq>0 and epoch%args.top_freq==0:
                coord_dir_pairs=[((0,0), (1,0))]
                for c,d in coord_dir_pairs:
                    # transfer operator spectrum
                    print(f"TOP spectrum(T)[{c},{d}] ",end="")
                    l= transferops_c4v.get_Top_spec_c4v(args.top_n, state_sym, ctm_env)
                    print("TOP "+json.dumps(_to_json(l)))
Example #2
    def obs_fn(state, ctm_env, opt_context):
        if opt_context["line_search"]:
            epoch= len(opt_context["loss_history"]["loss_ls"])
            loss= opt_context["loss_history"]["loss_ls"][-1]
            print("LS",end=" ")
        else:
            epoch= len(opt_context["loss_history"]["loss"]) 
            loss= opt_context["loss_history"]["loss"][-1] 
        obs_values, obs_labels = model.eval_obs(state,ctm_env,force_cpu=True)
        print(", ".join([f"{epoch}",f"{loss}"]+[f"{v}" for v in obs_values]))

        if not opt_context.get("line_search", False) \
            and args.top_freq>0 and epoch%args.top_freq==0:
            coord_dir_pairs=[((0,0), (1,0))]
            for c,d in coord_dir_pairs:
                # transfer operator spectrum
                print(f"TOP spectrum(T)[{c},{d}] ",end="")
                l= transferops_c4v.get_Top_spec_c4v(args.top_n, state, ctm_env)
                print("TOP "+json.dumps(_to_json(l)))
Example #3
def main():
    # 0) parse command line arguments and configure simulation parameters
    cfg.configure(args)
    cfg.print_config()
    torch.set_num_threads(args.omp_cores)
    torch.manual_seed(args.seed)

    model = j1j2.J1J2_C4V_BIPARTITE(j1=args.j1, j2=args.j2)
    energy_f = model.energy_1x1_lowmem
    # energy_f= model.energy_1x1

    # 1) initialize an ipeps - read from file or create a random one
    if args.instate is not None:
        state = read_ipeps_c4v(args.instate)
        if args.bond_dim > max(state.get_aux_bond_dims()):
            # extend the auxiliary dimensions
            state = extend_bond_dim(state, args.bond_dim)
        state.add_noise(args.instate_noise)
        state.sites[(0, 0)] = state.sites[(0, 0)] / torch.max(
            torch.abs(state.sites[(0, 0)]))
    elif args.ipeps_init_type == 'RANDOM':
        bond_dim = args.bond_dim

        A= torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),\
            dtype=cfg.global_args.dtype,device=cfg.global_args.device)
        A = make_c4v_symm(A)
        A = A / torch.max(torch.abs(A))

        state = IPEPS_C4V(A)
    else:
        raise ValueError("Missing trial state: -instate=None and -ipeps_init_type= "\
            +str(args.ipeps_init_type)+" is not supported")

    print(state)

    # 2) define convergence criterion for CTM algorithm. This function is to be
    #    invoked at every CTM step. We also use it to evaluate observables of interest
    #    during the course of CTM
    # 2a) convergence criterion based on on-site energy
    def ctmrg_conv_energy(state, env, history, ctm_args=cfg.ctm_args):
        with torch.no_grad():
            if not history:
                history = []

            e_curr = energy_f(state, env, force_cpu=ctm_args.conv_check_cpu)
            history.append(e_curr.item())

            if args.obs_freq>0 and \
                (len(history)%args.obs_freq==0 or (len(history)-1)%args.obs_freq==0):
                obs_values, obs_labels = model.eval_obs(state, env)
                print(", ".join([f"{len(history)}", f"{e_curr}"] +
                                [f"{v}" for v in obs_values]))
            else:
                print(", ".join([f"{len(history)}", f"{e_curr}"]))

            if len(history) > 1 and abs(history[-1] -
                                        history[-2]) < ctm_args.ctm_conv_tol:
                log.info({
                    "history_length": len(history),
                    "history": history,
                    "final_multiplets": compute_multiplets(env)
                })
                return True, history
            elif len(history) >= ctm_args.ctm_max_iter:
                log.info({
                    "history_length": len(history),
                    "history": history,
                    "final_multiplets": compute_multiplets(env)
                })
                return False, history
        return False, history

    # 2b) convergence criterion based on 2-site reduced density matrix
    #     of nearest-neighbours
    def ctmrg_conv_rdm2x1(state, env, history, ctm_args=cfg.ctm_args):
        with torch.no_grad():
            if not history:
                history = dict({"log": []})
            rdm2x1 = rdm2x1_sl(state, env, force_cpu=ctm_args.conv_check_cpu)
            dist = float('inf')
            if len(history["log"]) > 1:
                dist = torch.dist(rdm2x1, history["rdm"], p=2).item()
            # log dist and observables
            if args.obs_freq>0 and \
                (len(history["log"])%args.obs_freq==0 or
                (len(history["log"])-1)%args.obs_freq==0):
                e_curr = energy_f(state,
                                  env,
                                  force_cpu=ctm_args.conv_check_cpu)
                obs_values, obs_labels = model.eval_obs(state,
                                                        env,
                                                        force_cpu=True)
                print(", ".join(
                    [f"{len(history['log'])}", f"{dist}", f"{e_curr}"] +
                    [f"{v}" for v in obs_values]))
            else:
                print(f"{len(history['log'])}, {dist}")
            # update history
            history["rdm"] = rdm2x1
            history["log"].append(dist)
            if dist < ctm_args.ctm_conv_tol:
                log.info({
                    "history_length": len(history['log']),
                    "history": history['log'],
                    "final_multiplets": compute_multiplets(env)
                })
                return True, history
            elif len(history['log']) >= ctm_args.ctm_max_iter:
                log.info({
                    "history_length": len(history['log']),
                    "history": history['log'],
                    "final_multiplets": compute_multiplets(env)
                })
                return False, history
        return False, history

    # 3) initialize environment
    ctm_env_init = ENV_C4V(args.chi, state)
    init_env(state, ctm_env_init)

    # 4) (optional) compute observables as given by initial environment
    e_curr0 = energy_f(state, ctm_env_init)
    obs_values0, obs_labels = model.eval_obs(state, ctm_env_init)
    print(", ".join(["epoch", "energy"] + obs_labels))
    print(", ".join([f"{-1}", f"{e_curr0}"] + [f"{v}" for v in obs_values0]))

    # 5) (main) execute CTM algorithm
    ctm_env_init, *ctm_log = ctmrg_c4v.run(state,
                                           ctm_env_init,
                                           conv_check=ctmrg_conv_rdm2x1)

    # 6) compute final observables
    e_curr0 = energy_f(state, ctm_env_init, force_cpu=True)
    obs_values0, obs_labels = model.eval_obs(state,
                                             ctm_env_init,
                                             force_cpu=True)
    history, t_ctm, t_obs = ctm_log
    print("\n")
    print(", ".join(["epoch", "energy"] + obs_labels))
    print("FINAL " + ", ".join([f"{e_curr0}"] + [f"{v}" for v in obs_values0]))
    print(f"TIMINGS ctm: {t_ctm} conv_check: {t_obs}")

    # 7) ----- additional observables ---------------------------------------------
    corrSS = model.eval_corrf_SS(state,
                                 ctm_env_init,
                                 args.corrf_r,
                                 canonical=args.corrf_canonical)
    print("\n\nSS r " + " ".join([label for label in corrSS.keys()]) +
          f" canonical {args.corrf_canonical}")
    for i in range(args.corrf_r):
        print(f"{i} " +
              " ".join([f"{corrSS[label][i]}" for label in corrSS.keys()]))

    corrDD = model.eval_corrf_DD_H(state, ctm_env_init, args.corrf_r)
    print("\n\nDD r " + " ".join([label for label in corrDD.keys()]))
    for i in range(args.corrf_r):
        print(f"{i} " +
              " ".join([f"{corrDD[label][i]}" for label in corrDD.keys()]))

    # environment diagnostics
    print("\n\nspectrum(C)")
    u, s, v = torch.svd(ctm_env_init.C[ctm_env_init.keyC], compute_uv=False)
    for i in range(args.chi):
        print(f"{i} {s[i]}")

    # transfer operator spectrum
    print("\n\nspectrum(T)")
    l = transferops_c4v.get_Top_spec_c4v(args.top_n, state, ctm_env_init)
    for i in range(l.size()[0]):
        print(f"{i} {l[i,0]} {l[i,1]}")
Example #4
def main():
    cfg.configure(args)
    cfg.print_config()
    torch.set_num_threads(args.omp_cores)
    torch.manual_seed(args.seed)

    if args.c4v_type == "TI":
        model = jq.JQ_C4V(j1=args.j1, q=args.q)
    elif args.c4v_type == "BIPARTITE":
        model = jq.JQ_C4V_BIPARTITE(j1=args.j1, q=args.q)
    elif args.c4v_type == "PLAQUETTE":
        q_inter = args.q if args.q_inter is None else args.q_inter
        model = jq.JQ_C4V_PLAQUETTE(j1=args.j1, q=args.q, q_inter=q_inter)
    else:
        raise ValueError("Unsupported C4v ansatz: -c4v_type= "\
            +str(args.ipeps_init_type)+" is not supported")

    # initialize an ipeps
    # 1) define a lattice-tiling function that maps an arbitrary vertex of the
    #    square lattice onto one of the coordinates within the iPEPS unit cell

    if args.instate is not None:
        state = read_ipeps_c4v(args.instate)
        if args.bond_dim > max(state.get_aux_bond_dims()):
            # extend the auxiliary dimensions
            state = extend_bond_dim(state, args.bond_dim)
        state.add_noise(args.instate_noise)
        state.sites[(0, 0)] = state.sites[(0, 0)] / torch.max(
            torch.abs(state.sites[(0, 0)]))
    elif args.ipeps_init_type == 'RANDOM':
        bond_dim = args.bond_dim
        A= torch.rand((model.phys_dim, bond_dim, bond_dim, bond_dim, bond_dim),\
            dtype=cfg.global_args.torch_dtype,device=cfg.global_args.device)
        A = make_c4v_symm(A)
        A = A / torch.max(torch.abs(A))
        state = IPEPS_C4V(A)
    else:
        raise ValueError("Missing trial state: -instate=None and -ipeps_init_type= "\
            +str(args.ipeps_init_type)+" is not supported")

    print(state)

    def ctmrg_conv_energy(state, env, history, ctm_args=cfg.ctm_args):
        with torch.no_grad():
            if not history:
                history = []
            e_curr = model.energy_1x1(state, env)
            obs_values, obs_labels = model.eval_obs(state, env)
            history.append([e_curr.item()] + obs_values)
            print(", ".join([f"{len(history)}", f"{e_curr}"] +
                            [f"{v}" for v in obs_values]))

            if len(history) > 1 and abs(
                    history[-1][0] - history[-2][0]) < ctm_args.ctm_conv_tol:
                return True, history
        return False, history

    ctm_env_init = ENV_C4V(args.chi, state)
    init_env(state, ctm_env_init)
    print(ctm_env_init)

    e_curr0 = model.energy_1x1(state, ctm_env_init)
    obs_values0, obs_labels = model.eval_obs(state, ctm_env_init)

    print(", ".join(["epoch", "energy"] + obs_labels))
    print(", ".join([f"{-1}", f"{e_curr0}"] + [f"{v}" for v in obs_values0]))

    ctm_env_init, *ctm_log = ctmrg_c4v.run(state,
                                           ctm_env_init,
                                           conv_check=ctmrg_conv_energy)

    corrSS = model.eval_corrf_SS(state, ctm_env_init, args.corrf_r)
    print("\n\nSS r " + " ".join([label for label in corrSS.keys()]))
    for i in range(args.corrf_r):
        print(f"{i} " +
              " ".join([f"{corrSS[label][i]}" for label in corrSS.keys()]))

    corrDD = model.eval_corrf_DD_H(state, ctm_env_init, args.corrf_r)
    print("\n\nDD r " + " ".join([label for label in corrDD.keys()]))
    for i in range(args.corrf_r):
        print(f"{i} " +
              " ".join([f"{corrDD[label][i]}" for label in corrDD.keys()]))

    corrDD_V = model.eval_corrf_DD_V(state, ctm_env_init, args.corrf_r)
    print("\n\nDD_V r " + " ".join([label for label in corrDD_V.keys()]))
    for i in range(args.corrf_r):
        print(f"{i} " +
              " ".join([f"{corrDD_V[label][i]}" for label in corrDD_V.keys()]))

    # environment diagnostics
    print("\n\nspectrum(C)")
    u, s, v = torch.svd(ctm_env_init.C[ctm_env_init.keyC], compute_uv=False)
    for i in range(args.chi):
        print(f"{i} {s[i]}")

    # transfer operator spectrum
    print("\n\nspectrum(T)")
    l = transferops_c4v.get_Top_spec_c4v(args.top_n, state, ctm_env_init)
    for i in range(l.size()[0]):
        print(f"{i} {l[i,0]} {l[i,1]}")
Example #5
def main():
    cfg.configure(args)
    cfg.print_config()
    torch.set_num_threads(args.omp_cores)
    torch.manual_seed(args.seed)

    model= j1j2.J1J2_C4V_BIPARTITE(j1=args.j1, j2=args.j2, hz_stag=args.hz_stag, \
        delta_zz=args.delta_zz)
    energy_f = model.energy_1x1_lowmem

    # initialize an ipeps
    if args.instate is not None:
        state = read_ipeps_u1(args.instate, vertexToSite=None)
        assert len(state.coeffs) == 1, "Not a 1-site ipeps"
        state.add_noise(args.instate_noise)
    elif args.opt_resume is not None:
        if args.bond_dim in [2, 3, 4, 5, 6, 7, 8]:
            u1sym_t= tenU1.import_sym_tensors(2,args.bond_dim,"A_1",\
                infile=f"u1sym/D{args.bond_dim}_U1_{args.u1_class}.txt",\
                dtype=cfg.global_args.dtype, device=cfg.global_args.device)
        else:
            raise ValueError("Unsupported --bond_dim= " + str(args.bond_dim))
        A = torch.zeros(len(u1sym_t),
                        dtype=cfg.global_args.dtype,
                        device=cfg.global_args.device)
        coeffs = {(0, 0): A}
        state = IPEPS_U1SYM(u1sym_t, coeffs)
        state.load_checkpoint(args.opt_resume)
    elif args.ipeps_init_type == 'RANDOM':
        if args.bond_dim in [2, 3, 4, 5, 6, 7, 8]:
            u1sym_t= tenU1.import_sym_tensors(2, args.bond_dim, "A_1", \
                infile=f"u1sym/D{args.bond_dim}_U1_{args.u1_class}.txt", \
                dtype=cfg.global_args.dtype, device=cfg.global_args.device)
        else:
            raise ValueError("Unsupported --bond_dim= " + str(args.bond_dim))

        A = torch.rand(len(u1sym_t),
                       dtype=cfg.global_args.dtype,
                       device=cfg.global_args.device)
        A = A / torch.max(torch.abs(A))
        coeffs = {(0, 0): A}
        state = IPEPS_U1SYM(u1sym_t, coeffs)
    else:
        raise ValueError("Missing trial state: -instate=None and -ipeps_init_type= "\
            +str(args.ipeps_init_type)+" is not supported")

    print(state)

    @torch.no_grad()
    def ctmrg_conv_energy(state, env, history, ctm_args=cfg.ctm_args):
        if not history:
            history = dict({"log": []})
        rdm2x1 = rdm2x1_sl(state, env, force_cpu=ctm_args.conv_check_cpu)
        dist = float('inf')
        if len(history["log"]) > 1:
            dist = torch.dist(rdm2x1, history["rdm"], p=2).item()
        # log dist and observables
        if args.obs_freq>0 and \
            (len(history["log"])%args.obs_freq==0 or
            (len(history["log"])-1)%args.obs_freq==0):
            e_curr = energy_f(state, env, force_cpu=ctm_args.conv_check_cpu)
            obs_values, obs_labels = model.eval_obs(state, env, force_cpu=True)
            print(
                ", ".join([f"{len(history['log'])}", f"{dist}", f"{e_curr}"] +
                          [f"{v}" for v in obs_values]))
        else:
            print(f"{len(history['log'])}, {dist}")
        # update history
        history["rdm"] = rdm2x1
        history["log"].append(dist)
        if dist < ctm_args.ctm_conv_tol:
            log.info({
                "history_length": len(history['log']),
                "history": history['log'],
                "final_multiplets": compute_multiplets(env)
            })
            return True, history
        elif len(history['log']) >= ctm_args.ctm_max_iter:
            log.info({
                "history_length": len(history['log']),
                "history": history['log'],
                "final_multiplets": compute_multiplets(env)
            })
            return False, history
        return False, history

    ctm_env_init = ENV_C4V(args.chi, state)
    init_env(state, ctm_env_init)

    e_curr0 = energy_f(state, ctm_env_init, force_cpu=True)
    obs_values0, obs_labels = model.eval_obs(state,
                                             ctm_env_init,
                                             force_cpu=True)
    print(", ".join(["epoch", "energy"] + obs_labels))
    print(", ".join([f"{-1}", f"{e_curr0}"] + [f"{v}" for v in obs_values0]))

    ctm_env_init, *ctm_log = ctmrg_c4v.run(state, ctm_env_init, \
        conv_check=ctmrg_conv_energy)

    e_curr0 = energy_f(state, ctm_env_init, force_cpu=True)
    obs_values0, obs_labels = model.eval_obs(state,
                                             ctm_env_init,
                                             force_cpu=True)
    history, t_ctm, t_obs = ctm_log
    print("\n")
    print(", ".join(["epoch", "energy"] + obs_labels))
    print("FINAL " + ", ".join([f"{e_curr0}"] + [f"{v}" for v in obs_values0]))
    print(f"TIMINGS ctm: {t_ctm} conv_check: {t_obs}")

    # ----- additional observables ---------------------------------------------
    corrSS = model.eval_corrf_SS(state,
                                 ctm_env_init,
                                 args.corrf_r,
                                 canonical=args.corrf_canonical)
    print("\n\nSS r " + " ".join([label for label in corrSS.keys()]) +
          f" canonical {args.corrf_canonical}")
    for i in range(args.corrf_r):
        print(f"{i} " +
              " ".join([f"{corrSS[label][i]}" for label in corrSS.keys()]))

    corrDD = model.eval_corrf_DD_H(state, ctm_env_init, args.corrf_r)
    print("\n\nDD r " + " ".join([label for label in corrDD.keys()]))
    for i in range(args.corrf_r):
        print(f"{i} " +
              " ".join([f"{corrDD[label][i]}" for label in corrDD.keys()]))

    if args.corrf_dd_v:
        corrDD_V = model.eval_corrf_DD_V(state, ctm_env_init, args.corrf_r)
        print("\n\nDD_v r " + " ".join([label for label in corrDD_V.keys()]))
        for i in range(args.corrf_r):
            print(f"{i} " + " ".join(
                [f"{corrDD_V[label][i]}" for label in corrDD_V.keys()]))

    # environment diagnostics
    print("\n\nspectrum(C)")
    u, s, v = torch.svd(ctm_env_init.C[ctm_env_init.keyC], compute_uv=False)
    for i in range(args.chi):
        print(f"{i} {s[i]}")

    # transfer operator spectrum
    print("\n\nspectrum(T)")
    l = transferops_c4v.get_Top_spec_c4v(args.top_n, state, ctm_env_init)
    for i in range(l.size()[0]):
        print(f"{i} {l[i,0]} {l[i,1]}")

    # transfer operator spectrum
    if args.top2:
        print("\n\nspectrum(T2)")
        l = transferops_c4v.get_Top2_spec_c4v(args.top_n, state, ctm_env_init)
        for i in range(l.size()[0]):
            print(f"{i} {l[i,0]} {l[i,1]}")