def load_cft_data(pars, **kwargs):
    """Load cached scaling-dimension data for ``pars``, or construct it.

    Any ``kwargs`` override entries of ``pars`` (the caller's dict is not
    mutated).  The result is read from a "scals_by_alpha" tensor file when
    one exists; otherwise it is built with ``get_cft_data`` and, if
    ``pars["save_scaldim_file"]`` is true, written back to disk.

    Returns whatever ``read_tensor_file`` / ``get_cft_data`` produce for
    the "scals_by_alpha" data.
    """
    if kwargs:
        pars = pars.copy()
        pars.update(kwargs)
    id_pars = get_id_pars(pars)
    filename = os.path.basename(__file__)
    try:
        res = read_tensor_file("scals_by_alpha", pars=id_pars,
                               filename=filename)
    except RuntimeError:
        # Cache miss: construct the scaling dimensions from scratch.
        # (A legacy fallback for old-style file names used to live here;
        # it was already commented out and has been removed.)
        print("Constructing scaling dimensions.")
        timer = Timer()
        timer.start()
        res = get_cft_data(pars)
        print("Done constructing scaling dimensions.\n")
        timer.print_elapsed()
        if pars["save_scaldim_file"]:
            write_tensor_file(data=res, prefix="scals_by_alpha",
                              pars=id_pars, filename=filename)
    return res
def generate_normalized_tensors(pars):
    """Build KW tensors for iterations 0..n and normalize them.

    The normalization is fixed by fitting ``log Z`` as a linear function of
    the system size ``N`` over several iterations, then dividing each tensor
    by the fitted extensive part.  All normalized tensors are written to
    "KW_tensors_normalized" files; the pair for ``pars["iter_count"]`` is
    returned as ``(A_list, U)``.
    """
    # Number of tensors to use to fix the normalization.
    n = max(8, pars["iter_count"] + 4)
    # Number of tensors from the beginning to discard in the fit (early
    # iterations are not yet in the scaling regime).
    n_discard = max(min(pars["iter_count"] - 3, 3), 0)

    tensors_and_log_facts = []
    for i in reversed(range(n + 1)):
        tensors_and_log_facts.append(get_tensors(pars=pars, iter_count=i))
    tensors_and_log_facts = tuple(reversed(tensors_and_log_facts))
    A_lists = tuple(t[0] for t in tensors_and_log_facts)
    log_fact_lists = np.array(tuple(t[1] for t in tensors_and_log_facts))
    Us = np.array(tuple(t[2] for t in tensors_and_log_facts))

    # Partition functions of each iteration, with the accumulated log
    # factors restored.
    Zs = np.array(tuple(scon(A_list, ([1,2,3,2], [3,4,1,4])).norm()
                        for A_list in A_lists))
    log_Zs = np.log(Zs)
    log_Zs = np.array(tuple(log_Z + log_fact_list[0] + log_fact_list[1]
                            for log_Z, log_fact_list
                            in zip(log_Zs, log_fact_lists)))
    # System sizes corresponding to each iteration.
    Ns = np.array([4*4**i - 2**i for i in range(n + 1)])
    # BUG FIX: use the locally computed n_discard; previously this read
    # pars["n_discard"], leaving the local variable above unused.
    A, B = np.polyfit(Ns[n_discard:], log_Zs[n_discard:], 1)
    if pars["print_errors"]:
        print("Fit when normalizing Ts: %.3e * N + %.3e"%(A,B))

    # Strip the extensive part exp(N*A/2) from each tensor, undoing the
    # stored log factors at the same time.
    A_lists = [[A_list[0]/np.exp(N*A/2 - log_fact_list[0]),
                A_list[1]/np.exp(N*A/2 - log_fact_list[1])]
               for A_list, N, log_fact_list
               in zip(A_lists, Ns, log_fact_lists)]

    id_pars = get_tensor_id_pars(pars)
    for i, (A_list, U) in enumerate(zip(A_lists, Us)):
        write_tensor_file(data=(A_list, U), prefix="KW_tensors_normalized",
                          pars=id_pars, iter_count=i, filename=filename)

    A_list = A_lists[pars["iter_count"]]
    U = Us[pars["iter_count"]]
    return A_list, U
def generate_tensors(pars, **kwargs):
    """Produce the KW tensors for iteration ``pars["iter_count"]``.

    Iteration 0 is built from scratch; later iterations are obtained by
    ascending the tensors of the previous iteration.  The result is cached
    to a "KW_tensors" file and returned as ``(A_list, log_fact_list, U)``.
    ``kwargs`` override entries of ``pars`` without mutating the original.
    """
    pars = pars.copy()
    pars.update(kwargs)
    iter_count = pars["iter_count"]
    if iter_count == 0:
        A_list, log_fact_list, U = generate_initial_tensors(pars)
    else:
        # Fetch the previous iteration's tensors and ascend them one step.
        previous = get_tensors(pars, iter_count=iter_count - 1)
        A_list, log_fact_list, U = ascend_tensors(previous[0], previous[1],
                                                  previous[2], pars)
    # Cache the result on disk.
    id_pars = get_tensor_id_pars(pars)
    write_tensor_file((A_list, log_fact_list, U), prefix="KW_tensors",
                      pars=id_pars, filename=filename)
    return A_list, log_fact_list, U
def get_eigs(pars, **kwargs):
    """Return the "cdf_eigs" spectrum for ``pars``, using a file cache.

    On a cache miss the eigenvalues are generated from the normalized
    tensor ``T`` and the normalized KW tensor pair ``(A1, A2)``, then
    written back to disk.  ``kwargs`` override entries of ``pars`` without
    mutating the caller's dict.
    """
    pars = pars.copy()
    pars.update(kwargs)
    id_pars = get_scaldim_id_pars(pars)
    try:
        result = read_tensor_file(prefix="cdf_eigs", pars=id_pars,
                                  filename=filename)
    except RuntimeError:
        T = tensordispenser.get_normalized_tensor(pars)
        # BUG FIX: get_normalized_tensors returns the pair (A_list, U), as
        # unpacked in get_scaldims.  Previously the whole tuple was kept in
        # A_list, so A1/A2 ended up bound to (A_list, U) instead of the two
        # tensors.
        A_list, U = get_normalized_tensors(pars)
        A1, A2 = A_list
        result = generate_eigs(T, A1, A2, pars)
        write_tensor_file(data=result, prefix="cdf_eigs", pars=id_pars,
                          filename=filename)
    return result
def load_eigs(pars, **kwargs):
    """Load cached "cdf_ed_eigs" data for ``pars``, constructing on a miss.

    ``kwargs`` override entries of ``pars`` without mutating the caller's
    dict.  When no cached file exists the eigenvalues are built with
    ``get_eigs`` (timed, with progress messages) and saved back to disk if
    ``pars["save_scaldim_file"]`` is true.
    """
    if kwargs:
        pars = pars.copy()
        pars.update(kwargs)
    id_pars = get_id_pars(pars)
    filename = os.path.basename(__file__)
    try:
        res = read_tensor_file("cdf_ed_eigs", pars=id_pars,
                               filename=filename)
    except RuntimeError:
        # Cache miss: construct and (optionally) persist the eigenvalues.
        print("Constructing eigs.")
        timer = Timer()
        timer.start()
        res = get_eigs(pars)
        print("Done constructing eigs.\n")
        timer.print_elapsed()
        if pars["save_scaldim_file"]:
            write_tensor_file(data=res, prefix="cdf_ed_eigs",
                              pars=id_pars, filename=filename)
    return res
def get_scaldims(pars, **kwargs):
    """Return the "KW_scaldims" spectrum for ``pars``, using a file cache.

    ``kwargs`` override entries of ``pars`` without mutating the caller's
    dict.  On a cache miss the scaling dimensions are generated from the
    normalized tensors; when ``pars["do_coarse_momenta"]`` is set, a fine
    (momentum-free) spectrum is computed recursively and combined with the
    coarse-momenta one.  The result is written back to disk.
    """
    pars = pars.copy()
    pars.update(kwargs)
    id_pars = get_scaldim_id_pars(pars)
    # Fast path: return the cached spectrum when it exists.
    try:
        return read_tensor_file(prefix="KW_scaldims", pars=id_pars,
                                filename=filename)
    except RuntimeError:
        pass
    T = tensordispenser.get_normalized_tensor(pars)
    A_list, U = get_normalized_tensors(pars)
    A1, A2 = A_list
    if pars["do_coarse_momenta"]:
        # Recurse for the fine spectrum with momenta switched off, then
        # merge it with the coarse-momenta spectrum.
        fine_pars = pars.copy()
        fine_pars["do_coarse_momenta"] = False
        fine_pars["do_momenta"] = False
        res_fine = get_scaldims(fine_pars)
        res_coarse = generate_scaldims(T, A1, A2, U, pars)
        result = combine_coarse_momenta(res_fine, res_coarse)
    else:
        result = generate_scaldims(T, A1, A2, U, pars)
    write_tensor_file(data=result, prefix="KW_scaldims", pars=id_pars,
                      filename=filename)
    return result