# use integer division so the slice bounds below are ints
nrepeats = len(forward_data_files) // args.ntrajs_per_block
ntotal_trajs = nrepeats * args.ntrajs_per_block

forward_data_files = forward_data_files[:ntotal_trajs]
backward_data_files = backward_data_files[:ntotal_trajs]

out_nc_handle = nc.Dataset(args.out, "w", format="NETCDF4")

k, lambda_t, z_t, w_t, indices = extract_multiple_nc(forward_data_files, TIME_STRIDE,
                                                     nrepeats, args.ntrajs_per_block)

data = {"dt": np.array([DT], dtype=float),
        "pulling_times": DT * indices,
        "ks": np.array([k]),
        "lambda_F": lambda_t,
        "wF_t": w_t,
        "zF_t": z_t}
save_to_nc(data, out_nc_handle)

if len(backward_data_files) == 0:
    # no backward trajectories: derive the reverse protocol from the forward data
    data = {"lambda_R": lambda_t[::-1],
            "wR_t": w_t[::-1, ::-1, :],
            "zR_t": z_t[::-1, ::-1, :]}
else:
    k, lambda_t, z_t, w_t, indices = extract_multiple_nc(backward_data_files, TIME_STRIDE,
                                                         nrepeats, args.ntrajs_per_block)
    data = {"lambda_R": lambda_t,
            "wR_t": w_t,
            "zR_t": z_t}

save_to_nc(data, out_nc_handle)
out_nc_handle.close()
print("DONE")
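# The helper save_to_nc used above is defined elsewhere in this code base; the
# sketch below is only an assumption of what it does: write each array in the
# dict as a NetCDF variable, creating one dimension per array axis. The
# dimension-naming scheme ("<name>_dim_<axis>") is hypothetical.
import numpy as np

def save_to_nc(data, nc_handle):
    for name, array in data.items():
        array = np.asarray(array)
        dim_names = []
        for axis, length in enumerate(array.shape):
            dim_name = "%s_dim_%d" % (name, axis)
            if dim_name not in nc_handle.dimensions:
                nc_handle.createDimension(dim_name, length)
            dim_names.append(dim_name)
        nc_var = nc_handle.createVariable(name, array.dtype, tuple(dim_names))
        nc_var[:] = array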
def _convert_2_acc_work(w_t, z_t, lambda_t, k):
    """
    Use Eq. (31) in Hummer and Szabo 2005.
    :param w_t: 2d array of shape (ntrajs, times)
    :param z_t: 2d array of shape (ntrajs, times)
    :param lambda_t: 1d array of shape (times,)
    :param k: float, pulling force constant
    :return: acc_work, 2d array of shape (ntrajs, times)
    """
    v_t = V(z_t, k, lambda_t)    # v_t has shape (ntrajs, times)
    v_0 = v_t[:, [0]]            # v_0 has shape (ntrajs, 1)
    acc_w_t = w_t + v_t - v_0
    return acc_w_t


if args.work_in_file == args.work_out_file:
    raise ValueError("in and out files are the same")

with nc.Dataset(args.work_in_file, "r") as handle:
    data = {key: handle.variables[key][:] for key in handle.variables.keys()}

ks = data["ks"][0]    # kcal/mol/A^2

data["wF_t"] = _convert_2_acc_work(data["wF_t"], data["zF_t"], data["lambda_F"], ks)
data["wR_t"] = _convert_2_acc_work(data["wR_t"], data["zR_t"], data["lambda_R"], ks)

with nc.Dataset(args.work_out_file, "w", format="NETCDF4") as handle:
    save_to_nc(data, handle)

print("DONE")
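# V above is the bias potential imported from elsewhere; in the Hummer-Szabo
# treatment of harmonic pulling it is (k/2)(z - lambda)^2, and the sketch
# below assumes that form. This is an assumption consistent with how
# _convert_2_acc_work calls it, not the original definition.
import numpy as np

def V(z_t, k, lambda_t):
    # z_t: (ntrajs, times); lambda_t: (times,) -> broadcasts to (ntrajs, times)
    return 0.5 * k * (z_t - lambda_t[np.newaxis, :]) ** 2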
print("Repeat ", repeat) #zF_t[repeat, :, :], wF_t[repeat, :, :] = switching(args.ks, lambda_F, args.equilibration_steps, # args.trajs_per_repeat, args.dt, U, dU_dx) lower = repeat * args.trajs_per_repeat upper = (repeat + 1) * args.trajs_per_repeat zF_t[lower : upper, :], wF_t[lower : upper, :] = switching(args.ks, lambda_F, args.equilibration_steps, args.trajs_per_repeat, args.dt, U, dU_dx) #zR_t[repeat, :, :], wR_t[repeat, :, :] = switching(args.ks, lambda_R, args.equilibration_steps, # args.trajs_per_repeat, args.dt, U, dU_dx) zR_t[lower : upper, :], wR_t[lower : upper, :] = switching(args.ks, lambda_R, args.equilibration_steps, args.trajs_per_repeat, args.dt, U, dU_dx) data = {} data["lambda_F"] = lambda_F data["lambda_R"] = lambda_R data["ks"] = np.array([args.ks], dtype=float) data["dt"] = np.array([args.dt], dtype=float) data["zF_t"] = zF_t data["wF_t"] = wF_t data["zR_t"] = zR_t data["wR_t"] = wR_t nc_handle = nc.Dataset(args.out, "w", format="NETCDF4") save_to_nc(data, nc_handle) nc_handle.close()
COLVAR_SETUP_FILE_MATCH = "colvar_*.in"
COLVAR_TRAJ_FILE = "equilibrate.colvars.traj"
NAMD_LOGFILE = "logfile"

COLVAR_SETUP_PREFIX, COLVAR_SETUP_SUFFIX = COLVAR_SETUP_FILE_MATCH.split("*")

colvar_setup_files = [os.path.join(args.colvar_setup_dir, COLVAR_SETUP_PREFIX + "%d" % i + COLVAR_SETUP_SUFFIX)
                      for i in range(args.nwindows)]

colvar_traj_files = [os.path.join(args.namd_dir, "%d" % i, COLVAR_TRAJ_FILE) for i in range(args.nwindows)]

namd_logfiles = [os.path.join(args.namd_dir, "%d" % i, NAMD_LOGFILE) for i in range(args.nwindows)]

u_kln, N_k = potential_energy_matrix(colvar_setup_files, colvar_traj_files, namd_logfiles)

nc_handle = nc.Dataset(args.out, mode="w", format="NETCDF4")
save_to_nc({"u_kln": u_kln, "N_k": N_k}, nc_handle)
nc_handle.close()
coordinates = np.array(coordinates)
unbiased_potentials = np.array(unbiased_potentials)

if args.symmetrized_states:
    print("symmetrized states")
    us_centers = np.concatenate((us_centers, -us_centers))
    coordinates = np.concatenate((coordinates, -coordinates), axis=0)
    unbiased_potentials = np.concatenate((unbiased_potentials, unbiased_potentials), axis=0)

u_kln = cal_u_kln(args.force_constant, us_centers, coordinates, unbiased_potentials)
u_kln *= BETA        # kcal/mol to kT
us_centers /= 10.    # Angstrom to nm

nc_handle = nc.Dataset(args.u_kln_out, "w", format="NETCDF4")
save_to_nc({"u_kln": u_kln, "us_centers": us_centers}, nc_handle)
nc_handle.close()

K = u_kln.shape[0]
N = u_kln.shape[-1]
N_k = np.array([N] * K, dtype=int)

mbar = pymbar.MBAR(u_kln, N_k, verbose=True)
fe = mbar.f_k

if args.symmetrized_states:
    # keep only the original (non-mirrored) half of the states
    out_data = {"fe": fe[:K // 2], "lambdas": us_centers[:K // 2]}
else:
    out_data = {"fe": fe, "lambdas": us_centers}

with open(args.fe_out, "wb") as handle:
    pickle.dump(out_data, handle)
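# cal_u_kln is defined elsewhere; the sketch below only illustrates the assumed
# construction of the potential energy matrix for K harmonic umbrella windows
# (sample n drawn in window k, evaluated with the bias of window l), before the
# BETA scaling applied above. It is not the original implementation.
import numpy as np

def cal_u_kln(force_constant, us_centers, coordinates, unbiased_potentials):
    # coordinates, unbiased_potentials: shape (K, N); us_centers: shape (K,)
    K, N = coordinates.shape
    u_kln = np.zeros((K, K, N))
    for k in range(K):
        for l in range(K):
            bias = 0.5 * force_constant * (coordinates[k] - us_centers[l]) ** 2
            u_kln[k, l, :] = unbiased_potentials[k] + bias
    return u_kln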
namd_logfiles = [os.path.join(args.namd_dir, "%d" % i, NAMD_LOGFILE) for i in use_windows]

u_kln, N_k = potential_energy_matrix(colvar_setup_files, colvar_traj_files, namd_logfiles)

mbar = _run_mbar(u_kln, N_k)

weights = mbar.getWeights()
weights = weights[:, -1]

K = u_kln.shape[0]
weights = weights.reshape((K, -1))

nc_handle = nc.Dataset(args.out, mode="w", format="NETCDF4")
save_to_nc({"weights": weights}, nc_handle)
nc_handle.close()

for i in range(weights.shape[0]):
    print(i, weights[i].sum())

print("max weight", weights.max())