        jsonfile = outfile.split(".")[0]+".json"
    else:
        jsonfile = outfile
    print("INFO: Writing mean configuration to", jsonfile, file=stderr)
    with open(jsonfile, "w") as file:
        file.write(mean_file)

if outoxdna: #save output as oxDNA .dat format
    if outjson: #if making both outputs, automatically set file extensions.
        outname = outfile.split(".")[0]+".dat"
    else:
        outname = outfile
    from mean2dat import make_dat
    make_dat(loads(mean_file), outname)

#If requested, run compute_deviations.py using the output from this script.
if dev_file:
    print("INFO: launching compute_deviations.py", file=stderr)

    #fire up a subprocess running compute_deviations.py
    import subprocess
    from sys import executable, path
    #pass each option and its value as separate argv entries so argparse parses them correctly
    launchargs = [executable, path[0]+"/compute_deviations.py", jsonfile, traj_file, top_file, "-o", dev_file]
    if parallel:
        launchargs.extend(["-p", str(n_cpus)])
    subprocess.run(launchargs)

    #compute_deviations needs the json meanfile, but it's not useful for visualization, so we dump it
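    #A minimal sketch of that cleanup (an assumption based on the comment above, not part of the excerpt):
    #remove the temporary JSON mean file unless the user explicitly asked to keep JSON output.
    if not outjson:
        from os import remove
        remove(jsonfile)  #assumed cleanup of the temporary mean file written earlier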
a1s = []
a3s = []
ts = []

#each worker returns (candidate, a1s, a3s, rmsf, time); collect the per-chunk results
out = parallelize_erik_onefile.fire_multiprocess(
    traj_file, compute_centroid, num_confs, n_cpus, mean_structure)
for candidate, a1, a3, rmsf, t in out:
    candidates.append(candidate)
    a1s.append(a1)
    a3s.append(a3)
    rmsfs.append(rmsf)
    ts.append(t)

#the centroid is the candidate configuration with the smallest RMSF relative to the mean structure
min_id = rmsfs.index(min(rmsfs))
centroid = candidates[min_id]
centroid_a1s = a1s[min_id]
centroid_a3s = a3s[min_id]
centroid_time = ts[min_id]
centroid_rmsf = rmsfs[min_id]

print(
    "INFO: Centroid configuration found at configuration t = {}, RMSF = {}"
    .format(centroid_time, centroid_rmsf),
    file=stderr)

#write the centroid out as an oxDNA .dat file
from mean2dat import make_dat
make_dat(
    {
        'g_mean': centroid,
        'a1_mean': centroid_a1s,
        'a3_mean': centroid_a3s
    },
    outfile)