Example #1
    def compute_cv_coordinates(self) -> np.ndarray:
        def _natural_sort(l):
            # Sort strings so that numeric parts compare as integers,
            # e.g. "s2" comes before "s10".
            convert = lambda text: int(text) if text.isdigit() else text.lower()
            alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
            return sorted(l, key=alphanum_key)
        logger.info("Remember to remove unfinished strings")
        cv_coordinates = None
        for it in range(self.first_iteration, self.last_iteration + 1):
            iteration_md_dir = "{}/{}/*/s*/*xvg".format(self.md_dir, it)
            xvg_files = _natural_sort(glob.glob(iteration_md_dir))
            if len(xvg_files) == 0:
                logger.info(
                    "No output files found for iteration %s. Not looking further",
                    it,
                )
                return cv_coordinates
            values = None
            for file_idx, xf in enumerate(xvg_files):
                data = mdtools.load_xvg(file_name=xf)
                # Skip the first column (time) and keep only the first and last frames
                data = data[[0, -1], 1:]
                if values is None:
                    n_cvs = data.shape[1]
                    values = np.empty((len(xvg_files), 2, n_cvs))
                values[file_idx, :, :] = data

            if cv_coordinates is None:
                cv_coordinates = values
            else:
                cv_coordinates = np.append(cv_coordinates, values, axis=0)
        return cv_coordinates
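The indexing above is easy to misread; here is a minimal standalone sketch of what `data[[0, -1], 1:]` selects, using a synthetic array in place of a parsed xvg file (no mdtools dependency):

import numpy as np

# Synthetic stand-in for a parsed .xvg file: 5 frames, column 0 is time,
# columns 1..3 hold three CV values per frame.
data = np.arange(20.0).reshape(5, 4)

# Keep only the first and last frame, and drop the time column.
first_last = data[[0, -1], 1:]
print(first_last.shape)  # (2, 3)

Each file thus contributes a (2, n_cvs) slab, which is why `values` is allocated with shape (len(xvg_files), 2, n_cvs).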
Example #2
def grompp(
    structure_file: str,
    mdp_file: str,
    topology_file: str,
    index_file: str,
    tpr_file: str,
    mdp_output_file: str,
):
    """Preprocess MD inputs into a portable run input (.tpr) file via gmx grompp."""
    input_files = {
        "-n": index_file,
        "-f": mdp_file,
        "-p": topology_file,
        "-c": structure_file,
    }
    output_files = {"-o": tpr_file, "-po": mdp_output_file}
    prep = gmx.commandline_operation(
        executable="gmx",
        arguments=["grompp"],
        input_files=input_files,
        output_files=output_files,
    )
    prep.run()
    output = str(prep.output.erroroutput.result()).strip()
    if output:
        logger.info("grompp output:\n%s", output)
    return prep
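A minimal usage sketch; the file names are hypothetical, and the gmxapi import (`gmx`) plus a module-level `logger` are assumed, as in the function above:

prep = grompp(
    structure_file="confout.gro",  # hypothetical paths
    mdp_file="swarm.mdp",
    topology_file="topol.top",
    index_file="index.ndx",
    tpr_file="topol.tpr",
    mdp_output_file="mdout.mdp",
)
# The returned gmxapi operation exposes its results, e.g. the exit code:
assert prep.output.returncode.result() == 0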
Example #3
    def compute_cv_coordinates(self) -> np.ndarray:
        logger.info("Remember to remove unfinished strings")
        cv_coordinates = None
        for it in range(self.first_iteration, self.last_iteration + 1):
            iteration_md_dir = "{}/{}/*/s*/*xvg".format(self.md_dir, it)
            xvg_files = glob.glob(iteration_md_dir)
            if len(xvg_files) == 0:
                logger.info(
                    "No output files found for iteration %s. Not looking further",
                    it,
                )
                return cv_coordinates
            values = None
            for file_idx, xf in enumerate(xvg_files):
                data = mdtools.load_xvg(file_name=xf)
                # Skip the first column (time) and keep only the first and last frames
                data = data[[0, -1], 1:]
                if values is None:
                    n_cvs = data.shape[1]
                    values = np.empty((len(xvg_files), 2, n_cvs))
                values[file_idx, :, :] = data

            if cv_coordinates is None:
                cv_coordinates = values
            else:
                cv_coordinates = np.append(cv_coordinates, values, axis=0)
        return cv_coordinates
Example #4
def mdrun(
    mpi_rank: int,
    output_dir: str,
    tpr_file: str,
    check_point_file: str = None,
    mdrun_options: list = None,
    gpus_per_node: int = None,
):
    """Run gmx mdrun in output_dir, optionally with CPU pinning and a GPU id."""
    # MPI rank 0 is the master; convert the 1-based worker rank to a 0-based index.
    mpi_rank = mpi_rank - 1
    cwd = os.path.abspath(os.getcwd())
    os.chdir(output_dir)
    input_files = {"-s": tpr_file}
    if check_point_file is not None:
        input_files["-cpi"] = check_point_file
    if mdrun_options is None:
        mdrun_options_parse = []
    else:
        # Copy so the flags appended below don't mutate the caller's list.
        mdrun_options_parse = list(mdrun_options)

    # Search for the -nt (number of threads) option to set up core pinning.
    for i, o in enumerate(mdrun_options_parse):
        if o == "-nt":
            number_threads = int(mdrun_options_parse[i + 1])
            pin_offset = str(mpi_rank * number_threads)
            mdrun_options_parse += [
                "-pin",
                "on",
                "-pinoffset",
                f"{pin_offset}",
                "-pinstride",
                "1",
            ]
            break

    if gpus_per_node is not None:
        # Map worker ranks round-robin onto the GPUs of a node.
        gpu_id = str(mpi_rank % gpus_per_node)
        mdrun_options_parse += ["-gpu_id", gpu_id]

    md = gmx.commandline_operation(
        executable="gmx",
        # -cpt 5: write checkpoints every 5 minutes, since swarm runs are short.
        arguments=["mdrun", "-cpt", "5"] + mdrun_options_parse,
        input_files=input_files,
        output_files={},
    )
    md.run()
    output = str(md.output.erroroutput.result()).strip()
    if output:
        logger.info("mdrun output:\n%s", output)
    os.chdir(cwd)
    # simulation_input = gmx.read_tpr(tpr_file)
    # md = gmx.mdrun(input=simulation_input)
    # md.run()
    # path = md.output.trajectory.result()
    # path = path[:path.rfind("/") + 1]
    # _move_all_files(path, output_dir)
    # os.removedirs(path)
    return md
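A hedged usage sketch for mdrun; the rank, paths, and options are hypothetical. Note that tpr_file should be an absolute path, since the function chdirs into output_dir before launching:

md = mdrun(
    mpi_rank=1,                      # first worker rank -> pin offset 0
    output_dir="md/1/0/s0",
    tpr_file="/abs/path/topol.tpr",  # absolute, because of the os.chdir above
    mdrun_options=["-nt", "4"],      # a thread count enables core pinning
    gpus_per_node=2,                 # ranks map round-robin onto GPU ids
)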
Example #5
    def _compute_probability_distribution_eigenvector(self):
        """
        Find the stationary eigenvector(s) of the transition matrix
        built from self.transition_count.
        :return: the stationary probability distribution
        """
        transition_probability = np.zeros(self.transition_count.shape)
        transition_count = self._remove_transitions_to_isolated_bins(
            self.transition_count)
        # Row-normalize the counts to get transition probabilities.
        for rowidx, row in enumerate(transition_count):
            rowsum = np.sum(row)
            if rowsum > 0:
                transition_probability[rowidx] = row / rowsum
        eigenvalues, eigenvectors = np.linalg.eig(transition_probability.T)
        stationary_solution = None
        unit_eigenval = None  # The eigenvalue closest to 1
        for idx, eigenval in enumerate(eigenvalues):
            vec = eigenvectors[:, idx]
            if np.isclose(1.0, eigenval, rtol=1e-2):
                # Take the real part up front: comparing complex entries
                # with < or > raises a TypeError in numpy.
                vec = np.real(vec)
                neg_vec, pos_vec = vec[vec < 0], vec[vec > 0]
                if len(pos_vec) == 0:
                    # No positive entries, so all must be negative; multiplying
                    # the eigenvector by -1 gives an equivalent one.
                    vec = -1 * vec
                elif len(neg_vec) > 0:
                    logger.warning(
                        "Found an eigenvalue ~1 (%s) whose eigenvector has negative entries",
                        eigenval,
                    )
                    continue
                if stationary_solution is not None:
                    raise Exception(
                        "Multiple stationary solutions found. Perhaps there were no transitions between states. Eigenvalues:\n%s"
                        % eigenvalues)
                stationary_solution = vec / np.sum(vec)
                unit_eigenval = eigenval
        # The largest eigenvalue below 1 corresponds to the slowest relaxation.
        relaxation_eigenval = None
        for eigenval in eigenvalues:
            if np.real(eigenval) < 1 and eigenval != unit_eigenval:
                if (relaxation_eigenval is None
                        or np.real(eigenval) > np.real(relaxation_eigenval)):
                    relaxation_eigenval = eigenval
        if stationary_solution is None:
            raise Exception("No stationary solution found. Eigenvalues:\n%s"
                            % eigenvalues)
        if relaxation_eigenval is not None:
            logger.info(
                "Relaxation time for system: %s [units of lag time]. Eigenval=%s",
                -1 / np.log(np.real(relaxation_eigenval)),
                relaxation_eigenval,
            )
        return stationary_solution
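The stationary-vector logic can be sanity-checked in isolation; a minimal sketch with a hand-built two-state transition matrix (pure numpy, independent of the class):

import numpy as np

# Row-stochastic transition matrix for two states.
T = np.array([[0.9, 0.1],
              [0.2, 0.8]])

eigenvalues, eigenvectors = np.linalg.eig(T.T)
# Pick the eigenvector whose eigenvalue is closest to 1 and normalize it.
idx = np.argmin(np.abs(eigenvalues - 1.0))
stationary = np.real(eigenvectors[:, idx])
stationary /= stationary.sum()
print(stationary)  # ~[0.667, 0.333]

The second eigenvalue here is 0.7, giving a relaxation time of -1/ln(0.7) ≈ 2.8 lag times.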
Example #6
def submit(tasks: List[Tuple[str, dict]], step=None):
    """Run all tasks on this rank, or distribute them across the available MPI ranks."""
    global _instance
    if mpi.n_ranks == 1:
        # We're running this on a single MPI rank. No need for a master-slave setup
        logger.info("Running all jobs on a single rank")
        _instance = GmxSlave()
        _instance.run_all(tasks)
        logger.info("Finished with step %s on a single MPI rank", step)
    elif mpi.is_master():
        logger.info("Distributing all jobs to %s ranks", mpi.n_ranks - 1)
        # TODO should start a slave on this rank as well to best utilize computational resources
        _instance = GmxMaster(slaves=range(1, mpi.n_ranks))
        try:
            _instance.run(tasks)
        finally:
            logger.info("Stopping all workers for step %s", step)
            _instance.terminate_slaves()
    else:
        _instance = GmxSlave()
        _instance.run()
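A hedged sketch of how submit might be called; the task name "mdrun" and its keyword arguments are assumptions about what GmxMaster/GmxSlave dispatch, not confirmed by the snippet:

tasks = [
    ("mdrun", {"mpi_rank": 1, "output_dir": "md/1/0/s0", "tpr_file": "topol.tpr"}),
    ("mdrun", {"mpi_rank": 2, "output_dir": "md/1/0/s1", "tpr_file": "topol.tpr"}),
]
submit(tasks, step="swarms")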
Example #7
def init():
    if is_master():
        logger.info("Using %s MPI ranks ", n_ranks)