def addLigandBox(topology, positions, system, resname, dummy, radius, worker):
    """
        Add a spherical flat-bottom restraint between the dummy atom and the
        ligand heavy atom closest to the ligand center of mass
    """
    masses = []
    coords = np.ndarray(shape=(0, 3))
    ligand_atoms = []
    for atom in topology.atoms():
        if atom.residue.name == resname and atom.element.symbol != "H":
            masses.append(atom.element.mass.value_in_unit(unit=unit.dalton))
            coords = np.vstack(
                (coords,
                 positions[atom.index].value_in_unit(unit=unit.nanometer)))
            ligand_atoms.append(atom)
    masses = np.array(masses)
    masses /= masses.sum()
    mass_center = coords.astype('float64').T.dot(masses)
    atomClosestToMassCenter = min(
        ligand_atoms,
        key=lambda x: np.linalg.norm(mass_center - positions[x.index].
                                     value_in_unit(unit=unit.nanometer)))
    if worker == 0:
        utilities.print_unbuffered(
            "Ligand atom selected to check distance to the box",
            atomClosestToMassCenter.residue.name, atomClosestToMassCenter.name,
            atomClosestToMassCenter.index)
    ligand_atom = atomClosestToMassCenter.index
    forceFB = mm.CustomBondForce('step(r-r0)*(k_box/2) * (r-r0)^2')
    forceFB.addPerBondParameter("k_box")
    forceFB.addPerBondParameter("r0")
    forceFB.addBond(dummy, ligand_atom, [
        5.0 * unit.kilocalories_per_mole / unit.angstroms**2,
        radius * unit.angstroms
    ])
    system.addForce(forceFB)
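
# A minimal sketch (not part of the original module) of the flat-bottom
# potential used by addLigandBox: step(r-r0)*(k_box/2)*(r-r0)^2 is zero inside
# the sphere of radius r0 and grows harmonically outside it. All numbers below
# are illustrative only.
import numpy as np

def flat_bottom_energy(r, r0, k_box):
    # same expression as the CustomBondForce, evaluated with NumPy
    return np.where(r > r0, 0.5 * k_box * (r - r0) ** 2, 0.0)

distances = np.array([0.5, 1.0, 2.0, 2.5])  # hypothetical ligand-dummy distances (nm)
print(flat_bottom_energy(distances, r0=2.0, k_box=2092.0))  # non-zero only beyond r0
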
def calculateSASA(trajectory, topology, res_name):
    """
        Calculate the SASA of a ligand in a trajectory

        :param trajectory: Name of the trajectory file
        :type trajectory: str
        :param topology: Topology of the trajectory (needed for non-pdb trajs)
        :type topology: str
        :param res_name: Ligand resname
        :type res_name: str
    """
    utilities.print_unbuffered("Processing", trajectory)
    t = md.load(trajectory, top=topology)
    t.remove_solvent(inplace=True)
    res_atoms = t.top.select("resname '%s'" % res_name)
    if not len(res_atoms):
        raise ValueError("Nothing found using resname %s" % res_name)
    t2 = t.atom_slice(res_atoms)
    for atom in t2.top.atoms:
        # mdtraj complains if the ligand residue index is not 0 when
        # isolated
        atom.residue.index = 0
    res_index = t.top.atom(res_atoms[0]).residue.index
    sasa = md.shrake_rupley(t, mode="residue")
    sasa_empty = md.shrake_rupley(t2, mode="residue")
    return sasa[:, res_index] / sasa_empty[:, 0]
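
# Hypothetical usage sketch of calculateSASA: the file names and the ligand
# resname ("LIG") are placeholders. Each entry of the returned array is the
# ligand SASA in the complex divided by the SASA of the isolated ligand, so
# values close to 1.0 mean the ligand is almost fully exposed.
relative_sasa = calculateSASA("trajectory_1.xtc", "topology_0.pdb", "LIG")
buried_fraction = 1.0 - relative_sasa  # per-frame fraction of the ligand surface buried
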
def addDummyAtomToSystem(system, topology, positions, resname, dummies,
                         worker):
    """
        Make the dummy atoms massless and non-interacting and restrain them to
        a set of roughly 20 evenly spaced protein alpha carbons
    """
    protein_CAs = []
    for atom in topology.atoms():
        if atom.residue.name not in ("HOH", "Cl-", "Na+",
                                     resname) and atom.name == "CA":
            protein_CAs.append(atom.index)
    # subsample the protein alpha carbons so that roughly 20 evenly spaced
    # atoms are used as anchors for the dummy atom
    modul = len(protein_CAs) % 20
    step_to_use = int((len(protein_CAs) - modul) / 20)
    if modul == 0:
        modul = None
    else:
        modul = modul * -1
    protein_CAs = protein_CAs[:modul:step_to_use]
    if worker == 0:
        utilities.print_unbuffered(
            "Added bond between dummy atom and protein atoms", protein_CAs)
    for dummy in dummies:
        system.setParticleMass(dummy, 0.0)
        for protein_particle in protein_CAs:
            distance_constraint = np.linalg.norm(
                positions[dummy].value_in_unit(unit.nanometers) -
                positions[protein_particle].value_in_unit(unit.nanometers))
            force_dummy = mm.HarmonicBondForce()
            constraint_force = 10 * 4.184 * 2  # express the constraint force in kJ/mol/nm^2
            force_dummy.addBond(dummy, protein_particle, distance_constraint,
                                constraint_force)
            system.addForce(force_dummy)
        for forces in system.getForces():
            if isinstance(forces, mm.NonbondedForce):
                forces.setParticleParameters(dummy, 0.0, 1.0, 0.0)
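
# Minimal sketch of the alpha-carbon subsampling performed above, with
# illustrative indices: the slice protein_CAs[:modul:step_to_use] keeps
# roughly 20 evenly spaced CA atoms and drops the remainder at the end.
protein_CAs = list(range(103))            # pretend indices of 103 CA atoms
modul = len(protein_CAs) % 20             # 3 leftover atoms
step_to_use = (len(protein_CAs) - modul) // 20
modul = None if modul == 0 else -modul
anchors = protein_CAs[:modul:step_to_use]
print(len(anchors), anchors[:5])          # 20 anchors: [0, 5, 10, 15, 20]
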
def addLigandCylinderBox(topology, positions, system, resname, dummies, radius, worker):
    """
        Add a cylindrical flat-bottom restraint on the ligand, built from the
        dummy atoms at the center and base of the cylinder
    """
    center, base, top_base = dummies
    masses = []
    coords = np.ndarray(shape=(0, 3))
    ligand_atoms = []
    for atom in topology.atoms():
        if atom.residue.name == resname and atom.element.symbol != "H":
            masses.append(atom.element.mass.value_in_unit(unit=unit.dalton))
            coords = np.vstack((coords, positions[atom.index].value_in_unit(unit=unit.nanometer)))
            ligand_atoms.append(atom)
    masses = np.array(masses)
    masses /= masses.sum()
    mass_center = coords.astype('float64').T.dot(masses)
    atomClosestToMassCenter = min(ligand_atoms, key=lambda x: np.linalg.norm(mass_center - positions[x.index].value_in_unit(unit=unit.nanometer)))
    length = np.linalg.norm(positions[center].value_in_unit(unit.nanometers)-positions[base].value_in_unit(unit.nanometers))
    if worker == 0:
        utilities.print_unbuffered("Ligand atom selected to check distance to the box", atomClosestToMassCenter.residue.name, atomClosestToMassCenter.name, atomClosestToMassCenter.index)
    ligand_atom = atomClosestToMassCenter.index
    # forceFB = mm.CustomBondForce('step(r-r_l)*(k_box/2) * (r-r_l)^2')
    forceFB = mm.CustomCompoundBondForce(3, '(step(r_par-2*r_l)+step(-r_par))*(k_box/2) * (r_par-r_l)^2; r_par=ax*dx+ay*dy+az*dz; ax=(x1-x2)/l; ay=(y1-y2)/l; az=(z1-z2)/l; dx=x3-x2; dy=y3-y2; dz=z3-z2; l=distance(p1, p2)')
    forceFB.addPerBondParameter("k_box")
    forceFB.addPerBondParameter("r_l")
    #forceFB.addBond(center, ligand_atom, [5.0 * unit.kilocalories_per_mole / unit.angstroms ** 2, length*unit.nanometers])
    forceFB.addBond([center, base, ligand_atom], [5.0 * unit.kilocalories_per_mole / unit.angstroms ** 2, length*unit.nanometers])
    system.addForce(forceFB)
    force_side = mm.CustomCompoundBondForce(3, 'step(r_normal-r0)*(k_box/2) * (r_normal-r0)^2; r_normal=sqrt((ay*dz-az*dy)^2+(az*dx-ax*dz)^2+(ax*dy-ay*dx)^2); ax=(x1-x2)/l; ay=(y1-y2)/l; az=(z1-z2)/l; dx=x3-x2; dy=y3-y2; dz=z3-z2; l=distance(p1, p2)')
    force_side.addPerBondParameter("k_box")
    force_side.addPerBondParameter("r0")
    # the order of the particles involved is: cylinder center, base and
    # the atom to constrain (ligand)
    force_side.addBond([center, base, ligand_atom], [5.0 * unit.kilocalories_per_mole / unit.angstroms ** 2, radius*unit.angstroms])
    system.addForce(force_side)
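
# Minimal NumPy sketch (illustrative coordinates, not from any simulation) of
# the geometry restrained above: the vector from the cylinder base to the
# ligand atom is split into a component along the axis (r_par, handled by
# forceFB) and a component perpendicular to it (r_normal, handled by force_side).
import numpy as np

center = np.array([0.0, 0.0, 1.0])   # dummy at the cylinder center (p1), nm
base = np.array([0.0, 0.0, 0.0])     # dummy at the cylinder base (p2), nm
ligand = np.array([0.3, 0.1, 1.4])   # ligand atom to restrain (p3), nm

axis = (center - base) / np.linalg.norm(center - base)
d = ligand - base
r_par = np.dot(axis, d)                       # projection on the cylinder axis
r_normal = np.linalg.norm(np.cross(axis, d))  # distance from the cylinder axis
print(r_par, r_normal)
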
def getWorkingClusteringObjectAndReclusterIfNecessary(
        firstRun, outputPathConstants, clusteringBlock, spawningParams,
        simulationRunner, topologies, processManager):
    """
        It reads the previous clustering method, and, if there are changes,
        it reclusters the previous trajectories. Returns the clustering object to use

        :param firstRun: New epoch to run
        :type firstRun: int
        :param outputPathConstants: Contains outputPath-related constants
        :type outputPathConstants: :py:class:`.OutputPathConstants`
        :param clusteringBlock: Contains the new clustering block
        :type clusteringBlock: json
        :param spawningParams: Spawning params, to know what reportFile and column to read
        :type spawningParams: :py:class:`.SpawningParams`
        :param topologies: Topology object containing the set of topologies needed for the simulation
        :type topologies: :py:class:`.Topology`
        :param processManager: Object to synchronize the possibly multiple processes
        :type processManager: :py:class:`.ProcessesManager`

        :returns: :py:class:`.Clustering` -- The clustering method to use in the
            adaptive sampling simulation
    """
    if not processManager.isMaster():
        for ij in range(firstRun):
            topologies.readMappingFromDisk(
                outputPathConstants.epochOutputPathTempletized % ij, ij)
        return
    lastClusteringEpoch = firstRun - 1
    clusteringObjectPath = outputPathConstants.clusteringOutputObject % (
        lastClusteringEpoch)
    oldClusteringMethod = utilities.readClusteringObject(clusteringObjectPath)

    clusteringBuilder = clustering.ClusteringBuilder()
    clusteringMethod = clusteringBuilder.buildClustering(
        clusteringBlock, spawningParams.reportFilename,
        spawningParams.reportCol)

    clusteringMethod.setProcessors(simulationRunner.getWorkingProcessors())
    if needToRecluster(oldClusteringMethod, clusteringMethod):
        utilities.print_unbuffered("Reclustering!")
        startTime = time.time()
        clusterPreviousEpochs(clusteringMethod, firstRun,
                              outputPathConstants.epochOutputPathTempletized,
                              simulationRunner, topologies,
                              outputPathConstants.allTrajsPath)
        endTime = time.time()
        utilities.print_unbuffered("Reclustering took %s sec" %
                                   (endTime - startTime))
    else:
        clusteringMethod = oldClusteringMethod
        clusteringMethod.setCol(spawningParams.reportCol)
        for ij in range(firstRun):
            topologies.readMappingFromDisk(
                outputPathConstants.epochOutputPathTempletized % ij, ij)

    return clusteringMethod
def allRunning(self):
    """
        Check if all processes are still running
    """
    for pid in self.lockInfo:
        try:
            os.kill(pid, 0)
        except ProcessLookupError:
            utilities.print_unbuffered("Process %d not found!!!" % pid)
            return False
    return True
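
# Minimal sketch of the liveness check used in allRunning: sending signal 0
# with os.kill delivers nothing, but raises ProcessLookupError when the pid no
# longer exists (POSIX only). The pid below is the current process, so the
# check trivially succeeds.
import os

def pid_is_alive(pid):
    try:
        os.kill(pid, 0)
        return True
    except ProcessLookupError:
        return False

print(pid_is_alive(os.getpid()))  # True
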
def process_traj(inputs):
    """
        Superpose a trajectory onto the backbone of its reference topology
        (optionally imaging the molecules) and save the postprocessed file
    """
    top_ind, traj_name, epoch, traj_num, imaging = inputs
    ext = os.path.splitext(traj_name)[1]
    utilities.print_unbuffered("Processing trajectory", traj_name)
    top = md.load("topologies/topology_%s.pdb" % top_ind)
    atoms = top.top.select("backbone")
    t = md.load(traj_name, top="topologies/system_%s.prmtop" % top_ind)
    if imaging:
        t.image_molecules(inplace=True)
    t.superpose(top, atom_indices=atoms)
    t.save(
        os.path.join(epoch, "trajectory_postprocessed_%d%s" % (traj_num, ext)))
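
# Hypothetical usage sketch of process_traj: it takes a single tuple so it can
# be mapped over a multiprocessing pool. The paths and indices below are
# placeholders; the function expects topologies/topology_<i>.pdb and
# topologies/system_<i>.prmtop to exist, as in the original layout.
from multiprocessing import Pool

jobs = [(0, "0/trajectory_1.xtc", "0", 1, True),
        (0, "0/trajectory_2.xtc", "0", 2, True)]
with Pool(2) as pool:
    pool.map(process_traj, jobs)
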
def debugSimulation(simulation, outputDir, workerNumber, parameters):
    """
        Add some debugging information for MD simulations that crash
    """
    simulation.reporters.append(ForceReporter(str(os.path.join(outputDir, "forces_%d" % workerNumber)), parameters.reporterFreq))
    state = simulation.context.getState(getEnergy=True, getPositions=True)
    utilities.print_unbuffered("Trajectory", workerNumber, "kinetic energy", state.getKineticEnergy(), "potential energy", state.getPotentialEnergy())
    with open(str(os.path.join(outputDir, "initial_%d.pdb" % workerNumber)), 'w') as fw:
        app.PDBFile.writeFile(simulation.topology, state.getPositions(), fw)
    simulation.minimizeEnergy(maxIterations=parameters.minimizationIterations)
    state = simulation.context.getState(getEnergy=True, getPositions=True)
    utilities.print_unbuffered("After minimizing Trajectory", workerNumber, "kinetic energy", state.getKineticEnergy(), "potential energy", state.getPotentialEnergy())
    with open(str(os.path.join(outputDir, "initial_min_%d.pdb" % workerNumber)), 'w') as fw:
        app.PDBFile.writeFile(simulation.topology, state.getPositions(), fw)
def calculate_distances(trajectory, topology, residues):
    """
        Calculate the distances between pairs of atoms in a trajectory

        :param trajectory: Name of the trajectory file
        :type trajectory: str
        :param topology: Topology of the trajectory (needed for non-pdb trajs)
        :type topology: str
        :param residues: Pairs of atoms to calculate distances
        :type residues: list
    """
    utilities.print_unbuffered("Processing", trajectory)
    t = md.load(trajectory, top=topology)
    atom_pairs = []
    for info1, info2 in residues:
        atom1 = t.top.select("resname '%s' and residue %s and name %s" % info1)
        atom2 = t.top.select("resname '%s' and residue %s and name %s" % info2)
        if atom1.size == 0 or atom2.size == 0:
            raise ValueError("Nothing found under current selection")
        atom_pairs.append(atom1.tolist()+atom2.tolist())
    atom_pairs = np.array(atom_pairs)
    return 10*md.compute_distances(t, atom_pairs)
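
# Hypothetical usage sketch of calculate_distances: each entry of residues is
# a pair of (resname, residue number, atom name) tuples; the selections and
# file names are placeholders. The result is an (n_frames, n_pairs) array in
# angstroms, since mdtraj reports nanometers and the function multiplies by 10.
pairs = [(("LIG", "900", "C1"), ("ASP", "25", "CG"))]
dists = calculate_distances("trajectory_1.xtc", "topology_0.pdb", pairs)
print(dists.shape, dists.mean(axis=0))
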
def runEquilibration(equilibrationFiles, reportName, parameters, worker):
    """
    Function that runs the whole equilibration process and returns the final pdb

    :param equilibrationFiles: Tuple with the topology (prmtop) in the first position and the coordinates
        (inpcrd) in the second
    :type equilibrationFiles: tuple
    :param reportName: Name of the report file; its root is also used to name the output pdb (<root>_NPT.pdb)
    :type reportName: str
    :param parameters: Object with the parameters for the simulation
    :type parameters: :py:class:`/simulationrunner/SimulationParameters` -- SimulationParameters object
    :param worker: Number of the subprocess
    :type worker: int

    :returns: str -- Path of the pdb written after the NPT equilibration stage
    """
    prmtop, inpcrd = equilibrationFiles
    prmtop = app.AmberPrmtopFile(prmtop)
    inpcrd = app.AmberInpcrdFile(inpcrd)
    PLATFORM = mm.Platform_getPlatformByName(str(parameters.runningPlatform))
    if parameters.runningPlatform == "CUDA":
        platformProperties = {"Precision": "mixed", "DeviceIndex": getDeviceIndexStr(worker, parameters.devicesPerTrajectory, devicesPerReplica=parameters.maxDevicesPerReplica), "UseCpuPme": "false"}
    else:
        platformProperties = {}
    if worker == 0:
        utilities.print_unbuffered("Running %d steps of minimization" % parameters.minimizationIterations)

    if parameters.boxCenter or parameters.cylinderBases:
        dummies = findDummyAtom(prmtop)
        assert dummies is not None
    else:
        dummies = None
    simulation = minimization(prmtop, inpcrd, PLATFORM, parameters.constraintsMin, parameters, platformProperties, dummies)
    # Retrieving the state is expensive (especially when running on GPUs), so we
    # only call it once and then extract the positions and velocities from it
    state = simulation.context.getState(getPositions=True, getVelocities=True)
    positions = state.getPositions()
    velocities = state.getVelocities()
    if worker == 0:
        utilities.print_unbuffered("Running %d steps of NVT equilibration" % parameters.equilibrationLengthNVT)
    simulation = NVTequilibration(prmtop, positions, PLATFORM, parameters.equilibrationLengthNVT, parameters.constraintsNVT, parameters, reportName, platformProperties, velocities=velocities, dummy=dummies)
    state = simulation.context.getState(getPositions=True, getVelocities=True)
    positions = state.getPositions()
    velocities = state.getVelocities()
    if worker == 0:
        utilities.print_unbuffered("Running %d steps of NPT equilibration" % parameters.equilibrationLengthNPT)
    simulation = NPTequilibration(prmtop, positions, PLATFORM, parameters.equilibrationLengthNPT, parameters.constraintsNPT, parameters, reportName, platformProperties, velocities=velocities, dummy=dummies)
    state = simulation.context.getState(getPositions=True)
    root, _ = os.path.splitext(reportName)
    outputPDB = "%s_NPT.pdb" % root
    with open(outputPDB, 'w') as fw:
        app.PDBFile.writeFile(simulation.topology, state.getPositions(), fw)
    return outputPDB
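
# Hypothetical usage sketch of runEquilibration: the prmtop/inpcrd paths are
# placeholders and `parameters` stands for an already-built SimulationParameters
# object, which is not constructed here. The call chains minimization, NVT and
# NPT equilibration and returns the pdb named after the report file root.
equilibrated_pdb = runEquilibration(("complex.prmtop", "complex.inpcrd"),
                                    "report_equilibration_1", parameters, worker=0)
print(equilibrated_pdb)  # report_equilibration_1_NPT.pdb
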
def runProductionSimulation(equilibrationFiles,
                            workerNumber,
                            outputDir,
                            seed,
                            parameters,
                            reportFileName,
                            checkpoint,
                            ligandName,
                            replica_id,
                            trajsPerReplica,
                            epoch_number,
                            restart=False):
    """
    Function that runs the production simulation at NPT conditions.
    If a boxRadius is defined in the parameters section, a flat-bottom harmonic restraint will be applied between
    the protein and the ligand

    :param equilibrationFiles: Tuple with the paths for the Amber topology file (prmtop) and the pdb for the system
    :type equilibrationFiles: Tuple
    :param workerNumber: Number of the subprocess
    :type workerNumber: int
    :param outputDir: path to the directory where the output will be written
    :type outputDir: str
    :param seed: Seed to use to generate the random numbers
    :type seed: int
    :param parameters: Object with the parameters for the simulation
    :type parameters: :py:class:`/simulationrunner/SimulationParameters` -- SimulationParameters object
    :param reportFileName: Name for the file where the energy report will be written
    :type reportFileName: str
    :param checkpoint: Path to the checkpoint from where the production run will be restarted (Optional)
    :type checkpoint: str
    :param ligandName: Code Name for the ligand
    :type ligandName: str
    :param replica_id: Id of the replica running
    :type replica_id: int
    :param trajsPerReplica: Number of trajectories per replica
    :type trajsPerReplica: int
    :param epoch_number: Number of the epoch
    :type epoch_number: int
    :param restart: Whether the simulation run has to be restarted or not
    :type restart: bool

    """
    # this number gives the number of the subprocess in the given node
    deviceIndex = workerNumber
    # this one gives the number of the subprocess in the overall simulation (i.e.
    # the trajectory file number)
    workerNumber += replica_id * trajsPerReplica + 1
    prmtop, pdb = equilibrationFiles
    prmtop = app.AmberPrmtopFile(prmtop)
    trajName = os.path.join(
        outputDir, constants.AmberTemplates.trajectoryTemplate %
        (workerNumber, parameters.format))
    stateReporter = os.path.join(outputDir,
                                 "%s_%s" % (reportFileName, workerNumber))
    checkpointReporter = os.path.join(
        outputDir,
        constants.AmberTemplates.CheckPointReporterTemplate % workerNumber)
    lastStep = getLastStep(stateReporter)
    simulation_length = parameters.productionLength - lastStep
    # if the string is unicode the PDB readers fail to read the file (this is
    # probably due to the fact that openmm was built with python2 on my
    # computer, will need to test thoroughly with python3)
    pdb = app.PDBFile(str(pdb))
    PLATFORM = mm.Platform_getPlatformByName(str(parameters.runningPlatform))
    if parameters.runningPlatform == "CUDA":
        platformProperties = {
            "Precision":
            "mixed",
            "DeviceIndex":
            getDeviceIndexStr(
                deviceIndex,
                parameters.devicesPerTrajectory,
                devicesPerReplica=parameters.maxDevicesPerReplica),
            "UseCpuPme":
            "false"
        }
    else:
        platformProperties = {}

    dummies = None
    if parameters.boxCenter or parameters.cylinderBases:
        dummies = findDummyAtom(prmtop)

    if epoch_number > 0:
        min_sim = minimization(prmtop,
                               pdb,
                               PLATFORM,
                               parameters.constraintsMin,
                               parameters,
                               platformProperties,
                               dummy=dummies)
        positions = min_sim.context.getState(getPositions=True).getPositions()
    else:
        positions = pdb.positions
    system = prmtop.createSystem(nonbondedMethod=app.PME,
                                 nonbondedCutoff=parameters.nonBondedCutoff *
                                 unit.angstroms,
                                 constraints=app.HBonds,
                                 removeCMMotion=True)
    if parameters.boxCenter or parameters.cylinderBases:
        addDummyAtomToSystem(system, prmtop.topology, positions,
                             parameters.ligandName, dummies, deviceIndex)

    system.addForce(
        mm.AndersenThermostat(parameters.Temperature * unit.kelvin,
                              1 / unit.picosecond))
    integrator = mm.VerletIntegrator(parameters.timeStep * unit.femtoseconds)
    system.addForce(
        mm.MonteCarloBarostat(1 * unit.bar,
                              parameters.Temperature * unit.kelvin))
    if parameters.constraints is not None:
        # Add the specified constraints to the system
        addConstraints(system, prmtop.topology, parameters.constraints)

    if parameters.boxCenter or parameters.cylinderBases:
        if parameters.boxType == blockNames.SimulationParams.sphere:
            if deviceIndex == 0:
                utilities.print_unbuffered("Adding spherical ligand box")
            assert len(dummies) == 1
            addLigandBox(prmtop.topology, positions, system,
                         parameters.ligandName, dummies[0],
                         parameters.boxRadius, deviceIndex)
        elif parameters.boxType == blockNames.SimulationParams.cylinder:
            if deviceIndex == 0:
                utilities.print_unbuffered("Adding cylinder ligand box")
            addLigandCylinderBox(prmtop.topology, positions, system,
                                 parameters.ligandName, dummies,
                                 parameters.boxRadius, deviceIndex)
    simulation = app.Simulation(prmtop.topology,
                                system,
                                integrator,
                                PLATFORM,
                                platformProperties=platformProperties)
    utilities.print_unbuffered(workerNumber, equilibrationFiles, dummies,
                               len(positions), prmtop.topology.getNumAtoms(),
                               system.getNumParticles())
    simulation.context.setPositions(positions)
    if restart:
        with open(str(checkpoint), 'rb') as check:
            simulation.context.loadCheckpoint(check.read())
        stateData = open(str(stateReporter), "a")
    else:
        simulation.context.setVelocitiesToTemperature(
            parameters.Temperature * unit.kelvin, seed)
        stateData = open(str(stateReporter), "w")
    if parameters.format == "xtc":
        simulation.reporters.append(
            XTCReporter(str(trajName),
                        parameters.reporterFreq,
                        append=restart,
                        enforcePeriodicBox=parameters.postprocessing))
    elif parameters.format == "dcd":
        simulation.reporters.append(
            app.DCDReporter(str(trajName),
                            parameters.reporterFreq,
                            append=restart,
                            enforcePeriodicBox=parameters.postprocessing))

    simulation.reporters.append(
        app.CheckpointReporter(str(checkpointReporter),
                               parameters.reporterFreq))
    simulation.reporters.append(
        CustomStateDataReporter(stateData,
                                parameters.reporterFreq,
                                step=True,
                                potentialEnergy=True,
                                temperature=True,
                                time_sim=True,
                                volume=True,
                                remainingTime=True,
                                speed=True,
                                totalSteps=simulation_length,
                                separator="\t",
                                append=restart,
                                initialStep=lastStep))

    if workerNumber == 1:
        frequency = min(10 * parameters.reporterFreq,
                        parameters.productionLength)
        simulation.reporters.append(
            app.StateDataReporter(sys.stdout, frequency, step=True))
    simulation.step(simulation_length)
    stateData.close()
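
# Minimal sketch of the trajectory numbering used at the top of
# runProductionSimulation (illustrative values): deviceIndex is local to the
# node, while the global trajectory number adds replica_id * trajsPerReplica
# and starts at 1.
trajsPerReplica = 4
for replica_id in range(2):
    for deviceIndex in range(trajsPerReplica):
        workerNumber = deviceIndex + replica_id * trajsPerReplica + 1
        print(replica_id, deviceIndex, "-> trajectory", workerNumber)
# replica 0 writes trajectories 1-4, replica 1 writes trajectories 5-8
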
def main(jsonParams, clusteringHook=None):
    """
        Main body of the adaptive sampling program.

        :param jsonParams: A string with the name of the control file to use
        :type jsonParams: str
    """

    controlFileValidator.validate(jsonParams)
    generalParams, spawningBlock, simulationrunnerBlock, clusteringBlock = loadParams(
        jsonParams)

    spawningAlgorithmBuilder = spawning.SpawningAlgorithmBuilder()
    spawningCalculator = spawningAlgorithmBuilder.build(spawningBlock)

    runnerbuilder = simulationrunner.RunnerBuilder()
    simulationRunner = runnerbuilder.build(simulationrunnerBlock)

    restart = generalParams.get(blockNames.GeneralParams.restart, True)
    debug = generalParams.get(blockNames.GeneralParams.debug, False)
    outputPath = generalParams[blockNames.GeneralParams.outputPath]
    initialStructuresWildcard = generalParams[
        blockNames.GeneralParams.initialStructures]
    writeAll = generalParams.get(blockNames.GeneralParams.writeAllClustering,
                                 False)
    nativeStructure = generalParams.get(
        blockNames.GeneralParams.nativeStructure, '')
    resname = clusteringBlock[blockNames.ClusteringTypes.params].get(
        blockNames.ClusteringTypes.ligandResname)
    if resname is None:
        # check if resname is provided in the simulation block
        resname = simulationRunner.getResname()

    initialStructures = expandInitialStructuresWildcard(
        initialStructuresWildcard)
    if not initialStructures:
        raise InitialStructuresError("No initial structures found!!!")

    if len(initialStructures) > simulationRunner.getWorkingProcessors():
        raise InitialStructuresError(
            "Error: More initial structures than Working Processors found!!!")

    if resname is not None:
        checkSymmetryDict(clusteringBlock, initialStructures, resname)

    outputPathConstants = constants.OutputPathConstants(outputPath)

    if not debug:
        atexit.register(utilities.cleanup, outputPathConstants.tmpFolder)
    simulationRunner.unifyReportNames(
        spawningCalculator.parameters.reportFilename)
    utilities.makeFolder(outputPath)
    utilities.makeFolder(outputPathConstants.tmpFolder)
    utilities.makeFolder(outputPathConstants.topologies)
    processManager = ProcessesManager(outputPath,
                                      simulationRunner.getNumReplicas())
    firstRun = findFirstRun(outputPath,
                            outputPathConstants.clusteringOutputObject,
                            simulationRunner, restart)
    if processManager.isMaster():
        printRunInfo(restart, debug, simulationRunner, spawningCalculator,
                     clusteringBlock, outputPath, initialStructuresWildcard)
        saveInitialControlFile(jsonParams,
                               outputPathConstants.originalControlFile)
    processManager.barrier()
    # once the replicas are properly synchronized there is no need for the
    # process files, and erasing them allows us to restart simulations
    cleanProcessesFiles(processManager.syncFolder)

    topologies = utilities.Topology(outputPathConstants.topologies)
    if restart and firstRun is not None:
        topology_files = glob.glob(
            os.path.join(outputPathConstants.topologies, "topology*.pdb"))
        topology_files.sort(key=utilities.getTrajNum)
        topologies.setTopologies(topology_files)
        if firstRun == 0:
            createMappingForFirstEpoch(initialStructures, topologies,
                                       simulationRunner.getWorkingProcessors())
            clusteringMethod, initialStructuresAsString = buildNewClusteringAndWriteInitialStructuresInNewSimulation(
                debug, jsonParams, outputPathConstants, clusteringBlock,
                spawningCalculator.parameters, initialStructures,
                simulationRunner, processManager)
        else:
            clusteringMethod, initialStructuresAsString = buildNewClusteringAndWriteInitialStructuresInRestart(
                firstRun, outputPathConstants, clusteringBlock,
                spawningCalculator.parameters, spawningCalculator,
                simulationRunner, topologies, processManager)
        if processManager.isMaster():
            checkMetricExitConditionMultipleTrajsinRestart(
                firstRun, outputPathConstants.epochOutputPathTempletized,
                simulationRunner)
        processManager.barrier()

    if firstRun is None or not restart:
        topologies.setTopologies(initialStructures)
        if processManager.isMaster():
            if not debug:
                cleanPreviousSimulation(outputPath,
                                        outputPathConstants.allTrajsPath)
            writeTopologyFiles(initialStructures,
                               outputPathConstants.topologies)
        processManager.barrier()
        firstRun = 0  # if restart false, but there were previous simulations

        if simulationRunner.parameters.runEquilibration:
            initialStructures = simulationRunner.equilibrate(
                initialStructures, outputPathConstants,
                spawningCalculator.parameters.reportFilename, outputPath,
                resname, processManager, topologies)
            # write the equilibration structures for each replica
            processManager.writeEquilibrationStructures(
                outputPathConstants.tmpFolder, initialStructures)
            if processManager.isMaster(
            ) and simulationRunner.parameters.constraints:
                # write the new constraints for synchronization
                utilities.writeNewConstraints(
                    outputPathConstants.topologies, "new_constraints.txt",
                    simulationRunner.parameters.constraints)
            processManager.barrier()

            if not processManager.isMaster(
            ) and simulationRunner.parameters.constraints:
                simulationRunner.parameters.constraints = utilities.readConstraints(
                    outputPathConstants.topologies, "new_constraints.txt")
            # read all the equilibration structures
            initialStructures = processManager.readEquilibrationStructures(
                outputPathConstants.tmpFolder)
            topologies.setTopologies(initialStructures,
                                     cleanFiles=processManager.isMaster())
            if processManager.isMaster():
                writeTopologyFiles(initialStructures,
                                   outputPathConstants.topologies)
            # ensure that topologies are written
            processManager.barrier()
            topology_files = glob.glob(
                os.path.join(outputPathConstants.topologies, "topology*.pdb"))
            topology_files.sort(key=utilities.getTrajNum)
            topologies.setTopologies(topology_files, cleanFiles=False)
        createMappingForFirstEpoch(initialStructures, topologies,
                                   simulationRunner.getWorkingProcessors())

        clusteringMethod, initialStructuresAsString = buildNewClusteringAndWriteInitialStructuresInNewSimulation(
            debug, jsonParams, outputPathConstants, clusteringBlock,
            spawningCalculator.parameters, initialStructures, simulationRunner,
            processManager)

    if processManager.isMaster():
        repeat, numSteps = simulationRunner.getClusteringInfo()
        clusteringMethod.updateRepeatParameters(repeat, numSteps)
        clusteringMethod.setProcessors(simulationRunner.getWorkingProcessors())
    if simulationRunner.parameters.modeMovingBox is not None and simulationRunner.parameters.boxCenter is None:
        simulationRunner.parameters.boxCenter = simulationRunner.selectInitialBoxCenter(
            initialStructuresAsString, resname)
    for i in range(firstRun, simulationRunner.parameters.iterations):
        if processManager.isMaster():
            utilities.print_unbuffered("Iteration", i)
            outputDir = outputPathConstants.epochOutputPathTempletized % i
            utilities.makeFolder(outputDir)

            simulationRunner.writeMappingToDisk(
                outputPathConstants.epochOutputPathTempletized % i)
            topologies.writeMappingToDisk(
                outputPathConstants.epochOutputPathTempletized % i, i)
            if i == 0:
                # write the object to file at the start of the first epoch, so
                # the topologies can always be loaded
                topologies.writeTopologyObject()
        processManager.barrier()
        if processManager.isMaster():
            utilities.print_unbuffered("Production run...")
        if not debug:
            simulationRunner.runSimulation(
                i, outputPathConstants, initialStructuresAsString, topologies,
                spawningCalculator.parameters.reportFilename, processManager)
        processManager.barrier()

        if processManager.isMaster():
            if simulationRunner.parameters.postprocessing:
                simulationRunner.processTrajectories(
                    outputPathConstants.epochOutputPathTempletized % i,
                    topologies, i)
            utilities.print_unbuffered("Clustering...")
            startTime = time.time()
            clusterEpochTrajs(clusteringMethod, i,
                              outputPathConstants.epochOutputPathTempletized,
                              topologies, outputPathConstants)
            endTime = time.time()
            utilities.print_unbuffered("Clustering ligand: %s sec" %
                                       (endTime - startTime))

            if clusteringHook is not None:
                clusteringHook(clusteringMethod, outputPathConstants,
                               simulationRunner, i + 1)
            clustersList = clusteringMethod.getClusterListForSpawning()
            clustersFiltered = [True for _ in clusteringMethod]

        if simulationRunner.parameters.modeMovingBox is not None:
            simulationRunner.getNextIterationBox(
                outputPathConstants.epochOutputPathTempletized % i, resname,
                topologies, i)
            if processManager.isMaster():
                clustersList, clustersFiltered = clusteringMethod.filterClustersAccordingToBox(
                    simulationRunner.parameters)

        if processManager.isMaster():
            if spawningCalculator.parameters.filterByMetric:
                clustersList, clustersFiltered = clusteringMethod.filterClustersAccordingToMetric(
                    clustersFiltered,
                    spawningCalculator.parameters.filter_value,
                    spawningCalculator.parameters.condition,
                    spawningCalculator.parameters.filter_col)

            degeneracyOfRepresentatives = spawningCalculator.calculate(
                clustersList,
                simulationRunner.getWorkingProcessors(),
                i,
                outputPathConstants=outputPathConstants)
            spawningCalculator.log()
            # this method only does work with MSM-based spawning methods,
            # creating a plot of the stationary distribution and the PMF; for
            # the rest of the methods it does nothing
            spawningCalculator.createPlots(outputPathConstants, i,
                                           clusteringMethod)

            if degeneracyOfRepresentatives is not None:
                if simulationRunner.parameters.modeMovingBox is not None or spawningCalculator.parameters.filterByMetric:
                    degeneracyOfRepresentatives = mergeFilteredClustersAccordingToBox(
                        degeneracyOfRepresentatives, clustersFiltered)
                utilities.print_unbuffered("Degeneracy",
                                           degeneracyOfRepresentatives)
                assert len(degeneracyOfRepresentatives) == len(
                    clusteringMethod)
            else:
                # When using null or independent spawning the calculate method returns None
                assert spawningCalculator.type in spawningTypes.SPAWNING_NO_DEGENERACY_TYPES, "calculate returned None with spawning type %s" % spawningTypes.SPAWNING_TYPE_TO_STRING_DICTIONARY[
                    spawningCalculator.type]

            clusteringMethod.writeOutput(
                outputPathConstants.clusteringOutputDir % i,
                degeneracyOfRepresentatives,
                outputPathConstants.clusteringOutputObject % i, writeAll)
            simulationRunner.cleanCheckpointFiles(
                outputPathConstants.epochOutputPathTempletized % i)

            if i > 0:
                # Remove old clustering object, since we already have a newer one
                try:
                    os.remove(outputPathConstants.clusteringOutputObject %
                              (i - 1))
                except OSError:
                    # In case of restart
                    pass

        # Prepare for next pele iteration
        if i != simulationRunner.parameters.iterations - 1:
            # Differentiate between null spawning and the rest of spawning
            # methods
            if spawningCalculator.shouldWriteStructures():
                if processManager.isMaster():
                    _, procMapping = spawningCalculator.writeSpawningInitialStructures(
                        outputPathConstants,
                        degeneracyOfRepresentatives,
                        clusteringMethod,
                        i + 1,
                        topologies=topologies)
                    utilities.writeProcessorMappingToDisk(
                        outputPathConstants.tmpFolder, "processMapping.txt",
                        procMapping)
                processManager.barrier()
                if not processManager.isMaster():
                    procMapping = utilities.readProcessorMappingFromDisk(
                        outputPathConstants.tmpFolder, "processMapping.txt")
                simulationRunner.updateMappingProcessors(procMapping)
                topologies.mapEpochTopologies(i + 1, procMapping)
                initialStructuresAsString = simulationRunner.createMultipleComplexesFilenames(
                    simulationRunner.getWorkingProcessors(),
                    outputPathConstants.tmpInitialStructuresTemplate, i + 1)

        if processManager.isMaster():
            topologies.writeTopologyObject()
            if clusteringMethod.symmetries and nativeStructure:
                fixReportsSymmetry(
                    outputPathConstants.epochOutputPathTempletized % i,
                    resname, nativeStructure, clusteringMethod.symmetries,
                    topologies)

            # check exit condition, if defined
            if simulationRunner.hasExitCondition():
                if simulationRunner.checkExitCondition(
                        clusteringMethod,
                        outputPathConstants.epochOutputPathTempletized % i):
                    utilities.print_unbuffered(
                        "Simulation exit condition met at iteration %d, stopping"
                        % i)
                    # send a signal to all possible adaptivePELE copies to stop
                    for pid in processManager.lockInfo:
                        if pid != processManager.pid:
                            os.kill(pid, signal.SIGTERM)
                    break
                else:
                    utilities.print_unbuffered(
                        "Simulation exit condition not met at iteration %d, continuing..."
                        % i)
        processManager.barrier()