Example #1
    def test_evaluate_directive_valid_job(self, available_directives_list,
                                          non_default_directive_values):
        _tmp_dir = TemporaryDirectory(prefix="flow-directives_")
        FlowProject.init_project(name="DirectivesTest", root=_tmp_dir.name)
        project = FlowProject.get_project(root=_tmp_dir.name)
        for i in range(5):
            project.open_job(dict(i=i)).init()

        valid_values = non_default_directive_values[0]
        valid_values["processor_fraction"] = lambda job: round(
            job.sp.i / 10, 1)

        for job in project:
            directives = _Directives(available_directives_list)
            directives.update(
                {"processor_fraction": lambda job: round(job.sp.i / 10, 1)})
            directives.evaluate((job,))
            assert directives["processor_fraction"] == round(job.sp.i / 10, 1)
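
The callable-directive pattern exercised by this test is the same one used in user code; a rough sketch with signac-flow's directives decorator (operation name and body illustrative, not from the test):

from flow import FlowProject, directives

@FlowProject.operation
@directives(processor_fraction=lambda job: round(job.sp.i / 10, 1))
def compute(job):
    # processor_fraction is resolved against each job's statepoint at
    # submission time, much as _Directives.evaluate does in the test above.
    pass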
Example #2
    cg_annealer = cg_pyrosetta.CG_monte_carlo.CGMonteCarloAnnealer(
        seq_mover=sequence_mover,
        score_function=energy_function,
        pose=pose,
        param_file_object=annealer_params)

    # Setup Configuration/Energy observer for saving minimum energy structures
    struct_obs = cg_pyrosetta.CG_monte_carlo.StructureObserver(
        cg_annealer.get_mc_sim())
    energy_obs = cg_pyrosetta.CG_monte_carlo.EnergyObserver(
        cg_annealer.get_mc_sim())
    cg_annealer.registerObserver(struct_obs)
    cg_annealer.registerObserver(energy_obs)

    # Run Annealer
    cg_annealer.run_schedule()
    min_pose = cg_annealer._cg_mc_sim.get_minimum_energy_pose()
    print("Writing structure to:")
    print(job.fn("minimum.pdb"))
    min_pose.dump_pdb(job.fn("minimum.pdb"))

    end = time.time()

    t_rep = end - start
    print(t_rep, "seconds")
    job.data['timing'] = t_rep


if __name__ == '__main__':
    FlowProject().main()
Example #3
    def setUp(self):
        MockScheduler.reset()
        self._tmp_dir = TemporaryDirectory(prefix='signac-flow_')
        self.addCleanup(self._tmp_dir.cleanup)
        self.project = FlowProject.init_project(name='FlowTestProject',
                                                root=self._tmp_dir.name)
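
A test method built on this fixture then works against self.project; a minimal hypothetical example:

    def test_project_is_empty(self):
        # Hypothetical: the fixture yields a fresh project with no jobs.
        self.assertEqual(len(self.project), 0)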
Example #4
def has_energy_output(job):
    outfile_name = 'energies_output.out'
    if not job.isfile(outfile_name):
        return False
    else:
        return has_libatoms_exit_message(job.fn(outfile_name))


def has_force_output(job):
    outfile_name = 'forces_output.out'
    if not job.isfile(outfile_name):
        return False
    else:
        return has_libatoms_exit_message(job.fn(outfile_name))


timings = FlowProject.make_group(name='timings')


@timings
@FlowProject.operation
@FlowProject.pre(gap_fit_success)
@FlowProject.post(has_energy_output)
@directives(omp_num_threads=1)
def time_quip_energies(job):
    old_dir = os.getcwd()
    quip_cmdline = build_quip_command_line(job)
    print(quip_cmdline)
    try:
        os.chdir(job.workspace())
        with open('energies_output.out', 'w') as outfile:
            subprocess.run(quip_cmdline, stdout=outfile)
    finally:
        # Restore the working directory saved above, even if quip fails.
        os.chdir(old_dir)
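
Given the has_force_output condition above, the forces timing operation presumably mirrors this one; a hedged sketch (the name and symmetry are assumed, not shown in the source):

@timings
@FlowProject.operation
@FlowProject.pre(gap_fit_success)
@FlowProject.post(has_force_output)
@directives(omp_num_threads=1)
def time_quip_forces(job):
    # Hypothetical twin of time_quip_energies, writing forces_output.out.
    ...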
Example #5
import signac
from flow import FlowProject
import os
from simtk import unit
from cg_openmm.cg_model.cgmodel import CGModel
from cg_openmm.parameters.reweight import get_temperature_list
from cg_openmm.simulation.rep_exch import *
from openmmtools.cache import global_context_cache
import numpy as np
import simtk.openmm as openmm
import pickle
from cg_openmm.thermo.calc import *

global_context_cache.platform = openmm.Platform.getPlatformByName("CUDA")

replica_exchange_group = FlowProject.make_group(name='replica_exchange')
analysis_group = FlowProject.make_group(name='analysis')

proj_directory = os.getcwd()


@FlowProject.label
def run_replica_exchange_done(job):
    output_directory = os.path.join(job.workspace(), "output")
    output_data = os.path.join(output_directory, "output.nc")
    rep_exch_completed = 0
    if os.path.isfile(output_data):
        rep_exch_status = ReplicaExchangeSampler.read_status(output_data)
        rep_exch_completed = rep_exch_status.is_completed
    return rep_exch_completed
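
A label function like this can double as a condition; a sketch of how the replica-exchange operation might be gated on it (operation name and body hypothetical):

@replica_exchange_group
@FlowProject.operation
@FlowProject.post(run_replica_exchange_done)
def run_replica_exchange(job):
    # Hypothetical: launch or resume the sampler that writes output/output.nc.
    ...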
Example #6
def main():
    global task_args, other_args, modelPathObj
    assert all(v is not None for v in [
        model_path, modelScript, expCode, workspaceRoot, expProjectName
    ])
    modelPathObj = Path(__file__).parent / model_path

    task_parser = argparse.ArgumentParser(add_help=False)
    task_parser.add_argument("--config", "-c", default=None)
    task_parser.add_argument("--exp_regex", default=None)
    task_parser.add_argument("--arg_regex", default=None)
    task_parser.add_argument("--model_args", default="", type=str)
    task_parser.add_argument("--clear_workspace", action="store_true")
    task_parser.add_argument("--clean_workspace", action="store_true")

    if any(cmd in sys.argv[1:] for cmd in ("run", "exec", "--help")):
        task_parser.add_argument("--tuning", action="store_true")
        task_parser.add_argument("--use_env", default=None)
        task_parser.add_argument("--interactive", "-i", action="store_true")
        task_parser.add_argument("--exp_tags", nargs="+", default=[])
        task_parser.add_argument("--check_paths", action="store_true")
        task_parser.add_argument("--export_dataset", default=None)

        extra_arguments_func(task_parser)
    task_args, other_args = task_parser.parse_known_args()
    if not hasattr(task_args, "model_args"):
        task_args.model_args = ""

    task_args.split_filter = None
    task_args.split_doc_filter = None

    if task_args.config:
        with open(task_args.config, "r") as args_in:
            argsDict = json.load(args_in)
            if "model_args" in argsDict:
                if task_args.model_args:
                    task_args.model_args = [
                        (arg + " " + task_args.model_args) for arg in argsDict["model_args"]]
                else:
                    task_args.model_args = argsDict["model_args"]
            if "exp_regex" in argsDict:
                task_args.exp_regex = argsDict["exp_regex"]
            if "arg_regex" in argsDict:
                task_args.arg_regex = argsDict["arg_regex"]
            if "exp_tags" in argsDict:
                task_args.exp_tags = argsDict["exp_tags"]
            if "split_filter" in argsDict:
                task_args.split_filter = argsDict["split_filter"]
            if "split_doc_filter" in argsDict:
                task_args.split_doc_filter = argsDict["split_doc_filter"]
            if ("-f" not in other_args) and ("-j" not in other_args):
                if "graph_filter_dict" in argsDict:
                    graph_filter_dict = argsDict["graph_filter_dict"]
                    sys.argv += ["-f", json.dumps(graph_filter_dict)]
                    print(sys.argv)
                elif "graph_filter" in argsDict:
                    graph_filter = argsDict["graph_filter"]
                    sys.argv += ["-f"] + graph_filter
                    print(sys.argv)
                
    if task_args.clean_workspace:
        sys.argv += ["-o", "clean_workspace"]
    if task_args.clear_workspace:
        sys.argv += ["-o", "clear_workspace"]

    if task_args.model_args and isinstance(task_args.model_args, str):
        task_args.model_args = task_args.model_args.split(";")
        print("Model args: {}".format(task_args.model_args))

    if not (modelPathObj.exists() and modelPathObj.is_dir()):
        raise ValueError("Path {} does not exist or is not a folder. \n"
                         "Please change the 'model_path' variable in the script.".format(model_path))

    if ("--debug" in other_args) or ("--show-traceback" in other_args) or vars(task_args).get("tuning", False):
        flags.log_to_terminal = True
        print("Log to terminal enabled.")

    if ("status" in sys.argv[1:]):
        while sys.argv[1] != "status":
            del sys.argv[1]
        FlowProject().main()
    else:
        task_help_parser = argparse.ArgumentParser(parents=[task_parser])
        FlowProject().main(parser=task_help_parser)

    if task_args.model_args:
        print("Model args: {}".format(task_args.model_args))
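
The extra_arguments_func hook called above is expected to register model-specific flags on the parser; a hypothetical stand-in:

def extra_arguments_func(parser):
    # Hypothetical: add model-specific CLI flags for run/exec invocations.
    parser.add_argument("--epochs", type=int, default=10)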
Example #7
import os
import signac
from flow import FlowProject

import sklearn
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error, mean_absolute_error

TARGETS = ['COF', 'intercept']
IDENTIFIERS = ['terminal_group_1', 'terminal_group_2', 'terminal_group_3',
               'backbone', 'frac-1', 'frac-2']

# get project root directory
proj = signac.get_project()
root_dir = proj.root_directory()

# define group that combines the different operations
neural_network = FlowProject.make_group(name='neural_network')

@FlowProject.label
def models_trained(job):
    # post condition for train_evaluate
    # checks if pickle files for models and scalers are in job workspace
    # check external hard drive for models
    
    for target in TARGETS:
        model_file = os.path.join('/mnt/d/neural-networks-with-signac/workspace',
                                  job.id, '{}_trained.pickle'.format(target))
        if not os.path.isfile(model_file):
            return False
        if not os.path.isfile(job.fn('{}_scaler.pickle'.format(target))):
            return False
    return True
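
models_trained is described as the post-condition for train_evaluate; a simplified sketch of such an operation under the same naming scheme (load_training_data is a hypothetical helper, and the pickle import is assumed):

import pickle

@neural_network
@FlowProject.operation
@FlowProject.post(models_trained)
def train_evaluate(job):
    for target in TARGETS:
        X, y = load_training_data(job, target)  # hypothetical helper
        scaler = MinMaxScaler().fit(X)
        model = MLPRegressor().fit(scaler.transform(X), y)
        with open(job.fn('{}_scaler.pickle'.format(target)), 'wb') as f:
            pickle.dump(scaler, f)
        # Write the model to the external-drive path that models_trained checks.
        model_dir = '/mnt/d/neural-networks-with-signac/workspace/' + job.id
        with open(model_dir + '/{}_trained.pickle'.format(target), 'wb') as f:
            pickle.dump(model, f)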

Example #8
# A Signac-Flow project for one-component VLE simulations
import json
import os
import pickle
import shutil
import subprocess
import numpy as np

import signac
from flow import FlowProject, directives

# Melt, cool, volume, swap
equilibrate = FlowProject.make_group(name="equilibrate")

project = signac.get_project()


@FlowProject.label
def is_equil(job):
    """
    For job tasks in the 'equilibrate' group, we only want to run
    the task if the job's statepoint 'run' value is "equil".
    """
    return job.sp.run == "equil"
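
Labels like is_equil are typically attached as pre-conditions so that 'equilibrate' operations only pick up equilibration jobs; a hypothetical sketch using the melt step named in the comment above:

@equilibrate
@FlowProject.operation
@FlowProject.pre(is_equil)
def melt(job):
    # Hypothetical: first stage of the melt/cool/volume/swap sequence.
    ...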


@FlowProject.label
def job_marked_complete(job):
    """
        For jobs that may have multiple steps, mark completion