Code example #1
File: sim_maker.py  Project: gitter-badger/wepy-1
    def make_configuration(
        self,
        apparatus,
        work_mapper_class=None,
        work_mapper_spec='TaskMapper',
        work_mapper_params=None,
        platform='Reference',
        # Ellipsis means: use all of the default reporters
        reporters=Ellipsis,
        reporter_kwargs=None,
        work_dir=None,
        monitor_class=None,
        monitor_params=None,
    ):

        # MAPPER

        # choose which mapper to use

        # use the class if given
        if work_mapper_class is not None:
            pass

        # use the spec string given
        elif work_mapper_spec is not None:

            work_mapper_class = [
                mapper for mapper in self.MAPPERS
                if mapper.__name__ == work_mapper_spec
            ][0]

        else:
            raise ValueError(
                "neither work_mapper_class or work_mapper_spec were not given")

        mapper_name = work_mapper_class.__name__

        # use either the default params or the user params
        if work_mapper_params is None:
            work_mapper_params = self.DEFAULT_MAPPER_PARAMS[mapper_name]

        # depending on the platform and work mapper choose the worker
        # type and update the params in place
        work_mapper_params.update(
            self.choose_work_mapper_platform_params(platform, mapper_name))

        # REPORTERS

        reporter_classes, reporter_params = self.resolve_reporter_params(
            apparatus, reporters, reporter_kwargs)

        ## Monitor

        config = Configuration(
            work_mapper_class=work_mapper_class,
            work_mapper_partial_kwargs=work_mapper_params,
            reporter_classes=reporter_classes,
            reporter_partial_kwargs=reporter_params,
            work_dir=work_dir,
            monitor_class=monitor_class,
            monitor_partial_kwargs=monitor_params,
        )

        return config
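
A hedged usage sketch: sim_maker stands in for an instance of the SimMaker-style class this method belongs to, and apparatus for an already-built simulation apparatus; both names are placeholders, not part of the snippet above.

# Hypothetical usage; sim_maker and apparatus are assumed to exist already.
config = sim_maker.make_configuration(
    apparatus,
    work_mapper_spec='WorkerMapper',  # matched against self.MAPPERS by class name
    platform='CPU',                   # drives the platform-specific worker params
    reporters=Ellipsis,               # keep the full default reporter set
    work_dir='./wepy_results',
)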
Code example #2
def test_apparatus_configuration(datadir_factory, mocker):

    config = Configuration()

    assert config.apparatus_opts == {}

    reparam_config = config.reparametrize(apparatus_opts={
        'runner': {
            'platform': 'CPU',
        },
    })

    assert reparam_config.apparatus_opts == {
        'runner': {
            'platform': 'CPU',
        },
    }

    ## test that we can change the apparatus parameters in the sim_manager

    system_mock = mocker.Mock()
    topology_mock = mocker.Mock()
    integrator_mock = mocker.Mock()

    runner = OpenMMRunner(
        system_mock,
        topology_mock,
        integrator_mock,
    )

    resampler_mock = mocker.MagicMock()  # mocker.patch('WExploreResampler')

    apparatus = WepySimApparatus(
        runner,
        resampler=resampler_mock,
        boundary_conditions=None,
    )
    apparatus._filters = (
        runner,
        None,
        resampler_mock,
    )

    state_mock = mocker.MagicMock(
    )  # mocker.patch('wepy.walker.WalkerState', autospec=True)

    walkers = [Walker(state_mock, 0.1) for i in range(1)]

    snapshot = SimSnapshot(
        walkers,
        apparatus,
    )
    snapshot._walkers = walkers
    snapshot._apparatus = apparatus

    datadir = datadir_factory.mkdatadir()

    orch = Orchestrator(orch_path=str(datadir / "test.orch.sqlite3"))

    sim_manager = orch.gen_sim_manager(
        snapshot,
        reparam_config,
    )

    sim_manager.init()

    # sim_mock = mocker.patch('wepy.runners.openmm.omma.Simulation')
    # platform_mock = mocker.patch('wepy.runners.openmm.omm.Platform')

    # _ = sim_manager.run_cycle(
    #     walkers,
    #     2,
    #     0,
    #     runner_opts={
    #         'platform' : 'CPU',
    #     }
    # )

    # platform_mock.getPlatformByName.assert_called_with('CPU')

    sim_mock = mocker.patch('wepy.runners.openmm.omma.Simulation')
    platform_mock = mocker.patch('wepy.runners.openmm.omm.Platform')
    platform_mock.getPlatformByName.\
        return_value.getPropertyNames.\
        return_value = ('Threads',)

    _ = sim_manager.run_cycle(walkers,
                              2,
                              0,
                              runner_opts={
                                  'platform': 'CPU',
                                  'platform_kwargs': {
                                      'Threads': '3'
                                  },
                              })

    platform_mock.getPlatformByName.assert_called_with('CPU')

    platform_mock.getPlatformByName.\
        return_value.getPropertyNames.\
        assert_called()

    platform_mock.getPlatformByName.\
        return_value.setPropertyDefaultValue.\
        assert_called_with(
            'Threads',
            '3',
        )
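
The key move in this test is stubbing a chained attribute/call path on a mock and then asserting on it. A minimal self-contained sketch of that pattern, using only the standard library (no wepy imports), for reference:

# Minimal sketch of the chained-mock pattern used in the test above.
from unittest.mock import MagicMock

platform = MagicMock()
# stub Platform.getPlatformByName(...).getPropertyNames() -> ('Threads',)
platform.getPlatformByName.return_value.getPropertyNames.return_value = ('Threads',)

names = platform.getPlatformByName('CPU').getPropertyNames()
assert names == ('Threads',)
platform.getPlatformByName.assert_called_with('CPU')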
Code example #3
def run_sim(init_state_path, json_top_path, forcefield_paths, n_cycles,
            n_steps, n_workers, **kwargs):

    #### Wepy Orchestrator

    # load the wepy.OpenMMState
    with open(init_state_path, 'rb') as rf:
        init_state = pickle.load(rf)

    ### Apparatus

    # Runner components

    # load the JSON for the topology
    with open(json_top_path) as rf:
        json_top_str = rf.read()

    # load it with mdtraj and then convert to openmm
    mdj_top = json_to_mdtraj_topology(json_top_str)
    omm_topology = mdj_top.to_openmm()

    # we need to use the box vectors for setting the simulation up,
    # paying mind to the units
    box_vectors = init_state['box_vectors'] * init_state.box_vectors_unit

    # set the box to the last box size from equilibration
    omm_topology.setPeriodicBoxVectors(box_vectors)

    # force field parameters
    force_field = omma.ForceField(*forcefield_paths)

    # create a system using the topology method giving it a topology and
    # the method for calculation
    runner_system = force_field.createSystem(omm_topology,
                                             nonbondedMethod=NONBONDED_METHOD,
                                             nonbondedCutoff=NONBONDED_CUTOFF,
                                             constraints=MD_CONSTRAINTS,
                                             rigidWater=RIGID_WATER,
                                             removeCMMotion=REMOVE_CM_MOTION,
                                             hydrogenMass=HYDROGEN_MASS)

    # barostat to keep pressure constant
    runner_barostat = omm.MonteCarloBarostat(PRESSURE, TEMPERATURE,
                                             VOLUME_MOVE_FREQ)
    # add it to the system
    runner_system.addForce(runner_barostat)

    # instantiate an integrator for the runner
    runner_integrator = omm.LangevinIntegrator(TEMPERATURE,
                                               FRICTION_COEFFICIENT, STEP_TIME)

    ## Runner
    runner = OpenMMRunner(runner_system,
                          omm_topology,
                          runner_integrator,
                          platform=PLATFORM)

    ## Resampler

    # Distance Metric

    # TODO set distance metric
    distance_metric = None

    # TODO set resampler
    resampler = None

    ## Boundary Conditions

    # TODO optional: set the boundary conditions
    bc = None

    # apparatus = WepySimApparatus(runner, resampler=resampler,
    #                              boundary_conditions=bc)

    print("created apparatus")

    ## CONFIGURATION

    # the idxs of the main representation to save in the output files,
    # it is just the protein and the ligand

    # TODO optional: set the main representation atom indices
    main_rep_idxs = None

    # REPORTERS
    # list of reporter classes and partial kwargs for using in the
    # orchestrator

    hdf5_reporter_kwargs = {
        'main_rep_idxs': main_rep_idxs,
        'topology': json_top_str,
        'resampler': resampler,
        'boundary_conditions': bc,
        # general parameters
        'save_fields': SAVE_FIELDS,
        'units': dict(UNITS),
        'sparse_fields': dict(SPARSE_FIELDS),
        'all_atoms_rep_freq': ALL_ATOMS_SAVE_FREQ
    }

    # get all the reporters together. Order is important since they
    # will get paired with the kwargs
    reporter_classes = [
        WepyHDF5Reporter,
    ]

    # collate the kwargs in the same order
    reporter_kwargs = [
        hdf5_reporter_kwargs,
    ]

    # make the configuration with all these reporters and the default number of workers
    configuration = Configuration(n_workers=DEFAULT_N_WORKERS,
                                  reporter_classes=reporter_classes,
                                  reporter_partial_kwargs=reporter_kwargs,
                                  config_name="no-orch")

    # then instantiate them
    reporters = configuration._gen_reporters()

    print("created configuration")

    ### Initial Walkers
    init_walkers = [
        Walker(deepcopy(init_state), INIT_WEIGHT) for _ in range(N_WALKERS)
    ]

    print("created init walkers")

    ### Orchestrator
    # orchestrator = Orchestrator(apparatus,
    #                             default_init_walkers=init_walkers,
    #                             default_configuration=configuration)

    ### Work Mapper
    if PLATFORM in ('OpenCL', 'CUDA'):
        # use a mapper that runs the walkers on GPU workers
        work_mapper = WorkerMapper(worker_type=OpenMMGPUWorker,
                                   num_workers=n_workers)

    elif PLATFORM in ('Reference', 'CPU'):
        # just use the standard serial mapper
        work_mapper = Mapper()

    else:
        raise ValueError("unrecognized platform: {}".format(PLATFORM))

    ### Simulation Manager
    sim_manager = Manager(init_walkers,
                          runner=runner,
                          resampler=resampler,
                          boundary_conditions=bc,
                          work_mapper=work_mapper,
                          reporters=reporters)

    ### Run the simulation
    steps = [n_steps for _ in range(n_cycles)]
    sim_manager.run_simulation(n_cycles, steps)
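
The script above relies on module-level constants (TEMPERATURE, PRESSURE, STEP_TIME, and so on) defined elsewhere in the project. A hedged sketch of what such definitions can look like; the values below are illustrative placeholders, not the project's actual settings.

# Illustrative placeholder constants for run_sim; the real values live
# elsewhere in the project and may differ.
# (openmm.unit is available as simtk.unit in older OpenMM releases.)
import openmm.unit as unit

TEMPERATURE = 300.0 * unit.kelvin
PRESSURE = 1.0 * unit.atmosphere
VOLUME_MOVE_FREQ = 50  # barostat volume-move attempt frequency, in steps
FRICTION_COEFFICIENT = 1 / unit.picosecond
STEP_TIME = 0.002 * unit.picoseconds

N_WALKERS = 48
INIT_WEIGHT = 1.0 / N_WALKERS
DEFAULT_N_WORKERS = 8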
Code example #4
def run_sim(init_state_path,
            json_top_path,
            forcefield_paths,
            n_cycles,
            n_steps,
            platform,
            n_workers,
            lig_ff=None,
            **kwargs):

    # add in the ligand force fields
    assert lig_ff is not None, "must give ligand forcefield"

    forcefield_paths.append(lig_ff)

    #### Wepy Orchestrator

    # load the wepy.OpenMMState
    with open(init_state_path, 'rb') as rf:
        init_state = pickle.load(rf)

    ### Apparatus

    # Runner components

    # load the JSON for the topology
    with open(json_top_path) as rf:
        json_top_str = rf.read()

    # load it with mdtraj and then convert to openmm
    mdj_top = json_to_mdtraj_topology(json_top_str)
    omm_topology = mdj_top.to_openmm()

    # we need to use the box vectors for setting the simulation up,
    # paying mind to the units
    box_vectors = init_state['box_vectors'] * init_state.box_vectors_unit
    positions = init_state['positions'] * init_state.positions_unit

    # set the box to the last box size from equilibration
    omm_topology.setPeriodicBoxVectors(box_vectors)

    # force field parameters
    force_field = omma.ForceField(*forcefield_paths)

    # create a system using the topology method giving it a topology and
    # the method for calculation
    runner_system = force_field.createSystem(omm_topology,
                                             nonbondedMethod=NONBONDED_METHOD,
                                             nonbondedCutoff=NONBONDED_CUTOFF,
                                             constraints=MD_CONSTRAINTS,
                                             rigidWater=RIGID_WATER,
                                             removeCMMotion=REMOVE_CM_MOTION,
                                             hydrogenMass=HYDROGEN_MASS)

    # barostat to keep pressure constant
    runner_barostat = omm.MonteCarloBarostat(PRESSURE, TEMPERATURE,
                                             VOLUME_MOVE_FREQ)
    # add it to the system
    runner_system.addForce(runner_barostat)

    # instantiate an integrator for the runner
    runner_integrator = omm.LangevinIntegrator(TEMPERATURE,
                                               FRICTION_COEFFICIENT, STEP_TIME)

    ## Runner
    runner = OpenMMRunner(runner_system,
                          omm_topology,
                          runner_integrator,
                          platform=platform)

    ## Resampler

    # Distance Metric

    lig_idxs = ligand_idxs(json_top_str)
    prot_idxs = protein_idxs(json_top_str)
    bs_idxs = binding_site_idxs(json_top_str, positions, box_vectors, CUTOFF)

    # set distance metric
    distance_metric = UnbindingDistance(lig_idxs, bs_idxs, init_state)

    # set resampler
    resampler = WExploreResampler(distance=distance_metric,
                                  init_state=init_state,
                                  max_n_regions=MAX_N_REGIONS,
                                  max_region_sizes=MAX_REGION_SIZES,
                                  pmin=PMIN,
                                  pmax=PMAX)

    ## Boundary Conditions

    # optional: set the boundary conditions
    bc = None

    ## CONFIGURATION

    # the idxs of the main representation to save in the output files,
    # it is just the protein and the ligand

    # optional: set the main representation atom indices, set to None
    # to save all the atoms in the 'positions' field
    main_rep_idxs = np.concatenate((lig_idxs, prot_idxs))

    # REPORTERS
    # list of reporter classes and partial kwargs for using in the
    # orchestrator

    hdf5_reporter_kwargs = {
        'main_rep_idxs': main_rep_idxs,
        'topology': json_top_str,
        'resampler': resampler,
        'boundary_conditions': bc,
        # general parameters
        'save_fields': SAVE_FIELDS,
        'units': dict(UNITS),
        'sparse_fields': dict(SPARSE_FIELDS),
        'all_atoms_rep_freq': ALL_ATOMS_SAVE_FREQ
    }

    # get all the reporters together. Order is important since they
    # will get paired with the kwargs
    reporter_classes = [
        WepyHDF5Reporter,
    ]

    # collate the kwargs in the same order
    reporter_kwargs = [
        hdf5_reporter_kwargs,
    ]

    # make the configuration with all these reporters and the default
    # number of workers. Don't be thrown off by this. You don't need
    # this. It is just a convenient way to dynamically name the
    # outputs of the reporters and parametrize the workers and worker
    # mappers. This is mainly for use in the Orchestrator framework
    # but it is useful here just for batch naming everything.
    configuration = Configuration(n_workers=DEFAULT_N_WORKERS,
                                  reporter_classes=reporter_classes,
                                  reporter_partial_kwargs=reporter_kwargs,
                                  config_name="no-orch",
                                  mode='w')

    # then instantiate the reporters from the configuration. This
    # localizes the file paths to outputs and applies the key-word
    # arguments specified above.
    reporters = configuration._gen_reporters()

    print("created configuration")

    ### Initial Walkers
    init_walkers = [
        Walker(deepcopy(init_state), INIT_WEIGHT) for _ in range(N_WALKERS)
    ]

    print("created init walkers")

    ### Work Mapper
    if platform in ('OpenCL', 'CUDA'):
        # we use a mapper that uses GPUs
        work_mapper = WorkerMapper(worker_type=OpenMMGPUWorker,
                                   num_workers=n_workers)

    elif platform in ('CPU', ):
        # for the CPU we can choose how many threads to use per walker.
        worker_attributes = {'num_threads': N_CPU_THREADS}
        work_mapper = WorkerMapper(worker_type=OpenMMCPUWorker,
                                   worker_attributes=worker_attributes,
                                   num_workers=n_workers)

    elif platform in ('Reference', ):
        # just use the standard serial mapper
        work_mapper = Mapper()

    else:
        raise ValueError("unrecognized platform: {}".format(platform))

    ### Simulation Manager
    sim_manager = Manager(init_walkers,
                          runner=runner,
                          resampler=resampler,
                          boundary_conditions=bc,
                          work_mapper=work_mapper,
                          reporters=reporters)

    ### Run the simulation
    steps = [n_steps for _ in range(n_cycles)]
    sim_manager.run_simulation(n_cycles, steps)
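
A hedged sketch of a command-line entry point for this run_sim; the argument order mirrors the signature above, but the wrapper itself is illustrative and not part of the original script.

# Illustrative argparse wrapper for run_sim; not part of the original script.
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("init_state_path")
    parser.add_argument("json_top_path")
    parser.add_argument("n_cycles", type=int)
    parser.add_argument("n_steps", type=int)
    parser.add_argument("platform", choices=("Reference", "CPU", "OpenCL", "CUDA"))
    parser.add_argument("n_workers", type=int)
    parser.add_argument("forcefield_paths", nargs="+")
    parser.add_argument("--lig-ff", required=True, dest="lig_ff")
    args = parser.parse_args()

    run_sim(args.init_state_path, args.json_top_path, args.forcefield_paths,
            args.n_cycles, args.n_steps, args.platform, args.n_workers,
            lig_ff=args.lig_ff)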
Code example #5
File: make_orchestrator.py  Project: leelasdSI/wepy
                        'units' : dict(UNITS),
                        'sparse_fields' : dict(SPARSE_FIELDS),
                        'main_rep_idxs' : MAIN_REP_IDXS,
                        'all_atoms_rep_freq' : ALL_ATOMS_SAVE_FREQ}

dashboard_reporter_kwargs = {'step_time' : STEP_SIZE.value_in_unit(unit.second),
                             'max_n_regions' : RESAMPLER.max_n_regions,
                             'max_region_sizes' : RESAMPLER.max_region_sizes,
                             'bc_cutoff_distance' : BC.cutoff_distance}

REPORTER_KWARGS = [hdf5_reporter_kwargs, dashboard_reporter_kwargs]

N_WORKERS = 8

CONFIGURATION = Configuration(n_workers=N_WORKERS,
                              reporter_classes=REPORTER_CLASSES,
                              reporter_partial_kwargs=REPORTER_KWARGS)

print("created configuration")

### Initial Walkers
N_WALKERS = 48
INIT_WEIGHT = 1.0 / N_WALKERS
INIT_WALKERS = [Walker(deepcopy(INIT_STATE), INIT_WEIGHT) for i in range(N_WALKERS)]

print("created init walkers")

### Orchestrator
ORCHESTRATOR = Orchestrator(APPARATUS,
                            default_init_walkers=INIT_WALKERS,
                            default_configuration=CONFIGURATION)
Code example #6
    def test_lj_sim_manager_openmm_integration_run(
        self,
        class_tmp_path_factory,
        boundary_condition_class,
        resampler_class,
        work_mapper_class,
        platform,
        lj_params,
        lj_omm_sys,
        lj_integrator,
        lj_reporter_classes,
        lj_reporter_kwargs,
        lj_init_walkers,
        lj_openmm_runner,
        lj_unbinding_bc,
        lj_wexplore_resampler,
        lj_revo_resampler,
    ):
        """Run all combinations of components in the fixtures for the smallest
        amount of time, just to make sure they all work together and don't give errors."""

        logging.getLogger().setLevel(logging.DEBUG)
        install_mp_handler()
        logging.debug("Starting the test")

        print("starting the test")

        # the configuration class gives us a convenient way to
        # parametrize our reporters for the locale
        from wepy.orchestration.configuration import Configuration

        # the runner
        from wepy.runners.openmm import OpenMMRunner

        # mappers
        from wepy.work_mapper.mapper import Mapper
        from wepy.work_mapper.worker import WorkerMapper
        from wepy.work_mapper.task_mapper import TaskMapper

        # the worker types for the WorkerMapper
        from wepy.work_mapper.worker import Worker
        from wepy.runners.openmm import OpenMMCPUWorker, OpenMMGPUWorker

        # the walker task types for the TaskMapper
        from wepy.work_mapper.task_mapper import WalkerTaskProcess
        from wepy.runners.openmm import OpenMMCPUWalkerTaskProcess, OpenMMGPUWalkerTaskProcess

        n_cycles = 1
        n_steps = 2
        num_workers = 2

        # generate the reporters and temporary directory for this test
        # combination

        tmpdir_template = 'lj_fixture_{plat}-{wm}-{res}-{bc}'
        tmpdir_name = tmpdir_template.format(plat=platform,
                                             wm=work_mapper_class,
                                             res=resampler_class,
                                             bc=boundary_condition_class)

        # make a temporary directory for this configuration to work with
        tmpdir = str(class_tmp_path_factory.mktemp(tmpdir_name))

        # make a config so that the reporters get parametrized properly
        reporters = Configuration(
            work_dir=tmpdir,
            reporter_classes=lj_reporter_classes,
            reporter_partial_kwargs=lj_reporter_kwargs).reporters

        steps = [n_steps for _ in range(n_cycles)]

        # choose the components based on the parametrization
        boundary_condition = None
        resampler = None

        walker_fixtures = [lj_init_walkers]
        runner_fixtures = [lj_openmm_runner]
        boundary_condition_fixtures = [lj_unbinding_bc]
        resampler_fixtures = [lj_wexplore_resampler, lj_revo_resampler]

        walkers = lj_init_walkers

        boundary_condition = [
            boundary_condition
            for boundary_condition in boundary_condition_fixtures
            if type(boundary_condition).__name__ == boundary_condition_class
        ][0]
        resampler = [
            resampler for resampler in resampler_fixtures
            if type(resampler).__name__ == resampler_class
        ][0]

        assert boundary_condition is not None
        assert resampler is not None

        # generate the work mapper given the type and the platform

        work_mapper_classes = {
            mapper_class.__name__: mapper_class
            for mapper_class in [Mapper, WorkerMapper, TaskMapper]
        }

        # # select the right one given the option
        # work_mapper_type = [mapper_type for mapper_type in work_mapper_classes
        #                     if type(mapper_type).__name__ == work_mapper_class][0]

        # decide based on the platform and the work mapper which
        # platform dependent components to build
        if work_mapper_class == 'Mapper':
            # then there are no settings to pass
            work_mapper = Mapper()

        elif work_mapper_class == 'WorkerMapper':

            if platform == 'CUDA':
                work_mapper = WorkerMapper(num_workers=num_workers,
                                           worker_type=OpenMMGPUWorker,
                                           device_ids={
                                               '0': 0,
                                               '1': 1
                                           },
                                           proc_start_method='spawn')

            elif platform == 'OpenCL':
                work_mapper = WorkerMapper(
                    num_workers=num_workers,
                    worker_type=OpenMMGPUWorker,
                    device_ids={
                        '0': 0,
                        '1': 1
                    },
                )

            elif platform == 'CPU':
                work_mapper = WorkerMapper(
                    num_workers=num_workers,
                    worker_type=OpenMMCPUWorker,
                    worker_attributes={'num_threads': 1})

            elif platform == 'Reference':
                work_mapper = WorkerMapper(
                    num_workers=num_workers,
                    worker_type=Worker,
                )

        elif work_mapper_class == 'TaskMapper':

            if platform == 'CUDA':
                work_mapper = TaskMapper(
                    num_workers=num_workers,
                    walker_task_type=OpenMMGPUWalkerTaskProcess,
                    device_ids={
                        '0': 0,
                        '1': 1
                    },
                    proc_start_method='spawn')

            elif platform == 'OpenCL':
                work_mapper = TaskMapper(
                    num_workers=num_workers,
                    walker_task_type=OpenMMGPUWalkerTaskProcess,
                    device_ids={
                        '0': 0,
                        '1': 1
                    })

            elif platform == 'CPU':
                work_mapper = TaskMapper(
                    num_workers=num_workers,
                    walker_task_type=OpenMMCPUWalkerTaskProcess,
                    worker_attributes={'num_threads': 1})

            elif platform == 'Reference':
                work_mapper = TaskMapper(
                    num_workers=num_workers,
                    walker_task_type=WalkerTaskProcess,
                )

        else:
            raise ValueError("Platform {} not recognized".format(platform))

        # initialize the runner with the platform
        runner = OpenMMRunner(lj_omm_sys.system,
                              lj_omm_sys.topology,
                              lj_integrator,
                              platform=platform)

        logging.debug("Constructing the manager")

        manager = Manager(walkers,
                          runner=runner,
                          boundary_conditions=boundary_condition,
                          resampler=resampler,
                          work_mapper=work_mapper,
                          reporters=reporters)

        # different work mappers need different process start methods on
        # different platforms (e.g. CUDA requires 'spawn' rather than the
        # Linux default 'fork'), which is why they were chosen above.

        logging.debug("Starting the simulation")

        walkers, filters = manager.run_simulation(n_cycles,
                                                  steps,
                                                  num_workers=num_workers)
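
The platform, work_mapper_class, resampler_class, and boundary_condition_class arguments arrive as pytest fixtures. A simplified, hypothetical sketch of how such a combination matrix could be parametrized; the real fixtures live in the project's conftest.py and may be defined differently.

# Hypothetical parametrization of the component matrix; the actual test relies
# on fixtures defined in the project's conftest.py.
import pytest

@pytest.fixture(params=['Reference', 'CPU'])
def platform(request):
    return request.param

@pytest.fixture(params=['Mapper', 'WorkerMapper', 'TaskMapper'])
def work_mapper_class(request):
    return request.param

@pytest.fixture(params=['WExploreResampler', 'REVOResampler'])
def resampler_class(request):
    return request.param

@pytest.fixture(params=['UnbindingBC'])
def boundary_condition_class(request):
    return request.param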
Code example #7
File: make_orchestrator.py  Project: leelasdSI/wepy
]

from wepy.work_mapper.mapper import Mapper

from wepy.orchestration.orchestrator import WepySimApparatus, Orchestrator
from wepy.orchestration.configuration import Configuration

sim_apparatus = WepySimApparatus(runner,
                                 resampler=resampler,
                                 boundary_conditions=ubc)

# we also create a default configuration for the orchestrator that
# will be used unless one is given at runtime for the creation of a
# simulation manager
configuration = Configuration(n_workers=4,
                              reporter_classes=reporter_classes,
                              reporter_partial_kwargs=reporter_kwargs)

# we also want to set up the orchestrator with some default walkers to
# use to get us started. Otherwise these could be provided from a
# snapshot or on their own. Ideally we only want to have a single
# script setting up an orchestrator and then manage everything else on
# the command line interactively from then on out
init_weight = 1.0 / N_WALKERS
init_walkers = [
    Walker(OpenMMState(init_sim_state), init_weight) for i in range(N_WALKERS)
]

# then create the seed/root/master orchestrator which will be used
# from here on out
orchestrator = Orchestrator(sim_apparatus,
Code example #8
File: make_orchestrator.py  Project: poharrison/wepy
reporter_classes = [
    WepyHDF5Reporter, WExploreDashboardReporter, ResTreeReporter
]
reporter_kwargs = [
    hdf5_reporter_kwargs, dashboard_reporter_kwargs, restree_reporter_kwargs
]

sim_apparatus = WepySimApparatus(runner,
                                 resampler=resampler,
                                 boundary_conditions=ubc)

# we also create a default configuration for the orchestrator that
# will be used unless one is given at runtime for the creation of a
# simulation manager
configuration = Configuration(reporter_classes=reporter_classes,
                              reporter_partial_kwargs=reporter_kwargs)

# we also want to set up the orchestrator with some default walkers to
# use to get us started. Otherwise these could be provided from a
# snapshot or on their own. Ideally we only want to have a single
# script setting up an orchestrator and then manage everything else on
# the command line interactively from then on out
init_weight = 1.0 / N_WALKERS
init_walkers = [
    Walker(OpenMMState(init_sim_state), init_weight) for i in range(N_WALKERS)
]

# then create the seed/root/master orchestrator which will be used
# from here on out
orch = Orchestrator(orch_path='LJ-pair.orch.sqlite', mode='w')
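
With the SQLite-backed orchestrator created, the apparatus, walkers, and configuration built above would normally be registered on it as defaults. A hedged continuation following the pattern of wepy's LJ-pair orchestration example; the setter names below are assumptions and may differ between wepy versions.

# Hypothetical continuation; these method names follow wepy's LJ-pair example
# but are assumptions here and may differ between wepy versions.
orch.set_default_sim_apparatus(sim_apparatus)
orch.set_default_init_walkers(init_walkers)
orch.set_default_configuration(configuration)
orch.gen_default_snapshot()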