Example 1
def test_build_model_specification_failure():
    """Both a nonexistent path and a non-YAML file must be rejected."""
    with pytest.raises(ConfigurationError):
        build_model_specification('made_up_file.yaml')

    with pytest.raises(ConfigurationError):
        here = os.path.dirname(os.path.realpath(__file__))
        bad_spec = here + '/../test_data/bad_model_specification.txt'
        # Guard against a moved fixture silently turning this into the
        # missing-file case above.
        assert os.path.exists(bad_spec), 'Test directory structure is broken'
        build_model_specification(bad_spec)
Example 2
def model_specification(mocker):
    """Fixture: build a model spec with the user-config lookup redirected."""
    here = os.path.dirname(os.path.realpath(__file__))
    mock_user_config = here + '/test_data/mock_user_config.yaml'
    mock_spec = here + '/test_data/mock_model_specification.yaml'

    # Point expanduser at the mock user config instead of ~/...
    mocker.patch(
        'vivarium.framework.configuration.os.path.expanduser',
        return_value=mock_user_config)

    return build_model_specification(mock_spec)
Example 3
def parse(
    command: str,
    input_model_specification_path: Optional[Path],
    artifact_path: Optional[Path],
    model_specification_path: Path,
    results_root: Path,
    keyspace: "Keyspace",
) -> ConfigTree:
    """Build the model specification appropriate to the given command.

    Restarts and expansions reuse the persisted specification; load tests use
    the default specification; all other commands parse the user-supplied
    specification and wire in the results directory and artifact path.
    """
    if command in (COMMANDS.restart, COMMANDS.expand):
        return build_model_specification(model_specification_path)
    if command == COMMANDS.load_test:
        return build_model_specification()

    model_specification = build_model_specification(input_model_specification_path)
    model_specification.configuration.update(
        {OUTPUT_DATA_KEY: {RESULTS_DIRECTORY_KEY: str(results_root)}},
        source=__file__,
    )

    from_cli = artifact_path is not None
    from_branches = FULL_ARTIFACT_PATH_KEY in keyspace

    if from_cli and from_branches:
        # Ambiguous: refuse rather than pick a winner.
        raise ConfigurationError(
            "Artifact path cannot be specified both in the branch specification file"
            " and as a command line argument.",
            str(artifact_path),
        )
    if from_cli:
        model_specification.configuration[INPUT_DATA_KEY].update(
            {ARTIFACT_PATH_KEY: str(artifact_path)}, source=__file__)
    else:
        # Artifact path comes from the model spec.
        # Parsing here ensures the key exists and the value points
        # to an actual file.
        parse_artifact_path_config(model_specification.configuration)

    return model_specification
Example 4
def initialize_simulation_from_model_specification(model_specification_file):
    """Create an InteractiveContext from a model specification file."""
    spec = build_model_specification(model_specification_file)

    # The component-parser plugin turns the component configuration into
    # instantiated component objects.
    plugin_manager = PluginManager(spec.plugins)
    parser = plugin_manager.get_plugin('component_configuration_parser')
    components = parser.get_components(spec.components)

    return InteractiveContext(spec.configuration, components, plugin_manager)
Example 5
    def __init__(self, model_specification_file: str,
                 branch_configuration_file: str, output_directory: Path,
                 logging_directories: Dict[str, Path], num_input_draws: int,
                 num_random_seeds: int, restart: bool, expand: Dict[str, int],
                 no_batch: bool):
        """Set up the run context: keyspace, prior outputs, and log directories.

        On ``restart`` the persisted keyspace and existing ``output.hdf`` are
        reloaded (optionally expanded with extra draws/seeds); otherwise a
        fresh keyspace is built from the branch configuration and the model
        specification is copied into the output directory.
        """
        self.number_already_completed = 0
        self.output_directory = output_directory
        self.no_batch = no_batch
        self.sge_log_directory = logging_directories['sge']
        self.worker_log_directory = logging_directories['worker']

        if restart:
            # Resume: reuse the keyspace and results written by the prior run.
            self.keyspace = Keyspace.from_previous_run(self.output_directory)
            self.existing_outputs = pd.read_hdf(self.output_directory /
                                                'output.hdf')
            if expand:
                # Grow the run with additional draws/seeds, then re-persist
                # so a later restart sees the expanded keyspace.
                self.keyspace.add_draws(expand['num_draws'])
                self.keyspace.add_seeds(expand['num_seeds'])
                self.keyspace.persist(self.output_directory)
        else:
            model_specification = build_model_specification(
                model_specification_file)

            self.keyspace = Keyspace.from_branch_configuration(
                num_input_draws, num_random_seeds, branch_configuration_file)

            # The artifact path may only come from the model specification,
            # never from the branches file.
            if "input_data.artifact_path" in self.keyspace.get_data():
                raise ValueError(
                    "An artifact path can only be supplied in the model specification file, "
                    "not the branches configuration.")

            if "artifact_path" in model_specification.configuration.input_data:
                # NOTE(review): parse_artifact_path_config presumably validates
                # and resolves the configured path — confirm in its definition.
                artifact_path = parse_artifact_path_config(
                    model_specification.configuration)
                model_specification.configuration.input_data.update(
                    {"artifact_path": artifact_path}, source=__file__)

            # Keep a copy of the spec next to the results for reproducibility.
            model_specification_path = self.output_directory / 'model_specification.yaml'
            shutil.copy(model_specification_file, model_specification_path)

            self.existing_outputs = None

            # Log some basic stuff about the simulation to be run.
            self.keyspace.persist(self.output_directory)
        self.model_specification = self.output_directory / 'model_specification.yaml'
def test_build_model_specification(mocker, test_spec, test_user_config):
    """Loaded spec equals the defaults overlaid with spec and user config."""
    mocker.patch(
        'vivarium.framework.configuration.Path.expanduser',
        return_value=test_user_config)

    loaded_model_spec = build_model_specification(test_spec)

    # Build the expected dict the same way the loader should: defaults,
    # then the spec file, then the user config over 'configuration'.
    expected = DEFAULT_PLUGINS.copy()
    with test_spec.open() as f:
        expected.update(yaml.full_load(f))
    with test_user_config.open() as f:
        expected['configuration'].update(yaml.full_load(f))

    assert loaded_model_spec.to_dict() == expected
def test_build_model_specification_failure(mocker, test_data_dir, test_spec):
    """Nonexistent paths, non-YAML files, and unknown keys are all rejected."""
    # Path that does not exist.
    with pytest.raises(ConfigurationError):
        build_model_specification('made_up_file.yaml')

    # Existing file with a non-YAML extension.
    with pytest.raises(ConfigurationError):
        build_model_specification(test_data_dir / 'bad_model_specification.txt')

    # Valid YAML containing a top-level key the spec schema does not allow.
    with test_spec.open() as f:
        corrupted = yaml.full_load(f)
    corrupted['invalid_key'] = 'some_value'
    mocker.patch(
        'vivarium.framework.configuration.yaml.full_load',
        return_value=corrupted)
    with pytest.raises(ConfigurationError):
        build_model_specification(str(test_spec))
Example 8
def run_nth_draw(model_specification_file, draw_number):
    """
    Run a model simulation for a specific draw number.

    :param model_specification_file: The YAML model specification file.
    :param draw_number: The draw number to select for rates and values that
        have multiple draws.
    :return: The ``(metrics, final_state)`` pair produced by the engine.
    """
    logger = logging.getLogger(__name__)
    # Lazy %-style arguments: the message is only formatted if INFO is enabled.
    logger.info('%s Simulating draw #%s for %s ...',
                datetime.datetime.now().strftime("%H:%M:%S"),
                draw_number, model_specification_file)
    spec = config.build_model_specification(model_specification_file)
    spec.configuration.input_data.input_draw_number = draw_number

    simulation = initialise_simulation_from_specification_config(spec)
    simulation.setup()

    metrics, final_state = engine.run(simulation)
    logger.info('%s Simulation for draw #%s complete',
                datetime.datetime.now().strftime("%H:%M:%S"),
                draw_number)

    return metrics, final_state
Example 9
def test_build_model_specification(mocker):
    """Loaded spec equals the defaults overlaid with spec and user config."""
    test_dir = os.path.dirname(os.path.realpath(__file__))
    user_config = test_dir + '/../test_data/mock_user_config.yaml'
    model_spec = test_dir + '/../test_data/mock_model_specification.yaml'

    expand_user_mock = mocker.patch(
        'vivarium.framework.configuration.os.path.expanduser')
    expand_user_mock.return_value = user_config

    loaded_model_spec = build_model_specification(model_spec)

    # Copy so update() below does not mutate the shared DEFAULT_PLUGINS
    # constant and leak state into other tests.
    test_data = DEFAULT_PLUGINS.copy()

    with open(model_spec) as f:
        # yaml.load without a Loader is deprecated; full_load matches the
        # behavior the loader under test uses.
        model_data = yaml.full_load(f)

    test_data.update(model_data)

    with open(user_config) as f:
        user_data = yaml.full_load(f)

    test_data['configuration'].update(user_data)

    assert loaded_model_spec.to_dict() == test_data
Example 10
def run_simulation(model_specification_file, results_directory):
    """Run one simulation and persist its metrics and final population state."""
    results_writer = get_results_writer(results_directory,
                                        model_specification_file)

    spec = build_model_specification(model_specification_file)
    # Route all output to the writer's results root, overriding the spec.
    spec.configuration.output_data.update(
        {'results_directory': results_writer.results_root},
        layer='override',
        source='command_line')

    simulation = setup_simulation(spec)
    metrics, final_state = run(simulation)

    _log.debug(pformat(metrics))
    unused = simulation.configuration.unused_keys()
    if unused:
        _log.debug("Some configuration keys not used during run: %s", unused)

    # Index the metrics row by the random seed so parallel runs concatenate.
    seed_index = pd.Index([simulation.configuration.randomness.random_seed],
                          name='random_seed')
    results_writer.write_output(pd.DataFrame(metrics, index=seed_index),
                                'output.hdf')
    results_writer.write_output(final_state, 'final_state.hdf')
Example 11
    def __init__(self,
                 model_specification: Union[str, Path, ConfigTree] = None,
                 components: Union[List, Dict, ConfigTree] = None,
                 configuration: Union[Dict, ConfigTree] = None,
                 plugin_configuration: Union[Dict, ConfigTree] = None):
        """Bootstrap a simulation context from a model specification.

        Parses the arguments into a complete model specification, constructs
        the plugin and component managers, registers the simulation
        life-cycle phases, and loads all components.
        """
        # Bootstrap phase: Parse arguments, make private managers
        # `components` may be a configuration (dict/ConfigTree) expanded by the
        # parser plugin, or a plain list of pre-built components appended after
        # parsing — distinguish the two cases here.
        component_configuration = components if isinstance(
            components, (dict, ConfigTree)) else None
        self._additional_components = components if isinstance(
            components, List) else []
        model_specification = build_model_specification(
            model_specification, component_configuration, configuration,
            plugin_configuration)

        self._plugin_configuration = model_specification.plugins
        self._component_configuration = model_specification.components
        self.configuration = model_specification.configuration

        self._plugin_manager = PluginManager(model_specification.plugins)

        # TODO: Setup logger here.

        self._builder = Builder(self.configuration, self._plugin_manager)

        # This formally starts the initialization phase (this call makes the
        # life-cycle manager).
        self._lifecycle = self._plugin_manager.get_plugin('lifecycle')
        self._lifecycle.add_phase(
            'setup', ['setup', 'post_setup', 'population_creation'])
        # The main loop repeats its states for every simulated time step.
        self._lifecycle.add_phase('main_loop', [
            'time_step__prepare', 'time_step', 'time_step__cleanup',
            'collect_metrics'
        ],
                                  loop=True)
        self._lifecycle.add_phase('simulation_end',
                                  ['simulation_end', 'report'])

        self._component_manager = self._plugin_manager.get_plugin(
            'component_manager')
        self._component_manager.setup(self.configuration, self._lifecycle)

        # Core framework services, each provided as a plugin.
        self._clock = self._plugin_manager.get_plugin('clock')
        self._values = self._plugin_manager.get_plugin('value')
        self._events = self._plugin_manager.get_plugin('event')
        self._population = self._plugin_manager.get_plugin('population')
        self._resource = self._plugin_manager.get_plugin('resource')
        self._tables = self._plugin_manager.get_plugin('lookup')
        self._randomness = self._plugin_manager.get_plugin('randomness')
        self._data = self._plugin_manager.get_plugin('data')

        # Optional plugins become `self._<name>` attributes dynamically.
        for name, controller in self._plugin_manager.get_optional_controllers(
        ).items():
            setattr(self, f'_{name}', controller)

        # The order the managers are added is important.  It represents the
        # order in which they will be set up.  The clock is required by
        # several of the other managers, including the lifecycle manager.  The
        # lifecycle manager is also required by most managers. The randomness
        # manager requires the population manager.  The remaining managers need
        # no ordering.
        managers = [
            self._clock, self._lifecycle, self._resource, self._values,
            self._population, self._randomness, self._events, self._tables,
            self._data
        ] + list(self._plugin_manager.get_optional_controllers().values())
        self._component_manager.add_managers(managers)

        component_config_parser = self._plugin_manager.get_plugin(
            'component_configuration_parser')
        # Tack extra components onto the end of the list generated from the model specification.
        components = (component_config_parser.get_components(
            self._component_configuration) + self._additional_components +
                      [Metrics()])

        # Restrict when these public methods may be called during the run.
        self._lifecycle.add_constraint(self.add_components,
                                       allow_during=['initialization'])
        self._lifecycle.add_constraint(
            self.get_population,
            restrict_during=['initialization', 'setup', 'post_setup'])

        self.add_components(components)
Example 12
def model_specification(mocker, test_spec, test_user_config):
    """Fixture: a model specification built with the user config patched in."""
    mocker.patch(
        'vivarium.framework.configuration.Path.expanduser',
        return_value=test_user_config)
    return build_model_specification(test_spec)