Ejemplo n.º 1
0
def run_scaling(params, experiments, reflections):
    """Run scaling algorithms; stats only, cross validation or standard."""
    # Early-exit modes that bypass the full scaling workflow entirely.
    if params.stats_only:
        Script.stats_only(reflections, experiments, params)
        sys.exit()
    if params.export_mtz_only:
        Script.export_mtz_only(reflections, experiments, params)
        sys.exit()

    if params.output.delete_integration_shoeboxes:
        # Shoebox data is not needed for scaling; drop it to reduce memory use.
        for table in reflections:
            del table["shoebox"]

    if not params.cross_validation.cross_validation_mode:
        script = Script(params, experiments, reflections)
        # Register the observers at the highest level
        if params.output.html:
            register_default_scaling_observers(script)
        else:
            register_merging_stats_observers(script)
        if not params.filtering.method:
            script.run()
        else:
            # Filtering is only supported for multi-dataset scaling.
            if script.scaler.id_ != "multi":
                raise Sorry(
                    """
Scaling and filtering can only be performed in multi-dataset scaling mode
(not single dataset or scaling against a reference)"""
                )
            register_scale_and_filter_observers(script)
            script.run_scale_and_filter()
            with open(params.filtering.output.scale_and_filter_results, "w") as out:
                json.dump(script.filtering_results.to_dict(), out, indent=2)
        script.export()
    else:
        # Deferred imports: cross-validation machinery is only needed here.
        from dials.algorithms.scaling.cross_validation.cross_validate import (
            cross_validate,
        )
        from dials.algorithms.scaling.cross_validation.crossvalidator import (
            DialsScaleCrossValidator,
        )

        validator = DialsScaleCrossValidator(experiments, reflections)
        try:
            cross_validate(params, validator)
        except ValueError as e:
            raise Sorry(e)

        logger.info(
            "Cross validation analysis does not produce scaling output files, rather\n"
            "it gives insight into the dataset. Choose an appropriate parameterisation\n"
            "and rerun scaling without cross_validation_mode.\n"
        )
Ejemplo n.º 2
0
def run_scaling(params, experiments, reflections):
    """Run scaling algorithms; cross validation, scaling + filtering or standard.

    Returns:
        experiments: an experiment list with scaled data (if created)
        joint_table: a single reflection table containing scaled data (if created).
    """

    if params.output.delete_integration_shoeboxes:
        # Shoebox data is not needed for scaling; drop it to reduce memory use.
        for table in reflections:
            del table["shoebox"]

    if not params.cross_validation.cross_validation_mode:
        # Filtering requires the combined scale-and-filter workflow.
        if params.filtering.method:
            algorithm = ScaleAndFilterAlgorithm(params, experiments, reflections)
            register_scale_and_filter_observers(algorithm)
        else:
            algorithm = ScalingAlgorithm(params, experiments, reflections)

        # Register the observers at the highest level
        if params.output.html:
            register_default_scaling_observers(algorithm)
        else:
            register_merging_stats_observers(algorithm)

        algorithm.run()
        experiments, joint_table = algorithm.finish()
        return experiments, joint_table

    # Cross-validation path; imports deferred as they are only needed here.
    from dials.algorithms.scaling.cross_validation.cross_validate import (
        cross_validate, )
    from dials.algorithms.scaling.cross_validation.crossvalidator import (
        DialsScaleCrossValidator, )

    validator = DialsScaleCrossValidator(experiments, reflections)
    cross_validate(params, validator)

    logger.info(
        "Cross validation analysis does not produce scaling output files, rather\n"
        "it gives insight into the dataset. Choose an appropriate parameterisation\n"
        "and rerun scaling without cross_validation_mode.\n")
    # Cross validation produces no scaled output.
    return (None, None)
Ejemplo n.º 3
0
def test_dialsscalecrossvalidator():
    """Test the methods of the dials.scale cross validator"""
    cv = DialsScaleCrossValidator([], [])

    # get_results_from_script should pull work_free_stats off the scaler.
    fake_script = mock.MagicMock()
    fake_script.scaler.work_free_stats = [1.0, 2.0, 3.0, 4.0]
    assert cv.get_results_from_script(fake_script) == [1.0, 2.0, 3.0, 4.0]

    params = generated_param()
    params.scaling_options.free_set_percentage = 20.0
    # test get free set offset
    assert cv.get_free_set_percentage(params) == 20.0

    # test set free set offset
    params = cv.set_free_set_offset(params, 5)
    assert params.scaling_options.free_set_offset == 5

    # Parameter-type lookup for each supported kind of option.
    for name, expected_type in (
        ("model", "choice"),
        ("physical.absorption_correction", "bool"),
        ("physical.lmax", "int"),
        ("physical.decay_interval", "float"),
    ):
        assert cv.get_parameter_type(name) == expected_type

    # Setting parameters across the various phil scopes.
    params = cv.set_parameter(params, "model", "physical")
    assert params.model == "physical"
    params = cv.set_parameter(params, "physical.decay_interval", 50.0)
    assert params.physical.decay_interval == 50.0
    params = cv.set_parameter(params, "physical.lmax", 10)
    assert params.physical.lmax == 10
    params = cv.set_parameter(params, "weighting.error_model.error_model", None)
    assert params.weighting.error_model.error_model is None
    params = cv.set_parameter(params, "cut_data.d_min", 1.8)
    assert params.cut_data.d_min == 1.8
    params = cv.set_parameter(params, "scaling_options.outlier_zmax", 7.53)
    assert params.scaling_options.outlier_zmax == 7.53
    # Unknown parameter names must be rejected.
    with pytest.raises(ValueError):
        _ = cv.set_parameter(params, "bad_parameter", 7.53)
Ejemplo n.º 4
0
def test_crossvalidator():
    """Use the dials.scale cross validator to test the general methods of the
    parent class."""

    # create_results_dict makes one entry per configuration, each sized as
    # the number of metric names plus one "configuration" slot.
    cv = DialsScaleCrossValidator([], [])
    cv.create_results_dict(1)
    assert len(cv.results_dict) == 1
    assert len(cv.results_dict[0]) == len(cv.results_metadata["names"]) + 1

    cv = DialsScaleCrossValidator([], [])
    cv.create_results_dict(2)
    assert len(cv.results_dict) == 2
    for idx in (0, 1):
        assert len(cv.results_dict[idx]) == len(cv.results_metadata["names"]) + 1

    # now test the add configuration
    cv.set_results_dict_configuration(("model", ), (["a", "b"], ))
    assert cv.results_dict[0]["configuration"][0] == "model=a"
    assert cv.results_dict[1]["configuration"][0] == "model=b"

    # now try adding results to dict
    cv.add_results_to_results_dict(0, [1.0, 1.5, 0.5, 3.0, 4.0, 1.0])
    assert cv.results_dict[0]["work Rmeas"] == [1.0]
    assert cv.results_dict[0]["free Rmeas"] == [1.5]
    assert cv.results_dict[0]["work CC1/2"] == [3.0]
    assert cv.results_dict[0]["free CC1/2"] == [4.0]
    # add some more so that can test interpreting results
    cv.add_results_to_results_dict(0, [1.0, 2.0, 1.0, 3.0, 2.0, 2.0])
    cv.add_results_to_results_dict(1, [1.0, 2.0, 1.0, 3.0, 3.0, 0.0])
    cv.add_results_to_results_dict(1, [1.0, 2.0, 1.0, 3.0, 3.5, 0.5])

    # Now try interpreting results - check that values are calculated correctly
    table = cv.interpret_results()
    expected_rows = [
        ["model=a", "mean", "1.0", "1.75*", "0.75*", "3.0", "3.0", "1.5"],
        ["", "std dev", "0.0", "0.25", "0.25", "0.0", "1.0", "0.5"],
        ["model=b", "mean", "1.0", "2.0", "1.0", "3.0", "3.25*", "0.25*"],
        ["", "std dev", "0.0", "0.0", "0.0", "0.0", "0.25", "0.25"],
    ]
    for row_index, expected in enumerate(expected_rows):
        assert table._rows[row_index] == expected
Ejemplo n.º 5
0
def test_cross_validate_script():
    """Test the script, mocking the run_script and interpret results calls"""

    param = generated_param()
    crossvalidator = DialsScaleCrossValidator([], [])

    # test expected error raise due to unspecified parameter
    # ("multi" mode requires cross_validation.parameter to be set).
    param.cross_validation.cross_validation_mode = "multi"
    with pytest.raises(ValueError):
        cross_validate(param, crossvalidator)

    # test single mode
    param.cross_validation.cross_validation_mode = "single"
    param.cross_validation.nfolds = 2
    fpath = "dials.algorithms.scaling.cross_validation."
    with mock.patch(fpath +
                    "crossvalidator.DialsScaleCrossValidator.run_script"
                    ) as mock_run_script:
        with mock.patch(
                fpath +
                "crossvalidator.DialsScaleCrossValidator.interpret_results"
        ) as mock_interpret:
            cross_validate(param, crossvalidator)
            # Single mode: one run per fold (nfolds=2), one interpretation.
            assert mock_run_script.call_count == 2
            assert mock_interpret.call_count == 1

    # test multi mode
    param = generated_param()
    param.cross_validation.cross_validation_mode = "multi"
    param.cross_validation.nfolds = 2
    fpath = "dials.algorithms.scaling.cross_validation."
    with mock.patch(fpath +
                    "crossvalidator.DialsScaleCrossValidator.run_script"
                    ) as mock_run_script:
        with mock.patch(
                fpath +
                "crossvalidator.DialsScaleCrossValidator.interpret_results"
        ) as mock_interpret:
            # NOTE(review): counts below are 2 options x 2 folds = 4 runs per
            # cross_validate call, and are cumulative on the shared mocks.
            # A bool parameter presumably needs no explicit values (True/False
            # are implied) — confirm against cross_validate's implementation.
            param.cross_validation.parameter = "physical.absorption_correction"
            cross_validate(param, crossvalidator)
            assert mock_run_script.call_count == 4
            assert mock_interpret.call_count == 1

            # A non-bool parameter without explicit values cannot be
            # enumerated, so this must raise.
            param.cross_validation.parameter = "physical.decay_interval"
            with pytest.raises(ValueError):
                cross_validate(param, crossvalidator)

            param.cross_validation.parameter = "physical.absorption_correction"
            param.cross_validation.parameter_values = ["True", "False"]
            cross_validate(param, crossvalidator)
            assert mock_run_script.call_count == 8
            assert mock_interpret.call_count == 2

            param.cross_validation.parameter = "physical.decay_interval"
            param.cross_validation.parameter_values = ["5.0", "10.0"]
            cross_validate(param, crossvalidator)
            assert mock_run_script.call_count == 12
            assert mock_interpret.call_count == 3

            param.cross_validation.parameter = "model"
            param.cross_validation.parameter_values = ["array", "physical"]
            cross_validate(param, crossvalidator)
            assert mock_run_script.call_count == 16
            assert mock_interpret.call_count == 4

            param.cross_validation.parameter = "physical.lmax"
            param.cross_validation.parameter_values = ["4", "6"]
            cross_validate(param, crossvalidator)
            assert mock_run_script.call_count == 20
            assert mock_interpret.call_count == 5

            # Unknown parameter names and invalid modes both raise ValueError.
            param.cross_validation.parameter = "bad_interval"
            with pytest.raises(ValueError):
                cross_validate(param, crossvalidator)

            param.cross_validation.cross_validation_mode = "bad"
            with pytest.raises(ValueError):
                cross_validate(param, crossvalidator)
Ejemplo n.º 6
0
def test_dialsscalecrossvalidator():
    """Test the methods of the dials.scale cross validator"""
    cv = DialsScaleCrossValidator([], [])

    # get_results_from_script should pull final_rmsds off the scaler.
    fake_script = mock.MagicMock()
    fake_script.scaler.final_rmsds = [1.0, 2.0, 3.0, 4.0]
    assert cv.get_results_from_script(fake_script) == [1.0, 2.0, 3.0, 4.0]

    params = generated_param()
    params.scaling_options.free_set_percentage = 20.0
    # test get free set offset
    assert cv.get_free_set_percentage(params) == 20.0

    # test set free set offset
    params = cv.set_free_set_offset(params, 5)
    assert params.scaling_options.free_set_offset == 5

    # Parameter-type lookup for each supported kind of option.
    for name, expected_type in (
        ("model", "choice"),
        ("absorption_term", "bool"),
        ("lmax", "int"),
        ("decay_interval", "float"),
    ):
        assert cv.get_parameter_type(name) == expected_type

    # Setting parameters in their respective phil scopes.
    params = cv.set_parameter(params, "model", "KB")
    assert params.model == "KB"
    params = cv.set_parameter(params, "decay_interval", 50.0)
    assert params.parameterisation.decay_interval == 50.0
    params = cv.set_parameter(params, "lmax", 10)
    assert params.parameterisation.lmax == 10
    params = cv.set_parameter(params, "optimise_errors", False)
    assert params.weighting.optimise_errors is False
    params = cv.set_parameter(params, "d_min", 1.8)
    assert params.cut_data.d_min == 1.8
    params = cv.set_parameter(params, "outlier_zmax", 7.53)
    assert params.scaling_options.outlier_zmax == 7.53
    # Unknown parameter names trip the validator's internal assertion.
    with pytest.raises(AssertionError):
        _ = cv.set_parameter(params, "bad_parameter", 7.53)