Code Example #1
File: test_model.py Project: jasonroyprice/dials
def test_model_creation_from_data(default_params, mock_exp, test_reflections):
    """Test the factory creation of the three standard scaling models with the
    default params."""

    _ = KBScalingModel.from_data(default_params, [], [])

    _ = PhysicalScalingModel.from_data(default_params, mock_exp, test_reflections)

    _ = ArrayScalingModel.from_data(default_params, mock_exp, test_reflections)
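The fixtures default_params, mock_exp and test_reflections are not shown on this page. A minimal sketch of what a params fixture could look like, following the Mock-based pattern used in Examples #2 and #5 below (the attribute name is an assumption):

import pytest
from unittest.mock import Mock

@pytest.fixture
def default_params():
    # Hypothetical fixture: a Mock lets the model configuration code read
    # whatever option it asks for (cf. Examples #2 and #5 on this page).
    params = Mock()
    params.KB.decay_correction.return_value = False  # assumed option name
    return params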
Code Example #2
def create_datastructures_for_structural_model(reflections, experiments,
                                               cif_file):
    """Read a cif file, calculate intensities and scale them to the average
    intensity of the reflections. Return an experiment and reflection table to
    be used for the structural model in scaling."""

    # Read the model from the cif file, compute Fc and square to get Fc^2.
    ic = intensity_array_from_cif_file(cif_file)
    exp = deepcopy(experiments[0])
    params = Mock()
    params.decay_correction.return_value = False
    exp.scaling_model = KBScalingModel.from_data(params, [], [])
    exp.scaling_model.set_scaling_model_as_scaled()  # Set as scaled to fix scale.

    # Now put the calculated I's on roughly a common scale with the data.
    miller_indices = flex.miller_index([])
    intensities = flex.double([])

    for refl in reflections:
        miller_indices.extend(refl["miller_index"])
        intensities.extend(refl["intensity.prf.value"])
    miller_set = miller.set(
        crystal_symmetry=crystal.symmetry(
            space_group=experiments[0].crystal.get_space_group()),
        indices=miller_indices,
        anomalous_flag=True,
    )
    idata = miller.array(miller_set, data=intensities)

    match = idata.match_indices(ic)
    pairs = match.pairs()

    icalc = flex.double()
    iobs = flex.double()
    miller_idx = flex.miller_index()
    for p in pairs:
        # Note: this can create duplicate miller_index entries in icalc - a potential issue?
        iobs.append(idata.data()[p[0]])
        icalc.append(ic.data()[p[1]])
        miller_idx.append(ic.indices()[p[1]])

    icalc *= flex.sum(iobs) / flex.sum(icalc)

    rt = flex.reflection_table()
    rt["intensity"] = icalc
    rt["miller_index"] = miller_idx

    used_ids = experiments.identifiers()
    unique_id = get_next_unique_id(len(used_ids), used_ids)
    exp.identifier = str(unique_id)
    rt.experiment_identifiers()[unique_id] = str(unique_id)
    rt["id"] = flex.int(rt.size(), unique_id)

    return exp, rt
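The common-scale step near the end (icalc *= flex.sum(iobs) / flex.sum(icalc)) rescales the calculated intensities so that their sum matches the sum of the observed intensities. The same arithmetic in plain Python, with made-up numbers:

iobs = [4.0, 6.0]            # observed intensities (made up)
icalc = [1.0, 2.0]           # calculated intensities (made up)
k = sum(iobs) / sum(icalc)   # common scale factor, here 10/3
icalc = [k * i for i in icalc]
assert abs(sum(icalc) - sum(iobs)) < 1e-9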
Code Example #3
def test_scale_against_target(KB_test_param):
    """Integration testing of the scale_against_target library function/targeted
    scaling."""
    # Based on the input: the target has Ih values of 1.0 and 10.0 at d-values of
    # 1.0 and 2.0, and the reflections to scale have I values of 2 and 5 (at the
    # same d-values). For a KB model the problem can therefore be minimised
    # exactly by solving the equations:
    # 2 = K * exp(B/2)
    # 1/2 = K * exp(B/8)
    # Solving these gives the form tested for at the end of this test.
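    # (Dividing the two: exp(3B/8) = 4, so B = 8*log(4)/3 and
    # K = 2*exp(-B/2) = 4**(-1/3)/2, the values asserted below.)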
    target_reflections = test_target_refl()
    reflections = test_refl_to_scale()
    target_experiments = test_exp()
    experiments = test_exp(idval=1)
    scaled_reflections = scale_against_target(
        reflections, experiments, target_reflections, target_experiments, KB_test_param
    )
    assert list(scaled_reflections["inverse_scale_factor"]) == pytest.approx(
        [2.0, 0.5, 2.0, 2.0 * (4.0 ** (-1.0 / 3.0))]
    )

    experiments = test_exp()
    experiments.append(test_exp(idval=1)[0])
    experiments[0].scaling_model = KBScalingModel.from_data(KB_test_param, [], [])
    experiments[0].scaling_model.set_scaling_model_as_scaled()
    experiments[1].scaling_model = KBScalingModel.from_data(KB_test_param, [], [])
    target_reflections = test_target_refl()
    reflections = test_refl_to_scale()
    # Repeat the test but calling the TargetScaler directly, to allow inspection
    # of the model components.
    targetscaler = TargetScalerFactory.create(
        KB_test_param, experiments, [target_reflections, reflections]
    )
    targetscaler.perform_scaling()
    assert list(
        targetscaler.unscaled_scalers[0].components["scale"].parameters
    ) == pytest.approx([(4.0 ** (-1.0 / 3.0)) / 2.0])
    assert list(
        targetscaler.unscaled_scalers[0].components["decay"].parameters
    ) == pytest.approx([(log(4.0) * 8.0 / 3.0)])
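A quick self-contained check of the closed-form solution asserted above (plain Python, no dials imports needed):

from math import exp, log

K = 4.0 ** (-1.0 / 3.0) / 2.0  # scale parameter asserted in the test
B = 8.0 * log(4.0) / 3.0       # decay parameter asserted in the test

# Both equations from the comment block hold to machine precision.
assert abs(K * exp(B / 2.0) - 2.0) < 1e-12
assert abs(K * exp(B / 8.0) - 0.5) < 1e-12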
Code Example #4
def get_scaling_model():
    return KBScalingModel.from_data(generated_params(), [], [])
Code Example #5
File: scaling_library.py Project: huwjenkins/dials
def create_datastructures_for_target_mtz(experiments, mtz_file):
    """Read a merged mtz file and extract miller indices, intensities and
    variances."""
    m = mtz.object(mtz_file)
    ind = m.extract_miller_indices()
    cols = m.columns()
    col_dict = {c.label(): c for c in cols}
    r_t = flex.reflection_table()
    if "I" in col_dict:  # nice and simple
        r_t["miller_index"] = ind
        r_t["intensity"] = col_dict["I"].extract_values().as_double()
        r_t["variance"] = flex.pow2(col_dict["SIGI"].extract_values().as_double())
    elif "IMEAN" in col_dict:  # nice and simple
        r_t["miller_index"] = ind
        r_t["intensity"] = col_dict["IMEAN"].extract_values().as_double()
        r_t["variance"] = flex.pow2(col_dict["SIGIMEAN"].extract_values().as_double())
    elif "I(+)" in col_dict:  # need to combine I+ and I- together into target Ih
        if col_dict["I(+)"].n_valid_values() == 0:  # use I(-)
            r_t["miller_index"] = ind
            r_t["intensity"] = col_dict["I(-)"].extract_values().as_double()
            r_t["variance"] = flex.pow2(
                col_dict["SIGI(-)"].extract_values().as_double()
            )
        elif col_dict["I(-)"].n_valid_values() == 0:  # use I(+)
            r_t["miller_index"] = ind
            r_t["intensity"] = col_dict["I(+)"].extract_values().as_double()
            r_t["variance"] = flex.pow2(
                col_dict["SIGI(+)"].extract_values().as_double()
            )
        else:  # Combine both - add together then use Ih table to calculate I and sigma
            r_tplus = flex.reflection_table()
            r_tminus = flex.reflection_table()
            r_tplus["miller_index"] = ind
            r_tplus["intensity"] = col_dict["I(+)"].extract_values().as_double()
            r_tplus["variance"] = flex.pow2(
                col_dict["SIGI(+)"].extract_values().as_double()
            )
            r_tminus["miller_index"] = ind
            r_tminus["intensity"] = col_dict["I(-)"].extract_values().as_double()
            r_tminus["variance"] = flex.pow2(
                col_dict["SIGI(-)"].extract_values().as_double()
            )
            r_tplus.extend(r_tminus)
            r_tplus.set_flags(
                flex.bool(r_tplus.size(), False), r_tplus.flags.bad_for_scaling
            )
            r_tplus = r_tplus.select(r_tplus["variance"] != 0.0)
            Ih_table = create_Ih_table(
                [experiments[0]], [r_tplus], anomalous=True
            ).blocked_data_list[0]
            r_t["intensity"] = Ih_table.Ih_values
            inv_var = (
                Ih_table.weights * Ih_table.h_index_matrix
            ) * Ih_table.h_expand_matrix
            r_t["variance"] = 1.0 / inv_var
            r_t["miller_index"] = Ih_table.miller_index
    else:
        assert 0, "Unrecognised intensities in mtz file."
    r_t = r_t.select(r_t["variance"] > 0.0)
    r_t["d"] = (
        miller.set(
            crystal_symmetry=crystal.symmetry(
                space_group=m.space_group(), unit_cell=m.crystals()[0].unit_cell()
            ),
            indices=r_t["miller_index"],
        )
        .d_spacings()
        .data()
    )
    r_t.set_flags(flex.bool(r_t.size(), True), r_t.flags.integrated)

    exp = Experiment()
    exp.crystal = deepcopy(experiments[0].crystal)
    exp.identifier = str(uuid.uuid4())
    r_t.experiment_identifiers()[len(experiments)] = exp.identifier
    r_t["id"] = flex.int(r_t.size(), len(experiments))

    # create a new KB scaling model for the target and set as scaled to fix scale
    # for targeted scaling.
    params = Mock()
    params.KB.decay_correction.return_value = False
    exp.scaling_model = KBScalingModel.from_data(params, [], [])
    exp.scaling_model.set_scaling_model_as_scaled()  # Set as scaled to fix scale.

    return exp, r_t
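A hypothetical call site, assuming experiments is an ExperimentList and reflections the matching list of reflection tables (the file name is a placeholder):

# Hypothetical usage: append the target data for targeted scaling.
exp, table = create_datastructures_for_target_mtz(experiments, "merged.mtz")
experiments.append(exp)
reflections.append(table)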
Code Example #6
def get_scaling_model():
    """Make a KB Scaling model instance"""
    return KBScalingModel.from_data(generated_params(), [], [])
Code Example #7
def test_KBScalingModel():
    """Test for the KB Scaling Model."""

    # Test standard initialisation method.
    configdict = {"corrections": ["scale", "decay"]}
    parameters_dict = {
        "scale": {
            "parameters": flex.double([1.2]),
            "parameter_esds": flex.double([0.1]),
        },
        "decay": {
            "parameters": flex.double([0.01]),
            "parameter_esds": flex.double([0.02]),
        },
    }
    KBmodel = KBScalingModel(parameters_dict, configdict)
    assert KBmodel.id_ == "KB"
    assert "scale" in KBmodel.components
    assert "decay" in KBmodel.components
    assert list(KBmodel.components["scale"].parameters) == [1.2]
    assert list(KBmodel.components["decay"].parameters) == [0.01]
    assert list(KBmodel.components["scale"].parameter_esds) == [0.1]
    assert list(KBmodel.components["decay"].parameter_esds) == [0.02]

    # Test from_dict initialisation method.
    KB_dict = {
        "__id__": "KB",
        "is_scaled": True,
        "scale": {
            "n_parameters": 1,
            "parameters": [0.5],
            "est_standard_devs": [0.05],
            "null_parameter_value": 1,
        },
        "configuration_parameters": {
            "corrections": ["scale"]
        },
    }
    KBmodel = KBScalingModel.from_dict(KB_dict)
    assert KBmodel.is_scaled is True
    assert "scale" in KBmodel.components
    assert "decay" not in KBmodel.components
    assert list(KBmodel.components["scale"].parameters) == [0.5]
    assert list(KBmodel.components["scale"].parameter_esds) == [0.05]

    new_dict = KBmodel.to_dict()
    assert new_dict == KB_dict

    # Test again with all parameters
    KB_dict = {
        "__id__": "KB",
        "is_scaled": True,
        "scale": {
            "n_parameters": 1,
            "parameters": [0.5],
            "est_standard_devs": [0.05],
            "null_parameter_value": 1,
        },
        "decay": {
            "n_parameters": 1,
            "parameters": [0.2],
            "est_standard_devs": [0.02],
            "null_parameter_value": 0,
        },
        "configuration_parameters": {
            "corrections": ["scale", "decay"]
        },
    }
    KBmodel = KBScalingModel.from_dict(KB_dict)
    assert KBmodel.is_scaled is True
    assert "scale" in KBmodel.components
    assert "decay" in KBmodel.components
    assert list(KBmodel.components["scale"].parameters) == [0.5]
    assert list(KBmodel.components["scale"].parameter_esds) == [0.05]
    assert list(KBmodel.components["decay"].parameters) == [0.2]
    assert list(KBmodel.components["decay"].parameter_esds) == [0.02]

    new_dict = KBmodel.to_dict()
    assert new_dict == KB_dict

    KB_dict["__id__"] = "physical"
    with pytest.raises(RuntimeError):
        KBmodel = KBScalingModel.from_dict(KB_dict)

    assert KBmodel.consecutive_refinement_order == [["scale", "decay"]]
    assert "Decay component" in str(KBmodel)
Code Example #8
File: test_observers.py Project: jasonroyprice/dials
def test_ScalingModelObserver():
    """Test that the observer correctly logs data when passed a scaler."""

    KB_dict = {
        "__id__": "KB",
        "is_scaled": True,
        "scale": {
            "n_parameters": 1,
            "parameters": [0.5],
            "est_standard_devs": [0.05],
            "null_parameter_value": 1,
        },
        "configuration_parameters": {
            "corrections": ["scale"]
        },
    }

    KBmodel = KBScalingModel.from_dict(KB_dict)
    experiment = Experiment()
    experiment.scaling_model = KBmodel
    experiment.identifier = "0"

    scaler1 = mock.Mock()
    scaler1.experiment = experiment
    scaler1.active_scalers = None

    observer = ScalingModelObserver()
    observer.update(scaler1)
    assert observer.data["0"] == KB_dict

    msg = observer.return_model_error_summary()
    assert msg != ""

    mock_func = mock.Mock()
    mock_func.return_value = {"plot": {"layout": {"title": ""}}}

    with mock.patch("dials.algorithms.scaling.observers.plot_scaling_models",
                    new=mock_func):
        observer.make_plots()
        assert mock_func.call_count == 1
        assert mock_func.call_args_list == [mock.call(KB_dict)]

    experiment2 = Experiment()
    experiment2.scaling_model = KBmodel
    experiment2.identifier = "1"
    scaler2 = mock.Mock()
    scaler2.experiment = experiment2
    scaler2.active_scalers = None

    multiscaler = mock.Mock()
    multiscaler.active_scalers = [scaler1, scaler2]
    observer.data = {}
    observer.update(multiscaler)
    assert observer.data["0"] == KB_dict
    assert observer.data["1"] == KB_dict

    mock_func = mock.Mock()
    mock_func.return_value = {"plot": {"layout": {"title": ""}}}

    with mock.patch("dials.algorithms.scaling.observers.plot_scaling_models",
                    new=mock_func):
        r = observer.make_plots()
        assert mock_func.call_count == 2
        assert mock_func.call_args_list == [
            mock.call(KB_dict), mock.call(KB_dict)
        ]
        assert all(i in r["scaling_model"] for i in ["plot_0", "plot_1"])
Code Example #9
    @staticmethod
    def from_dict(d):
        """Create a scaling model from a dict."""
        from dials.algorithms.scaling.model.model import KBScalingModel

        return KBScalingModel.from_dict(d)
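Example #9 is the body of a factory's static from_dict method. A minimal sketch of how a dispatcher over the serialised "__id__" field could look (the function name and the error branch are illustrative assumptions; Example #7 shows that a mismatched "__id__" raises RuntimeError):

def scaling_model_from_dict(d):
    # Hypothetical dispatcher keyed on the serialised model id.
    from dials.algorithms.scaling.model.model import KBScalingModel

    if d["__id__"] == "KB":
        return KBScalingModel.from_dict(d)
    raise RuntimeError("Unrecognised scaling model type: %s" % d["__id__"])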