Example #1
def test_SingleScaler_update_for_minimisation():
    """Test the update_for_minimisation method of the singlescaler."""
    # test_params.scaling_options.nproc = 1
    p, e, r = (generated_param(), generated_exp(), generated_refl_2())
    exp = create_scaling_model(p, e, r)
    p.reflection_selection.method = "use_all"
    single_scaler = SingleScaler(p, exp[0], r)
    apm_fac = create_apm_factory(single_scaler)
    single_scaler.components["scale"].parameters /= 2.0
    apm = apm_fac.make_next_apm()

    Ih_table = single_scaler.Ih_table.blocked_data_list[0]
    Ih_table.calc_Ih()
    assert list(Ih_table.inverse_scale_factors) == [1.0, 1.0]
    assert list(Ih_table.Ih_values) == [10.0, 1.0]
    single_scaler.update_for_minimisation(apm, 0)
    # Should set new scale factors, and calculate Ih and weights.
    bf = basis_function().calculate_scales_and_derivatives(apm.apm_list[0], 0)
    assert list(Ih_table.inverse_scale_factors) == list(bf[0])
    assert list(Ih_table.Ih_values) != [1.0, 10.0]
    assert approx_equal(list(Ih_table.Ih_values),
                        list(Ih_table.intensities / bf[0]))
    for i in range(Ih_table.derivatives.n_rows):
        for j in range(Ih_table.derivatives.n_cols):
            assert approx_equal(Ih_table.derivatives[i, j], bf[1][i, j])
    assert Ih_table.derivatives.non_zeroes == bf[1].non_zeroes
Example #2
def test_SingleScaler_update_for_minimisation():
    """Test the update_for_minimisation method of the singlescaler."""
    # test_params.scaling_options.nproc = 1
    p, e, r = (generated_param(), generated_exp(), generated_refl_2())
    exp = create_scaling_model(p, e, r)
    p.reflection_selection.method = "use_all"
    single_scaler = SingleScaler(p, exp[0], r)
    pmg = ScalingParameterManagerGenerator(
        single_scaler.active_scalers,
        ScalingTarget(),
        single_scaler.params.scaling_refinery.refinement_order,
    )
    single_scaler.components["scale"].parameters /= 2.0
    apm = pmg.parameter_managers()[0]

    Ih_table = single_scaler.Ih_table.blocked_data_list[0]
    Ih_table.calc_Ih()
    assert list(Ih_table.inverse_scale_factors) == [1.0, 1.0]
    assert list(Ih_table.Ih_values) == [10.0, 1.0]
    single_scaler.update_for_minimisation(apm, 0)
    # Should set new scale factors, and calculate Ih and weights.
    bf = RefinerCalculator.calculate_scales_and_derivatives(apm.apm_list[0], 0)
    assert list(Ih_table.inverse_scale_factors) == list(bf[0])
    assert list(Ih_table.Ih_values) != [1.0, 10.0]
    assert list(Ih_table.Ih_values) == pytest.approx(
        list(Ih_table.intensities / bf[0]))
    for i in range(Ih_table.derivatives.n_rows):
        for j in range(Ih_table.derivatives.n_cols):
            assert Ih_table.derivatives[i, j] == pytest.approx(bf[1][i, j])
    assert Ih_table.derivatives.non_zeroes == bf[1].non_zeroes
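
Examples #1 and #2 are two revisions of the same test: the second swaps create_apm_factory/basis_function for ScalingParameterManagerGenerator/RefinerCalculator and replaces libtbx's approx_equal with pytest.approx. A minimal, self-contained sketch of the pytest.approx idiom used on flex arrays (the tolerance behaviour shown is the pytest default, not a value from these tests):

import pytest
from dials.array_family import flex

values = flex.double([1.0, 2.0, 3.0])
# flex arrays are converted to lists so that pytest.approx can compare
# them elementwise with a relative tolerance
assert list(values) == pytest.approx([1.0, 2.0, 3.0 + 1e-9])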
Example #3
def test_target_gradient_calculation_finite_difference(small_reflection_table,
                                                       single_exp,
                                                       physical_param):
    """Test the calculated gradients against a finite difference calculation."""
    model = PhysicalScalingModel.from_data(physical_param, single_exp,
                                           small_reflection_table)

    # need to 'add_data'
    model.configure_components(small_reflection_table, single_exp,
                               physical_param)
    model.components["scale"].update_reflection_data()
    model.components["decay"].update_reflection_data()
    apm = multi_active_parameter_manager(
        ScalingTarget(),
        [model.components],
        [["scale", "decay"]],
        scaling_active_parameter_manager,
    )
    model.components["scale"].inverse_scales = flex.double([2.0, 1.0, 2.0])
    model.components["decay"].inverse_scales = flex.double([1.0, 1.0, 0.4])

    Ih_table = IhTable([small_reflection_table],
                       single_exp.crystal.get_space_group())

    with patch.object(SingleScaler, "__init__", lambda x, y, z, k: None):
        scaler = SingleScaler(None, None, None)
        scaler._Ih_table = Ih_table

        # Now do finite difference check.
        target = ScalingTarget()

        scaler.update_for_minimisation(apm, 0)
        grad = target.calculate_gradients(scaler.Ih_table.blocked_data_list[0])
        res = target.calculate_residuals(scaler.Ih_table.blocked_data_list[0])

        assert (res > 1e-8).count(True) > 0, """residual should not be zero, or
        the gradient test below will not really be working!"""

        # Now compare to finite difference
        f_d_grad = calculate_gradient_fd(target, scaler, apm)
        print(list(f_d_grad))
        print(list(grad))
        assert list(grad) == pytest.approx(list(f_d_grad))

        sel = f_d_grad > 1e-8
        assert sel.count(True) > 0, """sel should have some elements, as the
        finite difference gradient should not be all zero"""
Example #4
def test_target_jacobian_calculation_finite_difference(physical_param,
                                                       single_exp,
                                                       large_reflection_table):
    """Test the calculated jacobian against a finite difference calculation."""
    physical_param.physical.decay_correction = False
    model = PhysicalScalingModel.from_data(physical_param, single_exp,
                                           large_reflection_table)
    # need to 'add_data'
    model.configure_components(large_reflection_table, single_exp,
                               physical_param)
    model.components["scale"].update_reflection_data()
    apm = multi_active_parameter_manager(
        ScalingTarget(),
        [model.components],
        [["scale"]],
        scaling_active_parameter_manager,
    )
    Ih_table = IhTable([large_reflection_table],
                       single_exp.crystal.get_space_group())

    with patch.object(SingleScaler, "__init__", lambda x, y, z, k: None):
        scaler = SingleScaler(None, None, None)
        scaler._Ih_table = Ih_table

        target = ScalingTarget()
        scaler.update_for_minimisation(apm, 0)

        fd_jacobian = calculate_jacobian_fd(target, scaler, apm)
        r, jacobian, w = target.compute_residuals_and_gradients(
            scaler.Ih_table.blocked_data_list[0])
        assert r == pytest.approx(
            [-50.0 / 3.0, 70.0 / 3.0, -20.0 / 3.0, 12.5, -2.5] +
            [-25.0, 0.0, -75.0, 0.0, 200.0])
        assert w == pytest.approx(
            [0.1, 0.1, 0.1, 0.02, 0.1, 0.02, 0.01, 0.02, 0.01, 0.01])

        n_rows = jacobian.n_rows
        n_cols = jacobian.n_cols

        print(jacobian)
        print(fd_jacobian)

        for i in range(0, n_rows):
            for j in range(0, n_cols):
                assert jacobian[i, j] == pytest.approx(fd_jacobian[i, j],
                                                       abs=1e-4)
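
Examples #3 and #4 both rely on patch.object to replace SingleScaler.__init__ with a no-op, so that a bare instance can be built and only the state under test injected. The pattern in isolation:

from unittest.mock import patch

# The lambda mirrors __init__'s (self, params, experiment, reflections)
# signature but does nothing, so construction performs no real setup.
with patch.object(SingleScaler, "__init__", lambda x, y, z, k: None):
    scaler = SingleScaler(None, None, None)
    scaler._Ih_table = Ih_table  # hand-assign just what the test needs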
Example #5
def test_update_error_model(mock_errormodel, mock_errormodel2):
    """Test the update_error_model method"""
    p, e, r = (generated_param(), generated_exp(), generated_refl())
    exp = create_scaling_model(p, e, r)
    p.reflection_selection.method = "use_all"
    # test initialised correctly
    scaler = SingleScaler(p, exp[0], r)
    block = scaler.global_Ih_table.blocked_data_list[0]
    original_vars = block.variances
    # test update_error_model - should update weights in the global Ih table.
    # As different things will be set in the Ih_table and reflection table,
    # split the test to use two different error models.
    scaler._update_error_model(mock_errormodel)
    assert list(block.variances) == list(original_vars)
    newvars = flex.double(range(1, 8))
    assert list(block.block_selections[0]) == [2, 0, 4, 5, 6, 1, 3]
    assert list(block.weights) == list(1.0 / newvars)
    assert scaler.experiment.scaling_model.error_model is mock_errormodel

    # now test for updating of reflection table
    # do again with second errormodel
    scaler.global_Ih_table.reset_error_model()
    scaler._update_error_model(mock_errormodel2)
    assert list(block.variances) == list(original_vars)
    newvars = flex.double(range(1, 9))
    assert list(block.block_selections[0]) == [2, 0, 4, 5, 6, 1, 3]
    # the new variances are set at the block_selection positions above (taking
    # into account the one non-suitable refl at index 5)
    assert list(block.weights) == list(1.0 / newvars)[:-1]
    assert scaler.experiment.scaling_model.error_model is mock_errormodel2
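
The mock_errormodel fixtures are not shown in this excerpt. One plausible shape, assuming _update_error_model asks the error model for updated variances via an update_variances method (the method name is an assumption, not confirmed here):

from unittest.mock import MagicMock
import pytest
from dials.array_family import flex

@pytest.fixture
def mock_errormodel():
    """A hypothetical error model whose new variances become 1/weights."""
    em = MagicMock()
    em.update_variances.return_value = flex.double(range(1, 8))  # assumed API
    return em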
Example #6
def test_SingleScaler_combine_intensities():
    """test combine intensities method"""
    p, e, r = (generated_param(), generated_exp(), generated_refl_for_comb())
    exp = create_scaling_model(p, e, r)
    p.reflection_selection.method = "use_all"
    scaler = SingleScaler(p, exp[0], r)
    scaler.combine_intensities()

    # The input makes the profile intensities best - so check these are set in the
    # reflection table and global_Ih_table
    assert list(scaler.reflection_table["intensity"]) == list(
        r["intensity.prf.value"])
    assert list(scaler.reflection_table["variance"]) == list(
        r["intensity.prf.variance"])
    block = scaler.global_Ih_table.blocked_data_list[0]
    block_sel = block.block_selections[0]
    suitable = scaler.suitable_refl_for_scaling_sel
    assert list(block.intensities) == list(
        scaler.reflection_table["intensity"].select(suitable).select(
            block_sel))
    assert list(block.variances) == list(
        scaler.reflection_table["variance"].select(suitable).select(block_sel))
Example #7
def test_SingleScaler_expand_scales_to_all_reflections(mock_apm):
    p, e, r = (generated_param(), generated_exp(), generated_refl())
    exp = create_scaling_model(p, e, r)
    p.reflection_selection.method = "use_all"
    scaler = SingleScaler(p, exp[0], r)
    # test expand to all reflections method. First check scales are all 1, then
    # update a component to simulate a minimisation result, then check that
    # scales are set only in all suitable reflections (as it may not be possible
    # to calculate scales for unsuitable reflections!)
    # Must also update the scales in the global_Ih_table
    assert list(scaler.reflection_table["inverse_scale_factor"]) == [1.0] * 8
    scaler.experiment.scaling_model.components[
        "scale"].parameters = flex.double([2.0])
    scaler.expand_scales_to_all_reflections(calc_cov=False)
    assert (list(
        scaler.reflection_table["inverse_scale_factor"]) == [2.0] * 5 + [1.0] +
            [2.0] * 2)
    assert (list(
        scaler.global_Ih_table.blocked_data_list[0].inverse_scale_factors) ==
            [2.0] * 7)

    assert list(
        scaler.reflection_table["inverse_scale_factor_variance"]) == [0.0] * 8
    # now try again
    apm = Mock()
    apm.n_active_params = 2
    var_list = [1.0, 0.1, 0.1, 0.5]
    apm.var_cov_matrix = flex.double(var_list)
    apm.var_cov_matrix.reshape(flex.grid(2, 2))
    scaler.update_var_cov(apm)
    assert scaler.var_cov_matrix[0, 0] == var_list[0]
    assert scaler.var_cov_matrix[0, 1] == var_list[1]
    assert scaler.var_cov_matrix[1, 0] == var_list[2]
    assert scaler.var_cov_matrix[1, 1] == var_list[3]
    assert scaler.var_cov_matrix.non_zeroes == 4
    scaler.expand_scales_to_all_reflections(calc_cov=True)
    assert list(
        scaler.reflection_table["inverse_scale_factor_variance"]
    ) == pytest.approx(
        [2.53320, 1.07106, 1.08125, 1.23219, 1.15442, 0.0, 1.0448, 1.0448],
        1e-4)

    # Second case - when var_cov_matrix is only part of full matrix.
    p, e, r = (generated_param(), generated_exp(), generated_refl())
    exp = create_scaling_model(p, e, r)
    p.reflection_selection.method = "use_all"
    scaler = SingleScaler(p, exp[0], r)
    apm = mock_apm
    scaler.update_var_cov(apm)
    assert scaler.var_cov_matrix.non_zeroes == 1
    assert scaler.var_cov_matrix[0, 0] == 2.0
    assert scaler.var_cov_matrix.n_cols == 2
    assert scaler.var_cov_matrix.n_rows == 2
    assert scaler.var_cov_matrix.non_zeroes == 1
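
The mock_apm fixture used in the second case is not shown. Judging from the assertions, it carries a 1x1 variance-covariance matrix for a single active parameter, which update_var_cov embeds in the scaler's full 2x2 matrix. A hedged sketch (the real fixture may need further attributes):

from unittest.mock import Mock
import pytest
from dials.array_family import flex

@pytest.fixture
def mock_apm():
    """A hypothetical apm with one active parameter and a 1x1 var-cov matrix."""
    apm = Mock()
    apm.n_active_params = 1
    apm.var_cov_matrix = flex.double([2.0])
    apm.var_cov_matrix.reshape(flex.grid(1, 1))
    return apm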
Example #8
def test_targetscaler_initialisation():
    """Unit tests for the MultiScalerBase class."""
    p, e = (generated_param(), generated_exp(2))
    r1 = generated_refl(id_=0)
    p.reflection_selection.method = "intensity_ranges"

    r1["intensity.sum.value"] = r1["intensity"]
    r1["intensity.sum.variance"] = r1["variance"]
    r2 = generated_refl(id_=1)
    r2["intensity.sum.value"] = r2["intensity"]
    r2["intensity.sum.variance"] = r2["variance"]
    exp = create_scaling_model(p, e, [r1, r2])
    singlescaler1 = SingleScaler(p, exp[0], r1, for_multi=True)
    singlescaler2 = SingleScaler(p, exp[1], r2, for_multi=True)

    # singlescaler2.experiments.scaling_model.set_scaling_model_as_scaled()

    targetscaler = TargetScaler(scaled_scalers=[singlescaler1],
                                unscaled_scalers=[singlescaler2])

    # check initialisation
    assert len(targetscaler.active_scalers) == 1
    assert len(targetscaler.single_scalers) == 1
    assert targetscaler.active_scalers[0] == singlescaler2
    assert targetscaler.single_scalers[0] == singlescaler1

    # check for correct setup of global Ih table
    assert targetscaler.global_Ih_table.size == 7  # only for active scalers
    assert list(
        targetscaler.global_Ih_table.blocked_data_list[0].intensities) == [
            3.0,
            1.0,
            500.0,
            2.0,
            2.0,
            2.0,
            4.0,
        ]
    block_selections = targetscaler.global_Ih_table.blocked_data_list[
        0].block_selections
    assert list(block_selections[0]) == [2, 0, 4, 5, 6, 1, 3]

    # check for correct setup of Ih_table
    assert targetscaler.Ih_table.size == 6
    assert list(targetscaler.Ih_table.blocked_data_list[0].intensities) == [
        3.0,
        1.0,
        2.0,
        2.0,
        2.0,
        4.0,
    ]
    block_selections = targetscaler.Ih_table.blocked_data_list[
        0].block_selections
    assert list(block_selections[0]) == [2, 0, 5, 6, 1, 3]

    # check for correct setup of target Ih_Table
    assert targetscaler.target_Ih_table.size == 6
    assert list(
        targetscaler.target_Ih_table.blocked_data_list[0].intensities) == [
            3.0,
            1.0,
            2.0,
            2.0,
            2.0,
            4.0,
        ]
    block_selections = targetscaler.target_Ih_table.blocked_data_list[
        0].block_selections
    assert list(block_selections[0]) == [
        2,
        0,
        4,
        5,
        1,
        3,
    ]  # different, as the target_Ih_table is
    # not created with indices lists.

    block_selections = targetscaler.Ih_table.blocked_data_list[
        0].block_selections
    # check for correct data/d_values in components
    for i, scaler in enumerate(targetscaler.active_scalers):
        d_suitable = scaler.reflection_table["d"].select(
            scaler.suitable_refl_for_scaling_sel)
        decay = scaler.experiment.scaling_model.components["decay"]
        # first check 'data' contains all suitable reflections
        assert list(decay.data["d"]) == list(d_suitable)
        # Now check 'd_values' (which will be used for minimisation) matches Ih_table data
        assert list(decay.d_values[0]) == list(
            d_suitable.select(block_selections[i]))

    # but shouldn't have updated the other (inactive) scaler's components
    assert (targetscaler.single_scalers[0].experiment.scaling_model.
            components["decay"].d_values == [])
Example #9
def test_SingleScaler_initialisation():
    """Test that all attributes are correctly set upon initialisation"""
    p, e, r = (generated_param(), generated_exp(), generated_refl())
    exp = create_scaling_model(p, e, r)
    p.reflection_selection.method = "use_all"
    # test initialised correctly
    scaler = SingleScaler(p, exp[0], r)
    assert (list(scaler.suitable_refl_for_scaling_sel) == [True] * 5 +
            [False] + [True] * 2)
    # all 7 of the suitable should be within the scaling_subset
    assert list(scaler.scaling_subset_sel) == [True] * 7
    # one of these is not in the scaling selection due to being an outlier.
    assert list(scaler.scaling_selection) == [True] * 4 + [False] + [True] * 2
    assert list(scaler.outliers) == [False] * 4 + [True] + [False] * 2
    assert scaler.n_suitable_refl == 7

    # check for correct setup of global_Ih_table
    # the block selection is the order in which to extract from the suitable reflections
    assert scaler.global_Ih_table.size == 7
    assert list(scaler.global_Ih_table.blocked_data_list[0].intensities) == [
        3.0,
        1.0,
        500.0,
        2.0,
        2.0,
        2.0,
        4.0,
    ]
    block_selection = scaler.global_Ih_table.blocked_data_list[
        0].block_selections[0]
    assert list(block_selection) == [2, 0, 4, 5, 6, 1, 3]

    # check for correct setup of Ih_table
    assert scaler.Ih_table.size == 6
    assert list(scaler.Ih_table.blocked_data_list[0].intensities) == [
        3.0,
        1.0,
        2.0,
        2.0,
        2.0,
        4.0,
    ]
    block_selection = scaler.Ih_table.blocked_data_list[0].block_selections[0]
    assert list(block_selection) == [2, 0, 5, 6, 1, 3]

    # check for correct data/d_values in components
    d_suitable = r["d"].select(scaler.suitable_refl_for_scaling_sel)
    decay = scaler.experiment.scaling_model.components["decay"]
    # first check 'data' contains all suitable reflections
    assert list(decay.data["d"]) == list(d_suitable)
    # Now check 'd_values' (which will be used for minimisation) matches Ih_table data
    assert list(decay.d_values[0]) == list(d_suitable.select(block_selection))

    # test make ready for scaling method
    # set some new outliers and check for updated datastructures
    outlier_list = [False] * 3 + [True] * 2 + [False] * 2
    scaler.outliers = flex.bool(outlier_list)
    scaler.make_ready_for_scaling(outlier=True)
    assert scaler.Ih_table.size == 5
    assert list(scaler.Ih_table.blocked_data_list[0].intensities) == [
        3.0,
        1.0,
        2.0,
        2.0,
        2.0,
    ]
    block_selection = scaler.Ih_table.blocked_data_list[0].block_selections[0]
    assert list(block_selection) == [2, 0, 5, 6, 1]
    assert list(decay.d_values[0]) == list(d_suitable.select(block_selection))

    # test set outliers
    assert list(r.get_flags(r.flags.outlier_in_scaling)) == [False] * 8
    scaler._set_outliers()
    assert list(r.get_flags(
        r.flags.outlier_in_scaling)) == outlier_list + [False]
Example #10
    def create(cls, params, experiment, reflection_table, for_multi=False):
        """Perform reflection_table preprocessing and create a SingleScaler."""

        cls.ensure_experiment_identifier(params, experiment, reflection_table)

        logger.info(
            "Preprocessing data for scaling. The id assigned to this \n"
            "dataset is %s, and the scaling model type being applied is %s. \n",
            list(reflection_table.experiment_identifiers().values())[0],
            experiment.scaling_model.id_,
        )

        reflection_table, reasons = cls.filter_bad_reflections(
            reflection_table)

        if "inverse_scale_factor" not in reflection_table:
            reflection_table["inverse_scale_factor"] = flex.double(
                reflection_table.size(), 1.0)
        elif (reflection_table["inverse_scale_factor"].count(0.0) ==
              reflection_table.size()):
            reflection_table["inverse_scale_factor"] = flex.double(
                reflection_table.size(), 1.0)
        reflection_table = choose_scaling_intensities(
            reflection_table, params.reflection_selection.intensity_choice)

        excluded_for_scaling = reflection_table.get_flags(
            reflection_table.flags.excluded_for_scaling)
        user_excluded = reflection_table.get_flags(
            reflection_table.flags.user_excluded_in_scaling)
        reasons.add_reason("user excluded", user_excluded.count(True))
        reasons.add_reason("excluded for scaling",
                           excluded_for_scaling.count(True))
        n_excluded = (excluded_for_scaling | user_excluded).count(True)
        if n_excluded == reflection_table.size():
            logger.info(
                "All reflections were determined to be unsuitable for scaling."
            )
            logger.info(reasons)
            raise BadDatasetForScalingException(
                """Unable to use this dataset for scaling""")
        else:
            logger.info(
                "%s/%s reflections not suitable for scaling\n%s",
                n_excluded,
                reflection_table.size(),
                reasons,
            )

        if not for_multi:
            determine_reflection_selection_parameters(params, [experiment],
                                                      [reflection_table])
        if params.reflection_selection.method == "intensity_ranges":
            reflection_table = quasi_normalisation(reflection_table,
                                                   experiment)
        if (params.reflection_selection.method
                in (None, Auto, "auto", "quasi_random")) or (
                    experiment.scaling_model.id_ == "physical"
                    and "absorption" in experiment.scaling_model.components):
            if experiment.scan:
                # calc theta and phi cryst
                reflection_table["phi"] = (
                    reflection_table["xyzobs.px.value"].parts()[2] *
                    experiment.scan.get_oscillation()[1])
                reflection_table = calc_crystal_frame_vectors(
                    reflection_table, experiment)

        return SingleScaler(params, experiment, reflection_table, for_multi)
Example #11
    def create(cls, params, experiment, reflection_table, for_multi=False):
        """Perform reflection_table preprocessing and create a SingleScaler."""

        cls.ensure_experiment_identifier(experiment, reflection_table)

        logger.info(
            "The scaling model type being applied is %s. \n",
            experiment.scaling_model.id_,
        )
        try:
            reflection_table = cls.filter_bad_reflections(
                reflection_table,
                partiality_cutoff=params.cut_data.partiality_cutoff,
                min_isigi=params.cut_data.min_isigi,
                intensity_choice=params.reflection_selection.intensity_choice,
            )
        except ValueError:
            raise BadDatasetForScalingException

        # combine partial measurements of the same reflection, to handle reflections
        # that were split by dials.integrate - this changes the size of the
        # reflection table.
        reflection_table = sum_partial_reflections(reflection_table)

        if "inverse_scale_factor" not in reflection_table:
            reflection_table["inverse_scale_factor"] = flex.double(
                reflection_table.size(), 1.0)
        elif (reflection_table["inverse_scale_factor"].count(0.0) ==
              reflection_table.size()):
            reflection_table["inverse_scale_factor"] = flex.double(
                reflection_table.size(), 1.0)
        reflection_table = choose_initial_scaling_intensities(
            reflection_table, params.reflection_selection.intensity_choice)

        excluded_for_scaling = reflection_table.get_flags(
            reflection_table.flags.excluded_for_scaling)
        user_excluded = reflection_table.get_flags(
            reflection_table.flags.user_excluded_in_scaling)
        reasons = Reasons()
        reasons.add_reason("user excluded", user_excluded.count(True))
        reasons.add_reason("excluded for scaling",
                           excluded_for_scaling.count(True))
        n_excluded = (excluded_for_scaling | user_excluded).count(True)
        if n_excluded == reflection_table.size():
            logger.info(
                "All reflections were determined to be unsuitable for scaling."
            )
            logger.info(reasons)
            raise BadDatasetForScalingException(
                """Unable to use this dataset for scaling""")
        else:
            logger.info(
                "Excluding %s/%s reflections\n%s",
                n_excluded,
                reflection_table.size(),
                reasons,
            )

        if params.reflection_selection.method == "intensity_ranges":
            reflection_table = quasi_normalisation(reflection_table,
                                                   experiment)
        if (params.reflection_selection.method
                in (None, Auto, "auto", "quasi_random")) or (
                    experiment.scaling_model.id_ == "physical"
                    and "absorption" in experiment.scaling_model.components):
            if experiment.scan:
                reflection_table = calc_crystal_frame_vectors(
                    reflection_table, experiment)
                alignment_axis = (1.0, 0.0, 0.0)
                reflection_table["s0c"] = align_axis_along_z(
                    alignment_axis, reflection_table["s0c"])
                reflection_table["s1c"] = align_axis_along_z(
                    alignment_axis, reflection_table["s1c"])
        try:
            scaler = SingleScaler(params, experiment, reflection_table,
                                  for_multi)
        except BadDatasetForScalingException as e:
            raise ValueError(e)
        else:
            return scaler
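
Examples #10 and #11 are two revisions of the same factory classmethod; the later one adds partiality/I-sigma filtering, summation of partial reflections, and the s0c/s1c axis alignment. A minimal usage sketch, assuming the method lives on a factory class such as SingleScalerFactory (the class name and import path are inferred from context, not shown in this excerpt):

from dials.algorithms.scaling.scaler_factory import SingleScalerFactory  # assumed path

try:
    scaler = SingleScalerFactory.create(params, experiment, reflection_table)
except (BadDatasetForScalingException, ValueError):
    # the dataset was rejected during preprocessing: Example #10 raises the
    # scaling exception directly, Example #11 re-raises it as ValueError
    scaler = None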