Example #1
def test_SingleScaler_expand_scales_to_all_reflections(mock_apm):
    p, e, r = (generated_param(), generated_exp(), generated_refl())
    exp = create_scaling_model(p, e, r)
    p.reflection_selection.method = "use_all"
    scaler = SingleScaler(p, exp[0], r)
    # Test the expand_scales_to_all_reflections method. First check that the
    # scales are all 1, then update a component to simulate a minimisation
    # result, then check that scales are set for all suitable reflections only
    # (as it may not be possible to calculate scales for unsuitable
    # reflections). The scales in the global_Ih_table must also be updated.
    assert list(scaler.reflection_table["inverse_scale_factor"]) == [1.0] * 8
    scaler.experiment.scaling_model.components[
        "scale"].parameters = flex.double([2.0])
    scaler.expand_scales_to_all_reflections(calc_cov=False)
    assert list(scaler.reflection_table["inverse_scale_factor"]) == (
        [2.0] * 5 + [1.0] + [2.0] * 2
    )
    assert list(
        scaler.global_Ih_table.blocked_data_list[0].inverse_scale_factors
    ) == [2.0] * 7

    assert list(
        scaler.reflection_table["inverse_scale_factor_variance"]) == [0.0] * 8
    # now test update_var_cov and the resulting variance calculation
    apm = Mock()
    apm.n_active_params = 2
    var_list = [1.0, 0.1, 0.1, 0.5]
    apm.var_cov_matrix = flex.double(var_list)
    apm.var_cov_matrix.reshape(flex.grid(2, 2))
    scaler.update_var_cov(apm)
    assert scaler.var_cov_matrix[0, 0] == var_list[0]
    assert scaler.var_cov_matrix[0, 1] == var_list[1]
    assert scaler.var_cov_matrix[1, 0] == var_list[2]
    assert scaler.var_cov_matrix[1, 1] == var_list[3]
    assert scaler.var_cov_matrix.non_zeroes == 4
    scaler.expand_scales_to_all_reflections(calc_cov=True)
    assert list(
        scaler.reflection_table["inverse_scale_factor_variance"]
    ) == pytest.approx(
        [2.53320, 1.07106, 1.08125, 1.23219, 1.15442, 0.0, 1.0448, 1.0448],
        1e-4)

    # Second case - when var_cov_matrix is only part of the full matrix.
    p, e, r = (generated_param(), generated_exp(), generated_refl())
    exp = create_scaling_model(p, e, r)
    p.reflection_selection.method = "use_all"
    scaler = SingleScaler(p, exp[0], r)
    apm = mock_apm
    scaler.update_var_cov(apm)
    assert scaler.var_cov_matrix.non_zeroes == 1
    assert scaler.var_cov_matrix[0, 0] == 2.0
    assert scaler.var_cov_matrix.n_cols == 2
    assert scaler.var_cov_matrix.n_rows == 2
    assert scaler.var_cov_matrix.non_zeroes == 1
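
A hypothetical sketch of the mock_apm fixture this second case assumes (the real fixture's attributes may differ): a mocked parameter manager carrying a 1x1 var-cov matrix for a single active "scale" parameter, which update_var_cov then embeds into the scaler's full 2x2 matrix.

import pytest
from unittest.mock import Mock

from dials.array_family import flex


@pytest.fixture
def mock_apm():
    """Hypothetical mock parameter manager; attribute names are assumptions."""
    apm = Mock()
    apm.var_cov_matrix = flex.double([2.0])
    apm.var_cov_matrix.reshape(flex.grid(1, 1))
    apm.n_active_params = 1
    apm.components_list = ["scale"]  # assumed mapping of the block to param 0
    return apm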
Example #2
def test_multiscaler_update_for_minimisation():
    """Test the multiscaler update_for_minimisation method."""

    p, e = (generated_param(), generated_exp(2))
    p.reflection_selection.method = "use_all"
    r1 = generated_refl(id_=0)
    r1["intensity.sum.value"] = r1["intensity"]
    r1["intensity.sum.variance"] = r1["variance"]
    r2 = generated_refl(id_=1)
    r2["intensity.sum.value"] = r2["intensity"]
    r2["intensity.sum.variance"] = r2["variance"]
    p.scaling_options.nproc = 2
    p.model = "physical"
    exp = create_scaling_model(p, e, [r1, r2])
    singlescaler1 = create_scaler(p, [exp[0]], [r1])
    singlescaler2 = create_scaler(p, [exp[1]], [r2])

    multiscaler = MultiScaler([singlescaler1, singlescaler2])
    pmg = ScalingParameterManagerGenerator(
        multiscaler.active_scalers,
        ScalingTarget,
        multiscaler.params.scaling_refinery.refinement_order,
    )
    multiscaler.single_scalers[0].components["scale"].parameters /= 2.0
    multiscaler.single_scalers[1].components["scale"].parameters *= 1.5
    apm = pmg.parameter_managers()[0]
    multiscaler.update_for_minimisation(apm, 0)
    multiscaler.update_for_minimisation(apm, 1)
    # each s, d pair is the scales and derivatives for one scaler and block,
    # used to build the expected values below
    s1, d1 = RefinerCalculator.calculate_scales_and_derivatives(
        apm.apm_list[0], 0)
    s2, d2 = RefinerCalculator.calculate_scales_and_derivatives(
        apm.apm_list[1], 0)
    s3, d3 = RefinerCalculator.calculate_scales_and_derivatives(
        apm.apm_list[0], 1)
    s4, d4 = RefinerCalculator.calculate_scales_and_derivatives(
        apm.apm_list[1], 1)
    expected_scales_for_block_1 = s1
    expected_scales_for_block_1.extend(s2)
    expected_scales_for_block_2 = s3
    expected_scales_for_block_2.extend(s4)

    expected_derivatives_for_block_1 = sparse.matrix(
        expected_scales_for_block_1.size(), apm.n_active_params)
    expected_derivatives_for_block_2 = sparse.matrix(
        expected_scales_for_block_2.size(), apm.n_active_params)

    expected_derivatives_for_block_1.assign_block(d1, 0, 0)
    expected_derivatives_for_block_1.assign_block(d2, d1.n_rows,
                                                  apm.apm_data[1]["start_idx"])
    expected_derivatives_for_block_2.assign_block(d3, 0, 0)
    expected_derivatives_for_block_2.assign_block(d4, d3.n_rows,
                                                  apm.apm_data[1]["start_idx"])

    block_list = multiscaler.Ih_table.blocked_data_list

    assert block_list[0].inverse_scale_factors == expected_scales_for_block_1
    assert block_list[1].inverse_scale_factors == expected_scales_for_block_2
    assert block_list[1].derivatives == expected_derivatives_for_block_2
    assert block_list[0].derivatives == expected_derivatives_for_block_1
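
To make the expected-derivative construction above concrete, here is a minimal standalone sketch of stacking two per-dataset derivative blocks into one sparse matrix with assign_block, using hand-made 2x1 blocks (illustrative values, not test data).

from scitbx import sparse

# two per-dataset derivative blocks: 2 reflections x 1 parameter each
d1 = sparse.matrix(2, 1)
d1[0, 0] = 0.5
d1[1, 0] = 0.5
d2 = sparse.matrix(2, 1)
d2[0, 0] = 1.5
d2[1, 0] = 1.5

# rows are stacked; each block is offset to its dataset's parameter columns
combined = sparse.matrix(d1.n_rows + d2.n_rows, 2)
combined.assign_block(d1, 0, 0)
combined.assign_block(d2, d1.n_rows, 1)
assert combined.non_zeroes == 4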
Example #3
def test_SingleScaler_update_for_minimisation():
    """Test the update_for_minimisation method of the singlescaler."""
    # test_params.scaling_options.nproc = 1
    p, e, r = (generated_param(), generated_exp(), generated_refl_2())
    exp = create_scaling_model(p, e, r)
    p.reflection_selection.method = "use_all"
    single_scaler = SingleScaler(p, exp[0], r)
    apm_fac = create_apm_factory(single_scaler)
    single_scaler.components["scale"].parameters /= 2.0
    apm = apm_fac.make_next_apm()

    Ih_table = single_scaler.Ih_table.blocked_data_list[0]
    Ih_table.calc_Ih()
    assert list(Ih_table.inverse_scale_factors) == [1.0, 1.0]
    assert list(Ih_table.Ih_values) == [10.0, 1.0]
    single_scaler.update_for_minimisation(apm, 0)
    # Should set new scale factors, and calculate Ih and weights.
    bf = basis_function().calculate_scales_and_derivatives(apm.apm_list[0], 0)
    assert list(Ih_table.inverse_scale_factors) == list(bf[0])
    assert list(Ih_table.Ih_values) != [10.0, 1.0]  # values should have changed
    assert approx_equal(list(Ih_table.Ih_values),
                        list(Ih_table.intensities / bf[0]))
    for i in range(Ih_table.derivatives.n_rows):
        for j in range(Ih_table.derivatives.n_cols):
            assert approx_equal(Ih_table.derivatives[i, j], bf[1][i, j])
    assert Ih_table.derivatives.non_zeroes == bf[1].non_zeroes
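
The assertion that Ih_values equals intensities / scales holds here because each reflection belongs to its own symmetry group. A plain-Python sketch, assuming the standard weighted least-squares estimate Ih = sum(w*g*I) / sum(w*g^2) used in scaling, shows the single-measurement reduction:

def ih_estimate(intensities, scales, weights):
    # weighted least-squares best estimate for one group of equivalents
    num = sum(w * g * i for w, g, i in zip(weights, scales, intensities))
    den = sum(w * g * g for w, g in zip(weights, scales))
    return num / den


# with a single measurement per group this reduces to I / g
assert ih_estimate([10.0], [0.5], [1.0]) == 10.0 / 0.5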
Example #4
def test_update_error_model(mock_errormodel, mock_errormodel2):
    """Test the update_error_model method"""
    p, e, r = (generated_param(), generated_exp(), generated_refl())
    exp = create_scaling_model(p, e, r)
    p.reflection_selection.method = "use_all"
    # test initialised correctly
    scaler = SingleScaler(p, exp[0], r)
    block = scaler.global_Ih_table.blocked_data_list[0]
    original_vars = block.variances
    # Test the update error model method - this should update the weights in
    # the global Ih table. As different things will be set in the Ih_table and
    # the reflection table, split the test up to use two different error models.
    scaler._update_error_model(mock_errormodel)
    assert list(block.variances) == list(original_vars)
    newvars = flex.double(range(1, 8))
    assert list(block.block_selections[0]) == [2, 0, 4, 5, 6, 1, 3]
    assert list(block.weights) == list(1.0 / newvars)
    assert scaler.experiment.scaling_model.error_model is mock_errormodel

    # now test for updating of reflection table
    # do again with second errormodel
    scaler.global_Ih_table.reset_error_model()
    scaler._update_error_model(mock_errormodel2)
    assert list(block.variances) == list(original_vars)
    newvars = flex.double(range(1, 9))
    assert list(block.block_selections[0]) == [2, 0, 4, 5, 6, 1, 3]
    # the new variances are set at the positions given by the block selections
    # above (taking into account the one non-suitable reflection at index 5)
    assert list(block.weights) == list(1.0 / newvars)[:-1]
    assert scaler.experiment.scaling_model.error_model is mock_errormodel2
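
As a plain-Python restatement of what the test checks (not the DIALS API): updating the error model leaves the stored variances untouched, while the block weights become the inverse of the error-model variances.

modelled_variances = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]  # matching newvars above
block_weights = [1.0 / v for v in modelled_variances]
assert block_weights[0] == 1.0
assert abs(block_weights[-1] - 1.0 / 7.0) < 1e-12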
Example #5
def test_target_jacobian_calculation_finite_difference(physical_param,
                                                       single_exp,
                                                       large_reflection_table):
    """Test the calculated jacobian against a finite difference calculation."""
    test_params, exp, test_refl = physical_param, single_exp, large_reflection_table
    test_params.parameterisation.decay_term = False
    test_params.model = "physical"
    experiments = create_scaling_model(test_params, exp, test_refl)
    assert experiments[0].scaling_model.id_ == "physical"
    scaler = create_scaler(test_params, experiments, test_refl)

    apm = multi_active_parameter_manager([scaler.components], [["scale"]],
                                         scaling_active_parameter_manager)

    target = ScalingTarget()
    scaler.update_for_minimisation(apm, 0)

    fd_jacobian = calculate_jacobian_fd(target, scaler, apm)
    _, jacobian, _ = target.compute_residuals_and_gradients(
        scaler.Ih_table.blocked_data_list[0])

    n_rows = jacobian.n_rows
    n_cols = jacobian.n_cols

    print(jacobian)
    print(fd_jacobian)

    for i in range(0, n_rows):
        for j in range(0, n_cols):
            assert jacobian[i, j] == pytest.approx(fd_jacobian[i, j], abs=1e-4)
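
A self-contained sketch of the central-difference idea behind calculate_jacobian_fd (the real helper lives elsewhere in the test module; names and signatures here are illustrative): perturb one parameter at a time and difference the residual vector.

def jacobian_fd_sketch(residuals_fn, x, delta=1.0e-6):
    """Central-difference Jacobian of a residual vector w.r.t. parameters x."""
    columns = []
    for j in range(len(x)):
        x_plus, x_minus = list(x), list(x)
        x_plus[j] += delta
        x_minus[j] -= delta
        r_plus, r_minus = residuals_fn(x_plus), residuals_fn(x_minus)
        columns.append(
            [(rp - rm) / (2.0 * delta) for rp, rm in zip(r_plus, r_minus)]
        )
    # transpose so that rows index residuals and columns index parameters
    return [list(row) for row in zip(*columns)]


# toy usage: r(x) = (x0 * x1, x0 + x1)
jac = jacobian_fd_sketch(lambda x: [x[0] * x[1], x[0] + x[1]], [2.0, 3.0])
assert abs(jac[0][0] - 3.0) < 1e-6  # d(x0*x1)/dx0 = x1
assert abs(jac[1][1] - 1.0) < 1e-6  # d(x0+x1)/dx1 = 1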
Example #6
def test_SingleScaler_update_for_minimisation():
    """Test the update_for_minimisation method of the singlescaler."""
    # test_params.scaling_options.nproc = 1
    p, e, r = (generated_param(), generated_exp(), generated_refl_2())
    exp = create_scaling_model(p, e, r)
    p.reflection_selection.method = "use_all"
    single_scaler = SingleScaler(p, exp[0], r)
    pmg = ScalingParameterManagerGenerator(
        single_scaler.active_scalers,
        ScalingTarget(),
        single_scaler.params.scaling_refinery.refinement_order,
    )
    single_scaler.components["scale"].parameters /= 2.0
    apm = pmg.parameter_managers()[0]

    Ih_table = single_scaler.Ih_table.blocked_data_list[0]
    Ih_table.calc_Ih()
    assert list(Ih_table.inverse_scale_factors) == [1.0, 1.0]
    assert list(Ih_table.Ih_values) == [10.0, 1.0]
    single_scaler.update_for_minimisation(apm, 0)
    # Should set new scale factors, and calculate Ih and weights.
    bf = RefinerCalculator.calculate_scales_and_derivatives(apm.apm_list[0], 0)
    assert list(Ih_table.inverse_scale_factors) == list(bf[0])
    assert list(Ih_table.Ih_values) != [10.0, 1.0]  # values should have changed
    assert list(Ih_table.Ih_values) == pytest.approx(
        list(Ih_table.intensities / bf[0]))
    for i in range(Ih_table.derivatives.n_rows):
        for j in range(Ih_table.derivatives.n_cols):
            assert Ih_table.derivatives[i, j] == pytest.approx(bf[1][i, j])
    assert Ih_table.derivatives.non_zeroes == bf[1].non_zeroes
Example #7
def test_create_scaling_model():
    """Test the create scaling model function."""

    # Test that one can create the correct scaling model with the phil param.
    for m in ["physical", "array", "KB"]:
        params = generated_param()
        exp = generated_exp()
        rt = generated_refl()
        params.model = m
        new_exp = create_scaling_model(params, exp, [rt])
        assert new_exp[0].scaling_model.id_ == m

    # If a scaling model already exists, then nothing else should happen.
    params = generated_param()
    exp = generated_exp()
    rt = generated_refl()
    exp[0].scaling_model = PhysicalScalingModel.from_data(params, exp[0], rt)
    old_scaling_model = exp[0].scaling_model
    params.model = "KB"
    new_exp = create_scaling_model(params, exp, [rt])
    new_scaling_model = new_exp[0].scaling_model
    assert new_scaling_model is old_scaling_model  # Should not modify original.

    # Test multiple datasets, where one already has a scaling model.
    exp = generated_exp(3)
    params = generated_param()
    rt = generated_refl()
    rt_2 = generated_refl()
    rt_3 = generated_refl()
    exp[0].scaling_model = PhysicalScalingModel.from_data(params, exp[0], rt)
    params.model = "KB"
    new_exp = create_scaling_model(params, exp, [rt, rt_2, rt_3])
    assert new_exp[0].scaling_model is exp[0].scaling_model
    assert isinstance(new_exp[1].scaling_model, KBScalingModel)
    assert isinstance(new_exp[2].scaling_model, KBScalingModel)

    # Now test overwrite_existing_models option
    params.overwrite_existing_models = True
    params.model = "physical"
    newer_exp = create_scaling_model(params, new_exp, [rt, rt_2, rt_3])
    for exp in newer_exp:
        assert isinstance(exp.scaling_model, PhysicalScalingModel)
Example #8
    def _create_model_and_scaler(self):
        """Create the scaling models and scaler."""
        self.experiments = create_scaling_model(
            self.params, self.experiments, self.reflections
        )
        logger.info("\nScaling models have been initialised for all experiments.")
        logger.info("%s%s%s", "\n", "=" * 80, "\n")

        self.experiments = set_image_ranges_in_scaling_models(self.experiments)

        self.scaler = create_scaler(self.params, self.experiments, self.reflections)
Example #9
def test_auto_scaling_model():
    """Test auto options for scaling model creation."""
    params = generated_param()
    exp = generated_exp(scan=False)
    rt = generated_refl()
    params.model = "auto"
    new_exp = create_scaling_model(params, exp, [rt])
    assert new_exp[0].scaling_model.id_ == "KB"

    params = generated_param(absorption_term="auto")
    exp = generated_exp(image_range=[1, 5])  # 5 degree wedge
    params.model = "auto"
    new_exp = create_scaling_model(params, exp, [rt])
    assert new_exp[0].scaling_model.id_ == "physical"
    assert len(new_exp[0].scaling_model.components["scale"].parameters) == 5
    assert len(new_exp[0].scaling_model.components["decay"].parameters) == 3
    assert "absorption" not in new_exp[0].scaling_model.components

    params = generated_param(absorption_term="auto")
    exp = generated_exp(image_range=[1, 20])  # 20 degree wedge
    params.model = "auto"
    new_exp = create_scaling_model(params, exp, [rt])
    assert new_exp[0].scaling_model.id_ == "physical"
    assert len(new_exp[0].scaling_model.components["scale"].parameters) == 7
    assert len(new_exp[0].scaling_model.components["decay"].parameters) == 6
    assert "absorption" not in new_exp[0].scaling_model.components

    params = generated_param(absorption_term="auto")
    exp = generated_exp(image_range=[1, 75])  # 75 degree wedge
    params.model = "auto"
    new_exp = create_scaling_model(params, exp, [rt])
    assert new_exp[0].scaling_model.id_ == "physical"
    assert len(new_exp[0].scaling_model.components["scale"].parameters) == 12
    assert len(new_exp[0].scaling_model.components["decay"].parameters) == 10
    assert "absorption" in new_exp[0].scaling_model.components

    # Now test overwrite_existing_models option
    params.overwrite_existing_models = True
    params.model = "KB"
    newer_exp = create_scaling_model(params, new_exp, [rt])
    assert isinstance(newer_exp[0].scaling_model, KBScalingModel)
Example #10
def test_multiscaler_initialisation():
    """Unit tests for the MultiScalerBase class."""
    p, e = (generated_param(), generated_exp(2))
    r1 = generated_refl(id_=0)
    r1["intensity.sum.value"] = r1["intensity"]
    r1["intensity.sum.variance"] = r1["variance"]
    r2 = generated_refl(id_=1)
    r2["intensity.sum.value"] = r2["intensity"]
    r2["intensity.sum.variance"] = r2["variance"]
    exp = create_scaling_model(p, e, [r1, r2])
    singlescaler1 = create_scaler(p, [exp[0]], [r1])
    singlescaler2 = create_scaler(p, [exp[1]], [r2])

    multiscaler = MultiScaler([singlescaler1, singlescaler2])

    # check initialisation
    assert len(multiscaler.active_scalers) == 2
    assert multiscaler.active_scalers[0] == singlescaler1
    assert multiscaler.active_scalers[1] == singlescaler2

    # check for correct setup of global Ih table
    assert multiscaler.global_Ih_table.size == 14
    assert (
        list(multiscaler.global_Ih_table.blocked_data_list[0].intensities)
        == [3.0, 1.0, 500.0, 2.0, 2.0, 2.0, 4.0] * 2
    )
    block_selections = multiscaler.global_Ih_table.blocked_data_list[0].block_selections
    assert list(block_selections[0]) == [2, 0, 4, 5, 6, 1, 3]
    assert list(block_selections[1]) == [2, 0, 4, 5, 6, 1, 3]

    # check for correct setup of Ih_table
    assert multiscaler.Ih_table.size == 12
    assert (
        list(multiscaler.Ih_table.blocked_data_list[0].intensities)
        == [3.0, 1.0, 2.0, 2.0, 2.0, 4.0] * 2
    )
    block_selections = multiscaler.Ih_table.blocked_data_list[0].block_selections
    assert list(block_selections[0]) == [2, 0, 5, 6, 1, 3]
    assert list(block_selections[1]) == [2, 0, 5, 6, 1, 3]

    # check for correct data/d_values in components
    for i, scaler in enumerate(multiscaler.active_scalers):
        d_suitable = scaler.reflection_table["d"].select(
            scaler.suitable_refl_for_scaling_sel
        )
        decay = scaler.experiment.scaling_model.components["decay"]
        # first check 'data' contains all suitable reflections
        assert list(decay.data["d"]) == list(d_suitable)
        # Now check 'd_values' (which will be used for minim.) matches Ih_table data
        assert list(decay.d_values[0]) == list(
            d_suitable.select(flumpy.from_numpy(block_selections[i]))
        )
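
A plain-Python illustration of what the block selections encode: indices into each dataset's suitable reflections, reordering them into the grouped Ih-table order. The input order below is inferred from the asserted values.

suitable_intensities = [1.0, 2.0, 3.0, 4.0, 500.0, 2.0, 2.0]  # inferred input order
block_selection = [2, 0, 4, 5, 6, 1, 3]
reordered = [suitable_intensities[i] for i in block_selection]
assert reordered == [3.0, 1.0, 500.0, 2.0, 2.0, 2.0, 4.0]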
Example #11
def test_sf_variance_calculation():
    """Test the calculation of scale factor variances."""
    test_experiments = generated_exp()
    test_params = generated_param()
    assert len(test_experiments) == 1
    experiments = create_scaling_model(test_params, test_experiments, [None])
    components = experiments[0].scaling_model.components
    rt = flex.reflection_table()
    d1 = 1.0
    d2 = 2.0
    d3 = 3.0
    rt["d"] = flex.double([d1, d2, d3])
    rt["id"] = flex.int([0, 0, 0])
    experiments[0].scaling_model.configure_components(rt, experiments[0],
                                                      test_params)
    components["scale"].update_reflection_data()
    _, d = components["scale"].calculate_scales_and_derivatives()
    assert list(d.col(0)) == [(0, 1.0), (1, 1.0), (2, 1.0)]
    components["decay"].update_reflection_data()
    s, d = components["decay"].calculate_scales_and_derivatives()
    assert list(d.col(0)) == [
        (0, 1.0 / (2.0 * d1 * d1)),
        (1, 1.0 / (2.0 * d2 * d2)),
        (2, 1.0 / (2.0 * d3 * d3)),
    ]
    var_cov = sparse.matrix(2, 2)
    a = 0.2
    b = 0.3
    c = 0.1
    var_cov[0, 0] = a
    var_cov[0, 1] = c
    var_cov[1, 0] = c
    var_cov[1, 1] = b
    variances = calc_sf_variances(components, var_cov)
    assert approx_equal(
        list(variances),
        [
            b / (4.0 * (d1**4.0)) + c / (d1**2.0) + a,
            b / (4.0 * (d2**4.0)) + c / (d2**2.0) + a,
            b / (4.0 * (d3**4.0)) + c / (d3**2.0) + a,
        ],
    )
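
The closed-form variances asserted above follow from first-order error propagation, var(s) = g V g^T with g = (ds/d(scale), ds/d(decay)) = (1, 1/(2*d^2)) at unit parameters. A quick plain-Python check of that algebra (not the DIALS implementation):

a, b, c = 0.2, 0.3, 0.1  # var(scale), var(decay), covariance
for d in (1.0, 2.0, 3.0):
    g = (1.0, 1.0 / (2.0 * d * d))  # derivatives matching the assertions above
    var = g[0] * (a * g[0] + c * g[1]) + g[1] * (c * g[0] + b * g[1])
    assert abs(var - (b / (4.0 * d**4) + c / d**2 + a)) < 1e-12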
Example #12
def test_target_gradient_calculation_finite_difference(small_reflection_table,
                                                       single_exp,
                                                       physical_param):
    """Test the calculated gradients against a finite difference calculation."""
    (test_reflections, test_experiments, params) = (
        small_reflection_table,
        single_exp,
        physical_param,
    )
    assert len(test_experiments) == 1
    assert len(test_reflections) == 1
    experiments = create_scaling_model(params, test_experiments,
                                       test_reflections)
    scaler = create_scaler(params, experiments, test_reflections)
    assert scaler.experiment.scaling_model.id_ == "physical"

    # Initialise the parameters and create an apm
    scaler.components["scale"].inverse_scales = flex.double([2.0, 1.0, 2.0])
    scaler.components["decay"].inverse_scales = flex.double([1.0, 1.0, 0.4])
    apm = multi_active_parameter_manager([scaler.components],
                                         [["scale", "decay"]],
                                         scaling_active_parameter_manager)

    # Now do finite difference check.
    target = ScalingTarget()
    scaler.update_for_minimisation(apm, 0)
    grad = target.calculate_gradients(scaler.Ih_table.blocked_data_list[0])
    res = target.calculate_residuals(scaler.Ih_table.blocked_data_list[0])

    assert (res > 1e-8), """residual should not be zero, or the gradient test
    below will not really be working!"""

    # Now compare to finite difference
    f_d_grad = calculate_gradient_fd(target, scaler, apm)
    print(list(f_d_grad))
    print(list(grad))
    assert approx_equal(list(grad), list(f_d_grad))

    sel = f_d_grad > 1e-8
    assert sel, """assert sel has some elements, as finite difference grad should
Example #13
def test_SingleScaler_combine_intensities():
    """test combine intensities method"""
    p, e, r = (generated_param(), generated_exp(), generated_refl_for_comb())
    exp = create_scaling_model(p, e, r)
    p.reflection_selection.method = "use_all"
    scaler = SingleScaler(p, exp[0], r)
    scaler.combine_intensities()

    # The input makes the profile intensities best - so check these are set in the
    # reflection table and global_Ih_table
    assert list(scaler.reflection_table["intensity"]) == list(
        r["intensity.prf.value"])
    assert list(scaler.reflection_table["variance"]) == list(
        r["intensity.prf.variance"])
    block = scaler.global_Ih_table.blocked_data_list[0]
    block_sel = block.block_selections[0]
    suitable = scaler.suitable_refl_for_scaling_sel
    assert list(block.intensities) == list(
        scaler.reflection_table["intensity"].select(suitable).select(
            block_sel))
    assert list(block.variances) == list(
        scaler.reflection_table["variance"].select(suitable).select(block_sel))
def test_equality_of_two_harmonic_table_methods(dials_regression, run_in_tmpdir):
    from dials_scaling_ext import calc_theta_phi, calc_lookup_index
    from dxtbx.serialize import load
    from dials.util.options import OptionParser
    from libtbx import phil
    from dials.algorithms.scaling.scaling_library import create_scaling_model

    data_dir = os.path.join(dials_regression, "xia2-28")
    pickle_path = os.path.join(data_dir, "20_integrated.pickle")
    sequence_path = os.path.join(data_dir, "20_integrated_experiments.json")

    phil_scope = phil.parse(
        """
      include scope dials.command_line.scale.phil_scope
    """,
        process_includes=True,
    )
    optionparser = OptionParser(phil=phil_scope, check_format=False)
    params, _ = optionparser.parse_args(args=[], quick_parse=True)
    params.model = "physical"
    lmax = 2
    params.physical.lmax = lmax

    reflection_table = flex.reflection_table.from_file(pickle_path)
    experiments = load.experiment_list(sequence_path, check_format=False)
    experiments = create_scaling_model(params, experiments, [reflection_table])

    experiment = experiments[0]
    # New method

    reflection_table["phi"] = (
        reflection_table["xyzobs.px.value"].parts()[2]
        * experiment.scan.get_oscillation()[1]
    )
    reflection_table = calc_crystal_frame_vectors(reflection_table, experiment)
    theta_phi_0 = calc_theta_phi(reflection_table["s0c"])  # array of tuples in radians
    theta_phi_1 = calc_theta_phi(reflection_table["s1c"])
    points_per_degree = 2
    s0_lookup_index = calc_lookup_index(theta_phi_0, points_per_degree)
    s1_lookup_index = calc_lookup_index(theta_phi_1, points_per_degree)
    print(list(s0_lookup_index[0:20]))
    print(list(s1_lookup_index[0:20]))
    coefficients_list = create_sph_harm_lookup_table(lmax, points_per_degree)
    experiment.scaling_model.components["absorption"].data = {
        "s0_lookup": s0_lookup_index,
        "s1_lookup": s1_lookup_index,
    }
    experiment.scaling_model.components[
        "absorption"
    ].coefficients_list = coefficients_list
    assert experiment.scaling_model.components["absorption"]._mode == "memory"
    experiment.scaling_model.components["absorption"].update_reflection_data()
    absorption = experiment.scaling_model.components["absorption"]
    harmonic_values_list = absorption.harmonic_values[0]

    experiment.scaling_model.components["absorption"].parameters = flex.double(
        [0.1, -0.1, 0.05, 0.02, 0.01, -0.05, 0.12, -0.035]
    )
    scales, derivatives = experiment.scaling_model.components[
        "absorption"
    ].calculate_scales_and_derivatives()

    # Old method:

    old_data = {"sph_harm_table": create_sph_harm_table(theta_phi_0, theta_phi_1, lmax)}
    experiment.scaling_model.components["absorption"].data = old_data
    assert experiment.scaling_model.components["absorption"]._mode == "speed"
    experiment.scaling_model.components["absorption"].update_reflection_data()
    old_harmonic_values = absorption.harmonic_values[0]
    for i in range(0, 8):
        print(i)
        assert list(harmonic_values_list[i]) == pytest.approx(
            list(old_harmonic_values.col(i).as_dense_vector()), abs=0.01
        )

    experiment.scaling_model.components["absorption"].parameters = flex.double(
        [0.1, -0.1, 0.05, 0.02, 0.01, -0.05, 0.12, -0.035]
    )
    scales_1, derivatives_1 = experiment.scaling_model.components[
        "absorption"
    ].calculate_scales_and_derivatives()

    assert list(scales_1) == pytest.approx(list(scales), abs=0.001)
    assert list(scales_1) != [1.0] * len(scales_1)
Example #15
def test_NullScaler():
    """Test for successful creation of NullScaler."""
    p, e, r = (generated_param(), generated_exp(), generated_refl())
    exp = create_scaling_model(p, e, r)
    _ = NullScaler(p, exp[0], r)
Example #16
def test_targetscaler_initialisation():
    """Unit tests for the MultiScalerBase class."""
    p, e = (generated_param(), generated_exp(2))
    r1 = generated_refl(id_=0)
    p.reflection_selection.method = "intensity_ranges"

    r1["intensity.sum.value"] = r1["intensity"]
    r1["intensity.sum.variance"] = r1["variance"]
    r2 = generated_refl(id_=1)
    r2["intensity.sum.value"] = r2["intensity"]
    r2["intensity.sum.variance"] = r2["variance"]
    exp = create_scaling_model(p, e, [r1, r2])
    singlescaler1 = SingleScaler(p, exp[0], r1, for_multi=True)
    singlescaler2 = SingleScaler(p, exp[1], r2, for_multi=True)

    # singlescaler2.experiments.scaling_model.set_scaling_model_as_scaled()

    targetscaler = TargetScaler(scaled_scalers=[singlescaler1],
                                unscaled_scalers=[singlescaler2])

    # check initialisation
    assert len(targetscaler.active_scalers) == 1
    assert len(targetscaler.single_scalers) == 1
    assert targetscaler.active_scalers[0] == singlescaler2
    assert targetscaler.single_scalers[0] == singlescaler1

    # check for correct setup of global Ih table
    assert targetscaler.global_Ih_table.size == 7  # only for active scalers
    assert list(
        targetscaler.global_Ih_table.blocked_data_list[0].intensities) == [
            3.0,
            1.0,
            500.0,
            2.0,
            2.0,
            2.0,
            4.0,
        ]
    block_selections = targetscaler.global_Ih_table.blocked_data_list[
        0].block_selections
    assert list(block_selections[0]) == [2, 0, 4, 5, 6, 1, 3]

    # check for correct setup of Ih_table
    assert targetscaler.Ih_table.size == 6
    assert list(targetscaler.Ih_table.blocked_data_list[0].intensities) == [
        3.0,
        1.0,
        2.0,
        2.0,
        2.0,
        4.0,
    ]
    block_selections = targetscaler.Ih_table.blocked_data_list[
        0].block_selections
    assert list(block_selections[0]) == [2, 0, 5, 6, 1, 3]

    # check for correct setup of target Ih_Table
    assert targetscaler.target_Ih_table.size == 6
    assert list(
        targetscaler.target_Ih_table.blocked_data_list[0].intensities) == [
            3.0,
            1.0,
            2.0,
            2.0,
            2.0,
            4.0,
        ]
    block_selections = targetscaler.target_Ih_table.blocked_data_list[
        0].block_selections
    assert list(block_selections[0]) == [
        2,
        0,
        4,
        5,
        1,
        3,
    ]  # different, as the target_Ih_table is
    # not created with indices lists.

    block_selections = targetscaler.Ih_table.blocked_data_list[
        0].block_selections
    # check for correct data/d_values in components
    for i, scaler in enumerate(targetscaler.active_scalers):
        d_suitable = scaler.reflection_table["d"].select(
            scaler.suitable_refl_for_scaling_sel)
        decay = scaler.experiment.scaling_model.components["decay"]
        # first check 'data' contains all suitable reflections
        assert list(decay.data["d"]) == list(d_suitable)
        # Now check 'd_values' (which will be used for minim.) matches Ih_table data
        assert list(decay.d_values[0]) == list(
            d_suitable.select(block_selections[i]))

    # but shouldn't have updated the other (already scaled) scaler's components
    assert (targetscaler.single_scalers[0].experiment.scaling_model.
            components["decay"].d_values == [])
Example #17
def test_SingleScaler_initialisation():
    """Test that all attributes are correctly set upon initialisation"""
    p, e, r = (generated_param(), generated_exp(), generated_refl())
    exp = create_scaling_model(p, e, r)
    p.reflection_selection.method = "use_all"
    # test initialised correctly
    scaler = SingleScaler(p, exp[0], r)
    assert (list(scaler.suitable_refl_for_scaling_sel) == [True] * 5 +
            [False] + [True] * 2)
    # all 7 of the suitable reflections should be within the scaling_subset
    assert list(scaler.scaling_subset_sel) == [True] * 7
    # one of these is not in the scaling selection due to being an outlier.
    assert list(scaler.scaling_selection) == [True] * 4 + [False] + [True] * 2
    assert list(scaler.outliers) == [False] * 4 + [True] + [False] * 2
    assert scaler.n_suitable_refl == 7

    # check for correct setup of global_Ih_table
    # the block selection gives the order of extraction from the suitable reflections
    assert scaler.global_Ih_table.size == 7
    assert list(scaler.global_Ih_table.blocked_data_list[0].intensities) == [
        3.0,
        1.0,
        500.0,
        2.0,
        2.0,
        2.0,
        4.0,
    ]
    block_selection = scaler.global_Ih_table.blocked_data_list[
        0].block_selections[0]
    assert list(block_selection) == [2, 0, 4, 5, 6, 1, 3]

    # check for correct setup of Ih_table
    assert scaler.Ih_table.size == 6
    assert list(scaler.Ih_table.blocked_data_list[0].intensities) == [
        3.0,
        1.0,
        2.0,
        2.0,
        2.0,
        4.0,
    ]
    block_selection = scaler.Ih_table.blocked_data_list[0].block_selections[0]
    assert list(block_selection) == [2, 0, 5, 6, 1, 3]

    # check for correct data/d_values in components
    d_suitable = r["d"].select(scaler.suitable_refl_for_scaling_sel)
    decay = scaler.experiment.scaling_model.components["decay"]
    # first check 'data' contains all suitable reflections
    assert list(decay.data["d"]) == list(d_suitable)
    # Now check 'd_values' (which will be used for minim.) matches Ih_table data
    assert list(decay.d_values[0]) == list(d_suitable.select(block_selection))

    # test make ready for scaling method
    # set some new outliers and check for updated datastructures
    outlier_list = [False] * 3 + [True] * 2 + [False] * 2
    scaler.outliers = flex.bool(outlier_list)
    scaler.make_ready_for_scaling(outlier=True)
    assert scaler.Ih_table.size == 5
    assert list(scaler.Ih_table.blocked_data_list[0].intensities) == [
        3.0,
        1.0,
        2.0,
        2.0,
        2.0,
    ]
    block_selection = scaler.Ih_table.blocked_data_list[0].block_selections[0]
    assert list(block_selection) == [2, 0, 5, 6, 1]
    assert list(decay.d_values[0]) == list(d_suitable.select(block_selection))

    # test set outliers
    assert list(r.get_flags(r.flags.outlier_in_scaling)) == [False] * 8
    scaler._set_outliers()
    assert list(r.get_flags(
        r.flags.outlier_in_scaling)) == outlier_list + [False]
Example #18
def test_equality_of_two_harmonic_table_methods(dials_data):
    location = dials_data("l_cysteine_dials_output", pathlib=True)
    refl = location / "20_integrated.pickle"
    expt = location / "20_integrated_experiments.json"

    phil_scope = phil.parse(
        """
      include scope dials.command_line.scale.phil_scope
    """,
        process_includes=True,
    )
    parser = ArgumentParser(phil=phil_scope, check_format=False)
    params, _ = parser.parse_args(args=[], quick_parse=True)
    params.model = "physical"
    lmax = 2
    params.physical.lmax = lmax

    reflection_table = flex.reflection_table.from_file(refl)
    experiments = load.experiment_list(expt, check_format=False)
    experiments = create_scaling_model(params, experiments, [reflection_table])

    experiment = experiments[0]
    # New method

    reflection_table["phi"] = (reflection_table["xyzobs.px.value"].parts()[2] *
                               experiment.scan.get_oscillation()[1])
    reflection_table = calc_crystal_frame_vectors(reflection_table, experiment)
    reflection_table["s1c"] = align_axis_along_z((1.0, 0.0, 0.0),
                                                 reflection_table["s1c"])
    reflection_table["s0c"] = align_axis_along_z((1.0, 0.0, 0.0),
                                                 reflection_table["s0c"])
    theta_phi_0 = calc_theta_phi(
        reflection_table["s0c"])  # array of tuples in radians
    theta_phi_1 = calc_theta_phi(reflection_table["s1c"])
    points_per_degree = 4
    s0_lookup_index = calc_lookup_index(theta_phi_0, points_per_degree)
    s1_lookup_index = calc_lookup_index(theta_phi_1, points_per_degree)
    print(list(s0_lookup_index[0:20]))
    print(list(s1_lookup_index[0:20]))
    coefficients_list = create_sph_harm_lookup_table(lmax, points_per_degree)
    experiment.scaling_model.components["absorption"].data = {
        "s0_lookup": s0_lookup_index,
        "s1_lookup": s1_lookup_index,
    }
    experiment.scaling_model.components[
        "absorption"].coefficients_list = coefficients_list
    assert experiment.scaling_model.components["absorption"]._mode == "memory"
    experiment.scaling_model.components["absorption"].update_reflection_data()
    absorption = experiment.scaling_model.components["absorption"]
    harmonic_values_list = absorption.harmonic_values[0]

    experiment.scaling_model.components["absorption"].parameters = flex.double(
        [0.1, -0.1, 0.05, 0.02, 0.01, -0.05, 0.12, -0.035])
    scales, derivatives = experiment.scaling_model.components[
        "absorption"].calculate_scales_and_derivatives()

    # Old method:

    old_data = {
        "sph_harm_table": create_sph_harm_table(theta_phi_0, theta_phi_1, lmax)
    }
    experiment.scaling_model.components["absorption"].data = old_data
    assert experiment.scaling_model.components["absorption"]._mode == "speed"
    experiment.scaling_model.components["absorption"].update_reflection_data()
    old_harmonic_values = absorption.harmonic_values[0]
    for i in range(0, 8):
        print(i)
        assert list(harmonic_values_list[i]) == pytest.approx(list(
            old_harmonic_values.col(i).as_dense_vector()),
                                                              abs=0.01)

    experiment.scaling_model.components["absorption"].parameters = flex.double(
        [0.1, -0.1, 0.05, 0.02, 0.01, -0.05, 0.12, -0.035])
    scales_1, derivatives_1 = experiment.scaling_model.components[
        "absorption"].calculate_scales_and_derivatives()

    assert list(scales_1) == pytest.approx(list(scales), abs=0.001)
    assert list(scales_1) != [1.0] * len(scales_1)