# Imports needed to run this snippet in isolation (pytest and flex are certain
# requirements; the choose_scaling_intensities path is assumed from the DIALS
# source layout).
import pytest

from dials.array_family import flex
from dials.algorithms.scaling.scaling_utilities import choose_scaling_intensities


def test_choose_scaling_intensities(test_reflections):
    """Test for correct choice of intensities."""
    test_refl = test_reflections
    intstr = "prf"
    new_rt = choose_scaling_intensities(test_refl, intstr)
    assert list(new_rt["intensity"]) == list(test_refl["intensity.prf.value"])
    assert list(new_rt["variance"]) == list(test_refl["intensity.prf.variance"])
    intstr = "sum"  # should apply partiality correction
    new_rt = choose_scaling_intensities(test_refl, intstr)
    assert list(new_rt["intensity"]) == list(
        test_refl["intensity.sum.value"] / test_refl["partiality"]
    )
    assert list(new_rt["variance"]) == pytest.approx(
        list(test_refl["intensity.sum.variance"] / flex.pow2(test_refl["partiality"]))
    )
    # An unrecognised choice currently falls back to the prf values.
    intstr = "bad"
    new_rt = choose_scaling_intensities(test_refl, intstr)
    assert list(new_rt["intensity"]) == list(test_refl["intensity.prf.value"])
    assert list(new_rt["variance"]) == list(test_refl["intensity.prf.variance"])
import sys
import logging

logger = logging.getLogger(__name__)

# The DIALS helpers used below (calculate_prescaling_correction,
# combine_intensities, choose_scaling_intensities, IhTable and
# run_error_model_refinement) are assumed to be imported from the dials
# scaling modules; their exact import lines are not shown in the source.


def refine_error_model(params, experiments, reflection_tables):
    """Refine the error model; return it, or None if refinement fails."""

    # Prepare the relevant data for the Ih_table data structures.
    for i, table in enumerate(reflection_tables):
        # First get the good data
        selection = ~(table.get_flags(table.flags.bad_for_scaling, all=False))
        outliers = table.get_flags(table.flags.outlier_in_scaling)
        table = table.select(selection & ~outliers)

        # Now choose the intensities; ideally these two options could be
        # combined with a smart refactor.
        if params.intensity_choice == "combine":
            if not params.combine.Imid:
                sys.exit(
                    "Imid value must be provided if intensity_choice=combine")
            table = calculate_prescaling_correction(table)  # needed for below.
            I, V = combine_intensities(table, params.combine.Imid)
            table["intensity"] = I
            table["variance"] = V
        else:
            table = choose_scaling_intensities(
                table, intensity_choice=params.intensity_choice)
        reflection_tables[i] = table
    space_group = experiments[0].crystal.get_space_group()
    Ih_table = IhTable(reflection_tables,
                       space_group,
                       additional_cols=["partiality"])

    # now do the error model refinement
    try:
        model = run_error_model_refinement(params, Ih_table)
    except (ValueError, RuntimeError) as e:
        logger.info(e)
        return None  # refinement failed; the caller must handle None
    else:
        return model
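
# A hedged usage sketch (the helper name is hypothetical, not from the source):
# refine_error_model returns None when run_error_model_refinement raises
# ValueError or RuntimeError, so callers should guard against that.
def report_error_model(params, experiments, reflection_tables):
    """Sketch: refine an error model and log the outcome (hypothetical helper)."""
    model = refine_error_model(params, experiments, reflection_tables)
    if model is None:
        logger.info("Error model refinement failed; keeping the existing model.")
    else:
        logger.info("Refined error model:\n%s", model)
    return model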
    # Classmethod from a scaler factory class (the enclosing class definition
    # is not shown in this snippet).
    @classmethod
    def create(cls, params, experiment, reflection_table, for_multi=False):
        """Perform reflection_table preprocessing and create a SingleScaler."""

        cls.ensure_experiment_identifier(params, experiment, reflection_table)

        logger.info(
            "Preprocessing data for scaling. The id assigned to this \n"
            "dataset is %s, and the scaling model type being applied is %s. \n",
            list(reflection_table.experiment_identifiers().values())[0],
            experiment.scaling_model.id_,
        )

        reflection_table, reasons = cls.filter_bad_reflections(
            reflection_table)

        if "inverse_scale_factor" not in reflection_table:
            reflection_table["inverse_scale_factor"] = flex.double(
                reflection_table.size(), 1.0)
        elif (reflection_table["inverse_scale_factor"].count(0.0) ==
              reflection_table.size()):
            reflection_table["inverse_scale_factor"] = flex.double(
                reflection_table.size(), 1.0)
        reflection_table = choose_scaling_intensities(
            reflection_table, params.reflection_selection.intensity_choice)

        excluded_for_scaling = reflection_table.get_flags(
            reflection_table.flags.excluded_for_scaling)
        user_excluded = reflection_table.get_flags(
            reflection_table.flags.user_excluded_in_scaling)
        reasons.add_reason("user excluded", user_excluded.count(True))
        reasons.add_reason("excluded for scaling",
                           excluded_for_scaling.count(True))
        n_excluded = (excluded_for_scaling | user_excluded).count(True)
        if n_excluded == reflection_table.size():
            logger.info(
                "All reflections were determined to be unsuitable for scaling."
            )
            logger.info(reasons)
            raise BadDatasetForScalingException(
                """Unable to use this dataset for scaling""")
        else:
            logger.info(
                "%s/%s reflections not suitable for scaling\n%s",
                n_excluded,
                reflection_table.size(),
                reasons,
            )

        if not for_multi:
            determine_reflection_selection_parameters(params, [experiment],
                                                      [reflection_table])
        if params.reflection_selection.method == "intensity_ranges":
            reflection_table = quasi_normalisation(reflection_table,
                                                   experiment)
        if (params.reflection_selection.method
                in (None, Auto, "auto", "quasi_random")) or (
                    experiment.scaling_model.id_ == "physical"
                    and "absorption" in experiment.scaling_model.components):
            if experiment.scan:
                # Calculate phi and the reflection vectors in the crystal frame.
                reflection_table["phi"] = (
                    reflection_table["xyzobs.px.value"].parts()[2] *
                    experiment.scan.get_oscillation()[1])
                reflection_table = calc_crystal_frame_vectors(
                    reflection_table, experiment)

        return SingleScaler(params, experiment, reflection_table, for_multi)
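
# A hedged usage sketch (SingleScalerFactory is an assumed class name following
# DIALS conventions; the enclosing class of create() is not shown above).
# Callers should handle BadDatasetForScalingException, raised when every
# reflection is unsuitable for scaling:
#
#     try:
#         scaler = SingleScalerFactory.create(params, experiment, reflection_table)
#     except BadDatasetForScalingException as e:
#         logger.info(e)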