import numpy as np
import pytest

# quasi_normalisation and the refl_for_norm helper are assumed to be imported
# from the dials scaling code / test fixtures at module scope.


def test_quasi_normalisation(simple_reflection_table, test_exp_E2, test_exp_P1):
    """Test the quasi_normalisation function."""
    # Test that for small datasets, all Esq values are set to one.
    refl = quasi_normalisation(simple_reflection_table, test_exp_E2)
    assert list(refl["Esq"]) == [1.0, 1.0, 1.0]

    # For a larger dataset, each of the nine 1100-reflection blocks should be
    # normalised to a linear ramp of Esq values from 0.9 to 1.1.
    rt = refl_for_norm()
    new_rt = quasi_normalisation(rt, test_exp_P1)
    for i in range(9):
        assert list(new_rt["Esq"][i * 1100 : (i + 1) * 1100]) == pytest.approx(
            list(np.linspace(0.9, 1.1, num=1100, endpoint=True))
        )
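# A minimal sketch (not the dials implementation) of the idea being tested:
# quasi-normalisation assigns each reflection Esq = I / <I>, where <I> is the
# mean intensity of its resolution bin, so Esq values average to ~1 within
# each bin. All names below are illustrative assumptions, not dials API.
import numpy as np


def quasi_normalise_sketch(intensities, bin_indices, n_bins):
    """Return intensities divided by the mean intensity of their bin."""
    means = np.array([intensities[bin_indices == b].mean() for b in range(n_bins)])
    return intensities / means[bin_indices]


# Usage: two bins with different mean intensities both normalise to mean ~1.
rng = np.random.default_rng(0)
intensities = np.concatenate([rng.gamma(2.0, 5.0, 500), rng.gamma(2.0, 1.0, 500)])
bins = np.repeat([0, 1], 500)
esq = quasi_normalise_sketch(intensities, bins, 2)
assert abs(esq[:500].mean() - 1.0) < 1e-6 and abs(esq[500:].mean() - 1.0) < 1e-6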
def create(cls, params, experiment, reflection_table, for_multi=False):
    """Perform reflection_table preprocessing and create a SingleScaler."""
    cls.ensure_experiment_identifier(params, experiment, reflection_table)
    logger.info(
        "Preprocessing data for scaling. The id assigned to this \n"
        "dataset is %s, and the scaling model type being applied is %s. \n",
        list(reflection_table.experiment_identifiers().values())[0],
        experiment.scaling_model.id_,
    )
    reflection_table, reasons = cls.filter_bad_reflections(reflection_table)
    # Initialise the inverse scale factors if absent, or reset them if every
    # value is zero (a degenerate state left by earlier processing).
    if "inverse_scale_factor" not in reflection_table:
        reflection_table["inverse_scale_factor"] = flex.double(
            reflection_table.size(), 1.0
        )
    elif (
        reflection_table["inverse_scale_factor"].count(0.0)
        == reflection_table.size()
    ):
        reflection_table["inverse_scale_factor"] = flex.double(
            reflection_table.size(), 1.0
        )
    reflection_table = choose_scaling_intensities(
        reflection_table, params.reflection_selection.intensity_choice
    )
    # Tally reflections flagged as unusable, by the pipeline or by the user.
    excluded_for_scaling = reflection_table.get_flags(
        reflection_table.flags.excluded_for_scaling
    )
    user_excluded = reflection_table.get_flags(
        reflection_table.flags.user_excluded_in_scaling
    )
    reasons.add_reason("user excluded", user_excluded.count(True))
    reasons.add_reason("excluded for scaling", excluded_for_scaling.count(True))
    n_excluded = (excluded_for_scaling | user_excluded).count(True)
    if n_excluded == reflection_table.size():
        logger.info("All reflections were determined to be unsuitable for scaling.")
        logger.info(reasons)
        raise BadDatasetForScalingException("Unable to use this dataset for scaling")
    logger.info(
        "%s/%s reflections not suitable for scaling\n%s",
        n_excluded,
        reflection_table.size(),
        reasons,
    )
    if not for_multi:
        determine_reflection_selection_parameters(
            params, [experiment], [reflection_table]
        )
    if params.reflection_selection.method == "intensity_ranges":
        reflection_table = quasi_normalisation(reflection_table, experiment)
    if (
        params.reflection_selection.method in (None, Auto, "auto", "quasi_random")
    ) or (
        experiment.scaling_model.id_ == "physical"
        and "absorption" in experiment.scaling_model.components
    ):
        if experiment.scan:
            # Calculate the rotation angle phi and the crystal-frame vectors
            # needed for quasi-random selection / the absorption correction.
            reflection_table["phi"] = (
                reflection_table["xyzobs.px.value"].parts()[2]
                * experiment.scan.get_oscillation()[1]
            )
            reflection_table = calc_crystal_frame_vectors(
                reflection_table, experiment
            )
    return SingleScaler(params, experiment, reflection_table, for_multi)
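# Hedged sketch (not the dials implementation): the `Reasons` object used in
# the exclusion accounting above and below is assumed to be a simple
# accumulator mapping exclusion-reason labels to counts, printable via
# logger.info(reasons). The class name and layout here are illustrative.
class ReasonsSketch:
    """Minimal stand-in illustrating the assumed Reasons interface."""

    def __init__(self):
        self.reasons = {}

    def add_reason(self, text, number):
        # Record how many reflections were excluded for this reason.
        self.reasons[text] = number

    def __repr__(self):
        return "\n".join(
            "%s reflections excluded: %s" % (n, t)
            for t, n in self.reasons.items()
        )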
def create(cls, params, experiment, reflection_table, for_multi=False):
    """Perform reflection_table preprocessing and create a SingleScaler."""
    cls.ensure_experiment_identifier(experiment, reflection_table)
    logger.info(
        "The scaling model type being applied is %s. \n",
        experiment.scaling_model.id_,
    )
    try:
        reflection_table = cls.filter_bad_reflections(
            reflection_table,
            partiality_cutoff=params.cut_data.partiality_cutoff,
            min_isigi=params.cut_data.min_isigi,
            intensity_choice=params.reflection_selection.intensity_choice,
        )
    except ValueError:
        raise BadDatasetForScalingException
    # Combine partial measurements of the same reflection, to handle those
    # reflections that were split by dials.integrate - changes the size of the
    # reflection table.
    reflection_table = sum_partial_reflections(reflection_table)
    # Initialise the inverse scale factors if absent, or reset them if every
    # value is zero.
    if "inverse_scale_factor" not in reflection_table:
        reflection_table["inverse_scale_factor"] = flex.double(
            reflection_table.size(), 1.0
        )
    elif (
        reflection_table["inverse_scale_factor"].count(0.0)
        == reflection_table.size()
    ):
        reflection_table["inverse_scale_factor"] = flex.double(
            reflection_table.size(), 1.0
        )
    reflection_table = choose_initial_scaling_intensities(
        reflection_table, params.reflection_selection.intensity_choice
    )
    # Tally reflections flagged as unusable, by the pipeline or by the user.
    excluded_for_scaling = reflection_table.get_flags(
        reflection_table.flags.excluded_for_scaling
    )
    user_excluded = reflection_table.get_flags(
        reflection_table.flags.user_excluded_in_scaling
    )
    reasons = Reasons()
    reasons.add_reason("user excluded", user_excluded.count(True))
    reasons.add_reason("excluded for scaling", excluded_for_scaling.count(True))
    n_excluded = (excluded_for_scaling | user_excluded).count(True)
    if n_excluded == reflection_table.size():
        logger.info("All reflections were determined to be unsuitable for scaling.")
        logger.info(reasons)
        raise BadDatasetForScalingException("Unable to use this dataset for scaling")
    logger.info(
        "Excluding %s/%s reflections\n%s",
        n_excluded,
        reflection_table.size(),
        reasons,
    )
    if params.reflection_selection.method == "intensity_ranges":
        reflection_table = quasi_normalisation(reflection_table, experiment)
    if (
        params.reflection_selection.method in (None, Auto, "auto", "quasi_random")
    ) or (
        experiment.scaling_model.id_ == "physical"
        and "absorption" in experiment.scaling_model.components
    ):
        if experiment.scan:
            # Calculate crystal-frame vectors, then rotate them so that the
            # chosen alignment axis lies along z, as required for the
            # absorption correction.
            reflection_table = calc_crystal_frame_vectors(
                reflection_table, experiment
            )
            alignment_axis = (1.0, 0.0, 0.0)
            reflection_table["s0c"] = align_axis_along_z(
                alignment_axis, reflection_table["s0c"]
            )
            reflection_table["s1c"] = align_axis_along_z(
                alignment_axis, reflection_table["s1c"]
            )
    try:
        scaler = SingleScaler(params, experiment, reflection_table, for_multi)
    except BadDatasetForScalingException as e:
        raise ValueError(e)
    else:
        return scaler
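# Hedged usage sketch: how a caller might drive the factory method above.
# `SingleScalerFactory` is an assumed enclosing class name; `params`,
# `experiment` and `reflection_table` follow the dials conventions used above.
def make_scaler(params, experiment, reflection_table):
    try:
        return SingleScalerFactory.create(params, experiment, reflection_table)
    except (BadDatasetForScalingException, ValueError) as e:
        # The newer create() converts scaler-construction failures to
        # ValueError, while preprocessing failures surface as
        # BadDatasetForScalingException; handle both here.
        logger.info("Dataset unsuitable for scaling: %s", e)
        return None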