def test_reasons():
    """Test the Reasons class, which is basically a dictionary with a nice
    printed output."""
    reasons = Reasons()
    reasons.add_reason("test reason", 100)
    assert reasons.reasons["test reason"] == 100
    print(reasons)
    expected_output = """Reflections passing individual criteria:
criterion: test reason, reflections: 100
"""
    # Use the builtin repr() rather than calling the dunder directly.
    assert repr(reasons) == expected_output
def calculate_scaling_subset_ranges_with_E2(reflection_table, params):
    """Select reflections with non-zero weight and update scale weights."""
    reasons = Reasons()
    # Start from everything not excluded, either by the user or automatically.
    excluded_by_user = reflection_table.get_flags(
        reflection_table.flags.user_excluded_in_scaling)
    excluded_for_scaling = reflection_table.get_flags(
        reflection_table.flags.excluded_for_scaling)
    keep = ~excluded_by_user
    keep &= ~excluded_for_scaling
    reasons.add_reason("suitable/selected for scaling", keep.count(True))
    # Only apply an E^2 filter when real normalised intensities are present
    # (i.e. not every Esq value is the 1.0 placeholder).
    if reflection_table["Esq"].count(1.0) != reflection_table.size():
        e2_selection, reason = _determine_E2_range_selection(
            reflection_table, params)
        reasons.add_reason(reason, e2_selection.count(True))
        keep &= e2_selection
    range_selection, reasons = _common_range_selections(
        reasons, reflection_table, params)
    keep &= range_selection
    logger.info(
        "%s reflections were selected for scale factor determination \n"
        "out of %s suitable reflections: \n%s",
        keep.count(True),
        reflection_table.size(),
        reasons,
    )
    if keep.count(True) == 0:
        raise BadDatasetForScalingException(
            """No reflections pass all user-controllable selection criteria""")
    return keep
def filter_bad_reflections(cls, reflections):
    """Initial filter to select integrated reflections.

    Flags reflections not integrated by any method (and, when a "d" column
    is present, those with a non-positive d-value) as excluded_for_scaling,
    returning the table plus a Reasons summary of what was excluded.
    """
    reasons = Reasons()
    bad = ~reflections.get_flags(reflections.flags.integrated, all=False)
    reasons.add_reason("not integrated by any method", bad.count(True))
    if "d" in reflections:
        nonpositive_d = reflections["d"] <= 0.0
        reasons.add_reason("bad d-value", nonpositive_d.count(True))
        bad = bad | nonpositive_d
    reflections.set_flags(bad, reflections.flags.excluded_for_scaling)
    return reflections, reasons
def calculate_scaling_subset_ranges(reflection_table, params, print_summary=False):
    """Preselect reflections for scale factor determination using the common
    range criteria, optionally logging a summary of the selection."""
    selection, reasons = _common_range_selections(
        Reasons(), reflection_table, params)
    if print_summary:
        logger.info(
            "%s reflections were preselected for scale factor determination \n"
            "out of %s suitable reflections: \n%s",
            selection.count(True),
            reflection_table.size(),
            reasons,
        )
    # An empty selection means no usable reflections remain at all.
    if selection.count(True) == 0:
        raise BadDatasetForScalingException(
            """No reflections pass all user-controllable selection criteria""")
    return selection
def create(cls, params, experiment, reflection_table, for_multi=False):
    """Perform reflection_table preprocessing and create a SingleScaler.

    Steps, in order: filter bad reflections, reset degenerate inverse scale
    factors, choose the intensities to scale, account for and report excluded
    reflections, optionally quasi-normalise, and precompute crystal-frame
    vectors when the selection method / model needs them.

    Raises:
        BadDatasetForScalingException: if initial filtering raises ValueError
            or if every reflection is excluded from scaling.
    """
    cls.ensure_experiment_identifier(experiment, reflection_table)
    logger.info(
        "The scaling model type being applied is %s. \n",
        experiment.scaling_model.id_,
    )
    try:
        reflection_table = cls.filter_bad_reflections(
            reflection_table,
            partiality_cutoff=params.cut_data.partiality_cutoff,
            min_isigi=params.cut_data.min_isigi,
        )
    except ValueError:
        # Translate a filtering failure into the scaling-specific exception.
        raise BadDatasetForScalingException
    # Ensure a usable inverse_scale_factor column: create it if missing, or
    # reset it to 1.0 if every entry is the degenerate value 0.0.
    if "inverse_scale_factor" not in reflection_table:
        reflection_table["inverse_scale_factor"] = flex.double(
            reflection_table.size(), 1.0)
    elif (reflection_table["inverse_scale_factor"].count(0.0) ==
          reflection_table.size()):
        reflection_table["inverse_scale_factor"] = flex.double(
            reflection_table.size(), 1.0)
    reflection_table = choose_scaling_intensities(
        reflection_table, params.reflection_selection.intensity_choice)
    # Tally exclusions (automatic and user-requested) for reporting.
    excluded_for_scaling = reflection_table.get_flags(
        reflection_table.flags.excluded_for_scaling)
    user_excluded = reflection_table.get_flags(
        reflection_table.flags.user_excluded_in_scaling)
    reasons = Reasons()
    reasons.add_reason("user excluded", user_excluded.count(True))
    reasons.add_reason("excluded for scaling", excluded_for_scaling.count(True))
    n_excluded = (excluded_for_scaling | user_excluded).count(True)
    if n_excluded == reflection_table.size():
        # Nothing left to scale: report why, then abort.
        logger.info(
            "All reflections were determined to be unsuitable for scaling."
        )
        logger.info(reasons)
        raise BadDatasetForScalingException(
            """Unable to use this dataset for scaling""")
    else:
        logger.info(
            "Excluding %s/%s reflections\n%s",
            n_excluded,
            reflection_table.size(),
            reasons,
        )
    if params.reflection_selection.method == "intensity_ranges":
        reflection_table = quasi_normalisation(reflection_table, experiment)
    # Selection methods that sample reciprocal space (and physical models with
    # an absorption component) need crystal-frame vectors precomputed here.
    if (params.reflection_selection.method in
            (None, Auto, "auto", "quasi_random")) or (
            experiment.scaling_model.id_ == "physical"
            and "absorption" in experiment.scaling_model.components):
        if experiment.scan:
            # calc theta and phi cryst
            reflection_table["phi"] = (
                reflection_table["xyzobs.px.value"].parts()[2]
                * experiment.scan.get_oscillation()[1])
            reflection_table = calc_crystal_frame_vectors(
                reflection_table, experiment)
    return SingleScaler(params, experiment, reflection_table, for_multi)
def create(cls, params, experiment, reflection_table, for_multi=False):
    """Perform reflection_table preprocessing and create a SingleScaler.

    Steps, in order: filter bad reflections, sum partial measurements, reset
    degenerate inverse scale factors, choose the initial scaling intensities,
    account for and report excluded reflections, optionally quasi-normalise,
    and precompute/align crystal-frame vectors when the selection method or
    model needs them.

    Raises:
        BadDatasetForScalingException: if initial filtering raises ValueError.
        ValueError: if SingleScaler construction raises
            BadDatasetForScalingException (re-wrapped for the caller).
    """
    cls.ensure_experiment_identifier(experiment, reflection_table)
    logger.info(
        "The scaling model type being applied is %s. \n",
        experiment.scaling_model.id_,
    )
    try:
        reflection_table = cls.filter_bad_reflections(
            reflection_table,
            partiality_cutoff=params.cut_data.partiality_cutoff,
            min_isigi=params.cut_data.min_isigi,
            intensity_choice=params.reflection_selection.intensity_choice,
        )
    except ValueError:
        # Translate a filtering failure into the scaling-specific exception.
        raise BadDatasetForScalingException
    # combine partial measurements of same reflection, to handle those reflections
    # that were split by dials.integrate - changes size of reflection table.
    reflection_table = sum_partial_reflections(reflection_table)
    # Ensure a usable inverse_scale_factor column: create it if missing, or
    # reset it to 1.0 if every entry is the degenerate value 0.0.
    if "inverse_scale_factor" not in reflection_table:
        reflection_table["inverse_scale_factor"] = flex.double(
            reflection_table.size(), 1.0)
    elif (reflection_table["inverse_scale_factor"].count(0.0) ==
          reflection_table.size()):
        reflection_table["inverse_scale_factor"] = flex.double(
            reflection_table.size(), 1.0)
    reflection_table = choose_initial_scaling_intensities(
        reflection_table, params.reflection_selection.intensity_choice)
    # Tally exclusions (automatic and user-requested) for reporting.
    excluded_for_scaling = reflection_table.get_flags(
        reflection_table.flags.excluded_for_scaling)
    user_excluded = reflection_table.get_flags(
        reflection_table.flags.user_excluded_in_scaling)
    reasons = Reasons()
    reasons.add_reason("user excluded", user_excluded.count(True))
    reasons.add_reason("excluded for scaling", excluded_for_scaling.count(True))
    n_excluded = (excluded_for_scaling | user_excluded).count(True)
    if n_excluded == reflection_table.size():
        # Nothing left to scale: report why, then abort.
        logger.info(
            "All reflections were determined to be unsuitable for scaling."
        )
        logger.info(reasons)
        raise BadDatasetForScalingException(
            """Unable to use this dataset for scaling""")
    else:
        logger.info(
            "Excluding %s/%s reflections\n%s",
            n_excluded,
            reflection_table.size(),
            reasons,
        )
    if params.reflection_selection.method == "intensity_ranges":
        reflection_table = quasi_normalisation(reflection_table, experiment)
    # Selection methods that sample reciprocal space (and physical models with
    # an absorption component) need crystal-frame vectors computed and rotated
    # so the chosen axis lies along z.
    if (params.reflection_selection.method in
            (None, Auto, "auto", "quasi_random")) or (
            experiment.scaling_model.id_ == "physical"
            and "absorption" in experiment.scaling_model.components):
        if experiment.scan:
            reflection_table = calc_crystal_frame_vectors(
                reflection_table, experiment)
            alignment_axis = (1.0, 0.0, 0.0)
            reflection_table["s0c"] = align_axis_along_z(
                alignment_axis, reflection_table["s0c"])
            reflection_table["s1c"] = align_axis_along_z(
                alignment_axis, reflection_table["s1c"])
    try:
        scaler = SingleScaler(params, experiment, reflection_table, for_multi)
    except BadDatasetForScalingException as e:
        # Re-wrap so callers of create() see a ValueError at this boundary.
        raise ValueError(e)
    else:
        return scaler