Example #1
    def create(cls, params, experiments, reflections):
        """create a list of single scalers to pass to a MultiScaler."""
        single_scalers = []
        offset = 0
        for i in range(len(reflections)):
            # Remove bad datasets that literally have no integrated reflections
            try:
                scaler = SingleScalerFactory.create(
                    params,
                    experiments[i - offset],
                    reflections[i - offset],
                    for_multi=True,
                )
                single_scalers.append(scaler)
            except BadDatasetForScalingException as e:
                logger.info(e)
                logger.info("Removing experiment " + str(i) + "\n" + "=" * 80 + "\n")
                del experiments[i - offset]
                del reflections[i - offset]
                offset += 1
        assert len(experiments) == len(single_scalers), (
            len(experiments),
            len(single_scalers),
        )
        assert len(experiments) == len(reflections), (
            len(experiments),
            len(reflections),
        )
        determine_reflection_selection_parameters(params, experiments, reflections)
        return MultiScaler(params, experiments, single_scalers)
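The offset bookkeeping above is the standard way to delete from a list while iterating over its original index range: each deletion shifts every later element left by one, so subsequent lookups subtract the number of deletions made so far. A minimal, self-contained sketch of the same pattern (the is_valid predicate and data are hypothetical, not part of the scaling code):

    def keep_valid(items, is_valid):
        """Delete failing entries from items in place, iterating over the
        original index range and compensating with a deletion offset."""
        offset = 0
        for i in range(len(items)):
            if not is_valid(items[i - offset]):
                del items[i - offset]  # later elements shift left by one
                offset += 1
        return items

    data = [3, -1, 4, -1, 5]
    assert keep_valid(data, lambda x: x >= 0) == [3, 4, 5]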
Example #2
    def create(cls, params, experiments, reflections):
        """create a list of single scalers to pass to a MultiScaler."""
        single_scalers = []
        idx_to_remove = []
        for i, (expt, refl) in enumerate(zip(experiments, reflections)):
            # Remove bad datasets that literally have no integrated reflections
            try:
                scaler = SingleScalerFactory.create(params, expt, refl, for_multi=True)
            except BadDatasetForScalingException as e:
                logger.info(e)
                idx_to_remove.append(i)
            else:
                single_scalers.append(scaler)
        if idx_to_remove:
            for j in idx_to_remove[::-1]:
                del experiments[j]
                del reflections[j]
            logger.info(
                "Removed experiments %s", " ".join(str(i) for i in idx_to_remove)
            )
        n_exp, n_refl, n_ss = len(experiments), len(reflections), len(single_scalers)
        assert n_exp == n_ss, (n_exp, n_ss)
        assert n_exp == n_refl, (n_exp, n_refl)
        determine_reflection_selection_parameters(params, experiments, reflections)
        return MultiScaler(single_scalers)
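This later revision of the same factory drops the offset arithmetic for a two-pass scheme: the loop only records the indices of bad datasets, and the deletions happen afterwards in reverse order, so removing one entry can no longer shift any index that is still pending. The try/except/else also makes the success path explicit. A minimal sketch of the reverse-order deletion step (list contents are hypothetical):

    items = ["a", "b", "c", "d", "e"]
    idx_to_remove = [1, 3]

    # Deleting from the highest index down keeps the remaining recorded
    # indices valid as the list shrinks.
    for j in sorted(idx_to_remove, reverse=True):
        del items[j]

    assert items == ["a", "c", "e"]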
Example #3
    def create(cls, params, experiments, reflections):
        """sort scaled and unscaled datasets to pass to TargetScaler"""
        scaled_experiments = []
        scaled_scalers = []
        unscaled_scalers = []
        idx_to_remove = []

        for i, (expt, refl) in enumerate(zip(experiments, reflections)):
            # Remove bad datasets that literally have no integrated reflections
            try:
                scaler = SingleScalerFactory.create(params,
                                                    expt,
                                                    refl,
                                                    for_multi=True)
            except BadDatasetForScalingException as e:
                logger.info(e)
                idx_to_remove.append(i)
            else:
                if expt.scaling_model.is_scaled:
                    scaled_scalers.append(scaler)
                    scaled_experiments.append(expt)
                else:
                    unscaled_scalers.append(scaler)
        if idx_to_remove:
            for j in idx_to_remove[::-1]:
                del experiments[j]
                del reflections[j]
            logger.info("Removed experiments %s",
                        " ".join(str(i) for i in idx_to_remove))

        n_exp, n_refl = (len(experiments), len(reflections))
        n_ss, n_us = (len(scaled_scalers), len(unscaled_scalers))
        assert n_exp == n_ss + n_us, (n_exp, str(n_ss) + " + " + str(n_us))
        assert n_exp == n_refl, (n_exp, n_refl)
        determine_reflection_selection_parameters(params, experiments,
                                                  reflections)
        return TargetScaler(scaled_scalers, unscaled_scalers)
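This TargetScaler variant reuses the two-pass removal from Example #2 and additionally partitions the surviving datasets by expt.scaling_model.is_scaled; note that scaled_experiments is collected here but not passed to the TargetScaler constructor in this version. The partitioning itself is just a split on a boolean attribute; a hypothetical standalone illustration (Dataset is a stand-in for an experiment carrying a scaling model):

    from dataclasses import dataclass

    @dataclass
    class Dataset:  # stand-in for an experiment with a scaling model
        name: str
        is_scaled: bool

    datasets = [Dataset("a", True), Dataset("b", False), Dataset("c", True)]

    scaled = [d for d in datasets if d.is_scaled]
    unscaled = [d for d in datasets if not d.is_scaled]

    assert [d.name for d in scaled] == ["a", "c"]
    assert [d.name for d in unscaled] == ["b"]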
Example #4
    def create_for_target_against_reference(cls, params, experiments,
                                            reflections):
        """Create TargetScaler for case where have a target_mtz or target_model."""
        scaled_scalers = []
        unscaled_scalers = []
        idx_to_remove = []

        for i, (expt, refl) in enumerate(zip(experiments[:-1], reflections[:-1])):
            # Remove bad datasets that literally have no integrated reflections
            try:
                scaler = SingleScalerFactory.create(params,
                                                    expt,
                                                    refl,
                                                    for_multi=True)
            except BadDatasetForScalingException as e:
                logger.info(e)
                idx_to_remove.append(i)
            else:
                unscaled_scalers.append(scaler)
        if idx_to_remove:
            for j in idx_to_remove[::-1]:
                del experiments[j]
                del reflections[j]
            logger.info("Removed experiments %s",
                        " ".join(str(i) for i in idx_to_remove))
        scaled_scalers = [
            NullScalerFactory.create(params, experiments[-1], reflections[-1])
        ]

        n_exp, n_refl = (len(experiments), len(reflections))
        n_ss, n_us = (len(scaled_scalers), len(unscaled_scalers))
        assert n_exp == n_ss + n_us, (n_exp, str(n_ss) + " + " + str(n_us))
        assert n_exp == n_refl, (n_exp, n_refl)
        determine_reflection_selection_parameters(params, experiments,
                                                  reflections)
        return TargetScaler(scaled_scalers, unscaled_scalers)
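When scaling against a target_mtz or target_model, every dataset except the last gets an ordinary SingleScaler, and the final experiment/reflection pair is wrapped in a NullScaler so its values stay fixed as the reference. The experiments[:-1]/reflections[:-1] slices are the key convention: the caller must have appended the reference dataset last. A hypothetical sketch of that convention (names are illustrative only):

    # The last entry is reserved as the fixed reference; everything before
    # it is queued to be scaled against it.
    datasets = ["d0", "d1", "d2", "reference"]

    to_scale = datasets[:-1]   # cf. experiments[:-1] / reflections[:-1]
    reference = datasets[-1]   # wrapped in a NullScaler above

    assert to_scale == ["d0", "d1", "d2"]
    assert reference == "reference"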
Example #5
    def create(cls, params, experiment, reflection_table, for_multi=False):
        """Perform reflection_table preprocessing and create a SingleScaler."""

        cls.ensure_experiment_identifier(params, experiment, reflection_table)

        logger.info(
            "Preprocessing data for scaling. The id assigned to this \n"
            "dataset is %s, and the scaling model type being applied is %s. \n",
            list(reflection_table.experiment_identifiers().values())[0],
            experiment.scaling_model.id_,
        )

        reflection_table, reasons = cls.filter_bad_reflections(
            reflection_table)

        if "inverse_scale_factor" not in reflection_table:
            reflection_table["inverse_scale_factor"] = flex.double(
                reflection_table.size(), 1.0)
        elif (reflection_table["inverse_scale_factor"].count(0.0) ==
              reflection_table.size()):
            reflection_table["inverse_scale_factor"] = flex.double(
                reflection_table.size(), 1.0)
        reflection_table = choose_scaling_intensities(
            reflection_table, params.reflection_selection.intensity_choice)

        excluded_for_scaling = reflection_table.get_flags(
            reflection_table.flags.excluded_for_scaling)
        user_excluded = reflection_table.get_flags(
            reflection_table.flags.user_excluded_in_scaling)
        reasons.add_reason("user excluded", user_excluded.count(True))
        reasons.add_reason("excluded for scaling",
                           excluded_for_scaling.count(True))
        n_excluded = (excluded_for_scaling | user_excluded).count(True)
        if n_excluded == reflection_table.size():
            logger.info(
                "All reflections were determined to be unsuitable for scaling."
            )
            logger.info(reasons)
            raise BadDatasetForScalingException(
                """Unable to use this dataset for scaling""")
        else:
            logger.info(
                "%s/%s reflections not suitable for scaling\n%s",
                n_excluded,
                reflection_table.size(),
                reasons,
            )

        if not for_multi:
            determine_reflection_selection_parameters(params, [experiment],
                                                      [reflection_table])
        if params.reflection_selection.method == "intensity_ranges":
            reflection_table = quasi_normalisation(reflection_table,
                                                   experiment)
        if (params.reflection_selection.method
                in (None, Auto, "auto", "quasi_random")) or (
                    experiment.scaling_model.id_ == "physical"
                    and "absorption" in experiment.scaling_model.components):
            if experiment.scan:
                # calc theta and phi cryst
                reflection_table["phi"] = (
                    reflection_table["xyzobs.px.value"].parts()[2] *
                    experiment.scan.get_oscillation()[1])
                reflection_table = calc_crystal_frame_vectors(
                    reflection_table, experiment)

        return SingleScaler(params, experiment, reflection_table, for_multi)
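The single-scaler factory performs the whole preprocessing chain: ensure an experiment identifier, filter bad reflections, (re)initialise the inverse_scale_factor column, choose which intensities to scale, and combine the exclusion flags, raising BadDatasetForScalingException when no usable reflections remain, which is exactly what the multi-dataset factories above catch. The flag handling reduces to OR-ing two boolean masks and comparing the count with the table size; a plain-Python analogue (the lists stand in for flex boolean arrays returned by get_flags):

    # Plain-Python analogue of the exclusion bookkeeping above.
    excluded_for_scaling = [False, True, False, True]
    user_excluded = [False, False, True, True]

    # cf. (excluded_for_scaling | user_excluded).count(True)
    n_excluded = sum(a or b for a, b in zip(excluded_for_scaling, user_excluded))
    n_total = len(excluded_for_scaling)

    if n_excluded == n_total:
        raise ValueError("Unable to use this dataset for scaling")
    print("%s/%s reflections not suitable for scaling" % (n_excluded, n_total))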
Example #6
    def create(cls, params, experiments, reflections, is_scaled_list):
        """sort scaled and unscaled datasets to pass to TargetScaler"""
        scaled_experiments = []
        scaled_scalers = []
        unscaled_scalers = []
        offset = 0
        for i in range(len(reflections)):
            if is_scaled_list[i] is True:
                if (
                    params.scaling_options.target_model
                    or params.scaling_options.target_mtz
                ):
                    scaled_experiments.append(experiments[i - offset])
                    scaled_scalers.append(
                        NullScalerFactory.create(
                            params, experiments[i - offset], reflections[i - offset]
                        )
                    )
                else:
                    try:
                        scaled_scalers.append(
                            SingleScalerFactory.create(
                                params,
                                experiments[i - offset],
                                reflections[i - offset],
                                for_multi=True,
                            )
                        )
                        scaled_experiments.append(experiments[i - offset])
                    except BadDatasetForScalingException as e:
                        logger.info(e)
                        logger.info(
                            "Removing experiment " + str(i) + "\n" + "=" * 80 + "\n"
                        )
                        del experiments[i - offset]
                        del reflections[i - offset]
                        offset += 1
            else:
                try:
                    unscaled_scalers.append(
                        SingleScalerFactory.create(
                            params,
                            experiments[i - offset],
                            reflections[i - offset],
                            for_multi=True,
                        )
                    )
                except BadDatasetForScalingException as e:
                    logger.info(e)
                    logger.info(
                        "Removing experiment " + str(i) + "\n" + "=" * 80 + "\n"
                    )
                    del experiments[i - offset]
                    del reflections[i - offset]
                    offset += 1
        assert len(experiments) == len(scaled_scalers) + len(unscaled_scalers), (
            len(experiments),
            str(len(scaled_scalers)) + " + " + str(len(unscaled_scalers)),
        )
        assert len(experiments) == len(reflections), (
            len(experiments),
            len(reflections),
        )
        determine_reflection_selection_parameters(params, experiments, reflections)
        return TargetScaler(
            params, scaled_experiments, scaled_scalers, unscaled_scalers
        )
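This is the older combined form of Examples #3 and #4: an explicit is_scaled_list chooses the branch per dataset, NullScalers are substituted for already-scaled data whenever a target_model or target_mtz is supplied, and removals use the same offset arithmetic as Example #1. A hypothetical sketch of the per-dataset branching (the string labels are illustrative only):

    def pick_scaler(is_scaled, has_external_target):
        # Already-scaled data become the target: fixed (Null) scalers when an
        # external model/mtz is the target, otherwise ordinary scalers.
        if is_scaled:
            return "null" if has_external_target else "scaled"
        return "unscaled"

    assert pick_scaler(True, True) == "null"
    assert pick_scaler(True, False) == "scaled"
    assert pick_scaler(False, True) == "unscaled"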