Example #1
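# Assumed context (not shown in this excerpt): logger, flex (from
# dials.array_family), tabulate, the image-range helpers and the merging
# statistics utilities are imported at module level in the original file.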
def print_scaling_summary(script):
    """Log summary information after scaling."""
    logger.info(print_scaling_model_error_summary(script.experiments))
    valid_ranges = get_valid_image_ranges(script.experiments)
    image_ranges = get_image_ranges(script.experiments)
    msg = []
    for (img, valid, refl) in zip(image_ranges, valid_ranges,
                                  script.reflections):
        if valid:
            if (len(valid) > 1 or valid[0][0] != img[0]
                    or valid[-1][1] != img[1]):
                msg.append(
                    "Excluded images for experiment id: %s, image range: %s, limited range: %s"
                    % (
                        refl.experiment_identifiers().keys()[0],
                        list(img),
                        list(valid),
                    ))
    if msg:
        msg = ["Summary of image ranges removed:"] + msg
        logger.info("\n".join(msg))

    # report on partiality of dataset
    partials = flex.double()
    for r in script.reflections:
        if "partiality" in r:
            partials.extend(r["partiality"])
    # Boolean selections used to bin reflections by partiality for the
    # summary table below.
    not_full_sel = partials < 0.99
    not_zero_sel = partials > 0.01
    gt_half = partials > 0.5
    lt_half = partials < 0.5
    partial_gt_half_sel = not_full_sel & gt_half
    partial_lt_half_sel = not_zero_sel & lt_half
    logger.info("Summary of dataset partialities")
    header = ["Partiality (p)", "n_refl"]
    rows = [
        ["all reflections", str(partials.size())],
        ["p > 0.99", str(not_full_sel.count(False))],
        ["0.5 < p < 0.99", str(partial_gt_half_sel.count(True))],
        ["0.01 < p < 0.5", str(partial_lt_half_sel.count(True))],
        ["p < 0.01", str(not_zero_sel.count(False))],
    ]
    logger.info(tabulate(rows, header))
    logger.info(
        """
Reflections below a partiality_cutoff of %s are not considered for any
part of the scaling analysis or for the reporting of merging statistics.
Additionally, if applicable, only reflections with a min_partiality > %s
were considered for use when refining the scaling model.
""",
        script.params.cut_data.partiality_cutoff,
        script.params.reflection_selection.min_partiality,
    )
    stats = script.merging_statistics_result
    if stats:
        anom_stats, cut_stats, cut_anom_stats = (None, None, None)
        # Anomalous statistics are only meaningful for acentric space groups.
        if not script.scaled_miller_array.space_group().is_centric():
            anom_stats = script.anom_merging_statistics_result
        logger.info(make_merging_statistics_summary(stats))
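        # Fit CC-half as a function of resolution and estimate where it falls
        # to the 0.3 limit; the fit can fail on poor or sparse data.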
        try:
            d_min = resolution_cc_half(stats, limit=0.3).d_min
        except RuntimeError as e:
            logger.debug(f"Resolution fit failed: {e}")
        else:
            max_current_res = stats.bins[-1].d_min
            # Only report if the suggested d_min is more than 0.005 A larger
            # (i.e. lower resolution) than the current high-resolution cutoff.
            if d_min and d_min - max_current_res > 0.005:
                logger.info(
                    "Resolution limit suggested from CC\u00bd fit "
                    "(limit CC\u00bd=0.3): %.2f",
                    d_min,
                )
                try:
                    cut_stats, cut_anom_stats = merging_stats_from_scaled_array(
                        script.scaled_miller_array.resolution_filter(
                            d_min=d_min),
                        script.params.output.merging.nbins,
                        script.params.output.use_internal_variance,
                    )
                except DialsMergingStatisticsError:
                    pass
                else:
                    if script.scaled_miller_array.space_group().is_centric():
                        cut_anom_stats = None
        logger.info(
            table_1_summary(stats, anom_stats, cut_stats, cut_anom_stats))
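
A minimal, self-contained sketch (not from the source) of the flex
boolean-selection pattern used for the partiality table above. Note that
count(False) counts the complement of a selection, so the "p > 0.99" row in
fact includes reflections with p exactly equal to 0.99:

from scitbx.array_family import flex  # cctbx flex arrays, as used by DIALS

partials = flex.double([1.0, 0.98, 0.6, 0.3, 0.005])
not_full_sel = partials < 0.99            # elementwise compare -> flex.bool
assert not_full_sel.count(False) == 1     # reflections with p >= 0.99
# Intersect selections with elementwise & to form the 0.5 < p < 0.99 bin.
assert (not_full_sel & (partials > 0.5)).count(True) == 2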
Example #2
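    # Assumed context (not shown in this excerpt): time, logger,
    # AnalysisResults, deltacc_phil_scope, deltaccscript,
    # parse_multiple_datasets, get_valid_image_ranges, log_cycle_results and
    # register_scaler_observers come from the enclosing module's imports.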
    def run_scale_and_filter(self):
        """Run cycles of scaling and filtering."""
        start_time = time.time()
        results = AnalysisResults()

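        # Alternate rounds of scaling and delta-CC-half filtering, for up to
        # max_cycles cycles.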
        for counter in range(1,
                             self.params.filtering.deltacchalf.max_cycles + 1):
            self.run_scaling_cycle()

            if counter == 1:
                results.initial_expids_and_image_ranges = [
                    (exp.identifier,
                     exp.scan.get_image_range()) if exp.scan else None
                    for exp in self.experiments
                ]

            delta_cc_params = deltacc_phil_scope.extract()
            delta_cc_params.mode = self.params.filtering.deltacchalf.mode
            delta_cc_params.group_size = self.params.filtering.deltacchalf.group_size
            delta_cc_params.stdcutoff = self.params.filtering.deltacchalf.stdcutoff
            logger.info("\nPerforming a round of filtering.\n")

            script = deltaccscript(delta_cc_params, self.experiments,
                                   self.reflections)
            script.run()

            valid_image_ranges = get_valid_image_ranges(self.experiments)
            results.expids_and_image_ranges = [
                (exp.identifier, valid_image_ranges[i]) if exp.scan else None
                for i, exp in enumerate(self.experiments)
            ]

            self.experiments = script.experiments
            # Any dataset selection has now been applied, so clear it to
            # avoid re-applying it on subsequent cycles.
            self.params.dataset_selection.use_datasets = None
            self.params.dataset_selection.exclude_datasets = None

            results = log_cycle_results(results, self, script)
            logger.info(
                "Cycle %s of filtering, n_reflections removed this cycle: %s",
                counter,
                results.get_last_cycle_results()["n_removed"],
            )

            # Test termination conditions
            latest_results = results.get_last_cycle_results()
            if latest_results["n_removed"] == 0:
                logger.info(
                    "Finishing scaling and filtering as no data was removed "
                    "in this cycle."
                )
                if self.params.scaling_options.full_matrix:
                    self.reflections = parse_multiple_datasets(
                        script.reflections)
                    results = self._run_final_scale_cycle(results)
                else:
                    self.reflections = script.reflections
                results.finish(termination_reason="no_more_removed")
                break

            # Need to split reflections for further processing.
            self.reflections = parse_multiple_datasets(script.reflections)

            if (latest_results["cumul_percent_removed"] >
                    self.params.filtering.deltacchalf.max_percent_removed):
                logger.info(
                    "Finishing scaling and filtering as the percentage of "
                    "data removed now exceeds the limit."
                )
                results = self._run_final_scale_cycle(results)
                results.finish(termination_reason="max_percent_removed")
                break

            if self.params.filtering.deltacchalf.min_completeness:
                if (latest_results["merging_stats"]["completeness"] <
                        self.params.filtering.deltacchalf.min_completeness):
                    logger.info(
                        "Finishing scaling and filtering as completeness is "
                        "now below the cutoff."
                    )
                    results = self._run_final_scale_cycle(results)
                    results.finish(
                        termination_reason="below_completeness_limit")
                    break

            if counter == self.params.filtering.deltacchalf.max_cycles:
                logger.info("Finishing as reached max number of cycles.")
                results = self._run_final_scale_cycle(results)
                results.finish(termination_reason="max_cycles")
                break

            # If not finished, create a new scaler and try again.
            self._create_model_and_scaler()
            register_scaler_observers(self.scaler)
        self.filtering_results = results
        # Print summary of results
        logger.info(results.make_summary())

        # All done!
        logger.info("\nTotal time taken: {:.4f}s ".format(time.time() -
                                                          start_time))
        logger.info("%s%s%s", "\n", "=" * 80, "\n")
Example #3
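    # Assumed context (not shown in this excerpt): logger, flex, tabulate,
    # the observer classes (ScalingModelObserver, MergingStatisticsObserver)
    # and the image-range helpers come from the enclosing module's imports.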
    def print_scaling_summary(self, scaling_script):
        """Log summary information after scaling."""
        if ScalingModelObserver().data:
            logger.info(ScalingModelObserver().return_model_error_summary())
        valid_ranges = get_valid_image_ranges(scaling_script.experiments)
        image_ranges = get_image_ranges(scaling_script.experiments)
        msg = []
        for (img, valid, refl) in zip(
            image_ranges, valid_ranges, scaling_script.reflections
        ):
            if valid:
                if len(valid) > 1 or valid[0][0] != img[0] or valid[-1][1] != img[1]:
                    msg.append(
                        "Excluded images for experiment id: %s, image range: %s, limited range: %s"
                        % (
                            refl.experiment_identifiers().keys()[0],
                            list(img),
                            list(valid),
                        )
                    )
        if msg:
            msg = ["Summary of image ranges removed:"] + msg
            logger.info("\n".join(msg))

        # report on partiality of dataset
        partials = flex.double()
        for r in scaling_script.reflections:
            if "partiality" in r:
                partials.extend(r["partiality"])
        not_full_sel = partials < 0.99
        not_zero_sel = partials > 0.01
        gt_half = partials > 0.5
        lt_half = partials < 0.5
        partial_gt_half_sel = not_full_sel & gt_half
        partial_lt_half_sel = not_zero_sel & lt_half
        logger.info("Summary of dataset partialities")
        header = ["Partiality (p)", "n_refl"]
        rows = [
            ["all reflections", str(partials.size())],
            ["p > 0.99", str(not_full_sel.count(False))],
            ["0.5 < p < 0.99", str(partial_gt_half_sel.count(True))],
            ["0.01 < p < 0.5", str(partial_lt_half_sel.count(True))],
            ["p < 0.01", str(not_zero_sel.count(False))],
        ]
        logger.info(tabulate(rows, header))
        logger.info(
            """
Reflections below a partiality_cutoff of %s are not considered for any
part of the scaling analysis or for the reporting of merging statistics.
Additionally, if applicable, only reflections with a min_partiality > %s
were considered for use when refining the scaling model.
""",
            scaling_script.params.cut_data.partiality_cutoff,
            scaling_script.params.reflection_selection.min_partiality,
        )
        if MergingStatisticsObserver().data:
            logger.info(
                make_merging_statistics_summary(
                    MergingStatisticsObserver().data["statistics"]
                )
            )
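
Example #3 reads its inputs from observer singletons rather than from script
attributes as in Example #1. A minimal, hypothetical sketch of that pattern,
assuming each observer class is wrapped so that every call returns one shared
instance whose data dict is filled in while scaling runs:

def singleton(cls):
    """Class decorator: every cls() call returns the same shared instance."""
    instance = cls()
    return lambda: instance

@singleton
class ScalingModelObserver:
    def __init__(self):
        self.data = {}  # empty until the scaling observers populate it

# Both calls below return the identical instance, so the check
# "if ScalingModelObserver().data:" asks whether anything was recorded.
assert ScalingModelObserver() is ScalingModelObserver()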