Code example #1
def inspect(experiments, reflections, params):
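    """Log per-crystal <I/sigma> summary tables, optionally restricted to d > d_min."""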

    Ioversig_per_crystal = flex.double()
    if params.d_min:
        reflections = reflections.select(reflections["d"] > params.d_min)
        if not reflections:
            return
    I = reflections["intensity.sum.value"]
    sig = reflections["intensity.sum.variance"]**0.5
    Ioversig = I / sig
    images = []

    for id_, identifier in dict(reflections.experiment_identifiers()).items():
        sel = reflections["id"] == id_
        Ioversig_per_crystal.append(flex.mean(Ioversig.select(sel)))
        expt = (experiments.identifiers() == identifier).iselection()[0]
        images.append(experiments[expt].imageset.get_path(0).split("/")[-1])

    header = [
        "image",
        "expt_id",
        "I/sigma" + (f" (d_min={params.d_min})" if params.d_min else ""),
    ]
    rows = []

    for i, (image, Iovers) in enumerate(zip(images, Ioversig_per_crystal)):
        rows.append([f"{image}", f"{i}", f"{Iovers:.2f}"])

    logger.info(tabulate(rows, header))

    if params.threshold_i_over_sigma:
        logger.info(
            f"Integrated images with I/sigma > {params.threshold_i_over_sigma}"
        )
        header = [
            "image",
            "expt_id",
            "I/sigma" + (f" (d_min={params.d_min})" if params.d_min else ""),
        ]
        rows = []

        for i, (image, Iovers) in enumerate(zip(images, Ioversig_per_crystal)):
            if Iovers > params.threshold_i_over_sigma:
                rows.append([f"{image}", f"{i}", f"{Iovers:.2f}"])

        logger.info(tabulate(rows, header))
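
Every example on this page builds plain lists of rows plus a header and logs the result of tabulate. A minimal self-contained sketch of that pattern, assuming the PyPI tabulate package (which dials.util is understood to wrap) and using invented values:

from tabulate import tabulate

header = ["image", "expt_id", "I/sigma"]
rows = [["image_0001.cbf", "0", "12.34"], ["image_0002.cbf", "1", "8.76"]]
# tabulate returns the formatted table as a single string
print(tabulate(rows, header))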
Code example #2
    def _record_individual_report(self, data_manager, report, cluster_name):
        d = self._report_as_dict(report)

        self._individual_report_dicts[cluster_name] = self._individual_report_dict(
            d, cluster_name
        )

        for graph in (
            "cc_one_half",
            "i_over_sig_i",
            "completeness",
            "multiplicity_vs_resolution",
            "r_pim",
        ):
            self._comparison_graphs.setdefault(
                graph, {"layout": d[graph]["layout"], "data": []}
            )
            data = copy.deepcopy(d[graph]["data"][0])
            data["name"] = cluster_name
            data.pop("line", None)  # remove default color override
            self._comparison_graphs[graph]["data"].append(data)

        def remove_html_tags(table):
            return [
                [
                    s.replace("<strong>", "")
                    .replace("</strong>", "")
                    .replace("<sub>", "")
                    .replace("</sub>", "")
                    if isinstance(s, six.string_types)
                    else s
                    for s in row
                ]
                for row in table
            ]

        logger.info(
            "\nOverall merging statistics:\n%s",
            tabulate(
                remove_html_tags(d["overall_statistics_table"]), headers="firstrow"
            ),
        )
        logger.info(
            "\nResolution shells:\n%s",
            tabulate(
                remove_html_tags(d["merging_statistics_table"]), headers="firstrow"
            ),
        )
Code example #3
    def print_stats_on_matches(self):

        matches = self.get_matches()
        nref = len(matches)
        if nref == 0:
            logger.warning(
                "Unable to calculate summary statistics for zero observations"
            )
            return

        twotheta_resid = matches["2theta_resid"]
        w_2theta = matches["2theta.weights"]

        msg = f"\nSummary statistics for {nref} observations matched to predictions:"
        header = ["", "Min", "Q1", "Med", "Q3", "Max"]
        rows = []
        row_data = five_number_summary(twotheta_resid)
        rows.append(
            ["2theta_c - 2theta_o (deg)"] + [f"{e * RAD2DEG:.4g}" for e in row_data]
        )
        row_data = five_number_summary(w_2theta)
        rows.append(["2theta weights"] + [f"{e * DEG2RAD ** 2:.4g}" for e in row_data])
        logger.info(msg)
        logger.info(tabulate(rows, header) + "\n")
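
The Min/Q1/Med/Q3/Max columns come from five_number_summary. A plain-Python stand-in (not the project's implementation) showing what those five numbers are, using invented residuals:

import statistics

def five_number_summary_sketch(values):
    # minimum, lower quartile, median, upper quartile, maximum
    values = sorted(values)
    q1, med, q3 = statistics.quantiles(values, n=4)
    return values[0], q1, med, q3, values[-1]

print(five_number_summary_sketch([0.1, -0.2, 0.05, 0.3, -0.15, 0.0]))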
Code example #4
File: observers.py Project: jbeilstenedmands/dials
    def print_scaling_summary(self, scaling_script):
        """Log summary information after scaling."""
        if ScalingModelObserver().data:
            logger.info(ScalingModelObserver().return_model_error_summary())
        valid_ranges = get_valid_image_ranges(scaling_script.experiments)
        image_ranges = get_image_ranges(scaling_script.experiments)
        msg = []
        for (img, valid, refl) in zip(image_ranges, valid_ranges,
                                      scaling_script.reflections):
            if valid:
                if (len(valid) > 1 or valid[0][0] != img[0]
                        or valid[-1][1] != img[1]):
                    msg.append(
                        "Excluded images for experiment id: %s, image range: %s, limited range: %s"
                        % (
                            refl.experiment_identifiers().keys()[0],
                            list(img),
                            list(valid),
                        ))
        if msg:
            msg = ["Summary of image ranges removed:"] + msg
            logger.info("\n".join(msg))

        # report on partiality of dataset
        partials = flex.double()
        for r in scaling_script.reflections:
            if "partiality" in r:
                partials.extend(r["partiality"])
        not_full_sel = partials < 0.99
        not_zero_sel = partials > 0.01
        gt_half = partials > 0.5
        lt_half = partials < 0.5
        partial_gt_half_sel = not_full_sel & gt_half
        partial_lt_half_sel = not_zero_sel & lt_half
        logger.info("Summary of dataset partialities")
        header = ["Partiality (p)", "n_refl"]
        rows = [
            ["all reflections", str(partials.size())],
            ["p > 0.99", str(not_full_sel.count(False))],
            ["0.5 < p < 0.99",
             str(partial_gt_half_sel.count(True))],
            ["0.01 < p < 0.5",
             str(partial_lt_half_sel.count(True))],
            ["p < 0.01", str(not_zero_sel.count(False))],
        ]
        logger.info(tabulate(rows, header))
        logger.info(
            """
Reflections below a partiality_cutoff of %s are not considered for any
part of the scaling analysis or for the reporting of merging statistics.
Additionally, if applicable, only reflections with a min_partiality > %s
were considered for use when refining the scaling model.
""",
            scaling_script.params.cut_data.partiality_cutoff,
            scaling_script.params.reflection_selection.min_partiality,
        )
        if MergingStatisticsObserver().data:
            logger.info(
                make_merging_statistics_summary(
                    MergingStatisticsObserver().data["statistics"]))
Code example #5
    def summary(self):
        """
        Get a summary of the processing
        """
        # Compute the task table
        if self.experiments.all_stills():
            rows = [["#", "Group", "Frame From", "Frame To", "# Reflections"]]
            for i in range(len(self)):
                job = self.manager.job(i)
                group = job.index()
                f0, f1 = job.frames()
                n = self.manager.num_reflections(i)
                rows.append([str(i), str(group), str(f0), str(f1), str(n)])
        elif self.experiments.all_sequences():
            rows = [[
                "#",
                "Group",
                "Frame From",
                "Frame To",
                "Angle From",
                "Angle To",
                "# Reflections",
            ]]
            for i in range(len(self)):
                job = self.manager.job(i)
                group = job.index()
                expr = job.expr()
                f0, f1 = job.frames()
                scan = self.experiments[expr[0]].scan
                p0 = scan.get_angle_from_array_index(f0)
                p1 = scan.get_angle_from_array_index(f1)
                n = self.manager.num_reflections(i)
                rows.append([
                    str(i),
                    str(group),
                    str(f0 + 1),
                    str(f1),
                    str(p0),
                    str(p1),
                    str(n)
                ])
        else:
            raise RuntimeError(
                "Experiments must be all sequences or all stills")

        # The job table
        task_table = tabulate(rows, headers="firstrow")

        # The format string
        if self.params.block.size is None:
            block_size = "auto"
        else:
            block_size = str(self.params.block.size)
        return ("Processing reflections in the following blocks of images:\n\n"
                " block_size: {} {}\n\n{}\n").format(
                    block_size,
                    "" if block_size in ("auto", "Auto") else self.params.block.units,
                    task_table,
                )
Code example #6
    def binned_variances_summary(self):
        """Generate a summary of the model minimisation for output."""
        header = [
            "Intensity range (<Ih>)",
            "n_refl",
            "Uncorrected variance",
            "Corrected variance",
        ]
        rows = []
        bin_bounds = [
            "%.2f" % i for i in self.binner.binning_info["bin_boundaries"]
        ]
        for i, (initial_var, bin_var, n_refl) in enumerate(
                zip(
                    self.binner.binning_info["initial_variances"],
                    self.binner.binning_info["bin_variances"],
                    self.binner.binning_info["refl_per_bin"],
                )):
            rows.append([
                bin_bounds[i] + " - " + bin_bounds[i + 1],
                str(int(n_refl)),
                str(round(initial_var, 3)),
                str(round(bin_var, 3)),
            ])
        return "\n".join((
            "Results of error model refinement. Uncorrected and corrected variances",
            "of normalised intensity deviations for given intensity ranges. Variances",
            "are expected to be ~1.0 for reliable errors (sigmas).",
            tabulate(rows, header),
            "",
        ))
Code example #7
File: refiner.py Project: kmdalton/dials
    def refine_fisher_scoring(self):
        """
        Perform the profile refinement

        """

        # Print information
        logger.info("\nComponents to refine:")
        logger.info(" Orientation:       %s", not self.state.is_orientation_fixed)
        logger.info(" Unit cell:         %s", not self.state.is_unit_cell_fixed)
        logger.info(" RLP mosaicity:     %s", not self.state.is_mosaic_spread_fixed)
        logger.info(" Wavelength spread: %s\n",
                    not self.state.is_wavelength_spread_fixed)

        # Initialise the algorithm
        self.ml = FisherScoringMaximumLikelihood(
            self.state,
            self.s0,
            self.sp_list,
            self.h_list,
            self.ctot_list,
            self.mobs_list,
            self.sobs_list,
        )

        # Solve the maximum likelihood equations
        self.ml.solve()

        # Get the parameters
        self.parameters = flex.double(self.ml.parameters)

        # set the parameters
        self.state.active_parameters = self.parameters

        # Print summary table of refinement.
        rows = []
        headers = ["Iteration", "likelihood", "RMSD (pixel) X,Y"]
        for i, h in enumerate(self.ml.history):
            likelihood = h["likelihood"]
            rmsd = h["rmsd"]
            rows.append([str(i), f"{likelihood:.4f}", f"{rmsd[0]:.3f}, {rmsd[1]:.3f}"])
        logger.info("\nRefinement steps:\n\n" +
                    textwrap.indent(tabulate(rows, headers), " "))

        # Print the eigen values and vectors of sigma_m
        if not self.state.is_mosaic_spread_fixed:
            logger.info("\nDecomposition of Sigma_M:")
            print_eigen_values_and_vectors(
                matrix.sqr(
                    flumpy.from_numpy(self.state.mosaicity_covariance_matrix)))

        # Save the history
        self.history = self.ml.history

        # Return the optimizer
        return self.ml
Code example #8
def resolution_fit(d_star_sq, y_obs, model, limit, sel=None):
    """Estimate a resolution limit based on the input merging statistics

    The function defined by `model` will be fit to the input `d_star_sq` and `y_obs`.
    The estimated resolution limit is chosen as the `d_star_sq` value at which the
    fitted function equals `limit`.

    Args:
        d_star_sq (scitbx.array_family.flex.double): The high resolution limits of the
            resolution bins in units of 1/d^2 (i.e. d*^2)
        y_obs (scitbx.array_family.flex.double): The statistic against which to fit the
            function `model`
        model: The function to fit against `y_obs`. Must be callable, taking as input x
            (d_star_sq) and y (the metric to be fitted) values, returning the fitted
            y(x) values.
        limit (float): The resolution limit criterion.
        sel (scitbx.array_family.flex.bool): An optional selection to apply to the
            `d_star_sq` and `y_obs` values.

    Returns: A ResolutionResult containing the estimated resolution limit
        (d_min, in Å)

    Raises:
        RuntimeError: Raised if no `y_obs` values remain after application of the
        selection `sel`
    """
    if sel is None:
        sel = flex.bool(len(d_star_sq), True)
    sel &= y_obs > 0
    y_obs = y_obs.select(sel)
    d_star_sq = d_star_sq.select(sel)

    if not len(y_obs):
        raise RuntimeError("No reflections left for fitting")
    y_fit = model(d_star_sq, y_obs, 6)
    logger.debug(
        tabulate(
            [("d*2", "d", "obs", "fit")]
            + [
                (ds2, uctbx.d_star_sq_as_d(ds2), yo, yf)
                for ds2, yo, yf in zip(d_star_sq, y_obs, y_fit)
            ],
            headers="firstrow",
        )
    )

    if flex.min(y_obs) > limit:
        d_min = 1.0 / math.sqrt(flex.max(d_star_sq))
    else:
        try:
            d_min = 1.0 / math.sqrt(interpolate_value(d_star_sq, y_fit, limit))
        except RuntimeError as e:
            logger.debug(f"Error interpolating value: {e}")
            d_min = None

    return ResolutionResult(d_star_sq, y_obs, y_fit, d_min)
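
A hypothetical usage sketch for resolution_fit as defined above. It assumes a cctbx/DIALS environment (flex, plus the module's own helpers such as interpolate_value and ResolutionResult being importable alongside the function); poly_model, the sample numbers, and the result.d_min attribute access are assumptions for illustration, standing in for the project's fitting helpers (e.g. log_fit, tanh_fit) and real binned statistics:

import numpy as np
from scitbx.array_family import flex

def poly_model(x, y, order):
    # least-squares polynomial stand-in returning fitted y(x) values
    xs = np.array(list(x))
    coeffs = np.polyfit(xs, list(y), order)
    return flex.double(np.polyval(coeffs, xs).tolist())

d_star_sq = flex.double([0.04, 0.08, 0.12, 0.16, 0.20, 0.24, 0.28])
cc_half = flex.double([0.99, 0.98, 0.95, 0.85, 0.65, 0.40, 0.20])
result = resolution_fit(d_star_sq, cc_half, poly_model, limit=0.5)
print(f"estimated d_min: {result.d_min:.2f} Å")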
Code example #9
    def resolution_cc_ref(self, limit=None):
        """Compute a resolution limit where cc_ref < 0.5 (limit if
        set) or the full extent of the data."""

        if limit is None:
            limit = self._params.cc_ref

        intensities = self._intensities.merge_equivalents(
            use_internal_variance=False).array()
        cc_s = flex.double()
        for b in self._merging_statistics.bins:
            sel = intensities.resolution_filter_selection(d_min=b.d_min,
                                                          d_max=b.d_max)
            sel_ref = self._reference.resolution_filter_selection(
                d_min=b.d_min, d_max=b.d_max)
            d = intensities.select(sel)
            dref = self._reference.select(sel_ref)
            cc = d.correlation(dref, assert_is_similar_symmetry=False)
            cc_s.append(cc.coefficient())
        cc_s = cc_s.reversed()

        s_s = flex.double([
            1 / b.d_min**2 for b in self._merging_statistics.bins
        ]).reversed()

        if self._params.cc_half_fit == "tanh":
            cc_f = tanh_fit(s_s, cc_s, iqr_multiplier=4)
        else:
            cc_f = fit(s_s, cc_s, 6)

        logger.debug(
            "rch: fits\n%s",
            tabulate(
                [("d*2", "d", "cc_s", "cc_f")] +
                [(s, 1.0 / math.sqrt(s), cc_s[j], cc_f[j])
                 for j, s in enumerate(s_s)],
                headers="firstrow",
            ),
        )

        rlimit = limit * max(cc_s)
        try:
            r_cc = 1.0 / math.sqrt(interpolate_value(s_s, cc_f, rlimit))
        except Exception:
            r_cc = 1.0 / math.sqrt(max(s_s))
        logger.debug("rch: done : %s", r_cc)

        if self._params.plot:
            plot = resolution_plot("CCref")
            plot.plot(s_s, cc_f, label="fit")
            plot.plot(s_s, cc_s, label="CCref")
            plot.plot_resolution_limit(r_cc)
            plot.savefig("cc_ref.png")

        return r_cc
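
The resolution_* methods in this and the following examples share one pattern: fit a smooth curve to a binned statistic against d*^2 = 1/d^2, interpolate where the fit crosses the limit, and convert back to a resolution d = 1/sqrt(d*^2) in Å. A self-contained sketch with a simple linear stand-in for interpolate_value (invented here, not the project's implementation):

import math

def interpolate_value_sketch(x, y, target):
    # linearly interpolate the first crossing of `target` along the curve
    for x0, x1, y0, y1 in zip(x, x[1:], y, y[1:]):
        if (y0 - target) * (y1 - target) <= 0 and y0 != y1:
            return x0 + (target - y0) * (x1 - x0) / (y1 - y0)
    raise RuntimeError("limit not crossed")

s_s = [0.04, 0.08, 0.12, 0.16]   # d*^2 = 1/d^2, low to high resolution
cc_f = [0.99, 0.95, 0.70, 0.30]  # fitted statistic per bin
d_min = 1.0 / math.sqrt(interpolate_value_sketch(s_s, cc_f, 0.5))
print(f"{d_min:.2f} Å")          # resolution where the fit crosses 0.5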
Code example #10
File: report.py Project: jbeilstenedmands/dials
    def as_str(self, prefix=""):
        """
        Return the table as a string

        :param prefix: An optional prefix for the title line
        :return: The string
        """
        rows = [[col[1] for col in self.cols]]
        for row in self.rows:
            rows.append([str(x) for x in row])
        text = [prefix + self.title, tabulate(rows, headers="firstrow"), ""]
        return "\n".join(text)
Code example #11
    def resolution_rmerge(self, limit=None):
        """Compute a resolution limit where either rmerge = 1.0 (limit if
        set) or the full extent of the data. N.B. this fit is only meaningful
        for positive values."""

        if limit is None:
            limit = self._params.rmerge

        rmerge_s = flex.double(
            [b.r_merge for b in self._merging_statistics.bins]).reversed()
        s_s = flex.double([
            1 / b.d_min**2 for b in self._merging_statistics.bins
        ]).reversed()

        sel = rmerge_s > 0
        rmerge_s = rmerge_s.select(sel)
        s_s = s_s.select(sel)

        if limit == 0.0:
            r_rmerge = 1.0 / math.sqrt(flex.max(s_s))
            rmerge_f = None

        elif limit > flex.max(rmerge_s):
            r_rmerge = 1.0 / math.sqrt(flex.max(s_s))
            rmerge_f = None

        else:
            rmerge_f = log_inv_fit(s_s, rmerge_s, 6)

            logger.debug(
                "rmerge: fits\n%s",
                tabulate(
                    [("d*2", "d", "rmerge_s", "rmerge_f")] +
                    [(s, 1.0 / math.sqrt(s), rmerge_s[j], rmerge_f[j])
                     for j, s in enumerate(s_s)],
                    headers="firstrow",
                ),
            )

            try:
                r_rmerge = 1.0 / math.sqrt(
                    interpolate_value(s_s, rmerge_f, limit))
            except Exception:
                r_rmerge = 1.0 / math.sqrt(flex.max(s_s))

        if self._params.plot:
            plot = resolution_plot(ylabel="Rmerge")
            if rmerge_f is not None:
                plot.plot(s_s, rmerge_f, label="fit")
            plot.plot(s_s, rmerge_s, label="Rmerge")
            plot.plot_resolution_limit(r_rmerge)
            plot.savefig("rmerge.png")

        return r_rmerge
Code example #12
File: combine_intensities.py Project: kmdalton/dials
    def __init__(self, scaler, use_Imid=None):
        self.scaler = scaler
        self.experiment = scaler.experiment
        if "intensity.prf.value" not in scaler.reflection_table:
            self.max_key = 1
            logger.info(
                "No profile intensities found, skipping profile/summation intensity combination."
            )
            return
        if use_Imid is not None:
            self.max_key = use_Imid
        else:
            self.Imids = scaler.params.reflection_selection.combine.Imid
            self.dataset = _make_reflection_table_from_scaler(self.scaler)
            if "partiality" in self.dataset:
                raw_intensities = (
                    self.dataset["intensity.sum.value"].as_double() /
                    self.dataset["partiality"])
            else:
                raw_intensities = self.dataset["intensity.sum.value"].as_double()
            logger.debug("length of raw intensity array: %s",
                         raw_intensities.size())
            _determine_Imids(self, raw_intensities)
            header = ["Combination", "CC1/2", "Rmeas"]
            rows, results = self._test_Imid_combinations()
            logger.info(tabulate(rows, header))

            self.max_key = min(results, key=results.get)
            while results[self.max_key] < 0:
                del results[self.max_key]
                if results:
                    self.max_key = min(results, key=results.get)
                else:
                    self.max_key = -1
                    break
            if self.max_key == 0:
                logger.info(
                    "Profile intensities determined to be best for scaling. \n"
                )
            elif self.max_key == 1:
                logger.info(
                    "Summation intensities determined to be best for scaling. \n"
                )
            elif self.max_key == -1:
                logger.info(
                    "No good statistics found, using profile intensities. \n")
                self.max_key = 0
            else:
                logger.info(
                    "Combined intensities with Imid = %.2f determined to be best for scaling. \n",
                    self.max_key,
                )
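
The selection loop above takes the minimum-valued key of a results dict, discarding negative (unusable) entries as it goes; keys 0 and 1 stand for pure profile and pure summation intensities, and other keys are candidate Imid values. A plain-dict sketch with invented numbers:

# results maps candidate combination -> statistic, lowest non-negative wins
results = {0: -0.2, 1: 0.15, 1200.0: 0.08}

max_key = min(results, key=results.get)
while results[max_key] < 0:
    del results[max_key]
    if results:
        max_key = min(results, key=results.get)
    else:
        max_key = -1  # sentinel: no good statistics found
        break
print(max_key)  # -> 1200.0, the best remaining candidate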
Code example #13
    def resolution_i_mean_over_sigma_mean(self, limit=None):
        """Compute a resolution limit where either <I>/<sigma> = 1.0 (limit if
        set) or the full extent of the data."""

        if limit is None:
            limit = self._params.i_mean_over_sigma_mean

        isigma_s = flex.double([
            b.i_mean_over_sigi_mean for b in self._merging_statistics.bins
        ]).reversed()
        s_s = flex.double([
            1 / b.d_min**2 for b in self._merging_statistics.bins
        ]).reversed()

        sel = isigma_s > 0
        isigma_s = isigma_s.select(sel)
        s_s = s_s.select(sel)

        if flex.min(isigma_s) > limit:
            r_isigma = 1.0 / math.sqrt(flex.max(s_s))
            isigma_f = None

        else:
            isigma_f = log_fit(s_s, isigma_s, 6)

            logger.debug(
                "isigma: fits\n%s",
                tabulate(
                    [("d*2", "d", "isigma_s", "isigma_f")] +
                    [(s, 1.0 / math.sqrt(s), isigma_s[j], isigma_f[j])
                     for j, s in enumerate(s_s)],
                    headers="firstrow",
                ),
            )

            try:
                r_isigma = 1.0 / math.sqrt(
                    interpolate_value(s_s, isigma_f, limit))
            except Exception:
                if limit > max(isigma_f):
                    r_isigma = 1.0 / math.sqrt(flex.min(s_s))
                else:
                    r_isigma = 1.0 / math.sqrt(flex.max(s_s))

        if self._params.plot:
            plot = resolution_plot(ylabel="Unmerged <I>/<sigma>")
            if isigma_f is not None:
                plot.plot(s_s, isigma_f, label="fit")
            plot.plot(s_s, isigma_s, label="Unmerged <I>/<sigma>")
            plot.plot_resolution_limit(r_isigma)
            plot.savefig("i_mean_over_sigma_mean.png")

        return r_isigma
Code example #14
def run_analysis(flags, reflections):
    """Print a table of flags present in the reflections file"""

    header = ["flag", "nref"]
    rows = []
    for name, val in flags:
        n = (reflections.get_flags(val)).count(True)
        if n > 0:
            rows.append([name, "%d" % n])
    if rows:
        print(tabulate(rows, header))
    else:
        print("No flags set")
Code example #15
    def __str__(self):
        """Convert to string."""
        rows = [[description, f"{value:.2f} seconds"]
                for description, value in (
                    ["Read time", self.read],
                    ["Extract time", self.extract],
                    ["Pre-process time", self.initialize],
                    ["Process time", self.process],
                    ["Post-process time", self.finalize],
                    ["Total time", self.total],
                    ["User time", self.user],
                ) if value]
        return tabulate(rows)
Code example #16
File: merge.py Project: rjgildea/dials
def make_dano_table(anomalous_amplitudes):
    """Calculate <dano/sigdano> in resolution bins and tabulate."""
    dFsdF, resolution_bin_edges = dano_over_sigdano_stats(anomalous_amplitudes)

    logger.info("Size of anomalous differences")
    header = ["d_max", "d_min", "<|ΔF|/σ(ΔF)>"]
    rows = []
    for i, dF in enumerate(dFsdF):
        rows.append([
            f"{resolution_bin_edges[i]:6.2f}",
            f"{resolution_bin_edges[i+1]:6.2f}",
            f"{dF:6.3f}",
        ])
    return tabulate(rows, header)
Code example #17
File: combine_intensities.py Project: kmdalton/dials
    def __init__(self, multiscaler):
        self.active_scalers = multiscaler.active_scalers
        self.Imids = multiscaler.params.reflection_selection.combine.Imid
        # first copy across relevant data that's needed
        self.good_datasets = []
        for i, scaler in enumerate(self.active_scalers):
            if "intensity.prf.value" in scaler.reflection_table:
                self.good_datasets.append(i)
        if not self.good_datasets:
            self.max_key = 1
            logger.info(
                "No profile intensities found, skipping profile/summation intensity combination."
            )
            return
        self.datasets = [
            _make_reflection_table_from_scaler(self.active_scalers[i])
            for i in self.good_datasets
        ]
        raw_intensities = self._get_raw_intensity_array()
        logger.debug("length of raw intensity array: %s",
                     raw_intensities.size())
        _determine_Imids(self, raw_intensities)

        header = ["Combination", "CC1/2", "Rmeas"]
        rows, results = self._test_Imid_combinations()
        logger.info(tabulate(rows, header))

        self.max_key = min(results, key=results.get)
        while results[self.max_key] < 0:
            del results[self.max_key]
            if results:
                self.max_key = min(results, key=results.get)
            else:
                self.max_key = -1
                break
        if self.max_key == 0:
            logger.info(
                "Profile intensities determined to be best for scaling. \n")
        elif self.max_key == 1:
            logger.info(
                "Summation intensities determined to be best for scaling. \n")
        elif self.max_key == -1:
            logger.info(
                "No good statistics found, using profile intensities. \n")
            self.max_key = 0
        else:
            logger.info(
                "Combined intensities with Imid = %.2f determined to be best for scaling. \n",
                self.max_key,
            )
Code example #18
    def cell_param_table(crystal):
        """Construct a table of cell parameters and their ESDs"""

        cell = crystal.get_unit_cell().parameters()
        esd = crystal.get_cell_parameter_sd()
        vol = crystal.get_unit_cell().volume()
        vol_esd = crystal.get_cell_volume_sd()
        header = ["Parameter", "Value", "Estimated sd"]
        rows = []
        names = ["a", "b", "c", "alpha", "beta", "gamma"]
        for n, p, e in zip(names, cell, esd):
            rows.append([n, "%9.5f" % p, "%9.5f" % e])
        rows.append(["\nvolume", "\n%9.5f" % vol, "\n%9.5f" % vol_esd])
        return tabulate(rows, header)
Code example #19
    def resolution_completeness(self, limit=None):
        """Compute a resolution limit where completeness < 0.5 (limit if
        set) or the full extent of the data. N.B. this completeness is
        with respect to the *maximum* completeness in a shell, to reflect
        triclinic cases."""

        if limit is None:
            limit = self._params.completeness

        comp_s = flex.double([
            b.completeness for b in self._merging_statistics.bins
        ]).reversed()
        s_s = flex.double([
            1 / b.d_min**2 for b in self._merging_statistics.bins
        ]).reversed()

        if flex.min(comp_s) > limit:
            r_comp = 1.0 / math.sqrt(flex.max(s_s))
            comp_f = None

        else:
            comp_f = fit(s_s, comp_s, 6)

            logger.debug(
                "comp: fits\n%s",
                tabulate(
                    [("d*2", "d", "comp_s", "comp_f")] +
                    [(s, 1.0 / math.sqrt(s), comp_s[j], comp_f[j])
                     for j, s in enumerate(s_s)],
                    headers="firstrow",
                ),
            )

            rlimit = limit * max(comp_s)
            try:
                r_comp = 1.0 / math.sqrt(interpolate_value(
                    s_s, comp_f, rlimit))
            except Exception:
                r_comp = 1.0 / math.sqrt(flex.max(s_s))

        if self._params.plot:
            plot = resolution_plot(ylabel="Completeness")
            if comp_f is not None:
                plot.plot(s_s, comp_f, label="fit")
            plot.plot(s_s, comp_s, label="Completeness")
            plot.plot_resolution_limit(r_comp)
            plot.savefig("completeness.png")

        return r_comp
Code example #20
File: show.py Project: kmdalton/dials
    def model_connectivity_impl(experiments, model):
        text = [""]
        text.append(f"{model.capitalize()}:")
        models = getattr(experiments, f"{model}s")()
        rows = [[""] + [str(j) for j in range(len(models))]]
        for j, e in enumerate(experiments):
            row = ["Experiment %d" % j]
            for m in models:
                if getattr(e, model) is m:
                    row.append("x")
                else:
                    row.append(".")
            rows.append(row)
        text.append(tabulate(rows, tablefmt="plain"))
        return text
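
A self-contained sketch of the connectivity matrix this helper prints, with invented experiments and model assignments, assuming the PyPI tabulate package:

from tabulate import tabulate

models = ["model 0", "model 1"]  # hypothetical shared models
assignments = [0, 0, 1]          # which model each experiment points at

rows = [[""] + [str(j) for j in range(len(models))]]
for i, m in enumerate(assignments):
    rows.append(["Experiment %d" % i] +
                ["x" if j == m else "." for j in range(len(models))])
print(tabulate(rows, tablefmt="plain"))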
Code example #21
def print_step_table(refinery):
    """print useful output about refinement steps in the form of a simple table"""

    logger.info("\nRefinement steps:")

    header = ["Step", "Nref"]
    for (name, units) in zip(refinery._target.rmsd_names,
                             refinery._target.rmsd_units):
        header.append(name + "\n(" + units + ")")

    rows = []
    for i in range(refinery.history.get_nrows()):
        rmsds = [r for r in refinery.history["rmsd"][i]]
        rows.append(
            [str(i), str(refinery.history["num_reflections"][i])] +
            ["%.5g" % r for r in rmsds])

    logger.info(tabulate(rows, header))
    logger.info(refinery.history.reason_for_termination)
Code example #22
    def _resolution_sigma(self, limit, get_mean, label, fig_filename):
        isigma_s = flex.double(map(get_mean,
                                   self._merging_statistics.bins)).reversed()
        s_s = flex.double(1 / b.d_min**2
                          for b in self._merging_statistics.bins).reversed()

        sel = isigma_s > 0
        isigma_s = isigma_s.select(sel)
        s_s = s_s.select(sel)

        if flex.min(isigma_s) > limit:
            r_isigma = 1.0 / math.sqrt(flex.max(s_s))
            isigma_f = None

        else:
            isigma_f = log_fit(s_s, isigma_s, 6)

            logger.debug(
                "isigma: fits\n%s",
                tabulate(
                    [("d*2", "d", "isigma_s", "isigma_f")] +
                    [(s, 1.0 / math.sqrt(s), isigma_s[j], isigma_f[j])
                     for j, s in enumerate(s_s)],
                    headers="firstrow",
                ),
            )

            try:
                r_isigma = 1.0 / math.sqrt(
                    interpolate_value(s_s, isigma_f, limit))
            except Exception:
                r_isigma = 1.0 / math.sqrt(flex.max(s_s))

        if self._params.plot:
            plot = resolution_plot(ylabel=label)
            if isigma_f is not None:
                plot.plot(s_s, isigma_f, label="fit")
            plot.plot(s_s, isigma_s, label=label)
            plot.plot_resolution_limit(r_isigma)
            plot.savefig(fig_filename)

        return r_isigma
Code example #23
File: show.py Project: kmdalton/dials
def _create_flag_count_table(table):
    """Generate a summary table of flag values in a reflection table.

    :param table: A reflection table
    :returns:     A string of the formatted flags table
    """

    # Calculate the counts of entries that match each flag
    numpy_flags = table["flags"].as_numpy_array()
    flag_count = {
        flag: np.sum(numpy_flags & value != 0)
        for value, flag in table.flags.values.items()
    }

    # Work out the numeric-value order of the flags
    flag_order = sorted(table.flags.values.values(), key=lambda x: x.real)

    # Build the actual table
    flag_rows = [["Flag", "Count", "%"]]
    max_count_len = max(5, len(str(max(flag_count.values()))))
    last_flag = None
    for flag in flag_order:
        indent = ""
        # As a hint for reading, indent any 'summary' flags.
        # A summary flag is any flag which overlaps with the previous one.
        if last_flag and (last_flag.real & flag.real):
            indent = "  "
        last_flag = flag
        # Add the row to the table we're building
        flag_rows.append([
            indent + flag.name,
            "{:{:d}d}".format(flag_count[flag], max_count_len),
            f"{100 * flag_count[flag] / len(table):5.01f}",
        ])

    # Build the array of output strings
    text = []
    text.append("Reflection flags:")
    text.append(tabulate(flag_rows, headers="firstrow"))
    return "\n".join(text)
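
One detail worth noting in the flag-count dict above: `&` binds tighter than `!=`, so numpy_flags & value != 0 counts the entries with that flag bit set. A self-contained numpy check with invented flag values:

import numpy as np

flags = np.array([0b011, 0b001, 0b110, 0b100])
indexed = 0b010  # hypothetical flag bit
print(np.sum(flags & indexed != 0))  # -> 2 (rows 0 and 2)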
Code example #24
File: analysis.py Project: dagewa/dials
def make_summary_table(results_summary: dict) -> str:
    # make a summary table
    overall_summary_header = [
        "Image",
        "expt_id",
        "n_indexed",
        "RMSD X",
        "RMSD Y",
        "RMSD dPsi",
    ]

    rows = []
    total = 0
    if any(len(v) > 1 for v in results_summary.values()):
        show_lattices = True
        overall_summary_header.insert(1, "lattice")
    else:
        show_lattices = False
    for k in sorted(results_summary.keys()):
        for j, cryst in enumerate(results_summary[k]):
            if not cryst["n_indexed"]:
                continue
            n_idx, n_strong = (cryst["n_indexed"], cryst["n_strong"])
            frac_idx = f"{n_idx}/{n_strong} ({100*n_idx/n_strong:2.1f}%)"
            row = [
                cryst["Image"],
                str(total),
                frac_idx,
                cryst["RMSD_X"],
                cryst["RMSD_Y"],
                cryst["RMSD_dPsi"],
            ]
            if show_lattices:
                row.insert(1, j + 1)
            rows.append(row)
            total += 1

    summary_table = tabulate(rows, overall_summary_header)
    return summary_table
Code example #25
File: show.py Project: kmdalton/dials
def show_reflections(
    reflections,
    show_intensities=False,
    show_profile_fit=False,
    show_centroids=False,
    show_all_reflection_data=False,
    show_flags=False,
    max_reflections=None,
    show_identifiers=False,
):

    text = []

    from orderedset import OrderedSet

    formats = {
        "miller_index": "%i, %i, %i",
        "d": "%.2f",
        "qe": "%.3f",
        "dqe": "%.3f",
        "id": "%i",
        "imageset_id": "%i",
        "panel": "%i",
        "flags": "%i",
        "background.mean": "%.1f",
        "background.dispersion": "%.1f",
        "background.mse": "%.1f",
        "background.sum.value": "%.1f",
        "background.sum.variance": "%.1f",
        "intensity.prf.value": "%.1f",
        "intensity.prf.variance": "%.1f",
        "intensity.sum.value": "%.1f",
        "intensity.sum.variance": "%.1f",
        "intensity.cor.value": "%.1f",
        "intensity.cor.variance": "%.1f",
        "intensity.scale.value": "%.1f",
        "intensity.scale.variance": "%.1f",
        "Ih_values": "%.1f",
        "lp": "%.3f",
        "num_pixels.background": "%i",
        "num_pixels.background_used": "%i",
        "num_pixels.foreground": "%i",
        "num_pixels.valid": "%i",
        "partial_id": "%i",
        "partiality": "%.4f",
        "profile.correlation": "%.3f",
        "profile.rmsd": "%.3f",
        "xyzcal.mm": "%.2f, %.2f, %.2f",
        "xyzcal.px": "%.2f, %.2f, %.2f",
        "delpsical.rad": "%.3f",
        "delpsical2": "%.3f",
        "delpsical.weights": "%.3f",
        "xyzobs.mm.value": "%.2f, %.2f, %.2f",
        "xyzobs.mm.variance": "%.4e, %.4e, %.4e",
        "xyzobs.px.value": "%.2f, %.2f, %.2f",
        "xyzobs.px.variance": "%.4f, %.4f, %.4f",
        "s1": "%.4f, %.4f, %.4f",
        "s2": "%.4f, %.4f, %.4f",
        "shoebox": "%.1f",
        "rlp": "%.4f, %.4f, %.4f",
        "zeta": "%.3f",
        "x_resid": "%.3f",
        "x_resid2": "%.3f",
        "y_resid": "%.3f",
        "y_resid2": "%.3f",
        "kapton_absorption_correction": "%.3f",
        "kapton_absorption_correction_sigmas": "%.3f",
        "inverse_scale_factor": "%.3f",
        "inverse_scale_factor_variance": "%.3f",
    }

    for rlist in reflections:
        from dials.algorithms.shoebox import MaskCode

        foreground_valid = MaskCode.Valid | MaskCode.Foreground
        text.append("")
        text.append(f"Reflection list contains {len(rlist)} reflections")

        if len(rlist) == 0:
            continue

        rows = [["Column", "min", "max", "mean"]]
        for k, col in rlist.cols():
            if k in formats and "%" not in formats.get(k, "%s"):
                # Allow blanking out of entries that wouldn't make sense
                rows.append([
                    k,
                    formats.get(k, "%s"),
                    formats.get(k, "%s"),
                    formats.get(k, "%s"),
                ])
            elif type(col) in (flex.double, flex.int, flex.size_t):
                if type(col) in (flex.int, flex.size_t):
                    col = col.as_double()
                rows.append([
                    k,
                    formats.get(k, "%s") % flex.min(col),
                    formats.get(k, "%s") % flex.max(col),
                    formats.get(k, "%s") % flex.mean(col),
                ])
            elif type(col) in (flex.vec3_double, flex.miller_index):
                if isinstance(col, flex.miller_index):
                    col = col.as_vec3_double()
                rows.append([
                    k,
                    formats.get(k, "%s") % col.min(),
                    formats.get(k, "%s") % col.max(),
                    formats.get(k, "%s") % col.mean(),
                ])
            elif isinstance(col, flex.shoebox):
                rows.append([k, "", "", ""])
                si = col.summed_intensity().observed_value()
                rows.append([
                    "  summed I",
                    formats.get(k, "%s") % flex.min(si),
                    formats.get(k, "%s") % flex.max(si),
                    formats.get(k, "%s") % flex.mean(si),
                ])
                x1, x2, y1, y2, z1, z2 = col.bounding_boxes().parts()
                bbox_sizes = ((z2 - z1) * (y2 - y1) * (x2 - x1)).as_double()
                rows.append([
                    "  N pix",
                    formats.get(k, "%s") % flex.min(bbox_sizes),
                    formats.get(k, "%s") % flex.max(bbox_sizes),
                    formats.get(k, "%s") % flex.mean(bbox_sizes),
                ])
                fore_valid = col.count_mask_values(
                    foreground_valid).as_double()
                rows.append([
                    "  N valid foreground pix",
                    formats.get(k, "%s") % flex.min(fore_valid),
                    formats.get(k, "%s") % flex.max(fore_valid),
                    formats.get(k, "%s") % flex.mean(fore_valid),
                ])

        text.append(tabulate(rows, headers="firstrow"))

        if show_flags:
            text.append(_create_flag_count_table(rlist))

        if show_identifiers:
            if rlist.experiment_identifiers():
                text.append(
                    """Experiment identifiers id-map values:\n%s""" %
                    ("\n".join(
                        "id:" + str(k) + " -> experiment identifier:" +
                        str(rlist.experiment_identifiers()[k])
                        for k in rlist.experiment_identifiers().keys())))

    intensity_keys = (
        "miller_index",
        "d",
        "intensity.prf.value",
        "intensity.prf.variance",
        "intensity.sum.value",
        "intensity.sum.variance",
        "background.mean",
        "profile.correlation",
        "profile.rmsd",
    )

    profile_fit_keys = ("miller_index", "d")

    centroid_keys = (
        "miller_index",
        "d",
        "xyzcal.mm",
        "xyzcal.px",
        "xyzobs.mm.value",
        "xyzobs.mm.variance",
        "xyzobs.px.value",
        "xyzobs.px.variance",
    )

    keys_to_print = OrderedSet()

    if show_intensities:
        for k in intensity_keys:
            keys_to_print.add(k)
    if show_profile_fit:
        for k in profile_fit_keys:
            keys_to_print.add(k)
    if show_centroids:
        for k in centroid_keys:
            keys_to_print.add(k)
    if show_all_reflection_data:
        for k in formats:
            keys_to_print.add(k)

    def format_column(key, data, format_strings=None):
        if isinstance(data, flex.vec3_double):
            c_strings = [
                c.as_string(format_strings[i].strip())
                for i, c in enumerate(data.parts())
            ]
        elif isinstance(data, flex.miller_index):
            c_strings = [
                c.as_string(format_strings[i].strip())
                for i, c in enumerate(data.as_vec3_double().parts())
            ]
        elif isinstance(data, flex.size_t):
            c_strings = [data.as_int().as_string(format_strings[0].strip())]
        elif isinstance(data, flex.shoebox):
            x1, x2, y1, y2, z1, z2 = data.bounding_boxes().parts()
            bbox_sizes = ((z2 - z1) * (y2 - y1) * (x2 - x1)).as_double()
            c_strings = [bbox_sizes.as_string(format_strings[0].strip())]
            key += " (N pix)"
        else:
            c_strings = [data.as_string(format_strings[0].strip())]

        column = flex.std_string()
        max_element_lengths = [c.max_element_length() for c in c_strings]
        for i in range(len(c_strings[0])):
            column.append(f"%{len(key)}s" % ", ".join(
                ("%%%is" % max_element_lengths[j]) % c_strings[j][i]
                for j in range(len(c_strings))))
        return column

    if keys_to_print:
        keys = [k for k in keys_to_print if k in rlist]
        if max_reflections is not None:
            max_reflections = min(len(rlist), max_reflections)
        else:
            max_reflections = len(rlist)

        columns = []

        for k in keys:
            columns.append(
                format_column(k,
                              rlist[k],
                              format_strings=formats[k].split(",")))

        text.append("")
        text.append("Printing %i of %i reflections:" %
                    (max_reflections, len(rlist)))
        line = []
        for j in range(len(columns)):
            key = keys[j]
            if key == "shoebox":
                key += " (N pix)"
            width = max(len(key), columns[j].max_element_length())
            line.append("%%%is" % width % key)
        text.append(" ".join(line))
        for i in range(max_reflections):
            line = (c[i] for c in columns)
            text.append(" ".join(line))

    return "\n".join(text)
Code example #26
File: observers.py Project: huwjenkins/dials
def print_scaling_summary(script):
    """Log summary information after scaling."""
    logger.info(print_scaling_model_error_summary(script.experiments))
    valid_ranges = get_valid_image_ranges(script.experiments)
    image_ranges = get_image_ranges(script.experiments)
    msg = []
    for (img, valid, refl) in zip(image_ranges, valid_ranges,
                                  script.reflections):
        if valid:
            if (len(valid) > 1 or valid[0][0] != img[0]
                    or valid[-1][1] != img[1]):
                msg.append(
                    "Excluded images for experiment id: %s, image range: %s, limited range: %s"
                    % (
                        refl.experiment_identifiers().keys()[0],
                        list(img),
                        list(valid),
                    ))
    if msg:
        msg = ["Summary of image ranges removed:"] + msg
        logger.info("\n".join(msg))

    # report on partiality of dataset
    partials = flex.double()
    for r in script.reflections:
        if "partiality" in r:
            partials.extend(r["partiality"])
    not_full_sel = partials < 0.99
    not_zero_sel = partials > 0.01
    gt_half = partials > 0.5
    lt_half = partials < 0.5
    partial_gt_half_sel = not_full_sel & gt_half
    partial_lt_half_sel = not_zero_sel & lt_half
    logger.info("Summary of dataset partialities")
    header = ["Partiality (p)", "n_refl"]
    rows = [
        ["all reflections", str(partials.size())],
        ["p > 0.99", str(not_full_sel.count(False))],
        ["0.5 < p < 0.99",
         str(partial_gt_half_sel.count(True))],
        ["0.01 < p < 0.5",
         str(partial_lt_half_sel.count(True))],
        ["p < 0.01", str(not_zero_sel.count(False))],
    ]
    logger.info(tabulate(rows, header))
    logger.info(
        """
Reflections below a partiality_cutoff of %s are not considered for any
part of the scaling analysis or for the reporting of merging statistics.
Additionally, if applicable, only reflections with a min_partiality > %s
were considered for use when refining the scaling model.
""",
        script.params.cut_data.partiality_cutoff,
        script.params.reflection_selection.min_partiality,
    )
    stats = script.merging_statistics_result
    if stats:
        anom_stats, cut_stats, cut_anom_stats = (None, None, None)
        if not script.scaled_miller_array.space_group().is_centric():
            anom_stats = script.anom_merging_statistics_result
        logger.info(make_merging_statistics_summary(stats))
        try:
            d_min = resolution_cc_half(stats, limit=0.3).d_min
        except RuntimeError as e:
            logger.debug(f"Resolution fit failed: {e}")
        else:
            max_current_res = stats.bins[-1].d_min
            if d_min and d_min - max_current_res > 0.005:
                logger.info(
                    "Resolution limit suggested from CC½ fit (limit CC½=0.3): %.2f",
                    d_min,
                )
                try:
                    cut_stats, cut_anom_stats = merging_stats_from_scaled_array(
                        script.scaled_miller_array.resolution_filter(
                            d_min=d_min),
                        script.params.output.merging.nbins,
                        script.params.output.use_internal_variance,
                    )
                except DialsMergingStatisticsError:
                    pass
                else:
                    if script.scaled_miller_array.space_group().is_centric():
                        cut_anom_stats = None
        logger.info(
            table_1_summary(stats, anom_stats, cut_stats, cut_anom_stats))
Code example #27
def select_connected_reflections_across_datasets(Ih_table,
                                                 experiment,
                                                 Isigma_cutoff=2.0,
                                                 min_total=40000,
                                                 n_resolution_bins=20):
    """Select highly connected reflections across datasets."""
    assert Ih_table.n_work_blocks == 1
    Ih_table = Ih_table.Ih_table_blocks[0]
    sel_Ih_table = _select_groups_on_Isigma_cutoff(Ih_table, Isigma_cutoff)

    # now split into resolution bins
    sel_Ih_table.setup_binner(
        experiment.crystal.get_unit_cell(),
        experiment.crystal.get_space_group(),
        n_resolution_bins,
    )
    binner = sel_Ih_table.binner

    # prepare parameters for selection algorithm.
    n_datasets = len(set(sel_Ih_table.Ih_table["dataset_id"]))
    min_per_class = min_total / (n_datasets * 4.0)
    max_total = min_total * 1.2
    logger.info(
        """
Using quasi-random reflection selection. Selecting from %s symmetry groups
with <I/sI> > %s (%s reflections). Selection target of %.2f reflections
from each dataset, with a total number between %.2f and %.2f.
""",
        sel_Ih_table.n_groups,
        Isigma_cutoff,
        sel_Ih_table.size,
        min_per_class,
        min_total,
        max_total,
    )
    # split across resolution bins
    mpc = int(min_per_class / n_resolution_bins)
    mint = int(min_total / n_resolution_bins)
    maxt = int(max_total / n_resolution_bins)

    header = (["d-range", "n_groups", "n_refl"] +
              [str(i) for i in range(n_datasets)])
    rows = []
    if n_datasets >= 15:
        summary_rows = []
        summary_header = ["d-range", "n_groups", "n_refl"]

    indices = flex.size_t()
    dataset_ids = flex.int()
    total_groups_used = 0
    n_cols_used = 0

    for ibin in binner.range_all():
        sel = binner.selection(ibin)
        res_Ih_table = sel_Ih_table.select(sel)
        if not res_Ih_table.Ih_table.size():
            continue

        (
            indices_this_res,
            dataset_ids_this_res,
            n_groups_used,
            total_per_dataset,
        ) = _perform_quasi_random_selection(res_Ih_table, n_datasets, mpc,
                                            mint, maxt)

        indices.extend(indices_this_res)
        dataset_ids.extend(dataset_ids_this_res)
        total_groups_used += n_groups_used
        d0, d1 = binner.bin_d_range(ibin)
        drange = str(round(d0, 3)) + " - " + str(round(d1, 3))
        n_refl = str(int(indices_this_res.size()))
        rows.append([drange, str(n_groups_used), n_refl] +
                    [str(int(i)) for i in total_per_dataset])
        if n_datasets >= 15:
            summary_rows.append([drange, str(n_groups_used), n_refl])
        n_cols_used += n_groups_used

    logger.info(
        "Summary of cross-dataset reflection groups chosen (%s groups, %s reflections):",
        n_cols_used,
        indices.size(),
    )
    if n_datasets < 15:
        logger.info(tabulate(rows, header))
    else:
        logger.info(tabulate(summary_rows, summary_header))
        logger.debug(tabulate(rows, header))

    return indices, dataset_ids
Code example #28
    def __str__(self):
        U = matrix.sqr(self.experiment.crystal.get_U())
        B = matrix.sqr(self.experiment.crystal.get_B())

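        # a, b, c and a_star, b_star, c_star are assumed to be module-level
        # unit basis vectors (matrix.col objects) in the project's source;
        # they are not defined in this snippet.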
        a_star_ = U * B * a_star
        b_star_ = U * B * b_star
        c_star_ = U * B * c_star

        Binvt = B.inverse().transpose()

        a_ = U * Binvt * a
        b_ = U * Binvt * b
        c_ = U * Binvt * c

        names = self.experiment.goniometer.get_names()
        axes = self.experiment.goniometer.get_axes()
        rows = [["Experimental axis", "a*", "b*", "c*"]]
        rows.append([names[0]] + [
            "%.3f" % smallest_angle(axis.angle(matrix.col(axes[0]), deg=True))
            for axis in (a_star_, b_star_, c_star_)
        ])
        rows.append(["Beam"] + [
            "%.3f" % smallest_angle(axis.angle(self.s0, deg=True))
            for axis in (a_star_, b_star_, c_star_)
        ])
        rows.append([names[2]] + [
            "%.3f" % smallest_angle(axis.angle(matrix.col(axes[2]), deg=True))
            for axis in (a_star_, b_star_, c_star_)
        ])

        output = []
        output.append(
            "Angles between reciprocal cell axes and principal experimental axes:"
        )
        output.append(tabulate(rows, headers="firstrow"))
        output.append("")

        rows = [["Experimental axis", "a", "b", "c"]]
        rows.append([names[0]] + [
            "%.3f" % smallest_angle(axis.angle(matrix.col(axes[0]), deg=True))
            for axis in (a_, b_, c_)
        ])
        rows.append(["Beam"] + [
            "%.3f" % smallest_angle(axis.angle(self.s0, deg=True))
            for axis in (a_, b_, c_)
        ])
        rows.append([names[2]] + [
            "%.3f" % smallest_angle(axis.angle(matrix.col(axes[2]), deg=True))
            for axis in (a_, b_, c_)
        ])
        output.append(
            "Angles between unit cell axes and principal experimental axes:")
        output.append(tabulate(rows, headers="firstrow"))
        output.append("")

        names = self.experiment.goniometer.get_names()

        space_group = self.experiment.crystal.get_space_group()
        reciprocal = self.frame == "reciprocal"
        rows = []
        for angles, vector_pairs in self.unique_solutions.items():
            v1, v2 = list(vector_pairs)[0]
            rows.append((
                describe(v1, space_group, reciprocal=reciprocal),
                describe(v2, space_group, reciprocal=reciprocal),
                "% 7.3f" % angles[0],
                "% 7.3f" % angles[1],
            ))
        rows = [("Primary axis", "Secondary axis", names[1], names[0])
                ] + sorted(rows)
        output.append("Independent solutions:")
        output.append(tabulate(rows, headers="firstrow"))

        return "\n".join(output)
Code example #29
def symmetry(experiments, reflection_tables, params=None):
    """
    Run symmetry analysis

    Args:
        experiments: An experiment list.
        reflection_tables: A list of reflection tables.
        params: The dials.symmetry phil scope.
    """
    result = None
    if params is None:
        params = phil_scope.extract()
    refls_for_sym = []

    if params.laue_group is Auto:
        logger.info("=" * 80)
        logger.info("")
        logger.info("Performing Laue group analysis")
        logger.info("")

        # Transform models into miller arrays
        n_datasets = len(experiments)

        # Map experiments and reflections to minimum cell
        # Eliminate reflections that are systematically absent due to centring
        # of the lattice, otherwise they would lead to non-integer miller indices
        # when reindexing to a primitive setting
        cb_ops = change_of_basis_ops_to_minimum_cell(
            experiments,
            params.lattice_symmetry_max_delta,
            params.relative_length_tolerance,
            params.absolute_angle_tolerance,
        )
        reflection_tables = eliminate_sys_absent(experiments,
                                                 reflection_tables)
        experiments, reflection_tables = apply_change_of_basis_ops(
            experiments, reflection_tables, cb_ops)

        refls_for_sym = get_subset_for_symmetry(experiments, reflection_tables,
                                                params.exclude_images)

        datasets = filtered_arrays_from_experiments_reflections(
            experiments,
            refls_for_sym,
            outlier_rejection_after_filter=True,
            partiality_threshold=params.partiality_threshold,
        )
        if len(datasets) != n_datasets:
            raise ValueError(
                """Some datasets have no reflections after prefiltering, please check
    input data and filtering settings e.g. partiality_threshold""")

        datasets = [
            ma.as_anomalous_array().merge_equivalents().array()
            for ma in datasets
        ]
        result = LaueGroupAnalysis(
            datasets,
            normalisation=params.normalisation,
            d_min=params.d_min,
            min_i_mean_over_sigma_mean=params.min_i_mean_over_sigma_mean,
            lattice_symmetry_max_delta=params.lattice_symmetry_max_delta,
            relative_length_tolerance=params.relative_length_tolerance,
            absolute_angle_tolerance=params.absolute_angle_tolerance,
            best_monoclinic_beta=params.best_monoclinic_beta,
        )
        logger.info("")
        logger.info(result)

        if params.output.json is not None:
            d = result.as_dict()
            d["cb_op_inp_min"] = [str(cb_op) for cb_op in cb_ops]
            # Copy the "input_symmetry" to "min_cell_symmetry" as it isn't technically
            # the input symmetry to dials.symmetry
            d["min_cell_symmetry"] = d["input_symmetry"]
            del d["input_symmetry"]
            json_str = json.dumps(d, indent=2)
            with open(params.output.json, "w") as f:
                f.write(json_str)

        # Change of basis operator from input unit cell to best unit cell
        cb_op_inp_best = result.best_solution.subgroup["cb_op_inp_best"]
        # Get the best space group.
        best_subsym = result.best_solution.subgroup["best_subsym"]
        best_space_group = (
            best_subsym.space_group().build_derived_acentric_group())
        logger.info(
            tabulate(
                [[
                    str(best_subsym.space_group_info()),
                    str(best_space_group.info())
                ]],
                ["Patterson group", "Corresponding MX group"],
            ))
        # Reindex the input data
        experiments, reflection_tables = _reindex_experiments_reflections(
            experiments, reflection_tables, best_space_group, cb_op_inp_best)

    elif params.laue_group is not None:
        if params.change_of_basis_op is not None:
            cb_op = sgtbx.change_of_basis_op(params.change_of_basis_op)
        else:
            cb_op = sgtbx.change_of_basis_op()
        # Reindex the input data
        experiments, reflection_tables = _reindex_experiments_reflections(
            experiments, reflection_tables, params.laue_group.group(), cb_op)

    if params.systematic_absences.check:
        logger.info("=" * 80)
        logger.info("")
        logger.info("Analysing systematic absences")
        logger.info("")

        # Get the laue class from the current space group.
        space_group = experiments[0].crystal.get_space_group()
        laue_group = str(space_group.build_derived_patterson_group().info())
        logger.info("Laue group: %s", laue_group)
        if laue_group in ("I m -3", "I m m m"):
            if laue_group == "I m -3":
                logger.info(
                    """Space groups I 2 3 & I 21 3 cannot be distinguished with systematic absence
analysis, due to lattice centering.
Using space group I 2 3, space group I 21 3 is equally likely.\n""")
            if laue_group == "I m m m":
                logger.info(
                    """Space groups I 2 2 2 & I 21 21 21 cannot be distinguished with systematic absence
analysis, due to lattice centering.
Using space group I 2 2 2, space group I 21 21 21 is equally likely.\n""")
        elif laue_group not in laue_groups_for_absence_analysis:
            logger.info("No absences to check for this laue group\n")
        else:
            if not refls_for_sym:
                refls_for_sym = get_subset_for_symmetry(
                    experiments, reflection_tables, params.exclude_images)

            if (params.d_min is Auto) and (result is not None):
                d_min = result.intensities.resolution_range()[1]
            elif params.d_min is Auto:
                d_min = resolution_filter_from_reflections_experiments(
                    refls_for_sym,
                    experiments,
                    params.min_i_mean_over_sigma_mean,
                    params.min_cc_half,
                )
            else:
                d_min = params.d_min

            # combine before sys abs test - only triggers if laue_group=None and
            # multiple input files.
            if len(reflection_tables) > 1:
                joint_reflections = flex.reflection_table()
                for table in refls_for_sym:
                    joint_reflections.extend(table)
            else:
                joint_reflections = refls_for_sym[0]

            merged_reflections = prepare_merged_reflection_table(
                experiments, joint_reflections, d_min)
            run_systematic_absences_checks(
                experiments,
                merged_reflections,
                float(params.systematic_absences.significance_level),
            )

    logger.info(
        "Saving reindexed experiments to %s in space group %s",
        params.output.experiments,
        str(experiments[0].crystal.get_space_group().info()),
    )
    experiments.as_file(params.output.experiments)
    if params.output.reflections is not None:
        if len(reflection_tables) > 1:
            joint_reflections = flex.reflection_table()
            for table in reflection_tables:
                joint_reflections.extend(table)
        else:
            joint_reflections = reflection_tables[0]
        logger.info(
            "Saving %s reindexed reflections to %s",
            len(joint_reflections),
            params.output.reflections,
        )
        joint_reflections.as_file(params.output.reflections)

    if params.output.html and params.systematic_absences.check:
        ScrewAxisObserver().generate_html_report(params.output.html)
Code example #30
def run(args=None):
    usage = "dials.spot_counts_per_image [options] imported.expt strong.refl"

    parser = OptionParser(
        usage=usage,
        read_reflections=True,
        read_experiments=True,
        phil=phil_scope,
        check_format=False,
        epilog=help_message,
    )

    params, options = parser.parse_args(args, show_diff_phil=False)
    reflections, experiments = reflections_and_experiments_from_files(
        params.input.reflections, params.input.experiments)

    if not reflections and not experiments:
        parser.print_help()
        return

    # FIXME may want to change this to allow many to be passed i.e.
    # from parallel runs
    if len(reflections) != 1:
        sys.exit("Only one reflection list may be passed")
    reflections = reflections[0]

    if "miller_index" in reflections:
        sys.exit("Only unindexed reflections are currently supported")

    if any(experiments.crystals()):
        sys.exit("Only unindexed experiments are currently supported")

    reflections.centroid_px_to_mm(experiments)
    reflections.map_centroids_to_reciprocal_space(experiments)

    if params.id is not None:
        reflections = reflections.select(reflections["id"] == params.id)

    all_stats = []
    for i, expt in enumerate(experiments):
        refl = reflections.select(reflections["id"] == i)
        stats = per_image_analysis.stats_per_image(
            expt, refl, resolution_analysis=params.resolution_analysis)
        all_stats.append(stats)

    # transpose stats
    summary_table = {}
    for s in all_stats:
        for k, value in s._asdict().items():
            summary_table.setdefault(k, [])
            summary_table[k].extend(value)
    stats = per_image_analysis.StatsMultiImage(**summary_table)
    print(stats)

    overall_stats = per_image_analysis.stats_for_reflection_table(
        reflections, resolution_analysis=params.resolution_analysis)
    rows = [
        ("Overall statistics", ""),
        ("#spots", "%i" % overall_stats.n_spots_total),
        ("#spots_no_ice", "%i" % overall_stats.n_spots_no_ice),
        ("d_min", f"{overall_stats.estimated_d_min:.2f}"),
        (
            "d_min (distl method 1)",
            "%.2f (%.2f)" % (overall_stats.d_min_distl_method_1,
                             overall_stats.noisiness_method_1),
        ),
        (
            "d_min (distl method 2)",
            "%.2f (%.2f)" % (overall_stats.d_min_distl_method_2,
                             overall_stats.noisiness_method_2),
        ),
    ]
    print(tabulate(rows, headers="firstrow"))

    if params.json:
        if params.split_json:
            for k, v in stats._asdict().items():
                start, end = params.json.split(".")
                with open(f"{start}_{k}.{end}", "w") as fp:
                    json.dump(v, fp)
        if params.joint_json:
            with open(params.json, "w") as fp:
                json.dump(stats._asdict(), fp)
    if params.plot:
        import matplotlib

        matplotlib.use("Agg")
        per_image_analysis.plot_stats(stats, filename=params.plot)