Example #1
def test_stats_per_image(centroid_test_data):
    experiments, reflections = centroid_test_data
    stats = per_image_analysis.stats_per_image(experiments[0], reflections)
    result = stats._asdict()
    for v in result.values():
        assert len(v) == len(experiments[0].scan)
    assert stats.n_spots_total == [90, 100, 67, 49, 54, 62, 68, 83, 81]
    t = stats.as_table()
    assert len(t) == len(experiments[0].scan) + 1
    assert t[0] == [
        "image",
        "#spots",
        "#spots_no_ice",
        "total_intensity",
        "d_min",
        "d_min (distl method 1)",
        "d_min (distl method 2)",
    ]
    assert t[1] == ["1", "90", "77", "28214", "1.56", "2.08 (0.09)", "1.59 (0.27)"]
    # Test n_rows option
    t = stats.as_table(n_rows=3)
    assert len(t) == 4
    # Test perm option
    perm = flex.random_permutation(len(experiments[0].scan))
    t = stats.as_table(perm=perm)
    assert [tt[0] for tt in t[1:]] == [str(i + 1) for i in perm]
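The `perm` argument to `as_table` can also order the rows by a statistic rather than by image number. The following is a minimal sketch, not taken from the source, that sorts the table by descending spot count; it reuses `stats` from the test above and assumes only the `n_spots_total` list and the `as_table(perm=...)` behaviour demonstrated there.

def table_sorted_by_spot_count(stats):
    # n_spots_total is one count per image (see the assertion above);
    # sort_permutation gives the row order to pass to as_table.
    perm = flex.sort_permutation(flex.int(stats.n_spots_total), reverse=True)
    return stats.as_table(perm=perm)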
Example #2
def test_stats_table_no_resolution_analysis(centroid_test_data):
    experiments, reflections = centroid_test_data
    stats = per_image_analysis.stats_per_image(
        experiments[0], reflections, resolution_analysis=False
    )
    t = stats.as_table()
    assert t[0] == ["image", "#spots", "#spots_no_ice", "total_intensity"]
Example #3
def test_plot_stats(centroid_test_data, tmpdir):
    experiments, reflections = centroid_test_data
    stats = per_image_analysis.stats_per_image(
        experiments[0], reflections, resolution_analysis=False
    )
    image_file = tmpdir.join("pia.png")
    per_image_analysis.plot_stats(stats, filename=image_file.strpath)
    assert image_file.check()
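`plot_stats` renders a matplotlib figure to the given filename, so in a headless environment a non-interactive backend may need to be selected first; Example #4 below does exactly this before calling `plot_stats`. A short sketch reusing `stats` from the test above, with a placeholder output filename:

import matplotlib

matplotlib.use("Agg")  # non-interactive backend, works without a display
per_image_analysis.plot_stats(stats, filename="per_image_stats.png")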
Example #4
def run(args=None):
    usage = "dials.spot_counts_per_image [options] imported.expt strong.refl"

    parser = OptionParser(
        usage=usage,
        read_reflections=True,
        read_experiments=True,
        phil=phil_scope,
        check_format=False,
        epilog=help_message,
    )

    params, options = parser.parse_args(args, show_diff_phil=False)
    reflections, experiments = reflections_and_experiments_from_files(
        params.input.reflections, params.input.experiments)

    if not reflections and not experiments:
        parser.print_help()
        return

    # FIXME may want to change this to allow many to be passed i.e.
    # from parallel runs
    if len(reflections) != 1:
        sys.exit("Only one reflection list may be passed")
    reflections = reflections[0]

    if "miller_index" in reflections:
        sys.exit("Only unindexed reflections are currently supported")

    if any(experiments.crystals()):
        sys.exit("Only unindexed experiments are currently supported")

    reflections.centroid_px_to_mm(experiments)
    reflections.map_centroids_to_reciprocal_space(experiments)

    if params.id is not None:
        reflections = reflections.select(reflections["id"] == params.id)

    all_stats = []
    for i, expt in enumerate(experiments):
        refl = reflections.select(reflections["id"] == i)
        stats = per_image_analysis.stats_per_image(
            expt, refl, resolution_analysis=params.resolution_analysis)
        all_stats.append(stats)

    # transpose stats
    summary_table = {}
    for s in all_stats:
        for k, value in s._asdict().items():
            summary_table.setdefault(k, [])
            summary_table[k].extend(value)
    stats = per_image_analysis.StatsMultiImage(**summary_table)
    print(stats)

    overall_stats = per_image_analysis.stats_for_reflection_table(
        reflections, resolution_analysis=params.resolution_analysis)
    rows = [
        ("Overall statistics", ""),
        ("#spots", "%i" % overall_stats.n_spots_total),
        ("#spots_no_ice", "%i" % overall_stats.n_spots_no_ice),
        ("d_min", f"{overall_stats.estimated_d_min:.2f}"),
        (
            "d_min (distl method 1)",
            "%.2f (%.2f)" % (overall_stats.d_min_distl_method_1,
                             overall_stats.noisiness_method_1),
        ),
        (
            "d_min (distl method 2)",
            "%.2f (%.2f)" % (overall_stats.d_min_distl_method_2,
                             overall_stats.noisiness_method_2),
        ),
    ]
    print(tabulate(rows, headers="firstrow"))

    if params.json:
        if params.split_json:
            for k, v in stats._asdict().items():
                start, end = params.json.split(".")
                with open(f"{start}_{k}.{end}", "w") as fp:
                    json.dump(v, fp)
        if params.joint_json:
            with open(params.json, "w") as fp:
                json.dump(stats._asdict(), fp)
    if params.plot:
        import matplotlib

        matplotlib.use("Agg")
        per_image_analysis.plot_stats(stats, filename=params.plot)
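Going by the usage string at the top of `run`, an invocation might look like the sketch below; the file names are placeholders, and the `json=` and `plot=` options simply mirror the `params.json` and `params.plot` branches above rather than being verified command-line syntax.

# Hedged sketch: drive run() programmatically with placeholder file names
run(["imported.expt", "strong.refl", "json=spot_counts.json", "plot=spot_counts.png"])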
Example #5
    def run(self, args=None):
        """Execute the script."""

        # Parse the command line
        params, options = self.parser.parse_args(args=args,
                                                 show_diff_phil=False)

        if __name__ == "__main__":
            # Only configure logging when this module is run as a script,
            # not when the class is used as a library
            log.config(verbosity=options.verbose, logfile=params.output.log)
        logger.info(dials_version())

        # Log the diff phil
        diff_phil = self.parser.diff_phil.as_str()
        if diff_phil != "":
            logger.info("The following parameters have been modified:\n")
            logger.info(diff_phil)

        # Ensure we have a data block
        experiments = flatten_experiments(params.input.experiments)

        # Did the input experiments already have identifiers?
        had_identifiers = all(i != "" for i in experiments.identifiers())
        if not had_identifiers:
            # Add identifiers, e.g. if the input came straight from images
            generate_experiment_identifiers(experiments)

        if len(experiments) == 0:
            self.parser.print_help()
            return

        # If maximum_trusted_value assigned, use this temporarily for the
        # spot finding
        if params.maximum_trusted_value is not None:
            logger.info("Overriding maximum trusted value to %.1f",
                        params.maximum_trusted_value)
            input_trusted_ranges = {}
            for _d, detector in enumerate(experiments.detectors()):
                for _p, panel in enumerate(detector):
                    trusted = panel.get_trusted_range()
                    input_trusted_ranges[(_d, _p)] = trusted
                    panel.set_trusted_range(
                        (trusted[0], params.maximum_trusted_value))

        # Loop through all the imagesets and find the strong spots
        reflections = flex.reflection_table.from_observations(
            experiments, params)

        # Add n_signal column - before deleting shoeboxes
        good = MaskCode.Foreground | MaskCode.Valid
        reflections["n_signal"] = reflections["shoebox"].count_mask_values(
            good)

        # Delete the shoeboxes
        if not params.output.shoeboxes:
            del reflections["shoebox"]

        # ascii spot count per image plot - per imageset

        imagesets = []
        for i, experiment in enumerate(experiments):
            if experiment.imageset not in imagesets:
                imagesets.append(experiment.imageset)

        for i, imageset in enumerate(imagesets):
            selected = flex.bool(reflections.nrows(), False)
            for j, experiment in enumerate(experiments):
                if experiment.imageset is not imageset:
                    continue
                selected.set_selected(reflections["id"] == j, True)
            ascii_plot = spot_counts_per_image_plot(
                reflections.select(selected))
            if len(ascii_plot):
                logger.info(
                    "\nHistogram of per-image spot count for imageset %i:" % i)
                logger.info(ascii_plot)

        # Save the reflections to file
        logger.info("\n" + "-" * 80)
        # If we started from images and are not saving experiments, remove the id
        # mapping as the linked experiments will no longer exist after exit.
        if not had_identifiers:
            if not params.output.experiments:
                for k in reflections.experiment_identifiers().keys():
                    del reflections.experiment_identifiers()[k]

        reflections.as_file(params.output.reflections)
        logger.info("Saved {} reflections to {}".format(
            len(reflections), params.output.reflections))

        # Reset the trusted ranges
        if params.maximum_trusted_value is not None:
            for _d, detector in enumerate(experiments.detectors()):
                for _p, panel in enumerate(detector):
                    trusted = input_trusted_ranges[(_d, _p)]
                    panel.set_trusted_range(trusted)

        # Save the experiments
        if params.output.experiments:

            logger.info("Saving experiments to {}".format(
                params.output.experiments))
            experiments.as_file(params.output.experiments)

        # Print some per image statistics
        if params.per_image_statistics:
            for i, experiment in enumerate(experiments):
                logger.info("Number of centroids per image for imageset %i:",
                            i)
                refl = reflections.select(reflections["id"] == i)
                refl.centroid_px_to_mm([experiment])
                refl.map_centroids_to_reciprocal_space([experiment])
                stats = per_image_analysis.stats_per_image(
                    experiment, refl, resolution_analysis=False)
                logger.info(str(stats))

        if params.output.experiments:
            return experiments, reflections
        else:
            return reflections
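The method above relies on `self.parser`, so it presumably lives on a command-line script class. The sketch below shows how such a class might be driven; the class name `Script`, the input file name, and the `output.reflections=` option (taken from `params.output.reflections` above) are assumptions, not details from the excerpt.

# Assumption: the enclosing class is named Script and builds self.parser in __init__.
script = Script()
result = script.run(["imported.expt", "output.reflections=strong.refl"])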