Example 1
0
def test_stats_for_reflection_table(centroid_test_data):
    """Verify stats_for_reflection_table reproduces known reference values."""
    _, reflections = centroid_test_data
    stats = per_image_analysis.stats_for_reflection_table(reflections)
    result = stats._asdict()
    # Reference values captured from a previous trusted run on this dataset.
    reference = {
        "d_min_distl_method_1": 1.8771983880778702,
        "d_min_distl_method_2": 1.6293601446185495,
        "estimated_d_min": 1.446715534174674,
        "n_spots_4A": 76,
        "n_spots_no_ice": 578,
        "n_spots_total": 654,
        "noisiness_method_1": 0.021021021021021023,
        "noisiness_method_2": 0.0858974358974359,
        "total_intensity": 919847.0,
    }
    for key in reference:
        assert result[key] == pytest.approx(reference[key])
Example 2
0
def test_stats_for_reflection_table(centroid_test_data):
    """Verify stats_for_reflection_table reproduces known reference values."""
    _, reflections = centroid_test_data
    stats = per_image_analysis.stats_for_reflection_table(reflections)
    result = stats._asdict()
    # A dict literal is clearer and faster than dict() over a list of
    # key/value tuples (flake8-comprehensions C406).
    expected = {
        "d_min_distl_method_1": 1.8771983880778702,
        "d_min_distl_method_2": 1.6293601446185495,
        "estimated_d_min": 1.446715534174674,
        "n_spots_4A": 76,
        "n_spots_no_ice": 578,
        "n_spots_total": 654,
        "noisiness_method_1": 0.021021021021021023,
        "noisiness_method_2": 0.0858974358974359,
        "total_intensity": 919847.0,
    }
    for k, v in expected.items():
        assert result[k] == pytest.approx(v)
Example 3
0
def test_stats_for_reflection_table_no_resolution_analysis_no_ice_filtering(
    centroid_test_data, ):
    """With resolution analysis and ice filtering disabled, the resolution
    statistics are sentinel -1.0 and no spots are excluded as ice."""
    _, reflections = centroid_test_data
    stats = per_image_analysis.stats_for_reflection_table(
        reflections, resolution_analysis=False, filter_ice=False)
    result = stats._asdict()
    # -1.0 marks "not computed"; n_spots_no_ice equals n_spots_total because
    # ice-ring filtering is off, and total_intensity includes all spots.
    reference = {
        "d_min_distl_method_1": -1.0,
        "d_min_distl_method_2": -1.0,
        "estimated_d_min": -1.0,
        "n_spots_4A": 76,
        "n_spots_no_ice": 654,
        "n_spots_total": 654,
        "noisiness_method_1": -1.0,
        "noisiness_method_2": -1.0,
        "total_intensity": 961472.0,
    }
    for key, value in sorted(reference.items()):
        assert result[key] == pytest.approx(value)
Example 4
0
def run(args=None):
    """Entry point for ``dials.spot_counts_per_image``.

    Parses the command line, computes per-image spot statistics for a
    single unindexed reflection table, prints a per-image table plus an
    overall summary, and optionally writes JSON output and/or a plot.

    Parameters
    ----------
    args:
        Optional list of command-line arguments; ``None`` means use
        ``sys.argv``.

    Exits via ``sys.exit`` on invalid input (multiple reflection lists,
    indexed reflections, or indexed experiments).
    """
    from collections import defaultdict

    usage = "dials.spot_counts_per_image [options] imported.expt strong.refl"

    parser = OptionParser(
        usage=usage,
        read_reflections=True,
        read_experiments=True,
        phil=phil_scope,
        check_format=False,
        epilog=help_message,
    )

    params, options = parser.parse_args(args, show_diff_phil=False)
    reflections, experiments = reflections_and_experiments_from_files(
        params.input.reflections, params.input.experiments)

    if not reflections and not experiments:
        parser.print_help()
        return

    # FIXME may want to change this to allow many to be passed i.e.
    # from parallel runs
    if len(reflections) != 1:
        sys.exit("Only one reflection list may be passed")
    reflections = reflections[0]

    if "miller_index" in reflections:
        sys.exit("Only unindexed reflections are currently supported")

    if any(experiments.crystals()):
        sys.exit("Only unindexed experiments are currently supported")

    reflections.centroid_px_to_mm(experiments)
    reflections.map_centroids_to_reciprocal_space(experiments)

    # Optionally restrict the analysis to a single experiment id.
    if params.id is not None:
        reflections = reflections.select(reflections["id"] == params.id)

    all_stats = []
    for i, expt in enumerate(experiments):
        refl = reflections.select(reflections["id"] == i)
        stats = per_image_analysis.stats_per_image(
            expt, refl, resolution_analysis=params.resolution_analysis)
        all_stats.append(stats)

    # Transpose the per-experiment stats: one list per statistic name,
    # concatenated across experiments.
    summary_table = defaultdict(list)
    for s in all_stats:
        for k, value in s._asdict().items():
            summary_table[k].extend(value)
    stats = per_image_analysis.StatsMultiImage(**summary_table)
    print(stats)

    overall_stats = per_image_analysis.stats_for_reflection_table(
        reflections, resolution_analysis=params.resolution_analysis)
    rows = [
        ("Overall statistics", ""),
        ("#spots", f"{overall_stats.n_spots_total}"),
        ("#spots_no_ice", f"{overall_stats.n_spots_no_ice}"),
        ("d_min", f"{overall_stats.estimated_d_min:.2f}"),
        (
            "d_min (distl method 1)",
            f"{overall_stats.d_min_distl_method_1:.2f} "
            f"({overall_stats.noisiness_method_1:.2f})",
        ),
        (
            "d_min (distl method 2)",
            f"{overall_stats.d_min_distl_method_2:.2f} "
            f"({overall_stats.noisiness_method_2:.2f})",
        ),
    ]
    print(tabulate(rows, headers="firstrow"))

    if params.json:
        if params.split_json:
            # One JSON file per statistic: stats.json -> stats_<key>.json.
            # rsplit on the last dot so paths containing extra dots
            # (e.g. "run.1/stats.json") don't raise a ValueError.
            stem, ext = params.json.rsplit(".", 1)
            for k, v in stats._asdict().items():
                with open(f"{stem}_{k}.{ext}", "w") as fp:
                    json.dump(v, fp)
        if params.joint_json:
            with open(params.json, "w") as fp:
                json.dump(stats._asdict(), fp)
    if params.plot:
        import matplotlib

        # Non-interactive backend so plotting works headless.
        matplotlib.use("Agg")
        per_image_analysis.plot_stats(stats, filename=params.plot)
def work(filename, cl=None):
    """Run spotfinding (and optionally indexing and integration) on one image.

    Parameters
    ----------
    filename:
        Path of the image file to analyse.
    cl:
        Optional list of command-line-style phil arguments.  They are
        consumed in stages: first the local options defined below, then
        the leftovers are forwarded to the spotfinding, indexing and
        integration phil scopes in turn.

    Returns
    -------
    dict
        Per-image spot statistics (from
        ``per_image_analysis.stats_for_reflection_table``), extended with
        indexing results ("lattices", "n_indexed", "fraction_indexed"),
        integration results ("integrated_intensity"), or an "error" entry
        when a later stage fails.
    """
    # Avoid a mutable default argument; each call gets its own list.
    if cl is None:
        cl = []

    # Local options controlling ice-ring filtering and whether the
    # optional indexing/integration stages run.
    phil_scope = libtbx.phil.parse("""\
ice_rings {
  filter = True
    .type = bool
  width = 0.004
    .type = float(value_min=0.0)
}
index = False
  .type = bool
integrate = False
  .type = bool
indexing_min_spots = 10
  .type = int(value_min=1)
""")
    interp = phil_scope.command_line_argument_interpreter()
    # Arguments not recognised here are kept in `unhandled` and passed on
    # to the later phil scopes.
    params, unhandled = interp.process_and_fetch(
        cl, custom_processor="collect_remaining")
    filter_ice = params.extract().ice_rings.filter
    ice_rings_width = params.extract().ice_rings.width
    index = params.extract().index
    integrate = params.extract().integrate
    indexing_min_spots = params.extract().indexing_min_spots

    from dials.command_line.find_spots import phil_scope as find_spots_phil_scope
    from dxtbx.model.experiment_list import ExperimentListFactory
    from dials.array_family import flex

    # --- Spotfinding stage ---
    interp = find_spots_phil_scope.command_line_argument_interpreter()
    phil_scope, unhandled = interp.process_and_fetch(
        unhandled, custom_processor="collect_remaining")
    logger.info("The following spotfinding parameters have been modified:")
    logger.info(find_spots_phil_scope.fetch_diff(source=phil_scope).as_str())
    params = phil_scope.extract()
    # no need to write the hot mask in the server/client
    params.spotfinder.write_hot_mask = False
    experiments = ExperimentListFactory.from_filenames([filename])
    t0 = time.time()
    reflections = flex.reflection_table.from_observations(experiments, params)
    t1 = time.time()
    logger.info("Spotfinding took %.2f seconds" % (t1 - t0))
    from dials.algorithms.spot_finding import per_image_analysis

    imageset = experiments.imagesets()[0]
    reflections.centroid_px_to_mm(experiments)
    reflections.map_centroids_to_reciprocal_space(experiments)
    # stats is a plain dict from here on; later stages add keys to it.
    stats = per_image_analysis.stats_for_reflection_table(
        reflections, filter_ice=filter_ice,
        ice_rings_width=ice_rings_width)._asdict()
    t2 = time.time()
    logger.info("Resolution analysis took %.2f seconds" % (t2 - t1))

    # --- Optional indexing stage (only if enough non-ice spots found) ---
    if index and stats["n_spots_no_ice"] > indexing_min_spots:
        logging.basicConfig(stream=sys.stdout, level=logging.INFO)
        from dials.algorithms.indexing import indexer
        from dials.command_line.index import phil_scope as index_phil_scope

        interp = index_phil_scope.command_line_argument_interpreter()
        phil_scope, unhandled = interp.process_and_fetch(
            unhandled, custom_processor="collect_remaining")
        logger.info("The following indexing parameters have been modified:")
        index_phil_scope.fetch_diff(source=phil_scope).show()
        params = phil_scope.extract()

        # A scan declared "still" cannot keep its goniometer/scan models;
        # strip them so the indexer treats the image as a still.
        if (imageset.get_goniometer() is not None
                and imageset.get_scan() is not None
                and imageset.get_scan().is_still()):
            imageset.set_goniometer(None)
            imageset.set_scan(None)

        try:
            idxr = indexer.Indexer.from_parameters(reflections,
                                                   experiments,
                                                   params=params)
            indexing_results = []
            idxr.index()
            # Indexed spots, excluding centroid outliers flagged by refinement.
            indexed_sel = idxr.refined_reflections.get_flags(
                idxr.refined_reflections.flags.indexed)
            indexed_sel &= ~(idxr.refined_reflections.get_flags(
                idxr.refined_reflections.flags.centroid_outlier))
            # Summarise each candidate lattice found.
            for i_expt, expt in enumerate(idxr.refined_experiments):
                sel = idxr.refined_reflections["id"] == i_expt
                sel &= indexed_sel
                indexing_results.append({
                    "crystal":
                    expt.crystal.to_dict(),
                    "n_indexed":
                    sel.count(True),
                    "fraction_indexed":
                    sel.count(True) / sel.size(),
                })
            stats["lattices"] = indexing_results
            stats["n_indexed"] = indexed_sel.count(True)
            stats["fraction_indexed"] = indexed_sel.count(True) / len(
                reflections)
        except Exception as e:
            # Best-effort: indexing failure is reported, not fatal.
            logger.error(e)
            stats["error"] = str(e)
        finally:
            t3 = time.time()
            logger.info("Indexing took %.2f seconds" % (t3 - t2))

        # --- Optional integration stage (only if indexing succeeded) ---
        if integrate and "lattices" in stats:

            from dials.algorithms.profile_model.factory import ProfileModelFactory
            from dials.algorithms.integration.integrator import IntegratorFactory
            from dials.command_line.integrate import phil_scope as integrate_phil_scope

            interp = integrate_phil_scope.command_line_argument_interpreter()
            phil_scope, unhandled = interp.process_and_fetch(
                unhandled, custom_processor="collect_remaining")
            # NOTE(review): logger.error for an informational message looks
            # unintentional — the sibling stages use logger.info; confirm.
            logger.error(
                "The following integration parameters have been modified:")
            integrate_phil_scope.fetch_diff(source=phil_scope).show()
            params = phil_scope.extract()

            try:
                params.profile.gaussian_rs.min_spots = 0

                experiments = idxr.refined_experiments
                reference = idxr.refined_reflections

                predicted = flex.reflection_table.from_predictions_multi(
                    experiments,
                    dmin=params.prediction.d_min,
                    dmax=params.prediction.d_max,
                    margin=params.prediction.margin,
                    force_static=params.prediction.force_static,
                )

                # Match predictions against the refined (reference) spots.
                matched, reference, unmatched = predicted.match_with_reference(
                    reference)
                assert len(matched) == len(predicted)
                assert matched.count(True) <= len(reference)
                if matched.count(True) == 0:
                    raise Sorry("""
            Invalid input for reference reflections.
            Zero reference spots were matched to predictions
          """)
                elif matched.count(True) != len(reference):
                    logger.info("")
                    logger.info("*" * 80)
                    logger.info(
                        "Warning: %d reference spots were not matched to predictions"
                        % (len(reference) - matched.count(True)))
                    logger.info("*" * 80)
                    logger.info("")

                # Compute the profile model
                experiments = ProfileModelFactory.create(
                    params, experiments, reference)

                # Compute the bounding box
                predicted.compute_bbox(experiments)

                # Create the integrator
                integrator = IntegratorFactory.create(params, experiments,
                                                      predicted)

                # Integrate the reflections
                reflections = integrator.integrate()

                # print len(reflections)

                stats["integrated_intensity"] = flex.sum(
                    reflections["intensity.sum.value"])
            except Exception as e:
                # Best-effort: integration failure is reported, not fatal.
                logger.error(e)
                stats["error"] = str(e)
            finally:
                t4 = time.time()
                logger.info("Integration took %.2f seconds" % (t4 - t3))

    return stats