import os
from copy import deepcopy

import procrunner
import pytest

from annlib_ext import AnnAdaptor
from dxtbx.model.experiment_list import ExperimentListFactory
from libtbx import easy_run
from libtbx.test_utils import approx_equal

from dials.algorithms.refinement.engine import Journal
from dials.array_family import flex


def test_joint_refinement(dials_regression, run_in_tmpdir):
    """A basic test of joint refinement of the CS-PAD detector at hierarchy level 2
    with 300 crystals."""

    bevington = pytest.importorskip("scitbx.examples.bevington")
    if not hasattr(bevington, "non_linear_ls_eigen_wrapper"):
        pytest.skip("Skipping test as SparseLevMar engine not available")

    data_dir = os.path.join(dials_regression, "refinement_test_data",
                            "xfel_metrology")

    # Do refinement and load the history
    result = procrunner.run([
        "dials.refine",
        os.path.join(data_dir, "benchmark_level2d.json"),
        os.path.join(data_dir, "benchmark_level2d.pickle"),
        os.path.join(data_dir, "refine.phil"),
        "history=history.json",
    ])
    assert not result.returncode and not result.stderr

    # there are plenty of things we could do with the refinement history, but
    # here just check that final RMSDs are low enough
    history = Journal.from_json_file("history.json")
    final_rmsd = history["rmsd"][-1]
    assert final_rmsd[0] < 0.0354
    assert final_rmsd[1] < 0.0406
    assert final_rmsd[2] < 0.0018

    # also check that the used_in_refinement flag got set correctly
    rt = flex.reflection_table.from_file("refined.refl")
    uir = rt.get_flags(rt.flags.used_in_refinement)
    assert uir.count(True) == history["num_reflections"][-1]
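
# The refine.phil passed to dials.refine above is part of dials_regression and is not
# reproduced in this listing. For orientation only, a level-2 hierarchical CS-PAD
# refinement of this kind would typically be configured with PHIL settings along these
# lines (an illustrative sketch, not the actual contents of that file):
#
#     refinement {
#       parameterisation.detector.hierarchy_level = 2
#       refinery.engine = SparseLevMar
#     }
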
def test_scan_varying_multi_scan_one_crystal(gcb, dials_data, tmpdir):
    # https://github.com/dials/dials/issues/994
    location = dials_data("l_cysteine_dials_output")
    refls = location.join("indexed.refl")
    expts = location.join("indexed.expt")

    # Set options for quick rather than careful refinement
    result = procrunner.run(
        (
            "dials.refine",
            expts,
            refls,
            "output.history=history.json",
            "outlier.algorithm=tukey",
            "max_iterations=3",
            "unit_cell.smoother.interval_width_degrees=56",
            "orientation.smoother.interval_width_degrees=56",
            "gradient_calculation_blocksize=" + gcb,
        ),
        working_directory=tmpdir,
    )
    assert not result.returncode and not result.stderr

    el = ExperimentListFactory.from_json_file(
        tmpdir.join("refined.expt").strpath, check_format=False)

    # Crystal has been copied into each experiment for scan-varying refinement
    assert len(el.crystals()) == 4

    # load and check results
    history = Journal.from_json_file(tmpdir.join("history.json").strpath)

    expected_rmsds = [
        (0.1401658782847504, 0.2225931584837884, 0.002349912655443814),
        (0.12060230585178289, 0.1585977879739876, 0.002114318828411418),
        (0.10970832317567975, 0.1348574975434352, 0.001955034565537597),
        (0.10373159352273859, 0.12827852889951505, 0.0017901404193256304),
    ]
    for a, b in zip(history["rmsd"], expected_rmsds):
        assert a == pytest.approx(b, abs=1e-6)
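

# The `gcb` argument to the test above is supplied by a pytest fixture providing the
# gradient_calculation_blocksize value as a string. The fixture is not shown in this
# listing; a minimal sketch of how it could be parametrised (the values below are
# illustrative assumptions, not necessarily those used in the real test suite):
@pytest.fixture(params=["1000", "100000"])
def gcb(request):
    return request.param

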
def test_scan_varying_multi_scan_one_crystal(dials_data, tmpdir):
    # https://github.com/dials/dials/issues/994
    location = dials_data("l_cysteine_dials_output")
    refls = location.join("indexed.refl")
    expts = location.join("indexed.expt")

    result = procrunner.run(
        ("dials.refine", expts, refls, "output.history=history.json"),
        working_directory=tmpdir,
    )
    assert not result.returncode and not result.stderr

    el = ExperimentListFactory.from_json_file(
        tmpdir.join("refined.expt").strpath, check_format=False)

    # Crystal has been copied into each experiment for scan-varying refinement
    assert len(el.crystals()) == 4

    # load and check results
    history = Journal.from_json_file(tmpdir.join("history.json").strpath)

    expected_rmsds = [
        [0.102069933, 0.186479653, 0.000970519],
        [0.078117368, 0.105479383, 0.000691489],
        [0.058065104, 0.065957717, 0.000497565],
        [0.042720574, 0.052950359, 0.000393993],
        [0.034387246, 0.045392992, 0.000341015],
        [0.031183632, 0.041773097, 0.000308778],
        [0.029800047, 0.040413058, 0.000288812],
        [0.029161629, 0.039836196, 0.000280338],
        [0.028807767, 0.039529834, 0.000277679],
        [0.028551526, 0.039361871, 0.000276772],
        [0.028395222, 0.039322832, 0.000276637],
        [0.028334067, 0.039332485, 0.000276678],
        [0.028319859, 0.039338415, 0.000276687],
        [0.028317563, 0.039339503, 0.000276691],
    ]
    for a, b in zip(history["rmsd"], expected_rmsds):
        assert a == pytest.approx(b, abs=1e-6)
def test_scan_varying_refinement_of_a_multiple_panel_detector(
    dials_regression, run_in_tmpdir
):
    from dials.array_family import flex

    result = procrunner.run(
        [
            "dials.refine",
            os.path.join(
                dials_regression,
                "refinement_test_data",
                "i23_as_24_panel_barrel",
                "experiments.json",
            ),
            os.path.join(
                dials_regression,
                "refinement_test_data",
                "i23_as_24_panel_barrel",
                "indexed.pickle",
            ),
            "scan_varying=true",
            "history=history.json",
            "outlier.separate_blocks=False",
        ]
    )
    assert not result.returncode and not result.stderr

    # there are plenty of things we could do with the refinement history, but
    # here just check that final RMSDs are low enough
    history = Journal.from_json_file("history.json")
    final_rmsd = history["rmsd"][-1]
    assert final_rmsd[0] < 0.05
    assert final_rmsd[1] < 0.04
    assert final_rmsd[2] < 0.0002

    # also check that the used_in_refinement flag got set correctly
    rt = flex.reflection_table.from_file("refined.refl")
    uir = rt.get_flags(rt.flags.used_in_refinement)
    assert uir.count(True) == history["num_reflections"][-1]
def test_order_invariance(dials_regression, run_in_tmpdir):
    """Check that the order that datasets are included in refinement does not
    matter"""

    data_dir = os.path.join(dials_regression, "refinement_test_data",
                            "multi_narrow_wedges")
    selection1 = (2, 3, 4, 5, 6)
    selection2 = (2, 3, 4, 6, 5)

    # First run
    result = procrunner.run([
        "dials.combine_experiments",
        "reference_from_experiment.beam=0",
        "reference_from_experiment.goniometer=0",
        "reference_from_experiment.detector=0",
    ] + [
        "experiments={0}/data/sweep_%03d/experiments.json".format(data_dir) % n
        for n in selection1
    ] + [
        "reflections={0}/data/sweep_%03d/reflections.pickle".format(data_dir) %
        n for n in selection1
    ])
    assert not result.returncode and not result.stderr
    result = procrunner.run([
        "dials.refine",
        "combined.expt",
        "combined.refl",
        "scan_varying=false",
        "outlier.algorithm=tukey",
        "history=history1.json",
        "output.experiments=refined1.expt",
        "output.reflections=refined1.refl",
    ])
    assert not result.returncode and not result.stderr

    # Second run
    result = procrunner.run([
        "dials.combine_experiments",
        "reference_from_experiment.beam=0",
        "reference_from_experiment.goniometer=0",
        "reference_from_experiment.detector=0",
    ] + [
        "experiments={0}/data/sweep_%03d/experiments.json".format(data_dir) % n
        for n in selection2
    ] + [
        "reflections={0}/data/sweep_%03d/reflections.pickle".format(data_dir) %
        n for n in selection2
    ])
    assert not result.returncode and not result.stderr
    result = procrunner.run([
        "dials.refine",
        "combined.expt",
        "combined.refl",
        "scan_varying=false",
        "outlier.algorithm=tukey",
        "history=history2.json",
        "output.experiments=refined2.expt",
        "output.reflections=refined2.refl",
    ])
    assert not result.returncode and not result.stderr

    # Load results
    refined_experiments1 = ExperimentListFactory.from_json_file(
        "refined1.expt", check_format=False)
    refined_experiments2 = ExperimentListFactory.from_json_file(
        "refined2.expt", check_format=False)

    history1 = Journal.from_json_file("history1.json")
    history2 = Journal.from_json_file("history2.json")

    # Compare RMSDs
    rmsd1 = history1["rmsd"]
    rmsd2 = history2["rmsd"]
    for a, b in zip(rmsd1, rmsd2):
        assert a == pytest.approx(b)

    # Compare crystals
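    # The second combine step added sweeps 5 and 6 in swapped order, so re-map the
    # experiments of the second run back into the order of the first run before comparing
    # crystals pairwise (the slice boundaries reflect how many experiments each sweep
    # contributes to the combined list)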
    crystals1 = [exp.crystal for exp in refined_experiments1]
    crystals2 = [exp.crystal for exp in refined_experiments2[0:8]]
    crystals2.extend([exp.crystal for exp in refined_experiments2[13:16]])
    crystals2.extend([exp.crystal for exp in refined_experiments2[8:13]])
    for a, b in zip(crystals1, crystals2):
        assert a.is_similar_to(b)
def test_constrained_refinement(dials_regression, run_in_tmpdir):
    """Test joint refinement where two detectors are constrained to enforce a
    differential distance (along the shared initial normal vector) of 1 mm.
    This test can be constructed on the fly from data already in
    dials_regression"""

    # use the 'centroid' data for this test. The 'regularized' experiments are
    # useful because the detector has fast and slow exactly aligned with X, -Y
    # so the distance is exactly along the normal vector and can be altered
    # directly by changing the Z component of the origin vector
    data_dir = os.path.join(dials_regression, "refinement_test_data",
                            "centroid")
    experiments_path = os.path.join(data_dir,
                                    "experiments_XPARM_REGULARIZED.json")
    pickle_path = os.path.join(data_dir, "spot_1000_xds.pickle")

    # load the experiments and spots
    el = ExperimentListFactory.from_json_file(experiments_path,
                                              check_format=False)
    rt = flex.reflection_table.from_file(pickle_path)

    # adjust the detector distance by -0.5 mm
    detector = el[0].detector
    panel = detector[0]
    fast = panel.get_fast_axis()
    slow = panel.get_slow_axis()
    origin = panel.get_origin()
    panel.set_frame(fast, slow, origin[0:2] + (origin[2] + 0.5, ))

    # duplicate the (already shifted) experiment and adjust its distance by a further
    # +1 mm, so the two detectors differ by exactly 1 mm
    e2 = deepcopy(el[0])
    detector = e2.detector
    panel = detector[0]
    fast = panel.get_fast_axis()
    slow = panel.get_slow_axis()
    origin = panel.get_origin()
    panel.set_frame(fast, slow, origin[0:2] + (origin[2] - 1.0, ))

    # append to the experiment list and write out
    el.append(e2)
    el.as_json("foo.expt")

    # duplicate the reflections and increment the experiment id
    rt2 = deepcopy(rt)
    rt2["id"] = rt2["id"] + 1

    # concatenate reflections and write out
    rt.extend(rt2)
    rt.as_file("foo.refl")

    # set up refinement, constraining the distance parameter
    cmd = ("dials.refine foo.expt foo.refl "
           "history=history.json refinement.parameterisation.detector."
           "constraints.parameter=Dist scan_varying=False")
    easy_run.fully_buffered(command=cmd).raise_if_errors()
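
    # For reference, an equivalent invocation via procrunner, in the style used by the
    # other tests in this file (a sketch, not part of the original test; left commented
    # out so the refinement is not run twice):
    # result = procrunner.run(
    #     [
    #         "dials.refine",
    #         "foo.expt",
    #         "foo.refl",
    #         "history=history.json",
    #         "refinement.parameterisation.detector.constraints.parameter=Dist",
    #         "scan_varying=False",
    #     ]
    # )
    # assert not result.returncode and not result.stderr
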

    # load refinement history
    history = Journal.from_json_file("history.json")
    ref_exp = ExperimentListFactory.from_json_file("refined.expt",
                                                   check_format=False)

    # we expect 8 steps of constrained refinement
    assert history.get_nrows() == 8

    # get parameter vector from the final step
    pvec = history["parameter_vector"][-1]

    # the constrained parameters have indices 0 and 6 in this case. Check they
    # are still exactly 1 mm apart
    assert pvec[0] == pvec[6] - 1.0

    # NB because the other detector parameters were not also constrained, the
    # refined lab frame distances may not in fact differ by 1 mm. The constraint
    # acts along the initial detector normal vector during composition of a new
    # detector position. After refinement of tilt/twist type rotations,
    # the final distances along the new normal vectors will change
    det1, det2 = ref_exp.detectors()
    p1 = det1[0]
    p2 = det2[0]
    assert approx_equal(p2.get_distance() - p1.get_distance(), 0.9987655)
def test2(dials_regression, tmpdir):
    """Run scan-varying refinement, comparing RMSD table with expected values.
    This test automates what was manually done periodically and recorded in
    dials_regression/refinement_test_data/centroid/README.txt"""

    # use the i04_weak_data for this test
    data_dir = os.path.join(dials_regression, "refinement_test_data",
                            "centroid")
    experiments_path = os.path.join(data_dir,
                                    "experiments_XPARM_REGULARIZED.json")
    pickle_path = os.path.join(data_dir, "spot_all_xds.pickle")

    for pth in (experiments_path, pickle_path):
        assert os.path.exists(pth)

    # scan-static refinement first to get refined.expt as start point
    result = procrunner.run(
        (
            "dials.refine",
            experiments_path,
            pickle_path,
            "scan_varying=False",
            "reflections_per_degree=50",
            "outlier.algorithm=null",
            "close_to_spindle_cutoff=0.05",
        ),
        working_directory=tmpdir,
    )
    assert not result.returncode and not result.stderr
    result = procrunner.run(
        (
            "dials.refine",
            "refined.expt",
            pickle_path,
            "scan_varying=true",
            "output.history=history.json",
            "reflections_per_degree=50",
            "outlier.algorithm=null",
            "close_to_spindle_cutoff=0.05",
            "crystal.orientation.smoother.interval_width_degrees=36.0",
            "crystal.unit_cell.smoother.interval_width_degrees=36.0",
            "set_scan_varying_errors=True",
        ),
        working_directory=tmpdir,
    )
    assert not result.returncode and not result.stderr

    # load and check results
    history = Journal.from_json_file(tmpdir.join("history.json").strpath)

    expected_rmsds = [
        (0.088488398, 0.114583571, 0.001460382),
        (0.080489334, 0.086406517, 0.001284069),
        (0.078835086, 0.086052630, 0.001195882),
        (0.077476911, 0.086194611, 0.001161143),
        (0.076755840, 0.086090630, 0.001157239),
        (0.076586376, 0.085939462, 0.001155641),
        (0.076603722, 0.085878953, 0.001155065),
        (0.076611382, 0.085862959, 0.001154863),
        (0.076608732, 0.085856935, 0.001154384),
        (0.076605731, 0.085852271, 0.001153858),
        (0.076604576, 0.085852318, 0.001153643),
        (0.076603981, 0.085854175, 0.001153594),
    ]
    for a, b in zip(history["rmsd"], expected_rmsds):
        assert a == pytest.approx(b, abs=1e-6)

    # check that the used_in_refinement flag got set correctly
    rt = flex.reflection_table.from_file(tmpdir.join("refined.refl").strpath)
    uir = rt.get_flags(rt.flags.used_in_refinement)
    assert uir.count(True) == history["num_reflections"][-1]
def test3(dials_regression, tmpdir):
    """Strict check for scan-varying refinement using automated outlier rejection
    block width and interval width setting"""

    # use the i04_weak_data for this test
    data_dir = os.path.join(dials_regression, "refinement_test_data",
                            "centroid")
    experiments_path = os.path.join(data_dir,
                                    "experiments_XPARM_REGULARIZED.json")
    pickle_path = os.path.join(data_dir, "spot_all_xds.pickle")

    for pth in (experiments_path, pickle_path):
        assert os.path.exists(pth)

    result = procrunner.run(
        (
            "dials.refine",
            experiments_path,
            pickle_path,
            "scan_varying=true",
            "max_iterations=5",
            "output.history=history.json",
            "crystal.orientation.smoother.interval_width_degrees=auto",
            "crystal.unit_cell.smoother.interval_width_degrees=auto",
        ),
        working_directory=tmpdir,
    )
    assert not result.returncode and not result.stderr

    # load and check results
    history = Journal.from_json_file(tmpdir.join("history.json").strpath)

    expected_rmsds = [
        [0.619507829, 0.351326044, 0.006955399],
        [0.174024575, 0.113486044, 0.004704006],
        [0.098351363, 0.084052519, 0.002660408],
        [0.069202909, 0.072796782, 0.001451734],
        [0.064305277, 0.071560831, 0.001165639],
        [0.062955462, 0.071315612, 0.001074453],
    ]
    for a, b in zip(history["rmsd"], expected_rmsds):
        assert a == pytest.approx(b, abs=1e-6)

    # check the refined unit cell
    ref_exp = ExperimentListFactory.from_json_file(
        tmpdir.join("refined.expt").strpath, check_format=False)[0]
    unit_cell = ref_exp.crystal.get_unit_cell().parameters()
    assert unit_cell == pytest.approx(
        [42.27482, 42.27482, 39.66893, 90.00000, 90.00000, 90.00000], abs=1e-3)

    refined_refl = flex.reflection_table.from_file(
        tmpdir.join("refined.refl").strpath)
    # re-predict reflections using the refined experiments
    predicted = flex.reflection_table.from_predictions_multi([ref_exp])

    matched, reference, unmatched = predicted.match_with_reference(
        refined_refl)
    # assert most refined reflections are matched with predictions
    assert reference.size() > (0.997 * refined_refl.size())

    # second check with nearest neighbour matching that the predictions match up
    ann = AnnAdaptor(data=predicted["xyzcal.px"].as_double(), dim=3, k=1)
    ann.query(refined_refl["xyzcal.px"].as_double())
    assert (ann.distances < 0.5).count(True) > (0.998 * refined_refl.size())