Example no. 1
def test_experiment_identifiers():

  from dials.array_family import flex
  from dxtbx.model import ExperimentList, Experiment

  table = flex.reflection_table()
  table['id'] = flex.int([0,1,2,3])

  assert table.are_experiment_identifiers_consistent()

  identifiers = table.experiment_identifiers()
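  # NB: experiment_identifiers() returns a live map from 'id' values to string
  # identifiers, so the assignments below are stored on the table itself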
  identifiers[0] = 'abcd'
  identifiers[1] = 'efgh'
  identifiers[2] = 'ijkl'
  identifiers[3] = 'mnop'

  assert identifiers[0] == 'abcd'
  assert identifiers[1] == 'efgh'
  assert identifiers[2] == 'ijkl'
  assert identifiers[3] == 'mnop'

  for k, v in identifiers:
    if k == 0:
      assert v == 'abcd'
    if k == 1:
      assert v == 'efgh'
    if k == 2:
      assert v == 'ijkl'
    if k == 3:
      assert v == 'mnop'

  assert tuple(identifiers.keys()) == (0, 1, 2, 3)
  assert tuple(identifiers.values()) == ("abcd", "efgh", "ijkl", "mnop")


  assert table.are_experiment_identifiers_consistent()

  experiments = ExperimentList()
  experiments.append(Experiment(identifier="abcd"))
  experiments.append(Experiment(identifier="efgh"))
  experiments.append(Experiment(identifier="ijkl"))
  experiments.append(Experiment(identifier="mnop"))

  assert table.are_experiment_identifiers_consistent(experiments)

  experiments = ExperimentList()
  experiments.append(Experiment(identifier="abcd"))
  experiments.append(Experiment(identifier="efgh"))
  experiments.append(Experiment(identifier="ijkl"))
  experiments.append(Experiment(identifier="mnop"))
  experiments[3].identifier = "ijkl"

  assert not table.are_experiment_identifiers_consistent(experiments)

  identifiers = table.experiment_identifiers()
  identifiers[0] = 'abcd'
  identifiers[1] = 'efgh'
  identifiers[2] = 'ijkl'
  identifiers[3] = 'ijkl'

  assert not table.are_experiment_identifiers_consistent()

  identifiers[3] = 'mnop'  # restore a unique identifier for experiment 3

  import six.moves.cPickle as pickle
  pickled = pickle.dumps(table)
  table2 = pickle.loads(pickled)

  id1 = table.experiment_identifiers()
  id2 = table2.experiment_identifiers()

  for i in id1.keys():
    assert id1[i] == id2[i]

  other_table = flex.reflection_table()
  other_table['id'] = flex.int([3, 4])

  assert other_table.are_experiment_identifiers_consistent()

  identifiers = other_table.experiment_identifiers()
  identifiers[3] = 'mnop'
  identifiers[4] = 'qrst'

  table.extend(other_table)

  assert len(table.experiment_identifiers()) == 5
  assert table.experiment_identifiers()[0] == 'abcd'
  assert table.experiment_identifiers()[1] == 'efgh'
  assert table.experiment_identifiers()[2] == 'ijkl'
  assert table.experiment_identifiers()[3] == 'mnop'
  assert table.experiment_identifiers()[4] == 'qrst'
Example no. 2
def test_forward(dials_data):
    expt = ExperimentList.from_file(
        dials_data("centroid_test_data").join(
            "imported_experiments.json").strpath)[0]

    # Get the models
    beam = expt.beam
    detector = expt.detector
    gonio = expt.goniometer
    scan = expt.scan

    # Set some parameters
    sigma_divergence = 0.00101229
    mosaicity = 0.157 * math.pi / 180
    n_sigma = 3
    grid_size = 7
    delta_divergence = n_sigma * sigma_divergence

    step_size = delta_divergence / grid_size
    delta_divergence2 = delta_divergence + step_size * 0.5
    delta_mosaicity = n_sigma * mosaicity

    # Create the bounding box calculator
    calculate_bbox = BBoxCalculator3D(beam, detector, gonio, scan,
                                      delta_divergence2, delta_mosaicity)

    # Initialise the transform
    spec = transform.TransformSpec(beam, detector, gonio, scan,
                                   sigma_divergence, mosaicity, n_sigma + 1,
                                   grid_size)

    # tst_conservation_of_counts(self):

    assert len(detector) == 1

    s0 = beam.get_s0()
    m2 = gonio.get_rotation_axis()
    s0_length = matrix.col(beam.get_s0()).length()

    # Create an s1 map
    s1_map = transform.beam_vector_map(detector[0], beam, True)

    for i in range(100):

        # Get random x, y, z
        x = random.uniform(300, 1800)
        y = random.uniform(300, 1800)
        z = random.uniform(0, 9)

        # Get random s1, phi, panel
        s1 = matrix.col(detector[0].get_pixel_lab_coord(
            (x, y))).normalize() * s0_length
        phi = scan.get_angle_from_array_index(z, deg=False)
        panel = 0

        # Calculate the bounding box
        bbox = calculate_bbox(s1, z, panel)
        x0, x1, y0, y1, z0, z1 = bbox

        # Create the coordinate system
        cs = CoordinateSystem(m2, s0, s1, phi)

        # The grid index generator
        step_size = delta_divergence / grid_size
        grid_index = transform.GridIndexGenerator(cs, x0, y0,
                                                  (step_size, step_size),
                                                  grid_size, s1_map)

        # Create the image
        # image = flex.double(flex.grid(z1 - z0, y1 - y0, x1 - x0), 1)
        image = gaussian((z1 - z0, y1 - y0, x1 - x0), 10.0,
                         (z - z0, y - y0, x - x0), (2.0, 2.0, 2.0))
        mask = flex.bool(flex.grid(image.all()), False)
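        # Flag a pixel as inside only when all four of its corners map into the
        # (2 * grid_size + 1)-wide transform grid; only interior z frames are masked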
        for j in range(y1 - y0):
            for i in range(x1 - x0):
                inside = False
                gx00, gy00 = grid_index(j, i)
                gx01, gy01 = grid_index(j, i + 1)
                gx10, gy10 = grid_index(j + 1, i)
                gx11, gy11 = grid_index(j + 1, i + 1)
                mingx = min([gx00, gx01, gx10, gx11])
                maxgx = max([gx00, gx01, gx10, gx11])
                mingy = min([gy00, gy01, gy10, gy11])
                maxgy = max([gy00, gy01, gy10, gy11])
                if (mingx >= 0 and maxgx < 2 * grid_size + 1 and mingy >= 0
                        and maxgy < 2 * grid_size + 1):
                    inside = True
                for k in range(1, z1 - z0 - 1):
                    mask[k, j, i] = inside

        # Transform the image to the grid
        transformed = transform.TransformForward(spec, cs, bbox, 0,
                                                 image.as_double(), mask)
        grid = transformed.profile()

        # Get the sums and ensure they're the same
        eps = 1e-7
        sum_grid = flex.sum(grid)
        sum_image = flex.sum(flex.double(flex.select(image, flags=mask)))
        assert abs(sum_grid - sum_image) <= eps

    # Test passed

    # tst_transform_with_background(self):

    assert len(detector) == 1
    s0 = beam.get_s0()
    m2 = gonio.get_rotation_axis()
    s0_length = matrix.col(beam.get_s0()).length()

    # Create an s1 map
    s1_map = transform.beam_vector_map(detector[0], beam, True)

    for i in range(100):

        # Get random x, y, z
        x = random.uniform(300, 1800)
        y = random.uniform(300, 1800)
        z = random.uniform(0, 9)

        # Get random s1, phi, panel
        s1 = matrix.col(detector[0].get_pixel_lab_coord(
            (x, y))).normalize() * s0_length
        phi = scan.get_angle_from_array_index(z, deg=False)
        panel = 0

        # Calculate the bounding box
        bbox = calculate_bbox(s1, z, panel)
        x0, x1, y0, y1, z0, z1 = bbox

        # Create the coordinate system
        cs = CoordinateSystem(m2, s0, s1, phi)

        # The grid index generator
        step_size = delta_divergence / grid_size
        grid_index = transform.GridIndexGenerator(cs, x0, y0,
                                                  (step_size, step_size),
                                                  grid_size, s1_map)

        # Create the image
        # image = flex.double(flex.grid(z1 - z0, y1 - y0, x1 - x0), 1)
        image = gaussian((z1 - z0, y1 - y0, x1 - x0), 10.0,
                         (z - z0, y - y0, x - x0), (2.0, 2.0, 2.0))
        background = flex.random_double(len(image))
        background.resize(image.accessor())
        mask = flex.bool(flex.grid(image.all()), False)
        for j in range(y1 - y0):
            for i in range(x1 - x0):
                inside = False
                gx00, gy00 = grid_index(j, i)
                gx01, gy01 = grid_index(j, i + 1)
                gx10, gy10 = grid_index(j + 1, i)
                gx11, gy11 = grid_index(j + 1, i + 1)
                mingx = min([gx00, gx01, gx10, gx11])
                maxgx = max([gx00, gx01, gx10, gx11])
                mingy = min([gy00, gy01, gy10, gy11])
                maxgy = max([gy00, gy01, gy10, gy11])
                if (mingx >= 0 and maxgx <= 2 * grid_size + 1 and mingy >= 0
                        and maxgy <= 2 * grid_size + 1):
                    inside = True
                for k in range(1, z1 - z0 - 1):
                    mask[k, j, i] = inside

        # Transform the image to the grid
        transformed = transform.TransformForward(spec, cs, bbox, 0,
                                                 image.as_double(),
                                                 background.as_double(), mask)
        igrid = transformed.profile()
        bgrid = transformed.background()

        # Get the sums and ensure they're the same
        eps = 1e-7
        sum_igrid = flex.sum(igrid)
        sum_bgrid = flex.sum(bgrid)
        sum_image = flex.sum(flex.double(flex.select(image, flags=mask)))
        sum_bkgrd = flex.sum(flex.double(flex.select(background, flags=mask)))
        try:
            assert abs(sum_igrid - sum_image) <= eps
            assert abs(sum_bgrid - sum_bkgrd) <= eps
        except Exception:
            print("Failed for: ", (x, y, z))
            raise
Example no. 3
def test_map_to_minimum_cell():
    # Input and expected output
    input_ucs = [
        (39.7413, 183.767, 140.649, 90, 90, 90),
        (40.16, 142.899, 92.4167, 90, 102.48, 90),
        (180.613, 40.1558, 142.737, 90, 90.0174, 90),
    ]
    input_sgs = ["C 2 2 21", "P 1 2 1", "C 1 2 1"]
    input_hkl = [
        [(1, -75, -71), (1, -73, -70), (1, -71, -69)],
        [(14, -37, -36), (-2, -35, -46), (-3, -34, -47)],
        [(-31, -5, -3), (-25, -3, -3), (-42, -8, -2)],
    ]
    expected_ucs = [
        (39.7413, 94.00755450320204, 140.649, 90.0, 90.0, 77.79717980856927),
        (40.16, 92.46399390642911, 142.899, 90.0, 90.0, 77.3882749092846),
        (
            40.1558,
            92.51154528306184,
            142.73699999999997,
            89.9830147351441,
            90.0,
            77.46527404307477,
        ),
    ]
    expected_output_hkl = [
        [(-1, 37, -71), (-1, 36, -70), (-1, 35, -69)],
        [(-14, 22, 37), (2, 48, 35), (3, 50, 34)],
        [(-5, 13, -3), (-3, 11, -3), (-8, 17, -2)],
    ]

    # Setup the input experiments and reflection tables
    expts = ExperimentList()
    reflections = []
    for uc, sg, hkl in zip(input_ucs, input_sgs, input_hkl):
        uc = uctbx.unit_cell(uc)
        sg = sgtbx.space_group_info(sg).group()
        B = scitbx.matrix.sqr(uc.fractionalization_matrix()).transpose()
        expts.append(Experiment(crystal=Crystal(B, space_group=sg, reciprocal=True)))
        refl = flex.reflection_table()
        refl["miller_index"] = flex.miller_index(hkl)
        reflections.append(refl)

    # Actually run the method we are testing
    cb_ops = change_of_basis_ops_to_minimum_cell(
        expts, max_delta=5, relative_length_tolerance=0.05, absolute_angle_tolerance=2
    )
    cb_ops_as_xyz = [cb_op.as_xyz() for cb_op in cb_ops]
    # Actual cb_ops are machine dependent (sigh)
    assert cb_ops_as_xyz == [
        "-x+y,-2*y,z",
        "-x+z,-z,-y",
        "x+y,-2*x,z",
    ] or cb_ops_as_xyz == ["x-y,2*y,z", "x-z,z,-y", "-x-y,2*x,z"]

    expts_min, reflections = apply_change_of_basis_ops(expts, reflections, cb_ops)
    # Verify that the unit cells have been transformed as expected
    for expt, uc in zip(expts_min, expected_ucs):
        assert expt.crystal.get_unit_cell().parameters() == pytest.approx(uc, abs=4e-2)

    # Space group should be set to P1
    assert [expt.crystal.get_space_group().type().number() for expt in expts_min] == [
        1,
        1,
        1,
    ]

    # Verify that the reflections have been reindexed as expected
    # Because the exact choice of minimum cell can be platform-dependent,
    # compare the magnitude, but not the sign of the output hkl values
    for refl, expected_hkl in zip(reflections, expected_output_hkl):
        for hkl, e_hkl in zip(refl["miller_index"], expected_hkl):
            assert [abs(h) for h in hkl] == [abs(eh) for eh in e_hkl]
Example no. 4
    def run(self):
        ''' Parse the options. '''
        from dials.util.options import flatten_experiments, flatten_reflections
        from dxtbx.model import ExperimentList
        from scitbx.math import five_number_summary
        # Parse the command line arguments
        params, options = self.parser.parse_args(show_diff_phil=True)
        self.params = params
        experiments = flatten_experiments(params.input.experiments)
        reflections = flatten_reflections(params.input.reflections)

        assert len(reflections) == 1
        reflections = reflections[0]
        print("Found", len(reflections), "reflections", "and",
              len(experiments), "experiments")

        filtered_reflections = flex.reflection_table()
        filtered_experiments = ExperimentList()

        skipped_reflections = flex.reflection_table()
        skipped_experiments = ExperimentList()

        if params.detector is not None:
            culled_reflections = flex.reflection_table()
            culled_experiments = ExperimentList()
            detector = experiments.detectors()[params.detector]
            for expt_id, experiment in enumerate(experiments):
                refls = reflections.select(reflections['id'] == expt_id)
                if experiment.detector is detector:
                    culled_experiments.append(experiment)
                    refls['id'] = flex.int(len(refls),
                                           len(culled_experiments) - 1)
                    culled_reflections.extend(refls)
                else:
                    skipped_experiments.append(experiment)
                    refls['id'] = flex.int(len(refls),
                                           len(skipped_experiments) - 1)
                    skipped_reflections.extend(refls)

            print(
                "RMSD filtering %d experiments using detector %d, out of %d" %
                (len(culled_experiments), params.detector, len(experiments)))
            reflections = culled_reflections
            experiments = culled_experiments

        difference_vector_norms = (reflections['xyzcal.mm'] -
                                   reflections['xyzobs.mm.value']).norms()

        if params.max_delta is not None:
            sel = difference_vector_norms <= params.max_delta
            reflections = reflections.select(sel)
            difference_vector_norms = difference_vector_norms.select(sel)

        data = flex.double()
        counts = flex.double()
        for i in range(len(experiments)):
            dvns = difference_vector_norms.select(reflections['id'] == i)
            counts.append(len(dvns))
            if len(dvns) == 0:
                data.append(0)
                continue
            rmsd = math.sqrt(flex.sum_sq(dvns) / len(dvns))
            data.append(rmsd)
        data *= 1000
        subset = data.select(counts > 0)
        print(len(subset), "experiments with > 0 reflections")

        if params.show_plots:
            h = flex.histogram(subset, n_slots=40)
            fig = plt.figure()
            ax = fig.add_subplot(111)
            ax.plot(h.slot_centers().as_numpy_array(),
                    h.slots().as_numpy_array(), '-')
            plt.title("Histogram of %d image RMSDs" % len(subset))

            fig = plt.figure()
            plt.boxplot(subset, vert=False)
            plt.title("Boxplot of %d image RMSDs" % len(subset))
            plt.show()

        outliers = counts == 0
        min_x, q1_x, med_x, q3_x, max_x = five_number_summary(subset)
        print(
            "Five number summary of RMSDs (microns): min %.1f, q1 %.1f, med %.1f, q3 %.1f, max %.1f"
            % (min_x, q1_x, med_x, q3_x, max_x))
        iqr_x = q3_x - q1_x
        cut_x = params.iqr_multiplier * iqr_x
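        # Tukey-style cut: flag experiments whose RMSD exceeds Q3 + multiplier * IQR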
        outliers.set_selected(data > q3_x + cut_x, True)
        #outliers.set_selected(col < q1_x - cut_x, True) # Don't throw away the images that are outliers in the 'good' direction!

        for i in range(len(experiments)):
            if outliers[i]:
                continue
            refls = reflections.select(reflections['id'] == i)
            refls['id'] = flex.int(len(refls), len(filtered_experiments))
            filtered_reflections.extend(refls)
            filtered_experiments.append(experiments[i])

        zeroes = counts == 0
        n_zero = len(counts.select(zeroes))
        print(
            "Removed %d bad experiments and %d experiments with zero reflections, out of %d (%%%.1f)"
            % (len(experiments) - len(filtered_experiments) - n_zero, n_zero,
               len(experiments), 100 *
               ((len(experiments) - len(filtered_experiments)) /
                len(experiments))))

        if params.detector is not None:
            crystals = filtered_experiments.crystals()
            for expt_id, experiment in enumerate(skipped_experiments):
                if experiment.crystal in crystals:
                    filtered_experiments.append(experiment)
                    refls = skipped_reflections.select(
                        skipped_reflections['id'] == expt_id)
                    refls['id'] = flex.int(len(refls),
                                           len(filtered_experiments) - 1)
                    filtered_reflections.extend(refls)

        if params.delta_psi_filter is not None:
            delta_psi = filtered_reflections['delpsical.rad'] * 180 / math.pi
            sel = (delta_psi <= params.delta_psi_filter) & (
                delta_psi >= -params.delta_psi_filter)
            n_before = len(filtered_reflections)
            filtered_reflections = filtered_reflections.select(sel)
            print("Filtering by delta psi, removing %d out of %d reflections" %
                  (n_before - len(filtered_reflections), n_before))

        print("Final experiment count", len(filtered_experiments))

        filtered_experiments.as_file(params.output.filtered_experiments)

        filtered_reflections.as_pickle(params.output.filtered_reflections)
Example no. 5
def test_forward_no_model(dials_data):
    expt = ExperimentList.from_file(
        dials_data("centroid_test_data").join(
            "imported_experiments.json").strpath)[0]

    # Get the models
    beam = expt.beam
    detector = expt.detector
    gonio = expt.goniometer
    scan = expt.scan
    scan.set_image_range((0, 1000))

    # Set some parameters
    sigma_divergence = 0.00101229
    mosaicity = 0.157 * math.pi / 180
    n_sigma = 3
    grid_size = 20
    delta_divergence = n_sigma * sigma_divergence

    step_size = delta_divergence / grid_size
    delta_divergence2 = delta_divergence + step_size * 0.5
    delta_mosaicity = n_sigma * mosaicity

    # Create the bounding box calculator
    calculate_bbox = BBoxCalculator3D(beam, detector, gonio, scan,
                                      delta_divergence2, delta_mosaicity)

    # Initialise the transform
    spec = transform.TransformSpec(beam, detector, gonio, scan,
                                   sigma_divergence, mosaicity, n_sigma + 1,
                                   grid_size)

    # tst_conservation_of_counts(self):

    random.seed(0)

    assert len(detector) == 1

    s0 = beam.get_s0()
    m2 = gonio.get_rotation_axis()
    s0_length = matrix.col(beam.get_s0()).length()

    # Create an s1 map
    s1_map = transform.beam_vector_map(detector[0], beam, True)

    for i in range(100):

        # Get random x, y, z
        x = random.uniform(300, 1800)
        y = random.uniform(300, 1800)
        z = random.uniform(500, 600)

        # Get random s1, phi, panel
        s1 = matrix.col(detector[0].get_pixel_lab_coord(
            (x, y))).normalize() * s0_length
        phi = scan.get_angle_from_array_index(z, deg=False)
        panel = 0

        # Calculate the bounding box
        bbox = calculate_bbox(s1, z, panel)
        x0, x1, y0, y1, z0, z1 = bbox

        # Create the coordinate system
        cs = CoordinateSystem(m2, s0, s1, phi)
        if abs(cs.zeta()) < 0.1:
            continue

        # The grid index generator
        step_size = delta_divergence / grid_size
        grid_index = transform.GridIndexGenerator(cs, x0, y0,
                                                  (step_size, step_size),
                                                  grid_size, s1_map)

        # Create the image
        # image = flex.double(flex.grid(z1 - z0, y1 - y0, x1 - x0), 1)
        image = gaussian((z1 - z0, y1 - y0, x1 - x0), 10.0,
                         (z - z0, y - y0, x - x0), (2.0, 2.0, 2.0))
        mask = flex.bool(flex.grid(image.all()), False)
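        # As in test_forward: keep only pixels whose four corners all map inside
        # the transform grid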
        for j in range(y1 - y0):
            for i in range(x1 - x0):
                inside = False
                gx00, gy00 = grid_index(j, i)
                gx01, gy01 = grid_index(j, i + 1)
                gx10, gy10 = grid_index(j + 1, i)
                gx11, gy11 = grid_index(j + 1, i + 1)
                mingx = min([gx00, gx01, gx10, gx11])
                maxgx = max([gx00, gx01, gx10, gx11])
                mingy = min([gy00, gy01, gy10, gy11])
                maxgy = max([gy00, gy01, gy10, gy11])
                if (mingx >= 0 and maxgx < 2 * grid_size + 1 and mingy >= 0
                        and maxgy < 2 * grid_size + 1):
                    inside = True
                for k in range(1, z1 - z0 - 1):
                    mask[k, j, i] = inside

        # Transform the image to the grid
        transformed = transform.TransformForwardNoModel(
            spec, cs, bbox, 0, image.as_double(), mask)
        grid = transformed.profile()

        # Get the sums and ensure they're the same
        eps = 1e-7
        sum_grid = flex.sum(grid)
        sum_image = flex.sum(flex.double(flex.select(image, flags=mask)))
        assert abs(sum_grid - sum_image) <= eps

        mask = flex.bool(flex.grid(image.all()), True)
        transformed = transform.TransformForwardNoModel(
            spec, cs, bbox, 0, image.as_double(), mask)
        grid = transformed.profile()

        # Boost the bbox to make sure all intensity is included
        x0, x1, y0, y1, z0, z1 = bbox
        bbox2 = (x0 - 10, x1 + 10, y0 - 10, y1 + 10, z0 - 10, z1 + 10)

        # Do the reverse transform
        transformed = transform.TransformReverseNoModel(
            spec, cs, bbox2, 0, grid)
        image2 = transformed.profile()

        # Check the sum of pixels are the same
        sum_grid = flex.sum(grid)
        sum_image = flex.sum(image2)
        assert abs(sum_grid - sum_image) <= eps

        # Do the reverse transform
        transformed = transform.TransformReverseNoModel(
            spec, cs, bbox, 0, grid)
        image2 = transformed.profile()

        from dials.algorithms.statistics import pearson_correlation_coefficient

        cc = pearson_correlation_coefficient(image.as_1d().as_double(),
                                             image2.as_1d())
        assert cc >= 0.99
Example no. 6
def run(args):
    usage = "dev.dials.csv [options] imported.expt strong.refl output.csv=rl.csv"

    parser = OptionParser(
        usage=usage,
        phil=phil_scope,
        read_experiments=True,
        read_reflections=True,
        check_format=False,
    )

    params, options = parser.parse_args(show_diff_phil=False)
    reflections, experiments = reflections_and_experiments_from_files(
        params.input.reflections, params.input.experiments)

    if not experiments or not reflections:
        parser.print_help()
        sys.exit(0)  # assumes the module-level "import sys" of the full script

    spots = []

    for reflection in reflections:
        unique_ids = set(reflection["id"])
        for unique_id in sorted(unique_ids):
            spots.append(reflection.select(reflection["id"] == unique_id))
        if not reflection:  # If there are no reflections then export an empty list
            spots.append(reflection)

    assert len(experiments) == len(spots)

    if params.output.compress:
        fout = gzip.GzipFile(params.output.csv, "w")
        if six.PY3:
            # GzipFile() always provides binary access only.
            # Replace the file object with one that allows writing text:
            fout = io.TextIOWrapper(fout)
            # Rely on garbage collection to close the underlying GzipFile.
    else:
        fout = open(params.output.csv, "w")

    fout.write("# x,y,z,experiment_id,imageset_id\n")

    dp = params.output.dp

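    # Build the per-row format, e.g. "%.2f,%.2f,%.2f,%d,%d\n" for dp=2; the
    # doubled %% escapes survive the first substitution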
    if dp <= 0:
        fmt = "%f,%f,%f,%d,%d\n"
    else:
        fmt = "%%.%df,%%.%df,%%.%df,%%d,%%d\n" % (dp, dp, dp)

    print("Using format:", fmt.strip())

    for k, (expt, refl) in enumerate(zip(experiments, spots)):
        if "imageset_id" not in refl:
            refl["imageset_id"] = refl["id"]

        refl.centroid_px_to_mm(ExperimentList([expt]))
        refl.map_centroids_to_reciprocal_space(ExperimentList([expt]))
        rlp = refl["rlp"]

        for _rlp in rlp:
            fout.write(fmt % (_rlp[0], _rlp[1], _rlp[2], k, k))

        print("Appended %d spots to %s" % (len(rlp), params.output.csv))

    fout.close()
Example no. 7
def test_stills_indexer_multi_lattice_bug_MosaicSauter2014(
        dials_regression, tmpdir):
    """ Problem: In stills_indexer, before calling the refine function, the experiment list contains a list of
        dxtbx crystal models (that are not MosaicSauter2014 models). The conversion to MosaicSauter2014 is made
        during the refine step when functions from nave_parameters is called. If the experiment list contains
        more than 1 experiment, for eg. multiple lattices, only the first crystal gets assigned mosaicity. In
        actuality, all crystal models should be assigned mosaicity. This test only compares whether or not all crystal models
        have been assigned a MosaicSauter2014 model.  """

    from dxtbx.model.experiment_list import ExperimentListFactory
    from dxtbx.model.experiment_list import Experiment, ExperimentList
    from dials.array_family import flex
    from dxtbx.model import Crystal
    from dials.algorithms.indexing.stills_indexer import StillsIndexer
    from dials.command_line.stills_process import (
        phil_scope as stills_process_phil_scope, )
    import dxtbx_model_ext  # needed for comparison of types

    experiment_data = os.path.join(
        dials_regression,
        "refinement_test_data",
        "cspad_refinement",
        "cspad_refined_experiments_step6_level2_300.json",
    )
    reflection_data = os.path.join(
        dials_regression,
        "refinement_test_data",
        "cspad_refinement",
        "cspad_reflections_step7_300.pickle",
    )

    refl = flex.reflection_table.from_file(reflection_data)
    explist = ExperimentListFactory.from_json_file(experiment_data,
                                                   check_format=False)[0:2]
    reflist = refl.select(
        refl["id"] < 2)  # Only use the first 2 for convenience
    # Construct crystal models that don't have mosaicity. These A,B,C values are the same
    # as read in from the dials_regression folder
    # Crystal-0
    cs0 = Crystal(explist[0].crystal)
    exp0 = Experiment(
        imageset=explist[0].imageset,
        beam=explist[0].beam,
        detector=explist[0].detector,
        goniometer=None,
        scan=None,
        crystal=cs0,
    )

    # Crystal-1
    cs1 = Crystal(explist[1].crystal)
    exp1 = Experiment(
        imageset=explist[1].imageset,
        beam=explist[1].beam,
        detector=explist[1].detector,
        goniometer=None,
        scan=None,
        crystal=cs1,
    )
    # Construct a new experiment_list that will be passed on for refinement
    unrefined_explist = ExperimentList([exp0, exp1])
    # Get default params from stills_process and construct StillsIndexer, then run refinement
    params = stills_process_phil_scope.extract()
    SI = StillsIndexer(reflist, unrefined_explist, params=params)
    refined_explist, new_reflist = SI.refine(unrefined_explist, reflist)
    # Now check whether the models have mosaicity after stills_indexer refinement
    # Also check that mosaicity values are within expected limits
    for ii, crys in enumerate(refined_explist.crystals()):
        assert isinstance(crys, dxtbx_model_ext.MosaicCrystalSauter2014)
        if ii == 0:
            assert crys.get_domain_size_ang() == pytest.approx(2242.0, rel=0.1)
        if ii == 1:
            assert crys.get_domain_size_ang() == pytest.approx(2689.0, rel=0.1)
Example no. 8
        writer.add_image(noisy)
        writer.add_image(sim_data)
        writer.close_file()

    shot_data += list(zip([i_shot] * len(cents), cents, cents_mm, i_refs))

    print("Rank %d: shot %d / %d  has %d/%d refls TOok %f seconds to simulate %d panels, Ncells=%d %d %d " 
        % (rank, i_shot, Nexper, len(i_refs), len(exper_refls_strong), tsim, len(panels_with_spots), Ncells_abc[0], Ncells_abc[1], Ncells_abc[2]), flush=True)


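# mpi4py's reduce defaults to the SUM op, which for Python lists concatenates
# the per-rank shot_data onto rank 0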
if has_mpi:
    shot_data = comm.reduce(shot_data)
if rank == 0:
    print("Saving all data")
    exper_ids, cents, cents_mm, i_refs = zip(*shot_data)
    El_out = ExperimentList()
    Rmaster_out = flex.reflection_table()
    Rmaster_orig = flex.reflection_table()
    df = pandas.DataFrame({"exper_ids": exper_ids, "cents": cents, "cents_mm": cents_mm, "i_refs": i_refs})
    unique_exper_ids = df.exper_ids.unique()
    print("%d unique experiments with %d total reflections" % (len(unique_exper_ids), len(cents)))
    from copy import deepcopy
    for new_exper_id, exper_id in enumerate(unique_exper_ids):
        exper_id = int(exper_id)
        df_id = df.query("exper_ids==%d" % exper_id)
        r = Rmaster.select(Rmaster["id"] == exper_id)
        r_orig = deepcopy(r)
        sel = flex.bool([i in df_id.i_refs.values for i in range(len(r))])
        cents = [val for val in df_id.cents.values]
        cents_mm = [val for val in df_id.cents_mm.values]
        for ii, i_ref in enumerate(df_id.i_refs.values):
Example no. 9
    def _refine(self):
        for idxr in set(self._refinr_indexers.values()):
            experiments = idxr.get_indexer_experiment_list()

            indexed_experiments = idxr.get_indexer_payload(
                "experiments_filename")
            indexed_reflections = idxr.get_indexer_payload("indexed_filename")

            # If multiple sweeps but not doing joint refinement, get only the
            # relevant reflections.
            multi_sweep = PhilIndex.params.xia2.settings.multi_sweep_refinement
            if len(experiments) > 1 and not multi_sweep:
                xsweeps = idxr._indxr_sweeps
                assert len(xsweeps) == len(experiments)
                # Don't do joint refinement
                assert len(self._refinr_sweeps) == 1
                xsweep = self._refinr_sweeps[0]
                i = xsweeps.index(xsweep)
                experiments = experiments[i:i + 1]

                # Extract and output experiment and reflections for current sweep
                indexed_experiments = os.path.join(
                    self.get_working_directory(),
                    "%s_indexed.expt" % xsweep.get_name())
                indexed_reflections = os.path.join(
                    self.get_working_directory(),
                    "%s_indexed.refl" % xsweep.get_name())

                experiments.as_file(indexed_experiments)

                reflections = flex.reflection_table.from_file(
                    idxr.get_indexer_payload("indexed_filename"))
                sel = reflections["id"] == i
                assert sel.count(True) > 0
                imageset_id = reflections["imageset_id"].select(sel)
                assert imageset_id.all_eq(imageset_id[0])
                sel = reflections["imageset_id"] == imageset_id[0]
                reflections = reflections.select(sel)
                # set indexed reflections to id == 0 and imageset_id == 0
                reflections["id"].set_selected(reflections["id"] == i, 0)
                reflections["imageset_id"] = flex.int(len(reflections), 0)
                reflections.as_file(indexed_reflections)

            # currently only handle one lattice/refiner
            assert len(experiments.crystals()) == 1

            scan_static = PhilIndex.params.dials.refine.scan_static

            # Avoid doing scan-varying refinement on narrow wedges.
            scan_oscillation_ranges = []
            for experiment in experiments:
                start, end = experiment.scan.get_oscillation_range()
                scan_oscillation_ranges.append(end - start)

            min_oscillation_range = min(scan_oscillation_ranges)

            if (PhilIndex.params.dials.refine.scan_varying
                    and min_oscillation_range > 5
                    and not PhilIndex.params.dials.fast_mode):
                scan_varying = PhilIndex.params.dials.refine.scan_varying
            else:
                scan_varying = False

            if scan_static:
                refiner = self.Refine()
                refiner.set_experiments_filename(indexed_experiments)
                refiner.set_indexed_filename(indexed_reflections)
                refiner.set_scan_varying(False)
                refiner.run()
                self._refinr_experiments_filename = (
                    refiner.get_refined_experiments_filename())
                self._refinr_indexed_filename = refiner.get_refined_filename()
            else:
                self._refinr_experiments_filename = indexed_experiments
                self._refinr_indexed_filename = indexed_reflections

            if scan_varying:
                refiner = self.Refine()
                refiner.set_experiments_filename(
                    self._refinr_experiments_filename)
                refiner.set_indexed_filename(self._refinr_indexed_filename)
                if min_oscillation_range < 36:
                    refiner.set_interval_width_degrees(min_oscillation_range / 2)
                refiner.run()
                self._refinr_experiments_filename = (
                    refiner.get_refined_experiments_filename())
                self._refinr_indexed_filename = refiner.get_refined_filename()

            if scan_static or scan_varying:
                FileHandler.record_log_file(
                    "%s REFINE" % idxr.get_indexer_full_name(),
                    refiner.get_log_file())
                report = self.Report()
                report.set_experiments_filename(
                    self._refinr_experiments_filename)
                report.set_reflections_filename(self._refinr_indexed_filename)
                html_filename = os.path.join(
                    self.get_working_directory(),
                    "%i_dials.refine.report.html" % report.get_xpid(),
                )
                report.set_html_filename(html_filename)
                report.run()
                FileHandler.record_html_file(
                    "%s REFINE" % idxr.get_indexer_full_name(), html_filename)

            experiments = ExperimentList.from_file(
                self._refinr_experiments_filename)
            self.set_refiner_payload("models.expt",
                                     self._refinr_experiments_filename)
            self.set_refiner_payload("observations.refl",
                                     self._refinr_indexed_filename)

            # this is the result of the cell refinement
            self._refinr_cell = experiments.crystals()[0].get_unit_cell().parameters()
Example no. 11
def mock_exp_obj(id_=0):
    """Make a mock experiments file object."""
    exp = Mock()
    exp.data = ExperimentList()
    exp.data.append(Experiment(identifier=str(id_)))
    return exp
class read_experiments(object):
    def __init__(self, params):
        from six.moves import cPickle as pickle
        from dxtbx.model import BeamFactory
        from dxtbx.model import DetectorFactory
        from dxtbx.model.crystal import CrystalFactory
        from cctbx.crystal_orientation import crystal_orientation, basis_type
        from dxtbx.model import Experiment, ExperimentList
        from scitbx import matrix
        self.experiments = ExperimentList()
        self.unique_file_names = []

        self.params = params
        data = pickle.load(
            open(self.params.output.prefix + "_frame.pickle", "rb"))
        frames_text = data.split("\n")

        for item in frames_text:
            tokens = item.split(' ')
            wavelength = float(tokens[order_dict["wavelength"]])

            beam = BeamFactory.simple(wavelength=wavelength)

            detector = DetectorFactory.simple(
                sensor=DetectorFactory.sensor(
                    "PAD"),  # XXX shouldn't hard code for XFEL
                distance=float(tokens[order_dict["distance"]]),
                beam_centre=[
                    float(tokens[order_dict["beam_x"]]),
                    float(tokens[order_dict["beam_y"]])
                ],
                fast_direction="+x",
                slow_direction="+y",
                pixel_size=[self.params.pixel_size, self.params.pixel_size],
                image_size=[1795,
                            1795],  # XXX obviously need to figure this out
            )

            reciprocal_matrix = matrix.sqr([
                float(tokens[order_dict[k]]) for k in [
                    'res_ori_1', 'res_ori_2', 'res_ori_3', 'res_ori_4',
                    'res_ori_5', 'res_ori_6', 'res_ori_7', 'res_ori_8',
                    'res_ori_9'
                ]
            ])
            ORI = crystal_orientation(reciprocal_matrix, basis_type.reciprocal)
            direct = matrix.sqr(ORI.direct_matrix())
            transfer_dict = dict(
                __id__="crystal",
                ML_half_mosaicity_deg=float(
                    tokens[order_dict["half_mosaicity_deg"]]),
                ML_domain_size_ang=float(
                    tokens[order_dict["domain_size_ang"]]),
                real_space_a=matrix.row(direct[0:3]),
                real_space_b=matrix.row(direct[3:6]),
                real_space_c=matrix.row(direct[6:9]),
                space_group_hall_symbol=self.params.target_space_group.type().
                hall_symbol(),
            )
            crystal = CrystalFactory.from_dict(transfer_dict)
            """ old code reflects python-based crystal model
      crystal = Crystal(
        real_space_a = matrix.row(direct[0:3]),
        real_space_b = matrix.row(direct[3:6]),
        real_space_c = matrix.row(direct[6:9]),
        space_group_symbol = self.params.target_space_group.type().lookup_symbol(),
        mosaicity = float(tokens[order_dict["half_mosaicity_deg"]]),
      )
      crystal.domain_size = float(tokens[order_dict["domain_size_ang"]])
      """
            #if isoform is not None:
            #  newB = matrix.sqr(isoform.fractionalization_matrix()).transpose()
            #  crystal.set_B(newB)

            self.experiments.append(
                Experiment(
                    beam=beam,
                    detector=None,  #dummy for now
                    crystal=crystal))
            self.unique_file_names.append(
                tokens[order_dict["unique_file_name"]])

        self.show_summary()

    def get_experiments(self):
        return self.experiments

    def get_files(self):
        return self.unique_file_names

    def show_summary(self):
        w = flex.double([e.beam.get_wavelength() for e in self.experiments])
        stats = flex.mean_and_variance(w)
        print("Wavelength mean and standard deviation:", stats.mean(),
              stats.unweighted_sample_standard_deviation())
        uc = [e.crystal.get_unit_cell().parameters() for e in self.experiments]
        a = flex.double([u[0] for u in uc])
        stats = flex.mean_and_variance(a)
        print("Unit cell a mean and standard deviation:", stats.mean(),
              stats.unweighted_sample_standard_deviation())
        b = flex.double([u[1] for u in uc])
        stats = flex.mean_and_variance(b)
        print("Unit cell b mean and standard deviation:", stats.mean(),
              stats.unweighted_sample_standard_deviation())
        c = flex.double([u[2] for u in uc])
        stats = flex.mean_and_variance(c)
        print("Unit cell c mean and standard deviation:", stats.mean(),
              stats.unweighted_sample_standard_deviation())
        d = flex.double(
            [e.crystal.get_domain_size_ang() for e in self.experiments])
        stats = flex.mean_and_variance(d)
        # NOTE XXX FIXME: cxi.index seems to record the half-domain size; report here the full domain size
        print("Domain size mean and standard deviation:", 2. * stats.mean(),
              2. * stats.unweighted_sample_standard_deviation())
Example no. 13
def merge_data_to_mtz(params, experiments, reflections):
    """Merge data (at each wavelength) and write to an mtz file object."""
    wavelengths = match_wavelengths(experiments)  # wavelengths is an ordered dict
    mtz_datasets = [
        MTZDataClass(wavelength=w, project_name=params.output.project_name)
        for w in wavelengths.keys()
    ]
    dataset_names = params.output.dataset_names
    crystal_names = params.output.crystal_names

    # check if best_unit_cell is set.
    best_unit_cell = params.best_unit_cell
    if not best_unit_cell:
        best_unit_cell = determine_best_unit_cell(experiments)
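    # Apply the common best cell to the d-spacings and to every experiment so
    # merging uses a consistent set of d-values across datasets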
    reflections[0]["d"] = best_unit_cell.d(reflections[0]["miller_index"])
    for expt in experiments:
        expt.crystal.unit_cell = best_unit_cell

    if len(wavelengths) > 1:
        logger.info(
            "Multiple wavelengths found: \n%s",
            "\n".join(
                "  Wavlength: %.5f, experiment numbers: %s "
                % (k, ",".join(map(str, v)))
                for k, v in wavelengths.items()
            ),
        )
        if not dataset_names or len(dataset_names) != len(wavelengths):
            logger.info(
                "Unequal number of dataset names and wavelengths, using default naming."
            )
            dataset_names = [None] * len(wavelengths)
        if not crystal_names or len(crystal_names) != len(wavelengths):
            logger.info(
                "Unequal number of crystal names and wavelengths, using default naming."
            )
            crystal_names = [None] * len(wavelengths)
        experiments_subsets = []
        reflections_subsets = []
        for dataset, dname, cname in zip(mtz_datasets, dataset_names, crystal_names):
            dataset.dataset_name = dname
            dataset.crystal_name = cname
        for exp_nos in wavelengths.values():
            expids = [experiments[i].identifier for i in exp_nos]
            experiments_subsets.append(
                ExperimentList([experiments[i] for i in exp_nos])
            )
            reflections_subsets.append(
                reflections[0].select_on_experiment_identifiers(expids)
            )
    else:
        mtz_datasets[0].dataset_name = dataset_names[0]
        mtz_datasets[0].crystal_name = crystal_names[0]
        experiments_subsets = [experiments]
        reflections_subsets = reflections

    for experimentlist, reflection_table, mtz_dataset in zip(
        experiments_subsets, reflections_subsets, mtz_datasets
    ):
        # merge and truncate the data
        merged_array, merged_anomalous_array, stats_summary = merge(
            experimentlist,
            reflection_table,
            d_min=params.d_min,
            d_max=params.d_max,
            combine_partials=params.combine_partials,
            partiality_threshold=params.partiality_threshold,
            best_unit_cell=best_unit_cell,
            anomalous=params.anomalous,
            assess_space_group=params.assess_space_group,
            n_bins=params.merging.n_bins,
            use_internal_variance=params.merging.use_internal_variance,
        )
        mtz_dataset.merged_array = merged_array
        mtz_dataset.merged_anomalous_array = merged_anomalous_array
        if params.anomalous:
            merged_intensities = merged_anomalous_array
        else:
            merged_intensities = merged_array

        if params.truncate:
            amplitudes, anomalous_amplitudes = truncate(merged_intensities)
            mtz_dataset.amplitudes = amplitudes
            mtz_dataset.anomalous_amplitudes = anomalous_amplitudes
        show_wilson_scaling_analysis(merged_intensities)
        if stats_summary:
            logger.info(stats_summary)

    return make_merged_mtz_file(mtz_datasets)
Example no. 14
  def _index(self):
    '''Actually index the diffraction pattern. Note well that
    this is not going to compute the matrix...'''

    # acknowledge this program

    Citations.cite('labelit')
    Citations.cite('distl')

    #self.reset()

    _images = []
    for i in self._indxr_images:
      for j in i:
        if j not in _images:
          _images.append(j)

    _images.sort()

    images_str = ', '.join('%d' % i for i in _images)

    cell_str = None
    if self._indxr_input_cell:
      cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % \
                  self._indxr_input_cell

    if self._indxr_sweep_name:

      # then this is a proper autoindexing run - describe this
      # to the journal entry

      #if len(self._fp_directory) <= 50:
        #dirname = self._fp_directory
      #else:
        #dirname = '...%s' % self._fp_directory[-46:]
      dirname = os.path.dirname(self.get_imageset().get_template())

      Journal.block(
          'autoindexing', self._indxr_sweep_name, 'labelit',
          {'images':images_str,
           'target cell':cell_str,
           'target lattice':self._indxr_input_lattice,
           'template':self.get_imageset().get_template(),
           'directory':dirname})

    if len(_images) > 4:
      raise RuntimeError('cannot use more than 4 images')

    from xia2.Wrappers.Labelit.LabelitIndex import LabelitIndex
    index = LabelitIndex()
    index.set_working_directory(self.get_working_directory())
    auto_logfiler(index)

    #task = 'Autoindex from images:'

    #for i in _images:
      #task += ' %s' % self.get_image_name(i)

    #self.set_task(task)

    Debug.write('Indexing from images:')
    for i in _images:
      index.add_image(self.get_image_name(i))
      Debug.write('%s' % self.get_image_name(i))

    xsweep = self.get_indexer_sweep()
    if xsweep is not None:
      if xsweep.get_distance() is not None:
        index.set_distance(xsweep.get_distance())
      #if self.get_wavelength_prov() == 'user':
        #index.set_wavelength(self.get_wavelength())
      if xsweep.get_beam_centre() is not None:
        index.set_beam_centre(xsweep.get_beam_centre())

    if self._refine_beam is False:
      index.set_refine_beam(False)
    else:
      index.set_refine_beam(True)
      index.set_beam_search_scope(self._beam_search_scope)

    if ((math.fabs(self.get_wavelength() - 1.54) < 0.01) or
        (math.fabs(self.get_wavelength() - 2.29) < 0.01)):
      index.set_Cu_KA_or_Cr_KA(True)

    #sweep = self.get_indexer_sweep_name()
    #FileHandler.record_log_file(
        #'%s INDEX' % (sweep), self.get_log_file())

    try:
      index.run()
    except RuntimeError as e:

      if self._refine_beam is False:
        raise e

      # can we improve the situation?

      if self._beam_search_scope < 4.0:
        self._beam_search_scope += 4.0

        # try repeating the indexing!

        self.set_indexer_done(False)
        return 'failed'

      # otherwise this is beyond redemption

      raise e

    self._solutions = index.get_solutions()

    # FIXME this needs to check the smilie status e.g.
    # ":)" or ";(" or "  ".

    # FIXME need to check the value of the RMSD and raise an
    # exception if the P1 solution has an RMSD > 1.0...

    # Change 27/FEB/08 to support user assigned spacegroups
    # (euugh!) have to "ignore" solutions with higher symmetry
    # otherwise the rest of xia will override us. Bummer.

    for i, solution in list(self._solutions.items()):
      if self._indxr_user_input_lattice:
        if (lattice_to_spacegroup(solution['lattice']) >
            lattice_to_spacegroup(self._indxr_input_lattice)):
          Debug.write('Ignoring solution: %s' % solution['lattice'])
          del self._solutions[i]

    # check the RMSD from the triclinic unit cell
    if self._solutions[1]['rmsd'] > 1.0 and False:
      # don't know when this is useful - but I know when it is not!
      raise RuntimeError('high RMSD for triclinic solution')

    # configure the "right" solution
    self._solution = self.get_solution()

    # now store also all of the other solutions... keyed by the
    # lattice - however these should only be added if they
    # have a smiley in the appropriate record, perhaps?

    for solution in self._solutions.keys():
      lattice = self._solutions[solution]['lattice']
      if lattice in self._indxr_other_lattice_cell:
        if self._indxr_other_lattice_cell[lattice]['goodness'] < \
           self._solutions[solution]['metric']:
          continue

      self._indxr_other_lattice_cell[lattice] = {
          'goodness':self._solutions[solution]['metric'],
          'cell':self._solutions[solution]['cell']}

    self._indxr_lattice = self._solution['lattice']
    self._indxr_cell = tuple(self._solution['cell'])
    self._indxr_mosaic = self._solution['mosaic']

    lms = LabelitMosflmScript()
    lms.set_working_directory(self.get_working_directory())
    lms.set_solution(self._solution['number'])
    self._indxr_payload['mosflm_orientation_matrix'] = lms.calculate()

    # get the beam centre from the mosflm script - mosflm
    # may have inverted the beam centre and labelit will know
    # this!

    mosflm_beam_centre = lms.get_mosflm_beam()

    if mosflm_beam_centre:
      self._indxr_payload['mosflm_beam_centre'] = tuple(mosflm_beam_centre)

    import copy
    detector = copy.deepcopy(self.get_detector())
    beam = copy.deepcopy(self.get_beam())
    from dxtbx.model.detector_helpers import set_mosflm_beam_centre
    set_mosflm_beam_centre(detector, beam, mosflm_beam_centre)

    from xia2.Experts.SymmetryExpert import lattice_to_spacegroup_number
    from scitbx import matrix
    from cctbx import sgtbx, uctbx
    from dxtbx.model import CrystalFactory
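    # pad '-' with a space so matrix elements fused at a minus sign in the
    # fixed-width mosflm output tokenize separately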
    mosflm_matrix = matrix.sqr(
      [float(i) for line in lms.calculate()
       for i in line.replace("-", " -").split() ][:9])

    space_group = sgtbx.space_group_info(lattice_to_spacegroup_number(
      self._solution['lattice'])).group()
    crystal_model = CrystalFactory.from_mosflm_matrix(
      mosflm_matrix,
      unit_cell=uctbx.unit_cell(
        tuple(self._solution['cell'])),
      space_group=space_group)

    from dxtbx.model import Experiment, ExperimentList
    experiment = Experiment(beam=beam,
                            detector=detector,
                            goniometer=self.get_goniometer(),
                            scan=self.get_scan(),
                            crystal=crystal_model,
                            )

    experiment_list = ExperimentList([experiment])
    self.set_indexer_experiment_list(experiment_list)

    # also get an estimate of the resolution limit from the
    # labelit.stats_distl output... FIXME the name is wrong!

    lsd = LabelitStats_distl()
    lsd.set_working_directory(self.get_working_directory())
    lsd.stats_distl()

    resolution = 1.0e6
    for i in _images:
      stats = lsd.get_statistics(self.get_image_name(i))

      resol = 0.5 * (stats['resol_one'] + stats['resol_two'])

      if resol < resolution:
        resolution = resol

    self._indxr_resolution_estimate = resolution

    return 'ok'
Example no. 15
def test_experiment_identifiers():
    from dxtbx.model import Experiment, ExperimentList

    table = flex.reflection_table()
    table["id"] = flex.int([0, 1, 2, 3])

    table.assert_experiment_identifiers_are_consistent()

    identifiers = table.experiment_identifiers()
    identifiers[0] = "abcd"
    identifiers[1] = "efgh"
    identifiers[2] = "ijkl"
    identifiers[3] = "mnop"

    assert identifiers[0] == "abcd"
    assert identifiers[1] == "efgh"
    assert identifiers[2] == "ijkl"
    assert identifiers[3] == "mnop"

    for k, v in identifiers:
        if k == 0:
            assert v == "abcd"
        if k == 1:
            assert v == "efgh"
        if k == 2:
            assert v == "ijkl"
        if k == 3:
            assert v == "mnop"

    assert tuple(identifiers.keys()) == (0, 1, 2, 3)
    assert tuple(identifiers.values()) == ("abcd", "efgh", "ijkl", "mnop")

    table.assert_experiment_identifiers_are_consistent()

    experiments = ExperimentList()
    experiments.append(Experiment(identifier="abcd"))
    experiments.append(Experiment(identifier="efgh"))
    experiments.append(Experiment(identifier="ijkl"))
    experiments.append(Experiment(identifier="mnop"))

    table.assert_experiment_identifiers_are_consistent(experiments)

    experiments = ExperimentList()
    experiments.append(Experiment(identifier="abcd"))
    experiments.append(Experiment(identifier="efgh"))
    experiments.append(Experiment(identifier="ijkl"))
    experiments.append(Experiment(identifier="mnop"))
    experiments[3].identifier = "ijkl"

    with pytest.raises(AssertionError):
        table.assert_experiment_identifiers_are_consistent(experiments)

    experiments[2].identifier = "mnop"
    table.assert_experiment_identifiers_are_consistent(experiments)

    identifiers = table.experiment_identifiers()
    identifiers[0] = "abcd"
    identifiers[1] = "efgh"
    identifiers[2] = "ijkl"
    identifiers[3] = "ijkl"

    with pytest.raises(AssertionError):
        table.assert_experiment_identifiers_are_consistent()

    identifiers[3] = "mnop"

    pickled = pickle.dumps(table)
    table2 = pickle.loads(pickled)

    id1 = table.experiment_identifiers()
    id2 = table2.experiment_identifiers()

    for i in id1.keys():
        assert id1[i] == id2[i]

    packed = table.as_msgpack()
    table2 = table.from_msgpack(packed)

    id1 = table.experiment_identifiers()
    id2 = table2.experiment_identifiers()

    for i in id1.keys():
        assert id1[i] == id2[i]

    other_table = flex.reflection_table()
    other_table["id"] = flex.int([3, 4])

    table.assert_experiment_identifiers_are_consistent()

    identifiers = other_table.experiment_identifiers()
    identifiers[3] = "mnop"
    identifiers[4] = "qrst"

    table.extend(other_table)

    assert len(table.experiment_identifiers()) == 5
    assert table.experiment_identifiers()[0] == "abcd"
    assert table.experiment_identifiers()[1] == "efgh"
    assert table.experiment_identifiers()[2] == "ijkl"
    assert table.experiment_identifiers()[3] == "mnop"
    assert table.experiment_identifiers()[4] == "qrst"
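
The identifier map is ordinary table metadata, so it survives row selection as
well as extend(). A minimal sketch of that behaviour (assuming a DIALS
installation; select() carrying the map across is an assumption, true of
recent DIALS releases):

from dials.array_family import flex

table = flex.reflection_table()
table["id"] = flex.int([0, 0, 1])
table.experiment_identifiers()[0] = "abcd"
table.experiment_identifiers()[1] = "efgh"

# the selected subset keeps the id -> identifier mapping
subset = table.select(table["id"] == 1)
assert subset.experiment_identifiers()[1] == "efgh"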

Example 16
def discover_better_experimental_model(imagesets,
                                       spot_lists,
                                       params,
                                       dps_params,
                                       nproc=1,
                                       wide_search_binning=1):
    assert len(imagesets) == len(spot_lists)
    assert len(imagesets) > 0
    # XXX should check that all the detector and beam objects are the same

    spot_lists_mm = []
    max_cell_list = []

    detector = imagesets[0].get_detector()
    beam = imagesets[0].get_beam()

    beam_panel = detector.get_panel_intersection(beam.get_s0())

    if beam_panel == -1:
        raise Sorry("input beam does not intersect detector")

    for imageset, spots in zip(imagesets, spot_lists):
        spots_mm = copy.deepcopy(spots)
        spots_mm["imageset_id"] = flex.int(len(spots), 0)
        expts = ExperimentList([
            Experiment(
                detector=imageset.get_detector(),
                beam=imageset.get_beam(),
                goniometer=imageset.get_goniometer(),
                scan=imageset.get_scan(),
            )
        ])
        spots_mm.centroid_px_to_mm(expts)
        spots_mm.map_centroids_to_reciprocal_space(expts)

        if dps_params.d_min is not None:
            d_spacings = 1 / spots_mm["rlp"].norms()
            sel = d_spacings > dps_params.d_min
            spots_mm = spots_mm.select(sel)

        # derive a max_cell from mm spots

        if params.max_cell is None:
            max_cell = find_max_cell(spots_mm,
                                     max_cell_multiplier=1.3,
                                     step_size=45).max_cell
            max_cell_list.append(max_cell)

        if (params.max_reflections is not None
                and spots_mm.size() > params.max_reflections):
            logger.info("Selecting subset of %i reflections for analysis" %
                        params.max_reflections)
            perm = flex.random_permutation(spots_mm.size())
            sel = perm[:params.max_reflections]
            spots_mm = spots_mm.select(sel)

        spot_lists_mm.append(spots_mm)

    if params.max_cell is None:
        max_cell = flex.median(flex.double(max_cell_list))
    else:
        max_cell = params.max_cell
    args = [(imageset, spots, max_cell, dps_params)
            for imageset, spots in zip(imagesets, spot_lists_mm)]

    results = easy_mp.parallel_map(
        func=run_dps,
        iterable=args,
        processes=nproc,
        method="multiprocessing",
        preserve_order=True,
        asynchronous=True,
        preserve_exception_message=True,
    )
    solution_lists = [r["solutions"] for r in results]
    amax_list = [r["amax"] for r in results]
    assert len(solution_lists) > 0

    detector = imagesets[0].get_detector()
    beam = imagesets[0].get_beam()

    # perform calculation
    if dps_params.indexing.improve_local_scope == "origin_offset":
        discoverer = better_experimental_model_discovery(
            imagesets,
            spot_lists_mm,
            solution_lists,
            amax_list,
            dps_params,
            wide_search_binning=wide_search_binning,
        )
        new_detector = discoverer.optimize_origin_offset_local_scope()
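        # get_ray_intersection returns (panel index, (x, y) beam centre in mm
        # on that panel); millimeter_to_pixel converts the mm position to px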
        old_panel, old_beam_centre = detector.get_ray_intersection(
            beam.get_s0())
        new_panel, new_beam_centre = new_detector.get_ray_intersection(
            beam.get_s0())

        old_beam_centre_px = detector[old_panel].millimeter_to_pixel(
            old_beam_centre)
        new_beam_centre_px = new_detector[new_panel].millimeter_to_pixel(
            new_beam_centre)

        logger.info("Old beam centre: %.2f, %.2f mm" % old_beam_centre +
                    " (%.1f, %.1f px)" % old_beam_centre_px)
        logger.info("New beam centre: %.2f, %.2f mm" % new_beam_centre +
                    " (%.1f, %.1f px)" % new_beam_centre_px)
        logger.info(
            "Shift: %.2f, %.2f mm" %
            (matrix.col(old_beam_centre) - matrix.col(new_beam_centre)).elems +
            " (%.1f, %.1f px)" % (matrix.col(old_beam_centre_px) -
                                  matrix.col(new_beam_centre_px)).elems)
        return new_detector, beam
    elif dps_params.indexing.improve_local_scope == "S0_vector":
        raise NotImplementedError()
Example 17
def merge_data_to_mtz(params, experiments, reflections):
    """Merge data (at each wavelength) and write to an mtz file object."""
    wavelengths = match_wavelengths(
        experiments,
        absolute_tolerance=params.wavelength_tolerance,
    )  # wavelengths is an ordered dict
    mtz_datasets = [
        MTZDataClass(wavelength=w, project_name=params.output.project_name)
        for w in wavelengths.keys()
    ]
    dataset_names = params.output.dataset_names
    crystal_names = params.output.crystal_names

    # check if best_unit_cell is set.
    best_unit_cell = params.best_unit_cell
    if not best_unit_cell:
        best_unit_cell = determine_best_unit_cell(experiments)
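    # use the common unit cell to assign a d-spacing to every reflection, so
    # all datasets share a consistent resolution scale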
    reflections[0]["d"] = best_unit_cell.d(reflections[0]["miller_index"])
    for expt in experiments:
        expt.crystal.unit_cell = best_unit_cell

    if len(wavelengths) > 1:
        logger.info(
            "Multiple wavelengths found: \n%s",
            "\n".join("  Wavelength: %.5f, experiment numbers: %s" %
                      (k, ",".join(map(str, v)))
                      for k, v in wavelengths.items()),
        )
        if not dataset_names or len(dataset_names) != len(wavelengths):
            logger.info(
                "Unequal number of dataset names and wavelengths, using default naming."
            )
            dataset_names = [None] * len(wavelengths)
        if not crystal_names or len(crystal_names) != len(wavelengths):
            logger.info(
                "Unequal number of crystal names and wavelengths, using default naming."
            )
            crystal_names = [None] * len(wavelengths)
        experiments_subsets = []
        reflections_subsets = []
        for dataset, dname, cname in zip(mtz_datasets, dataset_names,
                                         crystal_names):
            dataset.dataset_name = dname
            dataset.crystal_name = cname
        for exp_nos in wavelengths.values():
            expids = [experiments[i].identifier for i in exp_nos]
            experiments_subsets.append(
                ExperimentList([experiments[i] for i in exp_nos]))
            reflections_subsets.append(
                reflections[0].select_on_experiment_identifiers(expids))
    else:
        mtz_datasets[0].dataset_name = dataset_names[0]
        mtz_datasets[0].crystal_name = crystal_names[0]
        experiments_subsets = [experiments]
        reflections_subsets = reflections

    # merge and truncate the data for each wavelength group
    for experimentlist, reflection_table, mtz_dataset in zip(
            experiments_subsets, reflections_subsets, mtz_datasets):
        # First generate two merge_equivalents objects, collect merging stats
        merged, merged_anomalous, stats_summary = merge(
            experimentlist,
            reflection_table,
            d_min=params.d_min,
            d_max=params.d_max,
            combine_partials=params.combine_partials,
            partiality_threshold=params.partiality_threshold,
            best_unit_cell=best_unit_cell,
            anomalous=params.anomalous,
            assess_space_group=params.assess_space_group,
            n_bins=params.merging.n_bins,
            use_internal_variance=params.merging.use_internal_variance,
        )

        merged_array = merged.array()
        # Save the relevant data in the mtz_dataset dataclass
        # This will add the data for IMEAN/SIGIMEAN
        mtz_dataset.merged_array = merged_array
        if merged_anomalous:
            merged_anomalous_array = merged_anomalous.array()
            # This will add the data for I(+), I(-), SIGI(+), SIGI(-), N(+), N(-)
            mtz_dataset.merged_anomalous_array = merged_anomalous_array
            mtz_dataset.multiplicities = merged_anomalous.redundancies()
        else:
            merged_anomalous_array = None
            # This will add the data for N
            mtz_dataset.multiplicities = merged.redundancies()

        if params.anomalous:
            merged_intensities = merged_anomalous_array
        else:
            merged_intensities = merged_array

        anom_amplitudes = None
        if params.truncate:
            amplitudes, anom_amplitudes, dano = truncate(merged_intensities)
            # This will add the data for F, SIGF
            mtz_dataset.amplitudes = amplitudes
            # This will add the data for F(+), F(-), SIGF(+), SIGF(-)
            mtz_dataset.anomalous_amplitudes = anom_amplitudes
            # This will add the data for DANO, SIGDANO
            mtz_dataset.dano = dano

        # print out analysis statistics
        show_wilson_scaling_analysis(merged_intensities)
        if stats_summary:
            logger.info(stats_summary)
        if anom_amplitudes:
            logger.info(make_dano_table(anom_amplitudes))

    # pass the dataclasses to an MTZ writer to generate the mtz file and return.
    return make_merged_mtz_file(mtz_datasets)
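
The per-wavelength split above treats the return value of match_wavelengths as
an ordered mapping from wavelength to the experiment numbers measured at that
wavelength. A minimal pure-Python sketch of that grouping contract
(illustrative values only, not the DIALS API):

wavelengths = {0.9795: [0, 1], 0.9116: [2]}  # wavelength -> experiment nos.
for wl, exp_nos in wavelengths.items():
    # one MTZ dataset is built per wavelength from the listed experiments
    print("wavelength %.4f -> experiments %s" % (wl, exp_nos))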
Example 18
import argparse

from dials.array_family import flex
from dxtbx.model import ExperimentList
from dxtbx.model.experiment_list import ExperimentListFactory

import reciprocalspaceship as rs

# The script reads args.in_expt below, so the parser must define that
# argument; the parser setup is reconstructed here.
parser = argparse.ArgumentParser()
parser.add_argument('in_expt', help='Input experiment file.')
parser.add_argument('in_refl', help='Input reflection file.')
parser.add_argument(
    'output',
    help='Template for output file. {output}.expt, {output}.refl will be produced.')
args = parser.parse_args()

# Set parameters
expt_filename = args.in_expt
refl_filename = args.in_refl
new_expt_filename = args.output + '.expt'
new_refl_filename = args.output + '.refl'

# Get experiments
expts = ExperimentListFactory.from_json_file(expt_filename)
new_expts = ExperimentList()

# Initialize flex tables for refl files
refl_input = flex.reflection_table().from_file(refl_filename)
refl_output = refl_input.copy()
refl_output["id"] = flex.int([-1] * len(refl_output))

# Initialize data frame
dials_df = rs.DataSet({
    'Wavelength': refl_input['Wavelength'],
    'ID': refl_input['id'],
    'new_ID': [-1] * len(refl_input)
})  #.infer_mtz_dtypes()

# Generate beams per reflection
print(f'Number of rows: {len(dials_df)}')
Example 19
def run_indexing(
    reflections,
    experiment,
    working_directory,
    extra_args,
    expected_unit_cell,
    expected_rmsds,
    expected_hall_symbol,
    n_expected_lattices=1,
    relative_length_tolerance=0.005,
    absolute_angle_tolerance=0.5,
):
    commands = ["dials.index"]
    if isinstance(reflections, list):
        commands.extend(reflections)
    else:
        commands.append(reflections)
    if isinstance(experiment, list):
        commands.extend(experiment)
    else:
        commands.append(experiment)
    commands.extend(extra_args)

    result = procrunner.run(commands, working_directory=working_directory)
    assert not result.returncode and not result.stderr

    out_expts = working_directory.join("indexed.expt")
    out_refls = working_directory.join("indexed.refl")
    assert out_expts.check()
    assert out_refls.check()

    experiments_list = load.experiment_list(out_expts.strpath,
                                            check_format=False)
    assert len(experiments_list.crystals()) == n_expected_lattices
    indexed_reflections = flex.reflection_table.from_file(out_refls.strpath)
    indexed_reflections.assert_experiment_identifiers_are_consistent(
        experiments_list)
    rmsds = None

    for i, experiment in enumerate(experiments_list):
        assert unit_cells_are_similar(
            experiment.crystal.get_unit_cell(),
            expected_unit_cell,
            relative_length_tolerance=relative_length_tolerance,
            absolute_angle_tolerance=absolute_angle_tolerance,
        ), (
            experiment.crystal.get_unit_cell().parameters(),
            expected_unit_cell.parameters(),
        )
        sg = experiment.crystal.get_space_group()
        assert sg.type().hall_symbol() == expected_hall_symbol, (
            sg.type().hall_symbol(),
            expected_hall_symbol,
        )
        reflections = indexed_reflections.select(
            indexed_reflections["id"] == i)
        mi = reflections["miller_index"]
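        # every indexed reflection should have been assigned a non-zero
        # Miller index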
        assert (mi != (0, 0, 0)).count(False) == 0
        reflections = reflections.select(mi != (0, 0, 0))
        reflections = reflections.select(
            reflections.get_flags(reflections.flags.used_in_refinement))
        assert len(reflections) > 0
        obs_x, obs_y, obs_z = reflections["xyzobs.mm.value"].parts()
        calc_x, calc_y, calc_z = reflections["xyzcal.mm"].parts()
        rmsd_x = flex.mean(flex.pow2(obs_x - calc_x))**0.5
        rmsd_y = flex.mean(flex.pow2(obs_y - calc_y))**0.5
        rmsd_z = flex.mean(flex.pow2(obs_z - calc_z))**0.5
        rmsds = (rmsd_x, rmsd_y, rmsd_z)
        for actual, expected in zip(rmsds, expected_rmsds):
            assert actual <= expected, "%s %s" % (rmsds, expected_rmsds)
        assert experiment.identifier != ""
        expt = ExperimentList()
        expt.append(experiment)
        reflections.assert_experiment_identifiers_are_consistent(expt)

    return _indexing_result(indexed_reflections, experiments_list, rmsds)
Example 20
class DataManager(object):
    def __init__(self, experiments, reflections):
        self._input_experiments = experiments
        self._input_reflections = reflections

        self._experiments = copy.deepcopy(experiments)
        self._reflections = copy.deepcopy(reflections)

        self._set_batches()

    def _set_batches(self):
        max_batches = max(e.scan.get_image_range()[1]
                          for e in self._experiments)
        max_batches += 10  # allow some head room

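        # space the batch offsets by the next power of ten above max_batches,
        # so per-experiment batch ranges can never collide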
        n = int(math.ceil(math.log10(max_batches)))

        for i, expt in enumerate(self._experiments):
            expt.scan.set_batch_offset(i * 10**n)
            logger.debug(
                "%s %s" %
                (expt.scan.get_batch_offset(), expt.scan.get_batch_range()))

    @property
    def experiments(self):
        return self._experiments

    @experiments.setter
    def experiments(self, experiments):
        self._experiments = experiments

    @property
    def reflections(self):
        return self._reflections

    @reflections.setter
    def reflections(self, reflections):
        self._reflections = reflections

    def select(self, experiment_identifiers):
        self._experiments = ExperimentList([
            expt for expt in self._experiments
            if expt.identifier in experiment_identifiers
        ])
        experiment_identifiers = self._experiments.identifiers()
        sel = flex.bool(len(self._reflections), False)
        for i_expt, identifier in enumerate(experiment_identifiers):
            sel_expt = self._reflections['identifier'] == identifier
            sel.set_selected(sel_expt, True)
            self._reflections['id'].set_selected(sel_expt, i_expt)
        self._reflections = self._reflections.select(sel)
        assert self.reflections.are_experiment_identifiers_consistent(
            self._experiments)

    def reflections_as_miller_arrays(self,
                                     intensity_key='intensity.sum.value'):
        from cctbx import crystal, miller
        variance_key = intensity_key.replace('.value', '.variance')
        assert intensity_key in self._reflections
        assert variance_key in self._reflections

        miller_arrays = []
        for expt in self._experiments:
            crystal_symmetry = crystal.symmetry(
                unit_cell=expt.crystal.get_unit_cell(),
                space_group=expt.crystal.get_space_group())
            sel = ((self._reflections.get_flags(
                self._reflections.flags.integrated_sum)
                    & (self._reflections['identifier'] == expt.identifier)))
            assert sel.count(True) > 0
            refl = self._reflections.select(sel)
            data = refl[intensity_key]
            variances = refl[variance_key]
            # FIXME probably need to do some filtering of intensities similar to that
            # done in export_mtz
            miller_indices = refl['miller_index']
            assert variances.all_gt(0)
            sigmas = flex.sqrt(variances)

            miller_set = miller.set(crystal_symmetry,
                                    miller_indices,
                                    anomalous_flag=False)
            intensities = miller.array(miller_set, data=data, sigmas=sigmas)
            intensities.set_observation_type_xray_intensity()
            intensities.set_info(
                miller.array_info(source='DIALS', source_type='pickle'))
            miller_arrays.append(intensities)
        return miller_arrays

    def reindex(self, cb_op=None, cb_ops=None, space_group=None):
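        # exactly one of cb_op (one operation applied to every experiment) or
        # cb_ops (a per-dataset dict or per-experiment list) must be supplied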
        assert [cb_op, cb_ops].count(None) == 1

        if cb_op is not None:
            logger.info('Reindexing: %s' % cb_op)
            self._reflections['miller_index'] = cb_op.apply(
                self._reflections['miller_index'])

            for expt in self._experiments:
                cryst_reindexed = expt.crystal.change_basis(cb_op)
                if space_group is not None:
                    cryst_reindexed.set_space_group(space_group)
                expt.crystal.update(cryst_reindexed)

        elif isinstance(cb_ops, dict):
            for cb_op, dataset_ids in cb_ops.items():
                cb_op = sgtbx.change_of_basis_op(cb_op)

                for dataset_id in dataset_ids:
                    expt = self._experiments[dataset_id]
                    logger.info('Reindexing experiment %s: %s' %
                                (expt.identifier, cb_op.as_xyz()))
                    cryst_reindexed = expt.crystal.change_basis(cb_op)
                    if space_group is not None:
                        cryst_reindexed.set_space_group(space_group)
                    expt.crystal.update(cryst_reindexed)
                    sel = self._reflections['identifier'] == expt.identifier
                    self._reflections['miller_index'].set_selected(
                        sel,
                        cb_op.apply(
                            self._reflections['miller_index'].select(sel)))

        else:
            assert len(cb_ops) == len(self._experiments)
            for cb_op, expt in zip(cb_ops, self._experiments):
                logger.info('Reindexing experiment %s: %s' %
                            (expt.identifier, cb_op.as_xyz()))
                cryst_reindexed = expt.crystal.change_basis(cb_op)
                if space_group is not None:
                    cryst_reindexed.set_space_group(space_group)
                expt.crystal.update(cryst_reindexed)
                sel = self._reflections['identifier'] == expt.identifier
                self._reflections['miller_index'].set_selected(
                    sel,
                    cb_op.apply(self._reflections['miller_index'].select(sel)))

    def export_reflections(self, filename):
        self._reflections.as_pickle(filename)

    def export_experiments(self, filename):
        dump.experiment_list(self._experiments, filename)

    def export_mtz(self, filename=None, params=None):
        if params is None:
            params = export_phil_scope.extract()
        if filename is not None:
            params.mtz.hklout = filename

        m = export_mtz(
            self._reflections,
            self._experiments,
            params.mtz.hklout,
            include_partials=params.mtz.include_partials,
            keep_partials=params.mtz.keep_partials,
            scale_partials=params.mtz.scale_partials,
            min_isigi=params.mtz.min_isigi,
            force_static_model=params.mtz.force_static_model,
            filter_ice_rings=params.mtz.filter_ice_rings,
            ignore_profile_fitting=params.mtz.ignore_profile_fitting,
            apply_scales=params.mtz.apply_scales)
        m.show_summary()

        b1 = set(b.num() for b in m.batches())
        b2 = set(m.get_column('BATCH').extract_values().as_double().iround())
        assert len(b2.difference(b1)) == 0

        return params.mtz.hklout
Example 21
def update_all_data(reflections_path=None, experiments_path=None):
    dat = InfoData()

    if reflections_path is not None:
        try:
            refl_tabl = flex.reflection_table.from_pickle(reflections_path)
            dat.n_strng = refl_tabl.get_flags(
                refl_tabl.flags.strong).count(True)
            print("dat.n_strng =", dat.n_strng)
            dat.n_index = refl_tabl.get_flags(
                refl_tabl.flags.indexed).count(True)
            print("dat.n_index =", dat.n_index)
            dat.n_refnd = refl_tabl.get_flags(
                refl_tabl.flags.used_in_refinement).count(True)
            print("dat.n_refnd =", dat.n_refnd)
            dat.n_integ_sum = refl_tabl.get_flags(
                refl_tabl.flags.integrated_sum).count(True)
            print("dat.n_integ_sum =", dat.n_integ_sum)
            dat.n_integ_prf = refl_tabl.get_flags(
                refl_tabl.flags.integrated_prf).count(True)
            print("dat.n_integ_prf =", dat.n_integ_prf)

        except Exception:
            print("failed to find reflections")

    if experiments_path is not None:
        print("trying experiments")
        try:
            experiments = ExperimentListFactory.from_json_file(
                experiments_path, check_format=False)
        except Exception:
            try:
                # FIXME here only take the first datablock. What if there are more?
                datablock = DataBlockFactory.from_serialized_format(
                    experiments_path, check_format=False)[0]

                # FIXME here only take the first model from each
                beam = datablock.unique_beams()[0]
                detector = datablock.unique_detectors()[0]
                scan = datablock.unique_scans()[0]

                # build a pseudo ExperimentList (with empty crystals)
                experiments = ExperimentList()
                experiments.append(
                    Experiment(beam=beam, detector=detector, scan=scan))

            except ValueError:
                print("failed to read json file")
                return dat

        print("len(experiments)", len(experiments))

        # FIXME take just the first experiment. What if there are more?
        exp = experiments[0]

        # Get crystal data
        if exp.crystal is not None:
            unit_cell = exp.crystal.get_unit_cell()
            (dat.a, dat.b, dat.c,
             dat.alpha, dat.beta, dat.gamma) = unit_cell.parameters()

            # get_B() returns the reciprocal-space (B) matrix as a row-major
            # 9-tuple
            b_mat = exp.crystal.get_B()
            dat.b11 = b_mat[0]
            dat.b12 = b_mat[1]
            dat.b13 = b_mat[2]
            dat.b21 = b_mat[3]
            dat.b22 = b_mat[4]
            dat.b23 = b_mat[5]
            dat.b31 = b_mat[6]
            dat.b32 = b_mat[7]
            dat.b33 = b_mat[8]

            sg = str(exp.crystal.get_space_group().info())
            print("spgr = ", sg)
            dat.spg_group = sg

            from scitbx import matrix
            u_mat = matrix.sqr(exp.crystal.get_U())

            # copy the orientation (U) matrix elements
            dat.u11 = u_mat[0]
            dat.u12 = u_mat[1]
            dat.u13 = u_mat[2]
            dat.u21 = u_mat[3]
            dat.u22 = u_mat[4]
            dat.u23 = u_mat[5]
            dat.u31 = u_mat[6]
            dat.u32 = u_mat[7]
            dat.u33 = u_mat[8]

            rot_angs = u_mat.r3_rotation_matrix_as_x_y_z_angles(deg=True)
            print("u_mat =", u_mat)
            print("rot_angs =", rot_angs)
            dat.r1, dat.r2, dat.r3 = rot_angs

        # Get beam data
        dat.w_lambda = exp.beam.get_wavelength()

        # Get detector data
        # assume details for the panel the beam intersects are the same for the whole detector
        pnl_beam_intersects, (beam_x, beam_y) = \
            exp.detector.get_ray_intersection(exp.beam.get_s0())
        pnl = exp.detector[pnl_beam_intersects]
        print("beam_x, beam_y =", beam_x, beam_y)

        dat.xb = beam_x
        dat.yb = beam_y

        dist = pnl.get_distance()

        print("pnl_beam_intersects             ", pnl_beam_intersects)
        print("dist                            ", dist)

        dat.dd = dist

        dat.img_ran1, dat.img_ran2 = exp.scan.get_image_range()
        dat.oscil1, dat.oscil2 = exp.scan.get_oscillation()

        # is the next line right? check what dials.show does
        dat.e_time = max(exp.scan.get_exposure_times())

        dat.n_pans = len(exp.detector)
        dat.x_px_size, dat.y_px_size = pnl.get_pixel_size()
        dat.gain = pnl.get_gain()
        dat.max_res = exp.detector.get_max_resolution(exp.beam.get_s0())

        # manually find the image template from experiments_path

        try:
            with open(experiments_path) as infile:
                json_info = json.load(infile)

            if isinstance(json_info, dict):
                print("found Dictionary")
                imageset = json_info['imageset']

            elif isinstance(json_info, list):
                print("found List")
                imageset = json_info[0]['imageset']

            dat.tmpl_str = imageset[0]['template']

            print("dat.tmpl_str =", dat.tmpl_str)

        except Exception:
            print("failed to find template in JSON file")

    return dat
Example 22
from IPython import embed

import gemmi
import numpy as np

from dials.array_family import flex
from dials.array_family.flex import reflection_table
from dxtbx.model import ExperimentList
from dxtbx.model.experiment_list import ExperimentListFactory

# Load DIALS files
expt_file = "dials_temp_files/mega_ultra_refined.expt"
refl_file = "dials_temp_files/mega_ultra_refined.refl"

# Get data
print('Loading DIALS files.')
elist = ExperimentListFactory.from_json_file(expt_file, check_format=False)
refls = reflection_table.from_file(refl_file)

# Remove outliers
print('Removing outliers')
idx = refls.get_flags(refls.flags.used_in_refinement).as_numpy_array()
idy = np.arange(len(elist))[idx].tolist()
elist = ExperimentList([elist[i] for i in idy])
refls = refls.select(flex.bool(idx))

# Get experiment data from experiment objects
print('Getting experiment data.')
img_num = 0
i = 0
img = elist.imagesets()[img_num]
experiment = elist[0]
while True:  # Get first expt for this image
    experiment = elist[i]
    if experiment.imageset == img:
        break
    i += 1
cryst = experiment.crystal
spacegroup = gemmi.SpaceGroup(
Example 23
def experiments_masks(request, dials_data):
    filename = (dials_data(request.param["directory"]) /
                request.param["filename"]).strpath
    return ExperimentList.from_file(filename), request.param["masks"]


def test_filtered_arrays_from_experiments_reflections():
    """Test the creating of a miller array from crystal and reflection table."""
    refl = generate_integrated_test_reflections()
    refl["miller_index"] = flex.miller_index([(1, 0, 0), (2, 0, 0), (3, 0, 0),
                                              (4, 0, 0), (5, 0, 0), (6, 0, 0)])
    experiments = ExperimentList()
    exp_dict = {
        "__id__": "crystal",
        "real_space_a": [1.0, 0.0, 0.0],
        "real_space_b": [0.0, 1.0, 0.0],
        "real_space_c": [0.0, 0.0, 2.0],
        "space_group_hall_symbol": " C 2y",
    }
    crystal = Crystal.from_dict(exp_dict)
    experiments.append(Experiment(crystal=crystal))

    miller_set = filtered_arrays_from_experiments_reflections(
        experiments, [refl])[0]
    assert isinstance(miller_set, miller.set)
    assert list(miller_set.data()) == [4.6, 2.4, 2.5]  # same as calling filter
    # for export on scale intensity reducer.
    # now try for prf
    del refl["intensity.scale.value"]
    miller_set = filtered_arrays_from_experiments_reflections(
        experiments, [refl])[0]
    assert isinstance(miller_set, miller.set)
    assert list(miller_set.data()) == [1.0, 2.0, 3.0]  # same as calling filter
    # for export on prf + sum intensity reducer.
    # now just for sum
    del refl["intensity.prf.value"]
    miller_set = filtered_arrays_from_experiments_reflections(
        experiments, [refl])[0]
    assert isinstance(miller_set, miller.set)
    assert list(miller_set.data()) == [11.0, 12.0, 13.0,
                                       14.0]  # same as calling
    # filter for export on prf intensity reducer.

    # Now try with a bad dataset - should be filtered.
    refl = generate_integrated_test_reflections()
    refl["miller_index"] = flex.miller_index([(1, 0, 0), (2, 0, 0), (3, 0, 0),
                                              (4, 0, 0), (5, 0, 0), (6, 0, 0)])
    # Trigger filtering on prf/sum, but when prf is bad - should proceed with sum
    refl.unset_flags(flex.bool(6, True), refl.flags.integrated_prf)
    del refl["intensity.scale.value"]
    refl2 = generate_integrated_test_reflections()
    refl2["partiality"] = flex.double(6, 0.0)
    experiments = ExperimentList()
    experiments.append(Experiment(crystal=crystal))
    experiments.append(Experiment(crystal=crystal))
    miller_sets = filtered_arrays_from_experiments_reflections(
        experiments, [refl, refl2], outlier_rejection_after_filter=True)
    assert len(miller_sets) == 1

    experiments = ExperimentList()
    experiments.append(Experiment(crystal=crystal))
    experiments.append(Experiment(crystal=crystal))
    refl2 = generate_integrated_test_reflections()
    refl2["partiality"] = flex.double(6, 0.0)
    with pytest.raises(ValueError):
        refl["partiality"] = flex.double(6, 0.0)
        _ = filtered_arrays_from_experiments_reflections(
            experiments, [refl, refl2])
Example 25
def test_forward_panel_edge(dials_data):
    expt = ExperimentList.from_file(
        dials_data("centroid_test_data").join(
            "imported_experiments.json").strpath)[0]

    # Get the models
    beam = expt.beam
    detector = expt.detector
    gonio = expt.goniometer
    scan = expt.scan

    # Set some parameters
    sigma_divergence = 0.00101229
    mosaicity = 0.157 * math.pi / 180
    n_sigma = 3
    grid_size = 7
    delta_divergence = n_sigma * sigma_divergence

    step_size = delta_divergence / grid_size
    delta_divergence2 = delta_divergence + step_size * 0.5
    delta_mosaicity = n_sigma * mosaicity

    # Create the bounding box calculator
    calculate_bbox = BBoxCalculator3D(beam, detector, gonio, scan,
                                      delta_divergence2, delta_mosaicity)

    # Initialise the transform
    spec = transform.TransformSpec(beam, detector, gonio, scan,
                                   sigma_divergence, mosaicity, n_sigma + 1,
                                   grid_size)

    assert len(detector) == 1

    s0 = beam.get_s0()
    m2 = gonio.get_rotation_axis()
    s0_length = matrix.col(beam.get_s0()).length()

    image_size = detector[0].get_image_size()
    refl_xy = [
        (0, 0),
        (2, 3),
        (4, 1000),
        (1000, 5),
        (image_size[0] - 1, image_size[1] - 1),
        (image_size[0] - 2, 1),
        (1, image_size[1] - 5),
        (1000, image_size[1] - 4),
        (image_size[0] - 3, 1000),
    ]

    for x, y in refl_xy:
        z = random.uniform(0, 9)

        # Get random s1, phi, panel
        s1 = matrix.col(detector[0].get_pixel_lab_coord(
            (x, y))).normalize() * s0_length
        phi = scan.get_angle_from_array_index(z, deg=False)
        panel = 0

        # Calculate the bounding box
        bbox = calculate_bbox(s1, z, panel)
        x0, x1, y0, y1, z0, z1 = bbox

        # Create the coordinate system
        cs = CoordinateSystem(m2, s0, s1, phi)

        # Create the image
        image = gaussian((z1 - z0, y1 - y0, x1 - x0), 10.0,
                         (z - z0, y - y0, x - x0), (2.0, 2.0, 2.0))

        # Mask for the foreground pixels
        refl_mask = image > 1e-3
        bg = flex.double(image.accessor())

        # Shoebox mask, i.e. mask out pixels that are outside the panel bounds
        shoebox_mask = flex.bool(image.accessor(), False)
        for j in range(y1 - y0):
            for i in range(x1 - x0):
                if (j + y0 >= 0 and j + y0 < image_size[1] and i + x0 >= 0
                        and i + x0 < image_size[0]):
                    for k in range(z1 - z0):
                        shoebox_mask[k, j, i] = True

        mask = refl_mask & shoebox_mask

        # from matplotlib import pyplot as plt
        # fig, axes = plt.subplots(ncols=refl_mask.focus()[0], nrows=4)
        # for i in range(refl_mask.focus()[0]):
        # axes[0, i].imshow(image.as_numpy_array()[i])
        # axes[1, i].imshow(refl_mask.as_numpy_array()[i])
        # axes[2, i].imshow(shoebox_mask.as_numpy_array()[i])
        # axes[3, i].imshow(mask.as_numpy_array()[i])
        # plt.show()

        # Transform the image to the grid
        transformed = transform.TransformForward(spec, cs, bbox, 0,
                                                 image.as_double(), bg,
                                                 refl_mask)
        grid = transformed.profile()

        mask = refl_mask & shoebox_mask
        # assert only pixels within the panel were transformed
        assert flex.sum(grid) == pytest.approx(flex.sum(
            image.select(mask.as_1d())),
                                               rel=0.01)
        # The total transformed counts should be less than the (unmasked) image counts
        assert flex.sum(grid) < flex.sum(image)

        # Transform the image to the grid, this time without a background
        transformed = transform.TransformForward(spec, cs, bbox, 0,
                                                 image.as_double(), refl_mask)
        grid = transformed.profile()

        mask = refl_mask & shoebox_mask
        # assert only pixels within the panel were transformed
        assert flex.sum(grid) == pytest.approx(flex.sum(
            image.select(mask.as_1d())),
                                               rel=0.01)
        # The total transformed counts should be less than the (unmasked) image counts
        assert flex.sum(grid) < flex.sum(image)
Example 26
def test_split_by_wavelength(tmpdir):
    """Test the split_by_wavelength option of dials.split_experiments"""
    experiments = ExperimentList()
    exp = generate_exp(wavelength=1.0)
    exp.identifier = "0"
    experiments.append(exp)
    exp = generate_exp(wavelength=0.5)
    exp.identifier = "1"
    experiments.append(exp)

    reflections = flex.reflection_table()
    reflections["id"] = flex.int([0, 1])
    reflections["intensity"] = flex.double([100.0, 200.0])
    reflections.experiment_identifiers()[0] = "0"
    reflections.experiment_identifiers()[1] = "1"

    experiments.as_json(tmpdir.join("tmp.expt").strpath)
    reflections.as_file(tmpdir.join("tmp.refl").strpath)

    result = procrunner.run(
        [
            "dials.split_experiments", "tmp.expt", "tmp.refl",
            "by_wavelength=True"
        ],
        working_directory=tmpdir,
    )
    assert not result.returncode and not result.stderr

    for i, (wl, ids,
            intensity) in enumerate(zip([0.5, 1.0], ["1", "0"],
                                        [200.0, 100.0])):
        assert tmpdir.join("split_%d.expt" % i).check()
        assert tmpdir.join("split_%d.refl" % i).check()
        exp_single = load.experiment_list(tmpdir.join("split_%d.expt" %
                                                      i).strpath,
                                          check_format=False)
        ref_single = flex.reflection_table.from_file(
            tmpdir.join("split_%d.refl" % i).strpath)
        assert exp_single[0].beam.get_wavelength() == wl
        assert exp_single[0].identifier == ids
        id_ = ref_single["id"][0]
        assert ref_single.experiment_identifiers()[id_] == ids
        assert list(ref_single["intensity"]) == [intensity]

    # Now test for successful error handling if no identifiers set.
    experiments[0].identifier = ""
    experiments[1].identifier = ""
    experiments.as_json(tmpdir.join("tmp.expt").strpath)
    result = procrunner.run(
        [
            "dials.split_experiments", "tmp.expt", "tmp.refl",
            "by_wavelength=True"
        ],
        working_directory=tmpdir,
    )
    assert result.returncode == 1
    assert result.stderr.startswith(b"Sorry")

    experiments[0].identifier = "0"
    experiments[1].identifier = "1"
    del reflections.experiment_identifiers()[0]
    del reflections.experiment_identifiers()[1]
    experiments.as_json(tmpdir.join("tmp.expt").strpath)
    reflections.as_file(tmpdir.join("tmp.refl").strpath)
    result = procrunner.run(
        [
            "dials.split_experiments", "tmp.expt", "tmp.refl",
            "by_wavelength=True"
        ],
        working_directory=tmpdir,
    )
    assert result.returncode == 1
    assert result.stderr.startswith(b"Sorry")
Example 27
    def index(self):

        experiments = ExperimentList()

        had_refinement_error = False
        have_similar_crystal_models = False

        while True:
            if had_refinement_error or have_similar_crystal_models:
                break
            max_lattices = self.params.multiple_lattice_search.max_lattices
            if max_lattices is not None and len(experiments) >= max_lattices:
                break
            if len(experiments) > 0:
                cutoff_fraction = (self.params.multiple_lattice_search.
                                   recycle_unindexed_reflections_cutoff)
                d_spacings = 1 / self.reflections["rlp"].norms()
                d_min_indexed = flex.min(
                    d_spacings.select(self.indexed_reflections))
                min_reflections_for_indexing = cutoff_fraction * len(
                    self.reflections.select(d_spacings > d_min_indexed))
                crystal_ids = self.reflections.select(
                    d_spacings > d_min_indexed)["id"]
                n_unindexed = (crystal_ids == -1).count(True)
                if n_unindexed < min_reflections_for_indexing:
                    logger.info(
                        "Finish searching for more lattices: %i unindexed reflections remaining."
                        % n_unindexed)
                    break

            n_lattices_previous_cycle = len(experiments)

            if self.d_min is None:
                self.d_min = self.params.refinement_protocol.d_min_start

            if len(experiments) == 0:
                experiments.extend(self.find_lattices())
            else:
                try:
                    new = self.find_lattices()
                    experiments.extend(new)
                except Sorry:
                    logger.info("Indexing remaining reflections failed")

            if self.params.refinement_protocol.d_min_step is libtbx.Auto:
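                # spread the resolution increase evenly across the remaining
                # macro-cycles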
                n_cycles = self.params.refinement_protocol.n_macro_cycles
                if self.d_min is None or n_cycles == 1:
                    self.params.refinement_protocol.d_min_step = 0
                else:
                    d_spacings = 1 / self.reflections["rlp"].norms()
                    d_min_all = flex.min(d_spacings)
                    self.params.refinement_protocol.d_min_step = (
                        self.d_min - d_min_all) / (n_cycles - 1)
                    logger.info("Using d_min_step %.1f" %
                                self.params.refinement_protocol.d_min_step)

            if len(experiments) == 0:
                raise DialsIndexError("No suitable lattice could be found.")
            elif len(experiments) == n_lattices_previous_cycle:
                # no more lattices found
                break

            for i_cycle in range(
                    self.params.refinement_protocol.n_macro_cycles):
                if (i_cycle > 0 and self.d_min is not None
                        and self.params.refinement_protocol.d_min_step > 0):
                    d_min = self.d_min - self.params.refinement_protocol.d_min_step
                    d_min = max(d_min, 0)
                    if self.params.refinement_protocol.d_min_final is not None:
                        d_min = max(
                            d_min, self.params.refinement_protocol.d_min_final)
                    if d_min >= 0:
                        self.d_min = d_min
                        logger.info("Increasing resolution to %.2f Angstrom" %
                                    d_min)

                # reset reflection lattice flags
                # the lattice a given reflection belongs to: a value of -1 indicates
                # that a reflection doesn't belong to any lattice so far
                self.reflections["id"] = flex.int(len(self.reflections), -1)

                self.index_reflections(experiments, self.reflections)

                if i_cycle == 0 and self.params.known_symmetry.space_group is not None:
                    self._apply_symmetry_post_indexing(
                        experiments, self.reflections,
                        n_lattices_previous_cycle)

                logger.info("\nIndexed crystal models:")
                self.show_experiments(experiments,
                                      self.reflections,
                                      d_min=self.d_min)

                if self._check_have_similar_crystal_models(experiments):
                    have_similar_crystal_models = True
                    break

                logger.info("")
                logger.info("#" * 80)
                logger.info("Starting refinement (macro-cycle %i)" %
                            (i_cycle + 1))
                logger.info("#" * 80)
                logger.info("")
                self.indexed_reflections = self.reflections["id"] > -1

                sel = flex.bool(len(self.reflections), False)
                lengths = 1 / self.reflections["rlp"].norms()
                if self.d_min is not None:
                    isel = (lengths <= self.d_min).iselection()
                    sel.set_selected(isel, True)
                sel.set_selected(self.reflections["id"] == -1, True)
                self.reflections.unset_flags(sel,
                                             self.reflections.flags.indexed)
                self.unindexed_reflections = self.reflections.select(sel)

                reflections_for_refinement = self.reflections.select(
                    self.indexed_reflections)
                if self.params.refinement_protocol.mode == "repredict_only":
                    refined_experiments, refined_reflections = (
                        experiments,
                        reflections_for_refinement,
                    )
                    from dials.algorithms.refinement.prediction.managed_predictors import (
                        ExperimentsPredictorFactory, )

                    ref_predictor = ExperimentsPredictorFactory.from_experiments(
                        experiments,
                        spherical_relp=self.all_params.refinement.
                        parameterisation.spherical_relp_model,
                    )
                    ref_predictor(refined_reflections)
                else:
                    try:
                        refined_experiments, refined_reflections = self.refine(
                            experiments, reflections_for_refinement)
                    except (DialsRefineConfigError,
                            DialsRefineRuntimeError) as e:
                        if len(experiments) == 1:
                            raise DialsIndexRefineError(str(e))
                        had_refinement_error = True
                        logger.info("Refinement failed:")
                        logger.info(e)
                        del experiments[-1]
                        break

                self._unit_cell_volume_sanity_check(experiments,
                                                    refined_experiments)

                self.refined_reflections = refined_reflections
                self.refined_reflections.unset_flags(
                    self.refined_reflections["id"] < 0,
                    self.refined_reflections.flags.indexed,
                )

                for i, expt in enumerate(self.experiments):
                    ref_sel = self.refined_reflections.select(
                        self.refined_reflections["imageset_id"] == i)
                    ref_sel = ref_sel.select(ref_sel["id"] >= 0)
                    for i_expt in set(ref_sel["id"]):
                        refined_expt = refined_experiments[i_expt]
                        expt.detector = refined_expt.detector
                        expt.beam = refined_expt.beam
                        expt.goniometer = refined_expt.goniometer
                        expt.scan = refined_expt.scan
                        refined_expt.imageset = expt.imageset

                if not (self.all_params.refinement.parameterisation.beam.fix
                        == "all" and self.all_params.refinement.
                        parameterisation.detector.fix == "all"):
                    # Experimental geometry may have changed - re-map centroids to
                    # reciprocal space
                    self.reflections = self._map_centroids_to_reciprocal_space(
                        self.experiments, self.reflections)

                # update for next cycle
                experiments = refined_experiments
                self.refined_experiments = refined_experiments

                logger.info("\nRefined crystal models:")
                self.show_experiments(self.refined_experiments,
                                      self.reflections,
                                      d_min=self.d_min)

                if (i_cycle >= 2 and self.d_min
                        == self.params.refinement_protocol.d_min_final):
                    logger.info(
                        "Target d_min_final reached: finished with refinement")
                    break

        if self.refined_experiments is None:
            raise DialsIndexRefineError(
                "None of the experiments could refine.")

        if len(self.refined_experiments) > 1:
            from dials.algorithms.indexing.compare_orientation_matrices import (
                rotation_matrix_differences, )

            logger.info(
                rotation_matrix_differences(
                    self.refined_experiments.crystals()))

        self._xyzcal_mm_to_px(self.experiments, self.refined_reflections)
Example 28
    def _index(self):
        """Actually do the autoindexing using the data prepared by the
        previous method."""

        idxref = self.Idxref()

        self._index_remove_masked_regions()
        for file in ["SPOT.XDS"]:
            idxref.set_input_data_file(file, self._indxr_payload[file])

        # edit SPOT.XDS to remove reflections in untrusted regions of the detector

        idxref.set_data_range(self._indxr_images[0][0],
                              self._indxr_images[0][1])
        idxref.set_background_range(self._indxr_images[0][0],
                                    self._indxr_images[0][1])

        # set the phi start etc correctly

        for block in self._indxr_images[:1]:
            starting_frame = block[0]
            starting_angle = self.get_scan().get_angle_from_image_index(
                starting_frame)

            idxref.set_starting_frame(starting_frame)
            idxref.set_starting_angle(starting_angle)

            idxref.add_spot_range(block[0], block[1])

        for block in self._indxr_images[1:]:
            idxref.add_spot_range(block[0], block[1])

        if self._indxr_user_input_lattice:
            idxref.set_indexer_user_input_lattice(True)

        if self._indxr_input_lattice and self._indxr_input_cell:
            idxref.set_indexer_input_lattice(self._indxr_input_lattice)
            idxref.set_indexer_input_cell(self._indxr_input_cell)

            logger.debug("Set lattice: %s", self._indxr_input_lattice)
            logger.debug("Set cell: %f %f %f %f %f %f" %
                         self._indxr_input_cell)

            original_cell = self._indxr_input_cell
        elif self._indxr_input_lattice:
            idxref.set_indexer_input_lattice(self._indxr_input_lattice)
            original_cell = None
        else:
            original_cell = None

        converter = to_xds(self.get_imageset())
        xds_beam_centre = converter.detector_origin
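        # detector_origin expresses the beam centre in the XDS convention
        # expected by IDXREF (conversion handled by the to_xds adaptor)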

        idxref.set_beam_centre(xds_beam_centre[0], xds_beam_centre[1])

        # fixme need to check if the lattice, cell have been set already,
        # and if they have, pass these in as input to the indexing job.

        done = False

        while not done:
            try:
                done = idxref.run()

                # N.B. in here if the IDXREF step was being run in the first
                # pass done is FALSE however there should be a refined
                # P1 orientation matrix etc. available - so keep it!

            except XDSException as e:
                # inspect this - if we have complaints about not
                # enough reflections indexed, and we have a target
                # unit cell, and they are the same, well ignore it

                if "solution is inaccurate" in str(e):
                    logger.debug(
                        "XDS complains solution inaccurate - ignoring")
                    done = idxref.continue_from_error()
                elif ("insufficient percentage (< 70%)" in str(e)
                      or "insufficient percentage (< 50%)"
                      in str(e)) and original_cell:
                    done = idxref.continue_from_error()
                    lattice, cell, mosaic = idxref.get_indexing_solution()
                    # compare solutions FIXME should use xds_cell_deviation
                    check = PhilIndex.params.xia2.settings.xds_check_cell_deviation
                    for j in range(3):
                        # allow two percent variation in unit cell length
                        if (math.fabs(
                            (cell[j] - original_cell[j]) / original_cell[j]) >
                                0.02 and check):
                            logger.debug("XDS unhappy and solution wrong")
                            raise e
                        # and two degree difference in angle
                        if (math.fabs(cell[j + 3] - original_cell[j + 3]) > 2.0
                                and check):
                            logger.debug("XDS unhappy and solution wrong")
                            raise e
                    logger.debug("XDS unhappy but solution ok")
                elif "insufficient percentage (< 70%)" in str(
                        e) or "insufficient percentage (< 50%)" in str(e):
                    logger.debug("XDS unhappy but solution probably ok")
                    done = idxref.continue_from_error()
                else:
                    raise e

        FileHandler.record_log_file(
            "%s INDEX" % self.get_indexer_full_name(),
            os.path.join(self.get_working_directory(), "IDXREF.LP"),
        )

        for file in ["SPOT.XDS", "XPARM.XDS"]:
            self._indxr_payload[file] = idxref.get_output_data_file(file)

        # need to get the indexing solutions out somehow...

        self._indxr_other_lattice_cell = idxref.get_indexing_solutions()

        (
            self._indxr_lattice,
            self._indxr_cell,
            self._indxr_mosaic,
        ) = idxref.get_indexing_solution()

        xparm_file = os.path.join(self.get_working_directory(), "XPARM.XDS")
        models = dxtbx.load(xparm_file)
        crystal_model = to_crystal(xparm_file)

        # this information gets lost when re-creating the models from the
        # XDS results - however is not refined so can simply copy from the
        # input - https://github.com/xia2/xia2/issues/372
        models.get_detector()[0].set_thickness(
            converter.get_detector()[0].get_thickness())

        experiment = Experiment(
            beam=models.get_beam(),
            detector=models.get_detector(),
            goniometer=models.get_goniometer(),
            scan=models.get_scan(),
            crystal=crystal_model,
            # imageset=self.get_imageset(),
        )

        experiment_list = ExperimentList([experiment])
        self.set_indexer_experiment_list(experiment_list)

        # I will want this later on to check that the lattice was ok
        self._idxref_subtree_problem = idxref.get_index_tree_problem()
Example 29
def test_change_of_basis_ops_to_minimum_cell_1037(mocker):
    # See https://github.com/dials/dials/issues/1037

    input_ucs = [
        (
            4.805202948916906,
            12.808064769657364,
            16.544899201125446,
            106.45808502003258,
            90.0065567098825,
            100.77735674275475,
        ),
        (
            4.808011343212577,
            12.821894835790472,
            16.557339561965573,
            106.48431244651402,
            90.0252848479048,
            100.77252933676507,
        ),
        (
            4.8096632137789985,
            12.815648858527567,
            16.55931712239122,
            106.48990701341536,
            90.01703141314147,
            100.80397887485773,
        ),
        (
            4.807294085194974,
            12.822386757910516,
            16.560411742466663,
            106.43185845358086,
            90.02067929544215,
            100.79522302759383,
        ),
    ]

    # Setup the input experiments and reflection tables
    expts = ExperimentList()
    for uc in input_ucs:
        uc = uctbx.unit_cell(uc)
        sg = sgtbx.space_group_info("P1").group()
        B = scitbx.matrix.sqr(uc.fractionalization_matrix()).transpose()
        expts.append(Experiment(crystal=Crystal(B, space_group=sg, reciprocal=True)))

    # We want to spy on the return value of this function
    mocker.spy(symmetry, "unit_cells_are_similar_to")

    # Actually run the method we are testing
    cb_ops = change_of_basis_ops_to_minimum_cell(
        expts, max_delta=5, relative_length_tolerance=0.05, absolute_angle_tolerance=2
    )
    import pytest_mock

    if getattr(pytest_mock, "version", "").startswith("1."):
        assert symmetry.unit_cells_are_similar_to.return_value is True
    else:
        assert symmetry.unit_cells_are_similar_to.spy_return is True
    cb_ops_as_xyz = [cb_op.as_xyz() for cb_op in cb_ops]
    assert len(set(cb_ops_as_xyz)) == 1
    # Actual cb_ops are machine dependent (sigh)
    assert cb_ops_as_xyz[0] in ("x,y,z", "-x,y,-z", "x-y,-y,-z")
Example 30
def test_blank_integrated_analysis(dials_data):
    expts = ExperimentList.from_file(dials_data("insulin_processed") /
                                     "integrated.expt",
                                     check_format=False)
    refl = flex.reflection_table.from_file(
        dials_data("insulin_processed") / "integrated.refl")
    results = detect_blanks.blank_integrated_analysis(refl,
                                                      expts[0].scan,
                                                      phi_step=5,
                                                      fractional_loss=0.1)
    assert results["data"][0]["x"] == [
        2.5,
        7.5,
        12.5,
        17.5,
        22.5,
        27.5,
        32.5,
        37.5,
        42.5,
    ]
    assert results["data"][0]["y"] == pytest.approx([
        27.903266149430973,
        25.832527090455052,
        26.9236206883069,
        26.50234804728626,
        26.41019377727383,
        25.810676090828185,
        24.844906790823064,
        25.89992001081651,
        25.580718362291474,
    ])
    assert not any(results["data"][0]["blank"])
    assert results["blank_regions"] == []

    # Now with some "blank" regions - make some of the reflections weak
    z = refl["xyzobs.px.value"].parts()[2]
    refl["intensity.prf.value"].set_selected(
        z < 10, refl["intensity.prf.value"] * 0.05)
    results = detect_blanks.blank_integrated_analysis(refl,
                                                      expts[0].scan,
                                                      phi_step=5,
                                                      fractional_loss=0.1)
    assert results["data"][0]["y"] == pytest.approx([
        1.3951633074715482,
        1.2916263545227527,
        26.9236206883069,
        26.50234804728626,
        26.41019377727383,
        25.810676090828185,
        24.844906790823064,
        25.89992001081651,
        25.580718362291474,
    ])
    assert results["data"][0]["blank"] == [
        True,
        True,
        False,
        False,
        False,
        False,
        False,
        False,
        False,
    ]
    assert results["blank_regions"] == [(0, 10)]

    # Unset the integrated_prf flags, so that the analysis instead uses the
    # unmodified intensity.sum.value
    refl.unset_flags(flex.bool(len(refl), True), refl.flags.integrated_prf)
    results = detect_blanks.blank_integrated_analysis(refl,
                                                      expts[0].scan,
                                                      phi_step=5,
                                                      fractional_loss=0.1)
    assert not any(results["data"][0]["blank"])
    assert results["blank_regions"] == []