Example #1
  def tst_updating(self):

    from dials.array_family import flex

    # The columns as lists
    c1 = list(range(10))
    c2 = list(range(10))
    c3 = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'i', 'j', 'k']

    # Create a table with some elements
    table0 = flex.reflection_table()
    table1 = flex.reflection_table()
    table2 = flex.reflection_table()
    table1['col1'] = flex.int(c1)
    table1['col2'] = flex.double(c2)
    table2['col3'] = flex.std_string(c3)

    # Update from zero columns
    table0.update(table1)
    assert(table0.is_consistent())
    assert(table0.nrows() == 10)
    assert(table0.ncols() == 2)
    print 'OK'

    # Update table1 with table2 columns
    table1.update(table2)
    assert(table1.is_consistent())
    assert(table1.nrows() == 10)
    assert(table1.ncols() == 3)
    assert(table2.is_consistent())
    assert(table2.nrows() == 10)
    assert(table2.ncols() == 1)
    print 'OK'

    # Update table1 with an invalid table
    c3 = ['a', 'b', 'c']

    # Create a table with some elements
    table2 = flex.reflection_table()
    table2['col3'] = flex.std_string(c3)
    try:
      table1.update(table2)
      assert(False)
    except Exception:
      pass

    assert(table1.is_consistent())
    assert(table1.nrows() == 10)
    assert(table1.ncols() == 3)
    assert(table2.is_consistent())
    assert(table2.nrows() == 3)
    assert(table2.ncols() == 1)
    print 'OK'
Example #2
  def tst_copy(self):
    import copy
    from dials.array_family import flex

    # Create a table
    table = flex.reflection_table([
      ('col1', flex.int(range(10)))])

    # Make a shallow copy of the table
    shallow = copy.copy(table)
    shallow['col2'] = flex.double(range(10))
    assert(table.ncols() == 2)
    assert(table.is_consistent())
    print 'OK'

    # Make a deep copy of the table
    deep = copy.deepcopy(table)
    deep['col3'] = flex.std_string(10)
    assert(table.ncols() == 2)
    assert(deep.ncols() == 3)
    assert(table.is_consistent())
    assert(deep.is_consistent())

    table2 = table.copy()
    table2['col3'] = flex.std_string(10)
    assert(table.ncols() == 2)
    assert(table2.ncols() == 3)
    assert(table.is_consistent())
    assert(table2.is_consistent())
    print 'OK'
Example #3
  def decode(self, handle):
    '''Decode the reflection data.'''
    from dials.array_family import flex

    # Get the group containing the reflection data
    g = handle['entry/data_processing']

    # Create the list of reflections
    rl = flex.reflection_table(int(g.attrs['num_reflections']))

    # Decode all the columns
    for key in g:
      item = g[key]
      name = item.attrs['flex_type']
      if name == 'shoebox':
        flex_type = getattr(flex, name)
        data = item['data']
        mask = item['mask']
        background = item['background']
        col = flex_type(len(rl))
        for i in range(len(rl)):
          col[i].data = flex.double(data['%d' % i].value)
          col[i].mask = flex.int(mask['%d' % i].value)
          col[i].background = flex.double(background['%d' % i].value)

      else:
        flex_type = getattr(flex, name)
        col = self.decode_column(flex_type, item)
      rl[str(key)] = col

    # Return the list of reflections
    return rl
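The decode_column helper referenced above is not shown in this example. A minimal hypothetical sketch, assuming each plain column is stored as a single HDF5 dataset whose raw values can be handed to the matching flex constructor (the real encoding in dials may differ):

  def decode_column(self, flex_type, item):
    '''Hypothetical sketch only (not part of the original example): rebuild
    a simple scalar column from an HDF5 dataset. Compound columns such as
    vec3_double or miller_index, and string columns, would need their own
    handling.'''
    return flex_type(item.value.tolist())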
Example #4
File: finder.py  Project: dials/dials
  def __call__(self, imageset, shoeboxes):
    '''
    Filter shoeboxes and create reflection table

    '''
    from dials.array_family import flex

    # Calculate the spot centroids
    centroid = shoeboxes.centroid_valid()
    logger.info('Calculated {0} spot centroids'.format(len(shoeboxes)))

    # Calculate the spot intensities
    intensity = shoeboxes.summed_intensity()
    logger.info('Calculated {0} spot intensities'.format(len(shoeboxes)))

    # Create the observations
    observed = flex.observation(shoeboxes.panels(), centroid, intensity)

    # Filter the reflections and select only the desired spots
    flags = self.filter_spots(None,
        sweep=imageset,
        observations=observed,
        shoeboxes=shoeboxes)
    observed = observed.select(flags)
    shoeboxes = shoeboxes.select(flags)

    # Return as a reflection list
    return flex.reflection_table(observed, shoeboxes)
Example #5
  def tst_serialize(self):

    from dials.array_family import flex

    # The columns as lists
    c1 = list(range(10))
    c2 = list(range(10))
    c3 = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'i', 'j', 'k']

    # Create a table with some elements
    table = flex.reflection_table()
    table['col1'] = flex.int(c1)
    table['col2'] = flex.double(c2)
    table['col3'] = flex.std_string(c3)

    # Pickle, then unpickle
    import cPickle as pickle
    obj = pickle.dumps(table)
    new_table = pickle.loads(obj)
    assert(new_table.is_consistent())
    assert(new_table.nrows() == 10)
    assert(new_table.ncols() == 3)
    assert(all(a == b for a, b in zip(new_table['col1'], c1)))
    assert(all(a == b for a, b in zip(new_table['col2'], c2)))
    assert(all(a == b for a, b in zip(new_table['col3'], c3)))
    print 'OK'
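Beyond pickling the table object in memory, a reflection table can also be written to and read back from a pickle file with its own helpers (as_pickle appears in Example #9 below). A brief sketch, assuming from_pickle is available in this version of dials and using an illustrative file name:

    # Round-trip via a pickle file (file name is illustrative)
    table.as_pickle('example_table.pickle')
    restored = flex.reflection_table.from_pickle('example_table.pickle')
    assert(restored.nrows() == 10)
    assert(restored.ncols() == 3)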
Example #6
def spot_resolution_shells(imagesets, reflections, params):
  goniometer = imagesets[0].get_goniometer()
  from dials.algorithms.indexing import indexer
  from dials.array_family import flex
  mapped_reflections = flex.reflection_table()
  for i, imageset in enumerate(imagesets):
    if 'imageset_id' in reflections:
      sel = (reflections['imageset_id'] == i)
    else:
      sel = (reflections['id'] == i)
    if isinstance(reflections['id'], flex.size_t):
      reflections['id'] = reflections['id'].as_int()
    refl = indexer.indexer_base.map_spots_pixel_to_mm_rad(
      reflections.select(sel),
      imageset.get_detector(), imageset.get_scan())

    indexer.indexer_base.map_centroids_to_reciprocal_space(
      refl, imageset.get_detector(), imageset.get_beam(),
      imageset.get_goniometer())
    mapped_reflections.extend(refl)
  reflections = mapped_reflections
  two_theta_array = reflections['rlp'].norms()
  h0 = flex.weighted_histogram(two_theta_array ** 2, n_slots=params.shells)
  n = h0.slots()
  d = 1.0 / flex.sqrt(h0.slot_centers())

  for j in range(params.shells):
    print '%d %f %d' % (j, d[j], n[j])
Example #7
  def generate_reflections(self):
    """Use reeke_model to generate indices of reflections near to the Ewald
    sphere that might be observed on a still image. Build a reflection_table
    of these."""
    from cctbx.sgtbx import space_group_info

    space_group_type = space_group_info("P 1").group().type()

    # create a ReekeIndexGenerator
    UB = self.crystal.get_U() * self.crystal.get_B()
    axis = self.goniometer.get_rotation_axis()
    s0 = self.beam.get_s0()
    dmin = 1.5
    # use the same UB at the beginning and end - the margin parameter ensures
    # we still have indices close to the Ewald sphere generated
    from dials.algorithms.spot_prediction import ReekeIndexGenerator
    r = ReekeIndexGenerator(UB, UB, space_group_type, axis, s0, dmin=1.5, margin=1)

    # generate indices
    hkl = r.to_array()
    nref = len(hkl)

    # create a reflection table
    from dials.array_family import flex
    table = flex.reflection_table()
    table['flags'] = flex.size_t(nref, 0)
    table['id']    = flex.int(nref, 0)
    table['panel'] = flex.size_t(nref, 0)
    table['miller_index'] = flex.miller_index(hkl)
    table['entering']     = flex.bool(nref, True)
    table['s1']           = flex.vec3_double(nref)
    table['xyzcal.mm']    = flex.vec3_double(nref)
    table['xyzcal.px']    = flex.vec3_double(nref)

    return table
Example #8
  def run(self):
    from dials.algorithms.profile_model.modeller import ProfileModellerIface
    from dials.algorithms.profile_model.modeller import MultiExpProfileModeller
    from dials.array_family import flex

    class Modeller(ProfileModellerIface):

      def __init__(self, index, expected):
        self.index = index
        self.accumulated = False
        self.finalized = False
        self.expected = expected
        super(Modeller, self).__init__()

      def model(self, reflections):
        assert(reflections['id'].all_eq(self.index))
        assert(len(reflections) == self.expected)

      def accumulate(self, other):
        self.accumulated = True
        assert(self.index == other.index)

      def finalize(self):
        assert(self.accumulated == True)
        self.finalized = True

    # The expected number of reflections
    expected = [100, 200, 300, 400, 500]

    # Create some reflections
    reflections = flex.reflection_table()
    reflections["id"] = flex.int()
    for idx in range(len(expected)):
      for n in range(expected[idx]):
        reflections.append({
          "id" : idx
        })

    # Create two modellers
    modeller1 = MultiExpProfileModeller()
    modeller2 = MultiExpProfileModeller()
    for idx in range(len(expected)):
      modeller1.add(Modeller(idx, expected[idx]))
      modeller2.add(Modeller(idx, expected[idx]))

    # Model the reflections
    modeller1.model(reflections)
    modeller2.model(reflections)

    # Accumulate
    modeller1.accumulate(modeller2)

    # Finalize
    modeller1.finalize()

    # Check finalized
    assert(modeller1.finalized)

    # Test passed
    print 'OK'
Example #9
  def __call__(self, params, options):
    ''' Import the integrate.hkl file. '''

    from iotbx.xds import integrate_hkl
    from dials.array_family import flex
    from dials.util.command_line import Command
    from cctbx import sgtbx

    # Get the unit cell to calculate the resolution
    uc = self._experiment.crystal.get_unit_cell()

    # Read the INTEGRATE.HKL file
    Command.start('Reading INTEGRATE.HKL')
    handle = integrate_hkl.reader()
    handle.read_file(self._integrate_hkl)
    hkl    = flex.miller_index(handle.hkl)
    xyzcal = flex.vec3_double(handle.xyzcal)
    xyzobs = flex.vec3_double(handle.xyzobs)
    iobs   = flex.double(handle.iobs)
    sigma  = flex.double(handle.sigma)
    rlp = flex.double(handle.rlp)
    peak = flex.double(handle.peak) * 0.01
    Command.end('Read %d reflections from INTEGRATE.HKL file.' % len(hkl))

    # Derive the reindex matrix
    rdx = self.derive_reindex_matrix(handle)
    print 'Reindex matrix:\n%d %d %d\n%d %d %d\n%d %d %d' % (rdx.elems)

    # Reindex the reflections
    Command.start('Reindexing reflections')
    cb_op = sgtbx.change_of_basis_op(sgtbx.rt_mx(sgtbx.rot_mx(rdx.elems)))
    hkl = cb_op.apply(hkl)
    Command.end('Reindexed %d reflections' % len(hkl))

    # Create the reflection list
    Command.start('Creating reflection table')
    table = flex.reflection_table()
    table['id'] = flex.int(len(hkl), 0)
    table['panel'] = flex.size_t(len(hkl), 0)
    table['miller_index'] = hkl
    table['xyzcal.px'] = xyzcal
    table['xyzobs.px.value'] = xyzobs
    table['intensity.cor.value'] = iobs
    table['intensity.cor.variance'] = sigma**2
    table['intensity.prf.value'] = iobs * peak / rlp
    table['intensity.prf.variance'] = (sigma * peak / rlp)**2
    table['lp'] = 1.0 / rlp
    table['d'] = flex.double([uc.d(h) for h in hkl])
    Command.end('Created table with {0} reflections'.format(len(table)))

    # Output the table to pickle file
    if params.output.filename is None:
      params.output.filename = 'integrate_hkl.pickle'
    Command.start('Saving reflection table to %s' % params.output.filename)
    table.as_pickle(params.output.filename)
    Command.end('Saved reflection table to %s' % params.output.filename)
Example #10
    def __call__(self):
        self.corrected_reflections = flex.reflection_table()
        for expt, refl in zip(
            self.experiments, self.reflections.split_by_experiment_id()
        ):
            # extract experiment details
            detector = expt.detector
            panels = [p for p in detector]
            panel_size_px = [p.get_image_size() for p in panels]
            pixel_size_mm = [p.get_pixel_size()[0] for p in panels]
            detector_dist_mm = [p.get_distance() for p in panels]
            beam = expt.beam
            wavelength_ang = beam.get_wavelength()

            # exclude reflections with no foreground pixels
            refl_valid = refl.select(
                refl["num_pixels.valid"] > 0 and refl["num_pixels.foreground"] > 0
            )
            refl_zero = refl_valid.select(refl_valid["intensity.sum.value"] == 0)
            refl_nonzero = refl_valid.select(refl_valid["intensity.sum.value"] != 0)

            def correct(refl_sele, smart_sigmas=True):
                kapton_correction = image_kapton_correction(
                    panel_size_px=panel_size_px,
                    pixel_size_mm=pixel_size_mm,
                    detector_dist_mm=detector_dist_mm,
                    wavelength_ang=wavelength_ang,
                    reflections_sele=refl_sele,
                    params=self.params,
                    expt=expt,
                    refl=refl,
                    smart_sigmas=smart_sigmas,
                    logger=self.logger,
                )

                k_corr, k_sigmas = kapton_correction()
                refl_sele["kapton_absorption_correction"] = k_corr
                if smart_sigmas:
                    refl_sele["kapton_absorption_correction_sigmas"] = k_sigmas
                    # apply corrections and propagate error
                    # term1 = (sig(C)/C)^2
                    # term2 = (sig(Imeas)/Imeas)^2
                    # I' = C*I
                    # sig^2(I') = (I')^2*(term1 + term2)
                    integrated_data = refl_sele["intensity.sum.value"]
                    integrated_variance = refl_sele["intensity.sum.variance"]
                    integrated_sigma = flex.sqrt(integrated_variance)
                    term1 = flex.pow(k_sigmas / k_corr, 2)
                    term2 = flex.pow(integrated_sigma / integrated_data, 2)
                    integrated_data *= k_corr
                    integrated_variance = flex.pow(integrated_data, 2) * (term1 + term2)
                    refl_sele["intensity.sum.value"] = integrated_data
                    refl_sele["intensity.sum.variance"] = integrated_variance
                    # order is purposeful: the two lines above require that integrated_data
                    # has already been corrected!
                else:
                    refl_sele["intensity.sum.value"] *= k_corr
                    refl_sele["intensity.sum.variance"] *= (k_corr) ** 2
                return refl_sele

            if len(refl_zero) > 0 and self.params.smart_sigmas:
                # process nonzero intensity reflections with smart sigmas as requested
                # but turn them off for zero intensity reflections to avoid a division by zero
                # during error propagation. Not at all certain this is the best way.
                self.corrected_reflections.extend(
                    correct(refl_nonzero, smart_sigmas=True)
                )
                self.corrected_reflections.extend(
                    correct(refl_zero, smart_sigmas=False)
                )
            else:
                self.corrected_reflections.extend(
                    correct(refl_valid, smart_sigmas=self.params.smart_sigmas)
                )

        return self.experiments, self.corrected_reflections
Example #11
def generate_simple_table(prf=True):
    """Generate a reflection table for testing intensity combination.
    The numbers are contrived to make sum intensities agree well at high
    intensity but terribly at low and vice versa for profile intensities."""
    reflections = flex.reflection_table()
    reflections["miller_index"] = flex.miller_index([
        (0, 0, 1),
        (0, 0, 1),
        (0, 0, 1),
        (0, 0, 1),
        (0, 0, 1),
        (0, 0, 2),
        (0, 0, 2),
        (0, 0, 2),
        (0, 0, 2),
        (0, 0, 2),
        (0, 0, 3),
        (0, 0, 3),
        (0, 0, 3),
        (0, 0, 3),
        (0, 0, 3),
        (0, 0, 4),
        (0, 0, 4),
        (0, 0, 4),
        (0, 0, 4),
        (0, 0, 4),
        (0, 0, 5),
        (0, 0, 5),
        (0, 0, 5),
        (0, 0, 5),
        (0, 0, 5),
    ])
    reflections["inverse_scale_factor"] = flex.double(25, 1.0)
    # Contrive an example that should give the best cc12 when combined.
    # make sum intensities agree well at high intensity but terribly at low
    # and vice versa for profile intensities.
    # profile less consistent at high intensity here

    # sum less consistent at low intensity here
    reflections["intensity.sum.value"] = flex.double([
        10000.0,
        11000.0,
        9000.0,
        8000.0,
        12000.0,
        500.0,
        5600.0,
        5500.0,
        2000.0,
        6000.0,
        100.0,
        50.0,
        150.0,
        75.0,
        125.0,
        30.0,
        10.0,
        2.0,
        35.0,
        79.0,
        1.0,
        10.0,
        20.0,
        10.0,
        5.0,
    ])
    reflections["intensity.sum.variance"] = flex.double([10000] * 5 +
                                                        [5000] * 5 +
                                                        [100] * 5 + [30] * 5 +
                                                        [10] * 5)
    reflections.set_flags(flex.bool(25, False),
                          reflections.flags.outlier_in_scaling)
    reflections.set_flags(flex.bool(25, True), reflections.flags.integrated)
    reflections["lp"] = flex.double(25, 0.5)
    if prf:
        reflections["intensity.prf.value"] = flex.double([
            10000.0,
            16000.0,
            12000.0,
            6000.0,
            9000.0,
            5000.0,
            2000.0,
            1500.0,
            1300.0,
            9000.0,
            100.0,
            80.0,
            120.0,
            90.0,
            100.0,
            30.0,
            40.0,
            50.0,
            30.0,
            30.0,
            10.0,
            12.0,
            9.0,
            8.0,
            10.0,
        ])
        reflections["intensity.prf.variance"] = flex.double([1000] * 5 +
                                                            [500] * 5 +
                                                            [10] * 5 +
                                                            [3] * 5 + [1] * 5)
    reflections = calculate_prescaling_correction(reflections)
    return reflections
Example #12
def run():

  # The script usage
  usage  = "usage: xia2.multi_crystal_scale_and_merge [options] [param.phil] " \
           "experiments1.json experiments2.json reflections1.pickle " \
           "reflections2.pickle..."

  # Create the parser
  parser = OptionParser(
    usage=usage,
    phil=phil_scope,
    read_reflections=True,
    read_experiments=True,
    check_format=False,
    epilog=help_message)

  # Parse the command line
  params, options = parser.parse_args(show_diff_phil=True)

  # Configure the logging

  for name in ('xia2', 'dials'):
    log.config(
      info=params.output.log,
      debug=params.output.debug_log,
      name=name)
  from dials.util.version import dials_version
  logger.info(dials_version())

  # Try to load the models and data
  if len(params.input.experiments) == 0:
    logger.info("No Experiments found in the input")
    parser.print_help()
    return
  if len(params.input.reflections) == 0:
    logger.info("No reflection data found in the input")
    parser.print_help()
    return
  try:
    assert len(params.input.reflections) == len(params.input.experiments)
  except AssertionError:
    raise Sorry("The number of input reflections files does not match the "
      "number of input experiments")

  if params.seed is not None:
    import random
    flex.set_random_seed(params.seed)
    random.seed(params.seed)

  expt_filenames = OrderedDict((e.filename, e.data) for e in params.input.experiments)
  refl_filenames = OrderedDict((r.filename, r.data) for r in params.input.reflections)

  experiments = flatten_experiments(params.input.experiments)
  reflections = flatten_reflections(params.input.reflections)

  reflections_all = flex.reflection_table()
  assert len(reflections) == 1 or len(reflections) == len(experiments)
  if len(reflections) > 1:
    for i, (expt, refl) in enumerate(zip(experiments, reflections)):
      expt.identifier = '%i' % i
      refl['identifier'] = flex.std_string(refl.size(), expt.identifier)
      refl['id'] = flex.int(refl.size(), i)
      reflections_all.extend(refl)
      reflections_all.experiment_identifiers()[i] = expt.identifier
  else:
    reflections_all = reflections[0]
    assert 'identifier' in reflections_all
    assert len(set(reflections_all['identifier'])) == len(experiments)

  assert reflections_all.are_experiment_identifiers_consistent(experiments)

  if params.identifiers is not None:
    identifiers = []
    for identifier in params.identifiers:
      identifiers.extend(identifier.split(','))
    params.identifiers = identifiers
  scaled = ScaleAndMerge.MultiCrystalScale(experiments, reflections_all, params)
Example #13
def load(entry):
    print("Loading NXreflections")

    # Check the feature is present
    assert "features" in entry
    assert 7 in entry["features"]

    # Get the entry
    refls = entry["reflections"]
    if refls.attrs["NX_class"] == "NXsubentry":
        # Backward compatibility. See https://github.com/nexusformat/definitions/pull/752
        # Get the definition
        definition = refls["definition"]
        assert definition[()] == "NXreflections"
        assert definition.attrs["version"] == 1
    else:
        assert refls.attrs["NX_class"] == "NXreflections"

    # The paths to the experiments
    experiments = list(refls["experiments"])

    # The columns to try
    columns = [
        "miller_index",
        "id",
        "partial_id",
        "entering",
        "flags",
        "panel",
        "d",
        "partiality",
        "xyzcal.px",
        "xyzcal.mm",
        "bbox",
        "xyzobs.px.value",
        "xyzobs.px.variance",
        "xyzobs.mm.value",
        "xyzobs.mm.variance",
        "background.mean",
        "intensity.sum.value",
        "intensity.sum.variance",
        "intensity.prf.value",
        "intensity.prf.variance",
        "profile.correlation",
        "lp",
        "num_pixels.background",
        "num_pixels.foreground",
        "num_pixels.background_used",
        "num_pixels.valid",
        "profile.rmsd",
    ]

    # The reflection table
    table = None

    # For each column in the reflection table dump to file
    for key in columns:
        try:
            col = read(refls, key)
        except KeyError:
            continue
        if table is None:
            table = flex.reflection_table()
        table[key] = col

    # Return the table
    return table, experiments
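A minimal usage sketch for the loader above, assuming the reflections live in a NeXus/HDF5 file opened with h5py (the file name and entry path are illustrative):

import h5py

with h5py.File("reflections.nxs", "r") as handle:  # illustrative file name
    table, experiments = load(handle["entry"])
if table is not None:
    print("Loaded %d reflections with %d columns" % (table.nrows(), table.ncols()))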
Example #14
 def reflection_table_stub(self, reflections):
     '''Return an empty reflection table with the same format as the reflection table input to this class'''
     table = flex.reflection_table()
     for key in reflections:
         table[key] = type(reflections[key])()
     return table
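A short usage sketch of the stub above (the helper object and column name are assumed for illustration): the returned table has the same columns and column types as the input but zero rows, so selections from the original can be accumulated into it later.

# Illustrative use of the stub defined above
stub = helper.reflection_table_stub(reflections)
assert stub.nrows() == 0
assert set(stub.keys()) == set(reflections.keys())
stub.extend(reflections.select(reflections['intensity.sum.value'] > 0))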
Example #15
    def split_for_scan_range(self, experiments, reference, scan_range):
        ''' Update experiments when scan range is set. '''
        from dxtbx.model.experiment_list import ExperimentList
        from dxtbx.model.experiment_list import Experiment
        from dials.array_family import flex

        # Only do anything if the scan range is set
        if scan_range is not None and len(scan_range) > 0:

            # Ensure that all experiments have the same imageset and scan
            iset = [e.imageset for e in experiments]
            scan = [e.scan for e in experiments]
            assert (all(x == iset[0] for x in iset))
            assert (all(x == scan[0] for x in scan))

            # Get the imageset and scan
            iset = experiments[0].imageset
            scan = experiments[0].scan

            # Get the array range
            if scan is not None:
                frame10, frame11 = scan.get_array_range()
                assert (scan.get_num_images() == len(iset))
            else:
                frame10, frame11 = (0, len(iset))

            # Create the new lists
            new_experiments = ExperimentList()
            new_reference_all = reference.split_by_experiment_id()
            new_reference = flex.reflection_table()
            for i in range(len(new_reference_all) - len(experiments)):
                new_reference_all.append(flex.reflection_table())
            assert (len(new_reference_all) == len(experiments))

            # Loop through all the scan ranges and create a new experiment list with
            # the requested scan ranges.
            for frame00, frame01 in scan_range:
                assert (frame01 > frame00)
                assert (frame00 >= frame10)
                assert (frame01 <= frame11)
                index0 = frame00 - frame10
                index1 = index0 + (frame01 - frame00)
                assert (index0 < index1)
                assert (index0 >= 0)
                assert (index1 <= len(iset))
                new_iset = iset[index0:index1]
                if scan is None:
                    new_scan = None
                else:
                    new_scan = scan[index0:index1]
                for i, e1 in enumerate(experiments):
                    e2 = Experiment()
                    e2.beam = e1.beam
                    e2.detector = e1.detector
                    e2.goniometer = e1.goniometer
                    e2.crystal = e1.crystal
                    e2.profile = e1.profile
                    e2.imageset = new_iset
                    e2.scan = new_scan
                    new_reference_all[i]['id'] = flex.int(
                        len(new_reference_all[i]), len(new_experiments))
                    new_reference.extend(new_reference_all[i])
                    new_experiments.append(e2)
            experiments = new_experiments
            reference = new_reference

            # Print some information
            logger.info(
                'Modified experiment list to integrate over requested scan range'
            )
            for frame00, frame01 in scan_range:
                logger.info(' scan_range = %d -> %d' % (frame00, frame01))
            logger.info('')

        # Return the experiments
        return experiments, reference
Example #16
def generated_refl_for_comb():
    """Create a reflection table suitable for splitting into blocks."""
    reflections = flex.reflection_table()
    reflections["intensity"] = flex.double([1.0, 2.0, 3.0, 4.0, 500.0, 6.0, 2.0, 2.0])
    reflections["variance"] = flex.double(8, 1.0)
    reflections["intensity.prf.value"] = flex.double(
        [1.0, 3.0, 3.0, 4.0, 50.0, 6.0, 3.0, 2.0]
    )
    reflections["intensity.prf.variance"] = flex.double(
        [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0]
    )
    reflections["intensity.sum.value"] = flex.double(
        [1.0, 4.0, 3.0, 4.0, 500.0, 6.0, 6.0, 2.0]
    )
    reflections["intensity.sum.variance"] = flex.double(8, 1.0)
    reflections["miller_index"] = flex.miller_index(
        [
            (1, 0, 0),
            (2, 0, 0),
            (0, 0, 1),
            (2, 2, 2),
            (1, 0, 0),
            (2, 0, 0),
            (1, 0, 0),
            (1, 0, 0),
        ]
    )
    reflections["d"] = flex.double([0.8, 2.1, 2.0, 1.4, 1.6, 2.5, 2.5, 2.5])
    reflections["partiality"] = flex.double(8, 1.0)
    reflections["Esq"] = flex.double(8, 1.0)
    reflections["inverse_scale_factor"] = flex.double(8, 1.0)
    reflections["xyzobs.px.value"] = flex.vec3_double(
        [
            (0.0, 0.0, 0.0),
            (0.0, 0.0, 5.0),
            (0.0, 0.0, 8.0),
            (0.0, 0.0, 10.0),
            (0.0, 0.0, 12.0),
            (0.0, 0.0, 15.0),
            (0.0, 0.0, 15.0),
            (0.0, 0.0, 15.0),
        ]
    )
    reflections["s1"] = flex.vec3_double(
        [
            (0.0, 0.1, 1.0),
            (0.0, 0.1, 1.0),
            (0.0, 0.1, 1.0),
            (0.0, 0.1, 1.0),
            (0.0, 0.1, 1.0),
            (0.0, 0.1, 1.0),
            (0.0, 0.1, 1.0),
            (0.0, 0.1, 1.0),
        ]
    )
    reflections.set_flags(flex.bool(8, True), reflections.flags.integrated)
    reflections.set_flags(
        flex.bool([False] * 5 + [True] + [False] * 2), reflections.flags.bad_for_scaling
    )
    reflections["id"] = flex.int(8, 0)
    reflections.experiment_identifiers()[0] = "0"
    reflections = calculate_prescaling_correction(reflections)
    return reflections
Example #17
    def run(self, experiments, reflections):
        self.logger.log_step_time("SCALE_FRAMES")
        if self.params.scaling.algorithm != "mark0":  # mark1 implies no scaling/post-refinement
            self.logger.log("No scaling was done")
            if self.mpi_helper.rank == 0:
                self.logger.main_log("No scaling was done")
            return experiments, reflections

        new_experiments = ExperimentList()
        new_reflections = flex.reflection_table()

        # scale experiments, one at a time. Reject experiments that do not correlate with the reference or fail to scale.
        results = []
        slopes = []
        correlations = []
        high_res_experiments = 0
        experiments_rejected_because_of_low_signal = 0
        experiments_rejected_because_of_low_correlation_with_reference = 0

        target_symm = symmetry(
            unit_cell=self.params.scaling.unit_cell,
            space_group_info=self.params.scaling.space_group)
        for experiment in experiments:
            exp_reflections = reflections.select(
                reflections['exp_id'] == experiment.identifier)

            # Build a miller array for the experiment reflections
            exp_miller_indices = miller.set(
                target_symm, exp_reflections['miller_index_asymmetric'], True)
            exp_intensities = miller.array(
                exp_miller_indices, exp_reflections['intensity.sum.value'],
                flex.double(
                    flex.sqrt(exp_reflections['intensity.sum.variance'])))

            model_intensities = self.params.scaling.i_model

            # Extract an array of HKLs from the model to match the experiment HKLs
            matching_indices = miller.match_multi_indices(
                miller_indices_unique=model_intensities.indices(),
                miller_indices=exp_intensities.indices())

            # Least squares
            if self.params.scaling.mark0.fit_reference_to_experiment:  # RB: in cxi-merge we fit reference to experiment, but we should really do it the other way
                result = self.fit_reference_to_experiment(
                    model_intensities, exp_intensities, matching_indices)
            else:
                result = self.fit_experiment_to_reference(
                    model_intensities, exp_intensities, matching_indices)

            if result.error == scaling_result.err_low_signal:
                experiments_rejected_because_of_low_signal += 1
                continue
            elif result.error == scaling_result.err_low_correlation:
                experiments_rejected_because_of_low_correlation_with_reference += 1
                continue

            slopes.append(result.slope)
            correlations.append(result.correlation)

            if self.params.output.log_level == 0:
                self.logger.log(
                    "Experiment ID: %s; Slope: %f; Correlation %f" %
                    (experiment.identifier, result.slope, result.correlation))

            # count high resolution experiments
            if exp_intensities.d_min() <= self.params.merging.d_min:
                high_res_experiments += 1

            # apply scale factors
            if not self.params.postrefinement.enable:
                if self.params.scaling.mark0.fit_reference_to_experiment:
                    exp_reflections['intensity.sum.value'] /= result.slope
                    exp_reflections['intensity.sum.variance'] /= (
                        result.slope**2)
                else:
                    exp_reflections['intensity.sum.value'] *= result.slope
                    exp_reflections['intensity.sum.variance'] *= (
                        result.slope**2)

            new_experiments.append(experiment)
            new_reflections.extend(exp_reflections)

        rejected_experiments = len(experiments) - len(new_experiments)
        assert rejected_experiments == experiments_rejected_because_of_low_signal + \
                                        experiments_rejected_because_of_low_correlation_with_reference

        reflections_removed_because_of_rejected_experiments = reflections.size(
        ) - new_reflections.size()

        self.logger.log("Experiments rejected because of low signal: %d" %
                        experiments_rejected_because_of_low_signal)
        self.logger.log(
            "Experiments rejected because of low correlation with reference: %d"
            % experiments_rejected_because_of_low_correlation_with_reference)
        self.logger.log(
            "Reflections rejected because of rejected experiments: %d" %
            reflections_removed_because_of_rejected_experiments)
        self.logger.log("High resolution experiments: %d" %
                        high_res_experiments)
        if self.params.postrefinement.enable:
            self.logger.log(
                "Note: scale factors were not applied, because postrefinement is enabled"
            )

        # MPI-reduce all counts
        comm = self.mpi_helper.comm
        MPI = self.mpi_helper.MPI
        total_experiments_rejected_because_of_low_signal = comm.reduce(
            experiments_rejected_because_of_low_signal, MPI.SUM, 0)
        total_experiments_rejected_because_of_low_correlation_with_reference = comm.reduce(
            experiments_rejected_because_of_low_correlation_with_reference,
            MPI.SUM, 0)
        total_reflections_removed_because_of_rejected_experiments = comm.reduce(
            reflections_removed_because_of_rejected_experiments, MPI.SUM, 0)
        total_high_res_experiments = comm.reduce(high_res_experiments, MPI.SUM,
                                                 0)
        all_slopes = comm.reduce(slopes, MPI.SUM, 0)
        all_correlations = comm.reduce(correlations, MPI.SUM, 0)

        # rank 0: log data statistics
        if self.mpi_helper.rank == 0:
            self.logger.main_log(
                'Experiments rejected because of low signal: %d' %
                total_experiments_rejected_because_of_low_signal)
            self.logger.main_log(
                'Experiments rejected because of low correlation with reference: %d'
                %
                total_experiments_rejected_because_of_low_correlation_with_reference
            )
            self.logger.main_log(
                'Reflections rejected because of rejected experiments: %d' %
                total_reflections_removed_because_of_rejected_experiments)
            self.logger.main_log(
                'Experiments with high resolution of %5.2f Angstrom or better: %d'
                % (self.params.merging.d_min, total_high_res_experiments))

            if len(all_slopes) > 0:
                stats_slope = flex.mean_and_variance(flex.double(all_slopes))
                self.logger.main_log(
                    'Average experiment scale factor wrt reference: %f' %
                    (stats_slope.mean()))
            if len(all_correlations) > 0:
                stats_correlation = flex.mean_and_variance(
                    flex.double(all_correlations))
                self.logger.main_log(
                    'Average experiment correlation with reference: %f +/- %f'
                    %
                    (stats_correlation.mean(),
                     stats_correlation.unweighted_sample_standard_deviation()))

            if self.params.postrefinement.enable:
                self.logger.main_log(
                    "Note: scale factors were not applied, because postrefinement is enabled"
                )

        self.logger.log_step_time("SCALE_FRAMES", True)

        # Do we have any data left?
        from xfel.merging.application.utils.data_counter import data_counter
        data_counter(self.params).count(new_experiments, new_reflections)

        return new_experiments, new_reflections
Example #18
def simple_gaussian_spots(params):
    from dials.array_family import flex
    from scitbx import matrix

    r = params.rotation
    axis = matrix.col((r.axis.x, r.axis.y, r.axis.z))
    if axis.length() > 0:
        rotation = axis.axis_and_angle_as_r3_rotation_matrix(r.angle, deg=True)
    else:
        rotation = matrix.sqr((1, 0, 0, 0, 1, 0, 0, 0, 1))

    # generate mask and peak values

    from dials.algorithms.shoebox import MaskCode

    mask_peak = MaskCode.Valid | MaskCode.Foreground
    mask_back = MaskCode.Valid | MaskCode.Background

    from dials.util.command_line import ProgressBar

    p = ProgressBar(title="Generating reflections")

    rlist = flex.reflection_table(params.nrefl)
    hkl = flex.miller_index(params.nrefl)
    s1 = flex.vec3_double(params.nrefl)
    xyzmm = flex.vec3_double(params.nrefl)
    xyzpx = flex.vec3_double(params.nrefl)
    panel = flex.size_t(params.nrefl)
    bbox = flex.int6(params.nrefl)

    for j in range(params.nrefl):
        p.update(j * 100.0 / params.nrefl)
        hkl[j] = (random.randint(0, 20), random.randint(0, 20),
                  random.randint(0, 20))
        phi = 2 * math.pi * random.random()
        s1[j] = (0, 0, 0)
        xyzpx[j] = (0, 0, 0)
        xyzmm[j] = (0, 0, phi)
        panel[j] = 0
        bbox[j] = (
            0,
            params.shoebox_size.x,
            0,
            params.shoebox_size.y,
            0,
            params.shoebox_size.z,
        )

    p.finished("Generating %d reflections" % params.nrefl)
    intensity = flex.double(params.nrefl)
    shoebox = flex.shoebox(panel, bbox)
    shoebox.allocate_with_value(MaskCode.Valid)

    p = ProgressBar(title="Generating shoeboxes")

    for i in range(len(rlist)):

        p.update(i * 100.0 / params.nrefl)
        mask = shoebox[i].mask

        if params.pixel_mask == "precise":
            # flag everything as background: peak will be assigned later
            for j in range(len(mask)):
                mask[j] = mask_back
        elif params.pixel_mask == "all":
            # flag we have no idea what anything is
            mask_none = MaskCode.Valid | MaskCode.Foreground | MaskCode.Background
            for j in range(len(mask)):
                mask[j] = mask_none
        elif params.pixel_mask == "static":
            from scitbx.array_family import flex

            x0 = params.spot_offset.x + params.shoebox_size.x / 2
            y0 = params.spot_offset.y + params.shoebox_size.y / 2
            z0 = params.spot_offset.z + params.shoebox_size.z / 2
            sx = params.mask_nsigma * params.spot_size.x
            sy = params.mask_nsigma * params.spot_size.y
            sz = params.mask_nsigma * params.spot_size.z

            # The x, y, z indices
            z, y, x = zip(*itertools.product(*(range(n) for n in mask.all())))
            xyz = flex.vec3_double(flex.double(x), flex.double(y),
                                   flex.double(z))

            # Calculate SUM(((xj - xj0) / sxj)**2) for each element
            xyz0 = (x0, y0, z0)
            isxyz = (1.0 / sx, 1.0 / sy, 1.0 / sz)
            dxyz = sum(
                (x * isx)**2
                for x, isx in zip(((xyz - xyz0) * rotation).parts(), isxyz))

            # Set the mask values
            index = dxyz <= 1.0
            index.reshape(mask.accessor())
            mask.set_selected(index, MaskCode.Valid | MaskCode.Foreground)
            mask.set_selected(~index, MaskCode.Valid | MaskCode.Background)

        sbox = shoebox[i].data

        # reflection itself, including setting the peak region if we're doing that
        # FIXME use flex arrays to make the rotation bit more efficient as this is
        # now rather slow...

        counts_true = 0
        for j in range(params.counts):
            _x = random.gauss(0, params.spot_size.x)
            _y = random.gauss(0, params.spot_size.y)
            _z = random.gauss(0, params.spot_size.z)

            Rxyz = rotation * matrix.col((_x, _y, _z)).elems

            x = int(Rxyz[0] + params.spot_offset.x + params.shoebox_size.x / 2)
            y = int(Rxyz[1] + params.spot_offset.y + params.shoebox_size.y / 2)
            z = int(Rxyz[2] + params.spot_offset.z + params.shoebox_size.z / 2)

            if x < 0 or x >= params.shoebox_size.x:
                continue
            if y < 0 or y >= params.shoebox_size.y:
                continue
            if z < 0 or z >= params.shoebox_size.z:
                continue
            sbox[z, y, x] += 1
            counts_true += 1
            if params.pixel_mask == "precise":
                mask[z, y, x] = mask_peak

        intensity[i] = counts_true

        if params.background:
            # background:flat;
            for j in range(params.background * len(sbox)):
                x = random.randint(0, params.shoebox_size.x - 1)
                y = random.randint(0, params.shoebox_size.y - 1)
                z = random.randint(0, params.shoebox_size.z - 1)
                sbox[z, y, x] += 1
        else:
            # or inclined
            random_background_plane(
                sbox,
                params.background_a,
                params.background_b,
                params.background_c,
                params.background_d,
            )

    rlist["miller_index"] = hkl
    rlist["s1"] = s1
    rlist["xyzcal.px"] = xyzpx
    rlist["xyzcal.mm"] = xyzmm
    rlist["bbox"] = bbox
    rlist["panel"] = panel
    rlist["shoebox"] = shoebox
    rlist["intensity.sum.value"] = intensity
    p.finished("Generating %d shoeboxes" % params.nrefl)

    return rlist
Example #19
    def run(self, experiments, reflections):
        self.logger.log_step_time("POSTREFINEMENT")
        if (not self.params.postrefinement.enable) or (
                self.params.scaling.algorithm !=
                "mark0"):  # mark1 implies no scaling/post-refinement
            self.logger.log("No post-refinement was done")
            if self.mpi_helper.rank == 0:
                self.logger.main_log("No post-refinement was done")
            return experiments, reflections

        target_symm = symmetry(
            unit_cell=self.params.scaling.unit_cell,
            space_group_info=self.params.scaling.space_group)
        i_model = self.params.scaling.i_model
        miller_set = self.params.scaling.miller_set

        # Ensure that match_multi_indices() will return identical results
        # when a frame's observations are matched against the
        # pre-generated Miller set, miller_set, and the reference
        # data set, i_model.  The implication is that the same match
        # can be used to map Miller indices to array indices for intensity
        # accumulation, and for determination of the correlation
        # coefficient in the presence of a scaling reference.
        assert len(i_model.indices()) == len(miller_set.indices())
        assert (i_model.indices() == miller_set.indices()).count(False) == 0

        new_experiments = ExperimentList()
        new_reflections = flex.reflection_table()

        experiments_rejected_by_reason = {}  # reason:how_many_rejected

        for experiment in experiments:

            exp_reflections = reflections.select(
                reflections['exp_id'] == experiment.identifier)

            # Build a miller array with _original_ miller indices of the experiment reflections
            exp_miller_indices_original = miller.set(
                target_symm, exp_reflections['miller_index'],
                not self.params.merging.merge_anomalous)
            observations_original_index = miller.array(
                exp_miller_indices_original,
                exp_reflections['intensity.sum.value'],
                flex.double(
                    flex.sqrt(exp_reflections['intensity.sum.variance'])))

            assert exp_reflections.size() == exp_miller_indices_original.size()
            assert observations_original_index.size(
            ) == exp_miller_indices_original.size()

            # Build a miller array with _asymmetric_ miller indices of the experiment reflections
            exp_miller_indices_asu = miller.set(
                target_symm, exp_reflections['miller_index_asymmetric'], True)
            observations = miller.array(
                exp_miller_indices_asu, exp_reflections['intensity.sum.value'],
                flex.double(
                    flex.sqrt(exp_reflections['intensity.sum.variance'])))

            matches = miller.match_multi_indices(
                miller_indices_unique=miller_set.indices(),
                miller_indices=observations.indices())

            pair1 = flex.int([pair[1] for pair in matches.pairs()
                              ])  # refers to the observations
            pair0 = flex.int([pair[0] for pair in matches.pairs()
                              ])  # refers to the model

            # narrow things down to the set that matches, only
            observations_pair1_selected = observations.customized_copy(
                indices=flex.miller_index(
                    [observations.indices()[p] for p in pair1]),
                data=flex.double([observations.data()[p] for p in pair1]),
                sigmas=flex.double([observations.sigmas()[p] for p in pair1]))

            observations_original_index_pair1_selected = observations_original_index.customized_copy(
                indices=flex.miller_index(
                    [observations_original_index.indices()[p] for p in pair1]),
                data=flex.double(
                    [observations_original_index.data()[p] for p in pair1]),
                sigmas=flex.double(
                    [observations_original_index.sigmas()[p] for p in pair1]))
            ###################
            I_observed = observations_pair1_selected.data()
            chosen = chosen_weights(observations_pair1_selected, self.params)

            MILLER = observations_original_index_pair1_selected.indices()
            ORI = crystal_orientation(experiment.crystal.get_A(),
                                      basis_type.reciprocal)
            Astar = matrix.sqr(ORI.reciprocal_matrix())
            Astar_from_experiment = matrix.sqr(experiment.crystal.get_A())
            assert Astar == Astar_from_experiment

            WAVE = experiment.beam.get_wavelength()
            BEAM = matrix.col((0.0, 0.0, -1. / WAVE))
            BFACTOR = 0.
            MOSAICITY_DEG = experiment.crystal.get_half_mosaicity_deg()
            DOMAIN_SIZE_A = experiment.crystal.get_domain_size_ang()

            # calculation of correlation here
            I_reference = flex.double(
                [i_model.data()[pair[0]] for pair in matches.pairs()])
            I_invalid = flex.bool(
                [i_model.sigmas()[pair[0]] < 0. for pair in matches.pairs()])
            use_weights = False  # New facility for getting variance-weighted correlation

            if use_weights:
                # variance weighting
                I_weight = flex.double([
                    1. / (observations_pair1_selected.sigmas()[pair[1]])**2
                    for pair in matches.pairs()
                ])
            else:
                I_weight = flex.double(
                    len(observations_pair1_selected.sigmas()), 1.)

            I_weight.set_selected(I_invalid, 0.)
            chosen.set_selected(I_invalid, 0.)
            """Explanation of 'include_negatives' semantics as originally implemented in cxi.merge postrefinement:
         include_negatives = True
         + and - reflections both used for Rh distribution for initial estimate of RS parameter
         + and - reflections both used for calc/obs correlation slope for initial estimate of G parameter
         + and - reflections both passed to the refinery and used in the target function (makes sense if
                             you look at it from a certain point of view)

         include_negatives = False
         + and - reflections both used for Rh distribution for initial estimate of RS parameter
         +       reflections only used for calc/obs correlation slope for initial estimate of G parameter
         + and - reflections both passed to the refinery and used in the target function (makes sense if
                             you look at it from a certain point of view)

         NOTE: by the new design, "include negatives" is always True
      """

            SWC = simple_weighted_correlation(I_weight, I_reference,
                                              I_observed)
            if self.params.output.log_level == 0:
                self.logger.log("Old correlation is: %f" % SWC.corr)

            Rhall = flex.double()
            for mill in MILLER:
                H = matrix.col(mill)
                Xhkl = Astar * H
                Rh = (Xhkl + BEAM).length() - (1. / WAVE)
                Rhall.append(Rh)
            Rs = math.sqrt(flex.mean(Rhall * Rhall))

            RS = 1. / 10000.  # reciprocal effective domain size of 1 micron
            RS = Rs  # try this empirically determined approximate, monochrome, a-mosaic value
            current = flex.double([SWC.slope, BFACTOR, RS, 0., 0.])

            parameterization_class = rs_parameterization
            refinery = rs2_refinery(ORI=ORI,
                                    MILLER=MILLER,
                                    BEAM=BEAM,
                                    WAVE=WAVE,
                                    ICALCVEC=I_reference,
                                    IOBSVEC=I_observed,
                                    WEIGHTS=chosen)
            refinery.set_profile_shape(self.params.postrefinement.lineshape)

            func = refinery.fvec_callable(parameterization_class(current))
            functional = flex.sum(func * func)

            if self.params.output.log_level == 0:
                self.logger.log("functional: %f" % functional)

            self.current = current
            self.parameterization_class = parameterization_class
            self.refinery = refinery

            self.observations_pair1_selected = observations_pair1_selected
            self.observations_original_index_pair1_selected = observations_original_index_pair1_selected

            error_detected = False

            try:
                self.run_plain()

                result_observations_original_index, result_observations, result_matches = self.result_for_cxi_merge(
                )

                assert result_observations_original_index.size(
                ) == result_observations.size()
                assert result_matches.pairs().size(
                ) == result_observations_original_index.size()
            except (AssertionError, ValueError, RuntimeError) as e:
                error_detected = True
                reason = repr(e)
                if not reason:
                    reason = "Unknown error"
                if not reason in experiments_rejected_by_reason:
                    experiments_rejected_by_reason[reason] = 1
                else:
                    experiments_rejected_by_reason[reason] += 1

            if not error_detected:
                new_experiments.append(experiment)

                new_exp_reflections = flex.reflection_table()
                new_exp_reflections[
                    'miller_index_asymmetric'] = flex.miller_index(
                        result_observations.indices())
                new_exp_reflections['intensity.sum.value'] = flex.double(
                    result_observations.data())
                new_exp_reflections['intensity.sum.variance'] = flex.double(
                    flex.pow(result_observations.sigmas(), 2))
                new_exp_reflections['exp_id'] = flex.std_string(
                    len(new_exp_reflections), experiment.identifier)

                # The original reflection table, i.e. the input to this run() method, has more columns than those used
                # for the postrefinement ("data" and "sigma" in the miller arrays). The problems is: some of the input reflections may have been rejected by now.
                # So to bring those extra columns over to the new reflection table, we have to create a subset of the original exp_reflections table,
                # which would match (by original miller indices) the miller array results of the postrefinement.
                match_original_indices = miller.match_multi_indices(
                    miller_indices_unique=exp_miller_indices_original.indices(
                    ),
                    miller_indices=result_observations_original_index.indices(
                    ))
                exp_reflections_match_results = exp_reflections.select(
                    match_original_indices.pairs().column(0))
                assert (exp_reflections_match_results['intensity.sum.value'] ==
                        result_observations_original_index.data()
                        ).count(False) == 0
                new_exp_reflections[
                    'intensity.sum.value.unmodified'] = exp_reflections_match_results[
                        'intensity.sum.value.unmodified']
                new_exp_reflections[
                    'intensity.sum.variance.unmodified'] = exp_reflections_match_results[
                        'intensity.sum.variance.unmodified']

                new_reflections.extend(new_exp_reflections)

        # report rejected experiments, reflections
        experiments_rejected_by_postrefinement = len(experiments) - len(
            new_experiments)
        reflections_rejected_by_postrefinement = reflections.size(
        ) - new_reflections.size()

        self.logger.log("Experiments rejected by post-refinement: %d" %
                        experiments_rejected_by_postrefinement)
        self.logger.log("Reflections rejected by post-refinement: %d" %
                        reflections_rejected_by_postrefinement)

        all_reasons = []
        for reason, count in six.iteritems(experiments_rejected_by_reason):
            self.logger.log("Experiments rejected due to %s: %d" %
                            (reason, count))
            all_reasons.append(reason)

        comm = self.mpi_helper.comm
        MPI = self.mpi_helper.MPI

        # Collect all rejection reasons from all ranks. Use allreduce to let each rank have all reasons.
        all_reasons = comm.allreduce(all_reasons, MPI.SUM)
        all_reasons = set(all_reasons)

        # Now that each rank has all reasons from all ranks, we can treat the reasons in a uniform way.
        total_experiments_rejected_by_reason = {}
        for reason in all_reasons:
            rejected_experiment_count = 0
            if reason in experiments_rejected_by_reason:
                rejected_experiment_count = experiments_rejected_by_reason[
                    reason]
            total_experiments_rejected_by_reason[reason] = comm.reduce(
                rejected_experiment_count, MPI.SUM, 0)

        total_accepted_experiment_count = comm.reduce(len(new_experiments),
                                                      MPI.SUM, 0)

        # how many reflections have we rejected due to post-refinement?
        rejected_reflections = len(reflections) - len(new_reflections)
        total_rejected_reflections = self.mpi_helper.sum(rejected_reflections)

        if self.mpi_helper.rank == 0:
            for reason, count in six.iteritems(
                    total_experiments_rejected_by_reason):
                self.logger.main_log(
                    "Total experiments rejected due to %s: %d" %
                    (reason, count))
            self.logger.main_log("Total experiments accepted: %d" %
                                 total_accepted_experiment_count)
            self.logger.main_log(
                "Total reflections rejected due to post-refinement: %d" %
                total_rejected_reflections)

        self.logger.log_step_time("POSTREFINEMENT", True)

        # Do we have any data left?
        from xfel.merging.application.utils.data_counter import data_counter
        data_counter(self.params).count(new_experiments, new_reflections)

        return new_experiments, new_reflections
Example #20
def symmetry(experiments, reflection_tables, params=None):
    """
    Run symmetry analysis

    Args:
        experiments: An experiment list.
        reflection_tables: A list of reflection tables.
        params: The dials.symmetry phil scope.
    """
    result = None
    if params is None:
        params = phil_scope.extract()
    refls_for_sym = []

    if params.laue_group is Auto:
        logger.info("=" * 80)
        logger.info("")
        logger.info("Performing Laue group analysis")
        logger.info("")

        # Transform models into miller arrays
        n_datasets = len(experiments)

        # Map experiments and reflections to minimum cell
        # Eliminate reflections that are systematically absent due to centring
        # of the lattice, otherwise they would lead to non-integer miller indices
        # when reindexing to a primitive setting
        cb_ops = change_of_basis_ops_to_minimum_cell(
            experiments,
            params.lattice_symmetry_max_delta,
            params.relative_length_tolerance,
            params.absolute_angle_tolerance,
        )
        reflection_tables = eliminate_sys_absent(experiments, reflection_tables)
        experiments, reflection_tables = apply_change_of_basis_ops(
            experiments, reflection_tables, cb_ops
        )

        refls_for_sym = get_subset_for_symmetry(
            experiments, reflection_tables, params.exclude_images
        )

        datasets = filtered_arrays_from_experiments_reflections(
            experiments,
            refls_for_sym,
            outlier_rejection_after_filter=True,
            partiality_threshold=params.partiality_threshold,
        )
        if len(datasets) != n_datasets:
            raise ValueError(
                """Some datasets have no reflection after prefiltering, please check
    input data and filtering settings e.g partiality_threshold"""
            )

        datasets = [
            ma.as_anomalous_array().merge_equivalents().array() for ma in datasets
        ]
        result = LaueGroupAnalysis(
            datasets,
            normalisation=params.normalisation,
            d_min=params.d_min,
            min_i_mean_over_sigma_mean=params.min_i_mean_over_sigma_mean,
            lattice_symmetry_max_delta=params.lattice_symmetry_max_delta,
            relative_length_tolerance=params.relative_length_tolerance,
            absolute_angle_tolerance=params.absolute_angle_tolerance,
            best_monoclinic_beta=params.best_monoclinic_beta,
        )
        logger.info("")
        logger.info(result)

        if params.output.json is not None:
            d = result.as_dict()
            d["cb_op_inp_min"] = [str(cb_op) for cb_op in cb_ops]
            # Copy the "input_symmetry" to "min_cell_symmetry" as it isn't technically
            # the input symmetry to dials.symmetry
            d["min_cell_symmetry"] = d["input_symmetry"]
            del d["input_symmetry"]
            json_str = json.dumps(d, indent=2)
            with open(params.output.json, "w") as f:
                f.write(json_str)

        # Change of basis operator from input unit cell to best unit cell
        cb_op_inp_best = result.best_solution.subgroup["cb_op_inp_best"]
        # Get the best space group.
        best_subsym = result.best_solution.subgroup["best_subsym"]
        best_space_group = best_subsym.space_group().build_derived_acentric_group()
        logger.info(
            tabulate(
                [[str(best_subsym.space_group_info()), str(best_space_group.info())]],
                ["Patterson group", "Corresponding MX group"],
            )
        )
        # Reindex the input data
        experiments, reflection_tables = _reindex_experiments_reflections(
            experiments, reflection_tables, best_space_group, cb_op_inp_best
        )

    elif params.laue_group is not None:
        if params.change_of_basis_op is not None:
            cb_op = sgtbx.change_of_basis_op(params.change_of_basis_op)
        else:
            cb_op = sgtbx.change_of_basis_op()
        # Reindex the input data
        experiments, reflection_tables = _reindex_experiments_reflections(
            experiments, reflection_tables, params.laue_group.group(), cb_op
        )

    if params.systematic_absences.check:
        logger.info("=" * 80)
        logger.info("")
        logger.info("Analysing systematic absences")
        logger.info("")

        # Get the laue class from the current space group.
        space_group = experiments[0].crystal.get_space_group()
        laue_group = str(space_group.build_derived_patterson_group().info())
        logger.info("Laue group: %s", laue_group)
        if laue_group in ("I m -3", "I m m m"):
            if laue_group == "I m -3":
                logger.info(
                    """Space groups I 2 3 & I 21 3 cannot be distinguished with systematic absence
analysis, due to lattice centering.
Using space group I 2 3, space group I 21 3 is equally likely.\n"""
                )
            if laue_group == "I m m m":
                logger.info(
                    """Space groups I 2 2 2 & I 21 21 21 cannot be distinguished with systematic absence
analysis, due to lattice centering.
Using space group I 2 2 2, space group I 21 21 21 is equally likely.\n"""
                )
        elif laue_group not in laue_groups_for_absence_analysis:
            logger.info("No absences to check for this laue group\n")
        else:
            if not refls_for_sym:
                refls_for_sym = get_subset_for_symmetry(
                    experiments, reflection_tables, params.exclude_images
                )

            if (params.d_min is Auto) and (result is not None):
                d_min = result.intensities.resolution_range()[1]
            elif params.d_min is Auto:
                d_min = resolution_filter_from_reflections_experiments(
                    refls_for_sym,
                    experiments,
                    params.min_i_mean_over_sigma_mean,
                    params.min_cc_half,
                )
            else:
                d_min = params.d_min

            # combine before sys abs test - only triggers if laue_group=None and
            # multiple input files.
            if len(reflection_tables) > 1:
                joint_reflections = flex.reflection_table()
                for table in refls_for_sym:
                    joint_reflections.extend(table)
            else:
                joint_reflections = refls_for_sym[0]

            merged_reflections = prepare_merged_reflection_table(
                experiments, joint_reflections, d_min
            )
            run_systematic_absences_checks(
                experiments,
                merged_reflections,
                float(params.systematic_absences.significance_level),
            )

    logger.info(
        "Saving reindexed experiments to %s in space group %s",
        params.output.experiments,
        str(experiments[0].crystal.get_space_group().info()),
    )
    experiments.as_file(params.output.experiments)
    if params.output.reflections is not None:
        if len(reflection_tables) > 1:
            joint_reflections = flex.reflection_table()
            for table in reflection_tables:
                joint_reflections.extend(table)
        else:
            joint_reflections = reflection_tables[0]
        logger.info(
            "Saving %s reindexed reflections to %s",
            len(joint_reflections),
            params.output.reflections,
        )
        joint_reflections.as_file(params.output.reflections)

    if params.output.html and params.systematic_absences.check:
        ScrewAxisObserver().generate_html_report(params.output.html)
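A hedged usage sketch for the symmetry() function above; the input file names are assumptions, and passing params=None falls back to the default dials.symmetry phil scope.

from dxtbx.model.experiment_list import ExperimentListFactory
from dials.array_family import flex

experiments = ExperimentListFactory.from_json_file(
    "integrated.expt", check_format=False)
reflection_tables = [flex.reflection_table.from_file("integrated.refl")]

symmetry(experiments, reflection_tables, params=None)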
Example #21
def load(entry):
  from dials.array_family import flex

  print("Loading NXreflections")

  # Check the feature is present
  assert("features" in entry)
  assert(7 in entry["features"])

  # Get the entry
  refls = entry['reflections']
  assert(refls.attrs['NX_class'] == 'NXsubentry')

  # Get the definition
  definition = refls['definition']
  assert(definition.value == 'NXreflections')
  assert(definition.attrs['version'] == 1)

  # The paths to the experiments
  experiments = list(refls['experiments'])

  # The columns to try
  columns = [
    'miller_index',
    'id',
    'partial_id',
    'entering',
    'flags',
    'panel',
    'd',
    'partiality',
    'xyzcal.px',
    'xyzcal.mm',
    'bbox',
    'xyzobs.px.value',
    'xyzobs.px.variance',
    'xyzobs.mm.value',
    'xyzobs.mm.variance',
    'background.mean',
    'intensity.sum.value',
    'intensity.sum.variance',
    'intensity.prf.value',
    'intensity.prf.variance',
    'profile.correlation',
    'lp',
    'num_pixels.background',
    'num_pixels.foreground',
    'num_pixels.background_used',
    'num_pixels.valid',
    'profile.rmsd',
  ]

  # The reflection table
  table = None

  # For each column in the reflection table dump to file
  for key in columns:
    try:
      col = read(refls, key)
      if table is None:
        table = flex.reflection_table()
      table[key] = col
    except KeyError:
      pass

  # Return the table
  return table, experiments
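A hedged usage sketch for load() above, assuming the reflections live in an NXmx-style HDF5 file ("reflections.nxs" is a placeholder name) and that the read() helper used inside load() is defined elsewhere in the same module.

import h5py

with h5py.File("reflections.nxs", "r") as handle:
    table, experiment_paths = load(handle["entry"])

if table is not None:
    print("Read %d reflections with columns: %s" % (len(table), list(table.keys())))
print("Experiment paths:", experiment_paths)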
Example #22
def export_mtz(integrated_data, experiment_list, params):
    """Export data from integrated_data corresponding to experiment_list to an
    MTZ file hklout."""

    # if mtz filename is auto, then choose scaled.mtz or integrated.mtz
    if params.mtz.hklout in (None, Auto, "auto"):
        if ("intensity.scale.value"
                in integrated_data) and ("intensity.scale.variance"
                                         in integrated_data):
            params.mtz.hklout = "scaled.mtz"
            logger.info(
                "Data appears to be scaled, setting mtz.hklout = 'scaled_unmerged.mtz'"
            )
        else:
            params.mtz.hklout = "integrated.mtz"
            logger.info(
                "Data appears to be unscaled, setting mtz.hklout = 'integrated.mtz'"
            )

    # First get the experiment identifier information out of the data
    expids_in_table = integrated_data.experiment_identifiers()
    if not list(expids_in_table.keys()):
        reflection_tables = parse_multiple_datasets([integrated_data])
        experiment_list, refl_list = assign_unique_identifiers(
            experiment_list, reflection_tables)
        integrated_data = flex.reflection_table()
        for reflections in refl_list:
            integrated_data.extend(reflections)
        expids_in_table = integrated_data.experiment_identifiers()
    integrated_data.assert_experiment_identifiers_are_consistent(
        experiment_list)
    expids_in_list = list(experiment_list.identifiers())

    # Convert experiment_list to a real python list or else identity assumptions
    # fail like:
    #   assert experiment_list[0] is experiment_list[0]
    # And assumptions about added attributes break
    experiment_list = list(experiment_list)

    # Validate multi-experiment assumptions
    if len(experiment_list) > 1:
        # All experiments should match crystals, or else we need multiple crystals/datasets
        if not all(x.crystal == experiment_list[0].crystal
                   for x in experiment_list[1:]):
            logger.warning(
                "Experiment crystals differ. Using first experiment crystal for file-level data."
            )

        wavelengths = match_wavelengths(experiment_list)
        if len(wavelengths.keys()) > 1:
            logger.info(
                "Multiple wavelengths found: \n%s",
                "\n".join("  Wavlength: %.5f, experiment numbers: %s " %
                          (k, ",".join(map(str, v)))
                          for k, v in wavelengths.items()),
            )
    else:
        wavelengths = OrderedDict(
            {experiment_list[0].beam.get_wavelength(): [0]})

    # also, this only works correctly with one panel (for the moment)
    if any(len(experiment.detector) != 1 for experiment in experiment_list):
        logger.warning("Ignoring multiple panels in output MTZ")

    # Clean up the data with the passed in options
    integrated_data = filter_reflection_table(
        integrated_data,
        intensity_choice=params.intensity,
        partiality_threshold=params.mtz.partiality_threshold,
        combine_partials=params.mtz.combine_partials,
        min_isigi=params.mtz.min_isigi,
        filter_ice_rings=params.mtz.filter_ice_rings,
        d_min=params.mtz.d_min,
    )

    # get batch offsets and image ranges - even for scanless experiments
    batch_offsets = [
        expt.scan.get_batch_offset() for expt in experiment_list
        if expt.scan is not None
    ]
    unique_offsets = set(batch_offsets)
    if len(unique_offsets) <= 1:
        logger.debug("Calculating new batches")
        batch_offsets = calculate_batch_offsets(experiment_list)
        batch_starts = [
            e.scan.get_image_range()[0] if e.scan else 0
            for e in experiment_list
        ]
        effective_offsets = [
            o + s for o, s in zip(batch_offsets, batch_starts)
        ]
        unique_offsets = set(effective_offsets)
    else:
        logger.debug("Keeping existing batches")
    image_ranges = get_image_ranges(experiment_list)
    if len(unique_offsets) != len(batch_offsets):

        raise ValueError("Duplicate batch offsets detected: %s" % ", ".join(
            str(item)
            for item, count in Counter(batch_offsets).items() if count > 1))

    # Create the mtz file
    mtz_writer = UnmergedMTZWriter(
        experiment_list[0].crystal.get_space_group())

    # FIXME TODO for more than one experiment into an MTZ file:
    #
    # - add an epoch (or recover an epoch) from the scan and add this as an extra
    #   column to the MTZ file for scaling, so we know that the two lattices were
    #   integrated at the same time
    # ✓ decide a sensible BATCH increment to apply to the BATCH value between
    #   experiments and add this

    for id_ in expids_in_table.keys():
        # Grab our subset of the data
        loc = expids_in_list.index(
            expids_in_table[id_])  # get the string id and use it to find the location in the list
        experiment = experiment_list[loc]
        if len(list(wavelengths.keys())) > 1:
            for i, (wl, exps) in enumerate(wavelengths.items()):
                if loc in exps:
                    wavelength = wl
                    dataset_id = i + 1
                    break
        else:
            wavelength = list(wavelengths.keys())[0]
            dataset_id = 1
        reflections = integrated_data.select(integrated_data["id"] == id_)
        batch_offset = batch_offsets[loc]
        image_range = image_ranges[loc]
        reflections = assign_batches_to_reflections([reflections],
                                                    [batch_offset])[0]
        experiment.data = dict(reflections)

        # Do any crystal transformations for the experiment
        cb_op_to_ref = (experiment.crystal.get_space_group().info().
                        change_of_basis_op_to_reference_setting())
        experiment.crystal = experiment.crystal.change_basis(cb_op_to_ref)
        experiment.data["miller_index_rebase"] = cb_op_to_ref.apply(
            experiment.data["miller_index"])

        s0n = matrix.col(experiment.beam.get_s0()).normalize().elems
        logger.debug("Beam vector: %.4f %.4f %.4f" % s0n)

        mtz_writer.add_batch_list(
            image_range,
            experiment,
            wavelength,
            dataset_id,
            batch_offset=batch_offset,
            force_static_model=params.mtz.force_static_model,
        )

        # Create the batch offset array. This gives us an experiment (id)-dependent
        # batch offset to calculate the correct batch from image number.
        experiment.data["batch_offset"] = flex.int(len(experiment.data["id"]),
                                                   batch_offset)

        # Calculate whether we have a ROT value for this experiment, and set the column
        _, _, z = experiment.data["xyzcal.px"].parts()
        if experiment.scan:
            experiment.data[
                "ROT"] = experiment.scan.get_angle_from_array_index(z)
        else:
            experiment.data["ROT"] = z

    mtz_writer.add_crystal(
        params.mtz.crystal_name,
        unit_cell=experiment_list[0].crystal.get_unit_cell()
    )  # Note: add unit cell here as may have changed basis since creating mtz.
    # For multi-wave unmerged mtz, we add an empty dataset for each wavelength,
    # but only write the data into the final dataset (for unmerged the batches
    # link the unmerged data to the individual wavelengths).
    for wavelength in wavelengths:
        mtz_writer.add_empty_dataset(wavelength)

    # Combine all of the experiment data columns before writing
    combined_data = {
        k: v.deep_copy()
        for k, v in experiment_list[0].data.items()
    }
    for experiment in experiment_list[1:]:
        for k, v in experiment.data.items():
            combined_data[k].extend(v)
    # ALL columns must be the same length
    assert len({len(v)
                for v in combined_data.values()
                }) == 1, "Column length mismatch"
    assert len(combined_data["id"]) == len(
        integrated_data["id"]), "Lost rows in split/combine"

    # Write all the data and columns to the mtz file
    mtz_writer.write_columns(combined_data)

    logger.info("Saving {} integrated reflections to {}".format(
        len(combined_data["id"]), params.mtz.hklout))
    mtz_file = mtz_writer.mtz_file
    mtz_file.write(params.mtz.hklout)

    return mtz_file
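The duplicate batch offset guard above reduces to counting offsets with collections.Counter; a tiny stdlib-only sketch of the same check, with made-up offsets for illustration.

from collections import Counter

batch_offsets = [0, 100, 100, 300]  # illustrative values only
duplicates = [offset for offset, count in Counter(batch_offsets).items() if count > 1]
if duplicates:
    raise ValueError("Duplicate batch offsets detected: %s" %
                     ", ".join(str(offset) for offset in duplicates))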
Example #23
def test_FilteringReductionMethods():
    """Test the FilteringReductionMethods class."""
    # Test ice ring filtering
    reflections = generate_simple_table()
    reflections = FilteringReductionMethods.filter_ice_rings(reflections)
    assert list(reflections["intensity.prf.value"]) == [2.0, 3.0]

    # Test filtering on I/sigI
    reflections = generate_simple_table()
    reflections = FilteringReductionMethods._filter_on_min_isigi(
        reflections, "prf", 2.5
    )
    assert list(reflections["intensity.prf.value"]) == [3.0]

    # Test bad variance filtering
    reflections = generate_simple_table()
    reflections["intensity.prf.variance"][0] = 0.0
    reflections = FilteringReductionMethods._filter_bad_variances(reflections, "prf")
    assert list(reflections["intensity.prf.value"]) == [2.0, 3.0]

    # Test filtering on dmin
    reflections = generate_simple_table()
    reflections["d"] = flex.double([1.0, 2.0, 3.0])
    reflections = FilteringReductionMethods.filter_on_d_min(reflections, 1.5)
    assert list(reflections["d"]) == [2.0, 3.0]

    # test calculate_lp_qe_correction_and_filter - should be lp/qe
    # cases, qe, dqe , lp , qe zero
    r = flex.reflection_table()
    r["data"] = flex.double([1.0, 2.0, 3.0])
    r, c = FilteringReductionMethods.calculate_lp_qe_correction_and_filter(r)
    assert list(c) == pytest.approx([1.0, 1.0, 1.0])

    r["lp"] = flex.double([1.0, 0.5, 1.0])
    r, c = FilteringReductionMethods.calculate_lp_qe_correction_and_filter(r)
    assert list(c) == pytest.approx([1.0, 0.5, 1.0])

    r["qe"] = flex.double([0.25, 1.0, 0.0])
    r, c = FilteringReductionMethods.calculate_lp_qe_correction_and_filter(r)
    assert list(c) == pytest.approx([4.0, 0.5])
    del r["qe"]
    r["dqe"] = flex.double([0.25, 0.0])
    r, c = FilteringReductionMethods.calculate_lp_qe_correction_and_filter(r)
    assert list(c) == pytest.approx([4.0])

    # test filter unassigned
    r = flex.reflection_table()
    r["id"] = flex.int([-1, 0])
    r["i"] = flex.double([1.0, 2.0])
    r = FilteringReductionMethods.filter_unassigned_reflections(r)
    assert list(r["i"]) == [2.0]

    with mock.patch(
        fpath + ".sum_partial_reflections", side_effect=return_reflections_side_effect
    ) as sum_partials:
        reflections = generate_simple_table()
        reflections = FilteringReductionMethods.combine_and_filter_partials(
            reflections, partiality_threshold=0.7
        )
        assert sum_partials.call_count == 1
        assert list(reflections["intensity.prf.value"]) == [1.0, 2.0]
        reflections = generate_simple_table()
        FilteringReductionMethods.combine_and_filter_partials(
            reflections, partiality_threshold=0.4
        )
        assert sum_partials.call_count == 2
        assert list(reflections["intensity.prf.value"]) == [1.0, 2.0, 3.0]
Example #24
def test_ScaleIntensityReducer():
    """Test that the reflection table is reduced on scaling intensities"""
    reflections = generate_integrated_test_reflections()
    reflections = ScaleIntensityReducer.reduce_on_intensities(reflections)
    assert list(reflections["intensity.scale.value"]) == pytest.approx(
        [23.0, 24.0, 25.0]
    )
    assert list(reflections["intensity.scale.variance"]) == pytest.approx(
        [2.3, 2.4, 2.5]
    )
    del reflections["inverse_scale_factor"]
    with pytest.raises(AssertionError):
        reflections = ScaleIntensityReducer.reduce_on_intensities(reflections)

    reflections = generate_test_reflections_for_scaling()
    reflections = ScaleIntensityReducer.apply_scaling_factors(reflections)
    assert list(reflections["intensity.scale.value"]) == pytest.approx(
        [21.0 / 5.0, 23.0 / 5.0, 24.0 / 10.0, 25.0 / 10.0, 26.0 / 10.0, 27.0 / 10.0]
    )
    assert list(reflections["intensity.scale.variance"]) == pytest.approx(
        [2.1 / 25.0, 2.3 / 25.0, 2.4 / 100, 2.5 / 100, 2.6 / 100, 2.7 / 100]
    )
    del reflections["inverse_scale_factor"]
    with pytest.raises(AssertionError):
        reflections = ScaleIntensityReducer.apply_scaling_factors(reflections)

    reflections = generate_test_reflections_for_scaling()
    del reflections["partiality"]
    reflections = ScaleIntensityReducer.apply_scaling_factors(reflections)
    assert list(reflections["intensity.scale.value"]) == [
        21.0 / 5.0,
        22.0 / 5.0,
        23.0 / 5.0,
        24.0 / 10.0,
        25.0 / 10.0,
        26.0 / 10.0,
        27.0 / 10.0,
    ]

    # test I/sigI selection
    r = flex.reflection_table()
    r["intensity.scale.value"] = flex.double([1.0, 2.0])
    r["intensity.scale.variance"] = flex.double([1.0, 1.0])
    r = ScaleIntensityReducer.filter_on_min_isigi(r, 1.5)
    assert list(r["intensity.scale.value"]) == [2.0]

    # now test a typical case - reflection table with inv scale factor,
    # check that we only apply that correction and do any relevant filtering
    reflections = generate_integrated_test_reflections()
    reflections["lp"] = flex.double(6, 0.6)
    reflections = ScaleIntensityReducer.filter_for_export(reflections)

    assert list(reflections["intensity.scale.value"]) == pytest.approx(
        [23.0 / 5.0, 24.0 / 10.0, 25.0 / 10.0]
    )
    assert list(reflections["intensity.scale.variance"]) == pytest.approx(
        [2.3 / 25.0, 0.024, 0.025]
    )

    assert "intensity.prf.value" not in reflections
    assert "intensity.sum.value" not in reflections
    assert "intensity.prf.variance" not in reflections
    assert "intensity.sum.variance" not in reflections
def test_split_by_wavelength(tmpdir):
    """Test the split_by_wavelength option of dials.split_experiments"""
    experiments = ExperimentList()
    exp = generate_exp(wavelength=1.0)
    exp.identifier = "0"
    experiments.append(exp)
    exp = generate_exp(wavelength=0.5)
    exp.identifier = "1"
    experiments.append(exp)

    reflections = flex.reflection_table()
    reflections["id"] = flex.int([0, 1])
    reflections["intensity"] = flex.double([100.0, 200.0])
    reflections.experiment_identifiers()[0] = "0"
    reflections.experiment_identifiers()[1] = "1"

    experiments.as_json(tmpdir.join("tmp.expt").strpath)
    reflections.as_file(tmpdir.join("tmp.refl").strpath)

    result = procrunner.run(
        [
            "dials.split_experiments", "tmp.expt", "tmp.refl",
            "by_wavelength=True"
        ],
        working_directory=tmpdir,
    )
    assert not result.returncode and not result.stderr

    for i, (wl, ids,
            intensity) in enumerate(zip([0.5, 1.0], ["1", "0"],
                                        [200.0, 100.0])):
        assert tmpdir.join("split_%d.expt" % i).check()
        assert tmpdir.join("split_%d.refl" % i).check()
        exp_single = ExperimentListFactory.from_json_file(tmpdir.join(
            "split_%d.expt" % i).strpath,
                                                          check_format=False)
        ref_single = flex.reflection_table.from_file(
            tmpdir.join("split_%d.refl" % i).strpath)
        assert exp_single[0].beam.get_wavelength() == wl
        assert exp_single[0].identifier == ids
        id_ = ref_single["id"][0]
        assert ref_single.experiment_identifiers()[id_] == ids
        assert list(ref_single["intensity"]) == [intensity]

    # Now test for successful error handling if no identifiers set.
    experiments[0].identifier = ""
    experiments[1].identifier = ""
    experiments.as_json(tmpdir.join("tmp.expt").strpath)
    result = procrunner.run(
        [
            "dials.split_experiments", "tmp.expt", "tmp.refl",
            "by_wavelength=True"
        ],
        working_directory=tmpdir,
    )
    assert result.returncode == 1
    assert result.stderr.startswith(b"Sorry")

    experiments[0].identifier = "0"
    experiments[1].identifier = "1"
    del reflections.experiment_identifiers()[0]
    del reflections.experiment_identifiers()[1]
    experiments.as_json(tmpdir.join("tmp.expt").strpath)
    reflections.as_file(tmpdir.join("tmp.refl").strpath)
    result = procrunner.run(
        [
            "dials.split_experiments", "tmp.expt", "tmp.refl",
            "by_wavelength=True"
        ],
        working_directory=tmpdir,
    )
    assert result.returncode == 1
    assert result.stderr.startswith(b"Sorry")
Example #26
    def run(self):
        ''' Perform the integration. '''
        from dials.util.command_line import heading
        from dials.util.options import flatten_reflections, flatten_experiments
        from dials.util import log
        from time import time
        from libtbx.utils import Sorry

        # Check the number of arguments is correct
        start_time = time()

        # Parse the command line
        params, options = self.parser.parse_args(show_diff_phil=False)
        reference = flatten_reflections(params.input.reflections)
        experiments = flatten_experiments(params.input.experiments)
        if len(reference) == 0 and len(experiments) == 0:
            self.parser.print_help()
            return
        if len(reference) == 0:
            reference = None
        elif len(reference) != 1:
            raise Sorry('more than 1 reflection file was given')
        else:
            reference = reference[0]
        if len(experiments) == 0:
            raise Sorry('no experiment list was specified')

        # Save phil parameters
        if params.output.phil is not None:
            with open(params.output.phil, "w") as outfile:
                outfile.write(self.parser.diff_phil.as_str())

        # Configure logging
        log.config(params.verbosity,
                   info=params.output.log,
                   debug=params.output.debug_log)

        from dials.util.version import dials_version
        logger.info(dials_version())

        # Log the diff phil
        diff_phil = self.parser.diff_phil.as_str()
        if diff_phil != '':
            logger.info('The following parameters have been modified:\n')
            logger.info(diff_phil)

        for abs_params in params.absorption_correction:
            if abs_params.apply:
                if not (params.integration.debug.output
                        and not params.integration.debug.separate_files):
                    raise Sorry('Shoeboxes must be saved to integration intermediates to apply an absorption correction. '\
                      +'Set integration.debug.output=True, integration.debug.separate_files=False and '\
                      +'integration.debug.delete_shoeboxes=True to temporarily store shoeboxes.')

        # Print if we're using a mask
        for i, exp in enumerate(experiments):
            mask = exp.imageset.external_lookup.mask
            if mask.filename is not None:
                if mask.data:
                    logger.info('Using external mask: %s' % mask.filename)
                    for tile in mask.data:
                        logger.info(' Mask has %d pixels masked' %
                                    tile.data().count(False))

        # Print the experimental models
        for i, exp in enumerate(experiments):
            logger.debug("Models for experiment %d" % i)
            logger.debug("")
            logger.debug(str(exp.beam))
            logger.debug(str(exp.detector))
            if exp.goniometer:
                logger.debug(str(exp.goniometer))
            if exp.scan:
                logger.debug(str(exp.scan))
            logger.debug(str(exp.crystal))

        logger.info("=" * 80)
        logger.info("")
        logger.info(heading("Initialising"))
        logger.info("")

        # Load the data
        reference, rubbish = self.process_reference(reference)

        # Check pixels don't belong to neighbours
        if reference is not None:
            if exp.goniometer is not None and exp.scan is not None:
                self.filter_reference_pixels(reference, experiments)
        logger.info("")

        # Initialise the integrator
        from dials.algorithms.profile_model.factory import ProfileModelFactory
        from dials.algorithms.integration.integrator import IntegratorFactory
        from dials.array_family import flex

        # Modify experiment list if scan range is set.
        experiments, reference = self.split_for_scan_range(
            experiments, reference, params.scan_range)

        # Predict the reflections
        logger.info("")
        logger.info("=" * 80)
        logger.info("")
        logger.info(heading("Predicting reflections"))
        logger.info("")
        predicted = flex.reflection_table.from_predictions_multi(
            experiments,
            dmin=params.prediction.d_min,
            dmax=params.prediction.d_max,
            margin=params.prediction.margin,
            force_static=params.prediction.force_static,
            padding=params.prediction.padding)

        # Match reference with predicted
        if reference:
            matched, reference, unmatched = predicted.match_with_reference(
                reference)
            assert (len(matched) == len(predicted))
            assert (matched.count(True) <= len(reference))
            if matched.count(True) == 0:
                raise Sorry('''
          Invalid input for reference reflections.
          Zero reference spots were matched to predictions
        ''')
            elif len(unmatched) != 0:
                logger.info('')
                logger.info('*' * 80)
                logger.info(
                    'Warning: %d reference spots were not matched to predictions'
                    % (len(unmatched)))
                logger.info('*' * 80)
                logger.info('')
            rubbish.extend(unmatched)

            if len(experiments) > 1:
                # filter out any experiments without matched reference reflections
                # f_: filtered
                from dxtbx.model.experiment_list import ExperimentList
                f_reference = flex.reflection_table()
                f_predicted = flex.reflection_table()
                f_rubbish = flex.reflection_table()
                f_experiments = ExperimentList()
                good_expt_count = 0

                def refl_extend(src, dest, eid):
                    tmp = src.select(src['id'] == eid)
                    tmp['id'] = flex.int(len(tmp), good_expt_count)
                    dest.extend(tmp)

                for expt_id, experiment in enumerate(experiments):
                    if len(reference.select(reference['id'] == expt_id)) != 0:
                        refl_extend(reference, f_reference, expt_id)
                        refl_extend(predicted, f_predicted, expt_id)
                        refl_extend(rubbish, f_rubbish, expt_id)
                        f_experiments.append(experiment)
                        good_expt_count += 1
                    else:
                        logger.info(
                            "Removing experiment %d: no reference reflections matched to predictions"
                            % expt_id)

                reference = f_reference
                predicted = f_predicted
                experiments = f_experiments
                rubbish = f_rubbish

        # Select a random sample of the predicted reflections
        if not params.sampling.integrate_all_reflections:
            predicted = self.sample_predictions(experiments, predicted, params)

        # Compute the profile model
        if (params.create_profile_model and reference is not None
                and "shoebox" in reference):
            experiments = ProfileModelFactory.create(params, experiments,
                                                     reference)
        else:
            experiments = ProfileModelFactory.create(params, experiments)
            for expr in experiments:
                if expr.profile is None:
                    raise Sorry('No profile information in experiment list')
        del reference

        # Compute the bounding box
        predicted.compute_bbox(experiments)

        # Create the integrator
        logger.info("")
        integrator = IntegratorFactory.create(params, experiments, predicted)

        # Integrate the reflections
        reflections = integrator.integrate()

        # Append rubbish data onto the end
        if rubbish is not None and params.output.include_bad_reference:
            mask = flex.bool(len(rubbish), True)
            rubbish.unset_flags(mask, rubbish.flags.integrated_sum)
            rubbish.unset_flags(mask, rubbish.flags.integrated_prf)
            rubbish.set_flags(mask, rubbish.flags.bad_reference)
            reflections.extend(rubbish)

        # Correct integrated intensities for absorption correction, if necessary
        for abs_params in params.absorption_correction:
            if abs_params.apply and abs_params.algorithm == "fuller_kapton":
                from dials.algorithms.integration.kapton_correction import multi_kapton_correction
                experiments, reflections = multi_kapton_correction(
                    experiments,
                    reflections,
                    abs_params.fuller_kapton,
                    logger=logger)()

        if params.significance_filter.enable:
            from dials.algorithms.integration.stills_significance_filter import SignificanceFilter
            sig_filter = SignificanceFilter(params)
            refls = sig_filter(experiments, reflections)
            logger.info(
                "Removed %d reflections out of %d when applying significance filter"
                % (len(reflections) - len(refls), len(reflections)))
            reflections = refls

        # Delete the shoeboxes used for intermediate calculations, if requested
        if params.integration.debug.delete_shoeboxes and 'shoebox' in reflections:
            del reflections['shoebox']

        # Save the reflections
        self.save_reflections(reflections, params.output.reflections)
        self.save_experiments(experiments, params.output.experiments)

        # Write a report if requested
        if params.output.report is not None:
            integrator.report().as_file(params.output.report)

        # Print the total time taken
        logger.info("\nTotal time taken: %f" % (time() - start_time))
Example #27
File: merge.py  Project: jmp1985/dials
def merge(
    experiments,
    reflections,
    d_min=None,
    d_max=None,
    combine_partials=True,
    partiality_threshold=0.4,
    anomalous=True,
    use_internal_variance=False,
    assess_space_group=False,
    n_bins=20,
):
    """
    Merge reflection table data and generate a summary of the merging statistics.

    This procedure filters the input data, merges the data (normal and optionally
    anomalous), assesses the space group symmetry and generates a summary
    of the merging statistics.
    """

    logger.info("\nMerging scaled reflection data\n")
    # first filter bad reflections using dials.util.filter methods
    reflections = filter_reflection_table(
        reflections,
        intensity_choice=["scale"],
        d_min=d_min,
        d_max=d_max,
        combine_partials=combine_partials,
        partiality_threshold=partiality_threshold,
    )
    # ^ scale factor has been applied, so now set to 1.0 - okay as not
    # going to output scale factor in merged mtz.
    reflections["inverse_scale_factor"] = flex.double(reflections.size(), 1.0)

    scaled_array = scaled_data_as_miller_array([reflections], experiments)
    # Note, merge_equivalents does not raise an error if data is unique.
    merged = scaled_array.merge_equivalents(
        use_internal_variance=use_internal_variance).array()
    merged_anom = None

    if anomalous:
        anomalous_scaled = scaled_array.as_anomalous_array()
        merged_anom = anomalous_scaled.merge_equivalents(
            use_internal_variance=use_internal_variance).array()

    # Before merge, do assessment of the space_group
    if assess_space_group:
        merged_reflections = flex.reflection_table()
        merged_reflections["intensity"] = merged.data()
        merged_reflections["variance"] = flex.pow2(merged.sigmas())
        merged_reflections["miller_index"] = merged.indices()
        logger.info("Running systematic absences check")
        run_systematic_absences_checks(experiments, merged_reflections)

    try:
        stats, anom_stats = merging_stats_from_scaled_array(
            scaled_array,
            n_bins,
            use_internal_variance,
        )
    except DialsMergingStatisticsError as e:
        logger.error(e, exc_info=True)
        stats_summary = None
    else:
        if anomalous and anom_stats:
            stats_summary = make_merging_statistics_summary(anom_stats)
        else:
            stats_summary = make_merging_statistics_summary(stats)
        stats_summary += table_1_summary(stats, anom_stats)

    return merged, merged_anom, stats_summary
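A hedged usage sketch for merge() above; the file names are assumptions, and the reflections are expected to be scaled (i.e. intensity.scale.value and inverse_scale_factor present).

from dxtbx.model.experiment_list import ExperimentListFactory
from dials.array_family import flex

experiments = ExperimentListFactory.from_json_file("scaled.expt", check_format=False)
reflections = flex.reflection_table.from_file("scaled.refl")

merged, merged_anom, stats_summary = merge(
    experiments, reflections, d_min=1.5, anomalous=True, n_bins=20)
if stats_summary is not None:
    print(stats_summary)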
Example #28
    def run(self):
        '''Execute the script.'''

        from dials.util.options import flatten_reflections, flatten_experiments
        from libtbx.utils import Sorry
        from dials.array_family import flex

        # Parse the command line
        params, options = self.parser.parse_args(show_diff_phil=True)

        # Try to load the models and data
        if not params.input.experiments:
            print "No Experiments found in the input"
            self.parser.print_help()
            return
        if params.input.reflections:
            if len(params.input.reflections) != len(params.input.experiments):
                raise Sorry(
                    "The number of input reflections files does not match the "
                    "number of input experiments")

        experiments = flatten_experiments(params.input.experiments)
        if params.input.reflections:
            reflections = flatten_reflections(params.input.reflections)[0]
        else:
            reflections = None

        import math
        experiments_template = "%s_%%0%sd.json" % (
            params.output.experiments_prefix,
            int(math.floor(math.log10(len(experiments))) + 1))
        reflections_template = "%s_%%0%sd.pickle" % (
            params.output.reflections_prefix,
            int(math.floor(math.log10(len(experiments))) + 1))

        from dxtbx.model.experiment_list import ExperimentList
        from dxtbx.serialize import dump
        if params.by_detector:
            if reflections is None:
                split_data = {
                    detector: {
                        'experiments': ExperimentList()
                    }
                    for detector in experiments.detectors()
                }
            else:
                split_data = {
                    detector: {
                        'experiments': ExperimentList(),
                        'reflections': flex.reflection_table()
                    }
                    for detector in experiments.detectors()
                }

            for i, experiment in enumerate(experiments):
                split_expt_id = experiments.detectors().index(
                    experiment.detector)
                experiment_filename = experiments_template % split_expt_id
                print 'Adding experiment %d to %s' % (i, experiment_filename)
                split_data[experiment.detector]['experiments'].append(
                    experiment)
                if reflections is not None:
                    reflections_filename = reflections_template % split_expt_id
                    print 'Adding reflections for experiment %d to %s' % (
                        i, reflections_filename)
                    ref_sel = reflections.select(reflections['id'] == i)
                    ref_sel['id'] = flex.int(
                        len(ref_sel),
                        len(split_data[experiment.detector]['experiments']) -
                        1)
                    split_data[experiment.detector]['reflections'].extend(
                        ref_sel)

            for i, detector in enumerate(experiments.detectors()):
                experiment_filename = experiments_template % i
                print 'Saving experiment %d to %s' % (i, experiment_filename)
                dump.experiment_list(split_data[detector]['experiments'],
                                     experiment_filename)

                if reflections is not None:
                    reflections_filename = reflections_template % i
                    print 'Saving reflections for experiment %d to %s' % (
                        i, reflections_filename)
                    split_data[detector]['reflections'].as_pickle(
                        reflections_filename)
        else:
            for i, experiment in enumerate(experiments):
                from dxtbx.model.experiment_list import ExperimentList
                from dxtbx.serialize import dump
                experiment_filename = experiments_template % i
                print 'Saving experiment %d to %s' % (i, experiment_filename)
                dump.experiment_list(ExperimentList([experiment]),
                                     experiment_filename)

                if reflections is not None:
                    reflections_filename = reflections_template % i
                    print 'Saving reflections for experiment %d to %s' % (
                        i, reflections_filename)
                    ref_sel = reflections.select(reflections['id'] == i)
                    ref_sel['id'] = flex.int(len(ref_sel), 0)
                    ref_sel.as_pickle(reflections_filename)

        return
Example #29
    def run(self):
        """Execute the script."""
        start_time = time()

        # Parse the command line
        params, _ = self.parser.parse_args(show_diff_phil=False)

        # set up global experiments and reflections lists

        reflections = flex.reflection_table()
        global_id = 0

        experiments = ExperimentList()

        # loop through the input, building up the global lists
        nrefs_per_exp = []
        for ref_wrapper, exp_wrapper in zip(
            params.input.reflections, params.input.experiments
        ):
            refs = ref_wrapper.data
            exps = exp_wrapper.data
            for i, exp in enumerate(exps):
                sel = refs["id"] == i
                sub_ref = refs.select(sel)
                nrefs_per_exp.append(len(sub_ref))
                sub_ref["id"] = flex.int(len(sub_ref), global_id)
                reflections.extend(sub_ref)
                experiments.append(exp)
                global_id += 1

        # Try to load the models and data
        nexp = len(experiments)
        if nexp == 0:
            print("No Experiments found in the input")
            self.parser.print_help()
            return
        if not reflections:
            print("No reflection data found in the input")
            self.parser.print_help()
            return

        self.check_input(reflections)

        # Configure the logging
        log.config(info=params.output.log, debug=params.output.debug_log)
        logger.info(dials_version())

        # Log the diff phil
        diff_phil = self.parser.diff_phil.as_str()
        if diff_phil != "":
            logger.info("The following parameters have been modified:\n")
            logger.info(diff_phil)

        # Convert to P 1?
        if params.refinement.triclinic:
            reflections, experiments = self.convert_to_P1(reflections, experiments)

        # Combine crystals?
        if params.refinement.combine_crystal_models and len(experiments) > 1:
            logger.info("Combining {0} crystal models".format(len(experiments)))
            experiments = self.combine_crystals(experiments)

        # Filter integrated centroids?
        if params.refinement.filter_integrated_centroids:
            reflections = self.filter_integrated_centroids(reflections)

        # Filter data if scaled to remove outliers
        if "inverse_scale_factor" in reflections:
            try:
                reflections = filter_reflection_table(reflections, ["scale"])
            except ValueError as e:
                logger.warn(e)
                logger.info(
                    "Filtering on scaled data failed, proceeding with integrated data."
                )

        # Get the refiner
        logger.info("Configuring refiner")
        refiner = self.create_refiner(params, reflections, experiments)

        # Refine the geometry
        if nexp == 1:
            logger.info("Performing refinement of a single Experiment...")
        else:
            logger.info("Performing refinement of {} Experiments...".format(nexp))
        refiner.run()

        # get the refined experiments
        experiments = refiner.get_experiments()
        crystals = experiments.crystals()

        if len(crystals) == 1:
            # output the refined model for information
            logger.info("")
            logger.info("Final refined crystal model:")
            logger.info(crystals[0])
            logger.info(self.cell_param_table(crystals[0]))

        # Save the refined experiments to file
        output_experiments_filename = params.output.experiments
        logger.info(
            "Saving refined experiments to {}".format(output_experiments_filename)
        )
        experiments.as_file(output_experiments_filename)

        # Create correlation plots
        if params.output.correlation_plot.filename is not None:
            create_correlation_plots(refiner, params.output)

        if params.output.cif is not None:
            self.generate_cif(crystals[0], refiner, filename=params.output.cif)

        if params.output.p4p is not None:
            self.generate_p4p(
                crystals[0], experiments[0].beam, filename=params.output.p4p
            )

        if params.output.mmcif is not None:
            self.generate_mmcif(crystals[0], refiner, filename=params.output.mmcif)

        # Log the total time taken
        logger.info("\nTotal time taken: {:.2f}s".format(time() - start_time))
Example #30
def test_average_bbox_size():
    """Test behaviour of function for obtaining average bbox size."""
    reflections = flex.reflection_table()
    reflections["bbox"] = flex.int6(*(flex.int(10, i) for i in range(6)))
    assert _average_bbox_size(reflections) == (1, 1, 1)
Example #31
    def __init__(
        self,
        rtable,
        elist,
        calculate_variances=False,
        keep_singles=False,
        uncertainty="sigma",
        outfile=None,
    ):
        """
        Generate z-scores and a normal probability plot from a DIALS
        reflection_table and a dxtbx ExperimentList, containing the observations
        and the corresponding experiments, respectively.

        :param rtable: A reflection table object, containing at least the columns
          * ``miller_index``
          * ``intensity.sum.value``
          * ``intensity.sum.variance``
          * ``xyzobs.px.value``
        :type rtable: dials.array_family_flex_ext.reflection_table
        :param elist: A corresponding experiment list.
        :type elist: dxtbx_model_ext.ExperimentList
        :param calculate_variances: Choose whether to calculate weighted
        aggregate variances.  Doing so incurs a performance penalty.
        Defaults to False.
        :type calculate_variances: bool
        :param keep_singles: Choose whether to keep multiplicity-1 reflections.
        Defaults to False.
        :type keep_singles: bool
        :param uncertainty: Measure of spread to use in normalising the
        z-scores, i.e. z = (I - <I>) / uncertainty.
        Possible values for uncertainty:
        * 'sigma':    Use measured sigma values;
        * 'stddev':   Use sample standard deviations calculated as
                      square-root of unbiased weighted sample variances
                      of symmetry-equivalent reflection intensities;
        Defaults to 'sigma'.
        :type uncertainty: str
        :param outfile: Filename root for output PNG plots.
        Defaults to None.
        :type outfile: str
        """

        from dxtbx.model import ExperimentList
        from cctbx import miller

        if not isinstance(rtable, flex.reflection_table) or not isinstance(
            elist, ExperimentList
        ):
            raise TypeError(
                "Must be called with a reflection table and an experiment list."
            )

        rtable = rtable.copy()
        # Discard unindexed reflections (only necessary because of
        # https://github.com/dials/dials/issues/615 —
        # TODO remove the line below when issue #615 is fixed).
        rtable.del_selected(rtable["id"] == -1)
        rtable["miller_index.asu"] = rtable["miller_index"]

        # Divide reflections by the space group to which they have been indexed.
        self.rtables = {
            expt.crystal.get_space_group().make_tidy(): flex.reflection_table()
            for expt in elist
        }
        for expt, sel in rtable.iterate_experiments_and_indices(elist):
            sg = expt.crystal.get_space_group().make_tidy()
            self.rtables[sg].extend(rtable.select(sel))
        # Map Miller indices to asymmetric unit.
        for space_group, rtable in self.rtables.items():
            # TODO Handle anomalous flag sensibly.  Currently assumes not anomalous.
            miller.map_to_asu(space_group.type(), False, rtable["miller_index.asu"])

        # Calculate normal probability plot data.
        self._multiplicity_mean_error_stddev(
            calculate_variances=calculate_variances, keep_singles=keep_singles
        )
        self._make_z(uncertainty)
        self._probplot_data()

        self.rtable = flex.reflection_table()
        for rtable in self.rtables.values():
            self.rtable.extend(rtable)

        if not outfile:
            outfile = ""
        self.outfile = outfile
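A minimal standalone sketch of the z-score definition quoted in the docstring, z = (I - <I>) / sigma, for one hypothetical group of symmetry-equivalent observations. The weighted mean uses 1/variance weights; this only illustrates the formula, not the class's implementation.

from dials.array_family import flex

intensities = flex.double([100.0, 110.0, 95.0])
variances = flex.double([25.0, 30.0, 20.0])

# Inverse-variance weighted mean of the group.
weights = flex.double(len(variances), 1.0) / variances
weighted_mean = flex.sum(weights * intensities) / flex.sum(weights)

# z-score of each observation against the group mean, normalised by its sigma.
z_scores = (intensities - weighted_mean) / flex.sqrt(variances)
print(list(z_scores))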
Example #32
    def run_refinement_and_outlier_rejection(self):
        ''' Code taken from the index function of StillsIndexer '''
        self.d_min = self.all_params.indexing.refinement_protocol.d_min_start
        self.indexed_reflections = (self.reflections['id'] > -1)
        if self.d_min is None:
            sel = self.reflections['id'] <= -1
        else:
            sel = flex.bool(len(self.reflections), False)
            lengths = 1 / self.reflections['rlp'].norms()
            isel = (lengths >= self.d_min).iselection()
            sel.set_selected(isel, True)
            sel.set_selected(self.reflections['id'] > -1, False)
        self.unindexed_reflections = self.reflections.select(sel)

        reflections_for_refinement = self.reflections.select(
            self.indexed_reflections)
        import copy
        experiments = copy.deepcopy(self.experiments)
        print("Starting Refinement")

        try:
            refined_experiments, refined_reflections = self.refine(
                experiments, reflections_for_refinement)
        except Exception as e:
            raise Sorry(str(e))
        # Force mosaicity values to certain values depending on phil params
        if self.all_params.iota.iota_mosaicity.domain_size_ang is not None:
            for exp in refined_experiments:
                exp.crystal.set_domain_size_ang(
                    self.all_params.iota.iota_mosaicity.domain_size_ang)
        if self.all_params.iota.iota_mosaicity.half_deg is not None:
            for exp in refined_experiments:
                exp.crystal.set_half_mosaicity_deg(
                    self.all_params.iota.iota_mosaicity.half_deg)

        # sanity check for unrealistic unit cell volume increase during refinement
        # usually this indicates too many parameters are being refined given the
        # number of observations provided.
        if not self.all_params.indexing.refinement_protocol.disable_unit_cell_volume_sanity_check:
            for orig_expt, refined_expt in zip(experiments,
                                               refined_experiments):
                uc1 = orig_expt.crystal.get_unit_cell()
                uc2 = refined_expt.crystal.get_unit_cell()
                volume_change = abs(uc1.volume() - uc2.volume()) / uc1.volume()
                cutoff = 0.5
                if volume_change > cutoff:
                    msg = "\n".join((
                        "Unrealistic unit cell volume increase during refinement of %.1f%%.",
                        "Please try refining fewer parameters, either by enforcing symmetry",
                        "constraints (space_group=) and/or disabling experimental geometry",
                        "refinement (detector.fix=all and beam.fix=all). To disable this",
                        "sanity check set disable_unit_cell_volume_sanity_check=True."
                    )) % (100 * volume_change)
                    raise Sorry(msg)

        self.refined_reflections = refined_reflections.select(
            refined_reflections['id'] > -1)

        for i, expt in enumerate(self.experiments):
            ref_sel = self.refined_reflections.select(
                self.refined_reflections["imageset_id"] == i)
            ref_sel = ref_sel.select(ref_sel["id"] >= 0)
            for i_expt in set(ref_sel["id"]):
                refined_expt = refined_experiments[i_expt]
                expt.detector = refined_expt.detector
                expt.beam = refined_expt.beam
                expt.goniometer = refined_expt.goniometer
                expt.scan = refined_expt.scan
                refined_expt.imageset = expt.imageset

        if not (self.all_params.refinement.parameterisation.beam.fix == 'all'
                and self.all_params.refinement.parameterisation.detector.fix
                == 'all'):
            # Experimental geometry may have changed - re-map centroids to
            # reciprocal space

            spots_mm = self.reflections
            self.reflections = flex.reflection_table()
            for i, expt in enumerate(self.experiments):
                spots_sel = spots_mm.select(spots_mm["imageset_id"] == i)
                #spots_sel.map_centroids_to_reciprocal_space(expt.detector, expt.beam, expt.goniometer)
                spots_sel.map_centroids_to_reciprocal_space(
                    self.experiments[i:i + 1])
                self.reflections.extend(spots_sel)

        # update for next cycle
        experiments = refined_experiments
        self.refined_experiments = refined_experiments

        # discard experiments with zero reflections after refinement
        id_set = set(self.refined_reflections['id'])
        if len(id_set) < len(self.refined_experiments):
            filtered_refined_reflections = flex.reflection_table()
            for i in range(len(self.refined_experiments)):
                if i not in id_set:
                    del self.refined_experiments[i]
            for old, new in zip(sorted(id_set), range(len(id_set))):
                subset = self.refined_reflections.select(
                    self.refined_reflections['id'] == old)
                subset['id'] = flex.int(len(subset), new)
                filtered_refined_reflections.extend(subset)
            self.refined_reflections = filtered_refined_reflections

        # FIXME: reporting of rotation matrix differences is currently disabled.
        # The helper was renamed to rotation_matrix_differences in
        # dials.algorithms.indexing.compare_orientation_matrices, so the old call
        # below no longer applies:
        # if len(self.refined_experiments) > 1:
        #     from dials.algorithms.indexing.compare_orientation_matrices import (
        #         show_rotation_matrix_differences)
        #     show_rotation_matrix_differences(
        #         self.refined_experiments.crystals(), out=info_handle)

        #logger.info("Final refined crystal models:")
        for i, crystal_model in enumerate(self.refined_experiments.crystals()):
            n_indexed = 0
            for i_expt in experiments.where(crystal=crystal_model):
                n_indexed += (self.reflections['id'] == i_expt).count(True)
            #logger.info("model %i (%i reflections):" %(i+1, n_indexed))
            #logger.info(crystal_model)

        if 'xyzcal.mm' in self.refined_reflections:  # won't be there if refine_all_candidates = False and no isoforms
            self.refined_reflections['xyzcal.px'] = flex.vec3_double(
                len(self.refined_reflections))
            for i, imageset in enumerate(self.experiments.imagesets()):
                imgset_sel = self.refined_reflections['imageset_id'] == i
                # set xyzcal.px field in self.refined_reflections
                refined_reflections = self.refined_reflections.select(
                    imgset_sel)
                panel_numbers = flex.size_t(refined_reflections['panel'])
                xyzcal_mm = refined_reflections['xyzcal.mm']
                x_mm, y_mm, z_rad = xyzcal_mm.parts()
                xy_cal_mm = flex.vec2_double(x_mm, y_mm)
                xy_cal_px = flex.vec2_double(len(xy_cal_mm))
                for i_panel in range(len(imageset.get_detector())):
                    panel = imageset.get_detector()[i_panel]
                    sel = (panel_numbers == i_panel)
                    isel = sel.iselection()
                    ref_panel = refined_reflections.select(
                        panel_numbers == i_panel)
                    xy_cal_px.set_selected(
                        sel, panel.millimeter_to_pixel(xy_cal_mm.select(sel)))
                x_px, y_px = xy_cal_px.parts()
                scan = imageset.get_scan()
                if scan is not None:
                    z_px = scan.get_array_index_from_angle(z_rad, deg=False)
                else:
                    # must be a still image, z centroid not meaningful
                    z_px = z_rad
                xyzcal_px = flex.vec3_double(x_px, y_px, z_px)
                self.refined_reflections['xyzcal.px'].set_selected(
                    imgset_sel, xyzcal_px)

        return self.refined_experiments, self.refined_reflections
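
A minimal sketch of the unit-cell volume sanity check performed at the top of this example, assuming only cctbx is available; the cell parameters and the 50% cutoff are illustrative values, not taken from real data.

from cctbx import uctbx

uc_before = uctbx.unit_cell((78.0, 78.0, 37.0, 90.0, 90.0, 90.0))
uc_after = uctbx.unit_cell((90.0, 90.0, 45.0, 90.0, 90.0, 90.0))

# Relative change in cell volume between the original and refined models
volume_change = abs(uc_before.volume() - uc_after.volume()) / uc_before.volume()
if volume_change > 0.5:  # same hard-coded 50% cutoff as in the example above
    print("Unrealistic unit cell volume change of %.1f%%" % (100 * volume_change))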
Example #33
def split_for_scan_range(experiments, reference, scan_range):
    """Update experiments when scan range is set.

    Args:
        experiments: An experiment list
        reference: A reflection table of reference reflections
        scan_range (tuple): Range of scan images to be processed

    Returns:
        experiments: A new experiment list with the requested scan ranges
        reference: A reflection table with data from the scan ranges

    Raises:
        ValueError: If bad input for scan range.
    """

    # Only do anything if the scan range is set
    if scan_range is not None and len(scan_range) > 0:

        # Ensure that all experiments have the same imageset and scan
        iset = [e.imageset for e in experiments]
        scan = [e.scan for e in experiments]
        assert all(x == iset[0] for x in iset)
        assert all(x == scan[0] for x in scan)

        # Get the imageset and scan
        iset = experiments[0].imageset
        scan = experiments[0].scan

        # Get the array range
        if scan is not None:
            frames_start, frames_end = scan.get_array_range()
            assert scan.get_num_images() == len(iset)
        else:
            frames_start, frames_end = (0, len(iset))

        # Create the new lists
        new_experiments = ExperimentList()
        new_reference_all = reference.split_by_experiment_id()
        new_reference = flex.reflection_table()
        # Pad with empty tables so there is one reference table per experiment
        for i in range(len(experiments) - len(new_reference_all)):
            new_reference_all.append(flex.reflection_table())
        assert len(new_reference_all) == len(experiments)

        # Loop through all the scan ranges and create a new experiment list with
        # the requested scan ranges.
        for scan_start, scan_end in scan_range:
            # Validate the requested scan range
            if scan_end == scan_start:
                raise ValueError(
                    "Scan range end must be higher than start; pass {},{} for single image".format(
                        scan_start, scan_start + 1
                    )
                )
            if scan_end < scan_start:
                raise ValueError("Scan range must be in ascending order")
            elif scan_start < frames_start or scan_end > frames_end:
                raise ValueError(
                    "Scan range must be within image range {}..{}".format(
                        frames_start, frames_end
                    )
                )

            assert scan_end > scan_start
            assert scan_start >= frames_start
            assert scan_end <= frames_end

            index_start = scan_start - frames_start
            index_end = index_start + (scan_end - scan_start)
            assert index_start < index_end
            assert index_start >= 0
            assert index_end <= len(iset)
            new_iset = iset[index_start:index_end]
            if scan is None:
                new_scan = None
            else:
                new_scan = scan[index_start:index_end]

            for i, e1 in enumerate(experiments):
                e2 = Experiment()
                e2.beam = e1.beam
                e2.detector = e1.detector
                e2.goniometer = e1.goniometer
                e2.crystal = slice_crystal(e1.crystal, (index_start, index_end))
                e2.profile = e1.profile
                e2.imageset = new_iset
                e2.scan = new_scan
                new_reference_all[i]["id"] = flex.int(
                    len(new_reference_all[i]), len(new_experiments)
                )
                new_reference.extend(new_reference_all[i])
                new_experiments.append(e2)
        experiments = new_experiments
        reference = new_reference

        # Print some information
        logger.info("Modified experiment list to integrate over requested scan range")
        for scan_start, scan_end in scan_range:
            logger.info(" scan_range = %d -> %d", scan_start, scan_end)

    # Return the experiments
    return experiments, reference
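
A hedged usage sketch for split_for_scan_range as defined above. It assumes indexed output files named indexed.expt and indexed.refl exist on disk (illustrative names) and that only two sub-ranges of the scan are to be integrated.

from dxtbx.model.experiment_list import ExperimentListFactory
from dials.array_family import flex

# Load an indexed experiment list and its reference reflections (file names are illustrative)
experiments = ExperimentListFactory.from_json_file("indexed.expt", check_format=False)
reference = flex.reflection_table.from_file("indexed.refl")

# Keep only images 1-20 and 51-70 of the scan; each tuple is a (start, end) image range
experiments, reference = split_for_scan_range(experiments, reference, [(1, 20), (51, 70)])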
Example #34
def run_integration(params, experiments, reference=None):
    """Perform the integration.

    Returns:
        experiments: The integrated experiments
        reflections: The integrated reflections
        report (optional): An integration report.

    Raises:
        ValueError: For a number of bad inputs
        RuntimeError: If the profile model creation fails
    """
    predicted = None
    rubbish = None

    for abs_params in params.absorption_correction:
        if abs_params.apply:
            if not (
                params.integration.debug.output
                and not params.integration.debug.separate_files
            ):
                raise ValueError(
                    "Shoeboxes must be saved to integration intermediates to apply an absorption correction. "
                    + "Set integration.debug.output=True, integration.debug.separate_files=False and "
                    + "integration.debug.delete_shoeboxes=True to temporarily store shoeboxes."
                )

    # Print if we're using a mask
    for i, exp in enumerate(experiments):
        mask = exp.imageset.external_lookup.mask
        if mask.filename is not None:
            if mask.data:
                logger.info("Using external mask: %s", mask.filename)
                for tile in mask.data:
                    logger.info(" Mask has %d pixels masked", tile.data().count(False))

    # Print the experimental models
    for i, exp in enumerate(experiments):
        summary = "\n".join(
            (
                "",
                "=" * 80,
                "",
                "Experiments",
                "",
                "Models for experiment %d" % i,
                "",
                str(exp.beam),
                str(exp.detector),
            )
        )
        if exp.goniometer:
            summary += str(exp.goniometer) + "\n"
        if exp.scan:
            summary += str(exp.scan) + "\n"
        summary += str(exp.crystal)
        logger.info(summary)

    logger.info("\n".join(("", "=" * 80, "")))
    logger.info(heading("Initialising"))

    # Load the data
    if reference:
        reference, rubbish = process_reference(reference)

        # Check pixels don't belong to neighbours
        if exp.goniometer is not None and exp.scan is not None:
            reference = filter_reference_pixels(reference, experiments)

        # Modify experiment list if scan range is set.
        experiments, reference = split_for_scan_range(
            experiments, reference, params.scan_range
        )

    # Modify experiment list if exclude images is set
    if params.exclude_images:
        for experiment in experiments:
            for index in params.exclude_images:
                experiment.imageset.mark_for_rejection(index, True)

    # Predict the reflections
    logger.info("\n".join(("", "=" * 80, "")))
    logger.info(heading("Predicting reflections"))
    predicted = flex.reflection_table.from_predictions_multi(
        experiments,
        dmin=params.prediction.d_min,
        dmax=params.prediction.d_max,
        margin=params.prediction.margin,
        force_static=params.prediction.force_static,
        padding=params.prediction.padding,
    )

    # Match reference with predicted
    if reference:
        matched, reference, unmatched = predicted.match_with_reference(reference)
        assert len(matched) == len(predicted)
        assert matched.count(True) <= len(reference)
        if matched.count(True) == 0:
            raise ValueError(
                "Invalid input for reference reflections. "
                "Zero reference spots were matched to predictions"
            )
        elif unmatched:
            msg = (
                "Warning: %d reference spots were not matched to predictions"
                % unmatched.size()
            )
            border = "\n".join(("", "*" * 80, ""))
            logger.info("".join((border, msg, border)))
            rubbish.extend(unmatched)

        if len(experiments) > 1:
            # filter out any experiments without matched reference reflections
            # f_: filtered

            f_reference = flex.reflection_table()
            f_predicted = flex.reflection_table()
            f_rubbish = flex.reflection_table()
            f_experiments = ExperimentList()
            good_expt_count = 0

            def refl_extend(src, dest, eid):
                old_id = eid
                new_id = good_expt_count
                tmp = src.select(src["id"] == old_id)
                tmp["id"] = flex.int(len(tmp), good_expt_count)
                if old_id in tmp.experiment_identifiers():
                    identifier = tmp.experiment_identifiers()[old_id]
                    del tmp.experiment_identifiers()[old_id]
                    tmp.experiment_identifiers()[new_id] = identifier
                dest.extend(tmp)

            for expt_id, experiment in enumerate(experiments):
                if len(reference.select(reference["id"] == expt_id)) != 0:
                    refl_extend(reference, f_reference, expt_id)
                    refl_extend(predicted, f_predicted, expt_id)
                    refl_extend(rubbish, f_rubbish, expt_id)
                    f_experiments.append(experiment)
                    good_expt_count += 1
                else:
                    logger.info(
                        "Removing experiment %d: no reference reflections matched to predictions",
                        expt_id,
                    )

            reference = f_reference
            predicted = f_predicted
            experiments = f_experiments
            rubbish = f_rubbish

    # Select a random sample of the predicted reflections
    if not params.sampling.integrate_all_reflections:
        predicted = sample_predictions(experiments, predicted, params)

    # Compute the profile model - either load existing or compute
    # can raise RuntimeError
    experiments = ProfileModelFactory.create(params, experiments, reference)
    for expr in experiments:
        if expr.profile is None:
            raise ValueError("No profile information in experiment list")
    del reference

    # Compute the bounding box
    predicted.compute_bbox(experiments)

    # Create the integrator
    integrator = create_integrator(params, experiments, predicted)

    # Integrate the reflections
    reflections = integrator.integrate()

    # Remove unintegrated reflections
    if not params.output.output_unintegrated_reflections:
        keep = reflections.get_flags(reflections.flags.integrated, all=False)
        logger.info(
            "Removing %d unintegrated reflections of %d total"
            % (keep.count(False), keep.size())
        )

        reflections = reflections.select(keep)

    # Append rubbish data onto the end
    if rubbish is not None and params.output.include_bad_reference:
        mask = flex.bool(len(rubbish), True)
        rubbish.unset_flags(mask, rubbish.flags.integrated_sum)
        rubbish.unset_flags(mask, rubbish.flags.integrated_prf)
        rubbish.set_flags(mask, rubbish.flags.bad_reference)
        reflections.extend(rubbish)

    # Correct integrated intensities for absorption correction, if necessary
    for abs_params in params.absorption_correction:
        if abs_params.apply and abs_params.algorithm == "fuller_kapton":
            from dials.algorithms.integration.kapton_correction import (
                multi_kapton_correction,
            )

            experiments, reflections = multi_kapton_correction(
                experiments, reflections, abs_params.fuller_kapton, logger=logger
            )()

    if params.significance_filter.enable:
        from dials.algorithms.integration.stills_significance_filter import (
            SignificanceFilter,
        )

        sig_filter = SignificanceFilter(params)
        filtered_refls = sig_filter(experiments, reflections)
        accepted_expts = ExperimentList()
        accepted_refls = flex.reflection_table()
        logger.info(
            "Removed %d reflections out of %d when applying significance filter",
            (reflections.size() - filtered_refls.size()),
            reflections.size(),
        )
        for expt_id, expt in enumerate(experiments):
            refls = filtered_refls.select(filtered_refls["id"] == expt_id)
            if refls:
                accepted_expts.append(expt)
                current_id = expt_id
                new_id = len(accepted_expts) - 1
                refls["id"] = flex.int(len(refls), new_id)
                if expt.identifier:
                    del refls.experiment_identifiers()[current_id]
                    refls.experiment_identifiers()[new_id] = expt.identifier
                accepted_refls.extend(refls)
            else:
                logger.info(
                    "Removed experiment %d which has no reflections left after applying significance filter",
                    expt_id,
                )

        if not accepted_refls:
            raise ValueError("No reflections left after applying significance filter")
        experiments = accepted_expts
        reflections = accepted_refls

    # Write a report if requested
    report = None
    if params.output.report is not None:
        report = integrator.report()

    return experiments, reflections, report
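
A hedged driver sketch for run_integration. It assumes the integration phil scope can be imported from dials.command_line.integrate (an assumption about the scope's location) and that indexed data with image access are on disk; file names are illustrative.

from dxtbx.model.experiment_list import ExperimentListFactory
from dials.array_family import flex
from dials.command_line.integrate import phil_scope  # assumption: standard location of the scope

params = phil_scope.extract()  # default integration parameters
experiments = ExperimentListFactory.from_json_file("indexed.expt")  # image data needed for integration
reference = flex.reflection_table.from_file("indexed.refl")

experiments, reflections, report = run_integration(params, experiments, reference)
reflections.as_file("integrated.refl")
experiments.as_json("integrated.expt")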
Example #35
def data_from_unmerged_mtz(filename):
    """
    Produce a minimal reflection table from an MTZ file.

    The returned reflection table will not contain all the standard
    columns, only those that are necessary for the IntensityDist class.

    :param filename: Name of an unmerged MTZ input file.
    :type filename: str
    :return: A reflection table object, containing only the columns
      * ``miller_index``
      * ``intensity.sum.value``
      * ``intensity.sum.variance``
      * ``xyzobs.px.value``
      * ``id``
    :rtype: dials.array_family_flex_ext.reflection_table
    """

    from iotbx import mtz
    from dxtbx.model import Crystal, Experiment, ExperimentList

    m = mtz.object(filename).crystals()  # Parse MTZ, with lots of useful methods.
    # Get some data and turn it into a reflection table and experiment list.
    # First, the reflection table
    col_dict = {}
    for crystal in m:
        for dataset in crystal.datasets():
            cols = dataset.columns()  # Gets column objects.
            col_dict = {c.label(): c for c in cols}  # A dict of all the columns.
            if col_dict:
                break
        if col_dict:
            break
    if not col_dict:
        raise RuntimeError("Unable to read data from mtz file %s" % filename)
    h, k, l = (
        col_dict[label].extract_values().as_double().iround()
        for label in ("H", "K", "L")
    )
    intensity, sigI, x, y = (
        col_dict[label].extract_values().as_double()
        for label in ("I", "SIGI", "XDET", "YDET")
    )
    # Honestly flex?!  Oh well, for now, we have to go round the houses:
    frame = col_dict["BATCH"].extract_values().as_double().iround().as_double()

    rtable = flex.reflection_table()
    rtable["miller_index"] = flex.miller_index(h, k, l)
    rtable["intensity.sum.value"] = intensity
    rtable["intensity.sum.variance"] = flex.pow2(sigI)
    rtable["xyzobs.px.value"] = flex.vec3_double(x, y, frame)
    rtable["id"] = flex.int(rtable.size(), 0)

    # Now generate a corresponding experiment list.
    indices = flex.vec3_double([(1, 0, 0), (0, 1, 0), (0, 0, 1)])
    # Each Crystal object needs to be constructed from xyz unit cell
    # parameters and a space group.
    abc = [m[0].unit_cell().orthogonalize(vec) for vec in indices]
    space_group = m[0].crystal_symmetry().space_group()
    crystal_params = abc + [space_group]

    elist = ExperimentList([Experiment(crystal=Crystal(*crystal_params))])

    return rtable, elist
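
A short usage sketch for data_from_unmerged_mtz; the MTZ file name is illustrative, and the file is expected to carry the I, SIGI, XDET, YDET and BATCH columns read above.

# File name is illustrative
rtable, elist = data_from_unmerged_mtz("unmerged.mtz")
print("%d reflections read" % rtable.size())
print("Unit cell:", elist[0].crystal.get_unit_cell())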
Example #36
def test_reflections_to_batch_properties(data_array, example_miller_set,
                                         example_crystal):
    """Test the helper functions that provide the batch properties from reflection
    tables and experiments."""
    # first make a reflection table.
    reflections = flex.reflection_table()
    reflections["intensity.scale.value"] = data_array.data() * flex.double(
        9, 2.0)
    reflections["inverse_scale_factor"] = flex.double(9, 2.0)
    reflections["intensity.scale.variance"] = flex.double(
        9, 4.0) * flex.double(9, 4.0)
    reflections["xyzobs.px.value"] = flex.vec3_double([(0, 0, 0.1)] * 3 +
                                                      [(0, 0, 1.1)] * 3 +
                                                      [(0, 0, 2.1)] * 2 +
                                                      [(0, 0, 3.1)])
    reflections["miller_index"] = example_miller_set.indices()
    reflections["id"] = flex.int(9, 1)
    reflections.set_flags(flex.bool(9, True), reflections.flags.integrated)
    reflections.set_flags(flex.bool(9, True), reflections.flags.scaled)

    experiments = [mock.Mock()]
    experiments[0].scan.get_image_range.return_value = [1, 10]
    experiments[0].crystal = example_crystal
    experiments[0].beam.get_wavelength.return_value = 1

    (
        bins,
        rmerge,
        isigi,
        scalesvsbatch,
        batch_data,
    ) = reflection_tables_to_batch_dependent_properties(  # pylint: disable=unbalanced-tuple-unpacking
        [reflections], experiments)

    assert bins == expected_results["bins"]
    assert rmerge == pytest.approx(expected_results["rmergevb"], 1e-4)
    assert isigi == pytest.approx(expected_results["isigivb"], 1e-4)
    assert scalesvsbatch == pytest.approx([2.0] * 4, 1e-4)
    assert batch_data == [{"range": (1, 10), "id": 0}]

    # now try a two experiment dataset in a combined table.
    import copy

    reflections_2 = copy.deepcopy(reflections)
    reflections_2["id"] = flex.int(9, 2)
    reflections.extend(reflections_2)
    experiments = [mock.Mock(), mock.Mock()]
    experiments[0].scan.get_image_range.return_value = [1, 10]
    experiments[0].crystal = example_crystal
    experiments[0].beam.get_wavelength.return_value = 1
    experiments[1].scan.get_image_range.return_value = [1, 10]
    experiments[1].crystal = example_crystal
    experiments[1].beam.get_wavelength.return_value = 1

    (
        bins,
        rmerge,
        isigi,
        scalesvsbatch,
        batch_data,
    ) = combined_table_to_batch_dependent_properties(  # pylint: disable=unbalanced-tuple-unpacking
        reflections, experiments)

    assert bins == [1, 2, 3, 4, 101, 102, 103, 104]
    assert rmerge == pytest.approx(expected_results["rmergevb"] * 2, 1e-4)
    assert isigi == pytest.approx(expected_results["isigivb"] * 2, 1e-4)
    assert scalesvsbatch == pytest.approx([2.0] * 8, 1e-4)
    assert batch_data == [{
        "range": (1, 10),
        "id": 0
    }, {
        "range": (101, 110),
        "id": 1
    }]
Example #37
def test(dials_regression, run_in_tmpdir):
    data_dir = os.path.join(
        dials_regression, "refinement_test_data", "multi_narrow_wedges"
    )

    input_range = list(range(2, 49))
    for i in (8, 10, 15, 16, 34, 39, 45):
        input_range.remove(i)

    phil_input = "\n".join(
        (
            "  input.experiments={0}/data/sweep_%03d/experiments.json\n"
            + "  input.reflections={0}/data/sweep_%03d/reflections.pickle"
        )
        % (i, i)
        for i in input_range
    )
    # assert phil_input == "\n" + phil_input2 + "\n "

    input_phil = (
        phil_input.format(data_dir)
        + """
 reference_from_experiment.beam=0
 reference_from_experiment.scan=0
 reference_from_experiment.goniometer=0
 reference_from_experiment.detector=0
 """
    )

    with open("input.phil", "w") as phil_file:
        phil_file.writelines(input_phil)

    result = procrunner.run(["dials.combine_experiments", "input.phil"])
    assert not result.returncode and not result.stderr

    # load results
    exp = ExperimentListFactory.from_json_file("combined.expt", check_format=False)
    ref = flex.reflection_table.from_file("combined.refl")

    # test the experiments
    assert len(exp) == 103
    assert len(exp.crystals()) == 103
    assert len(exp.beams()) == 1
    assert len(exp.scans()) == 1
    assert len(exp.detectors()) == 1
    assert len(exp.goniometers()) == 1
    for e in exp:
        assert e.imageset is not None

    # test the reflections
    assert len(ref) == 11689

    result = procrunner.run(
        ["dials.split_experiments", "combined.expt", "combined.refl"]
    )
    assert not result.returncode and not result.stderr

    for i, e in enumerate(exp):
        assert os.path.exists("split_%03d.expt" % i)
        assert os.path.exists("split_%03d.refl" % i)

        exp_single = ExperimentListFactory.from_json_file(
            "split_%03d.expt" % i, check_format=False
        )
        ref_single = flex.reflection_table.from_file("split_%03d.refl" % i)

        assert len(exp_single) == 1
        assert exp_single[0].crystal == e.crystal
        assert exp_single[0].beam == e.beam
        assert exp_single[0].detector == e.detector
        assert exp_single[0].scan == e.scan
        assert exp_single[0].goniometer == e.goniometer
        assert exp_single[0].imageset == e.imageset
        assert len(ref_single) == len(ref.select(ref["id"] == i))
        assert ref_single["id"].all_eq(0)

    result = procrunner.run(
        ["dials.split_experiments", "combined.expt", "output.experiments_prefix=test"]
    )
    assert not result.returncode and not result.stderr

    for i in range(len(exp)):
        assert os.path.exists("test_%03d.expt" % i)

    # Modify a copy of the detector
    detector = copy.deepcopy(exp.detectors()[0])
    panel = detector[0]
    x, y, z = panel.get_origin()
    panel.set_frame(panel.get_fast_axis(), panel.get_slow_axis(), (x, y, z + 10))
    # Set half of the experiments to the new detector
    for i in range(len(exp) // 2):
        exp[i].detector = detector
    exp.as_json("modded.expt")

    result = procrunner.run(
        [
            "dials.split_experiments",
            "modded.expt",
            "combined.refl",
            "output.experiments_prefix=test_by_detector",
            "output.reflections_prefix=test_by_detector",
            "by_detector=True",
        ]
    )
    assert not result.returncode and not result.stderr

    for i in range(2):
        assert os.path.exists("test_by_detector_%03d.expt" % i)
        assert os.path.exists("test_by_detector_%03d.refl" % i)
    assert not os.path.exists("test_by_detector_%03d.expt" % 2)
    assert not os.path.exists("test_by_detector_%03d.refl" % 2)

    # Now do test when input has identifiers set
    reflections = flex.reflection_table.from_file("combined.refl")
    explist = ExperimentListFactory.from_json_file("combined.expt", check_format=False)
    # set string identifiers as nonconsecutive 0,2,4,6....
    for i, exp in enumerate(explist):
        assert i in reflections["id"]
        reflections.experiment_identifiers()[i] = str(i * 2)
        exp.identifier = str(i * 2)
    reflections.as_file("assigned.refl")
    explist.as_json("assigned.expt")

    result = procrunner.run(
        ["dials.split_experiments", "assigned.expt", "assigned.refl"]
    )
    assert not result.returncode and not result.stderr

    for i in range(len(explist)):
        assert os.path.exists("split_%03d.expt" % i)
        assert os.path.exists("split_%03d.refl" % i)

        exp_single = ExperimentListFactory.from_json_file(
            "split_%03d.expt" % i, check_format=False
        )
        ref_single = flex.reflection_table.from_file("split_%03d.refl" % i)

        assert len(exp_single) == 1
        # resets all ids to 0, but keeps mapping to unique identifier.
        # doesn't have to be set to 0 but doing this to keep more consistent with
        # other dials programs
        assert ref_single["id"].all_eq(0)
        assert ref_single.experiment_identifiers()[0] == str(i * 2)

    # update modded experiments to have same identifiers as assigned_experiments
    moddedlist = ExperimentListFactory.from_json_file("modded.expt", check_format=False)
    for i, exp in enumerate(moddedlist):
        exp.identifier = str(i * 2)
    moddedlist.as_json("modded.expt")

    result = procrunner.run(
        [
            "dials.split_experiments",
            "modded.expt",
            "assigned.refl",
            "output.experiments_prefix=test_by_detector",
            "output.reflections_prefix=test_by_detector",
            "by_detector=True",
        ]
    )
    assert not result.returncode and not result.stderr

    # Expect each dataset to have ids from 0..50, with the experiment identifiers
    # kept from before (0, 2, 4, 6, ...)
    current_exp_id = 0
    for i in range(2):
        assert os.path.exists("test_by_detector_%03d.expt" % i)
        assert os.path.exists("test_by_detector_%03d.refl" % i)
        explist = ExperimentListFactory.from_json_file(
            "test_by_detector_%03d.expt" % i, check_format=False
        )
        refl = flex.reflection_table.from_file("test_by_detector_%03d.refl" % i)

        for k in range(len(explist)):
            assert refl.experiment_identifiers()[k] == str(current_exp_id)
            current_exp_id += 2

    assert not os.path.exists("test_by_detector_%03d.expt" % 2)
    assert not os.path.exists("test_by_detector_%03d.refl" % 2)
Example #38
def index(experiments, reflections, params):
    """
    Index the input experiments and reflections.

    Args:
        experiments: The experiments to index
        reflections (list): A list of reflection tables containing strong spots
        params: An instance of the indexing phil scope

    Returns:
        (tuple): tuple containing:
            experiments: The indexed experiment list
            reflections (dials.array_family.flex.reflection_table):
                The indexed reflections

    Raises:
        ValueError: `reflections` is an empty list or `experiments` contains a
                    combination of sequence and stills data.
        dials.algorithms.indexing.DialsIndexError: Indexing failed.
    """
    if experiments.crystals()[0] is not None:
        known_crystal_models = experiments.crystals()
    else:
        known_crystal_models = None

    if len(reflections) == 0:
        raise ValueError("No reflection lists found in input")
    elif len(reflections) == 1:
        if "imageset_id" not in reflections[0]:
            reflections[0]["imageset_id"] = reflections[0]["id"]
    elif len(reflections) > 1:
        assert len(reflections) == len(experiments)
        for i in range(len(reflections)):
            reflections[i]["imageset_id"] = flex.int(len(reflections[i]), i)
            if i > 0:
                reflections[0].extend(reflections[i])
    reflections = reflections[0]

    if params.indexing.image_range:
        reflections = slice_reflections(reflections, params.indexing.image_range)

    if len(experiments) == 1 or params.indexing.joint_indexing:
        indexed_experiments, indexed_reflections = _index_experiments(
            experiments,
            reflections,
            copy.deepcopy(params),
            known_crystal_models=known_crystal_models,
        )
    else:
        indexed_experiments = ExperimentList()
        indexed_reflections = flex.reflection_table()

        with concurrent.futures.ProcessPoolExecutor(
            max_workers=params.indexing.nproc
        ) as pool:
            futures = []
            for i_expt, expt in enumerate(experiments):
                refl = reflections.select(reflections["imageset_id"] == i_expt)
                refl["imageset_id"] = flex.size_t(len(refl), 0)
                futures.append(
                    pool.submit(
                        _index_experiments,
                        ExperimentList([expt]),
                        refl,
                        copy.deepcopy(params),
                        known_crystal_models=known_crystal_models,
                    )
                )
            tables_list = []
            for future in concurrent.futures.as_completed(futures):
                try:
                    idx_expts, idx_refl = future.result()
                except Exception as e:
                    print(e)
                else:
                    if idx_expts is None:
                        continue
                    # Update the experiment ids by incrementing by the number of indexed
                    # experiments already in the list
                    ##FIXME below, is i_expt correct - or should it be the
                    # index of the 'future'?
                    idx_refl["imageset_id"] = flex.size_t(idx_refl.size(), i_expt)
                    tables_list.append(idx_refl)
                    indexed_experiments.extend(idx_expts)
            tables_list = renumber_table_id_columns(tables_list)
            for table in tables_list:
                indexed_reflections.extend(table)
    return indexed_experiments, indexed_reflections
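
A hedged usage sketch for index() as defined above. It assumes spot-finding output is on disk and that the indexing phil scope can be imported from dials.command_line.index (an assumption about its location); the file names and the choice of indexing method are illustrative.

from dxtbx.model.experiment_list import ExperimentListFactory
from dials.array_family import flex
from dials.command_line.index import phil_scope  # assumption: standard location of the scope

params = phil_scope.extract()
params.indexing.method = "fft3d"  # illustrative choice of indexing method

experiments = ExperimentListFactory.from_json_file("imported.expt", check_format=False)
strong = flex.reflection_table.from_file("strong.refl")

indexed_experiments, indexed_reflections = index(experiments, [strong], params)
indexed_experiments.as_json("indexed.expt")
indexed_reflections.as_file("indexed.refl")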
Example #39
    def run(self, args=None):
        """Execute the script."""

        # Parse the command line
        params, _ = self.parser.parse_args(args, show_diff_phil=True)

        # Try to load the models and data
        if not params.input.experiments:
            print("No Experiments found in the input")
            self.parser.print_help()
            return
        if params.input.reflections:
            if len(params.input.reflections) != len(params.input.experiments):
                raise Sorry(
                    "The number of input reflections files does not match the "
                    "number of input experiments")
        reflections, experiments = reflections_and_experiments_from_files(
            params.input.reflections, params.input.experiments)
        if reflections:
            reflections = reflections[0]
        else:
            reflections = None

        experiments_template = functools.partial(
            params.output.template.format,
            prefix=params.output.experiments_prefix,
            maxindexlength=len(str(len(experiments) - 1)),
            extension="expt",
        )

        reflections_template = functools.partial(
            params.output.template.format,
            prefix=params.output.reflections_prefix,
            maxindexlength=len(str(len(experiments) - 1)),
            extension="refl",
        )

        if params.output.chunk_sizes:
            if not sum(params.output.chunk_sizes) == len(experiments):
                raise Sorry(
                    "Sum of chunk sizes list (%s) not equal to number of experiments (%s)"
                    % (sum(params.output.chunk_sizes), len(experiments)))

        if params.by_wavelength:
            if reflections:
                if not reflections.experiment_identifiers():
                    raise Sorry(
                        "Unable to split by wavelength as no experiment "
                        "identifiers are set in the reflection table.")
            if all(experiments.identifiers() == ""):
                raise Sorry("Unable to split by wavelength as no experiment "
                            "identifiers are set in the experiment list.")

            wavelengths = match_wavelengths(experiments)
            for i, wl in enumerate(sorted(wavelengths.keys())):
                expids = []
                new_exps = ExperimentList()
                exp_nos = wavelengths[wl]
                imageset_ids = []  # record imageset ids to set in refl table
                imagesets_found = OrderedSet()
                for j in exp_nos:
                    expids.append(experiments[j].identifier)  # string
                    new_exps.append(experiments[j])
                    imagesets_found.add(experiments[j].imageset)
                    imageset_ids.append(
                        imagesets_found.index(experiments[j].imageset))
                experiment_filename = experiments_template(index=i)
                print(
                    f"Saving experiments with wavelength {wl} to {experiment_filename}"
                )
                new_exps.as_json(experiment_filename)
                if reflections:
                    refls = reflections.select_on_experiment_identifiers(
                        expids)
                    refls["imageset_id"] = flex.int(refls.size(), 0)
                    # now set the imageset ids
                    for k, iset_id in enumerate(imageset_ids):
                        # select the experiment based on id (unique per sweep),
                        # and set the imageset_id (not necessarily unique per sweep
                        # if imageset is shared)
                        sel = refls["id"] == k
                        refls["imageset_id"].set_selected(sel, iset_id)
                    reflections_filename = reflections_template(index=i)
                    print("Saving reflections with wavelength %s to %s" %
                          (wl, reflections_filename))
                    refls.as_file(reflections_filename)

        elif params.by_detector:
            assert (not params.output.chunk_size
                    ), "chunk_size + by_detector is not implemented"
            if reflections is None:
                split_data = {
                    detector: {
                        "experiments": ExperimentList()
                    }
                    for detector in experiments.detectors()
                }
            else:
                split_data = {
                    detector: {
                        "experiments": ExperimentList(),
                        "reflections": flex.reflection_table(),
                        "imagesets_found": OrderedSet(),
                    }
                    for detector in experiments.detectors()
                }
            for i, experiment in enumerate(experiments):
                split_expt_id = experiments.detectors().index(
                    experiment.detector)
                experiment_filename = experiments_template(index=split_expt_id)
                print("Adding experiment %d to %s" % (i, experiment_filename))
                split_data[experiment.detector]["experiments"].append(
                    experiment)
                if reflections is not None:
                    reflections_filename = reflections_template(
                        index=split_expt_id)
                    split_data[experiment.detector]["imagesets_found"].add(
                        experiment.imageset)
                    print("Adding reflections for experiment %d to %s" %
                          (i, reflections_filename))
                    if reflections.experiment_identifiers().keys():
                        # first find which id value corresponds to experiment in question
                        identifier = experiment.identifier
                        id_ = None
                        for k in reflections.experiment_identifiers().keys():
                            if reflections.experiment_identifiers(
                            )[k] == identifier:
                                id_ = k
                                break
                        if id_ is None:
                            raise Sorry(
                                "Unable to find id matching experiment identifier in reflection table."
                            )
                        ref_sel = reflections.select(reflections["id"] == id_)
                        # now reset ids and reset/update identifiers map
                        for k in ref_sel.experiment_identifiers().keys():
                            del ref_sel.experiment_identifiers()[k]
                        new_id = len(
                            split_data[experiment.detector]["experiments"]) - 1
                        ref_sel["id"] = flex.int(len(ref_sel), new_id)
                        ref_sel.experiment_identifiers()[new_id] = identifier
                    else:
                        ref_sel = reflections.select(reflections["id"] == i)
                        ref_sel["id"] = flex.int(
                            len(ref_sel),
                            len(split_data[experiment.detector]["experiments"])
                            - 1,
                        )
                    iset_id = split_data[
                        experiment.detector]["imagesets_found"].index(
                            experiment.imageset)
                    ref_sel["imageset_id"] = flex.int(ref_sel.size(), iset_id)
                    split_data[experiment.detector]["reflections"].extend(
                        ref_sel)

            for i, detector in enumerate(experiments.detectors()):
                experiment_filename = experiments_template(index=i)
                print("Saving experiment %d to %s" % (i, experiment_filename))
                split_data[detector]["experiments"].as_json(
                    experiment_filename)

                if reflections is not None:
                    reflections_filename = reflections_template(index=i)
                    print("Saving reflections for experiment %d to %s" %
                          (i, reflections_filename))
                    split_data[detector]["reflections"].as_file(
                        reflections_filename)
        elif params.output.chunk_size or params.output.chunk_sizes:

            def save_chunk(chunk_id, expts, refls):
                experiment_filename = experiments_template(index=chunk_id)
                print("Saving chunk %d to %s" %
                      (chunk_id, experiment_filename))
                expts.as_json(experiment_filename)
                if refls is not None:
                    reflections_filename = reflections_template(index=chunk_id)
                    print("Saving reflections for chunk %d to %s" %
                          (chunk_id, reflections_filename))
                    refls.as_file(reflections_filename)

            chunk_counter = 0
            chunk_expts = ExperimentList()
            if reflections:
                chunk_refls = flex.reflection_table()
            else:
                chunk_refls = None
            next_iset_id = 0
            imagesets_found = OrderedSet()
            for i, experiment in enumerate(experiments):
                chunk_expts.append(experiment)
                if reflections:
                    if reflections.experiment_identifiers().keys():
                        # first find which id value corresponds to experiment in question
                        identifier = experiment.identifier
                        id_ = None
                        for k in reflections.experiment_identifiers().keys():
                            if reflections.experiment_identifiers(
                            )[k] == identifier:
                                id_ = k
                                break
                        if id_ is None:
                            raise Sorry(
                                "Unable to find id matching experiment identifier in reflection table."
                            )
                        ref_sel = reflections.select(reflections["id"] == id_)
                        # now reset ids and reset/update identifiers map
                        for k in ref_sel.experiment_identifiers().keys():
                            del ref_sel.experiment_identifiers()[k]
                        new_id = len(chunk_expts) - 1
                        ref_sel["id"] = flex.int(len(ref_sel), new_id)
                        ref_sel.experiment_identifiers()[new_id] = identifier
                    else:
                        ref_sel = reflections.select(reflections["id"] == i)
                        ref_sel["id"] = flex.int(len(ref_sel),
                                                 len(chunk_expts) - 1)
                    if experiment.imageset not in imagesets_found:
                        imagesets_found.add(experiment.imageset)
                        ref_sel["imageset_id"] = flex.int(
                            ref_sel.size(), next_iset_id)
                        next_iset_id += 1
                    else:
                        iset_id = imagesets_found.index(experiment.imageset)
                        ref_sel["imageset_id"] = flex.int(
                            ref_sel.size(), iset_id)
                    chunk_refls.extend(ref_sel)
                if params.output.chunk_sizes:
                    chunk_limit = params.output.chunk_sizes[chunk_counter]
                else:
                    chunk_limit = params.output.chunk_size
                if len(chunk_expts) == chunk_limit:
                    save_chunk(chunk_counter, chunk_expts, chunk_refls)
                    chunk_counter += 1
                    chunk_expts = ExperimentList()
                    if reflections:
                        chunk_refls = flex.reflection_table()
                    else:
                        chunk_refls = None
            if len(chunk_expts) > 0:
                save_chunk(chunk_counter, chunk_expts, chunk_refls)
        else:
            for i, experiment in enumerate(experiments):

                experiment_filename = experiments_template(index=i)
                print("Saving experiment %d to %s" % (i, experiment_filename))
                ExperimentList([experiment]).as_json(experiment_filename)

                if reflections is not None:
                    reflections_filename = reflections_template(index=i)
                    print("Saving reflections for experiment %d to %s" %
                          (i, reflections_filename))
                    ref_sel = reflections.select(reflections["id"] == i)
                    if ref_sel.experiment_identifiers().keys():
                        identifier = ref_sel.experiment_identifiers()[i]
                        for k in ref_sel.experiment_identifiers().keys():
                            del ref_sel.experiment_identifiers()[k]
                        ref_sel["id"] = flex.int(ref_sel.size(), 0)
                        ref_sel.experiment_identifiers()[0] = identifier
                    else:
                        ref_sel["id"] = flex.int(len(ref_sel), 0)
                    ref_sel["imageset_id"] = flex.int(len(ref_sel), 0)
                    ref_sel.as_file(reflections_filename)

        return
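
The run() method above appears to back the dials.split_experiments command-line script (it exercises the same options as the tests earlier in this listing). A hedged sketch of driving it the way those tests do, via procrunner; the input file names and the chunk size are illustrative.

import procrunner

# Split a combined dataset into chunks of 10 experiments each (illustrative inputs)
result = procrunner.run(
    [
        "dials.split_experiments",
        "combined.expt",
        "combined.refl",
        "output.chunk_size=10",
    ]
)
assert not result.returncode and not result.stderr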