Example #1
def tst_origin_offset_miller_indices():
  mi = flex.miller_index([(h, k, l) for h in range(5)
                                    for k in range(5)
                                    for l in range(5)])
  omi = origin.offset_miller_indices(mi, (0, 1, -1))
  ref = flex.miller_index([(h, k, l) for h in range(5)
                                     for k in range(1, 6)
                                     for l in range(-1, 4)])

  assert ref == omi
  print('OK')
  def generate_reflections(self):
    """Use reeke_model to generate indices of reflections near to the Ewald
    sphere that might be observed on a still image. Build a reflection_table
    of these."""
    from cctbx.sgtbx import space_group_info

    space_group_type = space_group_info("P 1").group().type()

    # create a ReekeIndexGenerator
    UB = self.crystal.get_U() * self.crystal.get_B()
    axis = self.goniometer.get_rotation_axis()
    s0 = self.beam.get_s0()
    dmin = 1.5
    # use the same UB at the beginning and end - the margin parameter ensures
    # we still have indices close to the Ewald sphere generated
    from dials.algorithms.spot_prediction import ReekeIndexGenerator
    r = ReekeIndexGenerator(UB, UB, space_group_type, axis, s0, dmin=1.5, margin=1)

    # generate indices
    hkl = r.to_array()
    nref = len(hkl)

    # create a reflection table
    from dials.array_family import flex
    table = flex.reflection_table()
    table['flags'] = flex.size_t(nref, 0)
    table['id']    = flex.int(nref, 0)
    table['panel'] = flex.size_t(nref, 0)
    table['miller_index'] = flex.miller_index(hkl)
    table['entering']     = flex.bool(nref, True)
    table['s1']           = flex.vec3_double(nref)
    table['xyzcal.mm']    = flex.vec3_double(nref)
    table['xyzcal.px']    = flex.vec3_double(nref)

    return table
  def prepare_reflection_list(self, detector):

    spots = self.triclinic.get_observations_with_outlier_removal()
    ordinary_python_list_of_indexed_observations = [
      {
        "id":0,
        "panel":0,
        "miller_index":item["pred"],
        "xyzobs.px.value":(spots[item["spot"]].ctr_mass_x(),spots[item["spot"]].ctr_mass_y(),0.0),
        "xyzobs.px.variance":(0.25,0.25,0.25),
        "spotfinder_lookup":item["spot"]
      }
      for item in self.triclinic_pairs
    ]

    self.length = len(ordinary_python_list_of_indexed_observations)
    R = flex.reflection_table.empty_standard(self.length)

    R['miller_index'] = flex.miller_index([item["miller_index"] for item in ordinary_python_list_of_indexed_observations])
    R['xyzobs.px.value'] = flex.vec3_double([item["xyzobs.px.value"] for item in ordinary_python_list_of_indexed_observations])
    R['xyzobs.px.variance'] = flex.vec3_double([item["xyzobs.px.variance"] for item in ordinary_python_list_of_indexed_observations])
    R['spotfinder_lookup'] = flex.int([item["spotfinder_lookup"] for item in ordinary_python_list_of_indexed_observations])

    R['xyzobs.mm.value'] = flex.vec3_double(self.length)
    R['xyzobs.mm.variance'] = flex.vec3_double(self.length)

    pxlsz = detector[0].get_pixel_size()

    for idx in range(self.length):
      R['xyzobs.mm.value'][idx] = (R['xyzobs.px.value'][idx][0]*pxlsz[0],
                                   R['xyzobs.px.value'][idx][1]*pxlsz[1],
                                   R['xyzobs.px.value'][idx][2])
      R['xyzobs.mm.variance'][idx] = (R['xyzobs.px.variance'][idx][0]*pxlsz[0],
                                      R['xyzobs.px.variance'][idx][1]*pxlsz[1],
                                      R['xyzobs.px.variance'][idx][2])

    return R
def get_pix_coords(wavelength, A, mill_arr, detector, delta_i=0.02):
    """ Code copied from sim.py courtesy of Aaron and Tara """
    s0 = col((0, 0, -1/wavelength))
    q = flex.vec3_double([A*col(idx) for idx in mill_arr.indices().as_vec3_double()])
    s0_hat = flex.vec3_double([s0.normalize()]*len(q))
    q_hat = q.each_normalize()
    #q_hat.cross(flex.vec3_double([s0_hat]*len(q_hat)))
    e1_hat = q_hat.cross(s0_hat)
    c0_hat = s0_hat.cross(e1_hat)
    q_len_sq = flex.double([col(v).length_sq() for v in q])
    a_side = q_len_sq*wavelength/2             # component of r along -s0_hat
    b_side = flex.sqrt(q_len_sq - a_side**2)   # component of r along c0_hat
    r_vec = flex.vec3_double(-a_side*s0_hat + b_side*c0_hat)
    s1 = r_vec + s0

    EQ=q+s0
    len_EQ=flex.double([col(v).length() for v in EQ])
    ratio=len_EQ*wavelength

    indices = flex.miller_index()
    coords =flex.vec2_double()
    for i in range(len(s1)):
        if ratio[i] > 1 - delta_i and ratio[i] < 1 + delta_i:
            indices.append(mill_arr.indices()[i])
            pix = detector[0].get_ray_intersection_px(s1[i])
            if detector[0].is_coord_valid(pix):
                coords.append(pix)

    return coords, indices
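
A minimal self-contained sketch (hypothetical numbers, plain Python) of the Ewald-sphere decomposition used in get_pix_coords above: with a_side = |q|^2 * wavelength / 2 taken along -s0_hat and b_side = sqrt(|q|^2 - a_side^2) along c0_hat, the resulting s1 = r_vec + s0 has length 1/wavelength, i.e. it lies on the Ewald sphere.

import math

wavelength = 1.0   # hypothetical wavelength
q_len = 0.4        # hypothetical |q|

a_side = q_len ** 2 * wavelength / 2.0        # component along -s0_hat
b_side = math.sqrt(q_len ** 2 - a_side ** 2)  # component along c0_hat

# s0_hat and c0_hat are orthonormal and s0 = s0_hat / wavelength, so
# |s1|^2 = (1/wavelength - a_side)^2 + b_side^2
s1_len = math.sqrt((1.0 / wavelength - a_side) ** 2 + b_side ** 2)
assert abs(s1_len - 1.0 / wavelength) < 1e-12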
Example #5
  def __call__(self, params, options):
    ''' Import the integrate.hkl file. '''

    from iotbx.xds import integrate_hkl
    from dials.array_family import flex
    from dials.util.command_line import Command
    from cctbx import sgtbx

    # Get the unit cell to calculate the resolution
    uc = self._experiment.crystal.get_unit_cell()

    # Read the INTEGRATE.HKL file
    Command.start('Reading INTEGRATE.HKL')
    handle = integrate_hkl.reader()
    handle.read_file(self._integrate_hkl)
    hkl    = flex.miller_index(handle.hkl)
    xyzcal = flex.vec3_double(handle.xyzcal)
    xyzobs = flex.vec3_double(handle.xyzobs)
    iobs   = flex.double(handle.iobs)
    sigma  = flex.double(handle.sigma)
    rlp = flex.double(handle.rlp)
    peak = flex.double(handle.peak) * 0.01
    Command.end('Read %d reflections from INTEGRATE.HKL file.' % len(hkl))

    # Derive the reindex matrix
    rdx = self.derive_reindex_matrix(handle)
    print('Reindex matrix:\n%d %d %d\n%d %d %d\n%d %d %d' % (rdx.elems))

    # Reindex the reflections
    Command.start('Reindexing reflections')
    cb_op = sgtbx.change_of_basis_op(sgtbx.rt_mx(sgtbx.rot_mx(rdx.elems)))
    hkl = cb_op.apply(hkl)
    Command.end('Reindexed %d reflections' % len(hkl))

    # Create the reflection list
    Command.start('Creating reflection table')
    table = flex.reflection_table()
    table['id'] = flex.int(len(hkl), 0)
    table['panel'] = flex.size_t(len(hkl), 0)
    table['miller_index'] = hkl
    table['xyzcal.px'] = xyzcal
    table['xyzobs.px.value'] = xyzobs
    table['intensity.cor.value'] = iobs
    table['intensity.cor.variance'] = sigma**2
    table['intensity.prf.value'] = iobs * peak / rlp
    table['intensity.prf.variance'] = (sigma * peak / rlp)**2
    table['lp'] = 1.0 / rlp
    table['d'] = flex.double(uc.d(h) for h in hkl)
    Command.end('Created table with {0} reflections'.format(len(table)))

    # Output the table to pickle file
    if params.output.filename is None:
      params.output.filename = 'integrate_hkl.pickle'
    Command.start('Saving reflection table to %s' % params.output.filename)
    table.as_pickle(params.output.filename)
    Command.end('Saved reflection table to %s' % params.output.filename)
def run(args):
  import libtbx.load_env
  usage = "%s experiments.json [options]" %libtbx.env.dispatcher_name

  parser = OptionParser(
    usage=usage,
    phil=phil_scope,
    read_experiments=True,
    check_format=False,
    epilog=help_message)

  params, options = parser.parse_args(show_diff_phil=True)
  experiments = flatten_experiments(params.input.experiments)
  if len(experiments) == 0:
    parser.print_help()
    return
  elif len(experiments) > 1:
    raise Sorry("More than one experiment present")

  assert len(params.miller_index), "Must specify at least one miller_index to predict."

  experiment = experiments[0]

  reflections = flex.reflection_table()
  miller_indices = flex.miller_index()
  entering_flags = flex.bool()
  for mi in params.miller_index:
    miller_indices.append(mi)
    miller_indices.append(mi)
    entering_flags.append(True)
    entering_flags.append(False)
  reflections['miller_index'] = miller_indices
  reflections['entering'] = entering_flags
  reflections['id'] = flex.size_t(len(reflections), 0)

  if params.expand_to_p1:
    from cctbx.miller import expand_to_p1_iselection
    proxy = expand_to_p1_iselection(
      experiment.crystal.get_space_group(),
      anomalous_flag=True,
      indices=miller_indices,
      build_iselection=True)
    reflections = reflections.select(proxy.iselection)
    reflections['miller_index'] = proxy.indices

  from dials.algorithms.refinement.prediction.managed_predictors import ExperimentsPredictor
  predictor = ExperimentsPredictor([experiment])
  predicted = predictor.predict(reflections)

  zmin, zmax = experiment.scan.get_array_range()
  z = predicted['xyzcal.px'].parts()[2]
  predicted = predicted.select((z >= zmin) & (z <= zmax))

  show_predictions(predicted)
Example #7
  def __call__(self, params, options):
    ''' Import the spot.xds file. '''
    from iotbx.xds import spot_xds
    from dials.util.command_line import Command
    from dials.array_family import flex

    # Read the SPOT.XDS file
    Command.start('Reading SPOT.XDS')
    handle = spot_xds.reader()
    handle.read_file(self._spot_xds)
    centroid = handle.centroid
    intensity = handle.intensity
    try:
      miller_index = handle.miller_index
    except AttributeError:
      miller_index = None
    Command.end('Read {0} spots from SPOT.XDS file.'.format(len(centroid)))

    # Create the reflection list
    Command.start('Creating reflection list')
    table = flex.reflection_table()
    table['id'] = flex.int(len(centroid), 0)
    table['panel'] = flex.size_t(len(centroid), 0)
    if miller_index:
      table['miller_index'] = flex.miller_index(miller_index)
    table['xyzobs.px.value'] = flex.vec3_double(centroid)
    table['intensity.sum.value'] = flex.double(intensity)
    Command.end('Created reflection list')

    # Remove invalid reflections
    Command.start('Removing invalid reflections')
    if miller_index and params.remove_invalid:
      flags = flex.bool([h != (0, 0, 0) for h in table['miller_index']])
      table = table.select(flags)
    Command.end('Removed invalid reflections, %d remaining' % len(table))

    # Fill empty standard columns
    if params.add_standard_columns:
      Command.start('Adding standard columns')
      rt = flex.reflection_table.empty_standard(len(table))
      rt.update(table)
      table = rt
      # set variances to unity
      table['xyzobs.mm.variance'] = flex.vec3_double(len(table), (1,1,1))
      table['xyzobs.px.variance'] = flex.vec3_double(len(table), (1,1,1))
      Command.end('Standard columns added')

    # Output the table to pickle file
    if params.output.filename is None:
      params.output.filename = 'spot_xds.pickle'
    Command.start('Saving reflection table to %s' % params.output.filename)
    table.as_pickle(params.output.filename)
    Command.end('Saved reflection table to %s' % params.output.filename)
Example #8
  def run(self):
    from os.path import join, exists
    from libtbx import easy_run
    from dials.array_family import flex


    assert(exists(join(self.path, "integrated.pickle")))

    input_filename = join(self.path, "integrated.pickle")

    easy_run.fully_buffered([
      'dev.dials.sort_reflections',
      input_filename,
      'key=intensity.sum.value',
      'output=sorted1.pickle',
    ]).raise_if_errors()

    assert(exists("sorted1.pickle"))

    sorted1 = flex.reflection_table.from_pickle("sorted1.pickle")
    self.assert_sorted(sorted1['intensity.sum.value'])

    easy_run.fully_buffered([
      'dev.dials.sort_reflections',
      input_filename,
      'output=sorted2.pickle',
      'key=intensity.sum.value',
      'reverse=True'
    ]).raise_if_errors()

    assert(exists("sorted2.pickle"))

    sorted1 = flex.reflection_table.from_pickle("sorted2.pickle")
    self.assert_sorted(sorted1['intensity.sum.value'], reverse=True)

    # test default sort on miller_index
    easy_run.fully_buffered([
      'dev.dials.sort_reflections',
      input_filename,
      'output=sorted3.pickle'
    ]).raise_if_errors()

    assert(exists("sorted3.pickle"))

    sorted1 = flex.reflection_table.from_pickle("sorted3.pickle")
    mi1 = sorted1['miller_index']
    orig = flex.reflection_table.from_pickle(input_filename)
    mi2 = flex.miller_index(sorted(orig['miller_index']))
    assert mi1.all_eq(mi2)

    print('OK')
Example #9
  def __init__(self):
    from dials.array_family import flex

    self.reflections = flex.reflection_table()
    self.reflections['panel'] = flex.size_t()
    self.reflections['bbox'] = flex.int6()
    self.reflections['miller_index'] = flex.miller_index()
    self.reflections['s1'] = flex.vec3_double()
    self.reflections['xyzcal.px'] = flex.vec3_double()
    self.reflections['xyzcal.mm'] = flex.vec3_double()
    self.reflections['entering'] = flex.bool()
    self.reflections['id'] = flex.int()
    self.reflections["flags"] = flex.size_t()

    self.npanels = 2
    self.width = 1000
    self.height = 1000
    self.nrefl = 10000
    self.array_range = (0, 130)
    self.block_size = 20

    from random import randint, seed, choice
    seed(0)
    self.processed = [[] for i in range(12)]
    for i in range(self.nrefl):
      x0 = randint(0, self.width-10)
      y0 = randint(0, self.height-10)
      zs = randint(2, 9)
      x1 = x0 + randint(2, 10)
      y1 = y0 + randint(2, 10)
      for k, j in enumerate([10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120]):
        m = k + i * 12
        pos = choice(["left", "right", "centre"])
        if pos == 'left':
          z0 = j - zs
          z1 = j
        elif pos == 'right':
          z0 = j
          z1 = j + zs
        else:
          z0 = j - zs // 2
          z1 = j + zs // 2
        bbox = (x0, x1, y0, y1, z0, z1)
        self.reflections.append({
          "panel" : randint(0,1),
          "bbox" : bbox,
          "flags" : flex.reflection_table.flags.reference_spot
        })
        self.processed[k].append(m)
Example #10
    def _test_Imid_combinations(self):
        rows = []
        results = {}
        for Imid in self.Imids:
            combined_intensities = flex.double([])
            combined_sigmas = flex.double([])
            combined_scales = flex.double([])
            combined_indices = flex.miller_index([])
            for dataset in self.datasets:
                Int, Var = _get_Is_from_Imidval(dataset, Imid)
                Int *= dataset["prescaling_correction"]
                sigma = flex.sqrt(Var) * dataset["prescaling_correction"]
                combined_intensities.extend(Int)
                combined_sigmas.extend(sigma)
                combined_scales.extend(dataset["inverse_scale_factor"])
                combined_indices.extend(dataset["miller_index"])
            # apply scale factor before determining merging stats
            miller_set = miller.set(
                crystal_symmetry=self.active_scalers[0].experiment.crystal.
                get_crystal_symmetry(),
                indices=combined_indices,
                anomalous_flag=False,
            )
            i_obs = miller.array(miller_set,
                                 data=combined_intensities / combined_scales)
            i_obs.set_observation_type_xray_intensity()
            i_obs.set_sigmas(combined_sigmas / combined_scales)
            try:
                rmeas, cchalf = fast_merging_stats(array=i_obs)
                logger.debug("Imid: %s, Rmeas %s, cchalf %s", Imid, rmeas,
                             cchalf)
            except RuntimeError:
                raise DialsMergingStatisticsError(
                    "Unable to merge for intensity combination")

            # record the results
            results[Imid] = rmeas
            res_str = {0: "prf only", 1: "sum only"}
            if Imid not in res_str:
                res_str[Imid] = "Imid = " + str(round(Imid, 2))
            rows.append(
                [res_str[Imid],
                 str(round(cchalf, 5)),
                 str(round(rmeas, 5))])
        return rows, results
def generate_outlier_table():
    """Generate a reflection table for outlier testing."""
    rt = flex.reflection_table()
    rt["intensity"] = flex.double(
        [1.0, 1.0, 1.0, 1.0, 20.0, 1.0, 1.0, 20.0, 400.0, 500.0, 10.0, 10.0]
    )
    rt["variance"] = flex.double(12, 1.0)
    rt["inverse_scale_factor"] = flex.double(12, 1.0)
    rt["miller_index"] = flex.miller_index(
        [
            (0, 0, 1),
            (0, 0, 1),
            (0, 0, 1),
            (0, 0, 1),
            (0, 0, 1),
            (0, 0, 2),
            (0, 0, 2),
            (0, 0, 2),
            (0, 0, 2),
            (0, 0, 2),
            (0, 0, 23),
            (0, 0, 3),
        ]
    )
    rt.set_flags(
        flex.bool(
            [
                True,
                False,
                False,
                False,
                False,
                False,
                False,
                False,
                False,
                False,
                False,
                False,
            ]
        ),
        rt.flags.excluded_for_scaling,
    )
    rt.set_flags(flex.bool(12, False), rt.flags.user_excluded_in_scaling)
    return rt
Example #12
def many_to_asu_and_whether_positive(h, return_aso_only=True):
    """h is a tuple of 3-tuples"""
    sg = sgtbx.space_group(" P 4nw 2abw")  # SG 96!
    sym = crystal.symmetry(unit_cell=(79, 79, 38, 90, 90, 90),
                           space_group=sg)
    idx = flex.miller_index(h)
    mill_set_ano = miller.set(crystal_symmetry=sym, indices=idx, anomalous_flag=True)
    mill_set = miller.set(crystal_symmetry=sym, indices=idx, anomalous_flag=False)

    mill_asu_ano = mill_set_ano.map_to_asu()
    mill_asu = mill_set.map_to_asu()

    is_positive = mill_asu_ano.indices() == mill_asu.indices()

    if return_aso_only:
        return mill_asu_ano, is_positive
    else:
        return mill_asu_ano, mill_asu, is_positive
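
A short usage sketch of the function above, assuming cctbx (sgtbx, crystal, miller) and flex are imported as in the surrounding example; the indices are arbitrary and only illustrate the return types.

mill_asu_ano, is_positive = many_to_asu_and_whether_positive(
    ((1, 2, 3), (-1, -2, -3)))
print(list(mill_asu_ano.indices()))  # indices mapped to the anomalous ASU
print(list(is_positive))             # True where anomalous and non-anomalous ASU indices agree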
def test_sort():
  from dials.array_family import flex
  table = flex.reflection_table()
  table['a'] = flex.int([2, 4, 3, 1, 5])
  table['b'] = flex.vec2_double([(3, 2), (3, 1), (1, 3), (4, 5), (4, 3)])
  table['c'] = flex.miller_index([(3,2,1), (3,1,1), (2,4,2), (2,1,1), (1,1,1)])

  table.sort("a")
  assert list(table['a']) == [1, 2, 3, 4, 5]

  table.sort("b")
  assert list(table['b']) == [(1,3), (3,1), (3,2), (4,3), (4,5)]

  table.sort("c")
  assert list(table['c']) == [(1,1,1),(2,1,1),(2,4,2),(3,1,1),(3,2,1)]

  table.sort("c", order=(1,2,0))
  assert list(table['c']) == [(1, 1, 1), (2, 1, 1), (3, 1, 1), (3, 2, 1), (2, 4, 2)]
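
The order=(1, 2, 0) call above sorts the miller_index column by its second element, then third, then first; a plain-Python sketch (not the DIALS implementation) reproducing the asserted ordering:

indices = [(3, 2, 1), (3, 1, 1), (2, 4, 2), (2, 1, 1), (1, 1, 1)]
by_klh = sorted(indices, key=lambda hkl: (hkl[1], hkl[2], hkl[0]))
assert by_klh == [(1, 1, 1), (2, 1, 1), (3, 1, 1), (3, 2, 1), (2, 4, 2)]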
def generated_refl_to_scale():
    """Generate a reflection table for targeted scaling."""
    reflections = flex.reflection_table()
    reflections["intensity.prf.value"] = flex.double([2.0, 5.0, 2.0, 1.0])
    reflections["intensity.prf.variance"] = flex.double([2.0, 5.0, 2.0, 1.0])
    reflections["intensity.sum.value"] = flex.double([2.0, 5.0, 2.0, 1.0])
    reflections["intensity.sum.variance"] = flex.double([2.0, 5.0, 2.0, 1.0])
    reflections["miller_index"] = flex.miller_index(
        [(1, 0, 0), (0, 0, 1), (1, 0, 0), (10, 0, 0)]
    )  # don't change
    reflections["d"] = flex.double([1.0, 2.0, 1.0, (4.0 / 3.0) ** 0.5])
    reflections["partiality"] = flex.double([1.0, 1.0, 1.0, 1.0])
    reflections.set_flags(
        flex.bool([True, True, True, True]), reflections.flags.integrated
    )
    reflections["id"] = flex.int(4, 1)
    reflections.experiment_identifiers()[1] = str(1)
    return reflections
Example #15
def prf_sum_refl_to_filter():
    """Generate a separate reflection table for filtering"""
    reflections = flex.reflection_table()
    reflections["partiality"] = flex.double(5, 1.0)
    reflections["id"] = flex.int(reflections.size(), 0)
    reflections.experiment_identifiers()[0] = "0"
    reflections["intensity.sum.value"] = flex.double([1.0, 2.0, 3.0, 4.0, 5.0])
    reflections["intensity.sum.variance"] = flex.double(5, 1.0)
    reflections["intensity.prf.value"] = flex.double([11.0, 12.0, 13.0, 14.0, 15.0])
    reflections["intensity.prf.variance"] = flex.double(5, 1.0)
    reflections["miller_index"] = flex.miller_index([(0, 0, 1)] * 5)
    reflections.set_flags(
        flex.bool([False, False, True, True, True]), reflections.flags.integrated_sum,
    )
    reflections.set_flags(
        flex.bool([True, False, False, True, True]), reflections.flags.integrated_prf,
    )
    return reflections
def import_spot_xds(spot_xds):
    sx = SpotXds(spot_xds)
    spots = [x for x in sx.items
             if x[-1][0] is not None and not (x[-1][0] == x[-1][1] == x[-1][2] == 0)]

    # reference: dials/command_line/import_xds.py
    table = flex.reflection_table()
    table["id"] = flex.int(len(spots), 0)
    table["panel"] = flex.size_t(len(spots), 0) # only assuming single panel
    table["miller_index"] = flex.miller_index(map(lambda x: x[-1], spots))
    table["xyzobs.px.value"] = flex.vec3_double(map(lambda x: x[0], spots))
    table["flags"] = flex.size_t(len(table), table.flags.indexed | table.flags.strong)
    

    # dummy
    table["xyzobs.px.variance"] = flex.vec3_double(len(table), (1,1,1)) # TODO appropriate variance value
    table["s1"] = flex.vec3_double(len(table), (0,0,0)) # will be updated by set_obs_s1()

    return table
    def generate_reflections(self):
        """Use reeke_model to generate indices of reflections near to the Ewald
        sphere that might be observed on a still image. Build a reflection_table
        of these."""
        from cctbx.sgtbx import space_group_info

        space_group_type = space_group_info("P 1").group().type()

        # create a ReekeIndexGenerator
        UB = self.crystal.get_A()
        axis = self.goniometer.get_rotation_axis()
        s0 = self.beam.get_s0()
        dmin = 1.5
        # use the same UB at the beginning and end - the margin parameter ensures
        # we still have indices close to the Ewald sphere generated
        from dials.algorithms.spot_prediction import ReekeIndexGenerator

        r = ReekeIndexGenerator(UB,
                                UB,
                                space_group_type,
                                axis,
                                s0,
                                dmin=1.5,
                                margin=1)

        # generate indices
        hkl = r.to_array()
        nref = len(hkl)

        # create a reflection table
        from dials.array_family import flex

        table = flex.reflection_table()
        table["flags"] = flex.size_t(nref, 0)
        table["id"] = flex.int(nref, 0)
        table["panel"] = flex.size_t(nref, 0)
        table["miller_index"] = flex.miller_index(hkl)
        table["entering"] = flex.bool(nref, True)
        table["s1"] = flex.vec3_double(nref)
        table["xyzcal.mm"] = flex.vec3_double(nref)
        table["xyzcal.px"] = flex.vec3_double(nref)

        return table
Example #18
    def __init__(self):
        from os.path import join
        from dials.array_family import flex
        import libtbx.load_env
        try:
            dials_regression = libtbx.env.dist_path('dials_regression')
        except KeyError:
            print('FAIL: dials_regression not configured')
            exit(0)

        self.path = join(dials_regression, "centroid_test_data")

        table = flex.reflection_table()
        table['hkl'] = flex.miller_index(360)
        table['id'] = flex.int(360)
        table['intensity.sum.value'] = flex.double(360)
        table.as_pickle("temp1.pickle")
        table.as_pickle("temp2.pickle")
Example #19
def small_reflection_table():
    """Generate reflection table to test the basis and target function."""
    # these miller_idx/d_values don't make physical sense, but I didn't want to
    # have to write the tests for lots of reflections.
    reflections = flex.reflection_table()
    reflections["intensity"] = flex.double([75.0, 10.0, 100.0, 50.0, 65.0])
    reflections["variance"] = flex.double([50.0, 10.0, 100.0, 50.0, 65.0])
    reflections["inverse_scale_factor"] = flex.double(5, 1.0)
    reflections["miller_index"] = flex.miller_index([(1, 0, 0), (0, 0, 1),
                                                     (1, 0, 0), (0, 0, 1),
                                                     (0, 0, 2)
                                                     ])  # don't change
    reflections["d"] = flex.double([2.0, 0.8, 2.0, 0.8, 1.5])  # don't change
    reflections["xyzobs.px.value"] = flex.vec3_double([
        (0.0, 0.0, i) for i in [0.0, 5.0, 10.0, 2.0, 8.0]
    ])
    reflections["s1"] = flex.vec3_double([(0.0, 0.1, 1.0)] * 2 +
                                         [(0.0, 0.1, 20.0)] * 3)
    return reflections
Example #20
def test_default_sort_on_miller_index(dials_regression, tmpdir):
    tmpdir.chdir()

    result = procrunner.run_process([
        'dev.dials.sort_reflections',
        os.path.join(dials_regression, "centroid_test_data",
                     "integrated.pickle"), 'output=sorted3.pickle'
    ])
    assert result['exitcode'] == 0
    assert result['stderr'] == ''
    assert os.path.exists("sorted3.pickle")

    from dials.array_family import flex
    data = flex.reflection_table.from_pickle("sorted3.pickle")
    mi1 = data['miller_index']
    orig = flex.reflection_table.from_pickle(
        os.path.join(dials_regression, "centroid_test_data",
                     "integrated.pickle"))
    mi2 = flex.miller_index(sorted(orig['miller_index']))
    assert mi1.all_eq(mi2)
Example #21
def test_sort():
    table = flex.reflection_table()
    table["a"] = flex.int([2, 4, 3, 1, 5])
    table["b"] = flex.vec2_double([(3, 2), (3, 1), (1, 3), (4, 5), (4, 3)])
    table["c"] = flex.miller_index([(3, 2, 1), (3, 1, 1), (2, 4, 2), (2, 1, 1),
                                    (1, 1, 1)])

    table.sort("a")
    assert list(table["a"]) == [1, 2, 3, 4, 5]

    table.sort("b")
    assert list(table["b"]) == [(1, 3), (3, 1), (3, 2), (4, 3), (4, 5)]

    table.sort("c")
    assert list(table["c"]) == [(1, 1, 1), (2, 1, 1), (2, 4, 2), (3, 1, 1),
                                (3, 2, 1)]

    table.sort("c", order=(1, 2, 0))
    assert list(table["c"]) == [(1, 1, 1), (2, 1, 1), (3, 1, 1), (3, 2, 1),
                                (2, 4, 2)]
Example #22
 def __init__(self, n_groups: int, n_refl: int, n_datasets: int = 1):
     """Create empty datastructures to which data can later be added."""
     self.Ih_table = pd.DataFrame()
     self.block_selections = [None] * n_datasets
     self.h_index_matrix = sparse.matrix(n_refl, n_groups)
     self._setup_info = {
         "next_row": 0,
         "next_dataset": 0,
         "setup_complete": False
     }
     self.dataset_info = {}
     self.n_datasets = n_datasets
     self.h_expand_matrix = None
     self.derivatives = None
     self.binner = None
     self._csc_rows = np.array([], dtype=np.uint64).reshape((0, ))
     self._csc_cols = np.array([], dtype=np.uint64).reshape((0, ))
     self._csc_h_index_matrix = None
     self._csc_h_expand_matrix = None
     self._hkl = flex.miller_index([])
def test_default_sort_on_miller_index(dials_data, tmpdir):
    result = procrunner.run(
        [
            "dials.sort_reflections",
            dials_data("centroid_test_data").join("integrated.pickle").strpath,
            "output=sorted3.refl",
        ],
        working_directory=tmpdir.strpath,
    )
    assert not result.returncode and not result.stderr
    assert tmpdir.join("sorted3.refl").check(file=1)

    from dials.array_family import flex

    data = flex.reflection_table.from_file(tmpdir.join("sorted3.refl").strpath)
    mi1 = data["miller_index"]
    orig = flex.reflection_table.from_file(
        dials_data("centroid_test_data").join("integrated.pickle").strpath)
    mi2 = flex.miller_index(sorted(orig["miller_index"]))
    assert mi1.all_eq(mi2)
Example #24
    def __init__(self, experiments):
        """
        Do the labelling

        """
        from dials.algorithms.spot_prediction import PixelToMillerIndex
        from collections import defaultdict
        from math import floor, sqrt
        from dials.array_family import flex

        # Get the experiment
        experiment = experiments[0]

        # Get the image size
        xsize, ysize = experiment.detector[0].get_image_size()

        # A class to map pixels to miller indices
        transform = PixelToMillerIndex(
            experiment.beam, experiment.detector, experiment.crystal
        )

        # For each pixel, assign to a miller index and also compute the distance
        reflections = defaultdict(list)
        for j in range(ysize):
            for i in range(xsize):
                h = transform.h(0, i, j)
                h0 = tuple(map(lambda x: int(floor(x + 0.5)), h))
                d = sqrt(sum(map(lambda x, y: (x - y) ** 2, h, h0)))
                reflections[h0].append((i, j, d))

        # Initialise arrays
        self._indices = flex.miller_index()
        self._distance = flex.double(flex.grid(ysize, xsize))
        self._label = flex.int(flex.grid(ysize, xsize))
        self._pixels = defaultdict(list)
        for index, (h, pixels) in enumerate(reflections.items()):
            self._indices.append(h)
            for i, j, d in pixels:
                self._distance[j, i] = d
                self._label[j, i] = index
                self._pixels[h].append((i, j))
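
A small worked example (hypothetical numbers) of the fractional-index distance computed in the pixel loop above: each pixel is assigned to the nearest integer Miller index h0 and d measures how far the fractional index h falls from it.

from math import floor, sqrt

h = (1.2, 0.9, 3.4)                                 # hypothetical fractional index
h0 = tuple(int(floor(x + 0.5)) for x in h)          # nearest integer index
d = sqrt(sum((x - y) ** 2 for x, y in zip(h, h0)))  # distance in index units
assert h0 == (1, 1, 3)
assert abs(d - sqrt(0.04 + 0.01 + 0.16)) < 1e-12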
def test_outlier_rejection_with_small_outliers():

    rt = flex.reflection_table()
    rt["intensity"] = flex.double(
        [3560.84231, 3433.66407, 3830.64235, 0.20552, 3786.59537] +
        [4009.98652, 0.00000, 3578.91470, 3549.19151, 3379.58616] +
        [3686.38610, 3913.42869, 0.00000, 3608.84869, 3681.11110])
    rt["variance"] = flex.double(
        [10163.98104, 9577.90389, 9702.84868, 3.77427, 8244.70685] +
        [9142.38221, 1.51118, 9634.53782, 9870.73103, 9078.23488] +
        [8977.26984, 8712.91360, 1.78802, 7473.26521, 10075.49862])
    rt["inverse_scale_factor"] = flex.double(rt.size(), 1.0)
    rt["miller_index"] = flex.miller_index([(0, 0, 1)] * rt.size())
    expected_outliers = [3, 6, 12]

    OutlierRej = NormDevOutlierRejection(IhTable([rt], space_group("P 1")),
                                         zmax=6.0)
    OutlierRej.run()
    outliers = OutlierRej.final_outlier_arrays
    assert len(outliers) == 1
    assert set(outliers[0]) == set(expected_outliers)
Example #26
  def tst_sort(self):

    from dials.array_family import flex
    table = flex.reflection_table()
    table['a'] = flex.int([2, 4, 3, 1, 5])
    table['b'] = flex.vec2_double([(3, 2), (3, 1), (1, 3), (4, 5), (4, 3)])
    table['c'] = flex.miller_index([(3,2,1), (3,1,1), (2,4,2), (2,1,1), (1,1,1)])

    table.sort("a")
    assert list(table['a']) == [1, 2, 3, 4, 5]

    table.sort("b")
    assert list(table['b']) == [(1,3), (3,1), (3,2), (4,3), (4,5)]

    table.sort("c")
    assert list(table['c']) == [(1,1,1),(2,1,1),(2,4,2),(3,1,1),(3,2,1)]

    table.sort("c", order=(1,2,0))
    assert list(table['c']) == [(1, 1, 1), (2, 1, 1), (3, 1, 1), (3, 2, 1), (2, 4, 2)]

    print "OK"
def test_select_highly_connected_reflections_in_bin():
    """Test the single-bin selection algorithm."""
    r1 = flex.reflection_table()
    n_list = [3, 3, 2, 1, 1, 2, 2]
    miller_indices = [[(0, 0, i + 1)] * n for i, n in enumerate(n_list)]
    r1["miller_index"] = flex.miller_index(
        list(itertools.chain.from_iterable(miller_indices)))
    r1["class_index"] = flex.int([0, 1, 1, 0, 1, 2, 0, 0, 2, 1, 1, 2, 0, 1])
    r1["intensity"] = flex.double(sum(n_list), 1)
    r1["variance"] = flex.double(sum(n_list), 1)
    r1["inverse_scale_factor"] = flex.double(sum(n_list), 1)

    sg = sgtbx.space_group("P1")
    Ih_table_block = IhTable([r1], sg).Ih_table_blocks[0]
    Ih_table_block.Ih_table["class_index"] = r1["class_index"].select(
        Ih_table_block.Ih_table["loc_indices"])

    indices, total_in_classes = select_highly_connected_reflections_in_bin(
        Ih_table_block, min_per_class=2, min_total=6, max_total=100)
    assert list(total_in_classes) == [2, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    assert list(indices) == [0, 1, 2, 3, 4, 5, 10, 11]
Example #28
def generated_refl():
    """Generate a reflection table."""
    # these miller_idx/d_values don't make physical sense, but I didn't want to
    # have to write the tests for lots of reflections.
    reflections = flex.reflection_table()
    reflections["intensity.prf.value"] = flex.double([1.0, 10.0, 10.0, 1.0, 2.0])
    reflections["intensity.prf.variance"] = flex.double([1.0, 10.0, 10.0, 1.0, 2.0])
    reflections["intensity.sum.value"] = flex.double([10.0, 100.0, 100.0, 10.0, 10.0])
    reflections["intensity.sum.variance"] = flex.double(
        [10.0, 100.0, 100.0, 10.0, 10.0]
    )
    reflections["miller_index"] = flex.miller_index(
        [(1, 0, 0), (0, 0, 1), (1, 0, 0), (0, 0, 1), (0, 0, 2)]
    )  # don't change
    reflections["d"] = flex.double([0.8, 2.0, 0.8, 2.0, 1.2])  # don't change
    reflections["lp"] = flex.double(5, 1.0)
    reflections["partiality"] = flex.double([1.0, 1.0, 1.0, 1.0, 0.8])
    reflections["xyzobs.px.value"] = flex.vec3_double(
        [
            (0.0, 0.0, 0.0),
            (0.0, 0.0, 5.0),
            (0.0, 0.0, 10.0),
            (0.0, 0.0, 10.0),
            (0.0, 0.0, 7.5),
        ]
    )
    reflections["s1"] = flex.vec3_double(
        [
            (0.0, 0.1, 1.0),
            (0.0, 0.1, 1.0),
            (0.0, 0.1, 1.0),
            (0.0, 0.1, 1.0),
            (0.0, 0.1, 1.0),
        ]
    )
    reflections.set_flags(flex.bool(5, True), reflections.flags.integrated)
    reflections["id"] = flex.int(5, 0)
    reflections["partial_id"] = flex.int(range(0, 5))
    reflections.experiment_identifiers()[0] = str(0)
    return reflections
def test_as_miller_array():
    table = flex.reflection_table()
    table["intensity.1.value"] = flex.double([1.0, 2.0, 3.0])
    table["intensity.1.variance"] = flex.double([0.25, 1.0, 4.0])
    table["miller_index"] = flex.miller_index([(1, 0, 0), (2, 0, 0), (3, 0, 0)])

    crystal = Crystal(
        real_space_a=(10, 0, 0),
        real_space_b=(0, 11, 0),
        real_space_c=(0, 0, 12),
        space_group=sgtbx.space_group_info("P 222").group(),
    )
    experiment = Experiment(crystal=crystal)

    iobs = table.as_miller_array(experiment, intensity="1")
    assert list(iobs.data()) == list(table["intensity.1.value"])
    assert list(iobs.sigmas()) == list(flex.sqrt(table["intensity.1.variance"]))

    with pytest.raises(KeyError):
        _ = table.as_miller_array(experiment, intensity="2")
    table["intensity.2.value"] = flex.double([1.0, 2.0, 3.0])
    with pytest.raises(KeyError):
        _ = table.as_miller_array(experiment, intensity="2")
Example #30
    def index(self, reflections):

        # FIXME allow for the fact that there could be > 1 lattice on here to
        # e.g. assign index over small spherical radius

        miller_index = flex.miller_index()
        UB = matrix.sqr(self.crystal.get_A())
        UBi = UB.inverse()

        self.qobs = []

        for refl in reflections:
            x, y, z = refl["xyzobs.px.value"]
            p = matrix.col(self.panel.get_pixel_lab_coord((x, y)))
            q = p.normalize() / self.wavelength - self.s0
            self.qobs.append(q)
            hkl = UBi * q
            ihkl = [int(round(h)) for h in hkl]
            miller_index.append(ihkl)

        reflections["miller_index"] = miller_index
        self.data = reflections
        return reflections
def test_match_by_hkle():
    nn = 10

    h = flex.int([n % nn for n in range(nn)])
    k = flex.int([(n + 2) % nn for n in range(nn)])
    l = flex.int([(n + 4) % nn for n in range(nn)])
    e = flex.int([n % 2 for n in range(nn)])

    hkl = flex.miller_index(h, k, l)

    t0 = flex.reflection_table()
    t0["miller_index"] = hkl
    t0["entering"] = e

    i = list(range(nn))
    random.shuffle(i)
    t1 = t0.select(flex.size_t(i))

    # because t0.match_by_hkle(t1) will give the _inverse_ to i
    n0, n1 = t1.match_by_hkle(t0)

    assert list(n0) == list(range(nn))
    assert list(n1) == i
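
The "inverse" remark in the comment above can be checked with a short pure-Python sketch (not DIALS code): matching back in the opposite direction would recover the permutation that undoes the shuffle i.

import random

nn = 10
i = list(range(nn))
random.shuffle(i)

# inverse permutation: inv[i[j]] == j for every j
inv = [0] * nn
for j, p in enumerate(i):
    inv[p] = j
assert [i[k] for k in inv] == list(range(nn))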
Example #32
def generated_refl():
    """Generate reflection table to test the basis and target function."""
    # these miller_idx/d_values don't make physical sense, but I didn't want to
    # have to write the tests for lots of reflections.
    reflections = flex.reflection_table()
    reflections["intensity.prf.value"] = flex.double(
        [75.0, 10.0, 100.0, 50.0, 65.0])
    reflections["intensity.prf.variance"] = flex.double(
        [50.0, 10.0, 100.0, 50.0, 65.0])
    reflections["miller_index"] = flex.miller_index([(1, 0, 0), (0, 0, 1),
                                                     (1, 0, 0), (0, 0, 1),
                                                     (0, 0, 2)
                                                     ])  # don't change
    reflections["d"] = flex.double([2.0, 0.8, 2.0, 0.8, 1.5])  # don't change
    # reflections['lp'] = flex.double([1.0, 1.0, 1.0])
    # reflections['dqe'] = flex.double([1.0, 1.0, 1.0])
    # reflections['partiality'] = flex.double([1.0, 1.0, 1.0])
    reflections["xyzobs.px.value"] = flex.vec3_double([
        (0.0, 0.0, 0.0),
        (0.0, 0.0, 5.0),
        (0.0, 0.0, 10.0),
        (0.0, 0.0, 2.0),
        (0.0, 0.0, 8.0),
    ])
    reflections["s1"] = flex.vec3_double([
        (0.0, 0.1, 1.0),
        (0.0, 0.1, 1.0),
        (0.0, 0.1, 20.0),
        (0.0, 0.1, 20.0),
        (0.0, 0.1, 20.0),
    ])
    reflections.set_flags(flex.bool([True, True, True, True, True]),
                          reflections.flags.integrated)
    reflections["id"] = flex.int(5, 0)
    reflections.experiment_identifiers()[0] = str(0)
    return [reflections]
Example #33
def refls_to_hkl(refls,
                 detector,
                 beam,
                 crystal,
                 update_table=False,
                 returnQ=False):
    """
    convert pixel panel reflections to miller index data

    :param refls:  reflecton table for a panel or a tuple of (x,y)
    :param detector:  dxtbx detector model
    :param beam:  dxtbx beam model
    :param crystal: dxtbx crystal model
    :param update_table: whether to update the refltable
    :param returnQ: whether to return intermediately computed q vectors
    :return: if as_numpy two Nx3 numpy arrays are returned
        (one for fractional and one for whole HKL)
        else dictionary of hkl_i (nearest) and hkl (fractional)
    """
    if 'rlp' not in list(refls.keys()):
        q_vecs = refls_to_q(refls, detector, beam, update_table=update_table)
    else:
        q_vecs = np.vstack([r['rlp'] for r in refls])
    Ai = sqr(crystal.get_A()).inverse()
    Ai = Ai.as_numpy_array()
    HKL = np.dot(Ai, q_vecs.T)
    HKLi = [np.ceil(h - 0.5).astype(int) for h in HKL]  # list, not iterator: reused below
    if update_table:
        refls['miller_index'] = flex.miller_index(len(refls), (0, 0, 0))
        mil_idx = flex.vec3_int(tuple(map(tuple, np.vstack(HKLi).T)))
        for i in range(len(refls)):
            refls['miller_index'][i] = mil_idx[i]
    if returnQ:
        return np.vstack(HKL).T, np.vstack(HKLi).T, q_vecs
    else:
        return np.vstack(HKL).T, np.vstack(HKLi).T
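
A numpy-only sketch (hypothetical diagonal A matrix and scattering vector) of the rounding convention used above: HKL = A^-1 * q gives the fractional Miller index and ceil(h - 0.5) rounds each component to the nearest integer.

import numpy as np

A = np.diag([0.1, 0.05, 0.02])       # hypothetical setting matrix
q = np.array([0.31, 0.12, 0.058])    # hypothetical scattering vector

hkl_frac = np.linalg.inv(A).dot(q)   # fractional Miller index
hkl_int = np.ceil(hkl_frac - 0.5).astype(int)
assert np.allclose(hkl_frac, [3.1, 2.4, 2.9])
assert list(hkl_int) == [3, 2, 3]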
Example #34
def test_reflection_manager():
    from dials.array_family import flex

    reflections = flex.reflection_table()
    reflections["panel"] = flex.size_t()
    reflections["bbox"] = flex.int6()
    reflections["miller_index"] = flex.miller_index()
    reflections["s1"] = flex.vec3_double()
    reflections["xyzcal.px"] = flex.vec3_double()
    reflections["xyzcal.mm"] = flex.vec3_double()
    reflections["entering"] = flex.bool()
    reflections["id"] = flex.int()
    reflections["flags"] = flex.size_t()

    width = 1000
    height = 1000
    nrefl = 10000
    array_range = (0, 130)
    block_size = 20
    block_overlap = 10

    random.seed(0)
    processed = [[] for i in range(12)]
    for i in range(nrefl):
        x0 = random.randint(0, width - 10)
        y0 = random.randint(0, height - 10)
        zs = random.randint(2, 9)
        x1 = x0 + random.randint(2, 10)
        y1 = y0 + random.randint(2, 10)
        for k, j in enumerate(
            [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120]):
            m = k + i * 12
            pos = random.choice(["left", "right", "centre"])
            if pos == "left":
                z0 = j - zs
                z1 = j
            elif pos == "right":
                z0 = j
                z1 = j + zs
            else:
                z0 = j - zs // 2
                z1 = j + zs // 2
            bbox = (x0, x1, y0, y1, z0, z1)
            reflections.append({
                "panel": random.randint(0, 1),
                "bbox": bbox,
                "flags": flex.reflection_table.flags.reference_spot,
            })
            processed[k].append(m)

        # Add reflection to ignore
        # zc = random.choice([10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120])
        # z0 = zc - 11
        # z1 = zc + 11
        # bbox = (x0, x1, y0, y1, z0, z1)
        # reflections.append({
        #   "panel" : randint(0,1),
        #   "bbox" : bbox,
        #   "flags" : flex.reflection_table.flags.reference_spot
        # })

    from dials.algorithms.integration.integrator import JobList, ReflectionManager

    jobs = JobList()
    jobs.add((0, 1), array_range, block_size, block_overlap)

    # Create the executor
    executor = ReflectionManager(jobs, reflections)

    # Ensure the tasks make sense
    jobs = [executor.job(i) for i in range(len(executor))]
    assert len(executor) == 12
    assert not executor.finished()
    assert len(jobs) == 12
    assert jobs[0].frames() == (0, 20)
    assert jobs[1].frames() == (10, 30)
    assert jobs[2].frames() == (20, 40)
    assert jobs[3].frames() == (30, 50)
    assert jobs[4].frames() == (40, 60)
    assert jobs[5].frames() == (50, 70)
    assert jobs[6].frames() == (60, 80)
    assert jobs[7].frames() == (70, 90)
    assert jobs[8].frames() == (80, 100)
    assert jobs[9].frames() == (90, 110)
    assert jobs[10].frames() == (100, 120)
    assert jobs[11].frames() == (110, 130)

    # Get the task specs
    data0 = executor.split(0)
    data1 = executor.split(1)
    data2 = executor.split(2)
    data3 = executor.split(3)
    data4 = executor.split(4)
    data5 = executor.split(5)
    data6 = executor.split(6)
    data7 = executor.split(7)
    data8 = executor.split(8)
    data9 = executor.split(9)
    data10 = executor.split(10)
    data11 = executor.split(11)
    assert len(data0) == len(processed[0])
    assert len(data1) == len(processed[1])
    assert len(data2) == len(processed[2])
    assert len(data3) == len(processed[3])
    assert len(data4) == len(processed[4])
    assert len(data5) == len(processed[5])
    assert len(data6) == len(processed[6])
    assert len(data7) == len(processed[7])
    assert len(data8) == len(processed[8])
    assert len(data9) == len(processed[9])
    assert len(data10) == len(processed[10])
    assert len(data11) == len(processed[11])

    # Add some results
    data0["data"] = flex.double(len(data0), 1)
    data1["data"] = flex.double(len(data1), 2)
    data2["data"] = flex.double(len(data2), 3)
    data3["data"] = flex.double(len(data3), 4)
    data4["data"] = flex.double(len(data4), 5)
    data5["data"] = flex.double(len(data5), 6)
    data6["data"] = flex.double(len(data6), 7)
    data7["data"] = flex.double(len(data7), 8)
    data8["data"] = flex.double(len(data8), 9)
    data9["data"] = flex.double(len(data9), 10)
    data10["data"] = flex.double(len(data10), 11)
    data11["data"] = flex.double(len(data11), 12)

    # Accumulate the data again
    assert not executor.finished()
    executor.accumulate(0, data0)
    executor.accumulate(1, data1)
    executor.accumulate(2, data2)
    executor.accumulate(3, data3)
    executor.accumulate(4, data4)
    executor.accumulate(5, data5)
    executor.accumulate(6, data6)
    executor.accumulate(7, data7)
    executor.accumulate(8, data8)
    executor.accumulate(9, data9)
    executor.accumulate(10, data10)
    executor.accumulate(11, data11)
    assert executor.finished()

    # Get results and check they're as expected
    data = executor.data()
    result = data["data"]
    for i in range(len(processed)):
        for j in range(len(processed[i])):
            assert result[processed[i][j]] == i + 1
Example #35
def test_to_from_msgpack(tmpdir):
    from dials.model.data import Shoebox

    def gen_shoebox():
        shoebox = Shoebox(0, (0, 4, 0, 3, 0, 1))
        shoebox.allocate()
        for k in range(1):
            for j in range(3):
                for i in range(4):
                    shoebox.data[k, j, i] = i + j + k + 0.1
                    shoebox.mask[k, j, i] = i % 2
                    shoebox.background[k, j, i] = i * j + 0.2
        return shoebox

    def compare(a, b):
        assert a.is_consistent()
        assert b.is_consistent()
        assert a.panel == b.panel
        assert a.bbox == b.bbox
        for aa, bb in zip(a.data, b.data):
            if abs(aa - bb) > 1e-9:
                return False
        for aa, bb in zip(a.background, b.background):
            if abs(aa - bb) > 1e-9:
                return False
        for aa, bb in zip(a.mask, b.mask):
            if aa != bb:
                return False
        return True

    # The columns as lists
    c1 = list(range(10))
    c2 = list(range(10))
    c3 = ["a", "b", "c", "d", "e", "f", "g", "i", "j", "k"]
    c4 = [True, False, True, False, True] * 2
    c5 = list(range(10))
    c6 = [(i + 1, i + 2) for i in range(10)]
    c7 = [(i + 1, i + 2, i + 3) for i in range(10)]
    c8 = [tuple(i + j for j in range(9)) for i in range(10)]
    c9 = [tuple(i + j for j in range(6)) for i in range(10)]
    c10 = [(i + 1, i + 2, i + 3) for i in range(10)]
    c11 = [gen_shoebox() for i in range(10)]

    # Create a table with some elements
    table = flex.reflection_table()
    table["col1"] = flex.int(c1)
    table["col2"] = flex.double(c2)
    table["col3"] = flex.std_string(c3)
    table["col4"] = flex.bool(c4)
    table["col5"] = flex.size_t(c5)
    table["col6"] = flex.vec2_double(c6)
    table["col7"] = flex.vec3_double(c7)
    table["col8"] = flex.mat3_double(c8)
    table["col9"] = flex.int6(c9)
    table["col10"] = flex.miller_index(c10)
    table["col11"] = flex.shoebox(c11)

    obj = table.as_msgpack()
    new_table = flex.reflection_table.from_msgpack(obj)
    assert new_table.is_consistent()
    assert new_table.nrows() == 10
    assert new_table.ncols() == 11
    assert all(tuple(a == b for a, b in zip(new_table["col1"], c1)))
    assert all(tuple(a == b for a, b in zip(new_table["col2"], c2)))
    assert all(tuple(a == b for a, b in zip(new_table["col3"], c3)))
    assert all(tuple(a == b for a, b in zip(new_table["col4"], c4)))
    assert all(tuple(a == b for a, b in zip(new_table["col5"], c5)))
    assert all(tuple(a == b for a, b in zip(new_table["col6"], c6)))
    assert all(tuple(a == b for a, b in zip(new_table["col7"], c7)))
    assert all(tuple(a == b for a, b in zip(new_table["col8"], c8)))
    assert all(tuple(a == b for a, b in zip(new_table["col9"], c9)))
    assert all(tuple(a == b for a, b in zip(new_table["col10"], c10)))
    assert all(tuple(compare(a, b) for a, b in zip(new_table["col11"], c11)))

    table.as_msgpack_file(tmpdir.join("reflections.mpack").strpath)
    new_table = flex.reflection_table.from_msgpack_file(
        tmpdir.join("reflections.mpack").strpath)
    assert new_table.is_consistent()
    assert new_table.nrows() == 10
    assert new_table.ncols() == 11
    assert all(tuple(a == b for a, b in zip(new_table["col1"], c1)))
    assert all(tuple(a == b for a, b in zip(new_table["col2"], c2)))
    assert all(tuple(a == b for a, b in zip(new_table["col3"], c3)))
    assert all(tuple(a == b for a, b in zip(new_table["col4"], c4)))
    assert all(tuple(a == b for a, b in zip(new_table["col5"], c5)))
    assert all(tuple(a == b for a, b in zip(new_table["col6"], c6)))
    assert all(tuple(a == b for a, b in zip(new_table["col7"], c7)))
    assert all(tuple(a == b for a, b in zip(new_table["col8"], c8)))
    assert all(tuple(a == b for a, b in zip(new_table["col9"], c9)))
    assert all(tuple(a == b for a, b in zip(new_table["col10"], c10)))
    assert all(tuple(compare(a, b) for a, b in zip(new_table["col11"], c11)))
Example #36
def offset_miller_indices(indices, offset):
    from dials.array_family import flex

    return flex.miller_index(
        *[mi.iround() for mi in (indices.as_vec3_double() + offset).parts()])
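
A short usage sketch of the helper above, mirroring the offset test at the top of this collection:

from dials.array_family import flex

mi = flex.miller_index([(1, 2, 3), (0, 0, 0)])
omi = offset_miller_indices(mi, (0, 1, -1))
assert list(omi) == [(1, 3, 2), (0, 1, -1)]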
Example #37
    def get_data(self, decoder):
        '''Get the model data using the supplied decoder.'''
        return decoder.decode(self._handle)

    def set_reflections(self, reflections):
        '''Set the reflection data.'''
        self.set_data(reflections, ReflectionListEncoder())

    def get_reflections(self):
        '''Get the reflection data.'''
        return self.get_data(ReflectionListDecoder())


if __name__ == '__main__':
    from dials.array_family import flex
    reflections = flex.reflection_table([('hkl', flex.miller_index(10)),
                                         ('s1', flex.vec3_double(10)),
                                         ('bbox', flex.int6(10)),
                                         ('id', flex.int(10)),
                                         ('shoebox', flex.shoebox(10))])

    for i in range(10):
        reflections['shoebox'][i].data = flex.double(flex.grid(10, 10, 10))
        reflections['shoebox'][i].mask = flex.int(flex.grid(10, 10, 10))
        reflections['shoebox'][i].background = flex.double(
            flex.grid(10, 10, 10))

    for i in range(10):
        print(reflections['shoebox'][i].data.all())

    writer = NexusFile('temp.h5', 'w')
def integrate(experiment):
    from dials.algorithms.spot_prediction import PixelToMillerIndex
    from dials.array_family import flex
    from math import floor, sqrt
    from collections import defaultdict

    detector = experiment.detector
    assert len(detector) == 1
    panel = detector[0]

    xsize, ysize = panel.get_image_size()

    transform = PixelToMillerIndex(experiment.beam, experiment.detector,
                                   experiment.crystal)

    data = experiment.imageset.get_raw_data(0)[0]

    mask = flex.bool(flex.grid(ysize, xsize), False)
    reflections = defaultdict(list)

    print("Doing pixel labelling")
    for j in range(ysize):
        for i in range(xsize):
            h = transform.h(0, i, j)
            h0 = tuple(map(lambda x: int(floor(x + 0.5)), h))

            d = sqrt(sum(map(lambda x, y: (x - y)**2, h, h0)))
            # if not hasattr(reflections[h0], "xd"):
            #   reflections[h0].xd = d
            #   reflections[h0].xc = i
            #   reflections[h0].yc = j
            # elif reflections[h0].xd > d:
            #   reflections[h0].xd = d
            #   reflections[h0].xc = i
            #   reflections[h0].yc = j

            if d < 0.3:
                mask[j, i] = True
            reflections[h0].append((j, i))

    # from matplotlib import pylab
    # #pylab.imshow(mask.as_numpy_array(), interpolation='none')
    # pylab.show()

    print("Integrating reflections")
    miller_index = flex.miller_index()
    intensity = flex.double()
    variance = flex.double()
    bbox = flex.int6()
    xyz = flex.vec3_double()
    for h, r in reflections.items():

        # xc = r.xc
        # yc = r.yc

        b_sum = 0
        f_sum = 0
        b_cnt = 0
        f_cnt = 0
        for i in range(len(r)):
            y, x = r[i]
            m = mask[y, x]
            if data[y, x] >= 0:
                if m:
                    f_sum += data[y, x]
                    f_cnt += 1
                else:
                    b_sum += data[y, x]
                    b_cnt += 1
        Y, X = zip(*r)
        x0, x1, y0, y1 = min(X), max(X), min(Y), max(Y)
        if f_cnt > 0 and b_cnt > 0:
            B = b_sum / b_cnt
            I = f_sum - B * f_cnt
            V = f_sum + B * (1 + f_cnt / b_cnt)
            miller_index.append(h)
            intensity.append(I)
            variance.append(V)
            bbox.append((x0, x1, y0, y1, 0, 1))
            # xyz.append((xc, yc, 0))

    print("Integrated %d reflections" % len(reflections))
    print(flex.min(intensity), flex.max(intensity), flex.mean(intensity))
    reflections = flex.reflection_table()
    reflections["miller_index"] = miller_index
    reflections["intensity.sum.value"] = intensity
    reflections["intensity.sum.variance"] = variance
    reflections["bbox"] = bbox
    reflections["panel"] = flex.size_t(len(reflections), 0)
    reflections["id"] = flex.size_t(len(reflections), 0)
    # reflections["xyzcal.px"] = xyz
    # reflections["xyzobs.px"] = xyz
    reflections.set_flags(flex.size_t(range(len(reflections))),
                          reflections.flags.integrated_sum)
    return reflections
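
A worked arithmetic sketch (hypothetical counts) of the background-subtracted summation integration above: B is the mean background per pixel, I subtracts that background over the foreground pixels, and V propagates Poisson-like variance from both regions.

f_sum, f_cnt = 120.0, 40   # hypothetical foreground counts and pixel count
b_sum, b_cnt = 40.0, 80    # hypothetical background counts and pixel count

B = b_sum / b_cnt                    # mean background per pixel -> 0.5
I = f_sum - B * f_cnt                # 120 - 20 = 100
V = f_sum + B * (1 + f_cnt / b_cnt)  # 120 + 0.5 * 1.5 = 120.75
assert (B, I, V) == (0.5, 100.0, 120.75)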
Example #39
def read(handle, key):
    from dxtbx.format.nexus import convert_units

    if key == "miller_index":
        h = flex.int(handle["h"][:].astype(np.int32))
        k = flex.int(handle["k"][:].astype(np.int32))
        l = flex.int(handle["l"][:].astype(np.int32))
        return flex.miller_index(h, k, l)
    elif key == "id":
        return flex.int(handle["id"][:].astype(int))
    elif key == "partial_id":
        return flex.size_t(handle["reflection_id"][:].astype(int))
    elif key == "entering":
        return flex.bool(handle["entering"][:])
    elif key == "flags":
        return flex.size_t(handle["flags"][:].astype(int))
    elif key == "panel":
        return flex.size_t(handle["det_module"][:].astype(int))
    elif key == "d":
        return flex.double(handle["d"][:])
    elif key == "partiality":
        return flex.double(handle["partiality"][:])
    elif key == "xyzcal.px":
        x = flex.double(handle["predicted_px_x"][:])
        y = flex.double(handle["predicted_px_y"][:])
        z = flex.double(handle["predicted_frame"][:])
        return flex.vec3_double(x, y, z)
    elif key == "xyzcal.mm":
        x = convert_units(
            flex.double(handle["predicted_x"][:]),
            handle["predicted_x"].attrs["units"],
            "mm",
        )
        y = convert_units(
            flex.double(handle["predicted_y"][:]),
            handle["predicted_y"].attrs["units"],
            "mm",
        )
        z = convert_units(
            flex.double(handle["predicted_phi"][:]),
            handle["predicted_phi"].attrs["units"],
            "rad",
        )
        return flex.vec3_double(x, y, z)
    elif key == "bbox":
        b = flex.int(handle["bounding_box"][:].astype(np.int32))
        return flex.int6(b.as_1d())
    elif key == "xyzobs.px.value":
        x = flex.double(handle["observed_px_x"][:])
        y = flex.double(handle["observed_px_y"][:])
        z = flex.double(handle["observed_frame"][:])
        return flex.vec3_double(x, y, z)
    elif key == "xyzobs.px.variance":
        x = flex.double(handle["observed_px_x_var"][:])
        y = flex.double(handle["observed_px_y_var"][:])
        z = flex.double(handle["observed_frame_var"][:])
        return flex.vec3_double(x, y, z)
    elif key == "xyzobs.mm.value":
        x = convert_units(
            flex.double(handle["observed_x"][:]),
            handle["observed_x"].attrs["units"],
            "mm",
        )
        y = convert_units(
            flex.double(handle["observed_y"][:]),
            handle["observed_y"].attrs["units"],
            "mm",
        )
        z = convert_units(
            flex.double(handle["observed_phi"][:]),
            handle["observed_phi"].attrs["units"],
            "rad",
        )
        return flex.vec3_double(x, y, z)
    elif key == "xyzobs.mm.variance":
        x = convert_units(
            flex.double(handle["observed_x_var"][:]),
            handle["observed_x_var"].attrs["units"],
            "mm",
        )
        y = convert_units(
            flex.double(handle["observed_y_var"][:]),
            handle["observed_y_var"].attrs["units"],
            "mm",
        )
        z = convert_units(
            flex.double(handle["observed_phi_var"][:]),
            handle["observed_phi_var"].attrs["units"],
            "rad",
        )
        return flex.vec3_double(x, y, z)
    elif key == "background.mean":
        return flex.double(handle["background_mean"][:])
    elif key == "intensity.sum.value":
        return flex.double(handle["int_sum"][:])
    elif key == "intensity.sum.variance":
        return flex.double(handle["int_sum_var"][:])
    elif key == "intensity.prf.value":
        return flex.double(handle["int_prf"][:])
    elif key == "intensity.prf.variance":
        return flex.double(handle["int_prf_var"][:])
    elif key == "profile.correlation":
        return flex.double(handle["prf_cc"][:])
    elif key == "lp":
        return flex.double(handle["lp"][:])
    elif key == "num_pixels.background":
        return flex.int(handle["num_bg"][:].astype(np.int32))
    elif key == "num_pixels.background_used":
        return flex.int(handle["num_bg_used"][:].astype(np.int32))
    elif key == "num_pixels.foreground":
        return flex.int(handle["num_fg"][:].astype(np.int32))
    elif key == "num_pixels.valid":
        return flex.int(handle["num_valid"][:].astype(np.int32))
    elif key == "profile.rmsd":
        return flex.double(handle["prf_rmsd"][:])
    else:
        raise KeyError("Column %s not read from file" % key)
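
A minimal usage sketch for the column dispatch above, assuming it is the body of a read(handle, key) helper (as in the similar example later in this listing) and that the columns live in an HDF5 group opened with h5py; the file name, the group path and the load_columns name are illustrative assumptions, not part of the original snippet.

import h5py
from dials.array_family import flex

def load_columns(filename, keys):
    # Build a reflection_table column by column using the reader above
    # (assumed to be in scope as `read`).
    table = flex.reflection_table()
    with h5py.File(filename, "r") as f:
        handle = f["entry/diffraction"]  # assumed group layout
        for key in keys:
            table[key] = read(handle, key)
    return table

table = load_columns("reflections.h5", ["miller_index", "id", "panel", "flags"])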
  def __init__(self, measurements_orig, params, i_model, miller_set, result, out):
    measurements = measurements_orig.deep_copy()
    # Now manipulate the data to conform to unit cell, asu, and space group
    # of reference.  The resolution will be cut later.
    # Only works if there is NOT an indexing ambiguity!
    observations = measurements.customized_copy(
      anomalous_flag=not params.merge_anomalous,
      crystal_symmetry=miller_set.crystal_symmetry()
      ).map_to_asu()

    observations_original_index = measurements.customized_copy(
      anomalous_flag=not params.merge_anomalous,
      crystal_symmetry=miller_set.crystal_symmetry()
      )

    # Ensure that match_multi_indices() will return identical results
    # when a frame's observations are matched against the
    # pre-generated Miller set, self.miller_set, and the reference
    # data set, self.i_model.  The implication is that the same match
    # can be used to map Miller indices to array indices for intensity
    # accumulation, and for determination of the correlation
    # coefficient in the presence of a scaling reference.

    assert len(i_model.indices()) == len(miller_set.indices()) \
        and  (i_model.indices() ==
              miller_set.indices()).count(False) == 0
    matches = miller.match_multi_indices(
      miller_indices_unique=miller_set.indices(),
      miller_indices=observations.indices())

    pair1 = flex.int([pair[1] for pair in matches.pairs()])
    pair0 = flex.int([pair[0] for pair in matches.pairs()])
    # narrow things down to the set that matches, only
    observations_pair1_selected = observations.customized_copy(
      indices = flex.miller_index([observations.indices()[p] for p in pair1]),
      data = flex.double([observations.data()[p] for p in pair1]),
      sigmas = flex.double([observations.sigmas()[p] for p in pair1]),
    )
    observations_original_index_pair1_selected = observations_original_index.customized_copy(
      indices = flex.miller_index([observations_original_index.indices()[p] for p in pair1]),
      data = flex.double([observations_original_index.data()[p] for p in pair1]),
      sigmas = flex.double([observations_original_index.sigmas()[p] for p in pair1]),
    )
###################
    I_observed = observations_pair1_selected.data()
    chosen = chosen_weights(observations_pair1_selected, params)

    MILLER = observations_original_index_pair1_selected.indices()
    ORI = result["current_orientation"][0]
    Astar = matrix.sqr(ORI.reciprocal_matrix())
    WAVE = result["wavelength"]
    BEAM = matrix.col((0.0,0.0,-1./WAVE))
    BFACTOR = 0.

    #calculation of correlation here
    I_reference = flex.double([i_model.data()[pair[0]] for pair in matches.pairs()])
    use_weights = False # New facility for getting variance-weighted correlation

    if use_weights:
       #variance weighting
      I_weight = flex.double(
        [1./(observations_pair1_selected.sigmas()[pair[1]])**2 for pair in matches.pairs()])
    else:
      I_weight = flex.double(len(observations_pair1_selected.sigmas()), 1.)

    """Explanation of 'include_negatives' semantics as originally implemented in cxi.merge postrefinement:
       include_negatives = True
       + and - reflections both used for Rh distribution for initial estimate of RS parameter
       + and - reflections both used for calc/obs correlation slope for initial estimate of G parameter
       + and - reflections both passed to the refinery and used in the target function (makes sense if
                           you look at it from a certain point of view)

       include_negatives = False
       + and - reflections both used for Rh distribution for initial estimate of RS parameter
       +       reflections only used for calc/obs correlation slope for initial estimate of G parameter
       + and - reflections both passed to the refinery and used in the target function (makes sense if
                           you look at it from a certain point of view)
    """
    if params.include_negatives:
      SWC = simple_weighted_correlation(I_weight, I_reference, I_observed)
    else:
      non_positive = ( observations_pair1_selected.data() <= 0 )
      SWC = simple_weighted_correlation(I_weight.select(~non_positive),
            I_reference.select(~non_positive), I_observed.select(~non_positive))

    print >> out, "Old correlation is", SWC.corr
    assert params.postrefinement.algorithm=="rs_hybrid"
    Rhall = flex.double()
    for mill in MILLER:
        H = matrix.col(mill)
        Xhkl = Astar*H
        Rh = ( Xhkl + BEAM ).length() - (1./WAVE)
        Rhall.append(Rh)
    Rs = math.sqrt(flex.mean(Rhall*Rhall))

    RS = 1./10000. # reciprocal effective domain size of 1 micron
    RS = Rs        # try this empirically determined approximate, monochrome, a-mosaic value

    self.rs2_current = flex.double([SWC.slope, BFACTOR, RS, 0., 0.])
    self.rs2_parameterization_class = rs_parameterization

    self.rs2_refinery = rs2_refinery(ORI=ORI, MILLER=MILLER, BEAM=BEAM, WAVE=WAVE,
        ICALCVEC = I_reference, IOBSVEC = I_observed, WEIGHTS = chosen)
    self.rs2_refinery.set_profile_shape(params.postrefinement.lineshape)
    self.nave1_refinery = nave1_refinery(ORI=ORI, MILLER=MILLER, BEAM=BEAM, WAVE=WAVE,
        ICALCVEC = I_reference, IOBSVEC = I_observed, WEIGHTS = chosen)
    self.nave1_refinery.set_profile_shape(params.postrefinement.lineshape)

    self.out = out
    self.params = params
    self.miller_set = miller_set
    self.observations_pair1_selected = observations_pair1_selected
    self.observations_original_index_pair1_selected = observations_original_index_pair1_selected
    self.i_model = i_model
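
The Rh loop in the constructor above measures how far each reciprocal-lattice point sits from the Ewald sphere, Rh = |A* h + s0| - 1/lambda with s0 = (0, 0, -1/lambda), and the RMS of those offsets seeds the RS (reciprocal effective domain size) parameter. The standalone helper below is a sketch restating that arithmetic; the function name is illustrative and the imports mirror those of the surrounding snippets.

import math
from scitbx import matrix
from scitbx.array_family import flex

def rms_ewald_offset(Astar, miller_indices, wavelength):
  # Rh = |A* h + s0| - 1/lambda for each index, then the RMS over all indices.
  beam = matrix.col((0.0, 0.0, -1.0 / wavelength))
  rh = flex.double()
  for hkl in miller_indices:
    xhkl = Astar * matrix.col(hkl)
    rh.append((xhkl + beam).length() - 1.0 / wavelength)
  # RMS offset, used above as the initial estimate of RS
  return math.sqrt(flex.mean(rh * rh))

With the names used in the constructor this would be called as rms_ewald_offset(Astar, MILLER, WAVE), matching the Rs value computed inline.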
Exemple #41
0
def run(args):
    import libtbx.load_env
    from libtbx.utils import Sorry

    usage = "%s [options] experiments.json indexed.pickle" % libtbx.env.dispatcher_name

    parser = OptionParser(
        usage=usage,
        phil=phil_scope,
        read_reflections=True,
        read_experiments=True,
        check_format=False,
        epilog=help_message,
    )

    params, options = parser.parse_args(show_diff_phil=True)

    reflections = flatten_reflections(params.input.reflections)
    experiments = flatten_experiments(params.input.experiments)
    if len(experiments) == 0 and len(reflections) == 0:
        parser.print_help()
        return
    elif len(experiments.crystals()) > 1:
        raise Sorry("Only one crystal can be processed at a time")
    if params.change_of_basis_op is None:
        raise Sorry("Please provide a change_of_basis_op.")

    reference_crystal = None
    if params.reference is not None:
        from dxtbx.serialize import load

        reference_experiments = load.experiment_list(params.reference, check_format=False)
        assert len(reference_experiments.crystals()) == 1
        reference_crystal = reference_experiments.crystals()[0]

    if len(experiments) and params.change_of_basis_op is libtbx.Auto:
        if reference_crystal is not None:
            from dials.algorithms.indexing.compare_orientation_matrices import (
                difference_rotation_matrix_and_euler_angles,
            )

            cryst = experiments.crystals()[0]
            R, euler_angles, change_of_basis_op = difference_rotation_matrix_and_euler_angles(cryst, reference_crystal)
            print "Change of basis op: %s" % change_of_basis_op
            print "Rotation matrix to transform input crystal to reference::"
            print R.mathematica_form(format="%.3f", one_row_per_line=True)
            print "Euler angles (xyz): %.2f, %.2f, %.2f" % euler_angles

        elif len(reflections):
            assert len(reflections) == 1

            # always re-map reflections to reciprocal space
            from dials.algorithms.indexing import indexer

            refl_copy = flex.reflection_table()
            for i, imageset in enumerate(experiments.imagesets()):
                if "imageset_id" in reflections[0]:
                    sel = reflections[0]["imageset_id"] == i
                else:
                    sel = reflections[0]["id"] == i
                refl = indexer.indexer_base.map_spots_pixel_to_mm_rad(
                    reflections[0].select(sel), imageset.get_detector(), imageset.get_scan()
                )

                indexer.indexer_base.map_centroids_to_reciprocal_space(
                    refl, imageset.get_detector(), imageset.get_beam(), imageset.get_goniometer()
                )
                refl_copy.extend(refl)

            # index the reflection list using the input experiments list
            refl_copy["id"] = flex.int(len(refl_copy), -1)
            from dials.algorithms.indexing import index_reflections

            index_reflections(refl_copy, experiments, tolerance=0.2)
            hkl_expt = refl_copy["miller_index"]
            hkl_input = reflections[0]["miller_index"]

            change_of_basis_op = derive_change_of_basis_op(hkl_input, hkl_expt)

            # reset experiments list since we don't want to reindex this
            experiments = []

    else:
        change_of_basis_op = sgtbx.change_of_basis_op(params.change_of_basis_op)

    if len(experiments):
        experiment = experiments[0]
        cryst_orig = copy.deepcopy(experiment.crystal)
        cryst_reindexed = cryst_orig.change_basis(change_of_basis_op)
        if params.space_group is not None:
            a, b, c = cryst_reindexed.get_real_space_vectors()
            cryst_reindexed = crystal_model(a, b, c, space_group=params.space_group.group())
        experiment.crystal.update(cryst_reindexed)

        print "Old crystal:"
        print cryst_orig
        print
        print "New crystal:"
        print cryst_reindexed
        print

        print "Saving reindexed experimental models to %s" % params.output.experiments
        dump.experiment_list(experiments, params.output.experiments)

    if len(reflections):
        assert len(reflections) == 1
        reflections = reflections[0]

        miller_indices = reflections["miller_index"]

        if params.hkl_offset is not None:
            h, k, l = miller_indices.as_vec3_double().parts()
            h += params.hkl_offset[0]
            k += params.hkl_offset[1]
            l += params.hkl_offset[2]
            miller_indices = flex.miller_index(h.iround(), k.iround(), l.iround())
        non_integral_indices = change_of_basis_op.apply_results_in_non_integral_indices(miller_indices)
        if non_integral_indices.size() > 0:
            print "Removing %i/%i reflections (change of basis results in non-integral indices)" % (
                non_integral_indices.size(),
                miller_indices.size(),
            )
        sel = flex.bool(miller_indices.size(), True)
        sel.set_selected(non_integral_indices, False)
        miller_indices_reindexed = change_of_basis_op.apply(miller_indices.select(sel))
        reflections["miller_index"].set_selected(sel, miller_indices_reindexed)
        reflections["miller_index"].set_selected(~sel, (0, 0, 0))

        print "Saving reindexed reflections to %s" % params.output.reflections
        easy_pickle.dump(params.output.reflections, reflections)
    21.009, 24.9846, 16.2607, 29.4217, 24.982, 27.2008, 15.2354, 15.2498,
    14.3514, 14.352, 21.0061, 16.2576, 24.9884, 21.0089, 15.2646, 16.2589,
    14.346, 14.347, 15.3548, 15.24, 25.0063, 16.2629, 16.266, 15.2613,
    15.2671, 16.2713, 16.2628, 14.3454, 15.2642, 24.9881, 27.4904, 24.975,
    15.2658, 27.2176, 14.3565, 15.2576, 15.2653, 15.2673, 14.3385, 14.355,
    27.2235, 25.0048, 25.0138, 27.1408, 25.0315, 14.3464, 27.2386, 21.0258,
    25.004, 14.3446, 15.2299, 15.2723, 14.3643, 14.3474, 14.3584, 15.2848,
    21.0256, 21.0246, 15.261, 25.0207, 27.2373, 16.2848, 16.2854, 14.3575,
    14.3636, 29.4477, 27.2583, 14.3619, 21.0374, 21.0399, 16.2755, 14.3487,
    14.3618, 14.3608, 15.2829, 27.2497, 15.2715, 15.2699, 16.2646, 16.2786,
    16.2821, 16.2696, 21.0368, 21.0307, 25.0431, 21.0456, 21.0224, 27.2257,
    27.2486, 25.0266, 25.0252, 29.4661, 25.0415, 25.0266, 25.046, 29.4752,
    27.2545, 29.4521, 37.3152, 29.4306, 29.4684, 37.3646, 28.9946, 28.9884,
    29.4736, 29.4737, 30.0142]

  miller_indices = flex.miller_index()
  two_thetas_obs = flex.double()
  for i, two_theta in enumerate(two_thetas):
    d_spacing = uctbx.two_theta_as_d(two_theta, wavelength, deg=True)
    for j, d in enumerate(ms.d_spacings().data()):
      if abs(d - d_spacing) < 0.1:
        miller_indices.append(ms.indices()[j])
        two_thetas_obs.append(two_theta)

  show_fit(
    two_thetas_obs, miller_indices, wavelength, unit_cell_start)

  refined = refinery(
    two_thetas_obs, miller_indices, wavelength, unit_cell_start,
      space_group=crystal_symmetry.space_group())
  print
Exemple #43
0
    '''Get the model data using the supplied decoder.'''
    return decoder.decode(self._handle)

  def set_reflections(self, reflections):
    '''Set the reflection data.'''
    self.set_data(reflections, ReflectionListEncoder())

  def get_reflections(self):
    '''Get the reflection data.'''
    return self.get_data(ReflectionListDecoder())


if __name__ == '__main__':
  from dials.array_family import flex
  reflections = flex.reflection_table([
    ('hkl', flex.miller_index(10)),
    ('s1', flex.vec3_double(10)),
    ('bbox', flex.int6(10)),
    ('id', flex.int(10)),
    ('shoebox', flex.shoebox(10))
  ])

  for i in range(10):
    reflections['shoebox'][i].data = flex.double(flex.grid(10,10,10))
    reflections['shoebox'][i].mask = flex.int(flex.grid(10,10,10))
    reflections['shoebox'][i].background = flex.double(flex.grid(10,10,10))

  for i in range(10):
    print reflections['shoebox'][i].data.all()

  writer = NexusFile('temp.h5', 'w')
Exemple #44
0
def read(handle, key):
  from dials.array_family import flex
  import numpy as np
  if   key == 'miller_index':
    h = flex.int(handle['h'][:].astype(np.int32))
    k = flex.int(handle['k'][:].astype(np.int32))
    l = flex.int(handle['l'][:].astype(np.int32))
    return flex.miller_index(h,k,l)
  elif key == 'id':
    return flex.int(handle['id'][:].astype(int))
  elif key == 'partial_id':
    return flex.size_t(handle['reflection_id'][:].astype(int))
  elif key == 'entering':
    return flex.bool(handle['entering'][:])
  elif key == 'flags':
    return flex.size_t(handle['flags'][:].astype(int))
  elif key == 'panel':
    return flex.size_t(handle['det_module'][:].astype(int))
  elif key == 'd':
    return flex.double(handle['d'][:])
  elif key == 'partiality':
    return flex.double(handle['partiality'][:])
  elif key == 'xyzcal.px':
    x = flex.double(handle['prd_px_x'][:])
    y = flex.double(handle['prd_px_y'][:])
    z = flex.double(handle['prd_frame'][:])
    return flex.vec3_double(x, y, z)
  elif key == 'xyzcal.mm':
    x = flex.double(handle['prd_mm_x'][:])
    y = flex.double(handle['prd_mm_y'][:])
    z = flex.double(handle['prd_phi'][:])
    return flex.vec3_double(x, y, z)
  elif key == 'bbox':
    x0 = flex.int(handle['bbx0'][:].astype(np.int32))
    x1 = flex.int(handle['bbx1'][:].astype(np.int32))
    y0 = flex.int(handle['bby0'][:].astype(np.int32))
    y1 = flex.int(handle['bby1'][:].astype(np.int32))
    z0 = flex.int(handle['bbz0'][:].astype(np.int32))
    z1 = flex.int(handle['bbz1'][:].astype(np.int32))
    return flex.int6(x0, x1, y0, y1, z0, z1)
  elif key == 'xyzobs.px.value':
    x = flex.double(handle['obs_px_x_val'][:])
    y = flex.double(handle['obs_px_y_val'][:])
    z = flex.double(handle['obs_frame_val'][:])
    return flex.vec3_double(x, y, z)
  elif key == 'xyzobs.px.variance':
    x = flex.double(handle['obs_px_x_var'][:])
    y = flex.double(handle['obs_px_y_var'][:])
    z = flex.double(handle['obs_frame_var'][:])
    return flex.vec3_double(x, y, z)
  elif key == 'xyzobs.mm.value':
    x = flex.double(handle['obs_mm_x_val'][:])
    y = flex.double(handle['obs_mm_y_val'][:])
    z = flex.double(handle['obs_phi_val'][:])
    return flex.vec3_double(x, y, z)
  elif key == 'xyzobs.mm.variance':
    x = flex.double(handle['obs_mm_x_var'][:])
    y = flex.double(handle['obs_mm_y_var'][:])
    z = flex.double(handle['obs_phi_var'][:])
    return flex.vec3_double(x, y, z)
  elif key == 'background.mean':
    return flex.double(handle['bkg_mean'][:])
  elif key == 'intensity.sum.value':
    return flex.double(handle['int_sum_val'][:])
  elif key == 'intensity.sum.variance':
    return flex.double(handle['int_sum_var'][:])
  elif key == 'intensity.prf.value':
    return flex.double(handle['int_prf_val'][:])
  elif key == 'intensity.prf.variance':
    return flex.double(handle['int_prf_var'][:])
  elif key == 'profile.correlation':
    return flex.double(handle['prf_cc'][:])
  elif key == 'lp':
    return flex.double(handle['lp'][:])
  elif key == 'num_pixels.background':
    return flex.int(handle['num_bg'][:].astype(np.int32))
  elif key == 'num_pixels.background_used':
    return flex.int(handle['num_bg_used'][:].astype(np.int32))
  elif key == 'num_pixels.foreground':
    return flex.int(handle['num_fg'][:].astype(np.int32))
  elif key == 'num_pixels.valid':
    return flex.int(handle['num_valid'][:].astype(np.int32))
  elif key == 'profile.rmsd':
    return flex.double(handle['prf_rmsd'][:])
  else:
    raise KeyError('Column %s not read from file' % key)
Exemple #45
0
def test_export_dials():

  from dials.array_family import flex
  print 'Creating dummy reflection table...'
  table = flex.reflection_table()
  table['miller_index'] = flex.miller_index(100)
  table['id'] = flex.int(100)
  table['intensity.sum.value'] = flex.double(100)
  table['intensity.sum.variance'] = flex.double(100)
  table['intensity.prf.value'] = flex.double(100)
  table['intensity.prf.variance'] = flex.double(100)
  table['lp'] = flex.double(100)
  table['panel'] = flex.size_t(100)
  table['bbox'] = flex.int6(100)
  table['xyzcal.px'] = flex.vec3_double(100)
  table['xyzcal.mm'] = flex.vec3_double(100)
  table['xyzobs.px.value'] = flex.vec3_double(100)
  table['xyzobs.px.variance'] = flex.vec3_double(100)
  table['xyzobs.mm.value'] = flex.vec3_double(100)
  table['xyzobs.mm.variance'] = flex.vec3_double(100)
  table['partiality'] = flex.double(100)
  table['d'] = flex.double(100)
  table['s1'] = flex.vec3_double(100)
  table['rlp'] = flex.vec3_double(100)
  table['background.mean'] = flex.double(100)
  table['entering'] = flex.bool(100)
  table['flags'] = flex.size_t(100)
  table['profile.correlation'] = flex.double(100)

  # Open the file
  outfile = File('test_file.mtz2', 'w')

  # Get the entry
  entry = outfile.entry

  print 'Writing reflection table stuff...'
  # Save some processed data
  diffraction = entry.diffraction

  # Set the experiments
  experiment = diffraction.experiments[0]
  experiment['beam'] = 0
  experiment['detector'] = 0
  experiment['goniometer'] = 0
  experiment['scan'] = 0
  experiment['crystal'] = 0

  # Get columns into array
  col1, col2, col3 = zip(*list(table['miller_index']))
  col4 = table['id']
  col5 = table['intensity.sum.value']
  col6 = table['intensity.sum.variance']
  col7 = table['intensity.prf.value']
  col8 = table['intensity.prf.variance']
  col9 = table['lp']
  col10 = table['panel']
  col11, col12, col13, col14, col15, col16 = table['bbox'].parts()
  col17, col18, col19 = table['xyzcal.px'].parts()
  col20, col21, col22 = table['xyzcal.mm'].parts()
  col23, col24, col25 = table['xyzobs.px.value'].parts()
  col26, col27, col28 = table['xyzobs.px.variance'].parts()
  col29, col30, col31 = table['xyzobs.mm.value'].parts()
  col32, col33, col34 = table['xyzobs.mm.variance'].parts()
  col35 = table['partiality']
  col36 = table['d']
  col37 = table['background.mean']
  col38 = table['entering']
  col39 = table['flags']
  col40 = table['profile.correlation']

  # Some data
  diffraction['h'] = col1
  diffraction['k'] = col2
  diffraction['l'] = col3
  diffraction['id'] = col4
  diffraction['int_sum_val'] = col5
  diffraction['int_sum_var'] = col6
  diffraction['int_prf_val'] = col7
  diffraction['int_prf_var'] = col8
  diffraction['lp'] = col9
  diffraction['det_module'] = col10
  diffraction['bbx0'] = col11
  diffraction['bbx1'] = col12
  diffraction['bby0'] = col13
  diffraction['bby1'] = col14
  diffraction['bbz0'] = col15
  diffraction['bbz1'] = col16
  diffraction['prd_px_x'] = col17
  diffraction['prd_px_y'] = col18
  diffraction['prd_frame'] = col19
  diffraction['prd_mm_x'] = col20
  diffraction['prd_mm_y'] = col21
  diffraction['prd_phi'] = col22
  diffraction['obs_px_x_val'] = col23
  diffraction['obs_px_x_var'] = col24
  diffraction['obs_px_y_val'] = col25
  diffraction['obs_px_y_var'] = col26
  diffraction['obs_frame_val'] = col27
  diffraction['obs_frame_var'] = col28
  diffraction['obs_mm_x_val'] = col29
  diffraction['obs_mm_x_var'] = col30
  diffraction['obs_mm_y_val'] = col31
  diffraction['obs_mm_y_var'] = col32
  diffraction['obs_phi_val'] = col33
  diffraction['obs_phi_var'] = col34
  diffraction['partiality'] = col35
  diffraction['d'] = col36
  diffraction['bkg_mean'] = col37
  diffraction['entering'] = col38
  diffraction['flags'] = col39
  diffraction['prf_cc'] = col40

  # Flush the file
  outfile.flush()
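
A hedged round-trip sketch for the export test above: after outfile.flush() the same columns should be recoverable with a reader like the read() helper in the neighbouring example, since the dataset names match ('h', 'k', 'l', 'int_sum_val', ...). The h5py usage and the 'entry/diffraction' group path are assumptions about how the File class lays out 'test_file.mtz2'.

import h5py

with h5py.File('test_file.mtz2', 'r') as f:
  handle = f['entry/diffraction']  # assumed internal layout
  hkl = read(handle, 'miller_index')          # reader from the earlier example
  i_sum = read(handle, 'intensity.sum.value')
  assert len(hkl) == 100 and len(i_sum) == 100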
    def __init__(self, measurements_orig, params, i_model, miller_set, result,
                 out):
        measurements = measurements_orig.deep_copy()

        # Now manipulate the data to conform to unit cell, asu, and space group
        # of reference.  The resolution will be cut later.
        # Only works if there is NOT an indexing ambiguity!
        observations = measurements.customized_copy(
            anomalous_flag=not params.merge_anomalous,
            crystal_symmetry=miller_set.crystal_symmetry()).map_to_asu()

        observations_original_index = measurements.customized_copy(
            anomalous_flag=not params.merge_anomalous,
            crystal_symmetry=miller_set.crystal_symmetry())

        # Ensure that match_multi_indices() will return identical results
        # when a frame's observations are matched against the
        # pre-generated Miller set, self.miller_set, and the reference
        # data set, self.i_model.  The implication is that the same match
        # can be used to map Miller indices to array indices for intensity
        # accumulation, and for determination of the correlation
        # coefficient in the presence of a scaling reference.

        assert len(i_model.indices()) == len(miller_set.indices()) \
            and  (i_model.indices() ==
                  miller_set.indices()).count(False) == 0

        matches = miller.match_multi_indices(
            miller_indices_unique=miller_set.indices(),
            miller_indices=observations.indices())

        pair1 = flex.int([pair[1] for pair in matches.pairs()])
        pair0 = flex.int([pair[0] for pair in matches.pairs()])
        # narrow things down to the set that matches, only
        observations_pair1_selected = observations.customized_copy(
            indices=flex.miller_index(
                [observations.indices()[p] for p in pair1]),
            data=flex.double([observations.data()[p] for p in pair1]),
            sigmas=flex.double([observations.sigmas()[p] for p in pair1]),
        )
        observations_original_index_pair1_selected = observations_original_index.customized_copy(
            indices=flex.miller_index(
                [observations_original_index.indices()[p] for p in pair1]),
            data=flex.double(
                [observations_original_index.data()[p] for p in pair1]),
            sigmas=flex.double(
                [observations_original_index.sigmas()[p] for p in pair1]),
        )
        ###################
        I_observed = observations_pair1_selected.data()
        MILLER = observations_original_index_pair1_selected.indices()
        ORI = result["current_orientation"][0]
        Astar = matrix.sqr(ORI.reciprocal_matrix())
        WAVE = result["wavelength"]
        BEAM = matrix.col((0.0, 0.0, -1. / WAVE))
        BFACTOR = 0.

        #calculation of correlation here
        I_reference = flex.double(
            [i_model.data()[pair[0]] for pair in matches.pairs()])
        I_invalid = flex.bool(
            [i_model.sigmas()[pair[0]] < 0. for pair in matches.pairs()])
        use_weights = False  # New facility for getting variance-weighted correlation

        if use_weights:
            #variance weighting
            I_weight = flex.double([
                1. / (observations_pair1_selected.sigmas()[pair[1]])**2
                for pair in matches.pairs()
            ])
        else:
            I_weight = flex.double(len(observations_pair1_selected.sigmas()),
                                   1.)
        I_weight.set_selected(I_invalid, 0.)
        """Explanation of 'include_negatives' semantics as originally implemented in cxi.merge postrefinement:
       include_negatives = True
       + and - reflections both used for Rh distribution for initial estimate of RS parameter
       + and - reflections both used for calc/obs correlation slope for initial estimate of G parameter
       + and - reflections both passed to the refinery and used in the target function (makes sense if
                           you look at it from a certain point of view)

       include_negatives = False
       + and - reflections both used for Rh distribution for initial estimate of RS parameter
       +       reflections only used for calc/obs correlation slope for initial estimate of G parameter
       + and - reflections both passed to the refinery and used in the target function (makes sense if
                           you look at it from a certain point of view)
    """
        if params.include_negatives:
            SWC = simple_weighted_correlation(I_weight, I_reference,
                                              I_observed)
        else:
            non_positive = (observations_pair1_selected.data() <= 0)
            SWC = simple_weighted_correlation(
                I_weight.select(~non_positive),
                I_reference.select(~non_positive),
                I_observed.select(~non_positive))

        print >> out, "Old correlation is", SWC.corr
        if params.postrefinement.algorithm == "rs":
            Rhall = flex.double()
            for mill in MILLER:
                H = matrix.col(mill)
                Xhkl = Astar * H
                Rh = (Xhkl + BEAM).length() - (1. / WAVE)
                Rhall.append(Rh)
            Rs = math.sqrt(flex.mean(Rhall * Rhall))

            RS = 1. / 10000.  # reciprocal effective domain size of 1 micron
            RS = Rs  # try this empirically determined approximate, monochrome, a-mosaic value
            current = flex.double([SWC.slope, BFACTOR, RS, 0., 0.])

            parameterization_class = rs_parameterization
            refinery = rs_refinery(ORI=ORI,
                                   MILLER=MILLER,
                                   BEAM=BEAM,
                                   WAVE=WAVE,
                                   ICALCVEC=I_reference,
                                   IOBSVEC=I_observed)

        elif params.postrefinement.algorithm == "eta_deff":
            eta_init = 2. * result["ML_half_mosaicity_deg"][0] * math.pi / 180.
            D_eff_init = 2. * result["ML_domain_size_ang"][0]
            current = flex.double([
                SWC.slope,
                BFACTOR,
                eta_init,
                0.,
                0.,
                D_eff_init,
            ])

            parameterization_class = eta_deff_parameterization
            refinery = eta_deff_refinery(ORI=ORI,
                                         MILLER=MILLER,
                                         BEAM=BEAM,
                                         WAVE=WAVE,
                                         ICALCVEC=I_reference,
                                         IOBSVEC=I_observed)

        func = refinery.fvec_callable(parameterization_class(current))
        functional = flex.sum(func * func)
        print >> out, "functional", functional
        self.current = current
        self.parameterization_class = parameterization_class
        self.refinery = refinery
        self.out = out
        self.params = params
        self.miller_set = miller_set
        self.observations_pair1_selected = observations_pair1_selected
        self.observations_original_index_pair1_selected = observations_original_index_pair1_selected
Exemple #47
0
      score = evaluation_function(hkls)
      print "Estimated sweep completeness: %5.1f %%   sweep multiplicity : %.1f   sweep score: %.1f" %\
            (completeness, multiplicity, score)
      return {'completeness': completeness, 'multiplicity': multiplicity, 'score': score}

#    # count the number of observations per reflection to construct an evaluation function
#    seen_hkl_multiplicity = {x: 0 for x in possible_hkl}
#    for hkl in detectable_rays['miller_index']:
#      seen_hkl_multiplicity[hkl] += 1

    expt = experiments[0]
    spacegroup = expt.crystal.get_space_group()
    unit_cell = expt.crystal.get_unit_cell()

    possible_hkl = self.list_possible_reflections(spacegroup, unit_cell, dmin, dmax)
    possible_hkl_flex = flex.miller_index(possible_hkl)
    hkl_to_id = { hkl: id for (id, hkl) in enumerate(possible_hkl) }
    id_to_hkl = [ hkl for (id, hkl) in enumerate(possible_hkl) ]

    # find mapping of reciprocal space onto reciprocal asymmetric unit and its inverse
    from cctbx.miller import map_to_asu
    asu_hkl_flex = flex.miller_index(possible_hkl)
    map_to_asu(spacegroup.type(), False, asu_hkl_flex)
    # TODO: Treat anomalous signal?
    map_hkl_to_symmhkl = {r: rs for (r, rs) in zip(possible_hkl, list(asu_hkl_flex))}
    map_symmhkl_to_hkl = {}
    for k, v in map_hkl_to_symmhkl.iteritems():
      map_symmhkl_to_hkl[v] = map_symmhkl_to_hkl.get(v, [])
      map_symmhkl_to_hkl[v].append(k)

    unique_asu_indices = set(asu_hkl_flex)
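
A small sketch of how the look-up tables built above could feed the completeness figure reported at the top of this fragment; the function name is illustrative and it assumes map_hkl_to_symmhkl and unique_asu_indices are in scope as defined above.

def estimated_completeness(observed_hkls):
  # Reduce each observed index to its ASU representative and compare the
  # number of distinct ASU reflections seen with the number possible.
  seen = set(map_hkl_to_symmhkl[hkl] for hkl in observed_hkls if hkl in map_hkl_to_symmhkl)
  return 100.0 * len(seen) / len(unique_asu_indices)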
def simple_gaussian_spots(params):
  from dials.array_family import flex
  from dials.algorithms import shoebox
  import random
  import math

  from scitbx import matrix
  r = params.rotation
  axis = matrix.col((r.axis.x, r.axis.y, r.axis.z))
  if axis.length() > 0:
    rotation = axis.axis_and_angle_as_r3_rotation_matrix(r.angle, deg=True)
  else:
    rotation = matrix.sqr((1, 0, 0, 0, 1, 0, 0, 0, 1))

  # generate mask and peak values

  from dials.algorithms.shoebox import MaskCode
  mask_peak = MaskCode.Valid|MaskCode.Foreground
  mask_back = MaskCode.Valid|MaskCode.Background

  from dials.util.command_line import ProgressBar
  p = ProgressBar(title = 'Generating reflections')

  rlist = flex.reflection_table(params.nrefl)
  hkl = flex.miller_index(params.nrefl)
  s1 = flex.vec3_double(params.nrefl)
  xyzmm = flex.vec3_double(params.nrefl)
  xyzpx = flex.vec3_double(params.nrefl)
  panel = flex.size_t(params.nrefl)
  bbox = flex.int6(params.nrefl)

  for j in range(params.nrefl):
    p.update(j * 100.0 / params.nrefl)
    hkl[j] = (random.randint(0, 20),
              random.randint(0, 20),
              random.randint(0, 20))
    phi = 2 * math.pi * random.random()
    s1[j] = (0, 0, 0)
    xyzpx[j] = (0, 0, 0)
    xyzmm[j] = (0, 0, phi)
    panel[j] = 0
    bbox[j] = (0, params.shoebox_size.x,
               0, params.shoebox_size.y,
               0, params.shoebox_size.z)

  p.finished('Generating %d reflections' % params.nrefl)
  intensity = flex.double(params.nrefl)
  shoebox = flex.shoebox(panel, bbox)
  shoebox.allocate_with_value(MaskCode.Valid)

  p = ProgressBar(title = 'Generating shoeboxes')

  for i in range(len(rlist)):

    p.update(i * 100.0 / params.nrefl)
    mask = shoebox[i].mask

    if params.pixel_mask == 'precise':
      # flag everything as background: peak will be assigned later
      for j in range(len(mask)):
        mask[j] = mask_back
    elif params.pixel_mask == 'all':
      # flag we have no idea what anything is
      mask_none = MaskCode.Valid|MaskCode.Foreground|MaskCode.Background
      for j in range(len(mask)):
        mask[j] = mask_none
    elif params.pixel_mask == 'static':
      import itertools
      from scitbx.array_family import flex
      x0 = params.spot_offset.x + params.shoebox_size.x / 2
      y0 = params.spot_offset.y + params.shoebox_size.y / 2
      z0 = params.spot_offset.z + params.shoebox_size.z / 2
      sx = params.mask_nsigma * params.spot_size.x
      sy = params.mask_nsigma * params.spot_size.y
      sz = params.mask_nsigma * params.spot_size.z

      # The x, y, z indices
      z, y, x = zip(*itertools.product(*(range(n) for n in mask.all())))
      xyz = flex.vec3_double(flex.double(x), flex.double(y), flex.double(z))

      # Calculate SUM(((xj - xj0) / sxj)**2) for each element
      xyz0 = (x0, y0, z0)
      isxyz = (1.0/sx, 1.0/sy, 1.0/sz)
      dxyz = sum([(x * isx)**2 for x, isx in
        zip(((xyz - xyz0) * rotation).parts(), isxyz)])

      # Set the mask values
      index = dxyz <= 1.0
      index.reshape(mask.accessor())
      mask.set_selected(index, MaskCode.Valid | MaskCode.Foreground)
      mask.set_selected(index != True, MaskCode.Valid | MaskCode.Background)

    sbox = shoebox[i].data

    # reflection itself, including setting the peak region if we're doing that
    # FIXME use flex arrays to make the rotation bit more efficient as this is
    # now rather slow...

    counts_true = 0
    for j in range(params.counts):
      _x = random.gauss(0, params.spot_size.x)
      _y = random.gauss(0, params.spot_size.y)
      _z = random.gauss(0, params.spot_size.z)

      Rxyz = rotation * matrix.col((_x, _y, _z)).elems

      x = int(Rxyz[0] + params.spot_offset.x + params.shoebox_size.x / 2)
      y = int(Rxyz[1] + params.spot_offset.y + params.shoebox_size.y / 2)
      z = int(Rxyz[2] + params.spot_offset.z + params.shoebox_size.z / 2)

      if x < 0 or x >= params.shoebox_size.x:
        continue
      if y < 0 or y >= params.shoebox_size.y:
        continue
      if z < 0 or z >= params.shoebox_size.z:
        continue
      sbox[z, y, x] += 1
      counts_true += 1
      if params.pixel_mask == 'precise':
        mask[z, y, x] = mask_peak

    intensity[i] = counts_true

    if params.background:
      # flat background
      for j in range(params.background * len(sbox)):
        x = random.randint(0, params.shoebox_size.x - 1)
        y = random.randint(0, params.shoebox_size.y - 1)
        z = random.randint(0, params.shoebox_size.z - 1)
        sbox[z, y, x] += 1
    else:
      # or inclined
      random_background_plane(sbox, params.background_a, params.background_b,
                              params.background_c, params.background_d)


  rlist['miller_index'] = hkl
  rlist['s1'] = s1
  rlist['xyzcal.px'] = xyzpx
  rlist['xyzcal.mm'] = xyzmm
  rlist['bbox'] = bbox
  rlist['panel'] = panel
  rlist['shoebox'] = shoebox
  rlist['intensity.sum.value'] = intensity
  p.finished('Generating %d shoeboxes' % params.nrefl)

  return rlist
Exemple #49
0
def offset_miller_indices(indices, offset):
  from dials.array_family import flex
  return flex.miller_index(
    *[mi.iround() for mi in (indices.as_vec3_double() + offset).parts()])
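
A brief usage note for the helper above (a sketch with illustrative indices): the offset is added componentwise to every Miller index, and the result is rounded back to integer indices.

from dials.array_family import flex
mi = flex.miller_index([(1, 2, 3), (2, 0, 0)])
shifted = offset_miller_indices(mi, (1, 0, -1))
assert list(shifted) == [(2, 2, 2), (3, 0, -1)]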