Example #1
  def run(self):
    ''' Parse the options. '''
    from dials.util.options import flatten_experiments, flatten_reflections
    # Parse the command line arguments
    params, options = self.parser.parse_args(show_diff_phil=True)
    self.params = params
    experiments = flatten_experiments(params.input.experiments)
    reflections = flatten_reflections(params.input.reflections)

    assert len(reflections) == len(experiments) == 1
    reflections = reflections[0]
    exp = experiments[0]

    from dials.algorithms.indexing import index_reflections
    from dials.algorithms.indexing.indexer import indexer_base

    reflections['id'] = flex.int(len(reflections), -1)
    reflections['imageset_id'] = flex.int(len(reflections), 0)
    reflections = indexer_base.map_spots_pixel_to_mm_rad(reflections, exp.detector, exp.scan)

    indexer_base.map_centroids_to_reciprocal_space(
      reflections, exp.detector, exp.beam, exp.goniometer,)

    index_reflections(reflections,
                      experiments, params.d_min,
                      tolerance=0.3)
    indexed_reflections = reflections.select(reflections['miller_index'] != (0,0,0))
    print "Indexed %d reflections out of %d"%(len(indexed_reflections), len(reflections))
    easy_pickle.dump("indexedstrong.pickle", indexed_reflections)
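
As the final select suggests, spots that index_reflections could not index are left with a miller_index of (0, 0, 0). A minimal standalone sketch of that selection idiom, using only the flex API shown above:

  from dials.array_family import flex
  t = flex.reflection_table()
  t['miller_index'] = flex.miller_index([(1, 2, 3), (0, 0, 0)])
  indexed = t.select(t['miller_index'] != (0, 0, 0))
  assert len(indexed) == 1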
    def with_given_intensity(self, N, I, Ba, Bb, Bc, Bd):
        """ Generate reflections with a given intensity and background. """
        from dials.array_family import flex

        return self.with_individual_given_intensity(
            N, flex.int(N, I), flex.int(N, Ba), flex.int(N, Bb), flex.int(N, Bc), flex.int(N, Bd)
        )
Example #3
  def tst_find_overlapping(self):
    from dials.array_family import flex
    from random import randint, uniform
    from dials.model.data import Shoebox
    N = 10000
    r = flex.reflection_table(N)
    r['bbox'] = flex.int6(N)
    r['panel'] = flex.size_t(N)
    r['id'] = flex.int(N)
    r['imageset_id'] = flex.int(N)
    for i in range(N):
      x0 = randint(0, 100)
      x1 = randint(1, 10) + x0
      y0 = randint(0, 100)
      y1 = randint(1, 10) + y0
      z0 = randint(0, 100)
      z1 = randint(1, 10) + z0
      panel = randint(0,2)
      pid = randint(0,2)
      r['bbox'][i] = (x0,x1,y0,y1,z0,z1)
      r['panel'][i] = panel
      r['id'][i] = pid
      r['imageset_id'][i] = pid

    def is_overlap(b0, b1, border):
      b0 = b0[0]-border,b0[1]+border,b0[2]-border,b0[3]+border,b0[4]-border,b0[5]+border
      b1 = b1[0]-border,b1[1]+border,b1[2]-border,b1[3]+border,b1[4]-border,b1[5]+border
      if not (b1[0] > b0[1] or
              b1[1] < b0[0] or
              b1[2] > b0[3] or
              b1[3] < b0[2] or
              b1[4] > b0[5] or
              b1[5] < b0[4]):
        return True
      return False

    for i in [0, 2, 5]:
      overlaps = r.find_overlaps(border=i)
      for item in overlaps.edges():
        i0 = overlaps.source(item)
        i1 = overlaps.target(item)
        r0 = r[i0]
        r1 = r[i1]
        p0 = r0['panel']
        p1 = r1['panel']
        b0 = r0['bbox']
        b1 = r1['bbox']
        j0 = r0['imageset_id']
        j1 = r1['imageset_id']
        assert j0 == j1
        assert p0 == p1
        assert is_overlap(b0,b1,i)



    print 'OK'
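
A minimal sketch of the API under test, assuming find_overlaps only needs the columns set up above ('bbox', 'panel', 'id', 'imageset_id'): two boxes on the same panel that share volume should produce one edge in the overlap graph.

  from dials.array_family import flex
  r = flex.reflection_table(2)
  r['bbox'] = flex.int6(2)
  r['panel'] = flex.size_t(2)
  r['id'] = flex.int(2)
  r['imageset_id'] = flex.int(2)
  r['bbox'][0] = (0, 5, 0, 5, 0, 5)
  r['bbox'][1] = (3, 8, 3, 8, 3, 8)
  overlaps = r.find_overlaps(border=0)
  assert len(list(overlaps.edges())) == 1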
Example #4
  def tst_split_blocks_1_frame(self):
    from dials.array_family import flex
    from random import randint, uniform, seed
    from dials.algorithms.integration.integrator import JobList
    r = flex.reflection_table()
    r['value1'] = flex.double()
    r['value2'] = flex.int()
    r['value3'] = flex.double()
    r['bbox'] = flex.int6()
    r['id'] = flex.int()
    expected = []
    for i in range(100):
      x0 = randint(0, 100)
      x1 = x0 + randint(1, 10)
      y0 = randint(0, 100)
      y1 = y0 + randint(1, 10)
      z0 = randint(0, 100)
      z1 = z0 + randint(1, 10)
      v1 = uniform(0, 100)
      v2 = randint(0, 100)
      v3 = uniform(0, 100)
      r.append({
        'id' : 0,
        'value1' : v1,
        'value2' : v2,
        'value3' : v3,
        'bbox' : (x0, x1, y0, y1, z0, z1)
      })
      for z in range(z0, z1):
        expected.append({
          'id' : 0,
          'value1' : v1,
          'value2' : v2,
          'value3' : v3,
          'bbox' : (x0, x1, y0, y1, z, z+1),
          'partial_id' : i,
        })

    jobs = JobList()
    jobs.add((0,1), (0, 111), 1)

    jobs.split(r)
    assert(len(r) == len(expected))
    EPS = 1e-7
    for r1, r2 in zip(r, expected):
      assert(r1['bbox'] == r2['bbox'])
      assert(r1['partial_id'] == r2['partial_id'])
      assert(abs(r1['value1'] - r2['value1']) < EPS)
      assert(r1['value2'] == r2['value2'])
      assert(abs(r1['value3'] - r2['value3']) < EPS)

    print 'OK'
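
A minimal sketch of the behaviour exercised above (assuming 'id' and 'bbox' are the only columns JobList.split strictly requires): one reflection spanning three frames comes back as three single-frame partials.

  from dials.array_family import flex
  from dials.algorithms.integration.integrator import JobList
  r = flex.reflection_table()
  r['id'] = flex.int([0])
  r['bbox'] = flex.int6([(0, 2, 0, 2, 0, 3)])
  jobs = JobList()
  jobs.add((0, 1), (0, 3), 1)  # experiment range, frame range, block size
  jobs.split(r)
  assert len(r) == 3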
Example #5
  def tst_copy(self):
    import copy
    from dials.array_family import flex

    # Create a table
    table = flex.reflection_table([
      ('col1', flex.int(range(10)))])

    # Make a shallow copy of the table
    shallow = copy.copy(table)
    shallow['col2'] = flex.double(range(10))
    assert(table.ncols() == 2)
    assert(table.is_consistent())
    print 'OK'

    # Make a deep copy of the table
    deep = copy.deepcopy(table)
    deep['col3'] = flex.std_string(10)
    assert(table.ncols() == 2)
    assert(deep.ncols() == 3)
    assert(table.is_consistent())
    assert(deep.is_consistent())

    table2 = table.copy()
    table2['col3'] = flex.std_string(10)
    assert(table.ncols() == 2)
    assert(table2.ncols() == 3)
    assert(table.is_consistent())
    assert(table2.is_consistent())
    print 'OK'
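
Note what the asserts establish: copy.copy() shares the underlying column store, so a column added through the shallow copy is also visible in the original (table.ncols() becomes 2), while copy.deepcopy() and table.copy() both produce independent tables.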
Example #6
  def tst_serialize(self):

    from dials.array_family import flex

    # The columns as lists
    c1 = list(range(10))
    c2 = list(range(10))
    c3 = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'i', 'j', 'k']

    # Create a table with some elements
    table = flex.reflection_table()
    table['col1'] = flex.int(c1)
    table['col2'] = flex.double(c2)
    table['col3'] = flex.std_string(c3)

    # Pickle, then unpickle
    import cPickle as pickle
    obj = pickle.dumps(table)
    new_table = pickle.loads(obj)
    assert(new_table.is_consistent())
    assert(new_table.nrows() == 10)
    assert(new_table.ncols() == 3)
    assert(all(a == b for a, b in zip(new_table['col1'], c1)))
    assert(all(a == b for a, b in zip(new_table['col2'], c2)))
    assert(all(a == b for a, b in zip(new_table['col3'], c3)))
    print 'OK'
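
cPickle is the Python 2 module; under Python 3 the same round-trip would be written with the standard pickle module, e.g. (a sketch):

  import pickle
  new_table = pickle.loads(pickle.dumps(table, protocol=pickle.HIGHEST_PROTOCOL))
  assert new_table.is_consistent()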
Example #7
  def decode(self, handle):
    '''Decode the reflection data.'''
    from dials.array_family import flex

    # Get the group containing the reflection data
    g = handle['entry/data_processing']

    # Create the list of reflections
    rl = flex.reflection_table(int(g.attrs['num_reflections']))

    # Decode all the columns
    for key in g:
      item = g[key]
      name = item.attrs['flex_type']
      if name == 'shoebox':
        flex_type = getattr(flex, name)
        data = item['data']
        mask = item['mask']
        background = item['background']
        col = flex_type(len(rl))
        for i in range(len(rl)):
          col[i].data = flex.double(data['%d' % i].value)
          col[i].mask = flex.int(mask['%d' % i].value)
          col[i].background = flex.double(background['%d' % i].value)

      else:
        flex_type = getattr(flex, name)
        col = self.decode_column(flex_type, item)
      rl[str(key)] = col

    # Return the list of reflections
    return rl
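
Note: dataset.value is the legacy h5py accessor; on current h5py the equivalent spelling is dataset[()], so a modern port would read e.g. flex.double(data['%d' % i][()]).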
Example #8
 def filter_frames(self, data):
     # Imports for this snippet (paths assumed as in DIALS/SciPy at the
     # time of writing).
     import numpy as np
     from scipy.ndimage import gaussian_filter
     from dials.array_family import flex
     from dials.algorithms.image.threshold import DispersionThreshold

     data = data[0]
     lp = gaussian_filter(data, 100)
     hp = data - lp  # poor man's background subtraction
     hp -= np.min(hp)
     sh = hp.shape
     hp = hp.astype('uint32')
     hp = flex.int(hp)

     mask = flex.bool(flex.grid(sh), True)
     result1 = flex.bool(flex.grid(sh), False)
     spots = np.zeros(sh, dtype='bool')

     for i in range(3, self.parameters['spotsize'], 5):
         algorithm = DispersionThreshold(sh, (i, i), 1, 1, 0, -1)
         algorithm(hp, mask, result1)
         spots = spots + result1.as_numpy_array()
     return [data, spots * data]
  def tst_count_mask_values(self):
    from dials.model.data import Shoebox
    from random import randint, sample
    from dials.array_family import flex

    shoebox = flex.shoebox(10)
    num = flex.int(10)
    value = (1 << 2)
    for i in range(10):
      x0 = randint(0, 90)
      y0 = randint(0, 90)
      z0 = randint(0, 90)
      x1 = randint(1, 10) + x0
      y1 = randint(1, 10) + y0
      z1 = randint(1, 10) + z0

      shoebox[i] = Shoebox((x0, x1, y0, y1, z0, z1))
      shoebox[i].allocate()
      maxnum = len(shoebox[i].mask)
      num[i] = randint(1, maxnum)
      indices = sample(list(range(maxnum)), num[i])
      for j in indices:
        shoebox[i].mask[j] = value

    assert(shoebox.count_mask_values(value) == num)

    # Test passed
    print 'OK'
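
A minimal sketch of the call being tested, for a single 2x2x1 shoebox with one mask pixel set to the queried code:

  from dials.model.data import Shoebox
  from dials.array_family import flex
  sb = flex.shoebox(1)
  sb[0] = Shoebox((0, 2, 0, 2, 0, 1))
  sb[0].allocate()
  sb[0].mask[0] = 1 << 2
  assert list(sb.count_mask_values(1 << 2)) == [1]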
Example #10
  def tst_for_dataset(self, creator, filename):
    from dials.array_family import flex
    from dials.algorithms.shoebox import MaskCode
    print filename
    rlist = flex.reflection_table.from_pickle(filename)
    shoebox = rlist['shoebox']

    # FIXME doesn't work for single image
    zr = flex.int([s.bbox[5]-s.bbox[4] for s in shoebox])
    rlist = rlist.select(zr > 1)
    shoebox = rlist['shoebox']

    background = [sb.background.deep_copy() for sb in shoebox]
    success = creator(shoebox)
    assert(success.count(True) == len(success))
    diff = []
    for i in range(len(rlist)):
      mask = flex.bool([(m & MaskCode.Foreground) != 0 for m in shoebox[i].mask])
      px1 = background[i].select(mask)
      px2 = shoebox[i].background.select(mask)
      den = max([flex.mean(px1), 1.0])
      diff.append(flex.mean(px2 - px1) / den)
    diff = flex.double(diff)
    mv = flex.mean_and_variance(diff)
    mean = mv.mean()
    sdev = mv.unweighted_sample_standard_deviation()
    try:
      assert(abs(mean) < 0.01)
    except Exception:
      print "Mean: %f, Sdev: %f", mean, sdev
      #from matplotlib import pylab
      #pylab.hist(diff)
      #pylab.show()
      raise
  def prepare_reflection_list(self,detector):

    spots = self.triclinic.get_observations_with_outlier_removal()
    ordinary_python_list_of_indexed_observations = [
      {
        "id":0,
        "panel":0,
        "miller_index":item["pred"],
        "xyzobs.px.value":(spots[item["spot"]].ctr_mass_x(),spots[item["spot"]].ctr_mass_y(),0.0),
        "xyzobs.px.variance":(0.25,0.25,0.25),
        "spotfinder_lookup":item["spot"]
      }
      for item in self.triclinic_pairs
    ]

    self.length = len(ordinary_python_list_of_indexed_observations)
    R= flex.reflection_table.empty_standard(self.length)

    R['miller_index'] = flex.miller_index([item["miller_index"] for item in ordinary_python_list_of_indexed_observations])
    R['xyzobs.px.value'] = flex.vec3_double([item["xyzobs.px.value"] for item in ordinary_python_list_of_indexed_observations])
    R['xyzobs.px.variance'] = flex.vec3_double([item["xyzobs.px.variance"] for item in ordinary_python_list_of_indexed_observations])
    R['spotfinder_lookup'] = flex.int([item["spotfinder_lookup"] for item in ordinary_python_list_of_indexed_observations])

    R['xyzobs.mm.value'] = flex.vec3_double(self.length)
    R['xyzobs.mm.variance'] = flex.vec3_double(self.length)

    pxlsz = detector[0].get_pixel_size()

    for idx in xrange(self.length):
      R['xyzobs.mm.value'][idx] = (R['xyzobs.px.value'][idx][0]*pxlsz[0], R['xyzobs.px.value'][idx][1]*pxlsz[1], R['xyzobs.px.value'][idx][2])
      R['xyzobs.mm.variance'][idx] = (R['xyzobs.px.variance'][idx][0]*pxlsz[0], R['xyzobs.px.variance'][idx][1]*pxlsz[1], R['xyzobs.px.variance'][idx][2])

    return R
  def generate_reflections(self):
    """Use reeke_model to generate indices of reflections near to the Ewald
    sphere that might be observed on a still image. Build a reflection_table
    of these."""
    from cctbx.sgtbx import space_group_info

    space_group_type = space_group_info("P 1").group().type()

    # create a ReekeIndexGenerator
    UB = self.crystal.get_U() * self.crystal.get_B()
    axis = self.goniometer.get_rotation_axis()
    s0 = self.beam.get_s0()
    dmin = 1.5
    # use the same UB at the beginning and end - the margin parameter ensures
    # we still have indices close to the Ewald sphere generated
    from dials.algorithms.spot_prediction import ReekeIndexGenerator
    r = ReekeIndexGenerator(UB, UB, space_group_type, axis, s0, dmin=1.5, margin=1)

    # generate indices
    hkl = r.to_array()
    nref = len(hkl)

    # create a reflection table
    from dials.array_family import flex
    table = flex.reflection_table()
    table['flags'] = flex.size_t(nref, 0)
    table['id']    = flex.int(nref, 0)
    table['panel'] = flex.size_t(nref, 0)
    table['miller_index'] = flex.miller_index(hkl)
    table['entering']     = flex.bool(nref, True)
    table['s1']           = flex.vec3_double(nref)
    table['xyzcal.mm']    = flex.vec3_double(nref)
    table['xyzcal.px']    = flex.vec3_double(nref)

    return table
Example #13
 def predict(self):
   # Populate the reflection table with predictions
   self.predicted = flex.reflection_table.from_predictions(
     self.experiment,
     force_static=True,
     dmin=self.d_min)
   self.predicted['id'] = flex.int(len(self.predicted), 0)
    def plot_scale_vs_x_y(self):
        from scitbx.array_family import flex
        from math import ceil

        print "Getting scale"
        points = [(int(xyz[1] / 8), int(xyz[0] / 8)) for xyz in self.xyz]
        scale = [x / d for x, d in zip(self.i_xds, self.i_dials)]

        print "Creating Grid"
        image_size = self.sweep.get_detector()[0].get_image_size()[::-1]
        image_size = (int(ceil(image_size[0] / 8)), int(ceil(image_size[1] / 8)))
        grid = flex.double(flex.grid(image_size))
        count = flex.int(flex.grid(image_size))
        for p, s in zip(points, scale):
            grid[p] += s
            count[p] += 1
        for i in range(len(grid)):
            if count[i] > 0:
                grid[i] /= count[i]

        # grid_points = [(j,i) for j in range(image_size[0]) for i in range(image_size[1])]

        # grid = griddata(points, scale, grid_points)
        # grid.shape = image_size
        from matplotlib import pyplot

        fig, ax = pyplot.subplots()
        pyplot.title("scale vs x/y")
        cax = pyplot.imshow(grid.as_numpy_array())
        cbar = fig.colorbar(cax)
        pyplot.savefig("plot-scale-vs-xy.png")
        pyplot.close()
Example #15
  def spot_count_per_panel(self, rlist):
    ''' Analyse the spot count per panel. '''
    from os.path import join
    panel = rlist['panel']
    if flex.max(panel) == 0:
      # only one panel, don't bother generating a plot
      return

    n_panels = int(flex.max(panel)) + 1
    spot_count_per_panel = flex.int()
    for i in range(n_panels):
      sel = (panel >= i) & (panel < (i+1))
      spot_count_per_panel.append(sel.count(True))

    from matplotlib import pyplot
    fig = pyplot.figure()
    ax = fig.add_subplot(111)
    ax.set_title("Spot count per panel")
    ax.scatter(
      list(range(len(spot_count_per_panel))), spot_count_per_panel,
      s=10, color='blue', marker='o', alpha=0.4)
    ax.set_xlabel("Panel #")
    ax.set_ylabel("# spots")
    pyplot.savefig(join(self.directory, "spots_per_panel.png"))
    pyplot.close()
Example #16
  def spot_count_per_image(self, rlist):
    ''' Analyse the spot count per image. '''
    from os.path import join
    x,y,z = rlist['xyzobs.px.value'].parts()
    max_z = int(math.ceil(flex.max(z)))

    ids = rlist['id']
    spot_count_per_image = []
    for j in range(flex.max(ids)+1):
      spot_count_per_image.append(flex.int())
      zsel = z.select(ids == j)
      for i in range(max_z):
        sel = (zsel >= i) & (zsel < (i+1))
        spot_count_per_image[j].append(sel.count(True))

    colours = ['blue', 'red', 'green', 'orange', 'purple', 'black'] * 10
    assert len(spot_count_per_image) <= len(colours)

    from matplotlib import pyplot
    fig = pyplot.figure()
    ax = fig.add_subplot(111)
    ax.set_title("Spot count per image")
    for j in range(len(spot_count_per_image)):
      ax.scatter(
        list(range(len(spot_count_per_image[j]))), spot_count_per_image[j],
        s=5, color=colours[j], marker='o', alpha=0.4)
    ax.set_xlabel("Image #")
    ax.set_ylabel("# spots")
    pyplot.savefig(join(self.directory, "spots_per_image.png"))
    pyplot.close()
  def run(self):
    from dials.algorithms.profile_model.modeller import ProfileModellerIface
    from dials.algorithms.profile_model.modeller import MultiExpProfileModeller
    from dials.array_family import flex

    class Modeller(ProfileModellerIface):

      def __init__(self, index, expected):
        self.index = index
        self.accumulated = False
        self.finalized = False
        self.expected = expected
        super(Modeller, self).__init__()

      def model(self, reflections):
        assert(reflections['id'].all_eq(self.index))
        assert(len(reflections) == self.expected)

      def accumulate(self, other):
        self.accumulated = True
        assert(self.index == other.index)

      def finalize(self):
        assert(self.accumulated == True)
        self.finalized = True

    # The expected number of reflections
    expected = [100, 200, 300, 400, 500]

    # Create some reflections
    reflections = flex.reflection_table()
    reflections["id"] = flex.int()
    for idx in range(len(expected)):
      for n in range(expected[idx]):
        reflections.append({
          "id" : idx
        })

    # Create two modellers
    modeller1 = MultiExpProfileModeller()
    modeller2 = MultiExpProfileModeller()
    for idx in range(len(expected)):
      modeller1.add(Modeller(idx, expected[idx]))
      modeller2.add(Modeller(idx, expected[idx]))

    # Model the reflections
    modeller1.model(reflections)
    modeller2.model(reflections)

    # Accumulate
    modeller1.accumulate(modeller2)

    # Finalize
    modeller1.finalize()

    # Check finalized
    assert(modeller1.finalized)

    # Test passed
    print 'OK'
 def run(self):
   from dials.array_family import flex
   #self.test_for_reference()
   for filename in self.refl_filenames:
     refl = flex.reflection_table.from_pickle(filename)
     refl.compute_partiality(self.experiments)
     refl['id'] = flex.int(len(refl),0)
     self.test_for_reflections(refl, filename)
def plot_multirun_stats(runs,
                        run_numbers,
                        d_min,
                        ratio_cutoff=1,
                        n_strong_cutoff=40,
                        run_tags=[],
                        run_statuses=[],
                        interactive=False,
                        compress_runs=True,
                        xsize=30,
                        ysize=10,
                        high_vis=False):
  tset = flex.double()
  two_theta_low_set = flex.double()
  two_theta_high_set = flex.double()
  nset = flex.int()
  I_sig_I_low_set = flex.double()
  I_sig_I_high_set = flex.double()
  boundaries = []
  lengths = []
  runs_with_data = []
  offset = 0
  for idx in xrange(len(runs)):
    r = runs[idx]
    if len(r[0]) > 0:
      if compress_runs:
        tslice = r[0] - r[0][0] + offset
        offset += (r[0][-1] - r[0][0])
      else:
        tslice = r[0]
      last_end = r[0][-1]
      tset.extend(tslice)
      two_theta_low_set.extend(r[1])
      two_theta_high_set.extend(r[2])
      nset.extend(r[3])
      I_sig_I_low_set.extend(r[4])
      I_sig_I_high_set.extend(r[5])
      boundaries.append(tslice[0])
      boundaries.append(tslice[-1])
      lengths.append(len(tslice))
      runs_with_data.append(run_numbers[idx])
    else:
      boundaries.extend([None]*2)
  stats_tuple = get_run_stats(tset,
                              two_theta_low_set,
                              two_theta_high_set,
                              nset,
                              I_sig_I_low_set,
                              I_sig_I_high_set,
                              tuple(boundaries),
                              tuple(lengths),
                              runs_with_data,
                              ratio_cutoff=ratio_cutoff,
                              n_strong_cutoff=n_strong_cutoff)
  png = plot_run_stats(stats_tuple, d_min, run_tags=run_tags, run_statuses=run_statuses, interactive=interactive,
    xsize=xsize, ysize=ysize, high_vis=high_vis)
  return png
Example #20
  def __call__(self, params, options):
    ''' Import the integrate.hkl file. '''

    from iotbx.xds import integrate_hkl
    from dials.array_family import flex
    from dials.util.command_line import Command
    from cctbx import sgtbx

    # Get the unit cell to calculate the resolution
    uc = self._experiment.crystal.get_unit_cell()

    # Read the INTEGRATE.HKL file
    Command.start('Reading INTEGRATE.HKL')
    handle = integrate_hkl.reader()
    handle.read_file(self._integrate_hkl)
    hkl    = flex.miller_index(handle.hkl)
    xyzcal = flex.vec3_double(handle.xyzcal)
    xyzobs = flex.vec3_double(handle.xyzobs)
    iobs   = flex.double(handle.iobs)
    sigma  = flex.double(handle.sigma)
    rlp = flex.double(handle.rlp)
    peak = flex.double(handle.peak) * 0.01
    Command.end('Read %d reflections from INTEGRATE.HKL file.' % len(hkl))

    # Derive the reindex matrix
    rdx = self.derive_reindex_matrix(handle)
    print 'Reindex matrix:\n%d %d %d\n%d %d %d\n%d %d %d' % (rdx.elems)

    # Reindex the reflections
    Command.start('Reindexing reflections')
    cb_op = sgtbx.change_of_basis_op(sgtbx.rt_mx(sgtbx.rot_mx(rdx.elems)))
    hkl = cb_op.apply(hkl)
    Command.end('Reindexed %d reflections' % len(hkl))

    # Create the reflection list
    Command.start('Creating reflection table')
    table = flex.reflection_table()
    table['id'] = flex.int(len(hkl), 0)
    table['panel'] = flex.size_t(len(hkl), 0)
    table['miller_index'] = hkl
    table['xyzcal.px'] = xyzcal
    table['xyzobs.px.value'] = xyzobs
    table['intensity.cor.value'] = iobs
    table['intensity.cor.variance'] = sigma**2
    table['intensity.prf.value'] = iobs * peak / rlp
    table['intensity.prf.variance'] = (sigma * peak / rlp)**2
    table['lp'] = 1.0 / rlp
    table['d'] = flex.double([uc.d(h) for h in hkl])
    Command.end('Created table with {0} reflections'.format(len(table)))

    # Output the table to pickle file
    if params.output.filename is None:
      params.output.filename = 'integrate_hkl.pickle'
    Command.start('Saving reflection table to %s' % params.output.filename)
    table.as_pickle(params.output.filename)
    Command.end('Saved reflection table to %s' % params.output.filename)
Example #21
 def _predict_reflections(self, params, experiments):
   ''' Predict all the reflections. '''
   from dials.array_family import flex
   n_sigma = params.integration.shoebox.n_sigma
   result = flex.reflection_table()
   for i, experiment in enumerate(experiments):
     predicted = flex.reflection_table.from_predictions(experiment)
     predicted['id'] = flex.int(len(predicted), i)
     result.extend(predicted)
   return result
Example #22
  def tst_select(self):

    from dials.array_family import flex

    # The columns as lists
    c1 = list(range(10))
    c2 = list(range(10))
    c3 = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'i', 'j', 'k']

    # Create a table with some elements
    table = flex.reflection_table()
    table['col1'] = flex.int(c1)
    table['col2'] = flex.double(c2)
    table['col3'] = flex.std_string(c3)

    # Select some columns
    new_table = table.select(('col1', 'col2'))
    assert(new_table.nrows() == 10)
    assert(new_table.ncols() == 2)
    assert(all(a == b for a, b in zip(new_table['col1'], c1)))
    assert(all(a == b for a, b in zip(new_table['col2'], c2)))
    print 'OK'

    # Select some columns
    new_table = table.select(flex.std_string(['col1', 'col2']))
    assert(new_table.nrows() == 10)
    assert(new_table.ncols() == 2)
    assert(all(a == b for a, b in zip(new_table['col1'], c1)))
    assert(all(a == b for a, b in zip(new_table['col2'], c2)))
    print 'OK'

    # Select some rows
    index = flex.size_t([0, 1, 5, 8, 9])
    cc1 = [c1[i] for i in index]
    cc2 = [c2[i] for i in index]
    cc3 = [c3[i] for i in index]
    new_table = table.select(index)
    assert(new_table.nrows() == 5)
    assert(new_table.ncols() == 3)
    assert(all(a == b for a, b in zip(new_table['col1'], cc1)))
    assert(all(a == b for a, b in zip(new_table['col2'], cc2)))
    assert(all(a == b for a, b in zip(new_table['col3'], cc3)))
    print 'OK'

    # Select some rows
    index = flex.bool([True, True, False, False, False,
                       True, False, False, True, True])
    new_table = table.select(index)
    assert(new_table.nrows() == 5)
    assert(new_table.ncols() == 3)
    assert(all(a == b for a, b in zip(new_table['col1'], cc1)))
    assert(all(a == b for a, b in zip(new_table['col2'], cc2)))
    assert(all(a == b for a, b in zip(new_table['col3'], cc3)))
    print 'OK'
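
To summarise the three select overloads exercised here: a tuple (or flex.std_string) of column names returns a table restricted to those columns; a flex.size_t of row indices or a flex.bool mask returns a table restricted to those rows.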
def build_np_img(nrow=64, ncol=64):

  data2d = flex.double(flex.grid(nrow, ncol), 0)
  mask2d = flex.int(flex.grid(nrow, ncol), 0)

  for x in range(nrow):
    for y in range(ncol):
      data2d[x, y] += (x * 1.00000000001 + y * 2.5555555555555) * 0.00000000001
      #print "number to see(aprox) =", data2d[x, y]


  return data2d, mask2d
Example #24
  def __call__(self, params, options):
    ''' Import the spot.xds file. '''
    from iotbx.xds import spot_xds
    from dials.util.command_line import Command
    from dials.array_family import flex

    # Read the SPOT.XDS file
    Command.start('Reading SPOT.XDS')
    handle = spot_xds.reader()
    handle.read_file(self._spot_xds)
    centroid = handle.centroid
    intensity = handle.intensity
    try:
      miller_index = handle.miller_index
    except AttributeError:
      miller_index = None
    Command.end('Read {0} spots from SPOT.XDS file.'.format(len(centroid)))

    # Create the reflection list
    Command.start('Creating reflection list')
    table = flex.reflection_table()
    table['id'] = flex.int(len(centroid), 0)
    table['panel'] = flex.size_t(len(centroid), 0)
    if miller_index:
      table['miller_index'] = flex.miller_index(miller_index)
    table['xyzobs.px.value'] = flex.vec3_double(centroid)
    table['intensity.sum.value'] = flex.double(intensity)
    Command.end('Created reflection list')

    # Remove invalid reflections
    Command.start('Removing invalid reflections')
    if miller_index and params.remove_invalid:
      flags = flex.bool([h != (0, 0, 0) for h in table['miller_index']])
      table = table.select(flags)
    Command.end('Removed invalid reflections, %d remaining' % len(table))

    # Fill empty standard columns
    if params.add_standard_columns:
      Command.start('Adding standard columns')
      rt = flex.reflection_table.empty_standard(len(table))
      rt.update(table)
      table = rt
      # set variances to unity
      table['xyzobs.mm.variance'] = flex.vec3_double(len(table), (1,1,1))
      table['xyzobs.px.variance'] = flex.vec3_double(len(table), (1,1,1))
      Command.end('Standard columns added')

    # Output the table to pickle file
    if params.output.filename is None:
      params.output.filename = 'spot_xds.pickle'
    Command.start('Saving reflection table to %s' % params.output.filename)
    table.as_pickle(params.output.filename)
    Command.end('Saved reflection table to %s' % params.output.filename)
Example #25
  def tst_updating(self):

    from dials.array_family import flex

    # The columns as lists
    c1 = list(range(10))
    c2 = list(range(10))
    c3 = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'i', 'j', 'k']

    # Create a table with some elements
    table0 = flex.reflection_table()
    table1 = flex.reflection_table()
    table2 = flex.reflection_table()
    table1['col1'] = flex.int(c1)
    table1['col2'] = flex.double(c2)
    table2['col3'] = flex.std_string(c3)

    # Update from zero columns
    table0.update(table1)
    assert(table0.is_consistent())
    assert(table0.nrows() == 10)
    assert(table0.ncols() == 2)
    print 'OK'

    # Update table1 with table2 columns
    table1.update(table2)
    assert(table1.is_consistent())
    assert(table1.nrows() == 10)
    assert(table1.ncols() == 3)
    assert(table2.is_consistent())
    assert(table2.nrows() == 10)
    assert(table2.ncols() == 1)
    print 'OK'

    # Update table1 with an invalid table
    c3 = ['a', 'b', 'c']

    # Create a table with some elements
    table2 = flex.reflection_table()
    table2['col3'] = flex.std_string(c3)
    try:
      table1.update(table2)
      assert(False)
    except Exception:
      pass

    assert(table1.is_consistent())
    assert(table1.nrows() == 10)
    assert(table1.ncols() == 3)
    assert(table2.is_consistent())
    assert(table2.nrows() == 3)
    assert(table2.ncols() == 1)
    print 'OK'
Example #26
  def tst_init(self):
    from dials.array_family import flex

    # test default
    table = flex.reflection_table()
    assert(table.is_consistent())
    assert(table.nrows() == 0)
    assert(table.ncols() == 0)
    assert(table.empty())
    print 'OK'

    # test with nrows
    table = flex.reflection_table(10)
    assert(table.is_consistent())
    assert(table.nrows() == 10)
    assert(table.ncols() == 0)
    assert(table.empty())
    print 'OK'

    # test with valid columns
    table = flex.reflection_table([
      ('col1', flex.int(10)),
      ('col2', flex.double(10)),
      ('col3', flex.std_string(10))])
    assert(table.is_consistent())
    assert(table.nrows() == 10)
    assert(table.ncols() == 3)
    assert(not table.empty())
    print 'OK'

    # test with invalid columns
    try:
      table = flex.reflection_table([
        ('col1', flex.int(10)),
        ('col2', flex.double(20)),
        ('col3', flex.std_string(10))])
      assert(False)
    except Exception:
      pass
    print 'OK'
Example #27
  def _filter_by_distance(self, nn, dist):
    '''
    Filter the matches by distance.

    :param nn: The nearest neighbour list
    :param dist: The distances

    :returns: A reduced list of nearest neighbours

    '''
    from scitbx.array_family import flex
    index = range(len(nn))
    return flex.int([i for i in index if dist[i] <= self._max_separation])
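
A standalone sketch of the same idiom with hypothetical values (max_separation standing in for self._max_separation):

  from scitbx.array_family import flex
  dist = [0.2, 1.5, 0.9]
  max_separation = 1.0
  keep = flex.int([i for i in range(len(dist)) if dist[i] <= max_separation])
  assert list(keep) == [0, 2]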
Example #28
  def tst_delete(self):
    from dials.array_family import flex

    # Test del item
    table = flex.reflection_table()
    table['col1'] = flex.int([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    table['col2'] = flex.int([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    table['col3'] = flex.int([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    del table['col3']
    assert(table.is_consistent())
    assert(table.nrows() == 10)
    assert(table.ncols() == 2)
    assert(not "col3" in table)
    print 'OK'

    # Test del row
    del table[5]
    assert(table.is_consistent())
    assert(table.nrows() == 9)
    assert(table.ncols() == 2)
    assert(all(a==b for a, b in zip(list(table['col1']),
      [0, 1, 2, 3, 4, 6, 7, 8, 9])))
    print 'OK'

    # Test del slice
    del table[0:10:2]
    assert(table.is_consistent())
    assert(table.nrows() == 4)
    assert(table.ncols() == 2)
    assert(all(a==b for a, b in zip(list(table['col1']),
      [1, 3, 6, 8])))
    print 'OK'

    # Test del slice
    del table[:]
    assert(table.is_consistent())
    assert(table.nrows() == 0)
    assert(table.ncols() == 2)
    print 'OK'
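
del thus works at three granularities: a column key, a single row index, and a slice (del table[:] clears every row while keeping the columns).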
  def __init__(self, experiment, reflections,
               expected_miller_indices):

    from dials.algorithms.indexing \
         import index_reflections, index_reflections_local
    import copy

    # index reflections using simple "global" method
    self.reflections_global = copy.deepcopy(reflections)
    self.reflections_global['id'] = flex.int(len(self.reflections_global), -1)
    self.reflections_global['imageset_id'] = flex.int(len(self.reflections_global), 0)
    index_reflections(
      self.reflections_global, ExperimentList([experiment]))
    non_zero_sel = (self.reflections_global['miller_index'] != (0,0,0))
    assert self.reflections_global['id'].select(~non_zero_sel).all_eq(-1)
    self.misindexed_global = (
      expected_miller_indices == self.reflections_global['miller_index']).select(
        non_zero_sel).count(False)
    self.correct_global = (
      expected_miller_indices == self.reflections_global['miller_index']).count(True)


    # index reflections using xds-style "local" method
    self.reflections_local = copy.deepcopy(reflections)
    self.reflections_local['id'] = flex.int(len(self.reflections_local), -1)
    index_reflections_local(
      self.reflections_local, ExperimentList([experiment]))
    non_zero_sel = (self.reflections_local['miller_index'] != (0,0,0))
    assert self.reflections_local['id'].select(~non_zero_sel).all_eq(-1)
    self.misindexed_local = (
      expected_miller_indices == self.reflections_local['miller_index']).select(
        non_zero_sel).count(False)
    self.correct_local = (
      expected_miller_indices == self.reflections_local['miller_index']).count(True)

    print self.misindexed_global, self.correct_global, len(self.reflections_global)
    print self.misindexed_local, self.correct_local, len(self.reflections_local)
Example #30
  def __call__(self, datablock):
    '''
    Do the spot finding.

    :param datablock: The datablock to process
    :return: The observed spots

    '''
    from dials.array_family import flex
    import cPickle as pickle

    # Loop through all the imagesets and find the strong spots
    reflections = flex.reflection_table()
    for i, imageset in enumerate(datablock.extract_imagesets()):

      # Find the strong spots in the sweep
      logger.info('-' * 80)
      logger.info('Finding strong spots in imageset %d' % i)
      logger.info('-' * 80)
      logger.info('')
      table, hot_mask = self._find_spots_in_imageset(imageset)
      table['id'] = flex.int(table.nrows(), i)
      reflections.extend(table)

      # Write a hot pixel mask
      if self.write_hot_mask:
        if imageset.external_lookup.mask.data is not None:
          and_mask = []
          for m1, m2 in zip(imageset.external_lookup.mask.data, hot_mask):
            and_mask.append(m1 & m2)
          imageset.external_lookup.mask.data = tuple(and_mask)
        else:
          imageset.external_lookup.mask.data = hot_mask
        imageset.external_lookup.mask.filename = "hot_mask_%d.pickle" % i

        # Write the hot mask
        with open(imageset.external_lookup.mask.filename, "wb") as outfile:
          pickle.dump(hot_mask, outfile, protocol=pickle.HIGHEST_PROTOCOL)

    # Set the strong spot flag
    reflections.set_flags(
      flex.size_t_range(len(reflections)),
      reflections.flags.strong)

    # Check for overloads
    reflections.is_overloaded(datablock)

    # Return the reflections
    return reflections
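
The flex.size_t_range(len(reflections)) selection marks every row as strong; the same rows could later be recovered with reflections.get_flags(reflections.flags.strong), the accessor used elsewhere in this collection.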
Example #31
def write(handle, key, data):
    from dials.array_family import flex
    if key == 'miller_index':
        col1, col2, col3 = zip(*list(data))
        col1 = flex.int(col1)
        col2 = flex.int(col2)
        col3 = flex.int(col3)
        dsc1 = 'The h component of the miller index'
        dsc2 = 'The k component of the miller index'
        dsc3 = 'The l component of the miller index'
        make_int(handle, "h", col1, dsc1)
        make_int(handle, "k", col2, dsc2)
        make_int(handle, "l", col3, dsc3)
    elif key == 'id':
        col = data
        dsc = 'The experiment id'
        make_int(handle, "id", col, dsc)
    elif key == 'partial_id':
        col = data
        desc = 'The reflection id'
        make_uint(handle, "reflection_id", col, desc)
    elif key == 'entering':
        col = data
        dsc = 'Entering or exiting the Ewald sphere'
        make_bool(handle, 'entering', col, dsc)
    elif key == 'flags':
        col = data
        dsc = 'Status of the reflection in processing'
        make_uint(handle, 'flags', col, dsc)
    elif key == 'panel':
        col = data
        dsc = 'The detector module on which the reflection was recorded'
        make_uint(handle, 'det_module', col, dsc)
    elif key == 'd':
        col = data
        dsc = 'The resolution of the reflection'
        make_float(handle, 'd', col, dsc)
    elif key == 'partiality':
        col = data
        dsc = 'The partiality of the reflection'
        make_float(handle, 'partiality', col, dsc)
    elif key == 'xyzcal.px':
        col1, col2, col3 = data.parts()
        dsc1 = 'The predicted bragg peak fast pixel location'
        dsc2 = 'The predicted bragg peak slow pixel location'
        dsc3 = 'The predicted bragg peak frame number'
        make_float(handle, 'predicted_px_x', col1, dsc1)
        make_float(handle, 'predicted_px_y', col2, dsc2)
        make_float(handle, 'predicted_frame', col3, dsc3)
    elif key == 'xyzcal.mm':
        col1, col2, col3 = data.parts()
        dsc1 = 'The predicted bragg peak fast millimeter location'
        dsc2 = 'The predicted bragg peak slow millimeter location'
        dsc3 = 'The predicted bragg peak rotation angle number'
        make_float(handle, 'predicted_x', col1, dsc1, units='mm')
        make_float(handle, 'predicted_y', col2, dsc2, units='mm')
        make_float(handle, 'predicted_phi', col3, dsc3, units='rad')
    elif key == 'bbox':
        d = data.as_int()
        d.reshape(flex.grid((len(data)), 6))
        make_int(handle, 'bounding_box', d, 'The reflection bounding box')
    elif key == 'xyzobs.px.value':
        col1, col2, col3 = data.parts()
        dsc1 = 'The observed centroid fast pixel value'
        dsc2 = 'The observed centroid slow pixel value'
        dsc3 = 'The observed centroid frame value'
        make_float(handle, 'observed_px_x', col1, dsc1)
        make_float(handle, 'observed_px_y', col2, dsc2)
        make_float(handle, 'observed_frame', col3, dsc3)
    elif key == 'xyzobs.px.variance':
        col1, col2, col3 = data.parts()
        dsc1 = 'The observed centroid fast pixel variance'
        dsc2 = 'The observed centroid slow pixel variance'
        dsc3 = 'The observed centroid frame variance'
        make_float(handle, 'observed_px_x_var', col1, dsc1)
        make_float(handle, 'observed_px_y_var', col2, dsc2)
        make_float(handle, 'observed_frame_var', col3, dsc3)
    elif key == 'xyzobs.mm.value':
        col1, col2, col3 = data.parts()
        dsc1 = 'The observed centroid fast pixel value'
        dsc2 = 'The observed centroid slow pixel value'
        dsc3 = 'The observed centroid phi value'
        make_float(handle, 'observed_x', col1, dsc1, units='mm')
        make_float(handle, 'observed_y', col2, dsc2, units='mm')
        make_float(handle, 'observed_phi', col3, dsc3, units='rad')
    elif key == 'xyzobs.mm.variance':
        col1, col2, col3 = data.parts()
        dsc1 = 'The observed centroid fast pixel variance'
        dsc2 = 'The observed centroid slow pixel variance'
        dsc3 = 'The observed centroid phi variance'
        make_float(handle, 'observed_x_var', col1, dsc1, units='mm')
        make_float(handle, 'observed_y_var', col2, dsc2, units='mm')
        make_float(handle, 'observed_phi_var', col3, dsc3, units='rad')
    elif key == 'background.mean':
        col = data
        dsc = 'The mean background value'
        make_float(handle, 'background_mean', col, dsc)
    elif key == 'intensity.sum.value':
        col = data
        dsc = 'The value of the summed intensity'
        make_float(handle, 'int_sum', col, dsc)
    elif key == 'intensity.sum.variance':
        col = data
        dsc = 'The variance of the summed intensity'
        make_float(handle, 'int_sum_var', col, dsc)
    elif key == 'intensity.prf.value':
        col = data
        dsc = 'The value of the profile fitted intensity'
        make_float(handle, 'int_prf', col, dsc)
    elif key == 'intensity.prf.variance':
        col = data
        dsc = 'The variance of the profile fitted intensity'
        make_float(handle, 'int_prf_var', col, dsc)
    elif key == 'profile.correlation':
        col = data
        dsc = 'Profile fitting correlations'
        make_float(handle, 'prf_cc', col, dsc)
    elif key == 'lp':
        col = data
        dsc = 'The lorentz-polarization correction factor'
        make_float(handle, 'lp', col, dsc)
    elif key == 'num_pixels.background':
        col = data
        dsc = 'Number of background pixels'
        make_int(handle, 'num_bg', col, dsc)
    elif key == 'num_pixels.background_used':
        col = data
        dsc = 'Number of background pixels used'
        make_int(handle, 'num_bg_used', col, dsc)
    elif key == 'num_pixels.foreground':
        col = data
        dsc = 'Number of foreground pixels'
        make_int(handle, 'num_fg', col, dsc)
    elif key == 'num_pixels.valid':
        col = data
        dsc = 'Number of valid pixels'
        make_int(handle, 'num_valid', col, dsc)
    elif key == 'profile.rmsd':
        col = data
        dsc = 'Profile rmsd'
        make_float(handle, 'prf_rmsd', col, dsc)
    else:
        raise KeyError('Column %s not written to file' % key)
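
Any key not handled by the chain above raises KeyError, so a new reflection-table column must be given an explicit encoding here before it can be written.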
Example #32
    def run(self, experiments, reflections):

        self.logger.log_step_time("POSTREFINEMENT")

        if not self.params.postrefinement.enable:
            self.logger.log("Postrefinement was not done")
            if self.mpi_helper.rank == 0:
                self.logger.main_log("Postrefinement was not done")
            return experiments, reflections

        target_symm = symmetry(
            unit_cell=self.params.scaling.unit_cell,
            space_group_info=self.params.scaling.space_group)
        i_model = self.params.scaling.i_model
        miller_set = self.params.scaling.miller_set

        # Ensure that match_multi_indices() will return identical results
        # when a frame's observations are matched against the
        # pre-generated Miller set, self.miller_set, and the reference
        # data set, self.i_model.  The implication is that the same match
        # can be used to map Miller indices to array indices for intensity
        # accumulation, and for determination of the correlation
        # coefficient in the presence of a scaling reference.
        assert len(i_model.indices()) == len(miller_set.indices())
        assert (i_model.indices() == miller_set.indices()).count(False) == 0

        new_experiments = ExperimentList()
        new_reflections = flex.reflection_table()

        experiments_rejected_by_reason = {}  # reason:how_many_rejected

        for experiment in experiments:

            exp_reflections = reflections.select(
                reflections['exp_id'] == experiment.identifier)

            # Build a miller array for the experiment reflections with original miller indexes
            exp_miller_indices_original = miller.set(
                target_symm, exp_reflections['miller_index'],
                not self.params.merging.merge_anomalous)
            observations_original_index = miller.array(
                exp_miller_indices_original,
                exp_reflections['intensity.sum.value'],
                flex.double(
                    flex.sqrt(exp_reflections['intensity.sum.variance'])))

            assert exp_reflections.size() == exp_miller_indices_original.size()
            assert observations_original_index.size(
            ) == exp_miller_indices_original.size()

            # Build a miller array for the experiment reflections with asu miller indexes
            exp_miller_indices_asu = miller.set(
                target_symm, exp_reflections['miller_index_asymmetric'], True)
            observations = miller.array(
                exp_miller_indices_asu, exp_reflections['intensity.sum.value'],
                flex.double(
                    flex.sqrt(exp_reflections['intensity.sum.variance'])))

            matches = miller.match_multi_indices(
                miller_indices_unique=miller_set.indices(),
                miller_indices=observations.indices())

            pair1 = flex.int([pair[1] for pair in matches.pairs()
                              ])  # refers to the observations
            pair0 = flex.int([pair[0] for pair in matches.pairs()
                              ])  # refers to the model

            assert exp_reflections.size() == exp_miller_indices_original.size()
            assert observations_original_index.size(
            ) == exp_miller_indices_original.size()

            # narrow things down to the set that matches, only
            observations_pair1_selected = observations.customized_copy(
                indices=flex.miller_index(
                    [observations.indices()[p] for p in pair1]),
                data=flex.double([observations.data()[p] for p in pair1]),
                sigmas=flex.double([observations.sigmas()[p] for p in pair1]))

            observations_original_index_pair1_selected = observations_original_index.customized_copy(
                indices=flex.miller_index(
                    [observations_original_index.indices()[p] for p in pair1]),
                data=flex.double(
                    [observations_original_index.data()[p] for p in pair1]),
                sigmas=flex.double(
                    [observations_original_index.sigmas()[p] for p in pair1]))

            I_observed = observations_pair1_selected.data()
            MILLER = observations_original_index_pair1_selected.indices()

            ORI = crystal_orientation(experiment.crystal.get_A(),
                                      basis_type.reciprocal)
            Astar = matrix.sqr(ORI.reciprocal_matrix())
            Astar_from_experiment = matrix.sqr(experiment.crystal.get_A())
            assert Astar == Astar_from_experiment

            WAVE = experiment.beam.get_wavelength()
            BEAM = matrix.col((0.0, 0.0, -1. / WAVE))
            BFACTOR = 0.
            MOSAICITY_DEG = experiment.crystal.get_half_mosaicity_deg()
            DOMAIN_SIZE_A = experiment.crystal.get_domain_size_ang()

            # calculation of correlation here
            I_reference = flex.double(
                [i_model.data()[pair[0]] for pair in matches.pairs()])
            I_invalid = flex.bool(
                [i_model.sigmas()[pair[0]] < 0. for pair in matches.pairs()])
            use_weights = False  # New facility for getting variance-weighted correlation

            if use_weights:
                # variance weighting
                I_weight = flex.double([
                    1. / (observations_pair1_selected.sigmas()[pair[1]])**2
                    for pair in matches.pairs()
                ])
            else:
                I_weight = flex.double(
                    len(observations_pair1_selected.sigmas()), 1.)

            I_weight.set_selected(I_invalid, 0.)
            """Explanation of 'include_negatives' semantics as originally implemented in cxi.merge postrefinement:
         include_negatives = True
         + and - reflections both used for Rh distribution for initial estimate of RS parameter
         + and - reflections both used for calc/obs correlation slope for initial estimate of G parameter
         + and - reflections both passed to the refinery and used in the target function (makes sense if
                             you look at it from a certain point of view)

         include_negatives = False
         + and - reflections both used for Rh distribution for initial estimate of RS parameter
         +       reflections only used for calc/obs correlation slope for initial estimate of G parameter
         + and - reflections both passed to the refinery and used in the target function (makes sense if
                             you look at it from a certain point of view)
      """

            # RB: By design, for MPI-Merge "include negatives" is implicitly True
            SWC = simple_weighted_correlation(I_weight, I_reference,
                                              I_observed)
            if self.params.output.log_level == 0:
                self.logger.log("Old correlation is: %f" % SWC.corr)

            if self.params.postrefinement.algorithm == "rs":

                Rhall = flex.double()

                for mill in MILLER:
                    H = matrix.col(mill)
                    Xhkl = Astar * H
                    Rh = (Xhkl + BEAM).length() - (1. / WAVE)
                    Rhall.append(Rh)

                Rs = math.sqrt(flex.mean(Rhall * Rhall))

                RS = 1. / 10000.  # reciprocal effective domain size of 1 micron
                RS = Rs  # try this empirically determined approximate, monochrome, a-mosaic value
                current = flex.double([SWC.slope, BFACTOR, RS, 0., 0.])

                parameterization_class = rs_parameterization
                refinery = rs_refinery(ORI=ORI,
                                       MILLER=MILLER,
                                       BEAM=BEAM,
                                       WAVE=WAVE,
                                       ICALCVEC=I_reference,
                                       IOBSVEC=I_observed)

            elif self.params.postrefinement.algorithm == "eta_deff":

                eta_init = 2. * MOSAICITY_DEG * math.pi / 180.
                D_eff_init = 2. * DOMAIN_SIZE_A
                current = flex.double(
                    [SWC.slope, BFACTOR, eta_init, 0., 0., D_eff_init])

                parameterization_class = eta_deff_parameterization
                refinery = eta_deff_refinery(ORI=ORI,
                                             MILLER=MILLER,
                                             BEAM=BEAM,
                                             WAVE=WAVE,
                                             ICALCVEC=I_reference,
                                             IOBSVEC=I_observed)

            func = refinery.fvec_callable(parameterization_class(current))
            functional = flex.sum(func * func)

            if self.params.output.log_level == 0:
                self.logger.log("functional: %f" % functional)

            self.current = current
            self.parameterization_class = parameterization_class
            self.refinery = refinery

            self.observations_pair1_selected = observations_pair1_selected
            self.observations_original_index_pair1_selected = observations_original_index_pair1_selected

            error_detected = False

            try:
                self.run_plain()

                result_observations_original_index, result_observations, result_matches = self.result_for_cxi_merge(
                )

                assert result_observations_original_index.size(
                ) == result_observations.size()
                assert result_matches.pairs().size(
                ) == result_observations_original_index.size()

            except (AssertionError, ValueError, RuntimeError) as e:
                error_detected = True
                reason = repr(e)
                if not reason:
                    reason = "Unknown error"
                if not reason in experiments_rejected_by_reason:
                    experiments_rejected_by_reason[reason] = 1
                else:
                    experiments_rejected_by_reason[reason] += 1

            if not error_detected:
                new_experiments.append(experiment)

                new_exp_reflections = flex.reflection_table()
                new_exp_reflections[
                    'miller_index_asymmetric'] = flex.miller_index(
                        result_observations.indices())
                new_exp_reflections['intensity.sum.value'] = flex.double(
                    result_observations.data())
                new_exp_reflections['intensity.sum.variance'] = flex.double(
                    flex.pow(result_observations.sigmas(), 2))
                new_exp_reflections['exp_id'] = flex.std_string(
                    len(new_exp_reflections), experiment.identifier)
                new_reflections.extend(new_exp_reflections)
            '''
      # debugging
      elif reason.startswith("ValueError"):
        self.logger.log("Rejected b/c of value error exp id: %s; unit cell: %s"%(exp_id, str(experiment.crystal.get_unit_cell())) )
      '''

        # report rejected experiments, reflections
        experiments_rejected_by_postrefinement = len(experiments) - len(
            new_experiments)
        reflections_rejected_by_postrefinement = reflections.size(
        ) - new_reflections.size()

        self.logger.log("Experiments rejected by post-refinement: %d" %
                        experiments_rejected_by_postrefinement)
        self.logger.log("Reflections rejected by post-refinement: %d" %
                        reflections_rejected_by_postrefinement)

        all_reasons = []
        for reason, count in six.iteritems(experiments_rejected_by_reason):
            self.logger.log("Experiments rejected due to %s: %d" %
                            (reason, count))
            all_reasons.append(reason)

        comm = self.mpi_helper.comm
        MPI = self.mpi_helper.MPI

        # Collect all rejection reasons from all ranks. Use allreduce to let each rank have all reasons.
        all_reasons = comm.allreduce(all_reasons, MPI.SUM)
        all_reasons = set(all_reasons)

        # Now that each rank has all reasons from all ranks, we can treat the reasons in a uniform way.
        total_experiments_rejected_by_reason = {}
        for reason in all_reasons:
            rejected_experiment_count = 0
            if reason in experiments_rejected_by_reason:
                rejected_experiment_count = experiments_rejected_by_reason[
                    reason]
            total_experiments_rejected_by_reason[reason] = comm.reduce(
                rejected_experiment_count, MPI.SUM, 0)

        total_accepted_experiment_count = comm.reduce(len(new_experiments),
                                                      MPI.SUM, 0)

        # how many reflections have we rejected due to post-refinement?
        rejected_reflections = len(reflections) - len(new_reflections)
        total_rejected_reflections = self.mpi_helper.sum(rejected_reflections)

        if self.mpi_helper.rank == 0:
            for reason, count in six.iteritems(
                    total_experiments_rejected_by_reason):
                self.logger.main_log(
                    "Total experiments rejected due to %s: %d" %
                    (reason, count))
            self.logger.main_log("Total experiments accepted: %d" %
                                 total_accepted_experiment_count)
            self.logger.main_log(
                "Total reflections rejected due to post-refinement: %d" %
                total_rejected_reflections)

        self.logger.log_step_time("POSTREFINEMENT", True)

        return new_experiments, new_reflections
def run(args):
    import libtbx.load_env
    from dials.util import Sorry

    usage = "dials.reindex [options] indexed.expt indexed.refl"

    parser = OptionParser(
        usage=usage,
        phil=phil_scope,
        read_reflections=True,
        read_experiments=True,
        check_format=False,
        epilog=help_message,
    )

    params, options = parser.parse_args(show_diff_phil=True)

    reflections = flatten_reflections(params.input.reflections)
    experiments = flatten_experiments(params.input.experiments)
    if len(experiments) == 0 and len(reflections) == 0:
        parser.print_help()
        return
    if params.change_of_basis_op is None:
        raise Sorry("Please provide a change_of_basis_op.")

    reference_crystal = None
    if params.reference.experiments is not None:
        from dxtbx.serialize import load

        reference_experiments = load.experiment_list(
            params.reference.experiments, check_format=False)
        assert len(reference_experiments.crystals()) == 1
        reference_crystal = reference_experiments.crystals()[0]

    if params.reference.reflections is not None:
        # First check that we have everything as expected for the reference reindexing
        # Currently only supports reindexing one dataset at a time
        if params.reference.experiments is None:
            raise Sorry(
                """For reindexing against a reference dataset, a reference
experiments file must also be specified with the option: reference= """)
        if not os.path.exists(params.reference.reflections):
            raise Sorry("Could not locate reference dataset reflection file")
        if len(experiments) != 1 or len(reflections) != 1:
            raise Sorry(
                "Only one dataset can be reindexed to a reference at a time")

        reference_reflections = flex.reflection_table().from_file(
            params.reference.reflections)

        test_reflections = reflections[0]

        if (reference_crystal.get_space_group().type().number() !=
                experiments.crystals()[0].get_space_group().type().number()):
            raise Sorry("Space group of input does not match reference")

        # Set some flags to allow filtering, if wanting to reindex against
        # reference with data that has not yet been through integration
        if (test_reflections.get_flags(
                test_reflections.flags.integrated_sum).count(True) == 0):
            assert (
                "intensity.sum.value"
                in test_reflections), "No 'intensity.sum.value' in reflections"
            test_reflections.set_flags(
                flex.bool(test_reflections.size(), True),
                test_reflections.flags.integrated_sum,
            )
        if (reference_reflections.get_flags(
                reference_reflections.flags.integrated_sum).count(True) == 0):
            assert ("intensity.sum.value" in test_reflections
                    ), "No 'intensity.sum.value in reference reflections"
            reference_reflections.set_flags(
                flex.bool(reference_reflections.size(), True),
                reference_reflections.flags.integrated_sum,
            )

        # Make miller array of the two datasets
        try:
            test_miller_set = filtered_arrays_from_experiments_reflections(
                experiments, [test_reflections])[0]
        except ValueError:
            raise Sorry(
                "No reflections remain after filtering the test dataset")
        try:
            reference_miller_set = filtered_arrays_from_experiments_reflections(
                reference_experiments, [reference_reflections])[0]
        except ValueError:
            raise Sorry(
                "No reflections remain after filtering the reference dataset")

        from dials.algorithms.symmetry.reindex_to_reference import (
            determine_reindex_operator_against_reference, )

        change_of_basis_op = determine_reindex_operator_against_reference(
            test_miller_set, reference_miller_set)

    elif len(experiments) and params.change_of_basis_op is libtbx.Auto:
        if reference_crystal is not None:
            if len(experiments.crystals()) > 1:
                raise Sorry("Only one crystal can be processed at a time")
            from dials.algorithms.indexing.compare_orientation_matrices import (
                difference_rotation_matrix_axis_angle, )

            cryst = experiments.crystals()[0]
            R, axis, angle, change_of_basis_op = difference_rotation_matrix_axis_angle(
                cryst, reference_crystal)
            print("Change of basis op: %s" % change_of_basis_op)
            print("Rotation matrix to transform input crystal to reference::")
            print(R.mathematica_form(format="%.3f", one_row_per_line=True))
            print(
                "Rotation of %.3f degrees" % angle,
                "about axis (%.3f, %.3f, %.3f)" % axis,
            )

        elif len(reflections):
            assert len(reflections) == 1

            # always re-map reflections to reciprocal space
            refl_copy = flex.reflection_table()
            for i, imageset in enumerate(experiments.imagesets()):
                if "imageset_id" in reflections[0]:
                    sel = reflections[0]["imageset_id"] == i
                else:
                    sel = reflections[0]["id"] == i
                refl = reflections[0].select(sel)
                refl.centroid_px_to_mm(imageset.get_detector(),
                                       imageset.get_scan())
                refl.map_centroids_to_reciprocal_space(
                    imageset.get_detector(),
                    imageset.get_beam(),
                    imageset.get_goniometer(),
                )
                refl_copy.extend(refl)

            # index the reflection list using the input experiments list
            refl_copy["id"] = flex.int(len(refl_copy), -1)
            from dials.algorithms.indexing import index_reflections

            index_reflections(refl_copy, experiments, tolerance=0.2)
            hkl_expt = refl_copy["miller_index"]
            hkl_input = reflections[0]["miller_index"]

            change_of_basis_op = derive_change_of_basis_op(hkl_input, hkl_expt)

            # reset experiments list since we don't want to reindex this
            experiments = []

    else:
        change_of_basis_op = sgtbx.change_of_basis_op(
            params.change_of_basis_op)

    if len(experiments):
        for crystal in experiments.crystals():
            cryst_orig = copy.deepcopy(crystal)
            cryst_reindexed = cryst_orig.change_basis(change_of_basis_op)
            if params.space_group is not None:
                a, b, c = cryst_reindexed.get_real_space_vectors()
                A_varying = [
                    cryst_reindexed.get_A_at_scan_point(i)
                    for i in range(cryst_reindexed.num_scan_points)
                ]
                cryst_reindexed = Crystal(
                    a, b, c, space_group=params.space_group.group())
                cryst_reindexed.set_A_at_scan_points(A_varying)
            crystal.update(cryst_reindexed)

            print("Old crystal:")
            print(cryst_orig)
            print()
            print("New crystal:")
            print(cryst_reindexed)
            print()

        print("Saving reindexed experimental models to %s" %
              params.output.experiments)
        dump.experiment_list(experiments, params.output.experiments)

    if len(reflections):
        assert len(reflections) == 1
        reflections = reflections[0]

        miller_indices = reflections["miller_index"]

        if params.hkl_offset is not None:
            h, k, l = miller_indices.as_vec3_double().parts()
            h += params.hkl_offset[0]
            k += params.hkl_offset[1]
            l += params.hkl_offset[2]
            miller_indices = flex.miller_index(h.iround(), k.iround(),
                                               l.iround())
        non_integral_indices = change_of_basis_op.apply_results_in_non_integral_indices(
            miller_indices)
        if non_integral_indices.size() > 0:
            print(
                "Removing %i/%i reflections (change of basis results in non-integral indices)"
                % (non_integral_indices.size(), miller_indices.size()))
        sel = flex.bool(miller_indices.size(), True)
        sel.set_selected(non_integral_indices, False)
        miller_indices_reindexed = change_of_basis_op.apply(
            miller_indices.select(sel))
        reflections["miller_index"].set_selected(sel, miller_indices_reindexed)
        reflections["miller_index"].set_selected(~sel, (0, 0, 0))

        print("Saving reindexed reflections to %s" % params.output.reflections)
        easy_pickle.dump(params.output.reflections, reflections)
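# A minimal sketch of the core reindexing step above: applying an sgtbx
# change_of_basis_op to a flex.miller_index array. The operator "-b,a,c"
# and the indices are arbitrary examples.
from cctbx import sgtbx
from dials.array_family import flex

cb_op = sgtbx.change_of_basis_op("-b,a,c")
hkl = flex.miller_index([(1, 2, 3), (2, 0, 1)])

# Indices that would become non-integral are masked out before applying,
# mirroring the filtering done in run() above.
bad = cb_op.apply_results_in_non_integral_indices(hkl)
sel = flex.bool(hkl.size(), True)
sel.set_selected(bad, False)

hkl_reindexed = cb_op.apply(hkl.select(sel))
print(list(hkl_reindexed))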
Exemple #34
0
    def find_spots(self, experiments: ExperimentList) -> flex.reflection_table:
        """
        Do spotfinding for a set of experiments.

        Args:
            experiments: The experiment list to process

        Returns:
            A new reflection table of found reflections
        """
        # Loop through all the experiments and get the unique imagesets
        imagesets = []
        for experiment in experiments:
            if experiment.imageset not in imagesets:
                imagesets.append(experiment.imageset)

        # Loop through all the imagesets and find the strong spots
        reflections = flex.reflection_table()

        for j, imageset in enumerate(imagesets):

            # Find the strong spots in the sequence
            logger.info(
                "-" * 80 + "\nFinding strong spots in imageset %d\n" +
                "-" * 80, j)
            table, hot_mask = self._find_spots_in_imageset(imageset)

            # Fix up the experiment ID's now
            table["id"] = flex.int(table.nrows(), -1)
            for i, experiment in enumerate(experiments):
                if experiment.imageset is not imageset:
                    continue
                if not self.is_stills and experiment.scan:
                    z0, z1 = experiment.scan.get_array_range()
                    z = table["xyzobs.px.value"].parts()[2]
                    table["id"].set_selected((z > z0) & (z < z1), i)
                    if experiment.identifier:
                        table.experiment_identifiers(
                        )[i] = experiment.identifier
                else:
                    table["id"] = flex.int(table.nrows(), j)
                    if experiment.identifier:
                        table.experiment_identifiers(
                        )[j] = experiment.identifier
            missed = table["id"] == -1
            assert missed.count(
                True) == 0, "Failed to remap {} experiment IDs".format(
                    missed.count(True))

            reflections.extend(table)
            # Write a hot pixel mask
            if self.write_hot_mask:
                if not imageset.external_lookup.mask.data.empty():
                    for m1, m2 in zip(hot_mask,
                                      imageset.external_lookup.mask.data):
                        m1 &= m2.data()
                    imageset.external_lookup.mask.data = ImageBool(hot_mask)
                else:
                    imageset.external_lookup.mask.data = ImageBool(hot_mask)
                imageset.external_lookup.mask.filename = "%s_%d.pickle" % (
                    self.hot_mask_prefix,
                    i,
                )

                # Write the hot mask
                with open(imageset.external_lookup.mask.filename,
                          "wb") as outfile:
                    pickle.dump(hot_mask,
                                outfile,
                                protocol=pickle.HIGHEST_PROTOCOL)

        # Set the strong spot flag
        reflections.set_flags(flex.size_t_range(len(reflections)),
                              reflections.flags.strong)

        # Check for overloads
        reflections.is_overloaded(experiments)

        # Return the reflections
        return reflections
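# A minimal sketch of the experiment-id remapping performed in find_spots
# above: rows are selected on the z (frame) component of the observed
# centroid and assigned to the experiment whose scan covers that frame.
# The two frame ranges are invented for illustration.
from dials.array_family import flex

table = flex.reflection_table()
table["xyzobs.px.value"] = flex.vec3_double([(1.0, 1.0, 2.5), (1.0, 1.0, 7.5)])
table["id"] = flex.int(table.nrows(), -1)

# Two hypothetical experiments sharing one imageset, covering frames 0-5 and 5-10.
for i, (z0, z1) in enumerate([(0, 5), (5, 10)]):
    z = table["xyzobs.px.value"].parts()[2]
    table["id"].set_selected((z > z0) & (z < z1), i)

# Every reflection should now be remapped, as asserted in find_spots.
assert (table["id"] == -1).count(True) == 0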
Exemple #35
0
def split_for_scan_range(experiments, reference, scan_range):
    """Update experiments when scan range is set.

    Args:
        experiments: An experiment list
        reference: A reflection table of reference reflections
        scan_range (tuple): Range of scan images to be processed

    Returns:
        experiments: A new experiment list with the requested scan ranges
        reference: A reflection table with data from the scan ranges

    Raises:
        ValueError: If bad input for scan range.
    """

    # Only do anything if the scan range is set
    if scan_range is not None and len(scan_range) > 0:

        # Ensure that all experiments have the same imageset and scan
        iset = [e.imageset for e in experiments]
        scan = [e.scan for e in experiments]
        assert all(x == iset[0] for x in iset)
        assert all(x == scan[0] for x in scan)

        # Get the imageset and scan
        iset = experiments[0].imageset
        scan = experiments[0].scan

        # Get the array range
        if scan is not None:
            frames_start, frames_end = scan.get_array_range()
            assert scan.get_num_images() == len(iset)
        else:
            frames_start, frames_end = (0, len(iset))

        # Create the new lists
        new_experiments = ExperimentList()
        new_reference_all = reference.split_by_experiment_id()
        new_reference = flex.reflection_table()
        for i in range(len(experiments) - len(new_reference_all)):
            new_reference_all.append(flex.reflection_table())
        assert len(new_reference_all) == len(experiments)

        # Loop through all the scan ranges and create a new experiment list with
        # the requested scan ranges.
        for scan_start, scan_end in scan_range:
            # Validate the requested scan range
            if scan_end == scan_start:
                raise ValueError(
                    "Scan range end must be higher than start; pass {},{} for single image"
                    .format(scan_start, scan_start + 1))
            if scan_end < scan_start:
                raise ValueError("Scan range must be in ascending order")
            elif scan_start < frames_start or scan_end > frames_end:
                raise ValueError(
                    "Scan range must be within image range {}..{}".format(
                        frames_start, frames_end))

            assert scan_end > scan_start
            assert scan_start >= frames_start
            assert scan_end <= frames_end

            index_start = scan_start - frames_start
            index_end = index_start + (scan_end - scan_start)
            assert index_start < index_end
            assert index_start >= 0
            assert index_end <= len(iset)
            new_iset = iset[index_start:index_end]
            if scan is None:
                new_scan = None
            else:
                new_scan = scan[index_start:index_end]

            for i, e1 in enumerate(experiments):
                e2 = Experiment()
                e2.beam = e1.beam
                e2.detector = e1.detector
                e2.goniometer = e1.goniometer
                e2.crystal = slice_crystal(e1.crystal,
                                           (index_start, index_end))
                e2.profile = e1.profile
                e2.imageset = new_iset
                e2.scan = new_scan
                new_reference_all[i]["id"] = flex.int(
                    len(new_reference_all[i]), len(new_experiments))
                new_reference.extend(new_reference_all[i])
                new_experiments.append(e2)
        experiments = new_experiments
        reference = new_reference

        # Print some information
        logger.info(
            "Modified experiment list to integrate over requested scan range")
        for scan_start, scan_end in scan_range:
            logger.info(" scan_range = %d -> %d", scan_start, scan_end)

    # Return the experiments
    return experiments, reference
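# A worked example of the frame-to-array-index arithmetic used in
# split_for_scan_range. Suppose the scan covers frames 10..110 (its array
# range) and the requested scan range is images 20..30:
frames_start, frames_end = 10, 110
scan_start, scan_end = 20, 30

index_start = scan_start - frames_start            # 10
index_end = index_start + (scan_end - scan_start)  # 20

# iset[10:20] and scan[10:20] then hold exactly the requested ten images.
assert (index_start, index_end) == (10, 20)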
    def run(self):
        ''' Parse the options. '''
        from dials.util.options import flatten_experiments, flatten_reflections
        from dxtbx.model import ExperimentList
        from scitbx.math import five_number_summary
        # Parse the command line arguments
        params, options = self.parser.parse_args(show_diff_phil=True)
        self.params = params
        experiments = flatten_experiments(params.input.experiments)
        reflections = flatten_reflections(params.input.reflections)

        assert len(reflections) == 1
        reflections = reflections[0]
        print "Found", len(reflections), "reflections", "and", len(
            experiments), "experiments"

        filtered_reflections = flex.reflection_table()
        filtered_experiments = ExperimentList()

        skipped_reflections = flex.reflection_table()
        skipped_experiments = ExperimentList()

        if params.detector is not None:
            culled_reflections = flex.reflection_table()
            culled_experiments = ExperimentList()
            detector = experiments.detectors()[params.detector]
            for expt_id, experiment in enumerate(experiments):
                refls = reflections.select(reflections['id'] == expt_id)
                if experiment.detector is detector:
                    culled_experiments.append(experiment)
                    refls['id'] = flex.int(len(refls),
                                           len(culled_experiments) - 1)
                    culled_reflections.extend(refls)
                else:
                    skipped_experiments.append(experiment)
                    refls['id'] = flex.int(len(refls),
                                           len(skipped_experiments) - 1)
                    skipped_reflections.extend(refls)

            print "RMSD filtering %d experiments using detector %d, out of %d" % (
                len(culled_experiments), params.detector, len(experiments))
            reflections = culled_reflections
            experiments = culled_experiments

        difference_vector_norms = (reflections['xyzcal.mm'] -
                                   reflections['xyzobs.mm.value']).norms()

        if params.max_delta is not None:
            sel = difference_vector_norms <= params.max_delta
            reflections = reflections.select(sel)
            difference_vector_norms = difference_vector_norms.select(sel)

        data = flex.double()
        counts = flex.double()
        for i in xrange(len(experiments)):
            dvns = difference_vector_norms.select(reflections['id'] == i)
            counts.append(len(dvns))
            if len(dvns) == 0:
                data.append(0)
                continue
            rmsd = math.sqrt(flex.sum_sq(dvns) / len(dvns))
            data.append(rmsd)
        data *= 1000
        subset = data.select(counts > 0)
        print len(subset), "experiments with > 0 reflections"

        if params.show_plots:
            h = flex.histogram(subset, n_slots=40)
            fig = plt.figure()
            ax = fig.add_subplot(111)
            ax.plot(h.slot_centers().as_numpy_array(),
                    h.slots().as_numpy_array(), '-')
            plt.title("Histogram of %d image RMSDs" % len(subset))

            fig = plt.figure()
            plt.boxplot(subset, vert=False)
            plt.title("Boxplot of %d image RMSDs" % len(subset))
            plt.show()

        outliers = counts == 0
        min_x, q1_x, med_x, q3_x, max_x = five_number_summary(subset)
        print "Five number summary of RMSDs (microns): min %.1f, q1 %.1f, med %.1f, q3 %.1f, max %.1f" % (
            min_x, q1_x, med_x, q3_x, max_x)
        iqr_x = q3_x - q1_x
        cut_x = params.iqr_multiplier * iqr_x
        outliers.set_selected(data > q3_x + cut_x, True)
        #outliers.set_selected(col < q1_x - cut_x, True) # Don't throw away the images that are outliers in the 'good' direction!

        for i in xrange(len(experiments)):
            if outliers[i]:
                continue
            refls = reflections.select(reflections['id'] == i)
            refls['id'] = flex.int(len(refls), len(filtered_experiments))
            filtered_reflections.extend(refls)
            filtered_experiments.append(experiments[i])

        zeroes = counts == 0
        n_zero = len(counts.select(zeroes))
        print "Removed %d bad experiments and %d experiments with zero reflections, out of %d (%%%.1f)" % (
            len(experiments) - len(filtered_experiments) - n_zero, n_zero,
            len(experiments), 100 *
            ((len(experiments) - len(filtered_experiments)) /
             len(experiments)))

        if params.detector is not None:
            crystals = filtered_experiments.crystals()
            for expt_id, experiment in enumerate(skipped_experiments):
                if experiment.crystal in crystals:
                    filtered_experiments.append(experiment)
                    refls = skipped_reflections.select(
                        skipped_reflections['id'] == expt_id)
                    refls['id'] = flex.int(len(refls),
                                           len(filtered_experiments) - 1)
                    filtered_reflections.extend(refls)

        if params.delta_psi_filter is not None:
            delta_psi = filtered_reflections['delpsical.rad'] * 180 / math.pi
            sel = (delta_psi <= params.delta_psi_filter) & (
                delta_psi >= -params.delta_psi_filter)
            l = len(filtered_reflections)
            filtered_reflections = filtered_reflections.select(sel)
            print "Filtering by delta psi, removing %d out of %d reflections" % (
                l - len(filtered_reflections), l)

        print "Final experiment count", len(filtered_experiments)

        from dxtbx.model.experiment_list import ExperimentListDumper
        dump = ExperimentListDumper(filtered_experiments)
        dump.as_json(params.output.filtered_experiments)

        filtered_reflections.as_pickle(params.output.filtered_reflections)
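# A standalone sketch of the IQR-based outlier cut applied to the per-image
# RMSDs above, with made-up data and an iqr_multiplier of 1.5.
from scitbx.math import five_number_summary
from dials.array_family import flex

data = flex.double([12.0, 14.0, 15.0, 16.0, 18.0, 95.0])  # RMSDs in microns
min_x, q1_x, med_x, q3_x, max_x = five_number_summary(data)
cut_x = 1.5 * (q3_x - q1_x)

# Only the high side is cut, so unusually *good* images are never rejected.
outliers = data > q3_x + cut_x
print(outliers.count(True), "outlier(s)")  # flags the 95.0 entry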
Exemple #37
0
def test_average_bbox_size():
    """Test behaviour of function for obtaining average bbox size."""
    reflections = flex.reflection_table()
    reflections["bbox"] = flex.int6(*(flex.int(10, i) for i in range(6)))
    assert _average_bbox_size(reflections) == (1, 1, 1)
def test_set_selected():
  from dials.array_family import flex

  # The columns as lists
  c1 = list(range(10))
  c2 = list(range(10))
  c3 = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'i', 'j', 'k']

  # Create a table with some elements
  table1 = flex.reflection_table()
  table2 = flex.reflection_table()
  table1['col1'] = flex.int(c1)
  table2['col2'] = flex.double(c2)
  table2['col3'] = flex.std_string(c3)

  # Set selected columns
  table1.set_selected(('col3', 'col2'), table2)
  assert(table1.nrows() == 10)
  assert(table1.ncols() == 3)
  assert(all(a == b for a, b in zip(table1['col1'], c1)))
  assert(all(a == b for a, b in zip(table1['col2'], c2)))
  assert(all(a == b for a, b in zip(table1['col3'], c3)))

  # Set selected columns
  table1 = flex.reflection_table()
  table1['col1'] = flex.int(c1)
  table1.set_selected(flex.std_string(['col3', 'col2']), table2)
  assert(table1.nrows() == 10)
  assert(table1.ncols() == 3)
  assert(all(a == b for a, b in zip(table1['col1'], c1)))
  assert(all(a == b for a, b in zip(table1['col2'], c2)))
  assert(all(a == b for a, b in zip(table1['col3'], c3)))

  cc1 = list(range(10, 15))
  cc2 = list(range(10, 15))
  cc3 = ['l', 'm', 'n', 'o', 'p']

  # Set selected rows
  table2 = flex.reflection_table()
  table2['col1'] = flex.int(cc1)
  table2['col2'] = flex.double(cc2)
  table2['col3'] = flex.std_string(cc3)

  index = flex.size_t([0, 1, 5, 8, 9])
  ccc1 = copy.deepcopy(c1)
  ccc2 = copy.deepcopy(c2)
  ccc3 = copy.deepcopy(c3)
  for j, i in enumerate(index):
    ccc1[i] = cc1[j]
    ccc2[i] = cc2[j]
    ccc3[i] = cc3[j]
  table1.set_selected(index, table2)
  assert(all(a == b for a, b in zip(table1['col1'], ccc1)))
  assert(all(a == b for a, b in zip(table1['col2'], ccc2)))
  assert(all(a == b for a, b in zip(table1['col3'], ccc3)))

  # Set selected rows
  table2 = flex.reflection_table()
  table2['col1'] = flex.int(cc1)
  table2['col2'] = flex.double(cc2)
  table2['col3'] = flex.std_string(cc3)

  flags = flex.bool([True, True, False, False, False,
                     True, False, False, True, True])
  table1.set_selected(flags, table2)
  assert(all(a == b for a, b in zip(table1['col1'], ccc1)))
  assert(all(a == b for a, b in zip(table1['col2'], ccc2)))
  assert(all(a == b for a, b in zip(table1['col3'], ccc3)))
def test_slicing():
  from dials.array_family import flex

  # The columns as lists
  c1 = list(range(10))
  c2 = list(range(10))
  c3 = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'i', 'j', 'k']

  # Create a table with some elements
  table = flex.reflection_table()
  table['col1'] = flex.int(c1)
  table['col2'] = flex.double(c2)
  table['col3'] = flex.std_string(c3)

  # Try forward slicing
  new_table = table[2:7:2]
  assert(new_table.ncols() == 3)
  assert(new_table.nrows() == 3)
  assert(new_table.is_consistent())
  c11 = c1[2:7:2]
  c22 = c2[2:7:2]
  c33 = c3[2:7:2]
  assert(all(a == b for a, b in zip(new_table['col1'], c11)))
  assert(all(a == b for a, b in zip(new_table['col2'], c22)))
  assert(all(a == b for a, b in zip(new_table['col3'], c33)))

  # Try backward slicing
  new_table = table[7:2:-2]
  assert(new_table.ncols() == 3)
  assert(new_table.nrows() == 3)
  assert(new_table.is_consistent())
  c11 = c1[7:2:-2]
  c22 = c2[7:2:-2]
  c33 = c3[7:2:-2]
  assert(all(a == b for a, b in zip(new_table['col1'], c11)))
  assert(all(a == b for a, b in zip(new_table['col2'], c22)))
  assert(all(a == b for a, b in zip(new_table['col3'], c33)))

  # Try setting forward slicing
  table[2:7:2] = new_table
  assert(table.ncols() == 3)
  assert(table.nrows() == 10)
  assert(table.is_consistent())
  c1[2:7:2] = c11
  c2[2:7:2] = c22
  c3[2:7:2] = c33
  assert(all(a == b for a, b in zip(table['col1'], c1)))
  assert(all(a == b for a, b in zip(table['col2'], c2)))
  assert(all(a == b for a, b in zip(table['col3'], c3)))

  # Try setting backward slicing
  table[7:2:-2] = new_table
  assert(table.ncols() == 3)
  assert(table.nrows() == 10)
  assert(table.is_consistent())
  c1[7:2:-2] = c11
  c2[7:2:-2] = c22
  c3[7:2:-2] = c33
  assert(all(a == b for a, b in zip(table['col1'], c1)))
  assert(all(a == b for a, b in zip(table['col2'], c2)))
  assert(all(a == b for a, b in zip(table['col3'], c3)))
Exemple #40
0
def test_merge_multi_wavelength(dials_data, tmpdir):
    """Test that merge handles multi-wavelength data suitably - should be
    exported into an mtz with separate columns for each wavelength."""

    mean_labels = [
        "%sIMEAN_WAVE%s" % (pre, i) for i in [1, 2] for pre in ["", "SIG"]
    ]
    anom_labels = [
        "%sI_WAVE%s(%s)" % (pre, i, sgn) for i in [1, 2]
        for pre in ["", "SIG"] for sgn in ["+", "-"]
    ]
    amp_labels = [
        "%sF_WAVE%s" % (pre, i) for i in [1, 2] for pre in ["", "SIG"]
    ]
    anom_amp_labels = [
        "%sF_WAVE%s(%s)" % (pre, i, sgn) for i in [1, 2]
        for pre in ["", "SIG"] for sgn in ["+", "-"]
    ]

    location = dials_data("l_cysteine_4_sweeps_scaled")
    refl1 = location.join("scaled_30.refl").strpath
    expt1 = location.join("scaled_30.expt").strpath
    refl2 = location.join("scaled_35.refl").strpath
    expt2 = location.join("scaled_35.expt").strpath
    expts1 = ExperimentListFactory.from_json_file(expt1, check_format=False)
    expts1[0].beam.set_wavelength(0.5)
    expts2 = ExperimentListFactory.from_json_file(expt2, check_format=False)
    expts1.extend(expts2)

    tmp_expt = tmpdir.join("tmp.expt").strpath
    expts1.as_json(tmp_expt)

    reflections1 = flex.reflection_table.from_pickle(refl1)
    reflections2 = flex.reflection_table.from_pickle(refl2)
    # first need to resolve identifiers - usually done on loading
    reflections2["id"] = flex.int(reflections2.size(), 1)
    del reflections2.experiment_identifiers()[0]
    reflections2.experiment_identifiers()[1] = "3"
    reflections1.extend(reflections2)

    tmp_refl = tmpdir.join("tmp.refl").strpath
    reflections1.as_file(tmp_refl)

    # Can now run after creating our 'fake' multiwavelength dataset
    command = [
        "dials.merge", tmp_refl, tmp_expt, "truncate=True", "anomalous=True"
    ]
    result = procrunner.run(command, working_directory=tmpdir)
    assert not result.returncode and not result.stderr
    assert tmpdir.join("merged.mtz").check()
    m = mtz.object(tmpdir.join("merged.mtz").strpath)
    labels = []
    for ma in m.as_miller_arrays(merge_equivalents=False):
        labels.extend(ma.info().labels)
    assert all(x in labels for x in mean_labels)
    assert all(x in labels for x in anom_labels)
    assert all(x in labels for x in amp_labels)
    assert all(x in labels for x in anom_amp_labels)

    # 5 miller arrays for each dataset
    assert m.as_miller_arrays()[0].info().wavelength == pytest.approx(0.5)
    assert m.as_miller_arrays()[5].info().wavelength == pytest.approx(0.6889)
def test_experiment_identifiers():

  from dials.array_family import flex
  from dxtbx.model import ExperimentList, Experiment

  table = flex.reflection_table()
  table['id'] = flex.int([0,1,2,3])

  assert table.are_experiment_identifiers_consistent() == True

  identifiers = table.experiment_identifiers()
  identifiers[0] = 'abcd'
  identifiers[1] = 'efgh'
  identifiers[2] = 'ijkl'
  identifiers[3] = 'mnop'

  assert identifiers[0] == 'abcd'
  assert identifiers[1] == 'efgh'
  assert identifiers[2] == 'ijkl'
  assert identifiers[3] == 'mnop'

  for k, v in identifiers:
    if k == 0:
      assert v == 'abcd'
    if k == 1:
      assert v == 'efgh'
    if k == 2:
      assert v == 'ijkl'
    if k == 3:
      assert v == 'mnop'

  assert tuple(identifiers.keys()) == (0, 1, 2, 3)
  assert tuple(identifiers.values()) == ("abcd", "efgh", "ijkl", "mnop")


  assert table.are_experiment_identifiers_consistent() == True

  experiments = ExperimentList()
  experiments.append(Experiment(identifier="abcd"))
  experiments.append(Experiment(identifier="efgh"))
  experiments.append(Experiment(identifier="ijkl"))
  experiments.append(Experiment(identifier="mnop"))

  assert table.are_experiment_identifiers_consistent(experiments) == True

  experiments = ExperimentList()
  experiments.append(Experiment(identifier="abcd"))
  experiments.append(Experiment(identifier="efgh"))
  experiments.append(Experiment(identifier="ijkl"))
  experiments.append(Experiment(identifier="mnop"))
  experiments[3].identifier = "ijkl"

  assert table.are_experiment_identifiers_consistent(experiments) == False

  identifiers = table.experiment_identifiers()
  identifiers[0] = 'abcd'
  identifiers[1] = 'efgh'
  identifiers[2] = 'ijkl'
  identifiers[3] = 'ijkl'

  assert table.are_experiment_identifiers_consistent() == False

  identifiers[4] = 'mnop'

  import six.moves.cPickle as pickle
  pickled = pickle.dumps(table)
  table2 = pickle.loads(pickled)

  id1 = table.experiment_identifiers()
  id2 = table2.experiment_identifiers()

  for i in id1.keys():
    assert id1[i] == id2[i]

  other_table = flex.reflection_table()
  other_table['id'] = flex.int([3, 4])

  assert other_table.are_experiment_identifiers_consistent() == True

  identifiers = other_table.experiment_identifiers()
  identifiers[3] = 'mnop'
  identifiers[4] = 'qrst'

  table.extend(other_table)

  assert len(table.experiment_identifiers()) == 5
  assert table.experiment_identifiers()[0] == 'abcd'
  assert table.experiment_identifiers()[1] == 'efgh'
  assert table.experiment_identifiers()[2] == 'ijkl'
  assert table.experiment_identifiers()[3] == 'mnop'
  assert table.experiment_identifiers()[4] == 'qrst'
Exemple #42
0
    def run(self):
        import datetime
        time_now = datetime.datetime.now()

        self.mpi_logger.log(str(time_now))
        if self.mpi_helper.rank == 0:
            self.mpi_logger.main_log(str(time_now))

        self.mpi_logger.log_step_time("TOTAL")

        self.mpi_logger.log_step_time("PARSE_INPUT_PARAMS")
        self.parse_input()
        self.mpi_logger.log_step_time("PARSE_INPUT_PARAMS", True)

        if self.params.mp.debug.cProfile:
            import cProfile
            pr = cProfile.Profile()
            pr.enable()

        # Create the workers using the factories
        self.mpi_logger.log_step_time("CREATE_WORKERS")
        from xfel.merging import application
        import importlib, copy

        self._resolve_persistent_columns()

        workers = []
        self.params.dispatch.step_list = self.params.dispatch.step_list or default_steps
        for step in self.params.dispatch.step_list:
            step_factory_name = step
            step_additional_info = []

            step_info = step.split('_')
            assert len(step_info) > 0
            if len(step_info) > 1:
                step_factory_name = step_info[0]
                step_additional_info = step_info[1:]

            try:
                factory = importlib.import_module('xfel.merging.application.' +
                                                  step_factory_name +
                                                  '.factory')
            except ModuleNotFoundError:
                # remember the system path so the custom worker can temporarily modify it
                sys_path = copy.deepcopy(sys.path)
                pathstr = os.path.join('~', '.cctbx.xfel', 'merging',
                                       'application', step_factory_name,
                                       'factory.py')
                pathstr = os.path.expanduser(pathstr)
                modulename = 'xfel.merging.application.' + step_factory_name + '.factory'
                spec = importlib.util.spec_from_file_location(
                    modulename, pathstr)
                factory = importlib.util.module_from_spec(spec)
                spec.loader.exec_module(factory)
                # reset the path
                sys.path = sys_path

            workers.extend(
                factory.factory.from_parameters(self.params,
                                                step_additional_info,
                                                mpi_helper=self.mpi_helper,
                                                mpi_logger=self.mpi_logger))

        # Perform phil validation up front
        for worker in workers:
            worker.validate()
        self.mpi_logger.log_step_time("CREATE_WORKERS", True)

        # Do the work
        experiments = reflections = None
        step = 0
        while (workers):
            worker = workers.pop(0)
            self.mpi_logger.log_step_time("STEP_" + worker.__repr__())
            # Log worker name, i.e. execution step name
            step += 1
            if step > 1:
                self.mpi_logger.log('')
            step_desc = "STEP %d: %s" % (step, worker)
            self.mpi_logger.log(step_desc)

            if self.mpi_helper.rank == 0:
                if step > 1:
                    self.mpi_logger.main_log('')
                self.mpi_logger.main_log(step_desc)

            # Execute worker
            experiments, reflections = worker.run(experiments, reflections)
            self.mpi_logger.log_step_time("STEP_" + worker.__repr__(), True)
            if experiments:
                self.mpi_logger.log("Ending step with %d experiments" %
                                    len(experiments))

        if self.params.output.save_experiments_and_reflections:
            if len(reflections) and 'id' not in reflections:
                from dials.array_family import flex
                id_ = flex.int(len(reflections), -1)
                if experiments:
                    for expt_number, expt in enumerate(experiments):
                        sel = reflections['exp_id'] == expt.identifier
                        id_.set_selected(sel, expt_number)
                else:
                    for expt_number, exp_id in enumerate(
                            set(reflections['exp_id'])):
                        sel = reflections['exp_id'] == exp_id
                        id_.set_selected(sel, expt_number)
                reflections['id'] = id_

                assert (reflections['id'] == -1).count(True) == 0, ((
                    reflections['id'] == -1).count(True), len(reflections))

            if self.mpi_helper.size == 1:
                filename_suffix = ""
            else:
                filename_suffix = "_%06d" % self.mpi_helper.rank

            if len(reflections):
                reflections.as_pickle(
                    os.path.join(
                        self.params.output.output_dir, "%s%s.refl" %
                        (self.params.output.prefix, filename_suffix)))
            if experiments:
                experiments.as_file(
                    os.path.join(
                        self.params.output.output_dir, "%s%s.expt" %
                        (self.params.output.prefix, filename_suffix)))

        self.mpi_logger.log_step_time("TOTAL", True)

        if self.params.mp.debug.cProfile:
            pr.disable()
            pr.dump_stats(
                os.path.join(
                    self.params.output.output_dir, "cpu_%s_%d.prof" %
                    (self.params.output.prefix, self.mpi_helper.rank)))
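# A minimal sketch of the custom-worker fallback above: loading a module
# directly from a file path with the standard importlib machinery. The
# module name and path are hypothetical.
import importlib.util

modulename = "xfel.merging.application.myworker.factory"
pathstr = "/home/user/.cctbx.xfel/merging/application/myworker/factory.py"

spec = importlib.util.spec_from_file_location(modulename, pathstr)
factory = importlib.util.module_from_spec(spec)
spec.loader.exec_module(factory)
# factory can now be used like a normally imported module, e.g.
# factory.factory.from_parameters(...)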
Exemple #43
0
try:
    import scipy.linalg  # import dependency
except ImportError:
    pass

# Legend for the different mask values:
#   MaskCode.Valid      = "\\\\\\"
#   MaskCode.Foreground = "//////"
#   MaskCode.Background = "||||||"

if __name__ == "__main__":
    lst_flex = []
    lst_flex_norm = []

    for size_xyz in range(8, 6, -1):

        size_x = size_xyz * 2

        data_xyz_flex = flex.double(flex.grid(size_xyz, size_xyz, size_x), 15)
        data_flex_norm = flex.double(flex.grid(size_xyz, size_xyz, size_x), 15)
        data_flex_mask = flex.int(flex.grid(size_xyz, size_xyz, size_x), 0)

        tot = 0.0
        for frm in range(size_xyz):
            for row in range(size_xyz):
                for col in range(size_x):
                    data_xyz_flex[frm, row, col] += (row * 2 + col * 2 + frm * 2)
                    tot += data_xyz_flex[frm, row, col]
                    if 1 < row < size_xyz - 2 and 1 < col < size_x - 2:
                        data_flex_mask[frm, row, col] = MaskCode.Foreground
def test_split_partials_with_shoebox():
  from dials.array_family import flex
  from random import randint, uniform
  from dials.model.data import Shoebox
  r = flex.reflection_table()
  r['value1'] = flex.double()
  r['value2'] = flex.int()
  r['value3'] = flex.double()
  r['bbox'] = flex.int6()
  r['panel'] = flex.size_t()
  r['shoebox'] = flex.shoebox()
  expected = []
  for i in range(100):
    x0 = randint(0, 100)
    x1 = x0 + randint(1, 10)
    y0 = randint(0, 100)
    y1 = y0 + randint(1, 10)
    z0 = randint(0, 100)
    z1 = z0 + randint(1, 10)
    v1 = uniform(0, 100)
    v2 = randint(0, 100)
    v3 = uniform(0, 100)
    sbox = Shoebox(0, (x0, x1, y0, y1, z0, z1))
    sbox.allocate()
    assert(sbox.is_consistent())
    w = x1 - x0
    h = y1 - y0
    for z in range(z0, z1):
      for y in range(y0, y1):
        for x in range(x0, x1):
          sbox.data[z-z0,y-y0,x-x0] = x+y*w+z*w*h
    r.append({
      'value1' : v1,
      'value2' : v2,
      'value3' : v3,
      'bbox' : (x0, x1, y0, y1, z0, z1),
      'panel' : 0,
      'shoebox' : sbox
    })
    for z in range(z0, z1):
      sbox = Shoebox(0, (x0, x1, y0, y1, z, z+1))
      sbox.allocate()
      assert(sbox.is_consistent())
      w = x1 - x0
      h = y1 - y0
      for y in range(y0, y1):
        for x in range(x0, x1):
          sbox.data[0,y-y0,x-x0] = x+y*w+z*w*h
      expected.append({
        'value1' : v1,
        'value2' : v2,
        'value3' : v3,
        'bbox' : (x0, x1, y0, y1, z, z+1),
        'partial_id' : i,
        'panel' : 0,
        'shoebox' : sbox
      })

  r.split_partials_with_shoebox()
  assert(len(r) == len(expected))
  EPS = 1e-7
  for r1, r2 in zip(r, expected):
    assert(abs(r1['value1'] - r2['value1']) < EPS)
    assert(r1['value2'] == r2['value2'])
    assert(abs(r1['value3'] - r2['value3']) < EPS)
    assert(r1['bbox'] == r2['bbox'])
    assert(r1['partial_id'] == r2['partial_id'])
    assert(r1['panel'] == r2['panel'])
    assert(r1['shoebox'].data.as_double().as_1d().all_approx_equal(
      r2['shoebox'].data.as_double().as_1d()))
Exemple #45
0
def run_integration(params, experiments, reference=None):
    """Perform the integration.

    Returns:
        experiments: The integrated experiments
        reflections: The integrated reflections
        report(optional): An integration report.

    Raises:
        ValueError: For a number of bad inputs
        RuntimeError: If the profile model creation fails
    """
    predicted = None
    rubbish = None

    for abs_params in params.absorption_correction:
        if abs_params.apply:
            if not (params.integration.debug.output
                    and not params.integration.debug.separate_files):
                raise ValueError(
                    "Shoeboxes must be saved to integration intermediates to apply an absorption correction. "
                    +
                    "Set integration.debug.output=True, integration.debug.separate_files=False and "
                    +
                    "integration.debug.delete_shoeboxes=True to temporarily store shoeboxes."
                )

    # Print if we're using a mask
    for i, exp in enumerate(experiments):
        mask = exp.imageset.external_lookup.mask
        if mask.filename is not None:
            if mask.data:
                logger.info("Using external mask: %s", mask.filename)
                for tile in mask.data:
                    logger.info(" Mask has %d pixels masked",
                                tile.data().count(False))

    # Print the experimental models
    for i, exp in enumerate(experiments):
        summary = "\n".join((
            "",
            "=" * 80,
            "",
            "Experiments",
            "",
            "Models for experiment %d" % i,
            "",
            str(exp.beam),
            str(exp.detector),
        ))
        if exp.goniometer:
            summary += str(exp.goniometer) + "\n"
        if exp.scan:
            summary += str(exp.scan) + "\n"
        summary += str(exp.crystal)
        logger.info(summary)

    logger.info("\n".join(("", "=" * 80, "")))
    logger.info(heading("Initialising"))

    # Load the data
    if reference:
        reference, rubbish = process_reference(reference)

        # Check pixels don't belong to neighbours
        if exp.goniometer is not None and exp.scan is not None:
            reference = filter_reference_pixels(reference, experiments)

        # Modify experiment list if scan range is set.
        experiments, reference = split_for_scan_range(experiments, reference,
                                                      params.scan_range)

    # Modify experiment list if exclude images is set
    if params.exclude_images:
        for experiment in experiments:
            for index in params.exclude_images:
                experiment.imageset.mark_for_rejection(index, True)

    # Predict the reflections
    logger.info("\n".join(("", "=" * 80, "")))
    logger.info(heading("Predicting reflections"))
    predicted = flex.reflection_table.from_predictions_multi(
        experiments,
        dmin=params.prediction.d_min,
        dmax=params.prediction.d_max,
        margin=params.prediction.margin,
        force_static=params.prediction.force_static,
        padding=params.prediction.padding,
    )

    # Match reference with predicted
    if reference:
        matched, reference, unmatched = predicted.match_with_reference(
            reference)
        assert len(matched) == len(predicted)
        assert matched.count(True) <= len(reference)
        if matched.count(True) == 0:
            raise ValueError("""
        Invalid input for reference reflections.
        Zero reference spots were matched to predictions
    """)
        elif unmatched:
            msg = (
                "Warning: %d reference spots were not matched to predictions" %
                unmatched.size())
            border = "\n".join(("", "*" * 80, ""))
            logger.info("".join((border, msg, border)))
            rubbish.extend(unmatched)

        if len(experiments) > 1:
            # filter out any experiments without matched reference reflections
            # f_: filtered

            f_reference = flex.reflection_table()
            f_predicted = flex.reflection_table()
            f_rubbish = flex.reflection_table()
            f_experiments = ExperimentList()
            good_expt_count = 0

            def refl_extend(src, dest, eid):
                old_id = eid
                new_id = good_expt_count
                tmp = src.select(src["id"] == old_id)
                tmp["id"] = flex.int(len(tmp), good_expt_count)
                if old_id in tmp.experiment_identifiers():
                    identifier = tmp.experiment_identifiers()[old_id]
                    del tmp.experiment_identifiers()[old_id]
                    tmp.experiment_identifiers()[new_id] = identifier
                dest.extend(tmp)

            for expt_id, experiment in enumerate(experiments):
                if len(reference.select(reference["id"] == expt_id)) != 0:
                    refl_extend(reference, f_reference, expt_id)
                    refl_extend(predicted, f_predicted, expt_id)
                    refl_extend(rubbish, f_rubbish, expt_id)
                    f_experiments.append(experiment)
                    good_expt_count += 1
                else:
                    logger.info(
                        "Removing experiment %d: no reference reflections matched to predictions",
                        expt_id,
                    )

            reference = f_reference
            predicted = f_predicted
            experiments = f_experiments
            rubbish = f_rubbish

    # Select a random sample of the predicted reflections
    if not params.sampling.integrate_all_reflections:
        predicted = sample_predictions(experiments, predicted, params)

    # Compute the profile model - either load existing or compute
    # can raise RuntimeError
    experiments = ProfileModelFactory.create(params, experiments, reference)
    for expr in experiments:
        if expr.profile is None:
            raise ValueError("No profile information in experiment list")
    del reference

    # Compute the bounding box
    predicted.compute_bbox(experiments)

    # Create the integrator
    integrator = create_integrator(params, experiments, predicted)

    # Integrate the reflections
    reflections = integrator.integrate()

    # Remove unintegrated reflections
    if not params.output.output_unintegrated_reflections:
        keep = reflections.get_flags(reflections.flags.integrated, all=False)
        logger.info("Removing %d unintegrated reflections of %d total" %
                    (keep.count(False), keep.size()))

        reflections = reflections.select(keep)

    # Append rubbish data onto the end
    if rubbish is not None and params.output.include_bad_reference:
        mask = flex.bool(len(rubbish), True)
        rubbish.unset_flags(mask, rubbish.flags.integrated_sum)
        rubbish.unset_flags(mask, rubbish.flags.integrated_prf)
        rubbish.set_flags(mask, rubbish.flags.bad_reference)
        reflections.extend(rubbish)

    # Correct integrated intensities for absorption correction, if necessary
    for abs_params in params.absorption_correction:
        if abs_params.apply and abs_params.algorithm == "fuller_kapton":
            from dials.algorithms.integration.kapton_correction import (
                multi_kapton_correction, )

            experiments, reflections = multi_kapton_correction(
                experiments,
                reflections,
                abs_params.fuller_kapton,
                logger=logger)()

    if params.significance_filter.enable:
        from dials.algorithms.integration.stills_significance_filter import (
            SignificanceFilter, )

        sig_filter = SignificanceFilter(params)
        filtered_refls = sig_filter(experiments, reflections)
        accepted_expts = ExperimentList()
        accepted_refls = flex.reflection_table()
        logger.info(
            "Removed %d reflections out of %d when applying significance filter",
            (reflections.size() - filtered_refls.size()),
            reflections.size(),
        )
        for expt_id, expt in enumerate(experiments):
            refls = filtered_refls.select(filtered_refls["id"] == expt_id)
            if refls:
                accepted_expts.append(expt)
                current_id = expt_id
                new_id = len(accepted_expts) - 1
                refls["id"] = flex.int(len(refls), new_id)
                if expt.identifier:
                    del refls.experiment_identifiers()[current_id]
                    refls.experiment_identifiers()[new_id] = expt.identifier
                accepted_refls.extend(refls)
            else:
                logger.info(
                    "Removed experiment %d which has no reflections left after applying significance filter",
                    expt_id,
                )

        if not accepted_refls:
            raise ValueError(
                "No reflections left after applying significance filter")
        experiments = accepted_expts
        reflections = accepted_refls

    # Write a report if requested
    report = None
    if params.output.report is not None:
        report = integrator.report()

    return experiments, reflections, report
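# A minimal sketch of the flag bookkeeping applied to the "rubbish"
# (unmatched reference) reflections above: integration flags are cleared
# and the rows are marked as bad references instead.
from dials.array_family import flex

rubbish = flex.reflection_table(5)
mask = flex.bool(len(rubbish), True)
rubbish.unset_flags(mask, rubbish.flags.integrated_sum)
rubbish.unset_flags(mask, rubbish.flags.integrated_prf)
rubbish.set_flags(mask, rubbish.flags.bad_reference)
assert rubbish.get_flags(rubbish.flags.bad_reference).count(True) == len(rubbish)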
Exemple #46
0
def run(i, imp):
  from random import randint
  from dials.array_family import flex

  # building a reflection table
  num_ref = 5
  ref_table = flex.reflection_table()

  shoebox = flex.shoebox(num_ref)
  ref_table['shoebox'] = shoebox

  intensity = flex.double(num_ref)
  ref_table['intensity.sum.value'] = intensity

  intensity_var = flex.double(num_ref)
  ref_table['intensity.sum.variance'] = intensity_var

  iterate = ref_table['shoebox']
  i_to_compare = []

  # building the shoebox with a desired content
  # which is a reflection with noise included

  n = 0
  for arr in iterate:
    img = flex.double(flex.grid(3, 3, 3))
    bkg = flex.double(flex.grid(3, 3, 3))
    msk = flex.int(flex.grid(3, 3, 3))
    for row in range(3):
      for col in range(3):
        for fra in range(3):
          img[row, col, fra] = row + col + fra + n * 9 + randint(0, i)
          bkg[row, col, fra] = 0.0
          msk[row, col, fra] = 3
    n += 1
    msk[1, 1, 1] = 5
    tmp_i = n * n * n * 3
    i_to_compare.append(tmp_i)
    img[1, 1, 1] += tmp_i

    arr.data = img[:, :, :]
    arr.background = bkg[:, :, :]
    arr.mask = msk[:, :, :]

  # calling the functions that we need to test
  # first select the algorithm for background calculation

  if imp == "inclined":
    print("testing inclined_background_subtractor")
    from dials.algorithms.background.inclined_background_subtractor \
     import layering_and_background_plane
    layering_and_background_plane(ref_table)
  elif imp == "flat":
    print("testing flat_background_subtractor")
    from dials.algorithms.background.flat_background_subtractor \
     import layering_and_background_avg
    layering_and_background_avg(ref_table)
  elif imp == "curved":
    print("testing curved_background_subtractor")
    from dials.algorithms.background.curved_background_subtractor \
     import layering_and_background_modl
    layering_and_background_modl(ref_table)

  # no matter which algorithm was used for background calculation
  # the integration summation must remain compatible

  from dials.algorithms.integration.summation2d \
    import  flex_2d_layering_n_integrating
  flex_2d_layering_n_integrating(ref_table)

  # comparing results

  result = "OK"
  resl_its = ref_table['intensity.sum.value']
  resl_var = ref_table['intensity.sum.variance']
  for n_its in range(len(resl_its)):
    if resl_its[n_its] <= i_to_compare[n_its] + i and \
       resl_its[n_its] >= i_to_compare[n_its] - i and \
       resl_var[n_its] > resl_its[n_its]:
      print("Ok ", n_its)
    else:
      print("Wrong num", n_its)

      print("i =", i)
      print("resl_its[n_its] =", resl_its[n_its])
      print("i_to_compare[n_its] =", i_to_compare[n_its])
      print("resl_var[n_its] =", resl_var[n_its])

      result = "wrong"
      raise RuntimeError('wrong result')
  return result
    def __init__(self, reflections, av_callback=flex.mean, debug=False):

        # flags to indicate at what level the analysis has been performed
        self._average_residuals = False
        self._spectral_analysis = False

        self._av_callback = av_callback

        # Remove invalid reflections
        reflections = reflections.select(~(reflections["miller_index"] == (0, 0, 0)))
        x, y, z = reflections["xyzcal.mm"].parts()
        sel = (x == 0) & (y == 0)
        reflections = reflections.select(~sel)
        self._nexp = flex.max(reflections["id"]) + 1

        # Ensure required keys are present
        if not all(k in reflections for k in ["x_resid", "y_resid", "phi_resid"]):
            x_obs, y_obs, phi_obs = reflections["xyzobs.mm.value"].parts()
            x_cal, y_cal, phi_cal = reflections["xyzcal.mm"].parts()

            # do not wrap around multiples of 2*pi; keep the full rotation
            # from zero to differentiate repeat observations.

            TWO_PI = 2.0 * math.pi
            resid = phi_cal - (flex.fmod_positive(phi_obs, TWO_PI))
            # ensure this is the smaller of two possibilities
            resid = flex.fmod_positive((resid + math.pi), TWO_PI) - math.pi
            phi_cal = phi_obs + resid
            reflections["x_resid"] = x_cal - x_obs
            reflections["y_resid"] = y_cal - y_obs
            reflections["phi_resid"] = phi_cal - phi_obs

        # create empty results list
        self._results = []

        # first, just determine a suitable block size for analysis
        for iexp in range(self._nexp):
            ref_this_exp = reflections.select(reflections["id"] == iexp)
            if len(ref_this_exp) == 0:
                # can't do anything, just keep an empty dictionary
                self._results.append({})
                continue
            phi_obs_deg = ref_this_exp["xyzobs.mm.value"].parts()[2] * RAD2DEG
            phi_range = flex.min(phi_obs_deg), flex.max(phi_obs_deg)
            phi_width = phi_range[1] - phi_range[0]
            ideal_block_size = 1.0
            old_nblocks = 0
            while True:
                nblocks = int(phi_width // ideal_block_size)
                if nblocks == old_nblocks:
                    nblocks -= 1
                nblocks = max(nblocks, 1)
                block_size = phi_width / nblocks
                nr = flex.int()
                for i in range(nblocks - 1):
                    blk_start = phi_range[0] + i * block_size
                    blk_end = blk_start + block_size
                    sel = (phi_obs_deg >= blk_start) & (phi_obs_deg < blk_end)
                    nref_in_block = sel.count(True)
                    nr.append(nref_in_block)
                # include max phi in the final block
                blk_start = phi_range[0] + (nblocks - 1) * block_size
                blk_end = phi_range[1]
                sel = (phi_obs_deg >= blk_start) & (phi_obs_deg <= blk_end)
                nref_in_block = sel.count(True)
                nr.append(nref_in_block)
                # Break if there are enough reflections, otherwise increase block size,
                # unless only one block remains
                if nblocks == 1:
                    break
                min_nr = flex.min(nr)
                if min_nr >= 50:
                    break
                if min_nr < 5:
                    fac = 2
                else:
                    fac = 50 / min_nr
                ideal_block_size *= fac
                old_nblocks = nblocks

            # collect the basic data for this experiment
            self._results.append(
                {
                    "block_size": block_size,
                    "nref_per_block": nr,
                    "nblocks": nblocks,
                    "phi_range": phi_range,
                }
            )

        # keep reflections for analysis
        self._reflections = reflections

        # for debugging, write out reflections used
        if debug:
            self._reflections.as_file("centroid_analysis.refl")
Exemple #48
0
    def run(self, args=None):
        """Execute the script."""

        from dials.util.options import reflections_and_experiments_from_files

        # Parse the command line
        params, options = self.parser.parse_args(args, show_diff_phil=True)
        reflections, experiments = reflections_and_experiments_from_files(
            params.input.reflections, params.input.experiments)

        # Try to load the models and data
        slice_exps = len(experiments) > 0
        slice_refs = len(reflections) > 0

        # Catch case of nothing to do
        if not slice_exps and not slice_refs:
            print("No suitable input provided")
            self.parser.print_help()
            return

        if reflections:
            if len(reflections) > 1:
                raise Sorry(
                    "Only one reflections list can be imported at present")
            reflections = reflections[0]

            # calculate frame numbers if needed
            if experiments:
                reflections = calculate_frame_numbers(reflections, experiments)

            # if we still don't have the right column give up
            if "xyzobs.px.value" not in reflections:
                raise Sorry(
                    "These reflections do not have frame numbers set, and "
                    "there are no experiments provided to calculate these.")

        # set trivial case where no scan range is provided at all
        if not params.image_range:
            params.image_range = [None]

        # check if slicing into blocks
        if params.block_size is not None:
            if not slice_exps:
                raise Sorry(
                    "For slicing into blocks, an experiment file must be provided"
                )

            if len(experiments) > 1:
                raise Sorry(
                    "For slicing into blocks please provide a single scan only"
                )
            scan = experiments[0].scan

            # Having extracted the scan, calculate the blocks
            params.image_range = calculate_block_ranges(
                scan, params.block_size)

            # Do the slicing then recombine
            sliced = [
                slice_experiments(experiments, [sr])[0]
                for sr in params.image_range
            ]
            generate_experiment_identifiers(sliced)
            sliced_experiments = ExperimentList(sliced)

            # slice reflections if present
            if slice_refs:
                sliced = [
                    slice_reflections(reflections, [sr])
                    for sr in params.image_range
                ]
                sliced_reflections = flex.reflection_table()
                identifiers = sliced_experiments.identifiers()
                # resetting experiment identifiers
                for i, rt in enumerate(sliced):
                    for k in rt.experiment_identifiers().keys():
                        del rt.experiment_identifiers()[k]
                    rt["id"] = flex.int(rt.size(), i)  # set id
                    rt.experiment_identifiers()[i] = identifiers[i]
                    sliced_reflections.extend(rt)

        else:
            # slice each dataset into the requested subset
            if slice_exps:
                sliced_experiments = slice_experiments(experiments,
                                                       params.image_range)
            if slice_refs:
                sliced_reflections = slice_reflections(reflections,
                                                       params.image_range)

        # Save sliced experiments
        if slice_exps:
            output_experiments_filename = params.output.experiments_filename
            if output_experiments_filename is None:
                # take first filename as template
                bname = basename(params.input.experiments[0].filename)
                bname = splitext(bname)[0]
                if not bname:
                    bname = "experiments"
                if len(params.image_range) == 1 and params.image_range[0] is not None:
                    ext = "_{0}_{1}.expt".format(*params.image_range[0])
                else:
                    ext = "_sliced.expt"
                output_experiments_filename = bname + ext
            print("Saving sliced experiments to {}".format(
                output_experiments_filename))

            sliced_experiments.as_file(output_experiments_filename)

        # Save sliced reflections
        if slice_refs:
            output_reflections_filename = params.output.reflections_filename
            if output_reflections_filename is None:
                # take first filename as template
                bname = basename(params.input.reflections[0].filename)
                bname = splitext(bname)[0]
                if not bname:
                    bname = "reflections"
                if len(params.image_range) == 1 and params.image_range[0] is not None:
                    ext = "_{0}_{1}.refl".format(*params.image_range[0])
                else:
                    ext = "_sliced.refl"
                output_reflections_filename = bname + ext

            print("Saving sliced reflections to {0}".format(
                output_reflections_filename))
            sliced_reflections.as_file(output_reflections_filename)

        return
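Exemple #48 relies on calculate_block_ranges to turn a scan and a block size
into a list of image ranges. A hedged sketch of what such a helper might look
like (block_ranges is an illustrative name; the real function's rounding
behaviour may differ):

def block_ranges(first_image, last_image, block_size):
    """Split an inclusive image range into contiguous blocks of block_size images."""
    ranges = []
    start = first_image
    while start <= last_image:
        end = min(start + block_size - 1, last_image)
        ranges.append((start, end))
        start = end + 1
    return ranges

assert block_ranges(1, 100, 30) == [(1, 30), (31, 60), (61, 90), (91, 100)]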
Exemple #49
0
def test_assign_indices(dials_regression, space_group_symbol):
    experiments_json = os.path.join(dials_regression, "indexing_test_data",
                                    "i04_weak_data", "datablock_orig.json")

    experiments = load.experiment_list(experiments_json, check_format=False)
    sweep = experiments.imagesets()[0]

    sweep = sweep[:20]

    # set random seeds so tests more reliable
    seed = 54321
    random.seed(seed)
    flex.set_random_seed(seed)

    space_group_info = sgtbx.space_group_info(symbol=space_group_symbol)
    space_group = space_group_info.group()
    unit_cell = space_group_info.any_compatible_unit_cell(
        volume=random.uniform(1e4, 1e6))

    crystal_symmetry = crystal.symmetry(unit_cell=unit_cell,
                                        space_group=space_group)
    crystal_symmetry.show_summary()

    # the reciprocal matrix
    B = matrix.sqr(unit_cell.fractionalization_matrix()).transpose()
    U = random_rotation()
    A = U * B

    direct_matrix = A.inverse()
    cryst_model = Crystal(
        direct_matrix[0:3],
        direct_matrix[3:6],
        direct_matrix[6:9],
        space_group=space_group,
    )
    experiment = Experiment(
        imageset=sweep,
        beam=sweep.get_beam(),
        detector=sweep.get_detector(),
        goniometer=sweep.get_goniometer(),
        scan=sweep.get_scan(),
        crystal=cryst_model,
    )
    predicted_reflections = flex.reflection_table.from_predictions(experiment)
    use_fraction = 0.3
    use_sel = flex.random_selection(
        len(predicted_reflections),
        int(use_fraction * len(predicted_reflections)))
    predicted_reflections = predicted_reflections.select(use_sel)
    miller_indices = predicted_reflections["miller_index"]
    predicted_reflections["xyzobs.mm.value"] = predicted_reflections[
        "xyzcal.mm"]
    predicted_reflections["id"] = flex.int(len(predicted_reflections), 0)
    predicted_reflections.map_centroids_to_reciprocal_space(
        sweep.get_detector(), sweep.get_beam(), sweep.get_goniometer())

    # check that local and global indexing worked equally well in absence of errors
    result = CompareGlobalLocal(experiment, predicted_reflections,
                                miller_indices)
    assert result.misindexed_local == 0
    assert result.misindexed_global == 0

    a, b, c = map(matrix.col, cryst_model.get_real_space_vectors())
    relative_error = 0.02
    a *= 1 + relative_error
    b *= 1 + relative_error
    c *= 1 + relative_error

    cryst_model2 = Crystal(a, b, c, space_group=space_group)
    experiment.crystal = cryst_model2

    result = CompareGlobalLocal(experiment, predicted_reflections,
                                miller_indices)

    # check that the local indexing did a better job given the errors in the basis vectors
    # assert result.misindexed_local < result.misindexed_global
    assert result.misindexed_local == 0
    assert result.correct_local > result.correct_global
    # usually the number misindexed is much smaller than this
    assert result.misindexed_local < (0.001 * len(result.reflections_local))

    # the reciprocal matrix
    A = matrix.sqr(cryst_model.get_A())
    A = random_rotation(angle_max=0.5) * A

    direct_matrix = A.inverse()
    cryst_model2 = Crystal(
        direct_matrix[0:3],
        direct_matrix[3:6],
        direct_matrix[6:9],
        space_group=space_group,
    )
    experiment.crystal = cryst_model2

    result = CompareGlobalLocal(experiment, predicted_reflections,
                                miller_indices)

    # check that the local indexing did a better job given the errors in the basis vectors
    assert result.misindexed_local <= result.misindexed_global, (
        result.misindexed_local,
        result.misindexed_global,
    )
    assert result.misindexed_local < 0.01 * result.correct_local
    assert result.correct_local >= result.correct_global
    # usually the number misindexed is much smaller than this
    assert result.misindexed_local < (0.001 * len(result.reflections_local))
Exemple #50
0
  def run(self):
    ''' Extract the shoeboxes. '''
    from dials.util.options import flatten_reflections
    from dials.util.options import flatten_experiments
    from dials.util.options import flatten_datablocks
    from dials.util import log
    from dials.array_family import flex
    from libtbx.utils import Sorry

    # Parse the command line
    params, options = self.parser.parse_args(show_diff_phil=False)

    # Configure logging
    log.config()

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil:
      logger.info('The following parameters have been modified:\n')
      logger.info(diff_phil)

    # Get the data
    reflections = flatten_reflections(params.input.reflections)
    experiments = flatten_experiments(params.input.experiments)
    datablocks = flatten_datablocks(params.input.datablock)
    if not any([experiments, datablocks, reflections]):
      self.parser.print_help()
      exit(0)
    elif experiments and datablocks:
      raise Sorry('Both experiment list and datablocks set')
    elif len(experiments) > 1:
      raise Sorry('More than 1 experiment set')
    elif len(datablocks) > 1:
      raise Sorry('More than 1 datablock set')
    elif len(experiments) == 1:
      imageset = experiments[0].imageset
    elif len(datablocks) == 1:
      imagesets = datablocks[0].extract_imagesets()
      if len(imagesets) != 1:
        raise Sorry('Need 1 imageset, got %d' % len(imagesets))
      imageset = imagesets[0]
    if len(reflections) != 1:
      raise Sorry('Need 1 reflection table, got %d' % len(reflections))
    else:
      reflections = reflections[0]

    # Check the reflections contain the necessary stuff
    assert("bbox" in reflections)
    assert("panel" in reflections)

    # Get some models
    detector = imageset.get_detector()
    scan = imageset.get_scan()
    frame0, frame1 = scan.get_array_range()

    # Add some padding but limit to image volume
    if params.padding > 0:
      logger.info('Adding %d pixels as padding' % params.padding)
      x0, x1, y0, y1, z0, z1 = reflections['bbox'].parts()
      x0 -= params.padding
      x1 += params.padding
      y0 -= params.padding
      y1 += params.padding
      # z0 -= params.padding
      # z1 += params.padding
      panel = reflections['panel']
      for i in range(len(reflections)):
        width, height = detector[panel[i]].get_image_size()
        if x0[i] < 0: x0[i] = 0
        if x1[i] > width: x1[i] = width
        if y0[i] < 0: y0[i] = 0
        if y1[i] > height: y1[i] = height
        if z0[i] < frame0: z0[i] = frame0
        if z1[i] > frame1: z1[i] = frame1
      reflections['bbox'] = flex.int6(x0, x1, y0, y1, z0, z1)

    # Save the old shoeboxes
    if "shoebox" in reflections:
      old_shoebox = reflections['shoebox']
    else:
      old_shoebox = None

    # Allocate the shoeboxes
    reflections["shoebox"] = flex.shoebox(
      reflections["panel"],
      reflections["bbox"],
      allocate=True)

    # Extract the shoeboxes
    reflections.extract_shoeboxes(imageset, verbose=True)

    # Preserve masking
    if old_shoebox is not None:
      from dials.algorithms.shoebox import MaskCode
      logger.info("Applying old shoebox mask")
      new_shoebox = reflections['shoebox']
      for i in range(len(reflections)):
        bbox0 = old_shoebox[i].bbox
        bbox1 = new_shoebox[i].bbox
        mask0 = old_shoebox[i].mask
        mask1 = new_shoebox[i].mask
        mask2 = flex.int(mask1.accessor(), 0)
        x0 = bbox0[0] - bbox1[0]
        x1 = bbox0[1] - bbox0[0] + x0
        y0 = bbox0[2] - bbox1[2]
        y1 = bbox0[3] - bbox0[2] + y0
        z0 = bbox0[4] - bbox1[4]
        z1 = bbox0[5] - bbox0[4] + z0
        mask2[z0:z1,y0:y1,x0:x1] = mask0
        mask1 = mask1.as_1d() | mask2.as_1d()
        if params.padding_is_background:
          selection = flex.size_t(range(len(mask1))).select(mask1 == MaskCode.Valid)
          values = flex.int(len(selection), MaskCode.Valid | MaskCode.Background)
          mask1.set_selected(selection, values)
        mask1.reshape(new_shoebox[i].mask.accessor())
        new_shoebox[i].mask = mask1

    # Saving the reflections to disk
    filename = params.output.reflections
    logger.info('Saving %d reflections to %s' % (len(reflections), filename))
    reflections.as_pickle(filename)
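The mask-preservation loop above pastes each old shoebox mask into the newly
padded shoebox by offsetting the old bbox against the new one. A standalone
numpy sketch of the same offset arithmetic (paste_mask is an illustrative
helper; bboxes are (x0, x1, y0, y1, z0, z1) as in DIALS, masks are in
(z, y, x) order):

import numpy as np

def paste_mask(old_mask, old_bbox, new_bbox):
    """Place old_mask inside a zeroed array shaped like new_bbox."""
    nx = new_bbox[1] - new_bbox[0]
    ny = new_bbox[3] - new_bbox[2]
    nz = new_bbox[5] - new_bbox[4]
    out = np.zeros((nz, ny, nx), dtype=old_mask.dtype)
    # offset of the old box corner within the new, padded box
    x0 = old_bbox[0] - new_bbox[0]
    y0 = old_bbox[2] - new_bbox[2]
    z0 = old_bbox[4] - new_bbox[4]
    dz, dy, dx = old_mask.shape
    out[z0:z0 + dz, y0:y0 + dy, x0:x0 + dx] = old_mask
    return out

# e.g. a 1x2x2 mask padded by one pixel in x and y:
m = paste_mask(np.ones((1, 2, 2), dtype=int), (5, 7, 5, 7, 0, 1), (4, 8, 4, 8, 0, 1))
assert m.shape == (1, 4, 4) and m.sum() == 4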
Exemple #51
0
def reflections_1():
    """Test reflection table with batch"""
    r = flex.reflection_table()
    r["batch"] = flex.int([1, 11, 21, 31, 41, 51, 61, 71, 81, 91])
    r.set_flags(flex.bool(10, False), r.flags.user_excluded_in_scaling)
    return r
Exemple #52
0
def reflections_2():
    """Test reflection table with batch"""
    r = flex.reflection_table()
    r["batch"] = flex.int([201, 211, 221, 231, 241, 251, 261, 271, 281, 291])
    r.set_flags(flex.bool(10, False), r.flags.user_excluded_in_scaling)
    return r
Exemple #53
0
    def batch_plot_shapes_and_annotations(self):
        light_grey = "#d3d3d3"
        grey = "#808080"
        shapes = []
        annotations = []
        batches = flex.int(self.batches)
        text = flex.std_string(batches.size())
        for i, batch in enumerate(self.batch_params):
            fillcolor = [light_grey, grey][i % 2]  # alternate colours
            shapes.append({
                "type": "rect",
                "xref": "x",      # x-reference is assigned to the x-values
                "yref": "paper",  # y-reference is assigned to the plot paper [0, 1]
                "x0": self._batch_increments[i],
                "y0": 0,
                "x1": self._batch_increments[i] + (batch["range"][1] - batch["range"][0]),
                "y1": 1,
                "fillcolor": fillcolor,
                "opacity": 0.2,
                "line": {"width": 0},
            })
            annotations.append({
                "xref": "x",      # x-reference is assigned to the x-values
                "yref": "paper",  # y-reference is assigned to the plot paper [0, 1]
                "x": self._batch_increments[i] + (batch["range"][1] - batch["range"][0]) / 2,
                "y": 1,
                "text": f"{batch['id']}",
                "showarrow": False,
                "yshift": 20,
                # 'arrowhead': 7, 'ax': 0, 'ay': -40
            })
            sel = (batches >= batch["range"][0]) & (batches <= batch["range"][1])
            text.set_selected(
                sel,
                flex.std_string([
                    f"{batch['id']}: {j - batch['range'][0] + 1}"
                    for j in batches.select(sel)
                ]),
            )
        return shapes, annotations, list(text)
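Downstream, the return values slot straight into a plotly figure layout. A
hedged usage sketch (the axis titles and the layout dict are illustrative,
not taken from the original code):

shapes, annotations, hovertext = self.batch_plot_shapes_and_annotations()
layout = {
    "xaxis": {"title": "batch"},
    "yaxis": {"title": "value"},
    "shapes": shapes,            # alternating grey bands, one per dataset
    "annotations": annotations,  # dataset id labels above each band
}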
Exemple #54
0
    def assign_hkl_to_reflections(self,
                                  reflections,
                                  experiments,
                                  d_min=None,
                                  tolerance=0.3,
                                  debug=False):
        ''' Assign hkl values to the reflections on a shot, using the
        underlying C++ function AssignIndices() available in DIALS. '''
        from cctbx.array_family import flex

        reciprocal_lattice_points = reflections['rlp']
        reflections['miller_index'] = flex.miller_index(
            len(reflections), (0, 0, 0))
        # IOTA
        reflections['fractional_miller_index'] = flex.vec3_double(
            len(reflections), (0.0, 0.0, 0.0))

        if d_min is not None:
            d_spacings = 1 / reciprocal_lattice_points.norms()
            inside_resolution_limit = d_spacings > d_min
        else:
            inside_resolution_limit = flex.bool(
                reciprocal_lattice_points.size(), True)

        sel = inside_resolution_limit & (reflections['id'] == -1)
        isel = sel.iselection()
        rlps = reciprocal_lattice_points.select(isel)
        refs = reflections.select(isel)
        phi = refs['xyzobs.mm.value'].parts()[2]

        diffs = []
        norms = []
        hkl_ints = []

        UB_matrices = flex.mat3_double(
            [cm.get_A() for cm in experiments.crystals()])
        imgset_ids = reflections['imageset_id'].select(sel)

        for i_imgset, imgset in enumerate(experiments.imagesets()):
            sel_imgset = (imgset_ids == i_imgset)
            result = AssignIndices(rlps.select(sel_imgset),
                                   phi.select(sel_imgset),
                                   UB_matrices,
                                   tolerance=tolerance)

            miller_indices = result.miller_indices()
            crystal_ids = result.crystal_ids()
            expt_ids = flex.int(crystal_ids.size(), -1)
            for i_cryst, cryst in enumerate(experiments.crystals()):
                sel_cryst = (crystal_ids == i_cryst)
                for i_expt in experiments.where(crystal=cryst,
                                                imageset=imgset):
                    expt_ids.set_selected(sel_cryst, i_expt)

            reflections['miller_index'].set_selected(isel.select(sel_imgset),
                                                     miller_indices)
            reflections['id'].set_selected(isel.select(sel_imgset), expt_ids)
            reflections.set_flags(reflections['miller_index'] != (0, 0, 0),
                                  reflections.flags.indexed)
            reflections['id'].set_selected(
                reflections['miller_index'] == (0, 0, 0), -1)
Exemple #55
0
    def run(self, all_experiments, all_reflections):
        """ Load all the data using MPI """
        from dxtbx.model.experiment_list import ExperimentList
        from dials.array_family import flex

        # Both must be none or not none
        test = [all_experiments is None, all_reflections is None].count(True)
        assert test in [0, 2]
        if test == 2:
            all_experiments = ExperimentList()
            all_reflections = flex.reflection_table()
            starting_expts_count = starting_refls_count = 0
        else:
            starting_expts_count = len(all_experiments)
            starting_refls_count = len(all_reflections)
        self.logger.log(
            "Initial number of experiments: %d; Initial number of reflections: %d"
            % (starting_expts_count, starting_refls_count))

        # Generate and send a list of file paths to each worker
        if self.mpi_helper.rank == 0:
            file_list = self.get_list()
            self.logger.log(
                "Built an input list of %d json/pickle file pairs" %
                (len(file_list)))
            self.params.input.path = None  # Rank 0 has already parsed the input parameters
            per_rank_file_list = file_load_calculator(
                self.params, file_list, self.logger
            ).calculate_file_load(available_rank_count=self.mpi_helper.size)
            self.logger.log(
                'Transmitting a list of %d lists of json/pickle file pairs' %
                (len(per_rank_file_list)))
            transmitted = per_rank_file_list
        else:
            transmitted = None

        self.logger.log_step_time("BROADCAST_FILE_LIST")
        transmitted = self.mpi_helper.comm.bcast(transmitted, root=0)
        rank = self.mpi_helper.rank
        new_file_list = transmitted[rank] if rank < len(transmitted) else None
        self.logger.log_step_time("BROADCAST_FILE_LIST", True)

        # Load the data
        self.logger.log_step_time("LOAD")
        if new_file_list is not None:
            self.logger.log("Received a list of %d json/pickle file pairs" %
                            len(new_file_list))
            for experiments_filename, reflections_filename in new_file_list:
                self.logger.log("Reading %s %s" %
                                (experiments_filename, reflections_filename))
                experiments = ExperimentListFactory.from_json_file(
                    experiments_filename, check_format=False)
                reflections = flex.reflection_table.from_file(
                    reflections_filename)
                self.logger.log("Data read, prepping")

                if 'intensity.sum.value' in reflections:
                    reflections['intensity.sum.value.unmodified'] = \
                        reflections['intensity.sum.value'] * 1
                if 'intensity.sum.variance' in reflections:
                    reflections['intensity.sum.variance.unmodified'] = \
                        reflections['intensity.sum.variance'] * 1

                new_ids = flex.int(len(reflections), -1)
                new_identifiers = flex.std_string(len(reflections))
                eid = reflections.experiment_identifiers()
                for k in eid.keys():
                    del eid[k]
                for experiment_id, experiment in enumerate(experiments):
                    # select reflections of the current experiment
                    refls_sel = reflections['id'] == experiment_id

                    if refls_sel.count(True) == 0: continue

                    if experiment.identifier is None or len(experiment.identifier) == 0:
                        experiment.identifier = create_experiment_identifier(
                            experiment, experiments_filename, experiment_id)

                    if not self.params.input.keep_imagesets:
                        experiment.imageset = None
                    all_experiments.append(experiment)

                    # Reflection experiment 'id' is unique within this rank; 'exp_id' (i.e. experiment identifier) is unique globally
                    new_identifiers.set_selected(refls_sel,
                                                 experiment.identifier)

                    new_id = len(all_experiments) - 1
                    eid[new_id] = experiment.identifier
                    new_ids.set_selected(refls_sel, new_id)
                assert (new_ids < 0).count(True) == 0, "Not all reflections accounted for"
                reflections['id'] = new_ids
                reflections['exp_id'] = new_identifiers
                all_reflections.extend(reflections)
        else:
            self.logger.log("Received a list of 0 json/pickle file pairs")
        self.logger.log_step_time("LOAD", True)

        self.logger.log('Read %d experiments consisting of %d reflections' %
                        (len(all_experiments) - starting_expts_count,
                         len(all_reflections) - starting_refls_count))
        self.logger.log("Memory usage: %d MB" % get_memory_usage())

        all_reflections = self.prune_reflection_table_keys(all_reflections)

        # Do we have any data?
        from xfel.merging.application.utils.data_counter import data_counter
        data_counter(self.params).count(all_experiments, all_reflections)
        return all_experiments, all_reflections
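file_load_calculator decides which rank reads which (json, pickle) pair. As a
rough stand-in, a simple round-robin split shows the shape of the transmitted
structure (the real class may balance by file size instead):

def round_robin_split(file_list, n_ranks):
    """Distribute (experiments, reflections) file pairs across n_ranks lists."""
    per_rank = [[] for _ in range(n_ranks)]
    for i, pair in enumerate(file_list):
        per_rank[i % n_ranks].append(pair)
    return per_rank

pairs = [('a.expt', 'a.refl'), ('b.expt', 'b.refl'), ('c.expt', 'c.refl')]
assert round_robin_split(pairs, 2) == [
    [('a.expt', 'a.refl'), ('c.expt', 'c.refl')],
    [('b.expt', 'b.refl')],
]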
Exemple #56
0
    def index(self, provided_experiments=None, debug=False):
        ''' This step does:
            1. find_lattices (via a method like fft1d)
            2. assign hkl indices (through index_reflections)
            3. housekeeping such as apply_symmetry and discarding
               too-similar models
        '''

        experiments = ExperimentList()
        have_similar_crystal_models = False

        self.d_min = self.params.refinement_protocol.d_min_start

        # Find the lattices, i.e. the basis vectors & unit cell parameters.
        # More than one lattice may be indexed, up to max_lattices below.
        while True:
            max_lattices = self.params.multiple_lattice_search.max_lattices
            if max_lattices is not None and len(experiments) >= max_lattices:
                break
            n_lattices_previous_cycle = len(experiments)
            if len(experiments) == 0:
                experiments.extend(self.find_lattices())
            else:
                try:
                    new = self.find_lattices()
                    experiments.extend(new)
                except Sorry:
                    print('Indexing remaining reflections failed')
                    break

            if len(experiments) == 0:
                raise Sorry("No suitable lattice could be found.")

            # Initialize id values as -1 since no indexing has been done yet
            self.reflections['id'] = flex.int(len(self.reflections), -1)

            # Now index reflections
            self.index_reflections(experiments, self.reflections, debug=debug)

            # Housekeeping. Apply symmetry
            self._apply_symmetry_post_indexing(experiments, self.reflections,
                                               n_lattices_previous_cycle)

            # Aug_Refactor: the block below is kept commented out so that the
            # IOTA-specific behaviour can be restored if needed.
            '''
      target_space_group = self.target_symmetry_primitive.space_group()
      for i_cryst, cryst in enumerate(experiments.crystals()):
        if i_cryst >= n_lattices_previous_cycle:
          new_cryst, cb_op_to_primitive = self.apply_symmetry(
                                        cryst, target_space_group)
          if provided_experiments is None:
            if self.cb_op_primitive_inp is not None:
              new_cryst = new_cryst.change_basis(self.cb_op_primitive_inp)
              logger.info(new_cryst.get_space_group().info())
            cryst.update(new_cryst)
            cryst.set_space_group(
                self.params.known_symmetry.space_group.group())
          for i_expt, expt in enumerate(experiments):
            if expt.crystal is not cryst:
              continue
            if not cb_op_to_primitive.is_identity_op():
              miller_indices = self.reflections['miller_index'].select(
                  self.reflections['id'] == i_expt)

              if provided_experiments is None:
                miller_indices = cb_op_to_primitive.apply(miller_indices)
              self.reflections['miller_index'].set_selected(
                  self.reflections['id'] == i_expt, miller_indices)

            if self.cb_op_primitive_inp is not None:
              miller_indices = self.reflections['miller_index'].select(
                  self.reflections['id'] == i_expt)

              if provided_experiments is None:
                miller_indices = self.cb_op_primitive_inp.apply(miller_indices)
              self.reflections['miller_index'].set_selected(
                  self.reflections['id'] == i_expt, miller_indices)
              # IOTA
              from scitbx.matrix import sqr
              hklfrac=flex.mat3_double(len(miller_indices), sqr(cryst.get_A()).inverse())*self.reflections['rlp'].select(self.reflections['id']==i_expt)
              self.reflections['fractional_miller_index'].set_selected(self.reflections['id']==i_expt, hklfrac)
    '''

        logger.info("\nIndexed crystal models:")
        self.show_experiments(experiments, self.reflections, d_min=self.d_min)

        # Discard nearly overlapping lattices.
        # difference_rotation_matrix_axis_angle is still present in DIALS 2.0,
        # so nothing below needs to change.
        if len(experiments) > 1:
            from dials.algorithms.indexing.compare_orientation_matrices \
              import difference_rotation_matrix_axis_angle
            cryst_b = experiments.crystals()[-1]
            have_similar_crystal_models = False
            for i_a, cryst_a in enumerate(experiments.crystals()[:-1]):
                R_ab, axis, angle, cb_op_ab = \
                    difference_rotation_matrix_axis_angle(cryst_a, cryst_b)
                min_angle = self.params.multiple_lattice_search.minimum_angular_separation
                if abs(angle) < min_angle:  # degrees
                    logger.info(
                        "Crystal models too similar, rejecting crystal %i:" %
                        (len(experiments)))
                    logger.info(
                        "Rotation matrix to transform crystal %i to crystal %i"
                        % (i_a + 1, len(experiments)))
                    logger.info(R_ab)
                    logger.info("Rotation of %.3f degrees" % angle +
                                " about axis (%.3f, %.3f, %.3f)" % axis)
                    have_similar_crystal_models = True
                    del experiments[-1]
                    break

        self.indexed_reflections = (self.reflections['id'] > -1)
        self.experiments = experiments
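The commented-out IOTA block above computes fractional Miller indices as
h_frac = A^-1 * rlp, with A the crystal setting matrix. A tiny scitbx sketch
of that relation (the diagonal A matrix and the rlp value are made-up numbers
chosen so the result is obvious):

from scitbx import matrix

A = matrix.sqr((0.01, 0, 0, 0, 0.02, 0, 0, 0, 0.05))  # assumed setting matrix
rlp = matrix.col((0.03, 0.04, 0.10))                   # one reciprocal-lattice point
h_frac = A.inverse() * rlp
assert [round(v, 6) for v in h_frac.elems] == [3.0, 2.0, 2.0]  # rounds to hkl (3, 2, 2)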
Exemple #57
0
def test_del_selected():
  from dials.array_family import flex

  # The columns as lists
  c1 = list(range(10))
  c2 = list(range(10))
  c3 = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'i', 'j', 'k']

  # Create a table with some elements
  table1 = flex.reflection_table()
  table1['col1'] = flex.int(c1)
  table1['col2'] = flex.double(c2)
  table1['col3'] = flex.std_string(c3)

  # Del selected columns
  table1.del_selected(('col3', 'col2'))
  assert(table1.nrows() == 10)
  assert(table1.ncols() == 1)
  assert("col1" in table1)
  assert("col2" not in table1)
  assert("col3" not in table1)
  assert(all(a == b for a, b in zip(table1['col1'], c1)))

  # Del selected columns
  table1 = flex.reflection_table()
  table1['col1'] = flex.int(c1)
  table1['col2'] = flex.double(c2)
  table1['col3'] = flex.std_string(c3)
  table1.del_selected(flex.std_string(['col3', 'col2']))
  assert(table1.nrows() == 10)
  assert(table1.ncols() == 1)
  assert("col1" in table1)
  assert("col2" not in table1)
  assert("col3" not in table1)
  assert(all(a == b for a, b in zip(table1['col1'], c1)))

  # Del selected rows
  table1 = flex.reflection_table()
  table1['col1'] = flex.int(c1)
  table1['col2'] = flex.double(c2)
  table1['col3'] = flex.std_string(c3)

  index = flex.size_t([0, 1, 5, 8, 9])
  index2 = list(range(10))
  for i in index:
    index2.remove(i)
  ccc1 = [c1[i] for i in index2]
  ccc2 = [c2[i] for i in index2]
  ccc3 = [c3[i] for i in index2]
  table1.del_selected(index)
  assert(table1.nrows() == len(ccc1))
  assert(all(a == b for a, b in zip(table1['col1'], ccc1)))
  assert(all(a == b for a, b in zip(table1['col2'], ccc2)))
  assert(all(a == b for a, b in zip(table1['col3'], ccc3)))

  # Del selected rows
  table1 = flex.reflection_table()
  table1['col1'] = flex.int(c1)
  table1['col2'] = flex.double(c2)
  table1['col3'] = flex.std_string(c3)

  flags = flex.bool([True, True, False, False, False,
                     True, False, False, True, True])
  table1.del_selected(flags)
  assert(table1.nrows() == len(ccc1))
  assert(all(a == b for a, b in zip(table1['col1'], ccc1)))
  assert(all(a == b for a, b in zip(table1['col2'], ccc2)))
  assert(all(a == b for a, b in zip(table1['col3'], ccc3)))
Exemple #58
0
    def _refine(self):
        for epoch, idxr in self._refinr_indexers.items():
            experiments = idxr.get_indexer_experiment_list()

            indexed_experiments = idxr.get_indexer_payload(
                "experiments_filename")
            indexed_reflections = idxr.get_indexer_payload("indexed_filename")

            if len(experiments) > 1:
                xsweeps = idxr._indxr_sweeps
                assert len(xsweeps) == len(experiments)
                assert len(self._refinr_sweeps) == 1  # don't currently support joint refinement
                xsweep = self._refinr_sweeps[0]
                i = xsweeps.index(xsweep)
                experiments = experiments[i:i + 1]

                # Extract and output experiment and reflections for current sweep
                indexed_experiments = os.path.join(
                    self.get_working_directory(),
                    "%s_indexed.expt" % xsweep.get_name())
                indexed_reflections = os.path.join(
                    self.get_working_directory(),
                    "%s_indexed.refl" % xsweep.get_name())

                experiments.as_file(indexed_experiments)

                reflections = flex.reflection_table.from_file(
                    idxr.get_indexer_payload("indexed_filename"))
                sel = reflections["id"] == i
                assert sel.count(True) > 0
                imageset_id = reflections["imageset_id"].select(sel)
                assert imageset_id.all_eq(imageset_id[0])
                sel = reflections["imageset_id"] == imageset_id[0]
                reflections = reflections.select(sel)
                # set indexed reflections to id == 0 and imageset_id == 0
                reflections["id"].set_selected(reflections["id"] == i, 0)
                reflections["imageset_id"] = flex.int(len(reflections), 0)
                reflections.as_file(indexed_reflections)

            assert len(experiments.crystals()) == 1  # currently only handle one lattice/sweep

            scan_static = PhilIndex.params.dials.refine.scan_static

            # Avoid doing scan-varying refinement on narrow wedges.
            start, end = experiments[0].scan.get_oscillation_range()
            total_oscillation_range = end - start

            if (PhilIndex.params.dials.refine.scan_varying
                    and total_oscillation_range > 5
                    and not PhilIndex.params.dials.fast_mode):
                scan_varying = PhilIndex.params.dials.refine.scan_varying
            else:
                scan_varying = False

            if scan_static:
                refiner = self.Refine()
                refiner.set_experiments_filename(indexed_experiments)
                refiner.set_indexed_filename(indexed_reflections)
                refiner.set_scan_varying(False)
                refiner.run()
                self._refinr_experiments_filename = (
                    refiner.get_refined_experiments_filename())
                self._refinr_indexed_filename = refiner.get_refined_filename()
            else:
                self._refinr_experiments_filename = indexed_experiments
                self._refinr_indexed_filename = indexed_reflections

            if scan_varying:
                refiner = self.Refine()
                refiner.set_experiments_filename(
                    self._refinr_experiments_filename)
                refiner.set_indexed_filename(self._refinr_indexed_filename)
                if total_oscillation_range < 36:
                    refiner.set_interval_width_degrees(
                        total_oscillation_range / 2)
                refiner.run()
                self._refinr_experiments_filename = (
                    refiner.get_refined_experiments_filename())
                self._refinr_indexed_filename = refiner.get_refined_filename()

            if scan_static or scan_varying:
                FileHandler.record_log_file(
                    "%s REFINE" % idxr.get_indexer_full_name(),
                    refiner.get_log_file())
                report = self.Report()
                report.set_experiments_filename(
                    self._refinr_experiments_filename)
                report.set_reflections_filename(self._refinr_indexed_filename)
                html_filename = os.path.join(
                    self.get_working_directory(),
                    "%i_dials.refine.report.html" % report.get_xpid(),
                )
                report.set_html_filename(html_filename)
                report.run()
                FileHandler.record_html_file(
                    "%s REFINE" % idxr.get_indexer_full_name(), html_filename)

            experiments = load.experiment_list(
                self._refinr_experiments_filename)
            self.set_refiner_payload("models.expt",
                                     self._refinr_experiments_filename)
            self.set_refiner_payload("observations.refl",
                                     self._refinr_indexed_filename)

            # this is the result of the cell refinement
            self._refinr_cell = experiments.crystals()[0].get_unit_cell().parameters()
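The wedge-width rules used above can be restated as a small pure function
(the name and signature are illustrative; in the real code the requested
flags come from PhilIndex parameters):

def scan_varying_settings(total_oscillation_range, scan_varying_requested,
                          fast_mode=False):
    """Narrow-wedge rules: no scan-varying refinement for wedges of 5 deg or
    less, and halve the interval width for wedges narrower than 36 deg."""
    scan_varying = (scan_varying_requested
                    and total_oscillation_range > 5
                    and not fast_mode)
    interval_width_degrees = None
    if scan_varying and total_oscillation_range < 36:
        interval_width_degrees = total_oscillation_range / 2
    return scan_varying, interval_width_degrees

assert scan_varying_settings(4, True) == (False, None)
assert scan_varying_settings(20, True) == (True, 10.0)
assert scan_varying_settings(90, True) == (True, None)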
Exemple #59
0
def test_row_operations():
  from dials.array_family import flex

  # The columns as lists
  c1 = list(range(10))
  c2 = list(range(10))
  c3 = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'i', 'j', 'k']

  # Create a table with some elements
  table = flex.reflection_table()
  table['col1'] = flex.int(c1)
  table['col2'] = flex.double(c2)
  table['col3'] = flex.std_string(c3)

  # Extend the table
  table.extend(table)
  c1 = c1 * 2
  c2 = c2 * 2
  c3 = c3 * 2
  assert(table.nrows() == 20)
  assert(table.ncols() == 3)
  assert(table.is_consistent())
  assert(all(a == b for a, b in zip(table['col1'], c1)))
  assert(all(a == b for a, b in zip(table['col2'], c2)))
  assert(all(a == b for a, b in zip(table['col3'], c3)))

  # Append some rows to the table
  row = { 'col1' : 10 }
  c1 = c1 + [10]
  c2 = c2 + [0]
  c3 = c3 + ['']
  table.append(row)
  assert(table.nrows() == 21)
  assert(table.ncols() == 3)
  assert(table.is_consistent())
  assert(all(a == b for a, b in zip(table['col1'], c1)))
  assert(all(a == b for a, b in zip(table['col2'], c2)))
  assert(all(a == b for a, b in zip(table['col3'], c3)))

  row = { 'col2' : 11 }
  c1 = c1 + [0]
  c2 = c2 + [11]
  c3 = c3 + ['']
  table.append(row)
  assert(table.nrows() == 22)
  assert(table.ncols() == 3)
  assert(table.is_consistent())
  assert(all(a == b for a, b in zip(table['col1'], c1)))
  assert(all(a == b for a, b in zip(table['col2'], c2)))
  assert(all(a == b for a, b in zip(table['col3'], c3)))

  row = { 'col1' : 12, 'col2' : 12, 'col3' : 'l' }
  c1 = c1 + [12]
  c2 = c2 + [12]
  c3 = c3 + ['l']
  table.append(row)
  assert(table.nrows() == 23)
  assert(table.ncols() == 3)
  assert(table.is_consistent())
  assert(all(a == b for a, b in zip(table['col1'], c1)))
  assert(all(a == b for a, b in zip(table['col2'], c2)))
  assert(all(a == b for a, b in zip(table['col3'], c3)))

  # Try inserting some rows
  row = { 'col1' : -1 }
  c1.insert(5, -1)
  c2.insert(5, 0)
  c3.insert(5, '')
  table.insert(5, row)
  assert(table.nrows() == 24)
  assert(table.ncols() == 3)
  assert(table.is_consistent())
  assert(all(a == b for a, b in zip(table['col1'], c1)))
  assert(all(a == b for a, b in zip(table['col2'], c2)))
  assert(all(a == b for a, b in zip(table['col3'], c3)))

  row = { 'col1' : -2, 'col2' : -3, 'col3' : 'abc' }
  c1.insert(2, -2)
  c2.insert(2, -3)
  c3.insert(2, 'abc')
  table.insert(2, row)
  assert(table.nrows() == 25)
  assert(table.ncols() == 3)
  assert(table.is_consistent())
  assert(all(a == b for a, b in zip(table['col1'], c1)))
  assert(all(a == b for a, b in zip(table['col2'], c2)))
  assert(all(a == b for a, b in zip(table['col3'], c3)))

  # Try iterating through table rows
  for i in range(table.nrows()):
    row = table[i]
    assert(row['col1'] == c1[i])
    assert(row['col2'] == c2[i])
    assert(row['col3'] == c3[i])

  # Try setting some rows
  row = { 'col1' : 100 }
  table[2] = row
  assert(table[2]['col1'] == 100)
  assert(table[2]['col2'] == c2[2])
  assert(table[2]['col3'] == c3[2])

  row = { 'col1' : 1000, 'col2' : 2000, 'col3' : 'hello' }
  table[10] = row
  assert(table[10]['col1'] == 1000)
  assert(table[10]['col2'] == 2000)
  assert(table[10]['col3'] == 'hello')
Exemple #60
0
  def __init__(self):
    # height and width are assumed to come from the enclosing scope of the
    # original example, which is truncated here
    self.data = flex.int(range(height * width))
    self.data.reshape(flex.grid(height, width))