Example #1
 def __init__(self, file_name, anomalous_flag, verbose):
   reflection_file = reflection_reader.cns_reflection_file(open(file_name))
   if verbose:
     print(reflection_file.show_summary())
   assert reflection_file.anomalous == anomalous_flag
   names, self.miller_indices, self.hl = reflection_file.join_hl_group()
   self.fcalc = reflection_file.reciprocal_space_objects["FCALC"]
   self.pi = reflection_file.reciprocal_space_objects["PI"]
   assert not miller.match_indices(
     self.miller_indices, self.fcalc.indices).have_singles()
   assert not miller.match_indices(
     self.miller_indices, self.pi.indices).have_singles()
Example #2
def exercise_match_indices():
    import math
    from cctbx import miller
    from cctbx.array_family import flex
    from libtbx.test_utils import approx_equal

    h0 = flex.miller_index(
        ((1, 2, 3), (-1, -2, -3), (2, 3, 4), (-2, -3, -4), (3, 4, 5)))
    d0 = flex.double((1, 2, 3, 4, 5))
    h1 = flex.miller_index(((-1, -2, -3), (-2, -3, -4), (1, 2, 3), (2, 3, 4)))
    d1 = flex.double((10, 20, 30, 40))
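    # matching h0 against itself: every index pairs with itself, no singles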
    mi = miller.match_indices(h0, h0)
    assert mi.have_singles() == 0
    assert list(mi.pairs()) == list(zip(range(5), range(5)))
    mi = miller.match_indices(h0, h1)
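    # h0[4] = (3, 4, 5) has no partner in h1, so it is the only single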
    assert tuple(mi.singles(0)) == (4, )
    assert tuple(mi.singles(1)) == ()
    assert tuple(mi.pairs()) == ((0, 2), (1, 0), (2, 3), (3, 1))
    assert tuple(mi.pair_selection(0)) == (1, 1, 1, 1, 0)
    assert tuple(mi.single_selection(0)) == (0, 0, 0, 0, 1)
    assert tuple(mi.pair_selection(1)) == (1, 1, 1, 1)
    assert tuple(mi.single_selection(1)) == (0, 0, 0, 0)
    assert tuple(mi.paired_miller_indices(0)) \
        == tuple(h0.select(mi.pair_selection(0)))
    l1 = list(mi.paired_miller_indices(1))
    l2 = list(h1.select(mi.pair_selection(1)))
    l1.sort()
    l2.sort()
    assert l1 == l2
    assert approx_equal(tuple(mi.plus(d0, d1)), (31, 12, 43, 24))
    assert approx_equal(tuple(mi.minus(d0, d1)), (-29, -8, -37, -16))
    assert approx_equal(tuple(mi.multiplies(d0, d1)), (30, 20, 120, 80))
    assert approx_equal(tuple(mi.divides(d0, d1)),
                        (1 / 30., 2 / 10., 3 / 40., 4 / 20.))
    assert approx_equal(tuple(mi.additive_sigmas(d0, d1)), [
        math.sqrt(x * x + y * y)
        for x, y in ((1, 30), (2, 10), (3, 40), (4, 20))
    ])
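    # permutation() gives, for each element of the first array, the position
    # of its match in the second array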
    q = flex.size_t((3, 2, 0, 4, 1))
    h1 = h0.select(q)
    assert tuple(miller.match_indices(h1, h0).permutation()) == tuple(q)
    p = miller.match_indices(h0, h1).permutation()
    assert tuple(p) == (2, 4, 1, 0, 3)
    assert tuple(h1.select(p)) == tuple(h0)
    cd0 = [
        complex(a, b)
        for (a, b) in ((1, 1), (2, 0), (3.5, -1.5), (5, -3), (-8, 5.4))
    ]
    cd1 = [
        complex(a, b)
        for (a, b) in ((1, -1), (2, 1), (0.5, 1.5), (-1, -8), (10, 0))
    ]
    cd2 = flex.complex_double(cd0)
    cd3 = flex.complex_double(cd1)
    mi = miller.match_indices(h0, h0)
    assert approx_equal(tuple(mi.plus(cd2, cd3)),
                        ((2 + 0j), (4 + 1j), (4 + 0j), (4 - 11j), (2 + 5.4j)))
Example #3
def compare_fc(obs, other, tolerance=1.0E-9):
  assert obs.is_complex_array()
  assert other.is_complex_array(), other.__class__
  matching = miller.match_indices(obs.indices(), other.indices())
  data0 = obs.select(matching.pairs().column(0)).data()
  data = other.select(matching.pairs().column(1)).data()
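  # data0 and data now hold the reflections common to both arrays, in
  # matching order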
  assert data0.size() == data.size(), str(data0.size()) + " != " \
      + str(data.size())
  assert data.size() > 1, str(data.size())
  max_rel_dif = 0.0
  max_dif = 0.0
  max_mx = 0.0
  for i in range(data.size()):
    dif = abs(data[i]-data0[i])
    mx = max( abs(data[i]),abs(data0[i]) )
    if mx > tolerance*1.0E-2:
      rel_dif = dif / mx
    else:
      rel_dif = 0.0
    if rel_dif > max_rel_dif:
      max_rel_dif = rel_dif
      max_dif = dif
      max_mx = mx
  assert ((max_rel_dif <= tolerance) or (max_mx <= tolerance*1.0E-2)), \
    "max  rel_dif = "+ str(max_rel_dif)+ "   dif = "+str(max_dif)+"    mx =" \
    +str(max_mx)
  return data.size() # max_rel_dif
Example #4
def make_joined_set(miller_arrays):
  if(len(miller_arrays)==0): return None
  cs0 = miller_arrays[0].crystal_symmetry()
  for ma in miller_arrays:
    if([ma.crystal_symmetry().unit_cell(), cs0.unit_cell()].count(None)>0):
      return None
    if(not ma.crystal_symmetry().is_similar_symmetry(cs0)): return None
  from cctbx import miller
  master_set = miller.set(
    crystal_symmetry=miller_arrays[0].crystal_symmetry(),
    indices=miller_arrays[0].indices(),
    anomalous_flag=False)
  master_indices = miller_arrays[0].indices().deep_copy()
  for array in miller_arrays[1:]:
    current_indices = array.indices()
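    # singles(1): positions in current_indices with no counterpart in
    # master_indices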
    missing_isel = miller.match_indices(master_indices,
      current_indices).singles(1)
    missing_indices = current_indices.select(missing_isel)
    master_indices.extend(missing_indices)
  master_set = miller.set(
    crystal_symmetry=miller_arrays[0].crystal_symmetry(),
    indices=master_indices,
    anomalous_flag=False)
  return \
    master_set.map_to_asu().unique_under_symmetry().remove_systematic_absences()
Example #5
    def __init__(self, model, diffs, params):
        self.params = params
        self.model = model
        if params.group_sulfurs:
            self.n = 1 + flex.max(self.model.scatterer_model_idx)
        else:
            self.n = self.model.N_anom_scatterers
        self.x = flex.double(self.n, 0.)

        from cctbx import miller
        matches = miller.match_indices(self.model.f_model_real.indices(),
                                       diffs.indices())
        self.sel0 = flex.size_t([p[0] for p in matches.pairs()])
        self.sel1 = flex.size_t([p[1] for p in matches.pairs()])

        self.diffs = diffs.select(self.sel1)

        print "SELECTED %d diffs out of %d" % (len(
            self.diffs.data()), len(diffs.data()))

        self.minimizer = scitbx.lbfgs.run(
            target_evaluator=self,
            termination_params=scitbx.lbfgs.termination_parameters(
                traditional_convergence_test=True,
                traditional_convergence_test_eps=1.e-4,
                max_iterations=20))
Example #6
 def join_hl_group(self, group_index=None):
   if (group_index is None):
     assert len(self.groups) == 1
     group_index = 0
   selected_group = self.groups[group_index]
   assert len(selected_group) == 4
   names = []
   miller_indices = None
   rsos = []
   matches = []
   for name in selected_group:
     names.append(name)
     rso = self.reciprocal_space_objects[name]
     assert rso.type == "real"
     rsos.append(rso)
     if (miller_indices is None): miller_indices = rso.indices
     match = miller.match_indices(miller_indices, rso.indices)
     assert not match.have_singles()
     matches.append(match)
   hl = flex.hendrickson_lattman()
   for ih in range(miller_indices.size()):
     coeff = []
     for ic in range(4):
       ih0, ih1 = matches[ic].pairs()[ih]
       assert ih0 == ih
       coeff.append(rsos[ic].data[ih1])
     hl.append(coeff)
   return names, miller_indices, hl
Example #7
  def scale(self,other):
    from cctbx import miller
    matches = miller.match_indices(self.f_model_real.indices(),other.indices())
    sel0 = flex.size_t([p[0] for p in matches.pairs()])
    sel1 = flex.size_t([p[1] for p in matches.pairs()])

    val0 = self.f_model_real.data().select(sel0)
    val1 = other.data().select(sel1)
    plot=False
    if plot:
      from matplotlib import pyplot as plt
      plt.plot([-1,4],[-1,4],"g-")
      plt.plot(flex.log10(val0),flex.log10(val1),"r.")
      plt.show()

    from xfel.cxi.cxi_cc import correlation
    slope,offset,corr,N = correlation(
      self = self.f_model_real.select(sel0),
      other = other.select(sel1))
    print(slope, offset, corr, N)
    if plot:
      from matplotlib import pyplot as plt
      plt.plot([-1,4],[-1,4],"g-")
      plt.plot(flex.log10(val0),flex.log10(slope * val1),"r,")
      plt.show()
    return slope
Example #8
    def consistent_set_and_model(self, i_model=None):
        # Adjust the minimum d-spacing of the generated Miller set to assure
        # that the desired high-resolution limit is included even if the
        # observed unit cell differs slightly from the target.  Use the same
        # expansion formula as used in merging/general_fcalc.py, to assure consistency.
        # If a reference model is present, ensure that Miller indices are ordered
        # identically.

        symm = symmetry(unit_cell=self.params.scaling.unit_cell,
                        space_group_info=self.params.scaling.space_group)

        # set up the resolution limits
        d_max = 100000  # a default like in cxi-merge
        if self.params.merging.d_max is not None:
            d_max = self.params.merging.d_max
        # RB: for later
        #d_max /= self.params.scaling.resolution_scalar
        d_min = self.params.merging.d_min * self.params.scaling.resolution_scalar

        miller_set = symm.build_miller_set(
            anomalous_flag=(not self.params.merging.merge_anomalous),
            d_max=d_max,
            d_min=d_min)
        miller_set = miller_set.change_basis(
            self.params.scaling.model_reindex_op).map_to_asu()

        # Handle the case where model is anomalous=False but the requested merging is anomalous=True
        if not i_model.anomalous_flag() and miller_set.anomalous_flag():
            i_model = i_model.generate_bijvoet_mates()

        # manage the sizes of arrays. General_fcalc assures that
        # N(i_model) >= N(miller_set) since it fills non-matches with invalid structure factors
        # However, if N(i_model) > N(miller_set), it's because this run of cxi.merge requested
        # a smaller resolution range.  Must prune off the reference model.
        if i_model.indices().size() > miller_set.indices().size():
            matches = miller.match_indices(i_model.indices(),
                                           miller_set.indices())
            pairs = matches.pairs()
            i_model = i_model.select(pairs.column(0))

        matches = miller.match_indices(i_model.indices(), miller_set.indices())
        assert not matches.have_singles()
        miller_set = miller_set.select(matches.permutation())

        return miller_set, i_model
Example #9
    def remove_outliers(self):
        potential_outliers = self.nat.select(self.result)

        matches = miller.match_indices(self.nat.indices(),
                                       potential_outliers.indices())
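        # single_selection(0) flags the reflections in self.nat that matched
        # no potential outlier, i.e. the ones to keep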

        self.nat = self.nat.select(matches.single_selection(0))

        self.nat, self.der = self.nat.common_sets(self.der)
Example #10
 def check_cb_op_perm(cb_op, perm):
   mi_cb = cb_op.apply(ra.miller_indices)
   miis = flex.random_permutation(size=ra.miller_indices.size())[:2]
   k = cb_op.apply(ra.miller_indices.select(miis))
   matches = miller.match_indices(k, ra.miller_indices)
   pairs = matches.pairs()
   assert pairs.column(0).all_eq(flex.size_t_range(k.size()))
   miis_cb = pairs.column(1)
   assert perm.select(miis).all_eq(miis_cb)
Example #11
        def __iter__(O):
          from cctbx.miller import match_indices
          n_total = 0
          n_file = 0
          n_reindex = 0
          n_matches = 0
          n_strong_no_integration = 0
          n_integrated = 0
          n_common = 0
          n_weak = 0

          for item in O.pred_list:
            run_match = RUN.match(item)
            run_token = int(run_match.group(1))
            event_match = EVENT.match(item)
            event_token = int(event_match.group(1))
            obs = O.obs_reverse_lookup[(run_token,event_token)]
            strong_refls = flex.reflection_table.from_file(obs)
            nrefls = len(strong_refls)
            ri = reindex_miller = strong_refls["miller_index"]
            select_indexed = reindex_miller != (0,0,0)
            reindex = (select_indexed).count(True)
            strong_and_indexed = strong_refls.select(select_indexed)

            print (obs, nrefls, reindex)
            n_total += nrefls
            n_file += 1
            n_reindex += reindex

            int_refls = flex.reflection_table.from_file(item)
            ii = integration_miller = int_refls["miller_index"]
            MM = match_indices(strong_and_indexed["miller_index"], int_refls["miller_index"])
            #print("  Strong+indexed",reindex, "integrated",len(ii), "in common",len(MM.pairs()),
            #"indexed but no integration", len(MM.singles(0)), "integrated weak", len(MM.singles(1)))
            n_integrated += len(ii)
            n_common += len(MM.pairs())
            n_strong_no_integration += len(MM.singles(0))
            n_weak += len(MM.singles(1))

            P = MM.pairs()
            A = P.column(0)
            B = P.column(1)
            strong_and_indexed = strong_and_indexed.select(A)
            int_refls = int_refls.select(B)
            # transfer over the calculated positions from integrate2 to strong refls

            strong_and_indexed["xyzcal.mm"] = int_refls["xyzcal.mm"]
            strong_and_indexed["xyzcal.px"] = int_refls["xyzcal.px"]
            strong_and_indexed["delpsical.rad"] = int_refls["delpsical.rad"]
            yield dict(strfile = item, strong_refls=strong_and_indexed)
          print ("Grand total is %d from %d files of which %d reindexed"%(n_total,n_file, n_reindex))
          print ("TOT Strong+indexed",n_reindex, "integrated",n_integrated, "in common",
          n_common, "indexed but no integration", n_strong_no_integration, "integrated weak", n_weak)
Example #12
def commonalize(arrays):
    new_arrays = []
    a0 = arrays[0]
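    # each entry is a list [f, f_obs, f_model, flag]; a0 is pruned in place
    # while it is intersected with every other array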
    for f, f_obs, f_model, flag in arrays[1:]:
        pairs = miller.match_indices(a0[1].indices(), f_obs.indices()).pairs()
        a0[1] = a0[1].select(pairs.column(0))
        a0[2] = a0[2].select(pairs.column(0))
        a0[3] = a0[3].select(pairs.column(0))
        f_obs = f_obs.select(pairs.column(1))
        f_model = f_model.select(pairs.column(1))
        flag = flag.select(pairs.column(1))
        new_arrays.append([f, f_obs, f_model, flag])

    new_arrays2 = []

    for f, f_obs, f_model, flag in new_arrays:
        pairs = miller.match_indices(a0[1].indices(), f_obs.indices()).pairs()
        f_obs = f_obs.select(pairs.column(1))
        f_model = f_model.select(pairs.column(1))
        flag = flag.select(pairs.column(1))
        new_arrays2.append([f, f_obs, f_model, flag])

    return [a0] + new_arrays2
Example #13
def exercise_match_cached_fast():
  from cctbx import miller
  from cctbx.array_family import flex
  from libtbx.test_utils import Exception_expected
  h0 = flex.miller_index(((1,2,3), (-1,-2,-3), (2,3,4), (-2,-3,-4), (3,4,5)))
  h1 = flex.miller_index(((-1,-2,-3), (-2,-3,-4), (1,2,3), (2,3,4)))
  mi_ref = miller.match_indices(h0, h1)
  mi = miller.match_indices(h0)
  mi.match_cached_fast(h1)
  assert sorted(mi.pairs()) == sorted(mi_ref.pairs())
  mi_ref = miller.match_indices(h0, h0)
  mi.match_cached_fast(h0)
  assert sorted(mi.pairs()) == sorted(mi_ref.pairs())
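  # the cached-fast matcher only records pairs; singles, selections and
  # permutations are unsupported and are expected to raise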

  try: mi.singles(0)
  except RuntimeError: pass
  else: raise Exception_expected
  try: mi.pair_selection(0)
  except RuntimeError: pass
  else: raise Exception_expected
  d0 = flex.double((1,2,3,4,5))
  try: mi.plus(d0, d0)
  except RuntimeError: pass
  else: raise Exception_expected
  try: mi.permutation()
  except RuntimeError: pass
  else: raise Exception_expected
Example #14
def make_joined_set(miller_arrays):
  if(len(miller_arrays)==0): return None
  cs0 = miller_arrays[0].crystal_symmetry()
  for ma in miller_arrays:
    if(not ma.crystal_symmetry().is_similar_symmetry(cs0)): return None
  from cctbx import miller
  master_set = miller.set(
    crystal_symmetry=miller_arrays[0].crystal_symmetry(),
    indices=miller_arrays[0].indices(),
    anomalous_flag=False)
  master_indices = miller_arrays[0].indices().deep_copy()
  for array in miller_arrays[1:]:
    current_indices = array.indices()
    missing_isel = miller.match_indices(master_indices,
      current_indices).singles(1)
    missing_indices = current_indices.select(missing_isel)
    master_indices.extend(missing_indices)
  master_set = miller.set(
    crystal_symmetry=miller_arrays[0].crystal_symmetry(),
    indices=master_indices,
    anomalous_flag=False)
  return master_set.map_to_asu().unique_under_symmetry()
Example #15
  def make_new_miller_array(self):
    miller_array_operations_lst = eval(self.params.miller_array_operations)
    unique_miller_array_operations_lst = []
    for (operation, label, arrid1, arrid2) in miller_array_operations_lst:
      for arr in self.procarrays:
        if label in arr.info().label_string() or label in [ "", None]:
          raise Sorry("Provide an unambiguous label for your new miller array!")
      unique_miller_array_operations_lst.append( (operation, label, arrid1, arrid2) )
    self.params.miller_array_operations = str(unique_miller_array_operations_lst)
    from copy import deepcopy
    millarr1 = deepcopy(self.procarrays[arrid1])
    newarray = None
    if arrid2 != -1:
      millarr2 = deepcopy(self.procarrays[arrid2])
      newarray = self.viewer.OperateOn2MillerArrays(millarr1, millarr2, operation)
    else:
      newarray = self.viewer.OperateOn1MillerArray(millarr1, operation)
    if newarray is not None:
      newarray.set_info(millarr1._info )
      newarray._info.labels = [ label ]
      procarray, procarray_info = self.process_miller_array(newarray)
      self.procarrays.append(procarray)
      self.viewer.proc_arrays = self.procarrays
      self.viewer.has_new_miller_array = True
      self.viewer.array_infostrs.append( ArrayInfo(procarray, self.mprint).infostr )
      self.viewer.array_infotpls.append( ArrayInfo(procarray, self.mprint).infotpl )
      #self.viewer.SupersetMillerArrays()

      hkls = self.origarrays["HKLs"]
      nanarr = flex.double(len(hkls), float("nan"))
      m = miller.match_indices(hkls, procarray.indices() )
      indices_of_matched_hkls = m.pairs().column(0)
      for i,e in enumerate(indices_of_matched_hkls):
        nanarr[e] = procarray.data()[i]
      self.origarrays[label] = list(nanarr)
      mydict = { "array_infotpls": self.viewer.array_infotpls, "NewHKLscenes" : True, "NewMillerArray" : True}
      self.SendInfoToGUI(mydict)
Example #16
 def reindex_in_place(O,
       reindexing_assistant=None,
       cb_op=None,
       miller_indices=None):
   assert [reindexing_assistant, cb_op].count(None) == 1
   assert (cb_op is None) == (miller_indices is None)
   if (reindexing_assistant is not None):
     assert O.i_perm is not None
     cb_op = reindexing_assistant.cb_ops[O.i_perm]
   if (not O.unit_cell.is_similar_to(
             other=O.unit_cell.change_basis(cb_op),
             relative_length_tolerance=1e-5,
             absolute_angle_tolerance=1e-3)):
     raise RuntimeError(
       "Unit cell is not compatible with reindexing operation.")
   if (reindexing_assistant is not None):
     assert O.i_perm is not None
     perm = reindexing_assistant.perms[O.i_perm]
     O.miller_index_i_seqs = perm.select(O.miller_index_i_seqs)
   else:
     mi_cb = cb_op.apply(miller_indices.select(O.miller_index_i_seqs))
     from cctbx import miller
     matches = miller.match_indices(miller_indices, mi_cb)
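     # every reindexed index must already be present in the reference list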
     assert matches.singles(1).size() == 0
     O.miller_index_i_seqs = matches.pairs().column(0)
   from scitbx.array_family import flex
   sort_perm = flex.sort_permutation(data=O.miller_index_i_seqs)
   O.miller_index_i_seqs = O.miller_index_i_seqs.select(sort_perm)
   O.spot_positions = O.spot_positions.select(sort_perm)
   O.spot_intensities = O.spot_intensities.select(sort_perm)
   from scitbx import matrix
   c_cart = matrix.sqr(O.unit_cell.matrix_cart(rot_mx=cb_op.c_inv().r()))
   O.crystal_rotation = (matrix.sqr(O.crystal_rotation) * c_cart).elems
   O.partialities = None
   O.i_perm = 0
   if (O.backup is not None):
     O.backup.i_perm = 0
Example #17
    parsed = iotbx.phil.parse(master_params_str, process_includes=True)

    processed_args = mmtbx.utils.process_command_line_args(
        args=sys.argv[1:], log=sys.stdout, master_params=parsed)

    working_phil = processed_args.params
    params = working_phil.extract()

    if params.hklin_1 is None and params.hklin_2 is None:
        if len(processed_args.reflection_file_names) != 2:
            print "Exactly two mtz files must be given."
            sys.exit(1)
        params.hklin_1, params.hklin_2 = processed_args.reflection_file_names

    working_phil = parsed.format(python_object=params)
    print "Parameters to compute maps:"
    working_phil.show(out=sys.stdout, prefix=" ")

    data_1 = get_data(params.hklin_1, params.labels_1)
    data_2 = get_data(params.hklin_2, params.labels_2)

    matches = miller.match_indices(data_1.indices(), data_2.indices())
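    # singles(0)/singles(1) select the reflections unique to each input file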

    only_1 = data_1.select(matches.singles(0))
    only_2 = data_2.select(matches.singles(1))

    only_1.as_mtz_dataset(column_root_label=params.labels_1.split(",")
                          [0]).mtz_object().write("only_in_1.mtz")
    only_2.as_mtz_dataset(column_root_label=params.labels_2.split(",")
                          [0]).mtz_object().write("only_in_2.mtz")
Example #18
        def _compute_rij_matrix_one_row_block(i):
            rij_cache = {}

            n_sym_ops = len(self._sym_ops)
            NN = n_lattices * n_sym_ops

            from scipy import sparse

            rij_row = []
            rij_col = []
            rij_data = []
            if self._weights is not None:
                wij_row = []
                wij_col = []
                wij_data = []
            else:
                wij = None

            i_lower, i_upper = self._lattice_lower_upper_index(i)
            intensities_i = self._data.data()[i_lower:i_upper]

            for j in range(n_lattices):

                j_lower, j_upper = self._lattice_lower_upper_index(j)
                intensities_j = self._data.data()[j_lower:j_upper]

                for k, cb_op_k in enumerate(self._sym_ops):
                    cb_op_k = sgtbx.change_of_basis_op(cb_op_k)

                    indices_i = indices[cb_op_k.as_xyz()][i_lower:i_upper]

                    for kk, cb_op_kk in enumerate(self._sym_ops):
                        if i == j and k == kk:
                            # don't include correlation of dataset with itself
                            continue
                        cb_op_kk = sgtbx.change_of_basis_op(cb_op_kk)

                        ik = i + (n_lattices * k)
                        jk = j + (n_lattices * kk)

                        key = (i, j, str(cb_op_k.inverse() * cb_op_kk))
                        if use_cache and key in rij_cache:
                            cc, n = rij_cache[key]
                        else:
                            indices_j = indices[
                                cb_op_kk.as_xyz()][j_lower:j_upper]

                            matches = miller.match_indices(
                                indices_i, indices_j)
                            pairs = matches.pairs()
                            isel_i = pairs.column(0)
                            isel_j = pairs.column(1)
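                            # drop reflections with epsilon factor > 1 in
                            # the Patterson group before correlating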
                            isel_i = isel_i.select(
                                self._patterson_group.epsilon(
                                    indices_i.select(isel_i)) == 1)
                            isel_j = isel_j.select(
                                self._patterson_group.epsilon(
                                    indices_j.select(isel_j)) == 1)
                            corr = flex.linear_correlation(
                                intensities_i.select(isel_i),
                                intensities_j.select(isel_j),
                            )

                            if corr.is_well_defined():
                                cc = corr.coefficient()
                                n = corr.n()
                                rij_cache[key] = (cc, n)
                            else:
                                cc = None
                                n = None

                        if n is None or n < self._min_pairs:
                            continue

                        if cc is not None and n is not None:
                            if self._weights == "count":
                                wij_row.extend([ik, jk])
                                wij_col.extend([jk, ik])
                                wij_data.extend([n, n])
                            elif self._weights == "standard_error":
                                assert n > 2
                                # http://www.sjsu.edu/faculty/gerstman/StatPrimer/correlation.pdf
                                se = math.sqrt((1 - cc**2) / (n - 2))
                                wij = 1 / se
                                wij_row.extend([ik, jk])
                                wij_col.extend([jk, ik])
                                wij_data.extend([wij, wij])

                            rij_row.append(ik)
                            rij_col.append(jk)
                            rij_data.append(cc)

            rij = sparse.coo_matrix((rij_data, (rij_row, rij_col)),
                                    shape=(NN, NN))
            if self._weights is not None:
                wij = sparse.coo_matrix((wij_data, (wij_row, wij_col)),
                                        shape=(NN, NN))

            return rij, wij
Example #19
def compare_integrate_hkls(result_single, result_range):
    from cctbx import miller
    from cctbx.array_family import flex

    def calc_r_fac(data1, data2):
        return flex.sum(flex.abs(data1 - data2)) / flex.sum(data1)

    # calc_r_fac()

    def calc_cc(data1, data2):
        corr = flex.linear_correlation(data1, data2)
        assert corr.is_well_defined()
        return corr.coefficient()

    # calc_cc()

    read_columns = ["IOBS", "SIGMA", "PEAK"]
    reader_single = integrate_hkl_as_flex.reader(result_single, read_columns)
    reader_range = integrate_hkl_as_flex.reader(result_range, read_columns)

    assert len(reader_single.hkl) == len(set(
        reader_single.hkl))  # No duplicate hkls!
    assert len(reader_range.hkl) == len(set(
        reader_range.hkl))  # No duplicate hkls!

    data_single = reader_single.arrays()
    data_range = reader_range.arrays()

    # Take common sets
    pairs = miller.match_indices(reader_single.hkl, reader_range.hkl).pairs()

    # XXX. Make sure two arrays are sorted (having the same order)!
    for k in read_columns:
        data_single[k] = data_single[k].select(pairs.column(0))
        data_range[k] = data_range[k].select(pairs.column(1))

    ## Sort by PEAK
    #perm = flex.sort_permutation(data=data_single["PEAK"].data(), reverse=False)

    # Output to terminal
    N = 20
    print "      PEAK                          r_fac  cc"
    for i in xrange(N):
        lrange, rrange = i * 100. / N, (i + 1) * 100. / N
        sel = lrange < data_single["PEAK"].data()
        sel &= data_single["PEAK"].data() <= rrange
        sel &= 0.95 < data_range["PEAK"].data()

        sel_I_range = data_range["IOBS"].select(sel)
        sel_I_single = data_single["IOBS"].select(sel)

        r_fac = calc_r_fac(sel_I_range.data(), sel_I_single.data())

        cc = calc_cc(sel_I_range.data(), sel_I_single.data())

        for r, s, p in zip(sel_I_range[:10], sel_I_single[:10],
                           data_single["PEAK"].select(sel).data()):
            print "  ", r, s, p

        print "%6.2f .. %6.2f [nref= %10d] %.4f %.4f" % (lrange, rrange,
                                                         sum(sel), r_fac, cc)

    # Output to file
    ofs = open("result.dat", "w")
    ofs.write(
        "H K L single.I single.SIGMA single.PEAK range.I range.SIGMA range.PEAK\n"
    )
    for values in zip(data_single["IOBS"].indices(),
                      data_single["IOBS"].data(), data_single["SIGMA"].data(),
                      data_single["PEAK"].data(), data_range["IOBS"].data(),
                      data_range["SIGMA"].data(), data_range["PEAK"].data()):
        ofs.write("%d %d %d %f %f %f %f %f %f\n" %
                  ((values[0][0], values[0][1], values[0][2]) + values[1:]))
Example #20
    def consistent_set_and_model(self, i_model=None):
        assert self.params.scaling.space_group, "Space group must be specified in the input parameters or a reference file must be present"
        # which unit cell are we using?
        if self.purpose == "scaling":
            assert self.params.scaling.unit_cell is not None, "Unit cell must be specified in the input parameters or a reference file must be present"
            unit_cell = self.params.scaling.unit_cell
            self.logger.log("Using target unit cell: " + str(unit_cell))
            if self.mpi_helper.rank == 0:
                self.logger.main_log("Using target unit cell: " +
                                     str(unit_cell))
        elif self.purpose == "statistics":
            if self.params.merging.set_average_unit_cell:
                assert self.params.statistics.average_unit_cell is not None, "Average unit cell hasn't been calculated"
                unit_cell = self.params.statistics.average_unit_cell
                unit_cell_formatted = "(%.6f, %.6f, %.6f, %.3f, %.3f, %.3f)"\
                                  %(unit_cell.parameters()[0], unit_cell.parameters()[1], unit_cell.parameters()[2], \
                                    unit_cell.parameters()[3], unit_cell.parameters()[4], unit_cell.parameters()[5])
                self.logger.log("Using average unit cell: " +
                                unit_cell_formatted)
                if self.mpi_helper.rank == 0:
                    self.logger.main_log("Using average unit cell: " +
                                         unit_cell_formatted)
            else:
                assert self.params.scaling.unit_cell is not None, "Unit cell must be specified in the input parameters or a reference file must be present"
                unit_cell = self.params.scaling.unit_cell
                self.logger.log("Using target unit cell: " + str(unit_cell))
                if self.mpi_helper.rank == 0:
                    self.logger.main_log("Using target unit cell: " +
                                         str(unit_cell))

        # create symmetry for the full miller set
        symm = symmetry(unit_cell=unit_cell,
                        space_group_info=self.params.scaling.space_group)

        # Adjust the minimum d-spacing of the generated Miller set to assure
        # that the desired high-resolution limit is included even if the
        # observed unit cell differs slightly from the target.  Use the same
        # expansion formula as used in merging/general_fcalc.py, to assure consistency.
        # If a reference model is present, ensure that Miller indices are ordered
        # identically.

        # set up the resolution limits
        d_max = 100000  # a default like in cxi-merge
        if self.params.merging.d_max is not None:
            d_max = self.params.merging.d_max
        # RB: for later
        #d_max /= self.params.scaling.resolution_scalar
        d_min = self.params.merging.d_min * self.params.scaling.resolution_scalar

        # build the full miller set
        miller_set = symm.build_miller_set(
            anomalous_flag=(not self.params.merging.merge_anomalous),
            d_max=d_max,
            d_min=d_min)
        miller_set = miller_set.change_basis(
            self.params.scaling.model_reindex_op).map_to_asu()

        # Handle the case where model is anomalous=False but the requested merging is anomalous=True
        if i_model is not None:
            if not i_model.anomalous_flag() and miller_set.anomalous_flag():
                i_model = i_model.generate_bijvoet_mates()

            # manage the sizes of arrays. General_fcalc assures that
            # N(i_model) >= N(miller_set) since it fills non-matches with invalid structure factors
            # However, if N(i_model) > N(miller_set), it's because this run of cxi.merge requested
            # a smaller resolution range.  Must prune off the reference model.
            if self.purpose == "scaling":
                if i_model.indices().size() > miller_set.indices().size():
                    matches = miller.match_indices(i_model.indices(),
                                                   miller_set.indices())
                    pairs = matches.pairs()
                    i_model = i_model.select(pairs.column(0))

                matches = miller.match_indices(i_model.indices(),
                                               miller_set.indices())
                #assert not matches.have_singles()
                miller_set = miller_set.select(matches.permutation())

        return miller_set, i_model
Example #21
 def ExtendMillerArraysUnionHKLs(self):
     #import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
     self.match_valarrays = []
     # loop over all miller arrays to find the subsets of hkls common between the currently selected
     # miller array and the other arrays. hkls found in the currently selected miller array but
     # missing in the subsets are populated with NaN values
     # create miller indices being a superset of available indices in all arrays
     self.mprint("Gathering superset of miller indices")
     superset_array = self.proc_arrays[0]
     for i, validarray in enumerate(self.proc_arrays):
         if i == 0:
             continue
         # first match indices in currently selected miller array with indices in the other miller arrays
         matchindices = miller.match_indices(superset_array.indices(),
                                             validarray.indices())
         #print validarray.info().label_string()
         valarray = validarray.select(matchindices.pairs().column(1))
         #import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
         if valarray.anomalous_flag(
         ) and not superset_array.anomalous_flag():
             # valarray gets its anomalous_flag from validarray. But it cannot have more HKLs than self.miller_array
             # so set its anomalous_flag to False if self.miller_array is not anomalous data
             valarray._anomalous_flag = False
         if not valarray.anomalous_flag() and superset_array.anomalous_flag(
         ):
             # temporarily expand other arrays to anomalous if self.miller_array is anomalous
             valarray = valarray.generate_bijvoet_mates()
         missing = superset_array.lone_set(valarray)
         # insert NAN values for reflections in self.miller_array not found in validarray
         valarray = display.ExtendMillerArray(valarray, missing.size(),
                                              missing.indices())
         match_valindices = miller.match_indices(superset_array.indices(),
                                                 valarray.indices())
         #import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
         match_valarray = valarray.select(
             match_valindices.pairs().column(1))
         match_valarray.sort(by_value="packed_indices")
         #match_valarray.set_info(validarray.info() )
         superset_array = match_valarray
         #print "supersetsize:", superset_array.size()
     # now extend each miller array to contain any missing indices from the superset miller array
     for i, validarray in enumerate(self.proc_arrays):
         # first match indices in currently selected miller array with indices in the other miller arrays
         matchindices = miller.match_indices(superset_array.indices(),
                                             validarray.indices())
         #print validarray.info().label_string()
         valarray = validarray.select(matchindices.pairs().column(1))
         #import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
         if valarray.anomalous_flag(
         ) and not superset_array.anomalous_flag():
             # valarray gets its anomalous_flag from validarray. But it cannot have more HKLs than self.miller_array
             # so set its anomalous_flag to False if self.miller_array is not anomalous data
             valarray._anomalous_flag = False
         if not valarray.anomalous_flag() and superset_array.anomalous_flag(
         ):
             # temporarily expand other arrays to anomalous if self.miller_array is anomalous
             valarray = valarray.generate_bijvoet_mates()
         missing = superset_array.lone_set(valarray)
         # insert NAN values for reflections in self.miller_array not found in validarray
         valarray = display.ExtendMillerArray(valarray, missing.size(),
                                              missing.indices())
         match_valindices = miller.match_indices(superset_array.indices(),
                                                 valarray.indices())
         #import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
         match_valarray = valarray.select(
             match_valindices.pairs().column(1))
         match_valarray.sort(by_value="packed_indices")
         match_valarray.set_info(validarray.info())
         self.match_valarrays.append(match_valarray)
Example #22
def run(args):
    import iotbx.phil

    phil = iotbx.phil.process_command_line(
        args=args,
        master_string="""
target_unit_cell = 78,78,37,90,90,90
  .type = unit_cell
target_space_group = P43212
  .type = space_group
d_min = 2.1
  .type = float
plot = False
  .type = str
cut_short_at = None
  .type = int
""",
    ).show()
    print()
    work_params = phil.work.extract()
    assert work_params.d_min is not None

    print(work_params.target_unit_cell)
    print(work_params.target_space_group)

    from cctbx import miller

    miller_set = symmetry(
        unit_cell=work_params.target_unit_cell, space_group_info=work_params.target_space_group
    ).build_miller_set(anomalous_flag=True, d_min=work_params.d_min)

    miller_set.show_summary()

    # reality check
    # recip_cell_volume = work_params.target_unit_cell.reciprocal().volume()
    # recip_sphere_volume = (4/3)*math.pi*math.pow(1./work_params.d_min,3)
    # resolution_cells = recip_sphere_volume/recip_cell_volume
    # print "Number of asu's in sphere=",resolution_cells/miller_set.size()

    results = get_observations(miller_set, phil.remaining_args, work_params)

    # Create (and initialise?) arrays for statistics on the set of the
    # observed reflections which are present in the reference data set.
    completeness = flex.int(miller_set.size())
    sum_I = flex.double(miller_set.size())
    sum_I_SIGI = flex.double(miller_set.size())
    # last = completeness.deep_copy()

    for result, filename in results:
        result.show_summary()
        show_observations(result)

        if work_params.plot == True:
            # private interface to get the very strong diffraction images
            from io import StringIO

            G = StringIO()
            show_observations(result, out=G)
            for line in G.getvalue().split("\n"):
                tokens = line.split()
                if len(tokens) > 6:
                    try:
                        if float(tokens[3]) < 2.6 and float(tokens[-1]) > 10:
                            print "Strong signal", filename, line
                    except ValueError:
                        pass
        print()

        # Match up the observed intensities against the reference data
        # set, i_model, instead of the pre-generated miller set,
        # miller_set.
        matches = miller.match_indices(miller_set.indices(), result.indices())
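        # pairs() holds (position in miller_set, position in result) tuples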

        # for ih,hkl in enumerate(result.indices()):
        #  print hkl, result.data()[ih]
        print()

        # Update the count for each matched reflection.
        completeness += (~matches.single_selection(0)).as_int()
        for pair in matches.pairs():
            sum_I[pair[0]] += result.data()[pair[1]]
            sum_I_SIGI[pair[0]] += result.data()[pair[1]] / result.sigmas()[pair[1]]
        # for ih,hkl in enumerate(miller_set.indices()):
        #  print "%15s"%str(hkl),"%4d"%last[ih],"%4d"%completeness[ih], sum_I[ih]

        # print matches
        # help(matches)
        # print matches.pair_selection(0)
        # for item in matches.pairs(): print item
        # print list(miller_set.indices().select(matches.pairs().column(1)))
        # print list(~matches.single_selection(0))
        # print list(~matches.single_selection(1))
        # last  = completeness.deep_copy()

    # plot_overall_completeness(completeness)

    show_overall_observations(miller_set, completeness, sum_I, sum_I_SIGI)
Example #23
  def construct_reciprocal_space(self, merge=None):
    #import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
    matchcolourindices = miller.match_indices(self.miller_array.indices(),
       self.valid_arrays[self.icolourcol].indices() )
    matchcolourarray = self.miller_array.select( matchcolourindices.pairs().column(0) )

    matchradiiindices = miller.match_indices(self.miller_array.indices(),
       self.valid_arrays[self.iradiicol ].indices() )
    matchradiiarray = self.miller_array.select( matchradiiindices.pairs().column(0) )

    matchcolourradiiindices = miller.match_indices(self.valid_arrays[self.icolourcol].indices(),
       self.valid_arrays[self.iradiicol ].indices() )
    #matchcolourradiiindices = miller.match_indices(matchcolourarray.indices(),
    #                                               matchradiiarray.indices() )
    #import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
    #matchcolourradiiarray = self.miller_array.select( matchcolourradiiindices.pairs().column(0) )

    #commonindices = miller.match_indices(self.miller_array.indices(),
    #   matchcolourradiiarray.indices() )
    commonindices = miller.match_indices(self.miller_array.indices(),
       matchcolourradiiindices.paired_miller_indices(0) )
    commonarray = self.miller_array.select( commonindices.pairs().column(0) )

    commonarray.set_info(self.miller_array.info() )
    commonarray.sort(by_value="packed_indices")

    #import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
    #commonarray.size(), matchcolourradiiarray.size(), matchradiiarray.size(), matchcolourarray.size()
    foms_array = None
    if self.miller_array.is_complex_array():
      fomcolm = self.mapcoef_fom_dict.get(self.miller_array.info().label_string())
      if fomcolm:
        #foms = self.valid_arrays[fomcolm].data().deep_copy()
        foms_array = self.valid_arrays[fomcolm].deep_copy()
    self.scene = display.scene(miller_array=self.miller_array, merge=merge,
     settings=self.settings, foms_array=foms_array)

    self.rotation_center = (0,0,0)

    self.otherscenes = []
    self.othermaxdata = []
    self.othermindata = []
    self.matchingarrayinfo = []
    match_valarrays = []
    # loop over all miller arrays to find the subsets of hkls common between the currently selected
    # miller array and the other arrays. hkls found in the currently selected miller array but
    # missing in the subsets are populated with NaN values
    for i,validarray in enumerate(self.valid_arrays):
      # first match indices in currently selected miller array with indices in the other miller arrays
      #matchindices = miller.match_indices(matchcolourradiiarray.indices(), validarray.indices() )
      matchindices = miller.match_indices(self.miller_array.indices(), validarray.indices() )
      #matchindices = miller.match_indices( commonarray.indices(), validarray.indices() )
      #print validarray.info().label_string()

      valarray = validarray.select( matchindices.pairs().column(1) )

      #import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
      if valarray.anomalous_flag() and not self.miller_array.anomalous_flag():
        # valarray gets its anomalous_flag from validarray. But it cannot have more HKLs than self.miller_array
        # so set its anomalous_flag to False if self.miller_array is not anomalous data
        valarray._anomalous_flag = False
      if not valarray.anomalous_flag() and self.miller_array.anomalous_flag():
        # temporarily expand other arrays to anomalous if self.miller_array is anomalous
        valarray = valarray.generate_bijvoet_mates()

      missing = self.miller_array.lone_set( valarray )
      # insert NAN values for reflections in self.miller_array not found in validarray
      valarray = display.ExtendMillerArray(valarray, missing.size(), missing.indices())

      #import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
      match_valindices = miller.match_indices(self.miller_array.indices(), valarray.indices() )
      #import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
      match_valarray = valarray.select( match_valindices.pairs().column(1) )
      match_valarray.sort(by_value="packed_indices")
      match_valarray.set_info(validarray.info() )
      match_valarrays.append( match_valarray )

    for i,match_valarray in enumerate(match_valarrays):
      foms = None
      if match_valarray.is_complex_array():
        fomcolm = self.mapcoef_fom_dict.get(match_valarray.info().label_string())
        #import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
        if fomcolm:
          foms = match_valarrays[fomcolm]

      otherscene = display.scene(miller_array=match_valarray,  merge=merge,
        settings=self.settings, foms_array=foms)
      #import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
      # cast any NAN values to -1 of the colours and radii arrays before writing javascript
      nplst = np.array( list( otherscene.data ) )
      mask = np.isnan(nplst)
      npcolour = np.array( list(otherscene.colors))
      npcolourcol = npcolour.reshape( len(otherscene.data), 3 )
      #npcolourcol[mask] = -1
      otherscene.colors = flex.vec3_double()
      otherscene.colors.extend( flex.vec3_double( npcolourcol.tolist()) )
      """
      nplst = np.array( list( otherscene.radii ) )
      mask = np.isnan(nplst)
      npradii = np.array( list(otherscene.radii))
      npradiicol = npradii.reshape( len(otherscene.data), 1 )
      npradiicol[mask] = 0.2
      otherscene.radii = flex.double( npradiicol.flatten().tolist())
      """
      b = flex.bool([bool(math.isnan(e)) for e in otherscene.radii])
      # replace any nan values with 0.2
      otherscene.radii = otherscene.radii.set_selected(b, 0.2)

      d = otherscene.data
      if (isinstance(d, flex.int)):
        d = [e for e in self.scene.data if e != display.inanval]
      if match_valarray.is_complex_array():
        d = otherscene.ampl
      maxdata = max(d)
      mindata = min(d)
      self.othermaxdata.append( maxdata )
      self.othermindata.append( mindata )

      maxsigmas = minsigmas = display.nanval
      if otherscene.sigmas is not None:
        d = otherscene.sigmas
        maxsigmas = max(d)
        minsigmas = min(d)

      self.othermaxsigmas.append(maxsigmas)
      self.otherminsigmas.append(minsigmas)
      # TODO: tag array according to which otherscene is included
      self.otherscenes.append( otherscene)

      infostr = ArrayInfo(otherscene.miller_array).infostr
      self.mprint("%d, %s" %(i, infostr) )
      self.matchingarrayinfo.append(infostr)
Example #24
  W1_oxidized = GF.get_intensities()
  GF.reset_wavelength(W1)
  GF.reset_specific_at_wavelength(label_has="FE1",tables=Fe_reduced_model,newvalue=W1)
  GF.reset_specific_at_wavelength(label_has="FE2",tables=Fe_reduced_model,newvalue=W1)
  W1_reduced = GF.get_intensities()
  GF.reset_wavelength(W2)
  GF.reset_specific_at_wavelength(label_has="FE1",tables=Fe_oxidized_model,newvalue=W2)
  GF.reset_specific_at_wavelength(label_has="FE2",tables=Fe_reduced_model,newvalue=W2)
  W2_oxidized = GF.get_intensities()
  GF.reset_wavelength(W2)
  GF.reset_specific_at_wavelength(label_has="FE1",tables=Fe_reduced_model,newvalue=W2)
  GF.reset_specific_at_wavelength(label_has="FE2",tables=Fe_reduced_model,newvalue=W2)
  W2_reduced = GF.get_intensities()

  from cctbx import miller
  matches = miller.match_indices(W2_reduced.indices(), M)
  sel0 = flex.size_t([p[0] for p in matches.pairs()])
  sel1 = flex.size_t([p[1] for p in matches.pairs()])
  print (len(sel0))
  print (len(sel1))
  print (len(W2_reduced.indices()))

  W1_ox = W1_oxidized.select(sel0).data()
  W2_ox = W2_oxidized.select(sel0).data()
  W1_re = W1_reduced.select(sel0).data()
  W2_re = W2_reduced.select(sel0).data()
  idx   = W1_oxidized.select(sel0).indices()

  #oxidized_ratio = W2_oxidized.select(sel0) / W1_oxidized.select(sel0)
  #reduced_ratio = W2_reduced.select(sel0) / W1_reduced.select(sel0)
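
The sel0/sel1 idiom above is the standard way to pull co-aligned data out of two arrays that share only some reflections: match_indices pairs the two index sets, and the pair columns become selections. A toy sketch with hypothetical indices and data (the commented-out ratios above would be computed this way):

from cctbx import miller
from cctbx.array_family import flex

h0 = flex.miller_index([(1, 2, 3), (2, 3, 4), (3, 4, 5)])
h1 = flex.miller_index([(2, 3, 4), (1, 2, 3)])
d0 = flex.double([10, 20, 30])
d1 = flex.double([2, 5])

matches = miller.match_indices(h0, h1)
sel0 = flex.size_t([p[0] for p in matches.pairs()])
sel1 = flex.size_t([p[1] for p in matches.pairs()])
# co-aligned data for the common reflections, e.g. for a per-HKL ratio
ratio = d0.select(sel0) / d1.select(sel1)
assert list(ratio) == [2.0, 10.0]
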
Example #33
0
  def job_runner(self,i_exp=0,spectra={}):
    from simtbx.nanoBragg import utils

    from LS49.adse13_187.case_data import retrieve_from_repo
    experiment_file = retrieve_from_repo(i_exp)

    # Fixed hyperparameters
    mosaic_spread_samples = 500
    ev_res = 1.5  # resolution of the downsampled spectrum
    total_flux = 1e12  # total flux across channels
    beamsize_mm = 0.000886226925452758  # sqrt of the beam focal area
    spot_scale = 500.
    oversample = 1  # a factor of 1, 2, or 3 is probably enough
    include_background = False
    verbose = 0  # leave as 0, unless debugging
    shapetype = "gauss_argchk"

    #<><><><><><><><>
    os.environ["NXMX_LOCAL_DATA"]="/global/cfs/cdirs/m3562/der/master_files/run_000795.JF07T32V01_master.h5"
    expt = ExperimentListFactory.from_json_file(
      experiment_file, check_format=True)[0]

    crystal = expt.crystal
    detector = expt.detector
    flat = True  # enforce that the camera has 0 thickness
    if flat:
        from dxtbx_model_ext import SimplePxMmStrategy
        for panel in detector:
            panel.set_px_mm_strategy(SimplePxMmStrategy())
            panel.set_mu(0)
            panel.set_thickness(0)
        assert detector[0].get_thickness() == 0


    beam = expt.beam
    spec = expt.imageset.get_spectrum(0)
    energies_raw = spec.get_energies_eV().as_numpy_array()
    weights_raw = spec.get_weights().as_numpy_array()
    energies, weights = utils.downsample_spectrum(energies_raw, weights_raw, method=1, total_flux=total_flux, ev_width=ev_res)

    device_Id = 0
    if self.gpu_channels_singleton is not None:
      device_Id = self.gpu_channels_singleton.get_deviceID()

    mn_energy = (energies*weights).sum() / weights.sum()
    mn_wave = utils.ENERGY_CONV / mn_energy
    print("\n<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>")
    print("\tBreakdown:")
    for shapetype in ["gauss_argchk"]:
      BEG = time()
      print(self.gpu_channels_singleton.get_deviceID(), "device", shapetype)
      Famp_is_uninitialized = ( self.gpu_channels_singleton.get_nchannels() == 0 )
      if Famp_is_uninitialized:
        from iotbx.reflection_file_reader import any_reflection_file
        from LS49 import ls49_big_data
        merge_file = os.path.join(ls49_big_data,"adse13_228","cyto_init_merge.mtz")
        self.merged_amplitudes = any_reflection_file(merge_file).as_miller_arrays()[0].as_amplitude_array()

        F1 = self.merged_amplitudes.expand_to_p1()
        F2 = self.amplitudes.expand_to_p1() # takes care of both transform to asu & expand

        if False: # make sure that mtz file (F1) and strong spots (self.amplitudes) are roughly correlated
          from matplotlib import pyplot as plt
          from cctbx import miller
          matches = miller.match_indices( F1.indices(), self.amplitudes.indices() )
          sel0 = flex.size_t([p[0] for p in matches.pairs()])
          sel1 = flex.size_t([p[1] for p in matches.pairs()])
          data0 = F1.data().select(sel0)
          data1 = self.amplitudes.data().select(sel1)
          plt.plot(data0, data1, 'r.')
          plt.show() # yes, the two are very roughly correlated
          # end of test

        #F_P1 = F1 # legacy: use a merged mtz file
        #F_P1 = F2 # this one was absolutely wrong! way too many predictions, beyond the strong spots
        F_P1 = F1
        for x in range(1):  # in this scenario, amplitudes are independent of lambda
          self.gpu_channels_singleton.structure_factors_to_GPU_direct(
            x, F_P1.indices(), F_P1.data())
      assert self.gpu_channels_singleton.get_nchannels() == 1

      # Variable parameters
      mosaic_spread = 0.07 # degrees
      Ncells_abc = 30, 30, 10

      JF16M_numpy_array, TIME_BG, TIME_BRAGG, _ = multipanel_sim(
        CRYSTAL=crystal, DETECTOR=detector, BEAM=beam,
        Famp = self.gpu_channels_singleton,
        energies=list(energies), fluxes=list(weights),
        background_wavelengths=[mn_wave], background_wavelength_weights=[1],
        background_total_flux=total_flux,background_sample_thick_mm=0.5,
        cuda=True,
        oversample=oversample, Ncells_abc=Ncells_abc,
        mos_dom=mosaic_spread_samples, mos_spread=mosaic_spread,
        beamsize_mm=beamsize_mm,
        profile=shapetype,
        show_params=False,
        time_panels=False, verbose=verbose,
        spot_scale_override=spot_scale,
        include_background=include_background,
        mask_file=mask_array)
      TIME_EXA = time()-BEG

      print("\t\tExascale: time for bkgrd sim: %.4fs; Bragg sim: %.4fs; total: %.4fs" % (TIME_BG, TIME_BRAGG, TIME_EXA))
    print("<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>\n")
    return JF16M_numpy_array
def run(args):
  import iotbx.phil
  phil = iotbx.phil.process_command_line(args=args, master_string="""
target_unit_cell = 78,78,37,90,90,90
  .type = unit_cell
target_space_group = P43212
  .type = space_group
d_min = 2.1
  .type = float
plot = False
  .type = bool
cut_short_at = None
  .type = int
""").show()
  print()
  work_params = phil.work.extract()
  assert work_params.d_min is not None

  print(work_params.target_unit_cell)
  print(work_params.target_space_group)

  from cctbx import miller
  miller_set = symmetry(
      unit_cell=work_params.target_unit_cell,
      space_group_info=work_params.target_space_group
    ).build_miller_set(
      anomalous_flag=True,
      d_min=work_params.d_min
    )

  miller_set.show_summary()

  # reality check
  #recip_cell_volume = work_params.target_unit_cell.reciprocal().volume()
  #recip_sphere_volume = (4/3)*math.pi*math.pow(1./work_params.d_min,3)
  #resolution_cells = recip_sphere_volume/recip_cell_volume
  #print "Number of asu's in sphere=",resolution_cells/miller_set.size()

  results = get_observations(miller_set,phil.remaining_args,work_params)

  # Create (and initialise?) arrays for statistics on the set of the
  # observed reflections which are present in the reference data set.
  completeness = flex.int(miller_set.size())
  sum_I        = flex.double(miller_set.size())
  sum_I_SIGI   = flex.double(miller_set.size())
  #last = completeness.deep_copy()

  for result,filename in results:
    result.show_summary()
    show_observations(result)

    if work_params.plot:
      # private interface to get the very strong diffraction images
      from io import StringIO
      G = StringIO()
      show_observations(result, out=G)
      for line in G.getvalue().split("\n"):
        tokens = line.split()
        if len(tokens) > 6:
          try:
            if float(tokens[3]) < 2.6 and float(tokens[-1]) > 10:
              print("Strong signal", filename, line)
          except ValueError:
            pass
    print()

    # Match up the observed intensities against the pre-generated
    # miller set, miller_set.
    matches = miller.match_indices(
      miller_set.indices(),
      result.indices())

    #for ih,hkl in enumerate(result.indices()):
    #  print hkl, result.data()[ih]
    print()

    # Update the count for each matched reflection.
    completeness += (~matches.single_selection(0)).as_int()
    for pair in matches.pairs():
      sum_I[pair[0]] += result.data()[pair[1]]
      sum_I_SIGI[pair[0]] += (result.data()[pair[1]]/result.sigmas()[pair[1]])
    #for ih,hkl in enumerate(miller_set.indices()):
    #  print "%15s"%str(hkl),"%4d"%last[ih],"%4d"%completeness[ih], sum_I[ih]

    #print matches
    #help(matches)
    #print matches.pair_selection(0)
    #for item in matches.pairs(): print item
    #print list(miller_set.indices().select(matches.pairs().column(1)))
    #print list(~matches.single_selection(0))
    #print list(~matches.single_selection(1))
    #last  = completeness.deep_copy()

  #plot_overall_completeness(completeness)

  show_overall_observations(miller_set,completeness,sum_I,sum_I_SIGI)
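
The completeness update above hinges on single_selection(0): a boolean array over miller_set that is True where a reference index found no partner, so its negation, cast to int, adds one to every matched reflection. A toy check of that semantics (hypothetical indices):

from cctbx import miller
from cctbx.array_family import flex

ref = flex.miller_index([(1, 0, 0), (2, 0, 0), (3, 0, 0)])
obs = flex.miller_index([(3, 0, 0), (1, 0, 0)])
matches = miller.match_indices(ref, obs)
# True where a reference index has no observation; negate to count the hits
hits = (~matches.single_selection(0)).as_int()
assert list(hits) == [1, 0, 1]
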
def compare_integrate_hkls(result_single, result_range):
    from cctbx import miller
    from cctbx.array_family import flex

    def calc_r_fac(data1, data2):
        return flex.sum(flex.abs(data1 - data2))/flex.sum(data1)
    # calc_r_fac()

    def calc_cc(data1, data2):
        corr = flex.linear_correlation(data1, data2)
        assert corr.is_well_defined()
        return corr.coefficient()
    # calc_cc()

    read_columns = ["IOBS","SIGMA","PEAK"]
    reader_single = integrate_hkl_as_flex.reader(result_single, read_columns)
    reader_range = integrate_hkl_as_flex.reader(result_range, read_columns)

    assert len(reader_single.hkl) == len(set(reader_single.hkl)) # No duplicate hkls!
    assert len(reader_range.hkl) == len(set(reader_range.hkl)) # No duplicate hkls!

    data_single = reader_single.arrays()
    data_range = reader_range.arrays()

    # Take common sets
    pairs = miller.match_indices(reader_single.hkl, reader_range.hkl).pairs()

    # XXX Make sure the two arrays are sorted into the same order!
    for k in read_columns:
        data_single[k] = data_single[k].select(pairs.column(0))
        data_range[k] = data_range[k].select(pairs.column(1))

    ## Sort by PEAK
    #perm = flex.sort_permutation(data=data_single["PEAK"].data(), reverse=False)
    
    # Output to terminal
    N = 20
    print "      PEAK                          r_fac  cc"
    for i in xrange(N):
        lrange, rrange = i * 100./N, (i+1)*100./N
        sel = lrange < data_single["PEAK"].data()
        sel &= data_single["PEAK"].data() <= rrange
        sel &= 0.95 < data_range["PEAK"].data() 

        sel_I_range = data_range["IOBS"].select(sel)
        sel_I_single = data_single["IOBS"].select(sel)

        r_fac = calc_r_fac(sel_I_range.data(), 
                           sel_I_single.data())

        cc = calc_cc(sel_I_range.data(), 
                     sel_I_single.data())

        for r, s, p in zip(sel_I_range.data()[:10], sel_I_single.data()[:10],
                           data_single["PEAK"].select(sel).data()):
            print("  ", r, s, p)

        print("%6.2f .. %6.2f [nref= %10d] %.4f %.4f" % (lrange, rrange, sum(sel), r_fac, cc))

    # Output to file
    ofs = open("result.dat", "w")
    ofs.write("H K L single.I single.SIGMA single.PEAK range.I range.SIGMA range.PEAK\n")
    for values in zip(data_single["IOBS"].indices(),
                      data_single["IOBS"].data(), data_single["SIGMA"].data(), data_single["PEAK"].data(),
                      data_range["IOBS"].data(), data_range["SIGMA"].data(), data_range["PEAK"].data()):
        ofs.write("%d %d %d %f %f %f %f %f %f\n" % ((values[0][0], values[0][1], values[0][2]) + values[1:]))
Example #36
0
    GF.reset_specific_at_wavelength(label_has="FE2",
                                    tables=Fe_reduced_model,
                                    newvalue=W2)
    W2_reduced = GF.get_intensities()
    # Einsle paper: Reduced form has
    #    buried irons, FE1, in Fe(III) state (absorption at higher energy, oxidized)
    #    surface iron, FE2, in Fe(II) state (absorption at lower energy, reduced)

    from cctbx import miller
    W2i = W2_reduced.indices()
    with (open("debug26.data", "w")) as F:
        for iw in range(len(W2i)):
            print("%20s, %10.2f" %
                  (W2_reduced.indices()[iw], W2_reduced.data()[iw]),
                  file=F)
    matches = miller.match_indices(M, W2i)
    sel0 = flex.size_t([p[0] for p in matches.pairs()])
    sel1 = flex.size_t([p[1] for p in matches.pairs()])
    print("matches", len(sel0))
    #sel0unique is a dictionary keyed by unique Miller indices.
    sel0unique = {}
    for item in sel1:
        sel0unique[W2i[item]] = item
    print("unique", len(sel0unique))
    print("total images %d, strong %d" % (G.images_all, len(G.images_strong)))
    exit("done outputting intensities")
    #doublecheck this
    #for si in xrange(len(sel0)):
    #print (si, M[sel0[si]], W2i[sel1[si]], W2i[sel0unique[W2i[sel1[si]]]])

    per_HKL_I = {}
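
The sel0unique trick above deduplicates repeated observations of the same HKL by keying a dict on the Miller index; when an index occurs more than once, the last position seen wins. A toy sketch of just that step:

from cctbx.array_family import flex

obs_hkl = flex.miller_index([(1, 0, 0), (2, 0, 0), (1, 0, 0)])
# keep one representative position per unique HKL (the last one seen wins)
unique = {}
for pos, hkl in enumerate(obs_hkl):
    unique[hkl] = pos
assert unique == {(1, 0, 0): 2, (2, 0, 0): 1}
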
Example #37
0
  def add_miller_array(self, array, array_type=None,
                       column_name=None, column_names=None):
    """
    Accepts a miller array, and one of array_type, column_name or column_names.
    """

    assert [array_type, column_name, column_names].count(None) == 2
    if array_type is not None:
      assert array_type in ('calc', 'meas')
    elif column_name is not None:
      column_names = [column_name]
    if array.is_complex_array():
      if column_names is None:
        column_names = [self.prefix+'F_'+array_type,
                        self.prefix+'phase_'+array_type]
      else: assert len(column_names) == 2
      if (('_A_' in column_names[0] and '_B_' in column_names[1]) or
          ('.A_' in column_names[0] and '.B_' in column_names[1])):
        data = [flex.real(array.data()).as_string(),
                 flex.imag(array.data()).as_string()]
      else:
        data = [flex.abs(array.data()).as_string(),
                 array.phases(deg=True).data().as_string()]
    elif array.is_hendrickson_lattman_array():
      if column_names is None:
        column_names = [self.prefix+'HL_%s_iso' %abcd for abcd in 'ABCD']
      else: assert len(column_names) == 4
      data = [d.as_string() for d in array.data().as_abcd()]
    else:
      if array_type is not None:
        if array.is_xray_intensity_array():
          obs_ext = 'squared_'
        else: obs_ext = ''
        column_names = [self.prefix+'F_'+obs_ext+array_type]
        if array.sigmas() is not None:
          column_names.append(self.prefix+'F_'+obs_ext+'sigma')
      if isinstance(array.data(), flex.std_string):
        data = [array.data()]
      else:
        data = [array.data().as_string()]
      if array.anomalous_flag():
        if ((array.sigmas() is not None and len(column_names) == 4) or
            (array.sigmas() is None and len(column_names) == 2)):
          data = []
          asu, matches = array.match_bijvoet_mates()
          for anomalous_sign in ("+", "-"):
            sel = matches.pairs_hemisphere_selection(anomalous_sign)
            sel.extend(matches.singles_hemisphere_selection(anomalous_sign))
            if (anomalous_sign == "+"):
              indices = asu.indices().select(sel)
              hemisphere_column_names = column_names[:len(column_names)//2]
            else:
              indices = -asu.indices().select(sel)
              hemisphere_column_names = column_names[len(column_names)//2:]
            hemisphere_data = asu.data().select(sel)
            hemisphere_array = miller.array(miller.set(
              array.crystal_symmetry(), indices), hemisphere_data)
            if array.sigmas() is not None:
              hemisphere_array.set_sigmas(asu.sigmas().select(sel))
            if self.refln_loop is None:
              # then this is the first array to be added to the loop,
              # hack so we don't have both hemispheres of indices
              self.indices = indices
            self.add_miller_array(
              hemisphere_array, column_names=hemisphere_column_names)
          return
      if array.sigmas() is not None and len(column_names) == 2:
        data.append(array.sigmas().as_string())
    if not (self.indices.size() == array.indices().size() and
            self.indices.all_eq(array.indices())):
      from cctbx.miller import match_indices
      other_indices = array.indices().deep_copy()
      match = match_indices(self.indices, other_indices)
      if match.singles(0).size():
        # this array is missing some reflection indices that already appear in
        # the loop, therefore pad the data with '?' values
        other_indices.extend(self.indices.select(match.single_selection(0)))
        for d in data:
          d.extend(flex.std_string(['?']*(other_indices.size() - d.size())))
        for d in data:
          assert d.size() == other_indices.size()
        match = match_indices(self.indices, other_indices)
      if match.singles(1).size():
        # this array contains some reflections that are not already present in the
        # cif loop, therefore need to add rows of '?' values
        single_indices = other_indices.select(match.single_selection(1))
        self.indices.extend(single_indices)
        n_data_columns = len(self.refln_loop.keys()) - 3
        for hkl in single_indices:
          row = list(hkl) + ['?'] * n_data_columns
          self.refln_loop.add_row(row)

      match = match_indices(self.indices, other_indices)
      perm = match.permutation()
      data = [d.select(perm) for d in data]

    if self.refln_loop is None:
      self.refln_loop = miller_indices_as_cif_loop(self.indices, prefix=self.prefix)
    columns = OrderedDict(zip(column_names, data))
    for key in columns:
      assert key not in self.refln_loop
    self.refln_loop.add_columns(columns)
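
The padding branch of add_miller_array can be exercised in isolation: singles(0) are loop indices the incoming array does not cover, so the column is padded with '?' and then permuted into loop order. A reduced sketch of just those steps, on toy flex arrays:

from cctbx.miller import match_indices
from cctbx.array_family import flex

loop_indices = flex.miller_index([(1, 0, 0), (2, 0, 0), (3, 0, 0)])
new_indices = flex.miller_index([(3, 0, 0), (1, 0, 0)])
new_data = flex.std_string(["30", "10"])

match = match_indices(loop_indices, new_indices)
# pad the incoming column with '?' for loop indices it does not cover ...
new_indices.extend(loop_indices.select(match.single_selection(0)))
new_data.extend(flex.std_string(["?"] * (new_indices.size() - new_data.size())))
# ... then permute it into the order of the existing loop
perm = match_indices(loop_indices, new_indices).permutation()
assert list(new_data.select(perm)) == ["10", "?", "30"]
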
Example #38
0
    def add_miller_array(self,
                         array,
                         array_type=None,
                         column_name=None,
                         column_names=None):
        """
    Accepts a miller array, and one of array_type, column_name or column_names.
    """

        assert [array_type, column_name, column_names].count(None) == 2
        if array_type is not None:
            assert array_type in ('calc', 'meas')
        elif column_name is not None:
            column_names = [column_name]
        if array.is_complex_array():
            if column_names is None:
                column_names = [
                    self.prefix + 'F_' + array_type,
                    self.prefix + 'phase_' + array_type
                ]
            else:
                assert len(column_names) == 2
            if (('_A_' in column_names[0] and '_B_' in column_names[1]) or
                ('.A_' in column_names[0] and '.B_' in column_names[1])):
                data = [
                    flex.real(array.data()).as_string(),
                    flex.imag(array.data()).as_string()
                ]
            else:
                data = [
                    flex.abs(array.data()).as_string(),
                    array.phases(deg=True).data().as_string()
                ]
        elif array.is_hendrickson_lattman_array():
            if column_names is None:
                column_names = [
                    self.prefix + 'HL_%s_iso' % abcd for abcd in 'ABCD'
                ]
            else:
                assert len(column_names) == 4
            data = [d.as_string() for d in array.data().as_abcd()]
        else:
            if array_type is not None:
                if array.is_xray_intensity_array():
                    obs_ext = 'squared_'
                else:
                    obs_ext = ''
                column_names = [self.prefix + 'F_' + obs_ext + array_type]
                if array.sigmas() is not None:
                    column_names.append(self.prefix + 'F_' + obs_ext + 'sigma')
            if isinstance(array.data(), flex.std_string):
                data = [array.data()]
            else:
                data = [array.data().as_string()]
            if array.anomalous_flag():
                if ((array.sigmas() is not None and len(column_names) == 4) or
                    (array.sigmas() is None and len(column_names) == 2)):
                    data = []
                    asu, matches = array.match_bijvoet_mates()
                    for anomalous_sign in ("+", "-"):
                        sel = matches.pairs_hemisphere_selection(
                            anomalous_sign)
                        sel.extend(
                            matches.singles_hemisphere_selection(
                                anomalous_sign))
                        if (anomalous_sign == "+"):
                            indices = asu.indices().select(sel)
                            hemisphere_column_names = column_names[:len(
                                column_names) // 2]
                        else:
                            indices = -asu.indices().select(sel)
                            hemisphere_column_names = column_names[
                                len(column_names) // 2:]
                        hemisphere_data = asu.data().select(sel)
                        hemisphere_array = miller.array(
                            miller.set(array.crystal_symmetry(), indices),
                            hemisphere_data)
                        if array.sigmas() is not None:
                            hemisphere_array.set_sigmas(
                                asu.sigmas().select(sel))
                        if self.refln_loop is None:
                            # then this is the first array to be added to the loop,
                            # hack so we don't have both hemispheres of indices
                            self.indices = indices
                        self.add_miller_array(
                            hemisphere_array,
                            column_names=hemisphere_column_names)
                    return
            if array.sigmas() is not None and len(column_names) == 2:
                data.append(array.sigmas().as_string())
        if not (self.indices.size() == array.indices().size()
                and self.indices.all_eq(array.indices())):
            from cctbx.miller import match_indices
            other_indices = array.indices().deep_copy()
            match = match_indices(self.indices, other_indices)
            if match.singles(0).size():
                # this array is missing some reflection indices that already
                # appear in the loop, therefore pad the data with '?' values
                other_indices.extend(
                    self.indices.select(match.single_selection(0)))
                for d in data:
                    d.extend(
                        flex.std_string(['?'] *
                                        (other_indices.size() - d.size())))
                for d in data:
                    assert d.size() == other_indices.size()
                match = match_indices(self.indices, other_indices)
            if match.singles(1).size():
                # this array contains some reflections that are not already present in the
                # cif loop, therefore need to add rows of '?' values
                single_indices = other_indices.select(
                    match.single_selection(1))
                self.indices.extend(single_indices)
                n_data_columns = len(self.refln_loop.keys()) - 3
                for hkl in single_indices:
                    row = list(hkl) + ['?'] * n_data_columns
                    self.refln_loop.add_row(row)

            match = match_indices(self.indices, other_indices)
            perm = match.permutation()
            data = [d.select(perm) for d in data]

        if self.refln_loop is None:
            self.refln_loop = miller_indices_as_cif_loop(self.indices,
                                                         prefix=self.prefix)
        columns = OrderedDict(zip(column_names, data))
        for key in columns:
            assert key not in self.refln_loop
        self.refln_loop.add_columns(columns)
Example #39
0
def run(args):
  phil = iotbx.phil.process_command_line(args=args, master_string=master_phil).show()
  work_params = phil.work.extract()
  from xfel.merging.phil_validation import application
  application(work_params)
  if ("--help" in args) :
    libtbx.phil.parse(master_phil.show())
    return

  if ((work_params.d_min is None) or
      (work_params.data is None) or
      ( (work_params.model is None) and work_params.scaling.algorithm != "mark1") ) :
    raise Usage("cxi.merge "
                "d_min=4.0 "
                "data=~/scratch/r0220/006/strong/ "
                "model=3bz1_3bz2_core.pdb")
  if ((work_params.rescale_with_average_cell) and
      (not work_params.set_average_unit_cell)) :
    raise Usage("If rescale_with_average_cell=True, you must also specify "+
      "set_average_unit_cell=True.")
  if work_params.raw_data.sdfac_auto and work_params.raw_data.sdfac_refine:
    raise Usage("Cannot specify both sdfac_auto and sdfac_refine")

  log = open("%s_%s.log" % (work_params.output.prefix,work_params.scaling.algorithm), "w")
  out = multi_out()
  out.register("log", log, atexit_send_to=None)
  out.register("stdout", sys.stdout)

  # Verify that the externally supplied isomorphous reference, if
  # present, defines a suitable column of intensities, and exit with
  # error if it does not.  Then warn if it is necessary to generate
  # Bijvoet mates.  Failure to catch these issues here would lead to
  # possibly obscure problems in cxi/cxi_cc.py later on.
  try:
    data_SR = mtz.object(work_params.scaling.mtz_file)
  except RuntimeError:
    pass
  else:
    array_SR = None
    obs_labels = []
    for array in data_SR.as_miller_arrays():
      this_label = array.info().label_string().lower()
      if array.observation_type() is not None:
        obs_labels.append(this_label.split(',')[0])
      if this_label.find('fobs')>=0:
        array_SR = array.as_intensity_array()
        break
      if this_label.find('imean')>=0:
        array_SR = array.as_intensity_array()
        break
      if this_label.find(work_params.scaling.mtz_column_F)==0:
        array_SR = array.as_intensity_array()
        break

    if array_SR is None:
      known_labels = ['fobs', 'imean', work_params.scaling.mtz_column_F]
      raise Usage(work_params.scaling.mtz_file +
                  " does not contain any observations labelled [" +
                  ", ".join(known_labels) +
                  "].  Please set scaling.mtz_column_F to one of [" +
                  ",".join(obs_labels) + "].")
    elif not work_params.merge_anomalous and not array_SR.anomalous_flag():
      print >> out, "Warning: Preserving anomalous contributors, but %s " \
        "has anomalous contributors merged.  Generating identical Bijvoet " \
        "mates." % work_params.scaling.mtz_file

  # Read Nat's reference model from an MTZ file.  XXX The observation
  # type is given as F, not I--should they be squared?  Check with Nat!
  print >> out, "I model"
  if work_params.model is not None:
    from xfel.merging.general_fcalc import run
    i_model = run(work_params)
    work_params.target_unit_cell = i_model.unit_cell()
    work_params.target_space_group = i_model.space_group_info()
    i_model.show_summary()
  else:
    i_model = None

  print >> out, "Target unit cell and space group:"
  print >> out, "  ", work_params.target_unit_cell
  print >> out, "  ", work_params.target_space_group

  miller_set = symmetry(
      unit_cell=work_params.target_unit_cell,
      space_group_info=work_params.target_space_group
    ).build_miller_set(
      anomalous_flag=not work_params.merge_anomalous,
      d_max=work_params.d_max,
      d_min=work_params.d_min / math.pow(
        1 + work_params.unit_cell_length_tolerance, 1 / 3))
  miller_set = miller_set.change_basis(
    work_params.model_reindex_op).map_to_asu()

  if i_model is not None:
    matches = miller.match_indices(i_model.indices(), miller_set.indices())
    assert not matches.have_singles()
    miller_set = miller_set.select(matches.permutation())

# ---- Augment this code with any special procedures for x scaling
  scaler = xscaling_manager(
    miller_set=miller_set,
    i_model=i_model,
    params=work_params,
    log=out)
  scaler.scale_all()
  if scaler.n_accepted == 0:
    return None
# --- End of x scaling
  scaler.uc_values = unit_cell_distribution()
  for icell in range(len(scaler.frames["unit_cell"])):
    if scaler.params.model is None:
      scaler.uc_values.add_cell(
        unit_cell=scaler.frames["unit_cell"][icell])
    else:
      scaler.uc_values.add_cell(
        unit_cell=scaler.frames["unit_cell"][icell],
        rejected=(scaler.frames["cc"][icell] < scaler.params.min_corr))

  scaler.show_unit_cell_histograms()
  if (work_params.rescale_with_average_cell) :
    average_cell_abc = scaler.uc_values.get_average_cell_dimensions()
    average_cell = uctbx.unit_cell(list(average_cell_abc) +
      list(work_params.target_unit_cell.parameters()[3:]))
    work_params.target_unit_cell = average_cell
    print >> out, ""
    print >> out, "#" * 80
    print >> out, "RESCALING WITH NEW TARGET CELL"
    print >> out, "  average cell: %g %g %g %g %g %g" % \
      work_params.target_unit_cell.parameters()
    print >> out, ""
    scaler.reset()
    scaler = xscaling_manager(
      miller_set=miller_set,
      i_model=i_model,
      params=work_params,
      log=out)
    scaler.scale_all()
    scaler.uc_values = unit_cell_distribution()
    for icell in range(len(scaler.frames["unit_cell"])):
      if scaler.params.model is None:
        scaler.uc_values.add_cell(
          unit_cell=scaler.frames["unit_cell"][icell])
      else:
        scaler.uc_values.add_cell(
          unit_cell=scaler.frames["unit_cell"][icell],
          rejected=(scaler.frames["cc"][icell] < scaler.params.min_corr))
    scaler.show_unit_cell_histograms()
  if False:  # (work_params.output.show_plots)
    try:
      plot_overall_completeness(completeness)
    except Exception as e:
      print("ERROR: can't show plots")
      print("  %s" % str(e))
Example #40
0
    GF.set_k_sol(0.435)
    GF.reset_wavelength(W2)
    GF.reset_specific_at_wavelength(label_has="FE1",
                                    tables=Fe_oxidized_model,
                                    newvalue=W2)
    GF.reset_specific_at_wavelength(label_has="FE2",
                                    tables=Fe_reduced_model,
                                    newvalue=W2)
    W2_reduced = GF.get_intensities()
    # Einsle paper: Reduced form has
    #    buried irons, FE1, in Fe(III) state (absorption at higher energy, oxidized)
    #    surface iron, FE2, in Fe(II) state (absorption at lower energy, reduced)

    from cctbx import miller
    W2i = W2_reduced.indices()
    matches = miller.match_indices(M, W2i)
    sel0 = flex.size_t([p[0] for p in matches.pairs()])
    sel1 = flex.size_t([p[1] for p in matches.pairs()])
    print("matches", len(sel0))
    #sel0unique is a dictionary keyed by unique Miller indices.
    sel0unique = {}
    for item in sel1:
        sel0unique[W2i[item]] = item
    print("unique", len(sel0unique))
    print("total images %d, strong %d" % (G.images_all, len(G.images_strong)))

    #doublecheck this
    #for si in xrange(len(sel0)):
    #print (si, M[sel0[si]], W2i[sel1[si]], W2i[sel0unique[W2i[sel1[si]]]])

    per_energy_I = {}
Example #41
0
def run(args):
  phil = iotbx.phil.process_command_line(args=args, master_string=master_phil).show()
  work_params = phil.work.extract()
  from xfel.merging.phil_validation import application,samosa
  application(work_params)
  samosa(work_params)
  if ("--help" in args) :
    libtbx.phil.parse(master_phil.show())
    return

  if ((work_params.d_min is None) or
      (work_params.data is None) ) :
    command_name = os.environ["LIBTBX_DISPATCHER_NAME"]
    raise Usage(command_name + " "
                "d_min=4.0 "
                "data=~/scratch/r0220/006/strong/ "
                "model=3bz1_3bz2_core.pdb")
  if ((work_params.rescale_with_average_cell) and
      (not work_params.set_average_unit_cell)) :
    raise Usage("If rescale_with_average_cell=True, you must also specify "+
      "set_average_unit_cell=True.")
  if work_params.raw_data.sdfac_auto and work_params.raw_data.sdfac_refine:
    raise Usage("Cannot specify both sdfac_auto and sdfac_refine")

  # Read Nat's reference model from an MTZ file.  XXX The observation
  # type is given as F, not I--should they be squared?  Check with Nat!
  log = open("%s.log" % work_params.output.prefix, "w")
  out = multi_out()
  out.register("log", log, atexit_send_to=None)
  out.register("stdout", sys.stdout)
  print >> out, "I model"
  if work_params.model is not None:
    from xfel.merging.general_fcalc import run
    i_model = run(work_params)
    work_params.target_unit_cell = i_model.unit_cell()
    work_params.target_space_group = i_model.space_group_info()
    i_model.show_summary()
  else:
    i_model = None

  print >> out, "Target unit cell and space group:"
  print >> out, "  ", work_params.target_unit_cell
  print >> out, "  ", work_params.target_space_group

  # Adjust the minimum d-spacing of the generated Miller set to assure
  # that the desired high-resolution limit is included even if the
  # observed unit cell differs slightly from the target.  If a
  # reference model is present, ensure that Miller indices are ordered
  # identically.
  miller_set = symmetry(
      unit_cell=work_params.target_unit_cell,
      space_group_info=work_params.target_space_group
    ).build_miller_set(
      anomalous_flag=not work_params.merge_anomalous,
      d_max=work_params.d_max,
      d_min=work_params.d_min / math.pow(
        1 + work_params.unit_cell_length_tolerance, 1 / 3))
  miller_set = miller_set.change_basis(
    work_params.model_reindex_op).map_to_asu()

  if i_model is not None:
    matches = miller.match_indices(i_model.indices(), miller_set.indices())
    assert not matches.have_singles()
    miller_set = miller_set.select(matches.permutation())

  frame_files = get_observations(work_params)
  scaler = scaling_manager(
    miller_set=miller_set,
    i_model=i_model,
    params=work_params,
    log=out)
  scaler.scale_all(frame_files)
  if scaler.n_accepted == 0:
    return None
  scaler.show_unit_cell_histograms()
  if (work_params.rescale_with_average_cell) :
    average_cell_abc = scaler.uc_values.get_average_cell_dimensions()
    average_cell = uctbx.unit_cell(list(average_cell_abc) +
      list(work_params.target_unit_cell.parameters()[3:]))
    work_params.target_unit_cell = average_cell
    print >> out, ""
    print >> out, "#" * 80
    print >> out, "RESCALING WITH NEW TARGET CELL"
    print >> out, "  average cell: %g %g %g %g %g %g" % \
      work_params.target_unit_cell.parameters()
    print >> out, ""
    scaler.reset()
    scaler.scale_all(frame_files)
    scaler.show_unit_cell_histograms()
  if False:  # (work_params.output.show_plots)
    try:
      plot_overall_completeness(completeness)
    except Exception as e:
      print("ERROR: can't show plots")
      print("  %s" % str(e))