Example No. 1
def test_ann():
    # imports inferred from usage below (flex/AnnAdaptor as in Example No. 3)
    import random

    from annlib_ext import AnnAdaptor as ann_adaptor
    from cctbx.array_family import flex
    from scitbx import matrix

    reference = flex.double()

    for j in range(3 * 100):
        reference.append(random.random())

    query = flex.double()

    for j in range(3 * 10):
        query.append(random.random())

    ann = ann_adaptor(data=reference, dim=3, k=1)
    ann.query(query)

    # work out how far separated on average they are - which should
    # in principle decrease as the number of positions in the reference set
    # increases

    offsets = []

    for j in range(10):
        q = matrix.col([query[3 * j + k] for k in range(3)])
        r = matrix.col([reference[3 * ann.nn[j] + k] for k in range(3)])
        offsets.append((q - r).length())

    return meansd(offsets)
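The snippet assumes a meansd helper from its enclosing test module; a minimal
sketch of what it presumably computes (mean and sample standard deviation):

import math

def meansd(values):
    # assumed helper, not part of the original example
    mean = sum(values) / len(values)
    var = sum((v - mean) ** 2 for v in values) / (len(values) - 1)
    return mean, math.sqrt(var)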
Example No. 2
def exercise_derivatives(space_group_info, out):
  crystal_symmetry = space_group_info.any_compatible_crystal_symmetry(
    volume=1000)
  space_group = space_group_info.group()
  adp_constraints = space_group.adp_constraints()
  m = adp_constraints.row_echelon_form()
  print >> out, matrix.rec(m, (m.size()//6, 6)).mathematica_form(
    one_row_per_line=True)
  print >> out, list(adp_constraints.independent_indices)
  u_cart_p1 = adptbx.random_u_cart()
  u_star_p1 = adptbx.u_cart_as_u_star(crystal_symmetry.unit_cell(), u_cart_p1)
  u_star = space_group.average_u_star(u_star_p1)
  miller_set = miller.build_set(
    crystal_symmetry=crystal_symmetry, d_min=3, anomalous_flag=False)
  for h in miller_set.indices():
    grads_fin = d_dw_d_u_star_finite(h=h, u_star=u_star)
    print >> out, "grads_fin:", list(grads_fin)
    grads_ana = d_dw_d_u_star_analytical(h=h, u_star=u_star)
    print >> out, "grads_ana:", list(grads_ana)
    compare_derivatives(grads_ana, grads_fin)
    curvs_fin = d2_dw_d_u_star_d_u_star_finite(h=h, u_star=u_star)
    print >> out, "curvs_fin:", list(curvs_fin)
    curvs_ana = d2_dw_d_u_star_d_u_star_analytical(h=h, u_star=u_star)
    print >> out, "curvs_ana:", list(curvs_ana)
    compare_derivatives(curvs_ana, curvs_fin)
    #
    u_indep = adp_constraints.independent_params(u_star)
    grads_indep_fin = d_dw_d_u_indep_finite(
      adp_constraints=adp_constraints, h=h, u_indep=u_indep)
    print >> out, "grads_indep_fin:", list(grads_indep_fin)
    grads_indep_ana = flex.double(adp_constraints.independent_gradients(
      all_gradients=list(grads_ana)))
    print >> out, "grads_indep_ana:", list(grads_indep_ana)
    compare_derivatives(grads_indep_ana, grads_indep_fin)
    curvs_indep_fin = d2_dw_d_u_indep_d_u_indep_finite(
      adp_constraints=adp_constraints, h=h, u_indep=u_indep)
    print >> out, "curvs_indep_fin:", list(curvs_indep_fin)
    curvs_indep_ana = adp_constraints.independent_curvatures(
      all_curvatures=curvs_ana)
    print >> out, "curvs_indep_ana:", list(curvs_indep_ana)
    compare_derivatives(curvs_indep_ana, curvs_indep_fin)
    #
    curvs_indep_mm = None
    if (str(space_group_info) == "P 1 2 1"):
      assert list(adp_constraints.independent_indices) == [0,1,2,4]
      curvs_indep_mm = p2_curv(h, u_star)
    elif (str(space_group_info) == "P 4"):
      assert list(adp_constraints.independent_indices) == [1,2]
      curvs_indep_mm = p4_curv(h, u_star)
    elif (str(space_group_info) in ["P 3", "P 6"]):
      assert list(adp_constraints.independent_indices) == [2,3]
      curvs_indep_mm = p3_curv(h, u_star)
    elif (str(space_group_info) == "P 2 3"):
      assert list(adp_constraints.independent_indices) == [2]
      curvs_indep_mm = p23_curv(h, u_star)
    if (curvs_indep_mm is not None):
      curvs_indep_mm = flex.double(
        curvs_indep_mm).matrix_symmetric_as_packed_u()
      print >> out, "curvs_indep_mm:", list(curvs_indep_mm)
      compare_derivatives(curvs_indep_ana, curvs_indep_mm)
Example No. 3
def compare(xyz_to_hkl, xyz_to_hkl_ref):
    # construct ann to perform search...

    from cctbx.array_family import flex
    from annlib_ext import AnnAdaptor as ann_adaptor

    reference = flex.double()

    xyzs = list(xyz_to_hkl)

    for xyz in xyzs:
        reference.append(xyz[0])
        reference.append(xyz[1])
        reference.append(xyz[2])

    ann = ann_adaptor(data=reference, dim=3, k=1)

    n_correct = 0
    n_wrong = 0

    for xyz in xyz_to_hkl_ref:
        query = flex.double(xyz)
        ann.query(query)
        nnxyz = xyzs[ann.nn[0]]
        if xyz_to_hkl_ref[xyz] == xyz_to_hkl[nnxyz]:
            n_correct += 1
        else:
            n_wrong += 1

    return n_correct, n_wrong
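A hedged usage sketch with made-up coordinates and Miller indices, just to show
the expected dict-of-tuples inputs:

xyz_to_hkl = {(0.0, 0.0, 0.0): (1, 0, 0), (5.0, 5.0, 5.0): (0, 1, 0)}
xyz_to_hkl_ref = {(0.1, 0.0, 0.0): (1, 0, 0), (5.1, 5.0, 5.0): (0, 1, 0)}
n_correct, n_wrong = compare(xyz_to_hkl, xyz_to_hkl_ref)
assert (n_correct, n_wrong) == (2, 0)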
Example No. 4
 def __init__(self,
              fmodel,
              map_type_str,
              acentrics_scale = 2.0,
              centrics_scale = 1.0):
   """
   Compute x and y for x*Fobs-y*Fmodel given map type string
   """
   self.fmodel = fmodel
   mnm = mmtbx.map_names(map_name_string = map_type_str)
   # R.Read, SIGMAA: 2mFo-DFc (acentrics) & mFo (centrics)
   centric_flags  = self.fmodel.f_obs().centric_flags().data()
   acentric_flags = ~centric_flags
   if(mnm.k != 0):
     self.fo_scale = flex.double(self.fmodel.f_obs().size(), 1.0)
   else: self.fo_scale = flex.double(self.fmodel.f_obs().size(), 0.0)
   if(mnm.n != 0):
     self.fc_scale = flex.double(self.fmodel.f_obs().size(), 1.0)
   else: self.fc_scale = flex.double(self.fmodel.f_obs().size(), 0.0)
   abs_kn = abs(mnm.k*mnm.n)
   if(mnm.k != abs(mnm.n) and abs_kn > 1.e-6):
     self.fo_scale.set_selected(acentric_flags, mnm.k)
     self.fo_scale.set_selected( centric_flags, max(mnm.k-centrics_scale,0.))
     self.fc_scale.set_selected(acentric_flags, mnm.n)
     self.fc_scale.set_selected( centric_flags, max(mnm.n-centrics_scale,0.))
   elif(mnm.k == abs(mnm.n) and abs_kn > 1.e-6):
     fo_scale_k = self.fo_scale*mnm.k
     fc_scale_n = self.fc_scale*mnm.n
     self.fo_scale.set_selected(acentric_flags, fo_scale_k*acentrics_scale)
     self.fo_scale.set_selected( centric_flags, fo_scale_k)
     self.fc_scale.set_selected(acentric_flags, fc_scale_n*acentrics_scale)
     self.fc_scale.set_selected( centric_flags, fc_scale_n)
   else:
     self.fo_scale *= mnm.k
     self.fc_scale *= mnm.n
Example No. 5
 def calc_partiality_anisotropy_set(self, my_uc, rotx, roty, miller_indices,
     ry, rz, r0, re, nu,
     bragg_angle_set, alpha_angle_set, wavelength, crystal_init_orientation,
     spot_pred_x_mm_set, spot_pred_y_mm_set, detector_distance_mm,
     partiality_model, flag_beam_divergence):
   #use III.4 in Winkler et al 1979 (A35; P901) for set of miller indices
   O = sqr(my_uc.orthogonalization_matrix()).transpose()
   R = sqr(crystal_init_orientation.crystal_rotation_matrix()).transpose()
   CO = crystal_orientation(O*R, basis_type.direct)
   CO_rotate = CO.rotate_thru((1,0,0), rotx
                ).rotate_thru((0,1,0), roty)
   A_star = sqr(CO_rotate.reciprocal_matrix())
   S0 = -1*col((0,0,1./wavelength))
   #calculate rs
   rs_set = r0 + (re * flex.tan(bragg_angle_set))
   if flag_beam_divergence:
     rs_set += ((ry * flex.cos(alpha_angle_set))**2 + (rz * flex.sin(alpha_angle_set))**2)**0.5  # 0.5, not (1/2): integer division would zero the exponent under Python 2
   #calculate rh
   x = A_star.elems * miller_indices.as_vec3_double()
   sd_array = x + S0.elems
   rh_set = sd_array.norms() - (1/wavelength)
   #calculate partiality
   if partiality_model == "Lorentzian":
     partiality_set = ((rs_set**2)/((2*(rh_set**2))+(rs_set**2)))
   elif partiality_model == "Voigt":
     partiality_set = self.voigt(rh_set, rs_set, nu)
   elif partiality_model == "Lognormal":
     partiality_set = self.lognpdf(rh_set, rs_set, nu)
   #calculate delta_xy
   d_ratio = -detector_distance_mm/sd_array.parts()[2]
   calc_xy_array = flex.vec3_double(sd_array.parts()[0]*d_ratio, \
       sd_array.parts()[1]*d_ratio, flex.double([0]*len(d_ratio)))
   pred_xy_array = flex.vec3_double(spot_pred_x_mm_set, spot_pred_y_mm_set, flex.double([0]*len(d_ratio)))
   delta_xy_set = (pred_xy_array - calc_xy_array).norms()
   return partiality_set, delta_xy_set, rs_set, rh_set
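For intuition, the Lorentzian branch is a simple scalar formula,
partiality = rs^2 / (2*rh^2 + rs^2); a minimal check with made-up numbers:

rs, rh = 0.002, 0.001
partiality = rs**2 / (2 * rh**2 + rs**2)
assert abs(partiality - 2.0/3.0) < 1e-12  # rs = 2*rh gives 4/(2+4)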
Example No. 6
def exercise_match_multi_indices():
  h0 = flex.miller_index(((1,2,3), (-1,-2,-3), (2,3,4), (-2,-3,-4), (3,4,5)))
  d0 = flex.double((1,2,3,4,5))
  h1 = flex.miller_index(((1,2,3), (-2,-3,-4), (1,2,3), (2,3,4)))
  d1 = flex.double((10,20,30,40))
  mi = miller.match_multi_indices(h0, h0)
  assert mi.have_singles() == 0
  assert list(mi.pairs()) == zip(range(5), range(5))
  mi = miller.match_multi_indices(h0, h1)
  assert tuple(mi.singles(0)) == (1,4,)
  assert tuple(mi.singles(1)) == ()
  assert len(set(mi.pairs()) - set([(0,0), (0,2), (2,3), (3, 1)])) == 0
  assert tuple(mi.number_of_matches(0)) == (2, 0, 1, 1, 0)
  assert tuple(mi.pair_selection(0)) == (1, 0, 1, 1, 0)
  assert tuple(mi.single_selection(0)) == (0, 1, 0, 0, 1)
  assert tuple(mi.number_of_matches(1)) == (1, 1, 1, 1)
  assert tuple(mi.pair_selection(1)) == (1, 1, 1, 1)
  assert tuple(mi.single_selection(1)) == (0, 0, 0, 0)
  assert tuple(mi.paired_miller_indices(0)) \
      == tuple(h0.select(mi.pair_selection(0)))
  l1 = list(mi.paired_miller_indices(1))
  l2 = list(h1.select(mi.pair_selection(1)))
  l1.sort()
  l2.sort()
  assert l1 == l2
  try:
    miller.match_multi_indices(h1, h0)
  except RuntimeError, e:
    pass
Example No. 7
def exercise_map_to_asu(sg_symbol):
  sg_type = sgtbx.space_group_type(sg_symbol)
  index_abs_range = (4,4,4)
  for anomalous_flag in (False,True):
    m = miller.index_generator(
      sg_type, anomalous_flag, index_abs_range).to_array()
    a = flex.double()
    p = flex.double()
    c = flex.hendrickson_lattman()
    for i in xrange(m.size()):
      a.append(random.random())
      p.append(random.random() * 2)
      c.append([random.random() for j in xrange(4)])
    f = flex.polar(a, p)
    p = [p, p*(180/math.pi)]
    m_random = flex.miller_index()
    p_random = [flex.double(), flex.double()]
    c_random = flex.hendrickson_lattman()
    f_random = flex.complex_double()
    for i,h_asym in enumerate(m):
      h_eq = miller.sym_equiv_indices(sg_type.group(), h_asym)
      i_eq = random.randrange(h_eq.multiplicity(anomalous_flag))
      h_i = h_eq(i_eq)
      m_random.append(h_i.h())
      for deg in (False,True):
        p_random[deg].append(h_i.phase_eq(p[deg][i], deg))
      f_random.append(h_i.complex_eq(f[i]))
      c_random.append(h_i.hendrickson_lattman_eq(c[i]))
    m_random_copy = m_random.deep_copy()
    miller.map_to_asu(sg_type, anomalous_flag, m_random_copy)
    for i,h_asym in enumerate(m):
      assert h_asym == m_random_copy[i]
    m_random_copy = m_random.deep_copy()
    miller.map_to_asu(sg_type, anomalous_flag, m_random_copy, f_random)
    for i,h_asym in enumerate(m):
      assert h_asym == m_random_copy[i]
    for i,f_asym in enumerate(f):
      assert abs(f_asym - f_random[i]) < 1.e-6
    m_random_copy = m_random.deep_copy()
    a_random = a.deep_copy()
    miller.map_to_asu(sg_type, anomalous_flag, m_random_copy, a_random)
    for i,h_asym in enumerate(m):
      assert h_asym == m_random_copy[i]
    for i,a_asym in enumerate(a):
      assert a_asym == a_random[i]
    for deg in (False,True):
      m_random_copy = m_random.deep_copy()
      miller.map_to_asu(
        sg_type, anomalous_flag, m_random_copy, p_random[deg], deg)
      for i,h_asym in enumerate(m):
        assert h_asym == m_random_copy[i]
      for i,p_asym in enumerate(p[deg]):
        assert scitbx.math.phase_error(p_asym, p_random[deg][i], deg) < 1.e-5
    m_random_copy = m_random.deep_copy()
    miller.map_to_asu(sg_type, anomalous_flag, m_random_copy, c_random)
    for i,h_asym in enumerate(m):
      assert h_asym == m_random_copy[i]
    for i,c_asym in enumerate(c):
      for j in xrange(4):
        assert abs(c_asym[j] - c_random[i][j]) < 1.e-5
Example No. 8
def set_refinable_parameters(xray_structure, parameters, selections,
                             enforce_positivity=False):
  # XXX PVA: Code below is terribly inefficient and MUST be moved into C++
  sz = xray_structure.scatterers().size()
  i = 0
  for sel in selections:
    # pre-check for positivity begin
    # spread negative occupancies across i_seqs having positive ones
    par_all = flex.double()
    par_neg = flex.double()
    i_p = i
    for sel_ in sel:
      p = parameters[i_p]
      par_all.append(p)
      if(p<0): par_neg.append(p)
      i_p += 1
    if(enforce_positivity and par_neg.size()>0):
      par_all = par_all - flex.min(par_all)
      fs = flex.sum(par_all)
      if(fs != 0):
        par_all = par_all / fs
    # pre-check for positivity end
    for j, sel_ in enumerate(sel):
      sel__b = flex.bool(sz, flex.size_t(sel_))
      xray_structure.set_occupancies(par_all[j], sel__b)
      i+=1
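The positivity pre-check above just shifts each group so its minimum is zero
and renormalizes to unit sum; a worked example with made-up occupancies:

par_all = [-0.2, 0.5, 0.7]
shifted = [p - min(par_all) for p in par_all]  # [0.0, 0.7, 0.9]
total = sum(shifted)                           # 1.6
print [p / total for p in shifted]             # [0.0, 0.4375, 0.5625]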
Example No. 9
def exercise_expand():
  sg = sgtbx.space_group("P 41 (1,-1,0)")
  h = flex.miller_index(((3,1,-2), (1,-2,0)))
  assert tuple(sg.is_centric(h)) == (0, 1)
  p1 = miller.expand_to_p1_iselection(
    space_group=sg, anomalous_flag=False, indices=h, build_iselection=False)
  p1_i0 = ((-3,-1,2), (-1, 3,2),(3,1,2),(1,-3,2),(1,-2, 0),(2,1,0))
  assert tuple(p1.indices) == p1_i0
  assert p1.iselection.size() == 0
  p1 = miller.expand_to_p1_iselection(
    space_group=sg, anomalous_flag=True, indices=h, build_iselection=False)
  assert tuple(p1.indices) \
      == ((3,1,-2), (1,-3,-2), (-3,-1,-2), (-1,3,-2),
          (1,-2,0), (-2,-1,0), (-1,2,0), (2,1,0))
  p1 = miller.expand_to_p1_iselection(
    space_group=sg, anomalous_flag=False, indices=h, build_iselection=True)
  assert tuple(p1.indices) == p1_i0
  assert tuple(p1.iselection) == (0,0,0,0,1,1)
  a = flex.double((1,2))
  p = flex.double((10,90))
  p1 = miller.expand_to_p1_phases(
    space_group=sg, anomalous_flag=False, indices=h, data=p, deg=True)
  assert approx_equal(tuple(p1.data), (-10,110,110,-10, 90,30))
  p1 = miller.expand_to_p1_phases(
    space_group=sg, anomalous_flag=True, indices=h, data=p, deg=True)
  assert approx_equal(tuple(p1.data), (10,-110,-110,10, 90,-30,-90,30))
  p = flex.double([x * math.pi/180 for x in p])
  v = [x * math.pi/180 for x in p1.data]
  p1 = miller.expand_to_p1_phases(
    space_group=sg, anomalous_flag=True, indices=h, data=p, deg=False)
  assert approx_equal(tuple(p1.data), v)
  f = flex.polar(a, p)
  p1 = miller.expand_to_p1_complex(
    space_group=sg, anomalous_flag=True, indices=h, data=f)
  assert approx_equal(tuple(flex.abs(p1.data)), (1,1,1,1,2,2,2,2))
  assert approx_equal(tuple(flex.arg(p1.data)), v)
  hl = flex.hendrickson_lattman([(1,2,3,4), (5,6,7,8)])
  p1 = miller.expand_to_p1_hendrickson_lattman(
    space_group=sg, anomalous_flag=True, indices=h, data=hl)
  assert approx_equal(p1.data, [
    [1,2,3,4],
    [1.232051,-1.866025,-4.964102,0.5980762],
    [1.232051,-1.866025,-4.964102,0.5980762],
    [1,2,3,4],
    [5,6,7,8],
    [2.696152,-7.330127,-10.4282,2.062178],
    [-5,-6,7,8],
    [7.696152,-1.330127,3.428203,-10.06218]])
  b = flex.bool([True,False])
  p1 = miller.expand_to_p1_iselection(
    space_group=sg, anomalous_flag=True, indices=h, build_iselection=True)
  assert b.select(p1.iselection).all_eq(
    flex.bool([True, True, True, True, False, False, False, False]))
  i = flex.int([13,17])
  p1 = miller.expand_to_p1_iselection(
    space_group=sg, anomalous_flag=True, indices=h, build_iselection=True)
  assert i.select(p1.iselection).all_eq(flex.int([13,13,13,13,17,17,17,17]))
  #
  assert approx_equal(miller.statistical_mean(sg, False, h, a), 4/3.)
  assert approx_equal(miller.statistical_mean(sg, True, h, a), 3/2.)
Example No. 10
def df2m(df, cell, spgr, data=None, sigmas=None):
    """Constructs a miller.array from the columns specified by data/sigmas in the dataframe,
    if both are None, returns just the indices.
    needs cell and spgr to generate a symmetry object."""
    anomalous_flag = False

    if isinstance(df, pd.DataFrame):
        try:
            sel = df[data].notnull()  # select notnull data items for index
        except ValueError:
            index = df.index
        else:
            index = df.index[sel]
    else:
        index = df

    indices = flex.miller_index(index)

    if data:
        data = flex.double(df[data][sel])
    if sigmas:
        sigmas = flex.double(df[sigmas][sel])

    symm = make_symmetry(cell, spgr)
    ms = miller.set(
        crystal_symmetry=symm, indices=indices, anomalous_flag=anomalous_flag)
    return miller.array(ms, data=data, sigmas=sigmas)
Example No. 11
def detwin_miller_array(miller_obs,
                        twin_law,
                        twin_fraction):
  # for the moment, ignore incomplete twin pairs
  cb_op = sgtbx.change_of_basis_op( twin_law )
  twin_related_miller  = miller_obs.change_basis( cb_op ).set_observation_type(
    miller_obs )
  set1, set2 = miller_obs.common_sets( twin_related_miller )
  set1 = set1.set_observation_type( miller_obs )
  set2 = set2.set_observation_type( miller_obs )

  assert miller_obs.observation_type() is not None
  assert miller_obs.sigmas() is not None
  if set1.is_xray_amplitude_array():
    set1 = set1.f_as_f_sq()
    set2 = set2.f_as_f_sq()

  detwinned_f     = flex.double()
  detwinned_sigma = flex.double()
  if set1.is_xray_intensity_array():
    if set2.is_xray_intensity_array():
      for i1,s1,i2,s2 in zip( set1.data(), set1.sigmas(),
                              set2.data(), set2.sigmas() ):
        tmp_detwinner = detwin(i1,s1,i2,s2)
        # we do some double work here actually
        ni1 = tmp_detwinner.xm
        # assuming the detwin object exposes its variance-covariance matrix
        # as vcv; "self.vcv" cannot work inside this free function
        ns1 = math.sqrt( abs(tmp_detwinner.vcv[0]) )
        detwinned_f.append( ni1 )
        detwinned_sigma.append( ns1 )

  set1 = set1.f_sq_as_f()
  new_f = set1.customized_copy  # snippet ends here; presumably customized_copy(data=detwinned_f, sigmas=detwinned_sigma)
Example No. 12
def exercise_singular_least_squares():
  obs = flex.double([1.234])
  weights_2345 = flex.double([2.345])
  weights_zero = flex.double([0])
  r_free_flags = flex.bool([False])
  a = flex.double([0])
  b = flex.double([0])
  for obs_type in ["F", "I"]:
    for weights,scale_factor in [
          (weights_2345, 3.456),
          (weights_zero, 0)]:
      tg = ext.targets_least_squares(
        compute_scale_using_all_data=False,
        obs_type=obs_type,
        obs=obs,
        weights=weights,
        r_free_flags=r_free_flags,
        f_calc=flex.complex_double(a, b),
        derivatives_depth=2,
        scale_factor=scale_factor)
      if (weights is weights_2345):
        assert approx_equal(tg.scale_factor(), scale_factor)
        assert list(tg.gradients_work()) == [0j]
        assert list(tg.hessians_work()) == [(1,1,1)]
      else:
        assert tg.scale_factor() is None
        assert tg.target_work() is None
        assert tg.target_test() is None
        assert tg.gradients_work().size() == 0
        assert tg.hessians_work().size() == 0
Example No. 13
def exercise_f_model_no_scales(symbol = "C 2"):
  random.seed(0)
  flex.set_random_seed(0)
  x = random_structure.xray_structure(
    space_group_info       = sgtbx.space_group_info(symbol=symbol),
    elements               =(("O","N","C")*5+("H",)*10),
    volume_per_atom        = 200,
    min_distance           = 1.5,
    general_positions_only = True,
    random_u_iso           = True,
    random_occupancy       = False)
  f_obs = abs(x.structure_factors(d_min = 1.5, algorithm="fft").f_calc())
  x.shake_sites_in_place(mean_distance=1)
  k_iso = flex.double(f_obs.data().size(), 2)
  k_aniso = flex.double(f_obs.data().size(), 3)
  fmodel = mmtbx.f_model.manager(
    xray_structure = x,
    k_isotropic    = k_iso,
    k_anisotropic  = k_aniso,
    f_obs          = f_obs)
  fc = abs(fmodel.f_calc()).data()
  fm = abs(fmodel.f_model()).data()
  fmns = abs(fmodel.f_model_no_scales()).data()
  assert approx_equal(flex.mean(fm/fc), 6)
  assert approx_equal(flex.mean(fmns/fc), 1)
Example No. 14
def test_grouped_observations():

  # some dummy observations for one reflection
  I1, I2, I3 = 101, 100, 99
  w1, w2, w3 = 0.9, 1.0, 0.8
  g1, g2, g3 = 1.01, 1.0, 0.99

  # combine with observations from some other group to make a dataset
  hkl = flex.miller_index([(0,0,1)] * 3 + [(0,0,2)] * 2)
  intensity = flex.double([I1, I2, I3, 10, 10])
  weight = flex.double([w1, w2, w3, 1.0, 1.0])
  phi = flex.double(len(hkl), 0) # dummy
  scale = flex.double([g1, g2, g3, 1.0, 1.0])

  # group the observations by Miller index
  go = GroupedObservations(hkl,
                           intensity,
                           weight,
                           phi,
                           scale)

  # ensure there are two groups, the first of size 3, the second of size 2
  assert list(go.get_group_size()) == [3,2]

  # the first group has an average intensity given by the HRS formula expanded:
  avI = (w1*g1*I1 + w2*g2*I2 + w3*g3*I3) / (w1*g1*g1 + w2*g2*g2 + w3*g3*g3)
  assert approx_equal(avI, go.get_average_intensity()[0])

  print "OK"
Example No. 15
 def __init__(self, params,
                    fo,
                    hl_coeffs,
                    ncs_object=None,
                    map_coeffs=None,
                    model_map_coeffs=None,
                    log=None,
                    as_gui_program=False):
   self.model_map_coeffs = model_map_coeffs
   self.correlation_coeffs = flex.double()
   self.mean_phase_errors = flex.double()
   density_modification.density_modification.__init__(
     self, params, fo, hl_coeffs,
     ncs_object=ncs_object,
     map_coeffs=map_coeffs,
     log=log,
     as_gui_program=as_gui_program)
   if len(self.correlation_coeffs) > 1:
     model_coeffs, start_coeffs = self.model_map_coeffs.common_sets(self.map_coeffs_start)
     model_fft_map = model_coeffs.fft_map(
       resolution_factor=self.params.grid_resolution_factor).apply_sigma_scaling()
     fft_map = start_coeffs.fft_map(
       resolution_factor=self.params.grid_resolution_factor
     ).apply_sigma_scaling()
     corr = flex.linear_correlation(
       model_fft_map.real_map_unpadded().as_1d(), fft_map.real_map_unpadded().as_1d())
     print "Starting dm/model correlation: %.6f" %corr.coefficient()
     print "Final dm/model correlation:    %.6f" %self.correlation_coeffs[-1]
     fft_map.as_ccp4_map(file_name="starting.map", labels=[])
Example No. 16
def correlation(G_ref, I_ref, G_model, I_model, mi=None, plot=False):
    if G_ref:
        ref = open(G_ref)
        I_ref_sub = []
        for i in ref:
            I_ref_sub.append(float(i))
        R = flex.linear_correlation(flex.double(G_model), flex.double(I_ref_sub)).coefficient()
        print "The Gm correlation: ", round(R, 5)
    if I_ref:
        ref = open(I_ref)
        I_ref_sub = []
        I_ref_match = []
        for i in ref:
            I_ref_sub.append(float(i))
        if mi:
            for j in mi:
                I_ref_match.append(I_ref_sub[j])
        else:
            model_match = []
            for i, j in zip(I_model, I_ref_sub):
                if i != 0:
                    model_match.append(i)
                    I_ref_match.append(j)
            I_model = model_match
        R = flex.linear_correlation(flex.double(I_model), flex.double(I_ref_match)).coefficient()
        print "The Ih correlation: ", round(R, 5)
Example No. 17
 def __init__(self, centers_of_mass,
                    sites_cart,
                    target_functor,
                    rot_objs,
                    selections,
                    suppress_gradients):
   t_r = target_functor(compute_gradients=not suppress_gradients)
   self.f = t_r.target_work()
   if (suppress_gradients):
     self.grads_wrt_r = None
     self.grads_wrt_t = None
     return
   target_grads_wrt_xyz = t_r.gradients_wrt_atomic_parameters(site=True)
   self.grads_wrt_r = []
   self.grads_wrt_t = []
   target_grads_wrt_xyz = flex.vec3_double(target_grads_wrt_xyz.packed())
   for sel,rot_obj, cm in zip(selections, rot_objs, centers_of_mass):
       sites_cart_cm = sites_cart.select(sel) - cm
       target_grads_wrt_xyz_sel = target_grads_wrt_xyz.select(sel)
       target_grads_wrt_r = matrix.sqr(
                   sites_cart_cm.transpose_multiply(target_grads_wrt_xyz_sel))
       self.grads_wrt_t.append(flex.double(target_grads_wrt_xyz_sel.sum()))
       g_phi = (rot_obj.r_phi() * target_grads_wrt_r).trace()
       g_psi = (rot_obj.r_psi() * target_grads_wrt_r).trace()
       g_the = (rot_obj.r_the() * target_grads_wrt_r).trace()
       self.grads_wrt_r.append(flex.double([g_phi, g_psi, g_the]))
Example No. 18
def show_terms(structure, term_table, coseq_dict=None):
  assert len(term_table) == structure.scatterers().size()
  for scatterer,terms in zip(structure.scatterers(), term_table):
    print scatterer.label, list(terms),
    if (coseq_dict is not None):
      terms_to_match = list(terms[1:])
      have_match = False
      tags = coseq_dict.keys()
      tags.sort()
      for tag in tags:
        for coseq_terms in coseq_dict[tag]:
          n = min(len(coseq_terms), len(terms_to_match))
          if (coseq_terms[:n] == terms_to_match[:n]):
            print tag,
            have_match = True
      if (not have_match):
        print "Unknown",
    print
  sums_terms = flex.double()
  multiplicities = flex.double()
  for scatterer,terms in zip(structure.scatterers(), term_table):
    sums_terms.append(flex.sum(flex.size_t(list(terms))))
    multiplicities.append(scatterer.multiplicity())
  print "TD%d: %.2f" % (
    len(terms)-1, flex.mean_weighted(sums_terms, multiplicities))
Example No. 19
        def jacobian_callable(pfh,current_values):
          rotx = current_values[0]
          roty = current_values[1]
          from scitbx.matrix import sqr
          Ai = sqr(OO.input_orientation.reciprocal_matrix())
          Rx = col((1,0,0)).axis_and_angle_as_r3_rotation_matrix(rotx)
          Ry = col((0,1,0)).axis_and_angle_as_r3_rotation_matrix(roty)
          Rz = col((0,0,1)).axis_and_angle_as_r3_rotation_matrix(0.0)
          dRx_drotx = col((1,0,0)).axis_and_angle_as_r3_derivative_wrt_angle(rotx)
          dRy_droty = col((0,1,0)).axis_and_angle_as_r3_derivative_wrt_angle(roty)
          dA_drotx = Rz * Ry * dRx_drotx * Ai
          dA_droty = Rz * dRy_droty * Rx * Ai

          dexc_drotx = [
            OO.ucbp3.simple_part_excursion_part_rotxy(
            wavelength = OO.central_wavelength_ang,
            observation_no = obsno,
            dA_drotxy = dA_drotx)
            for obsno in xrange(len(OO.parent.indexed_pairs))]

          dexc_droty = [
            OO.ucbp3.simple_part_excursion_part_rotxy(
            wavelength = OO.central_wavelength_ang,
            observation_no = obsno,
            dA_drotxy = dA_droty)
            for obsno in xrange(len(OO.parent.indexed_pairs))]
          return flex.double(dexc_drotx)/(2.*math.pi), flex.double(dexc_droty)/(2.*math.pi)
Example No. 20
 def pack_parameters(O):
   O.x = flex.double()
   O.l = flex.double()
   O.u = flex.double()
   O.nbd = flex.int()
   sstab = O.xray_structure.site_symmetry_table()
   for i_sc,sc in enumerate(O.xray_structure.scatterers()):
     assert sc.flags.use_u_iso()
     assert not sc.flags.use_u_aniso()
     #
     site_symmetry = sstab.get(i_sc)
     if (site_symmetry.is_point_group_1()):
       p = sc.site
     else:
       p = site_symmetry.site_constraints().independent_params(
         all_params=sc.site)
     O.x.extend(flex.double(p))
     O.l.resize(O.x.size(), 0)
     O.u.resize(O.x.size(), 0)
     O.nbd.resize(O.x.size(), 0)
     #
     O.x.append(sc.u_iso)
     O.l.append(0)
     O.u.append(0)
     O.nbd.append(1)
   O.x *= O.p_as_x
Example No. 21
def get_f_masks(xrs, miller_array):
  crystal_gridding = maptbx.crystal_gridding(
    unit_cell          = xrs.unit_cell(),
    d_min              = miller_array.d_min(),
    resolution_factor  = 1./4,
    symmetry_flags     = maptbx.use_space_group_symmetry,
    space_group_info   = xrs.space_group_info())
  mp = mmtbx.masks.mask_master_params.extract()
  mask_data = mmtbx.masks.mask_from_xray_structure(
    xray_structure           = xrs,
    p1                       = True,
    solvent_radius           = mp.solvent_radius,
    shrink_truncation_radius = mp.shrink_truncation_radius,
    for_structure_factors    = True,
    n_real                   = crystal_gridding.n_real()).mask_data
  n = mask_data.all()
  mask_data1 = flex.double(flex.grid(n), 0)
  mask_data2 = flex.double(flex.grid(n), 0)
  I,J,K = xrange(n[0]), xrange(n[1]), xrange(n[2])
  for i in I:
    for j in J:
      for k in K:
        if(i < n[0]//2 and j < n[1]//2 and k < n[2]//2):
          mask_data1[i,j,k]=mask_data[i,j,k]
        else:
          mask_data2[i,j,k]=mask_data[i,j,k]
  f_mask1 = miller_array.structure_factors_from_map(map=mask_data1,
    use_scale = True, anomalous_flag = False, use_sg = False)
  f_mask2 = miller_array.structure_factors_from_map(map=mask_data2,
    use_scale = True, anomalous_flag = False, use_sg = False)
  return [f_mask1.data(), f_mask2.data()]
Example No. 22
  def calc_scales(self, params_in):
    """ Calculate an array of scales based on scale, B and wavelength using the
    equation $scale * exp(-2*B*(sin(theta)/wavelength)^2)$

    Reuturn a scale vector for all the reflections in self, using the
    parameters defined in the array params.

    :params: a tuple of the form appropriate for the crystal symetry, such as
    one produced by get_x0(). This method only uses params[0] (scale) and
    params[1] (B)

    :return: a list of scales for all the miller indicies in self
    """
    if self.use_scales:
      scale = params_in[0]
      B = params_in[1]
      sin_sq_theta = self.miller_array.two_theta(wavelength=self.wavelength) \
        .sin_theta_over_lambda_sq().data()

      # a pure scale vector, one entry per reflection (multiplying in
      # miller_array.data() here would return scaled data, not scales,
      # contradicting the docstring and the else branch)
      scales = flex.double(self.miller_array.size(), scale)
      exp_arg = flex.double(-2 * B * sin_sq_theta)
      return scales * flex.exp(exp_arg)
    else:
      # vector of ones (no scaling), one entry per reflection
      return flex.double(self.miller_array.size(), 1)
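A minimal standalone check of the scale expression itself, with hypothetical
scale, B and s_sq = (sin(theta)/wavelength)^2 values:

import math
scale, B = 1.5, 20.0
s_sq = 0.01
print scale * math.exp(-2 * B * s_sq)  # ~1.005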
Example No. 23
def read_table(file_name):
    atomic_number = None
    number_of_electrons = None
    x = flex.double()
    y = flex.double()
    sigmas = flex.double()
    lf = line_feeder(open(file_name))
    while 1:
        line = lf.next()
        if lf.eof:
            break
        if line.startswith("   FORM: ATOMIC NUMBER="):
            atomic_number = float(line.split("=")[1])
            assert int(atomic_number) == atomic_number
        elif line.startswith("   FORM: # ELECTRONS="):
            number_of_electrons = float(line.split("=")[1])
            assert int(number_of_electrons) == number_of_electrons
        elif line.startswith("        X (1/A)"):
            assert atomic_number == number_of_electrons
            while 1:
                line = lf.next()
                assert not lf.eof
                if line == " *** END OF DATA ***":
                    lf.eof = True
                    break
                vals_str = line.split()
                for val_str in vals_str:
                    assert len(val_str) == 13
                x.append(float(vals_str[0]))
                y.append(float(vals_str[1]))
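                # sigma: half a unit in the last mantissa digit, reusing the
                # value's own exponent (vals_str[1][-4:] is e.g. "E+00")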
                assert vals_str[1][-4] == "E"
                sigmas.append(float("0.00000005" + vals_str[1][-4:]))
    return table(int(atomic_number), x, y * atomic_number, sigmas)
Example No. 24
 def join(self,data_dict):
   """The join() function closes the database.
   """
   # Terminate the consumer process by feeding it a None command and
   # wait for it to finish.
   self._db_commands_queue.put(None)
   self._db_commands_queue.join()
   self._db_results_queue.join()
   self._semaphore.acquire()
   nrows = data_dict["rows"].get_obj()[0]
   print "writing observation pickle with %d rows"%nrows
   kwargs = dict(
     miller_lookup =      flex.size_t(data_dict["miller_proxy"].get_obj()[:nrows]),
     observed_intensity = flex.double(data_dict["intensity_proxy"].get_obj()[:nrows]),
     observed_sigI =      flex.double(data_dict["sigma_proxy"].get_obj()[:nrows]),
     frame_lookup =       flex.size_t(data_dict["frame_proxy"].get_obj()[:nrows]),
     original_H =         flex.int   (data_dict["H_proxy"].get_obj()[:nrows]),
     original_K =         flex.int   (data_dict["K_proxy"].get_obj()[:nrows]),
     original_L =         flex.int   (data_dict["L_proxy"].get_obj()[:nrows]),
   )
   import cPickle as pickle
   pickle.dump(kwargs, open(self.params.output.prefix+"_observation.pickle","wb"),
               pickle.HIGHEST_PROTOCOL)
   pickle.dump(self.miller, open(self.params.output.prefix+"_miller.pickle","wb"),
               pickle.HIGHEST_PROTOCOL)
   pickle.dump(data_dict["xtal_proxy"].get_obj().raw.replace('\0','').strip(),
                       open(self.params.output.prefix+"_frame.pickle","wb"),pickle.HIGHEST_PROTOCOL)
   return kwargs
Example No. 25
def exercise_miller_array_data_types():
    miller_set = crystal.symmetry(unit_cell=(10, 10, 10, 90, 90, 90), space_group_symbol="P1").miller_set(
        indices=flex.miller_index([(1, 2, 3), (4, 5, 6)]), anomalous_flag=False
    )
    for data in [
        flex.bool([False, True]),
        flex.int([0, 1]),
        flex.size_t([0, 1]),
        flex.double([0, 1]),
        flex.complex_double([0, 1]),
    ]:
        miller_array = miller_set.array(data=data)
        if op.isfile("tmp_iotbx_mtz.mtz"):
            os.remove("tmp_iotbx_mtz.mtz")
        assert not op.isfile("tmp_iotbx_mtz.mtz")
        miller_array.as_mtz_dataset(column_root_label="DATA").mtz_object().write(file_name="tmp_iotbx_mtz.mtz")
        assert op.isfile("tmp_iotbx_mtz.mtz")
        mtz_obj = mtz.object(file_name="tmp_iotbx_mtz.mtz")
        miller_arrays_read_back = mtz_obj.as_miller_arrays()
        assert len(miller_arrays_read_back) == 1
        miller_array_read_back = miller_arrays_read_back[0]
        assert miller_array_read_back.indices().all_eq(miller_array.indices())
        if miller_array.is_integer_array() or miller_array.is_bool_array():
            assert miller_array_read_back.data().all_eq(flex.int([0, 1]))
        elif miller_array.is_real_array():
            assert miller_array_read_back.data().all_eq(flex.double([0, 1]))
        elif miller_array.is_complex_array():
            assert miller_array_read_back.data().all_eq(flex.complex_double([0, 1]))
        else:
            raise RuntimeError("Programming error.")
def testing_function_for_rsfit(basic_map,delta_h,xray_structure,out):
  for i_trial in xrange(100):
    sites_cart = flex.vec3_double((flex.random_double(size=3)-0.5)*1)
    tmp_sites_cart = sites_cart.deep_copy()
    for i in xrange(3):
      ref = lbfgs(
        basic_map=basic_map,
        sites_cart=tmp_sites_cart,
        delta_h=delta_h)
      temp = flex.double(ref.sites_cart[0])-flex.double((0,0,0))
      temp = math.sqrt(temp.dot(temp))
      if temp <= 2*delta_h:
        break
      print >> out, "recycling:", ref.sites_cart[0]
      tmp_sites_cart = ref.sites_cart
    for site,sitec in zip(ref.sites_cart,xray_structure.sites_cart()):
      print >> out, i_trial
      print >> out, sitec
      print >> out, sites_cart[0]
      print >> out, site
      temp = flex.double(site)-flex.double(sitec)
      temp = math.sqrt(temp.dot(temp))
      print >> out, temp, delta_h
      assert temp <= delta_h*2
      print >> out
Example No. 27
  def __init__(self):
    self.data_obs1 = flex.double(2,1.0)
    self.data_obs2 = flex.double(2,3.0)
    self.sigma_obs1 = flex.double(2,0.1)
    self.sigma_obs2 = flex.double(2,1)
    self.unit_cell = uctbx.unit_cell('20, 30, 40, 90.0, 90.0, 90.0')
    #mi = flex.miller_index(((1,2,3), (1,2,3)))
    self.mi = flex.miller_index(((1,2,3), (5,6,7)))
    self.xs = crystal.symmetry((20,30,40), "P 2 2 2")
    self.ms = miller.set(self.xs, self.mi)
    self.u = [1,2,3,4,5,6]
    self.p_scale = 0.40
    #self.u = [0,0,0,0,0,0]
    #self.p_scale = 0.00

    self.ls_i_wt = scaling.least_squares_on_i_wt(
      self.mi,
      self.data_obs1,
      self.sigma_obs1,
      self.data_obs2,
      self.sigma_obs2,
      self.p_scale,
      self.unit_cell,
      self.u)
    self.ls_i = scaling.least_squares_on_i(
      self.mi,
      self.data_obs1,
      self.sigma_obs1,
      self.data_obs2,
      self.sigma_obs2,
      self.p_scale,
      self.unit_cell,
      self.u)
    self.ls_f_wt = scaling.least_squares_on_f_wt(
      self.mi,
      self.data_obs1,
      self.sigma_obs1,
      self.data_obs2,
      self.sigma_obs2,
      self.p_scale,
      self.unit_cell,
      self.u)
    self.ls_f = scaling.least_squares_on_f(
      self.mi,
      self.data_obs1,
      self.sigma_obs1,
      self.data_obs2,
      self.sigma_obs2,
      self.p_scale,
      self.unit_cell,
      self.u)

    self.tst_ls_f_wt()
    self.tst_ls_i_wt()
    self.tst_ls_f()
    self.tst_ls_i()
    self.tst_hes_ls_i_wt()
    self.tst_hes_ls_f_wt()
    self.tst_hes_ls_i()
    self.tst_hes_ls_f()
Example No. 28
def exercise_x1():
  u_iso = flex.double([1.0,2.0,3.0,4.0,5.0])
  t = tools.t_from_u_cart(u_iso, 1.e-9)
  assert t == (1.0, 1.0, 1.0, 0.0, 0.0, 0.0)
  u_iso = flex.double([7.0,2.0,3.0,9.0,5.0])
  t = tools.t_from_u_cart(u_iso, 1.e-9)
  assert t == (2.0, 2.0, 2.0, 0.0, 0.0, 0.0)
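In both asserts above, the diagonal of the returned T equals the smallest input
u_iso; a plain restatement of that observed pattern (a hypothesis read off the
asserts, not the documented contract of t_from_u_cart):

u_iso = [7.0, 2.0, 3.0, 9.0, 5.0]
assert min(u_iso) == 2.0  # matches the T diagonal asserted above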
Example No. 29
    def set_fstats(self, fstats):
        self.all_spots = None
        self.spots = collections.OrderedDict()
        self.total_integrated_signal = {}
        self.mean_integrated_signal = {}
        self.median_integrated_signal = {}
        self.intensities = {}
        self.resolutions = {}
        self.n_spots = {}

        for k in sorted(fstats.nodes.keys()):
            node = fstats.nodes[k] # XXX some data in node.data will be corrupted (e.g. resolution, wts) but x and y coordinates look ok (it works later). why?
            if node.descriptor == "spots_total":
                self.all_spots = node.data
            else:
                self.spots[node.descriptor] = node.data

        # Pre-calculate stats
        for k in self.keys():
            summed_wts = [flex.sum(spot.wts) for spot in self.get_spots(k)]
            self.intensities[k] = summed_wts
            self.resolutions[k] = [spot.resolution for spot in self.get_spots(k)]

            total_summed = flex.sum(flex.double(summed_wts))

            if len(summed_wts) > 0:
                self.mean_integrated_signal[k] = total_summed / len(summed_wts)
                self.median_integrated_signal[k] = flex.median(flex.double(summed_wts))
            else:
                self.mean_integrated_signal[k] = 0.
                self.median_integrated_signal[k] = 0.

            self.total_integrated_signal[k] = total_summed
            self.n_spots[k] = len(summed_wts)
Example No. 30
 def __init__(self, file_name=None, file_object=None, max_header_lines=30):
   assert [file_name, file_object].count(None) == 1
   if (file_object is None):
     file_object = open(file_name)
   self._names = None
   self._indices = flex.miller_index()
   self._data = flex.double()
   self._sigmas = flex.double()
   have_data = False
   self.n_lines = 0
   for raw_line in file_object:
     self.n_lines += 1
     ifs = index_fobs_sigma_line(raw_line)
     if (not ifs.is_complete):
       if (raw_line.strip().lower() == "end"):
         break
       if (self.n_lines == max_header_lines or have_data):
         raise RuntimeError, "Unkown file format."
     else:
       if (self._names is None): self._names = ifs.names
       self._indices.append(ifs.index)
       self._data.append(ifs.fobs)
       self._sigmas.append(ifs.sigma)
       have_data = True
   if (not have_data):
     raise RuntimeError, "No data found in file."
Example No. 31
def load_result(file_name, ref_bravais_type, reference_cell, params,
                reindex_op, out):
    # Pull relevant information from integrated.pickle and refined_experiments.json
    # files to construct the equivalent of a single integration pickle (frame).
    try:
        frame = frame_extractor.ConstructFrameFromFiles(
            eval_ending(file_name)[2],
            eval_ending(file_name)[1]).make_frame()
    except Exception:
        return None

    # If @p file_name cannot be read, the load_result() function returns
    # @c None.

    print "Step 2.  Load frame obj and filter on lattice & cell with", reindex_op
    """
  Take a frame with all expected contents of an integration pickle, confirm
  that it contains the appropriate data, and check the lattice type and unit
  cell against the reference settings - if rejected, raises an exception
  (for tracking statistics).
  """
    # Ignore frames with no integrated reflections.
    obj = frame
    if ("observations" not in obj):
        return None

    if reindex_op == "h,k,l":
        pass
    else:
        obj["observations"][0].apply_change_of_basis(reindex_op)
        pass

    result_array = obj["observations"][0]
    unit_cell = result_array.unit_cell()
    sg_info = result_array.space_group_info()
    print >> out, ""
    print >> out, "-" * 80
    print >> out, file_name
    print >> out, sg_info
    print >> out, unit_cell

    #Check for pixel size (at this point we are assuming we have square pixels, all experiments described in one
    #refined_experiments.json file use the same detector, and all panels on the detector have the same pixel size)

    if params.pixel_size is not None:
        pixel_size = params.pixel_size
    elif "pixel_size" in obj:
        pixel_size = obj["pixel_size"]
    else:
        raise Sorry(
            "Cannot find pixel size. Specify appropriate pixel size in mm for your detector in phil file."
        )

    #Calculate displacements based on pixel size
    assert obj['mapped_predictions'][0].size() == obj["observations"][0].size()
    mm_predictions = pixel_size * (obj['mapped_predictions'][0])
    mm_displacements = flex.vec3_double()
    cos_two_polar_angle = flex.double()
    for pred in mm_predictions:
        mm_displacements.append(
            (pred[0] - obj["xbeam"], pred[1] - obj["ybeam"], 0.0))
        cos_two_polar_angle.append(
            math.cos(
                2. *
                math.atan2(pred[1] - obj["ybeam"], pred[0] - obj["xbeam"])))
    obj["cos_two_polar_angle"] = cos_two_polar_angle
    #then convert to polar angle and compute polarization correction

    if (not bravais_lattice(sg_info.type().number()) == ref_bravais_type):
        raise WrongBravaisError(
            "Skipping cell in different Bravais type (%s)" % str(sg_info))
    if (not unit_cell.is_similar_to(
            other=reference_cell,
            relative_length_tolerance=params.unit_cell_length_tolerance,
            absolute_angle_tolerance=params.unit_cell_angle_tolerance)):
        raise OutlierCellError(
            "Skipping cell with outlier dimensions (%g %g %g %g %g %g)" %
            unit_cell.parameters())
    print >> out, "Integrated data:"
    result_array.show_summary(f=out, prefix="  ")
    # XXX don't force reference setting here, it will be done later, after the
    # original unit cell is recorded
    return obj
Example No. 32
    def visualise_orientational_distribution(self,
                                             axes_to_return=None,
                                             cbar=True):
        """ Creates a plot of the orientational distribution of the unit cells.

    :param axes_to_return: if None, plot to screen; otherwise requires 3 axes objects, which are returned.
    :param cbar: boolean to specify if a color bar should be used.
    """
        import matplotlib.pyplot as plt
        import matplotlib.patheffects as patheffects
        from mpl_toolkits.basemap import Basemap
        import scipy.ndimage as ndi

        def cart2sph(x, y, z):
            # cctbx (+z to source, y to ceiling) to
            # lab frame (+x to source, z to ceiling)
            z, x, y = x, y, z
            dxy = np.sqrt(x**2 + y**2)
            r = np.sqrt(dxy**2 + z**2)
            theta = np.arctan2(y, x)
            phi = np.arctan2(z,
                             dxy)  # angle of the z axis relative to xy plane
            theta, phi = np.rad2deg([theta, phi])
            return theta % 360, phi, r

        def xy_lat_lon_from_orientation(orientation_array, axis_id):
            logger.debug("axis_id: {}".format(axis_id))
            dist = math.sqrt(orientation_array[axis_id][0]**2 +
                             orientation_array[axis_id][1]**2 +
                             orientation_array[axis_id][2]**2)
            flon, flat, bla = cart2sph(orientation_array[axis_id][0] / dist,
                                       orientation_array[axis_id][1] / dist,
                                       orientation_array[axis_id][2] / dist)
            x, y = euler_map(flon, flat)
            return x, y, flon, flat

        orientations = [
            flex.vec3_double(flex.double(image.orientation.direct_matrix()))
            for image in self.members
        ]

        space_groups = [
            image.orientation.unit_cell().lattice_symmetry_group()
            for image in self.members
        ]

        # Now do all the plotting
        if axes_to_return is None:
            plt.figure(figsize=(10, 14))
            axes_to_return = [
                plt.subplot2grid((3, 1), (0, 0)),
                plt.subplot2grid((3, 1), (1, 0)),
                plt.subplot2grid((3, 1), (2, 0))
            ]
            show_image = True
        else:
            assert len(axes_to_return) == 3, "If using axes option, must hand" \
                                             " 3 axes to function."
            show_image = False

        axis_ids = [0, 1, 2]
        labels = ["a", "b", "c"]

        for ax, axis_id, label in zip(axes_to_return, axis_ids, labels):

            # Lists of x,y,lat,long for the master orientation, and for all
            # symmetry mates.
            x_coords = []
            y_coords = []
            lon = []
            lat = []
            sym_x_coords = []
            sym_y_coords = []
            sym_lon = []
            sym_lat = []
            euler_map = Basemap(projection='eck4', lon_0=0, ax=ax)

            for orientation, point_group_type in zip(orientations,
                                                     space_groups):

                # Get position of main spots.
                main_x, main_y, main_lon, main_lat \
                  = xy_lat_lon_from_orientation(list(orientation), axis_id)
                x_coords.append(main_x)
                y_coords.append(main_y)
                lon.append(main_lon)
                lat.append(main_lat)

                # Get position of symmetry mates
                symmetry_operations = list(point_group_type.smx())[1:]
                for mx in symmetry_operations:
                    rotated_orientation = list(mx.r().as_double() *
                                               orientation)  # <--
                    # should make sense if orientation was a vector, not clear what is
                    # going on since orientation is a matrix. Or, make some test cases
                    # with 'orientation' and see if they behave as desired.
                    sym_x, sym_y, sym_lo, sym_la \
                      = xy_lat_lon_from_orientation(rotated_orientation, axis_id)
                    #assert (sym_x, sym_y) != (main_x, main_y)
                    sym_x_coords.append(sym_x)
                    sym_y_coords.append(sym_y)
                    sym_lon.append(sym_lo)
                    sym_lat.append(sym_la)

            # Plot each image as a yellow sphere
            logger.debug(len(x_coords))
            euler_map.plot(x_coords,
                           y_coords,
                           'oy',
                           markersize=4,
                           markeredgewidth=0.5)

            # Plot the symmetry mates as black crosses
            #euler_map.plot(sym_x_coords, sym_y_coords, 'kx')

            # Use a histogram to bin the data in latitude/longitude space,
            # smooth it, then plot this as a contour map. This is for all the
            # symmetry-related copies
            #density_hist = np.histogram2d(lat + sym_lat, lon + sym_lon,
            #                                    bins=[range(-90, 91), range(0, 361)])
            # No symmetry mates until we can verify what the cctbx libs are doing
            density_hist = np.histogram2d(lat,
                                          lon,
                                          bins=[range(-90, 91),
                                                range(0, 361)])
            smoothed = ndi.gaussian_filter(density_hist[0], (15, 15),
                                           mode='wrap')
            local_intensity = []
            x_for_plot = []
            y_for_plot = []
            for _lat in range(0, 180):
                for _lon in range(0, 360):
                    _x, _y = euler_map(density_hist[2][_lon],
                                       density_hist[1][_lat])
                    x_for_plot.append(_x)
                    y_for_plot.append(_y)
                    local_intensity.append(smoothed[_lat, _lon])
            cs = euler_map.contourf(np.array(x_for_plot),
                                    np.array(y_for_plot),
                                    np.array(local_intensity),
                                    tri=True)

            #  Pretty up graph
            if cbar:
                _cbar = plt.colorbar(cs, ax=ax)
                _cbar.ax.set_ylabel('spot density [AU]')
            middle = euler_map(0, 0)
            path_effect = [patheffects.withStroke(linewidth=3, foreground="w")]
            euler_map.plot(middle[0],
                           middle[1],
                           'o',
                           markersize=10,
                           mfc='none')
            euler_map.plot(middle[0], middle[1], 'x', markersize=8)
            ax.annotate("beam",
                        xy=(0.52, 0.52),
                        xycoords='axes fraction',
                        size='medium',
                        path_effects=path_effect)
            euler_map.drawmeridians(np.arange(0, 360, 60),
                                    labels=[0, 0, 1, 0],
                                    fontsize=10)
            euler_map.drawparallels(np.arange(-90, 90, 30),
                                    labels=[1, 0, 0, 0],
                                    fontsize=10)
            ax.annotate(label,
                        xy=(-0.05, 0.9),
                        xycoords='axes fraction',
                        size='x-large',
                        weight='demi')

        if show_image:
            plt.show()

        return axes_to_return
Example No. 33
    def __init__(self,
                 map_1,
                 xray_structure,
                 fft_map,
                 atom_radius,
                 hydrogen_atom_radius,
                 model_i,
                 number_previous_scatters,
                 ignore_hd=False,
                 residue_detail=True,
                 selection=None,
                 pdb_hierarchy=None):
        self.xray_structure = xray_structure
        self.selection = selection
        self.pdb_hierarchy = pdb_hierarchy
        self.result = []
        self.map_1_size = map_1.size()
        self.map_1_stat = maptbx.statistics(map_1)
        self.atoms_with_labels = None
        self.residue_detail = residue_detail
        self.model_i = model_i
        if (pdb_hierarchy is not None):
            self.atoms_with_labels = list(pdb_hierarchy.atoms_with_labels())
        scatterers = self.xray_structure.scatterers()
        sigma_occ = flex.double()
        if (self.selection is None):
            self.selection = flex.bool(scatterers.size(), True)
        real_map_unpadded = fft_map.real_map_unpadded()
        sites_cart = self.xray_structure.sites_cart()

        if not self.residue_detail:
            self.gifes = [
                None,
            ] * scatterers.size()
            self._result = [
                None,
            ] * scatterers.size()
            #
            atom_radii = flex.double(scatterers.size(), atom_radius)
            for i_seq, sc in enumerate(scatterers):
                if (self.selection[i_seq]):
                    if (sc.element_symbol().strip().lower() in ["h", "d"]):
                        atom_radii[i_seq] = hydrogen_atom_radius
            #
            for i_seq, site_cart in enumerate(sites_cart):
                if (self.selection[i_seq]):
                    sel = maptbx.grid_indices_around_sites(
                        unit_cell=self.xray_structure.unit_cell(),
                        fft_n_real=real_map_unpadded.focus(),
                        fft_m_real=real_map_unpadded.all(),
                        sites_cart=flex.vec3_double([site_cart]),
                        site_radii=flex.double([atom_radii[i_seq]]))
                    self.gifes[i_seq] = sel
                    m1 = map_1.select(sel)
                    ed1 = map_1.eight_point_interpolation(
                        scatterers[i_seq].site)
                    sigma_occ.append(ed1)
                    a = None
                    if (self.atoms_with_labels is not None):
                        a = self.atoms_with_labels[i_seq]
                    self._result[i_seq] = group_args(atom=a,
                                                     m1=m1,
                                                     ed1=ed1,
                                                     xyz=site_cart)
            self.xray_structure.set_occupancies(sigma_occ)

            ### For testing other residue averaging options
            residues = self.extract_residues(
                model_i=model_i,
                number_previous_scatters=number_previous_scatters)
            self.xray_structure.residue_selections = residues

        # Residue detail
        if self.residue_detail:
            assert self.pdb_hierarchy is not None
            residues = self.extract_residues(
                model_i=model_i,
                number_previous_scatters=number_previous_scatters)
            self.gifes = [
                None,
            ] * len(residues)
            self._result = [
                None,
            ] * len(residues)
            for i_seq, residue in enumerate(residues):
                residue_sites_cart = sites_cart.select(residue.selection)
                if 0: print(i_seq, list(residue.selection))  # DEBUG
                sel = maptbx.grid_indices_around_sites(
                    unit_cell=self.xray_structure.unit_cell(),
                    fft_n_real=real_map_unpadded.focus(),
                    fft_m_real=real_map_unpadded.all(),
                    sites_cart=residue_sites_cart,
                    site_radii=flex.double(residue.selection.size(),
                                           atom_radius))
                self.gifes[i_seq] = sel
                m1 = map_1.select(sel)
                ed1 = flex.double()
                for i_seq_r in residue.selection:
                    ed1.append(
                        map_1.eight_point_interpolation(
                            scatterers[i_seq_r].site))
                self._result[i_seq] = \
                  group_args(residue = residue, m1 = m1, ed1 = flex.mean(ed1),
                    xyz=residue_sites_cart.mean(), n_atoms=residue_sites_cart.size())

                residue_scatterers = scatterers.select(residue.selection)
                residue_ed1 = flex.double()
                for n, scatter in enumerate(residue_scatterers):
                    if ignore_hd:
                        if scatter.element_symbol() not in ['H', 'D']:
                            residue_ed1.append(ed1[n])
                    else:
                        residue_ed1.append(ed1[n])
                for x in range(ed1.size()):
                    sigma_occ.append(flex.mean(residue_ed1))

            self.xray_structure.set_occupancies(sigma_occ)
            self.xray_structure.residue_selections = residues

        del map_1
Example No. 34
    def run(self, args, command_name, out=sys.stdout):
        command_line = (iotbx_option_parser(
            usage="%s [options]" % command_name,
            description='Example: %s data.mtz data.mtz ref_model.pdb' %
            command_name).option(
                None,
                "--show_defaults",
                action="store_true",
                help="Show list of parameters.")).process(args=args)

        cif_file = None
        processed_args = utils.process_command_line_args(
            args=args, log=sys.stdout, master_params=master_phil)
        params = processed_args.params
        if (params is None): params = master_phil
        self.params = params.extract().ensemble_probability
        pdb_file_names = processed_args.pdb_file_names
        if len(pdb_file_names) != 1:
            raise Sorry("Only one PDB structure may be used")
        pdb_file = file_reader.any_file(pdb_file_names[0])
        self.log = multi_out()
        self.log.register(label="stdout", file_object=sys.stdout)
        self.log.register(label="log_buffer",
                          file_object=StringIO(),
                          atexit_send_to=None)
        sys.stderr = self.log
        log_file = open(
            pdb_file_names[0].split('/')[-1].replace('.pdb', '') +
            '_pensemble.log', "w")

        self.log.replace_stringio(old_label="log_buffer",
                                  new_label="log",
                                  new_file_object=log_file)
        utils.print_header(command_name, out=self.log)
        params.show(out=self.log)
        #
        f_obs = None
        r_free_flags = None
        reflection_files = processed_args.reflection_files

        if self.params.fobs_vs_fcalc_post_nll:
            if len(reflection_files) == 0:
                raise Sorry(
                    "Fobs from input MTZ required for fobs_vs_fcalc_post_nll")

        if len(reflection_files) > 0:
            crystal_symmetry = processed_args.crystal_symmetry
            print('Reflection file : ',
                  processed_args.reflection_file_names[0],
                  file=self.log)
            utils.print_header("Model and data statistics", out=self.log)
            rfs = reflection_file_server(
                crystal_symmetry=crystal_symmetry,
                reflection_files=processed_args.reflection_files,
                log=self.log)

            parameters = utils.data_and_flags_master_params().extract()
            determine_data_and_flags_result = utils.determine_data_and_flags(
                reflection_file_server=rfs,
                parameters=parameters,
                data_parameter_scope="refinement.input.xray_data",
                flags_parameter_scope="refinement.input.xray_data.r_free_flags",
                data_description="X-ray data",
                keep_going=True,
                log=self.log)
            f_obs = determine_data_and_flags_result.f_obs
            number_of_reflections = f_obs.indices().size()
            r_free_flags = determine_data_and_flags_result.r_free_flags
            test_flag_value = determine_data_and_flags_result.test_flag_value
            if (r_free_flags is None):
                r_free_flags = f_obs.array(
                    data=flex.bool(f_obs.data().size(), False))

        # process PDB
        pdb_file.assert_file_type("pdb")
        #
        pdb_in = hierarchy.input(file_name=pdb_file.file_name)
        ens_pdb_hierarchy = pdb_in.construct_hierarchy()
        ens_pdb_hierarchy.atoms().reset_i_seq()
        ens_pdb_xrs_s = pdb_in.input.xray_structures_simple()
        number_structures = len(ens_pdb_xrs_s)
        print('Number of structures in ensemble : ',
              number_structures,
              file=self.log)

        # Calculate sigmas from input map only
        if self.params.assign_sigma_from_map and self.params.ensemble_sigma_map_input is not None:
            # process MTZ
            input_file = file_reader.any_file(
                self.params.ensemble_sigma_map_input)
            if input_file.file_type == "hkl":
                if input_file.file_object.file_type() != "ccp4_mtz":
                    raise Sorry("Only MTZ format accepted for map input")
                else:
                    mtz_file = input_file
            else:
                raise Sorry("Only MTZ format accepted for map input")
            miller_arrays = mtz_file.file_server.miller_arrays
            map_coeffs_1 = miller_arrays[0]
            #
            xrs_list = []
            for n, ens_pdb_xrs in enumerate(ens_pdb_xrs_s):
                # get sigma levels from ensemble fc for each structure
                xrs = get_map_sigma(ens_pdb_hierarchy=ens_pdb_hierarchy,
                                    ens_pdb_xrs=ens_pdb_xrs,
                                    map_coeffs_1=map_coeffs_1,
                                    residue_detail=self.params.residue_detail,
                                    ignore_hd=self.params.ignore_hd,
                                    log=self.log)
                xrs_list.append(xrs)
            # write ensemble pdb file, occupancies as sigma level
            filename = pdb_file_names[0].split('/')[-1].replace(
                '.pdb',
                '') + '_vs_' + self.params.ensemble_sigma_map_input.replace(
                    '.mtz', '') + '_pensemble.pdb'
            write_ensemble_pdb(filename=filename,
                               xrs_list=xrs_list,
                               ens_pdb_hierarchy=ens_pdb_hierarchy)

        # Do full analysis vs Fobs
        else:
            model_map_coeffs = []
            fmodel = None
            # Get <fcalc>
            for model, ens_pdb_xrs in enumerate(ens_pdb_xrs_s):
                ens_pdb_xrs.set_occupancies(1.0)
                if model == 0:
                    # If mtz not supplied get fobs from xray structure...
                    # Use input Fobs for scoring against nll
                    if self.params.fobs_vs_fcalc_post_nll:
                        dummy_fobs = f_obs
                    else:
                        if f_obs is None:
                            if self.params.fcalc_high_resolution is None:
                                raise Sorry(
                                    "Please supply high resolution limit or input mtz file."
                                )
                            dummy_dmin = self.params.fcalc_high_resolution
                            dummy_dmax = self.params.fcalc_low_resolution
                        else:
                            print(
                                'Supplied mtz used to determine high and low resolution cutoffs',
                                file=self.log)
                            dummy_dmax, dummy_dmin = f_obs.d_max_min()
                        #
                        dummy_fobs = abs(
                            ens_pdb_xrs.structure_factors(
                                d_min=dummy_dmin).f_calc())
                        dummy_fobs.set_observation_type_xray_amplitude()
                        # If mtz supplied, free flags are overwritten to prevent array size error
                        r_free_flags = dummy_fobs.array(
                            data=flex.bool(dummy_fobs.data().size(), False))
                    #
                    fmodel = utils.fmodel_simple(
                        scattering_table="wk1995",
                        xray_structures=[ens_pdb_xrs],
                        f_obs=dummy_fobs,
                        target_name='ls',
                        bulk_solvent_and_scaling=False,
                        r_free_flags=r_free_flags)
                    f_calc_ave = fmodel.f_calc().array(
                        data=fmodel.f_calc().data() * 0).deep_copy()
                    # XXX Important to ensure scale is identical for each model and <model>
                    fmodel.set_scale_switch = 1.0
                    f_calc_ave_total = fmodel.f_calc().data().deep_copy()
                else:
                    fmodel.update_xray_structure(xray_structure=ens_pdb_xrs,
                                                 update_f_calc=True,
                                                 update_f_mask=False)
                    f_calc_ave_total += fmodel.f_calc().data().deep_copy()
                print('Model :', model + 1, file=self.log)
                print("\nStructure vs real Fobs (no bulk solvent or scaling)",
                      file=self.log)
                print('Rwork          : %5.4f ' % fmodel.r_work(),
                      file=self.log)
                print('Rfree          : %5.4f ' % fmodel.r_free(),
                      file=self.log)
                print('K1             : %5.4f ' % fmodel.scale_k1(),
                      file=self.log)
                fcalc_edm = fmodel.electron_density_map()
                fcalc_map_coeffs = fcalc_edm.map_coefficients(map_type='Fc')
                fcalc_mtz_dataset = fcalc_map_coeffs.as_mtz_dataset(
                    column_root_label='Fc')
                if self.params.output_model_and_model_ave_mtz:
                    fcalc_mtz_dataset.mtz_object().write(
                        file_name=str(model + 1) + "_Fc.mtz")
                model_map_coeffs.append(fcalc_map_coeffs.deep_copy())

            fmodel.update(f_calc=f_calc_ave.array(f_calc_ave_total /
                                                  number_structures))
            print("\nEnsemble vs real Fobs (no bulk solvent or scaling)",
                  file=self.log)
            print('Rwork          : %5.4f ' % fmodel.r_work(), file=self.log)
            print('Rfree          : %5.4f ' % fmodel.r_free(), file=self.log)
            print('K1             : %5.4f ' % fmodel.scale_k1(), file=self.log)

            # Get <Fcalc> map
            fcalc_ave_edm = fmodel.electron_density_map()
            fcalc_ave_map_coeffs = fcalc_ave_edm.map_coefficients(
                map_type='Fc').deep_copy()
            fcalc_ave_mtz_dataset = fcalc_ave_map_coeffs.as_mtz_dataset(
                column_root_label='Fc')
            if self.params.output_model_and_model_ave_mtz:
                fcalc_ave_mtz_dataset.mtz_object().write(file_name="aveFc.mtz")
            fcalc_ave_map_coeffs = fcalc_ave_map_coeffs.fft_map()
            fcalc_ave_map_coeffs.apply_volume_scaling()
            fcalc_ave_map_data = fcalc_ave_map_coeffs.real_map_unpadded()
            fcalc_ave_map_stats = maptbx.statistics(fcalc_ave_map_data)

            print("<Fcalc> Map Stats :", file=self.log)
            fcalc_ave_map_stats.show_summary(f=self.log)
            offset = fcalc_ave_map_stats.min()
            model_neg_ll = []

            number_previous_scatters = 0

            # Run through structure list again and get probability
            xrs_list = []
            for model, ens_pdb_xrs in enumerate(ens_pdb_xrs_s):
                if self.params.verbose:
                    print('\n\nModel                   : ',
                          model + 1,
                          file=self.log)
                # Get model atom sigmas vs Fcalc
                fcalc_map = model_map_coeffs[model].fft_map()
                fcalc_map.apply_volume_scaling()
                fcalc_map_data = fcalc_map.real_map_unpadded()
                fcalc_map_stats = maptbx.statistics(fcalc_map_data)
                if self.params.verbose:
                    print("Fcalc map stats         :", file=self.log)
                fcalc_map_stats.show_summary(f=self.log)

                xrs = get_map_sigma(
                    ens_pdb_hierarchy=ens_pdb_hierarchy,
                    ens_pdb_xrs=ens_pdb_xrs,
                    fft_map_1=fcalc_map,
                    model_i=model,
                    residue_detail=self.params.residue_detail,
                    ignore_hd=self.params.ignore_hd,
                    number_previous_scatters=number_previous_scatters,
                    log=self.log)
                fcalc_sigmas = xrs.scatterers().extract_occupancies()
                del fcalc_map
                # Get model atom sigmas vs <Fcalc>
                xrs = get_map_sigma(
                    ens_pdb_hierarchy=ens_pdb_hierarchy,
                    ens_pdb_xrs=ens_pdb_xrs,
                    fft_map_1=fcalc_ave_map_coeffs,
                    model_i=model,
                    residue_detail=self.params.residue_detail,
                    ignore_hd=self.params.ignore_hd,
                    number_previous_scatters=number_previous_scatters,
                    log=self.log)

                ### For testing other residue averaging options
                #print xrs.residue_selections

                fcalc_ave_sigmas = xrs.scatterers().extract_occupancies()
                # Probability of model given <model>
                prob = fcalc_ave_sigmas / fcalc_sigmas
                # XXX debug option
                if False:
                    for n, p in enumerate(prob):
                        print(' {0:5d} {1:5.3f}'.format(n, p), file=self.log)
                # Set probability between 0 and 1
                # XXX Make Histogram / more stats
                prob_lss_zero = flex.bool(prob <= 0)
                prob_grt_one = flex.bool(prob > 1)
                prob.set_selected(prob_lss_zero, 0.001)
                prob.set_selected(prob_grt_one, 1.0)
                xrs.set_occupancies(prob)
                xrs_list.append(xrs)
                sum_neg_ll = sum(-flex.log(prob))
                model_neg_ll.append((sum_neg_ll, model))
                if self.params.verbose:
                    print('Model probability stats :', file=self.log)
                    prob.min_max_mean().show(out=self.log)
                    print('  Count < 0.0 : ',
                          prob_lss_zero.count(True),
                          file=self.log)
                    print('  Count > 1.0 : ',
                          prob_grt_one.count(True),
                          file=self.log)

                # For averaging by residue
                number_previous_scatters += ens_pdb_xrs.sites_cart().size()

            # write ensemble pdb file, occupancies as sigma level
            write_ensemble_pdb(
                filename=pdb_file_names[0].split('/')[-1].replace('.pdb', '') +
                '_pensemble.pdb',
                xrs_list=xrs_list,
                ens_pdb_hierarchy=ens_pdb_hierarchy)

            # XXX Test ordering models by nll
            # XXX Test removing nth percentile atoms
            if self.params.sort_ensemble_by_nll_score or self.params.fobs_vs_fcalc_post_nll:
                for percentile in [1.0, 0.975, 0.95, 0.9, 0.8, 0.6, 0.2]:
                    model_neg_ll = sorted(model_neg_ll)
                    f_calc_ave_total_reordered = None
                    print_list = []
                    for i_neg_ll in model_neg_ll:
                        xrs = xrs_list[i_neg_ll[1]]
                        nll_occ = xrs.scatterers().extract_occupancies()

                        # Set q=0 nth percentile atoms
                        sorted_nll_occ = sorted(nll_occ, reverse=True)
                        number_atoms = len(sorted_nll_occ)
                        percentile_prob_cutoff = sorted_nll_occ[
                            int(number_atoms * percentile) - 1]
                        cutoff_selections = flex.bool(
                            nll_occ < percentile_prob_cutoff)
                        cutoff_nll_occ = flex.double(nll_occ.size(),
                                                     1.0).set_selected(
                                                         cutoff_selections,
                                                         0.0)
                        #XXX Debug
                        if False:
                            print('\nDebug')
                            for x in range(len(cutoff_selections)):
                                print(cutoff_selections[x], nll_occ[x],
                                      cutoff_nll_occ[x])
                            print(percentile)
                            print(percentile_prob_cutoff)
                            print(cutoff_selections.count(True))
                            print(cutoff_selections.size())
                            print(cutoff_nll_occ.count(0.0))
                            print('Count q = 1           : ',
                                  cutoff_nll_occ.count(1.0))
                            print('Count scatterers size : ',
                                  cutoff_nll_occ.size())

                        xrs.set_occupancies(cutoff_nll_occ)
                        fmodel.update_xray_structure(xray_structure=xrs,
                                                     update_f_calc=True,
                                                     update_f_mask=True)

                        if f_calc_ave_total_reordered is None:
                            f_calc_ave_total_reordered = \
                                fmodel.f_calc().data().deep_copy()
                            f_mask_ave_total_reordered = \
                                fmodel.f_masks()[0].data().deep_copy()
                            cntr = 1
                        else:
                            f_calc_ave_total_reordered += \
                                fmodel.f_calc().data().deep_copy()
                            f_mask_ave_total_reordered += \
                                fmodel.f_masks()[0].data().deep_copy()
                            cntr += 1
                        fmodel.update(
                            f_calc=f_calc_ave.array(
                                f_calc_ave_total_reordered / cntr).deep_copy(),
                            f_mask=f_calc_ave.array(
                                f_mask_ave_total_reordered / cntr).deep_copy())

                        # Update solvent and scale
                        # XXX Will need to apply_back_trace on latest version
                        fmodel.set_scale_switch = 0
                        fmodel.update_all_scales()

                        # Reset occupancies for output
                        xrs.set_occupancies(nll_occ)
                        # k1 updated vs Fobs
                        if self.params.fobs_vs_fcalc_post_nll:
                            print_list.append([
                                cntr, i_neg_ll[0], i_neg_ll[1],
                                fmodel.r_work(),
                                fmodel.r_free()
                            ])

                    # Order models by nll and print summary
                    print(
                        '\nModels ranked by nll, <Fcalc> R-factors recalculated',
                        file=self.log)
                    print('Percentile cutoff : {0:5.3f}'.format(percentile),
                          file=self.log)
                    xrs_list_sorted_nll = []
                    print('      |      NLL     <Rw>     <Rf>    Ens Model',
                          file=self.log)
                    for info in print_list:
                        print(' {0:4d} | {1:8.1f} {2:8.4f} {3:8.4f} {4:12d}'.
                              format(
                                  info[0],
                                  info[1],
                                  info[3],
                                  info[4],
                                  info[2] + 1,
                              ),
                              file=self.log)
                        xrs_list_sorted_nll.append(xrs_list[info[2]])

                # Output nll ordered ensemble

                write_ensemble_pdb(
                    filename='nll_ordered_' +
                    pdb_file_names[0].split('/')[-1].replace('.pdb', '') +
                    '_pensemble.pdb',
                    xrs_list=xrs_list_sorted_nll,
                    ens_pdb_hierarchy=ens_pdb_hierarchy)
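
The ranking step above clamps per-atom probabilities into (0, 1], scores each
model by its summed negative log-likelihood, and sorts. A minimal plain-Python
sketch of that scoring (illustrative only; rank_models_by_nll is a
hypothetical helper, not part of the example):

import math

def rank_models_by_nll(per_model_probs):
    # clamp to (0, 1] as above (floor 0.001, ceiling 1.0), then score
    scores = []
    for model_index, probs in enumerate(per_model_probs):
        clamped = [min(max(p, 1.e-3), 1.0) for p in probs]
        scores.append((sum(-math.log(p) for p in clamped), model_index))
    return sorted(scores)  # lowest NLL (most probable model) first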
Exemplo n.º 35
0
                                     assume_index_matching=True)
 r1_factor_bin.show()
 r1_factor = ref_match.r1_factor(obs_match,
                                 use_binning=False,
                                 assume_index_matching=True)
 print 'Overall R-factor:', r1_factor
 #calculate r_factor for partiality-corrected scaled data
 pixel_size_mm = 0.079346
 crystal_init_orientation = observations_pickle["current_orientation"][0]
 wavelength = observations_pickle["wavelength"]
 detector_distance_mm = observations_pickle['distance']
 mm_predictions = pixel_size_mm * (
     observations_pickle['mapped_predictions'][0])
 xbeam = observations_pickle["xbeam"]
 ybeam = observations_pickle["ybeam"]
 alpha_angle_obs = flex.double([math.atan(abs(pred[0]-xbeam)/abs(pred[1]-ybeam)) \
                                  for pred in mm_predictions])
 spot_pred_x_mm = flex.double([pred[0] - xbeam for pred in mm_predictions])
 spot_pred_y_mm = flex.double([pred[1] - ybeam for pred in mm_predictions])
 ph = partiality_handler()
 r0 = ph.calc_spot_radius(sqr(crystal_init_orientation.reciprocal_matrix()),
                          observations.indices(), wavelength)
 ry, rz, re, voigt_nu, rotx, roty = (0, 0, 0.003, 0.5, 0, 0)
 partiality_init, delta_xy_init, rs_init, rh_init = ph.calc_partiality_anisotropy_set(\
                                                         crystal_init_orientation.unit_cell(),
                                                         rotx, roty, observations.indices(),
                                                         ry, rz, r0, re, voigt_nu,
                                                         two_theta, alpha_angle_obs, wavelength,
                                                         crystal_init_orientation,
                                                         spot_pred_x_mm, spot_pred_y_mm,
                                                         detector_distance_mm, "Voigt",
                                                         False)
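
A small stand-in for the alpha-angle computation above (illustrative;
alpha_angles is a hypothetical helper, and it assumes each prediction is an
(x, y) pair in mm with no prediction exactly on the beam-centre row):

import math

def alpha_angles(mm_predictions, xbeam, ybeam):
    # angle of each predicted spot about the beam centre, as supplied to
    # calc_partiality_anisotropy_set above
    return [math.atan(abs(px - xbeam) / abs(py - ybeam))
            for px, py in mm_predictions]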
Exemplo n.º 36
0
     if (command_line.options.use_model):
         crystal_symmetry = crystal_symmetry_from_pdb.extract_from(
             file_name=command_line.options.use_model)
 if (crystal_symmetry.unit_cell() is None
         or crystal_symmetry.space_group_info() is None):
     raise Sorry(
         "Crystal symmetry is not defined. Please use the --symmetry option.\n"
         "Type %s without arguments to see more options." % command_name)
 if (len(command_line.args) > 1):
     print "%d arguments are given from the command line:"% \
       len(command_line.args), command_line.args
     raise Sorry("Please specify one reflection csv file.")
 file_name = command_line.args[0]
 if (not os.path.isfile(file_name)):
     raise Sorry("File is not found: %s" % file_name)
 data = flex.double()
 sigmas = flex.double()
 flags = flex.int()
 column_ids, columns = parse_csv_file(file_name=file_name)
 data_label_root = column_ids[3]
 ms = miller.set(crystal_symmetry, flex.miller_index(columns[0]))
 for d in columns[1]:
     data.append(float(d))
 for sig in columns[2]:
     sigmas.append(float(sig))
 for flag in columns[3]:
     flags.append(int(flag))
 assert len(data) == len(sigmas)
 assert len(flags) == len(data)
 ma = miller.array(ms, data, sigmas)
 if data_label_root.startswith('F'):
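
A minimal sketch of the miller.array construction performed above (assumes a
working cctbx installation; columns_to_miller_array is a hypothetical helper):

from cctbx import miller
from cctbx.array_family import flex

def columns_to_miller_array(crystal_symmetry, indices, data, sigmas):
    # indices: list of (h, k, l) tuples; data/sigmas: parallel value lists
    ms = miller.set(crystal_symmetry, flex.miller_index(indices))
    return miller.array(ms, flex.double(data), flex.double(sigmas))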
Exemplo n.º 37
0
def exercise_sort():
    expected_unsorted_data = """    4 I 4
  1  0  0  0  1  0  0  0  1
  0  0  0
  0 -1  0  1  0  0  0  0  1
  0  0  0
  0  1  0 -1  0  0  0  0  1
  0  0  0
 -1  0  0  0 -1  0  0  0  1
  0  0  0
   0   0   4   0   0   4     0 1 0  1 80331.8  8648.0
   0   0  12   0   0  12     0 1 0  1104526.7 11623.3
   0   0  -4   0   0   4     0 2 0  1 44883.6  4527.6
   0   0  -8   0   0   8     0 2 0  1 41134.1  4431.9
   0   0   8   0   0   8     0 1 0  1 50401.8  5464.6
   0   0   8   0   0   8     0 1 0  1 53386.0  5564.1
   0   0  -8   0   0   8     0 2 0  1119801.4 12231.2
   0   0  12   0   0  12     0 1 0  1105312.9 12602.2
   0   0  16   0   0  16     0 1 0  1 14877.6  2161.5
"""
    expected_sorted_data = """    4 I 4
  1  0  0  0  1  0  0  0  1
  0  0  0
  0 -1  0  1  0  0  0  0  1
  0  0  0
  0  1  0 -1  0  0  0  0  1
  0  0  0
 -1  0  0  0 -1  0  0  0  1
  0  0  0
   0   0   4   0   0   4     0 1 0  1 80331.8  8648.0
   0   0  -4   0   0   4     0 2 0  1 44883.6  4527.6
   0   0   8   0   0   8     0 1 0  1 50401.8  5464.6
   0   0   8   0   0   8     0 1 0  1 53386.0  5564.1
   0   0  -8   0   0   8     0 2 0  1 41134.1  4431.9
   0   0  -8   0   0   8     0 2 0  1119801.4 12231.2
   0   0  12   0   0  12     0 1 0  1104526.7 11623.3
   0   0  12   0   0  12     0 1 0  1105312.9 12602.2
   0   0  16   0   0  16     0 1 0  1 14877.6  2161.5
"""

    from cctbx.xray import observation_types
    from cctbx.array_family import flex
    xs = crystal.symmetry((113.949, 113.949, 32.474, 90.000, 90.000, 90.000),
                          "I4")
    mi = flex.miller_index((
        (0, 0, 4),
        (0, 0, 12),
        (0, 0, -4),
        (0, 0, -8),
        (0, 0, 8),
        (0, 0, 8),
        (0, 0, -8),
        (0, 0, 12),
        (0, 0, 16),
    ))
    data = flex.double((
        80331.8,
        104526.7,
        44883.6,
        41134.1,
        50401.8,
        53386.0,
        119801.4,
        105312.9,
        14877.6,
    ))
    sigmas = flex.double((
        8648.0,
        11623.3,
        4527.6,
        4431.9,
        5464.6,
        5564.1,
        12231.2,
        12602.2,
        2161.5,
    ))
    ms = miller.set(xs, mi, anomalous_flag=True)
    i_obs = miller.array(ms, data=data, sigmas=sigmas).set_observation_type(
        observation_types.intensity())

    i_obs.export_as_scalepack_unmerged(
        file_name='unsorted.sca', scale_intensities_for_scalepack_merge=True)
    i_obs = i_obs.sort(by_value="asu_indices")
    i_obs.export_as_scalepack_unmerged(
        file_name='sorted.sca', scale_intensities_for_scalepack_merge=True)
    with open('unsorted.sca') as f:
        unsorted_data = f.read()
    with open('sorted.sca') as f:
        sorted_data = f.read()
    assert unsorted_data == expected_unsorted_data
    assert sorted_data == expected_sorted_data
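
Usage sketch for the sort-then-export round trip above, writing to a temporary
directory instead of the working directory (export_sorted is a hypothetical
wrapper; the i_obs array is built as in the test):

import os
import tempfile

def export_sorted(i_obs, dirname=None):
    # sort() returns a new array; the original i_obs is left untouched
    dirname = dirname or tempfile.mkdtemp()
    path = os.path.join(dirname, 'sorted.sca')
    i_obs.sort(by_value="asu_indices").export_as_scalepack_unmerged(
        file_name=path, scale_intensities_for_scalepack_merge=True)
    return path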
Exemplo n.º 38
0
  def contour_plot_DEPRECATED_DOES_NOT_APPLY_SUBPIXEL_METROLOGY(OO):
      self = OO.parent
      # see if I can reproduce the predicted positions
      pxlsz = self.pixel_size # mm/pixel
      SIGN = -1.

      # get a simple rmsd obs vs predicted
      sumsq = 0.
      nspot = 0
      for pair in self.indexed_pairs:
        deltax = self.spots[pair["spot"]].ctr_mass_x() - self.predicted[pair["pred"]][0]/pxlsz
        deltay = self.spots[pair["spot"]].ctr_mass_y() - self.predicted[pair["pred"]][1]/pxlsz
        sumsq += deltax*deltax + deltay*deltay
        nspot+=1
      print "RMSD obs vs pred in pixels: %7.2f"%(math.sqrt(sumsq/nspot))

      excursi = flex.double()
      rmsdpos = flex.double()

      for irotx in OO.grid:
        rotx = (0.02 * irotx) * math.pi/180.
        for iroty in OO.grid:
          roty = (0.02 * iroty) * math.pi/180.

          #Not necessary to apply the 3 offset rotations; they have apparently
          #  been applied already.  rotz (0,0,1) is the direct beam
          effective_orientation = OO.input_orientation.rotate_thru((1,0,0),rotx
           ).rotate_thru((0,1,0),roty
           ).rotate_thru((0,0,1),0.0)
          OO.ucbp3.set_orientation(effective_orientation)

          OO.ucbp3.gaussian_fast_slow()
          mean_position = OO.ucbp3.mean_position

          print mean_position
          sumsq = 0.
          nspot = 0
          for pair in self.indexed_pairs:
            deltax = mean_position[nspot][1] - self.predicted[pair["pred"]][0]/pxlsz
            deltay = mean_position[nspot][0] - self.predicted[pair["pred"]][1]/pxlsz
            sumsq += deltax*deltax + deltay*deltay
            nspot+=1
          print "RMSD markmodel vs rossmanpred in pixels: %7.2f"%(math.sqrt(sumsq/nspot))

          #from matplotlib import pyplot as plt
          #plt.plot([mpos[0] for mpos in mean_position],[mpos[1] for mpos in mean_position],"r+")
          #plt.plot([self.predicted[pair["pred"]][1]/pxlsz for pair in indexed_pairs],
          #         [self.predicted[pair["pred"]][0]/pxlsz for pair in indexed_pairs], "b.")
          #plt.show()

          sumsq = 0.
          nspot = 0
          for pair in self.indexed_pairs:
            deltax = self.spots[pair["spot"]].ctr_mass_x() - mean_position[nspot][1]
            deltay = self.spots[pair["spot"]].ctr_mass_y() - mean_position[nspot][0]
            sumsq += deltax*deltax + deltay*deltay
            nspot+=1
          rmsdposition = math.sqrt(sumsq/nspot)
          rmsdpos.append(rmsdposition)
          print "RMSD obs vs markmodel in pixels: %8.4f"%(rmsdposition)

          excursions = flex.double(
            [OO.ucbp3.simple_forward_calculation_spot_position(
            wavelength = OO.central_wavelength_ang,
            observation_no = obsno).rotax_excursion_rad*180./math.pi
            for obsno in xrange(len(self.indexed_pairs))])

          rmsdexc = math.sqrt(flex.mean(excursions*excursions))
          excursi.append(rmsdexc)
          print "rotx %7.2f roty %7.2f degrees, RMSD excursion %7.3f degrees"%(
          (0.02 * irotx),(0.02 * iroty), rmsdexc)
      return excursi,rmsdpos
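
The routine above is a brute-force scan: evaluate an RMSD cost on a square
grid of small rotx/roty rotations (0.02 degree steps) and record the result at
each node. A minimal sketch of that pattern, where cost is a hypothetical
callable standing in for the model evaluation:

import math

def grid_scan(cost, grid, step_deg=0.02):
    # returns (best_value, rotx_rad, roty_rad) over the grid
    best = None
    for irotx in grid:
        for iroty in grid:
            rotx = math.radians(step_deg * irotx)
            roty = math.radians(step_deg * iroty)
            value = cost(rotx, roty)
            if best is None or value < best[0]:
                best = (value, rotx, roty)
    return best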
Exemplo n.º 39
0
  def refine_rotx_roty2(OO,enable_rotational_target=True):

      helper = OO.per_frame_helper_factory()
      helper.restart()

      if enable_rotational_target:
        print "Trying least squares minimization of excursions",
        from scitbx.lstbx import normal_eqns_solving
        iterations = normal_eqns_solving.naive_iterations(
          non_linear_ls = helper,
          gradient_threshold = 1.E-10)

      results =  helper.x

      print "with %d reflections"%len(OO.parent.indexed_pairs),
      print "result %6.2f degrees"%(results[1]*180./math.pi),
      print "result %6.2f degrees"%(results[0]*180./math.pi)

      if False: # Excursion histogram
        print "The input mosaicity is %7.3f deg full width"%OO.parent.inputai.getMosaicity()
        # final histogram
        if OO.pvr_fix:
          final = 360.* helper.fvec_callable_pvr(results)
        else:
          final = 360.* helper.fvec_callable_NOT_USED_AFTER_BUGFIX(results)

        rmsdexc = math.sqrt(flex.mean(final*final))
        from matplotlib import pyplot as plt
        nbins = len(final)//20
        n,bins,patches = plt.hist(final,
          nbins, normed=0, facecolor="orange", alpha=0.75)
        plt.xlabel("Rotation on e1 axis, rmsd %7.3f deg"%rmsdexc)
        plt.title("Histogram of cctbx.xfel misorientation")
        plt.axis([-0.5,0.5,0,100])
        plt.plot([rmsdexc],[18],"b|")
        plt.show()

      # Determine optimal mosaicity and domain size model (monochromatic)
      if OO.pvr_fix:
        final = 360.* helper.fvec_callable_pvr(results)
      else:
        final = 360.* helper.fvec_callable_NOT_USED_AFTER_BUGFIX(results)
      #Guard against misindexing -- seen in simulated data, with zone nearly perfectly aligned
      guard_stats = flex.max(final), flex.min(final)
      if False and (guard_stats[0] > 2.0 or guard_stats[1] < -2.0):
        # disabled check: it was killing legitimate excursions
        raise Exception("Misindexing diagnosed by meaningless excursion angle (bandpass_gaussian model)")
      print "The mean excursion is %7.3f degrees"%(flex.mean(final))

      two_thetas = helper.last_set_orientation.unit_cell().two_theta(OO.reserve_indices,OO.central_wavelength_ang,deg=True)
      dspacings = helper.last_set_orientation.unit_cell().d(OO.reserve_indices)
      dspace_sq = dspacings * dspacings
      excursion_rad = final * math.pi/ 180.

      #  First -- try to get a reasonable envelope for the observed excursions.
      #  (minimum of three regions; maximum of 50 measurements in each bin)
      print "fitting parameters on %d spots"%len(excursion_rad)
      n_bins = min(max(3, len(excursion_rad)//25),50)
      bin_sz = len(excursion_rad)//n_bins
      print "nbins",n_bins,"bin_sz",bin_sz
      order = flex.sort_permutation(two_thetas)
      two_thetas_env = flex.double()
      dspacings_env = flex.double()
      excursion_rads_env = flex.double()
      for x in xrange(0,n_bins):
        subset = order[x*bin_sz:(x+1)*bin_sz]
        two_thetas_env.append( flex.mean(two_thetas.select(subset)) )
        dspacings_env.append( flex.mean(dspacings.select(subset)))
        excursion_rads_env.append( flex.max( flex.abs( excursion_rad.select(subset))))

      #  Second -- parameter fit
      #  (solve the normal equations)
      sum_inv_u_sq = flex.sum(dspacings_env * dspacings_env)
      sum_inv_u    = flex.sum(dspacings_env)
      sum_te_u     = flex.sum(dspacings_env * excursion_rads_env)
      sum_te       = flex.sum(excursion_rads_env)
      Normal_Mat   = sqr((sum_inv_u_sq, sum_inv_u, sum_inv_u, len(dspacings_env)))
      Vector       = col((sum_te_u, sum_te))
      solution     = Normal_Mat.inverse() * Vector
      s_ang = 1./(2*solution[0])
      print "Best LSQ fit Scheerer domain size is %9.2f ang"%(
        s_ang)
      tan_phi_rad = helper.last_set_orientation.unit_cell().d(OO.reserve_indices) / (2. * s_ang)
      tan_phi_deg = tan_phi_rad * 180./math.pi
      k_degrees = solution[1]* 180./math.pi
      print "The LSQ full mosaicity is %8.5f deg; half-mosaicity %9.5f"%(2*k_degrees, k_degrees)
      tan_outer_deg = tan_phi_deg + k_degrees

      if OO.mosaic_refinement_target=="ML":
        from xfel.mono_simulation.max_like import minimizer
        print "input", s_ang,2. * solution[1]*180/math.pi
        # coerce the estimates to be positive for max-likelihood
        lower_limit_domain_size = math.pow(
         helper.last_set_orientation.unit_cell().volume(),
         1./3.)*20 # minimum reasonable domain size: ~20 unit-cell edge lengths

        d_estimate = max(s_ang, lower_limit_domain_size)
        M = minimizer(d_i = dspacings, psi_i = excursion_rad, eta_rad = abs(2. * solution[1]),
                      Deff = d_estimate)
        print "output",1./M.x[0], M.x[1]*180./math.pi
        tan_phi_rad_ML = helper.last_set_orientation.unit_cell().d(OO.reserve_indices) / (2. / M.x[0])
        tan_phi_deg_ML = tan_phi_rad_ML * 180./math.pi
        # bugfix: Need factor of 0.5 because the plot shows half mosaicity (displacement from the center point defined as zero)
        tan_outer_deg_ML = tan_phi_deg_ML + 0.5*M.x[1]*180./math.pi

      if OO.parent.horizons_phil.integration.mosaic.enable_polychromatic:
        # add code here to perform polychromatic modeling.
        """
        get miller indices DONE
        get model-predicted mono-wavelength centroid S1 vectors
        back-convert S1vec, with mono-wavelength, to detector-plane position, factoring in subpixel correction
        compare with spot centroid measured position
        compare with locus of bodypixels
        """
        print list(OO.reserve_indices)
        print len(OO.reserve_indices), len(two_thetas)
        positions = [
              OO.ucbp3.simple_forward_calculation_spot_position(
              wavelength = OO.central_wavelength_ang,
              observation_no = obsno).position
              for obsno in xrange(len(OO.parent.indexed_pairs))]
        print len(positions)
        print positions # model-predicted positions
        print len(OO.parent.spots)
        print OO.parent.indexed_pairs
        print OO.parent.spots
        print len(OO.parent.spots)
        meas_spots = [OO.parent.spots[pair["spot"]] for pair in OO.parent.indexed_pairs]
  #      for xspot in meas_spots:
  #        xspot.ctr_mass_x(),xspot.ctr_mass_y()
  #        xspot.max_pxl_x()
  #        xspot.bodypixels
  #        xspot.ctr_mass_x()

        # Do some work to calculate an rmsd
        diff_vecs = flex.vec3_double()
        for p,xspot in zip(positions, meas_spots):
          diff_vecs.append((p[0]-xspot.ctr_mass_y(), p[1]-xspot.ctr_mass_x(), 0.0))
        # could use diff_vecs.rms_length()
        diff_vecs_sq = diff_vecs.dot(diff_vecs)
        mean_diff_vec_sq = flex.mean(diff_vecs_sq)
        rmsd = math.sqrt(mean_diff_vec_sq)
        print "mean obs-pred diff vec on %d spots is %6.2f pixels"%(len(positions),rmsd)

        positions_to_fictitious = [
              OO.ucbp3.simple_forward_calculation_spot_position(
              wavelength = OO.central_wavelength_ang,
              observation_no = obsno).position_to_fictitious
              for obsno in xrange(len(OO.parent.indexed_pairs))]
        # Do some work to calculate an rmsd
        diff_vecs = flex.vec3_double()
        for p,xspot in zip(positions_to_fictitious, meas_spots):
          diff_vecs.append((p[0]-xspot.ctr_mass_y(), p[1]-xspot.ctr_mass_x(), 0.0))
        rmsd = diff_vecs.rms_length()
        print "mean obs-pred_to_fictitious diff vec on %d spots is %6.2f pixels"%(len(positions),rmsd)

        """
        actually, it might be better if the entire set of experimental observations
        is transformed into the ideal detector plane, for the purposes of poly_treatment.


        start here.  Now it would be good to actually implement probability of observing a body pixel given the model.
        We have everything needed right here.
        """
        if OO.parent.horizons_phil.integration.mosaic.enable_AD14F7B:
          # Image plot: obs and predicted positions + bodypixels
          from matplotlib import pyplot as plt
          plt.plot( [p[0] for p in positions_to_fictitious], [p[1] for p in positions_to_fictitious], "r.")
          plt.plot( [xspot.ctr_mass_y() for xspot in meas_spots],
                    [xspot.ctr_mass_x() for xspot in meas_spots], "g.")
          bodypx = []
          for xspot in meas_spots:
            for body in xspot.bodypixels:
              bodypx.append(body)
          plt.plot( [b.y for b in bodypx], [b.x for b in bodypx], "b.")
          plt.axes().set_aspect("equal")
          plt.show()

      print "MEAN excursion",flex.mean(final),
      if OO.mosaic_refinement_target=="ML":
        print "mosaicity deg FW=",M.x[1]*180./math.pi
      else:
        print
      if OO.parent.horizons_phil.integration.mosaic.enable_AD14F7B: # Excursion vs resolution fit
        AD1TF7B_MAX2T = 30.
        AD1TF7B_MAXDP = 1.
        from matplotlib import pyplot as plt
        fig = plt.figure()
        plt.plot(two_thetas, final, "bo")
        mean = flex.mean(final)
        minplot = flex.min(two_thetas)
        plt.plot([0,minplot],[mean,mean],"k-")
        LR = flex.linear_regression(two_thetas, final)
        #LR.show_summary()
        model_y = LR.slope()*two_thetas + LR.y_intercept()
        plt.plot(two_thetas, model_y, "k-")
        print helper.last_set_orientation.unit_cell()
        #for sdp,tw in zip (dspacings,two_thetas):
          #print sdp,tw
        if OO.mosaic_refinement_target=="ML":
          plt.title("ML: mosaicity FW=%4.2f deg, Dsize=%5.0fA on %d spots"%(M.x[1]*180./math.pi, 2./M.x[0], len(two_thetas)))
          plt.plot(two_thetas, tan_phi_deg_ML, "r.")
          plt.plot(two_thetas, -tan_phi_deg_ML, "r.")
          plt.plot(two_thetas, tan_outer_deg_ML, "g.")
          plt.plot(two_thetas, -tan_outer_deg_ML, "g.")
        else:
          plt.plot(two_thetas_env, excursion_rads_env *180./math.pi, "r|")
          plt.plot(two_thetas_env, -excursion_rads_env *180./math.pi, "r|")
          plt.plot(two_thetas_env, excursion_rads_env *180./math.pi, "r-")
          plt.plot(two_thetas_env, -excursion_rads_env *180./math.pi, "r-")
          plt.plot(two_thetas, tan_phi_deg, "r.")
          plt.plot(two_thetas, -tan_phi_deg, "r.")
          plt.plot(two_thetas, tan_outer_deg, "g.")
          plt.plot(two_thetas, -tan_outer_deg, "g.")
        plt.xlim([0,AD1TF7B_MAX2T])
        plt.ylim([-AD1TF7B_MAXDP,AD1TF7B_MAXDP])
        OO.parent.show_figure(plt,fig,"psi")
        plt.close()

      from xfel.mono_simulation.util import green_curve_area,ewald_proximal_volume
      if OO.mosaic_refinement_target=="ML":
        OO.parent.green_curve_area = green_curve_area(two_thetas, tan_outer_deg_ML)
        OO.parent.inputai.setMosaicity(M.x[1]*180./math.pi) # full width, degrees
        OO.parent.ML_half_mosaicity_deg = M.x[1]*180./(2.*math.pi)
        OO.parent.ML_domain_size_ang = 1./M.x[0]
        OO.parent.ewald_proximal_volume = ewald_proximal_volume(
            wavelength_ang = OO.central_wavelength_ang,
            resolution_cutoff_ang = OO.parent.horizons_phil.integration.mosaic.ewald_proximal_volume_resolution_cutoff,
            domain_size_ang = 1./M.x[0],
            full_mosaicity_rad = M.x[1])
        return results, helper.last_set_orientation,1./M.x[0] # full width domain size, angstroms
      else:
        assert OO.mosaic_refinement_target=="LSQ"
        OO.parent.green_curve_area = green_curve_area(two_thetas, tan_outer_deg)
        OO.parent.inputai.setMosaicity(2*k_degrees) # full width
        OO.parent.ML_half_mosaicity_deg = k_degrees
        OO.parent.ML_domain_size_ang = s_ang
        OO.parent.ewald_proximal_volume = ewald_proximal_volume(
            wavelength_ang = OO.central_wavelength_ang,
            resolution_cutoff_ang = OO.parent.horizons_phil.integration.mosaic.ewald_proximal_volume_resolution_cutoff,
            domain_size_ang = s_ang,
            full_mosaicity_rad = 2*k_degrees*math.pi/180.)
        return results, helper.last_set_orientation,s_ang # full width domain size, angstroms
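
The LSQ branch above fits excursion ~ m*d + k by solving the 2x2 normal
equations in closed form, then reads the domain size off the slope
(s_ang = 1/(2m)). A minimal sketch of that fit (fit_line_normal_equations is
a hypothetical helper; sqr and col are the scitbx.matrix types used above):

from scitbx.matrix import col, sqr

def fit_line_normal_equations(d, e):
    # least-squares line e ~ m*d + k through the (d_i, e_i) envelope points
    n = len(d)
    s_dd = sum(x * x for x in d)
    s_d = sum(d)
    s_de = sum(x * y for x, y in zip(d, e))
    s_e = sum(e)
    sol = sqr((s_dd, s_d, s_d, n)).inverse() * col((s_de, s_e))
    return sol[0], sol[1]  # slope m, intercept k; domain size = 1/(2*m)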
Exemplo n.º 40
0
  def integration_concept(self,image_number=0,cb_op_to_primitive=None,verbose=False,**kwargs):
    self.image_number = image_number
    NEAR = 10
    pxlsz = self.pixel_size
    self.get_predictions_accounting_for_centering(cb_op_to_primitive,**kwargs)
    FWMOSAICITY = self.inputai.getMosaicity()
    DOMAIN_SZ_ANG = kwargs.get("domain_size_ang",  self.__dict__.get("actual",0)  )
    refineflag = {True:0,False:1}[kwargs.get("domain_size_ang",0)==0]
    self.inputpd["symmetry"].show_summary(prefix="EXCURSION%1d REPORT FWMOS= %6.4f DOMAIN= %6.1f "%(refineflag,FWMOSAICITY,DOMAIN_SZ_ANG))
    from annlib_ext import AnnAdaptor
    self.cell = self.inputai.getOrientation().unit_cell()
    query = flex.double()
    for pred in self.predicted: # predicted spot coord in pixels
      query.append(pred[0]/pxlsz)
      query.append(pred[1]/pxlsz)
    self.reserve_hkllist_for_signal_search = self.hkllist

    reference = flex.double()
    spots = self.get_observations_with_outlier_removal()

    assert len(spots) > NEAR  # can't do spot/pred matching with too few spots
    for spot in spots:
      reference.append(spot.ctr_mass_x())
      reference.append(spot.ctr_mass_y())

    IS_adapt = AnnAdaptor(data=reference,dim=2,k=NEAR)
    IS_adapt.query(query)
    print "Calculate correction vectors for %d observations & %d predictions"%(len(spots),len(self.predicted))
    indexed_pairs_provisional = []
    correction_vectors_provisional = []
    c_v_p_flex = flex.vec3_double()
    idx_cutoff = float(min(self.mask_focus[image_number]))
    if verbose:
      print "idx_cutoff distance in pixels",idx_cutoff
    if not self.horizons_phil.integration.enable_one_to_one_safeguard:
     # legacy code, no safeguard against many-to-one predicted-to-observation mapping
     for i in xrange(len(self.predicted)): # loop over predicteds
      #for n in xrange(NEAR): # loop over near spotfinder spots
      for n in xrange(1): # only consider the nearest spotfinder spot
        Match = dict(spot=IS_adapt.nn[i*NEAR+n],pred=i)
        if n==0 and math.sqrt(IS_adapt.distances[i*NEAR+n]) < idx_cutoff:
          indexed_pairs_provisional.append(Match)

          vector = matrix.col(
            [spots[Match["spot"]].ctr_mass_x() - self.predicted[Match["pred"]][0]/pxlsz,
             spots[Match["spot"]].ctr_mass_y() - self.predicted[Match["pred"]][1]/pxlsz])
          correction_vectors_provisional.append(vector)
          c_v_p_flex.append((vector[0],vector[1],0.))
    else:
      one_to_one = {}
      for i in xrange(len(self.predicted)): # loop over predicteds
        annresultidx = i*NEAR
        obsidx = IS_adapt.nn[annresultidx]
        this_distancesq = IS_adapt.distances[annresultidx]
        if obsidx not in one_to_one or \
           this_distancesq < one_to_one[obsidx]["distancesq"]:
           if math.sqrt(this_distancesq) < idx_cutoff:
             one_to_one[obsidx] = dict(spot=obsidx,pred=i,distancesq=this_distancesq)
      for key,value in one_to_one.items():
        indexed_pairs_provisional.append(value)
        vector = matrix.col(
            [spots[value["spot"]].ctr_mass_x() - self.predicted[value["pred"]][0]/pxlsz,
             spots[value["spot"]].ctr_mass_y() - self.predicted[value["pred"]][1]/pxlsz])
        correction_vectors_provisional.append(vector)
        c_v_p_flex.append((vector[0],vector[1],0.))

    print "... %d provisional matches"%len(correction_vectors_provisional),
    print "r.m.s.d. in pixels: %5.2f"%(math.sqrt(flex.mean(c_v_p_flex.dot(c_v_p_flex))))

    if self.horizons_phil.integration.enable_residual_scatter:
      from matplotlib import pyplot as plt
      fig = plt.figure()
      for cv in correction_vectors_provisional:
        plt.plot([cv[1]],[-cv[0]],"b.")
      plt.title(" %d matches, r.m.s.d. %5.2f pixels"%(len(correction_vectors_provisional),math.sqrt(flex.mean(c_v_p_flex.dot(c_v_p_flex)))))
      plt.axes().set_aspect("equal")
      self.show_figure(plt,fig,"res")
      plt.close()

    if self.horizons_phil.integration.enable_residual_map:
      from matplotlib import pyplot as plt
      fig = plt.figure()
      for match,cv in zip(indexed_pairs_provisional,correction_vectors_provisional):
        plt.plot([spots[match["spot"]].ctr_mass_y()],[-spots[match["spot"]].ctr_mass_x()],"r.")
        plt.plot([self.predicted[match["pred"]][1]/pxlsz],[-self.predicted[match["pred"]][0]/pxlsz],"g.")
        plt.plot([spots[match["spot"]].ctr_mass_y(), spots[match["spot"]].ctr_mass_y() + 10.*cv[1]],
                 [-spots[match["spot"]].ctr_mass_x(), -spots[match["spot"]].ctr_mass_x() - 10.*cv[0]],'b-')
      plt.xlim([0,float(self.inputpd["size2"])])
      plt.ylim([-float(self.inputpd["size1"]),0])
      plt.title(" %d matches, r.m.s.d. %5.2f pixels"%(len(correction_vectors_provisional),math.sqrt(flex.mean(c_v_p_flex.dot(c_v_p_flex)))))
      plt.axes().set_aspect("equal")
      self.show_figure(plt,fig,"map")
      plt.close()
    # Insert code here to remove correction-length outliers: they cause
    # serious problems when finding legitimate correction vectors (print out
    # the list). Also remove outliers for the purpose of reporting the RMS.
    outlier_rejection = True
    cache_refinement_spots = getattr(slip_callbacks.slip_callback,"requires_refinement_spots",False)
    if outlier_rejection:
      correction_lengths = flex.double([v.length() for v in correction_vectors_provisional])
      clorder = flex.sort_permutation(correction_lengths)
      sorted_cl = correction_lengths.select(clorder)

      ACCEPTABLE_LIMIT = 2
      limit = int(0.33 * len(sorted_cl)) # best 1/3 of data are assumed to be correctly modeled.
      if (limit <= ACCEPTABLE_LIMIT):
        raise Sorry("Not enough indexed spots to reject outliers; have %d need >%d" % (limit, ACCEPTABLE_LIMIT))

      y_data = flex.double(len(sorted_cl))
      for i in xrange(len(y_data)):
        y_data[i] = float(i)/float(len(y_data))

      # ideas are explained in Sauter & Poon (2010) J Appl Cryst 43, 611-616.
      from rstbx.outlier_spots.fit_distribution import fit_cdf,rayleigh
      fitted_rayleigh = fit_cdf(x_data = sorted_cl[0:limit],
                                y_data = y_data[0:limit],
                                distribution=rayleigh)

      inv_cdf = [fitted_rayleigh.distribution.inv_cdf(cdf) for cdf in y_data]

      #print "SORTED LIST OF ",len(sorted_cl), "with sigma",fitted_rayleigh.distribution.sigma
      indexed_pairs = []
      correction_vectors = []
      self.correction_vectors = []
      for icand in xrange(len(sorted_cl)):
        # somewhat arbitrary sigma = 1.0 cutoff for outliers
        if (sorted_cl[icand]-inv_cdf[icand])/fitted_rayleigh.distribution.sigma > 1.0:
          break
        indexed_pairs.append(indexed_pairs_provisional[clorder[icand]])
        correction_vectors.append(correction_vectors_provisional[clorder[icand]])
        if cache_refinement_spots:
          self.spotfinder.images[self.frame_numbers[self.image_number]]["refinement_spots"].append(
          spots[indexed_pairs[-1]["spot"]])
        if kwargs.get("verbose_cv")==True:
            print "CV OBSCENTER %7.2f %7.2f REFINEDCENTER %7.2f %7.2f"%(
              float(self.inputpd["size1"])/2.,float(self.inputpd["size2"])/2.,
              self.inputai.xbeam()/pxlsz, self.inputai.ybeam()/pxlsz),
            print "OBSSPOT %7.2f %7.2f PREDSPOT %7.2f %7.2f"%(
              spots[indexed_pairs[-1]["spot"]].ctr_mass_x(),
              spots[indexed_pairs[-1]["spot"]].ctr_mass_y(),
              self.predicted[indexed_pairs[-1]["pred"]][0]/pxlsz,
              self.predicted[indexed_pairs[-1]["pred"]][1]/pxlsz),
            the_hkl = self.hkllist[indexed_pairs[-1]["pred"]]
            print "HKL %4d %4d %4d"%the_hkl,"%2d"%self.setting_id,
            radial, azimuthal = spots[indexed_pairs[-1]["spot"]].get_radial_and_azimuthal_size(
              self.inputai.xbeam()/pxlsz, self.inputai.ybeam()/pxlsz)
            print "RADIALpx %5.3f AZIMUTpx %5.3f"%(radial,azimuthal)

        # Store a list of correction vectors in self.
        radial, azimuthal = spots[indexed_pairs[-1]['spot']].get_radial_and_azimuthal_size(
          self.inputai.xbeam()/pxlsz, self.inputai.ybeam()/pxlsz)
        self.correction_vectors.append(
          dict(obscenter=(float(self.inputpd['size1']) / 2,
                          float(self.inputpd['size2']) / 2),
               refinedcenter=(self.inputai.xbeam() / pxlsz,
                              self.inputai.ybeam() / pxlsz),
               obsspot=(spots[indexed_pairs[-1]['spot']].ctr_mass_x(),
                        spots[indexed_pairs[-1]['spot']].ctr_mass_y()),
               predspot=(self.predicted[indexed_pairs[-1]['pred']][0] / pxlsz,
                         self.predicted[indexed_pairs[-1]['pred']][1] / pxlsz),
               hkl=(self.hkllist[indexed_pairs[-1]['pred']][0],
                    self.hkllist[indexed_pairs[-1]['pred']][1],
                    self.hkllist[indexed_pairs[-1]['pred']][2]),
               setting_id=self.setting_id,
               radial=radial,
               azimuthal=azimuthal))

      print "After outlier rejection %d indexed spotfinder spots remain."%len(indexed_pairs)
      if False:
        rayleigh_cdf = [
          fitted_rayleigh.distribution.cdf(x=sorted_cl[c]) for c in xrange(len(sorted_cl))]
        from matplotlib import pyplot as plt
        plt.plot(sorted_cl,y_data,"r+")
        #plt.plot(sorted_cl,rayleigh_cdf,"g.")
        plt.plot(inv_cdf,y_data,"b.")
        plt.show()
    else:
      indexed_pairs = indexed_pairs_provisional
      correction_vectors = correction_vectors_provisional
    ########### finished with outlier rejection

    self.inputpd["symmetry"].show_summary(prefix="SETTING ")

    is_triclinic = (self.setting_id==1)
    if is_triclinic:
      self.triclinic_pairs = [ dict(pred=self.hkllist[a["pred"]],spot=a["spot"])
        for a in indexed_pairs ]

    if self.horizons_phil.integration.model == "user_supplied":
      if kwargs.get("user-reentrant",None)==None:
        from cxi_user import post_outlier_rejection
        self.indexed_pairs = indexed_pairs
        self.spots = spots
        post_outlier_rejection(self,image_number,cb_op_to_primitive,self.horizons_phil,kwargs)
        return

    ########### finished with user-supplied code

    if self.horizons_phil.integration.spot_shape_verbose:
        from rstbx.new_horizons.spot_shape import spot_shape_verbose
        spot_shape_verbose(rawdata = self.imagefiles.images[self.image_number].linearintdata,
           beam_center_pix = matrix.col((self.inputai.xbeam()/pxlsz, self.inputai.ybeam()/pxlsz)),
           indexed_pairs = indexed_pairs,
           spotfinder_observations = spots,
           distance_mm = self.inputai.distance(),
           mm_per_pixel = pxlsz,
           hkllist = self.hkllist,
           unit_cell = self.cell,
           wavelength_ang = self.inputai.wavelength
        )

    #Other checks to be implemented (future):
    # spot is within active area of detector on a circular detector such as the Mar IP
    # integration masks do not overlap; or deconvolute

    correction_lengths=flex.double([v.length() for v in correction_vectors])
    if verbose:
      print "average correction %5.2f over %d vectors"%(flex.mean(correction_lengths),
      len(correction_lengths)),
      print "or %5.2f mm."%(pxlsz*flex.mean(correction_lengths))
    self.r_residual = pxlsz*flex.mean(correction_lengths)

    #assert len(indexed_pairs)>NEAR # must have enough indexed spots
    if (len(indexed_pairs) <= NEAR):
      raise Sorry("Not enough indexed spots, only found %d, need %d" % (len(indexed_pairs), NEAR))

    reference = flex.double()
    for item in indexed_pairs:
      reference.append(spots[item["spot"]].ctr_mass_x())
      reference.append(spots[item["spot"]].ctr_mass_y())

    PS_adapt = AnnAdaptor(data=reference,dim=2,k=NEAR)
    PS_adapt.query(query)

    self.BSmasks = []
    #self.null_correction_mapping( predicted=self.predicted,
    #                                    correction_vectors = correction_vectors,
    #                                    IS_adapt = IS_adapt,
    #                                    spots = spots)
    self.positional_correction_mapping( predicted=self.predicted,
                                        correction_vectors = correction_vectors,
                                        PS_adapt = PS_adapt,
                                        IS_adapt = IS_adapt,
                                        spots = spots)

    # which spots are close enough to interfere with background?
    MAXOVER=6
    OS_adapt = AnnAdaptor(data=query,dim=2,k=MAXOVER) #six near nbrs
    OS_adapt.query(query)
    if self.mask_focus[image_number] is None:
      raise Sorry("No observed/predicted spot agreement; no Spotfinder masks; skip integration")
    nbr_cutoff = 2.0* max(self.mask_focus[image_number])
    FRAME = int(nbr_cutoff/2)
    #print "The overlap cutoff is %d pixels"%nbr_cutoff
    nbr_cutoff_sq = nbr_cutoff * nbr_cutoff

    #print "Optimized C++ section...",
    self.set_frame(FRAME)
    self.set_background_factor(kwargs["background_factor"])
    self.set_nbr_cutoff_sq(nbr_cutoff_sq)
    self.set_guard_width_sq(self.horizons_phil.integration.guard_width_sq)
    self.set_detector_gain(self.horizons_phil.integration.detector_gain)
    flex_sorted = flex.int()
    for item in self.sorted:
      flex_sorted.append(item[0])
      flex_sorted.append(item[1])

    if self.horizons_phil.integration.mask_pixel_value is not None:
      self.set_mask_pixel_val(self.horizons_phil.integration.mask_pixel_value)

    image_obj = self.imagefiles.imageindex(self.frame_numbers[self.image_number])
    image_obj.read()
    rawdata = image_obj.linearintdata # assume image #1

    if self.inputai.active_areas is not None:
      self.detector_xy_draft = self.safe_background( rawdata=rawdata,
                          predicted=self.predicted,
                          OS_adapt=OS_adapt,
                          sorted=flex_sorted,
                          tiles=self.inputai.active_areas.IT,
                          tile_id=self.inputai.active_areas.tile_id)
    else:
      self.detector_xy_draft = self.safe_background( rawdata=rawdata,
                          predicted=self.predicted,
                          OS_adapt=OS_adapt,
                          sorted=flex_sorted)
    for i in xrange(len(self.predicted)): # loop over predicteds
      B_S_mask = {}
      keys = self.get_bsmask(i)
      for k in xrange(0,len(keys),2):
        B_S_mask[(keys[k],keys[k+1])]=True
      self.BSmasks.append(B_S_mask)
    #print "Done"
    return
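
The one-to-one safeguard above keeps, for each observed spot, only the closest
prediction within the cutoff. A plain-Python sketch of that bookkeeping
(one_to_one_matches is a hypothetical helper; it assumes k=1 neighbour lists,
i.e. nn[i] and distances_sq[i] describe prediction i's nearest observation):

import math

def one_to_one_matches(nn, distances_sq, idx_cutoff):
    best = {}
    for pred_idx, (obs_idx, dsq) in enumerate(zip(nn, distances_sq)):
        if math.sqrt(dsq) >= idx_cutoff:
            continue  # too far from any observation to be a credible match
        if obs_idx not in best or dsq < best[obs_idx][1]:
            best[obs_idx] = (pred_idx, dsq)  # keep the closest prediction
    return [dict(spot=o, pred=p, distancesq=d)
            for o, (p, d) in best.items()]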
Exemplo n.º 41
0
    def __init__(self,
                 fmodels,
                 restraints_manager=None,
                 model=None,
                 is_neutron_scat_table=None,
                 target_weights=None,
                 refine_xyz=False,
                 refine_adp=False,
                 lbfgs_termination_params=None,
                 use_fortran=False,
                 verbose=0,
                 correct_special_position_tolerance=1.0,
                 iso_restraints=None,
                 h_params=None,
                 qblib_params=None,
                 macro_cycle=None,
                 u_min=adptbx.b_as_u(-5.0),
                 u_max=adptbx.b_as_u(999.99),
                 collect_monitor=True,
                 log=None):
        timer = user_plus_sys_time()
        adopt_init_args(self, locals())
        self.f = None
        self.xray_structure = self.fmodels.fmodel_xray().xray_structure
        self.fmodels.create_target_functors()
        self.fmodels.prepare_target_functors_for_minimization()
        if (self.refine_adp and fmodels.fmodel_neutron() is None):
            self.xray_structure.tidy_us()
            self.fmodels.update_xray_structure(
                xray_structure=self.xray_structure, update_f_calc=True)
        self.weights = None
        # QBLIB INSERT
        self.qblib_params = qblib_params
        if (self.qblib_params is not None and self.qblib_params.qblib):
            self.macro = macro_cycle
            self.qblib_cycle_count = 0
            self.tmp_XYZ = None
            self.XYZ_diff_curr = None
# QBLIB END
        self.correct_special_position_tolerance = correct_special_position_tolerance
        if (refine_xyz and target_weights is not None):
            self.weights = target_weights.xyz_weights_result
        elif (refine_adp and target_weights is not None):
            self.weights = target_weights.adp_weights_result
        else:
            from phenix.refinement import weight_xray_chem
            self.weights = weight_xray_chem.weights(wx=1,
                                                    wx_scale=1,
                                                    angle_x=None,
                                                    wn=1,
                                                    wn_scale=1,
                                                    angle_n=None,
                                                    w=0,
                                                    wxn=1)
        if (self.collect_monitor):
            self.monitor = monitor(weights=self.weights,
                                   fmodels=fmodels,
                                   model=model,
                                   iso_restraints=iso_restraints,
                                   refine_xyz=refine_xyz,
                                   refine_adp=refine_adp,
                                   refine_occ=False)
        if (self.collect_monitor): self.monitor.collect()
        self.neutron_refinement = (self.fmodels.fmodel_n is not None)
        self.x = flex.double(self.xray_structure.n_parameters(), 0)
        self.riding_h_manager = self.model.riding_h_manager
        self._scatterers_start = self.xray_structure.scatterers()
        #lbfgs_core_params = scitbx.lbfgs.core_parameters(
        #  stpmin = 1.e-9,
        #  stpmax = adptbx.b_as_u(10))
        self.minimizer = scitbx.lbfgs.run(
            target_evaluator=self,
            termination_params=lbfgs_termination_params,
            use_fortran=use_fortran,
            #  core_params = lbfgs_core_params,
            #  gradient_only=True,
            exception_handling_params=scitbx.lbfgs.
            exception_handling_parameters(
                ignore_line_search_failed_step_at_lower_bound=True))
        self.apply_shifts()
        del self._scatterers_start
        self.compute_target(compute_gradients=False,
                            u_iso_refinable_params=None)
        if (self.refine_adp and self.fmodels.fmodel_neutron() is None):
            self.xray_structure.tidy_us()
            self.fmodels.update_xray_structure(
                xray_structure=self.xray_structure, update_f_calc=True)
        if (self.collect_monitor):
            self.monitor.collect(iter=self.minimizer.iter(),
                                 nfun=self.minimizer.nfun())
        self.fmodels.create_target_functors()
        # QBLIB INSERT
        if (self.qblib_params is not None and self.qblib_params.qblib):
            print('{:-^80}'.format(""), file=self.qblib_params.qblib_log)
            print(file=self.qblib_params.qblib_log)
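
For context, scitbx.lbfgs.run() as called above drives any object following the target_evaluator protocol: a flex.double attribute x plus a compute_functional_and_gradients() method. A minimal self-contained sketch with a toy quadratic target (the refinement class above plays the same role for the crystallographic target):

import scitbx.lbfgs
from scitbx.array_family import flex

class quadratic_evaluator(object):
  def __init__(self):
    self.x = flex.double([0.0, 0.0])  # parameters; updated in place by LBFGS

  def compute_functional_and_gradients(self):
    x, y = self.x
    f = (x - 3.0)**2 + (y + 1.0)**2
    g = flex.double([2.0*(x - 3.0), 2.0*(y + 1.0)])
    return f, g

e = quadratic_evaluator()
scitbx.lbfgs.run(target_evaluator=e)
assert abs(e.x[0] - 3.0) < 1.0e-4 and abs(e.x[1] + 1.0) < 1.0e-4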
Example No. 42
0
  def __init__(OO,self,use_inverse_beam=False):

    OO.parent = self # OO.parent is an instance of the legacy IntegrationMetaProcedure class
    from xfel.mono_simulation import bandpass_gaussian
    from rstbx.bandpass import parameters_bp3

    #take needed parameters from parent
    pxlsz = self.pixel_size # mm/pixel

    detector_origin = col(( -self.inputai.xbeam(),
                            -self.inputai.ybeam(),
                             0.))
    #OO.space_group = self.inputpd["symmetry"].space_group()   #comment this back in as needed for refinement
    indices = flex.miller_index([self.hkllist[pair["pred"]] for pair in self.indexed_pairs])
    OO.reserve_indices = indices
    OO.input_orientation = self.inputai.getOrientation()
    OO.central_wavelength_ang = self.inputai.wavelength
    incident_beam = col((0.,0.,-1.))
    if use_inverse_beam: incident_beam*=-1.

    parameters = parameters_bp3(
       indices=indices, orientation=OO.input_orientation,
       incident_beam=incident_beam,
       packed_tophat=col((1.,1.,0.)),
       detector_normal=col((0.,0.,-1.)),
       detector_fast=col((0.,1.,0.)),detector_slow=col((1.,0.,0.)),
       pixel_size=col((pxlsz,pxlsz,0)),
       pixel_offset=col((0.,0.,0.0)),
       distance=self.inputai.distance(),
       detector_origin=detector_origin
    )
    OO.ucbp3 = bandpass_gaussian(parameters=parameters)

    if "horizons_phil" in OO.parent.__dict__:
      the_tiles = OO.parent.imagefiles.images[
        OO.parent.image_number].get_tile_manager(
          OO.parent.horizons_phil).effective_tiling_as_flex_int(
            reapply_peripheral_margin=True, encode_inactive_as_zeroes=True)
      OO.ucbp3.set_active_areas( the_tiles )
    else:
      OO.ucbp3.set_active_areas( [0,0,1700,1700] )
    integration_signal_penetration=0.0 # easier to calculate distance derivatives

    OO.ucbp3.set_sensor_model( thickness_mm = 0.5, mu_rho = 8.36644, # CS_PAD detector at 1.3 Angstrom
      signal_penetration = integration_signal_penetration)

    # test for horizons_phil simply skips the subpixel correction for initial labelit indexing
    if "horizons_phil" in OO.parent.__dict__:
      if OO.parent.horizons_phil.integration.subpixel_joint_model.translations is not None:
        "Subpixel corrections: using joint-refined translation + rotation"
        T = OO.parent.horizons_phil.integration.subpixel_joint_model.translations
        import copy
        resortedT = copy.copy(T)
        for tt in xrange(0,len(T),2):
          resortedT[tt] = T[tt+1]
          resortedT[tt+1] = T[tt]
        OO.ucbp3.set_subpixel(
            translations = resortedT, rotations_deg = flex.double(
             OO.parent.horizons_phil.integration.subpixel_joint_model.rotations)
          )
    else:
      pass; "Subpixel corrections: none used"

    half_mosaicity_rad = (self.inputai.getMosaicity()/2.) * math.pi/180.
    OO.ucbp3.set_mosaicity(half_mosaicity_rad)
    OO.ucbp3.set_bandpass(OO.central_wavelength_ang - 0.000001, OO.central_wavelength_ang + 0.000001)
    OO.ucbp3.set_domain_size(280. * 17.) # for Holton psI simulation; probably doesn't detract from general case
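
The subpixel handling above swaps the two components of every consecutive pair in the flat translation list T before passing it to set_subpixel(). A standalone sketch of that reordering:

def swap_pairs(T):
  # Swap each (a, b) pair in a flat [a0, b0, a1, b1, ...] list,
  # as done for the joint-refined subpixel translations above.
  resorted = list(T)
  for tt in range(0, len(T), 2):
    resorted[tt], resorted[tt + 1] = T[tt + 1], T[tt]
  return resorted

assert swap_pairs([1.0, 2.0, 3.0, 4.0]) == [2.0, 1.0, 4.0, 3.0]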
Example No. 43
0
        exit()
    if pixel_size_mm is None:
        print "Please specify pixel size (eg. pixel_size_mm=0.079346)"
        exit()
    return data, hklrefin, pixel_size_mm, target_unit_cell, target_space_group, target_anomalous_flag, flag_plot, d_min, d_max, n_residues


if (__name__ == "__main__"):
    cc_bin_low_thres = 0.25
    beam_thres = 0.5
    uc_tol = 3
    # 0. read input parameters and frames (pickle files)
    data, hklrefin, pixel_size_mm, target_unit_cell, \
      target_space_group, target_anomalous_flag, flag_plot, d_min, d_max, n_residues = read_input(args = sys.argv[1:])
    frame_files = read_pickles(data)
    xbeam_set = flex.double()
    ybeam_set = flex.double()
    sys_abs_set = []
    sys_abs_all = flex.double()
    cc_bin_low_set = flex.double()
    cc_bins_set = []
    d_bins_set = []
    oodsqr_bins_set = []
    flag_good_unit_cell_set = []
    print 'Summary of integration pickles:'
    print '(image file, min. res., max. res, beamx, beamy, n_refl, cciso, <cciso_bin>, a, b, c, mosaicity, residual, dd_mm, wavelength, good_cell?, G, B)'
    uc_a = flex.double()
    uc_b = flex.double()
    uc_c = flex.double()
    dd_mm = flex.double()
    wavelength_set = flex.double()
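
The read_input() call above evidently consumes key=value tokens from the command line (e.g. pixel_size_mm=0.079346, as its own error message suggests). The real parser is not shown here, so this is an illustrative sketch only:

def parse_key_value_args(args):
  # Illustrative only: split "key=value" tokens into a dict,
  # leaving type conversion to the caller.
  parsed = {}
  for arg in args:
    if '=' in arg:
      key, value = arg.split('=', 1)
      parsed[key] = value
  return parsed

assert parse_key_value_args(['pixel_size_mm=0.079346']) == \
  {'pixel_size_mm': '0.079346'}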
Example No. 44
0
        def __init__(pfh):
          super(per_frame_helper, pfh).__init__(n_parameters=2)

          pfh.x_0 = flex.double((0.,0.))
          pfh.restart()
Example No. 45
0
 def __init__(self,
              u_cart,
              u_iso=None,
              use_u_aniso=None,
              sites_cart=None,
              adp_similarity_proxies=None,
              rigid_bond_proxies=None,
              isotropic_adp_proxies=None,
              compute_gradients=True,
              gradients_aniso_cart=None,
              gradients_iso=None,
              disable_asu_cache=False,
              normalization=False):
     adopt_init_args(self, locals())
     self.number_of_restraints = 0
     self.residual_sum = 0
     self.normalization_factor = None
     if (adp_similarity_proxies is not None):
         assert u_iso is not None and use_u_aniso is not None
     if (rigid_bond_proxies is not None): assert sites_cart is not None
     if (sites_cart is not None): assert sites_cart.size() == u_cart.size()
     if (u_iso is not None): assert u_iso.size() == u_cart.size()
     if (use_u_aniso is not None):
         assert use_u_aniso.size() == u_cart.size()
     if (compute_gradients):
         if (self.gradients_aniso_cart is None):
             self.gradients_aniso_cart = flex.sym_mat3_double(
                 sites_cart.size(), [0, 0, 0, 0, 0, 0])
         else:
             assert self.gradients_aniso_cart.size() == sites_cart.size()
         if (u_iso is not None and self.gradients_iso is None):
             self.gradients_iso = flex.double(sites_cart.size(), 0)
         elif (u_iso is not None):
             assert self.gradients_iso.size() == sites_cart.size()
     if (adp_similarity_proxies is None):
         self.n_adp_similarity_proxies = None
         self.adp_similarity_residual_sum = 0
     else:
         self.n_adp_similarity_proxies = len(adp_similarity_proxies)
         self.adp_similarity_residual_sum = adp_restraints.adp_similarity_residual_sum(
             u_cart=u_cart,
             u_iso=u_iso,
             use_u_aniso=use_u_aniso,
             proxies=adp_similarity_proxies,
             gradients_aniso_cart=self.gradients_aniso_cart,
             gradients_iso=self.gradients_iso)
         self.number_of_restraints += 6 * self.n_adp_similarity_proxies
         self.residual_sum += self.adp_similarity_residual_sum
     if (rigid_bond_proxies is None):
         self.n_rigid_bond_proxies = None
         self.rigid_bond_residual_sum = 0
     else:
         self.n_rigid_bond_proxies = len(rigid_bond_proxies)
         self.rigid_bond_residual_sum = adp_restraints.rigid_bond_residual_sum(
             sites_cart=sites_cart,
             u_cart=u_cart,
             proxies=rigid_bond_proxies,
             gradients_aniso_cart=self.gradients_aniso_cart)
         self.number_of_restraints += self.n_rigid_bond_proxies
         self.residual_sum += self.rigid_bond_residual_sum
     if (isotropic_adp_proxies is None):
         self.n_isotropic_adp_proxies = None
         self.isotropic_adp_residual_sum = 0
     else:
         self.n_isotropic_adp_proxies = len(isotropic_adp_proxies)
         self.isotropic_adp_residual_sum = adp_restraints.isotropic_adp_residual_sum(
             u_cart=u_cart,
             proxies=isotropic_adp_proxies,
             gradients_aniso_cart=self.gradients_aniso_cart)
         self.number_of_restraints += self.n_isotropic_adp_proxies
         self.residual_sum += self.isotropic_adp_residual_sum
     self.finalize_target_and_gradients()
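
The class above follows a simple accumulation pattern: each restraint type contributes a proxy count (weighted by the number of restraints per proxy) and a residual, both summed into the running totals. A schematic sketch with made-up numbers:

contributions = [
  # (restraint type, number of restraints, residual) -- illustrative values
  ("adp_similarity", 6 * 10, 1.23),  # six restraints per similarity proxy
  ("rigid_bond", 25, 0.45),
  ("isotropic_adp", 10, 0.07),
]
number_of_restraints = sum(n for _, n, _ in contributions)
residual_sum = sum(r for _, _, r in contributions)
assert number_of_restraints == 95 and abs(residual_sum - 1.75) < 1.0e-12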
Example No. 46
0
 def add_miller_array(self,
       miller_array,
       column_root_label,
       column_types=None,
       label_decorator=None):
   assert column_types is None or isinstance(column_types, str)
   if (label_decorator is None):
     label_decorator = globals()["label_decorator"]()
   default_col_types = default_column_types(miller_array=miller_array)
   if (default_col_types is None):
     raise RuntimeError(
       "Conversion of given type of miller_array to MTZ format"
       " is not supported.")
   if (column_types is None):
     column_types = default_col_types
   elif (len(column_types) != len(default_col_types)):
     raise RuntimeError(
       "Invalid MTZ column_types for the given miller_array.")
   self.initialize_hkl_columns()
   if (not miller_array.anomalous_flag()):
     if (default_col_types in ["FQ", "JQ"]):
       self._add_observations(
         data_label=column_root_label,
         sigmas_label=label_decorator.sigmas(column_root_label),
         column_types=column_types,
         indices=miller_array.indices(),
         data=miller_array.data(),
         sigmas=miller_array.sigmas())
     elif (default_col_types == "FP"):
       self._add_complex(
         amplitudes_label=column_root_label,
         phases_label=label_decorator.phases(column_root_label),
         column_types=column_types,
         indices=miller_array.indices(),
         data=miller_array.data())
     elif (default_col_types in ["F", "J"]):
       self.add_column(
         label=column_root_label,
         type=column_types).set_reals(
           miller_indices=miller_array.indices(),
           data=miller_array.data())
     elif (default_col_types == "I"):
       self.add_column(
         label=column_root_label,
         type=column_types).set_reals(
           miller_indices=miller_array.indices(),
           data=miller_array.data().as_double())
     elif (default_col_types == "AAAA"):
       mtz_reflection_indices = self.add_column(
         label=label_decorator.hendrickson_lattman(column_root_label, 0),
         type=column_types[0]).set_reals(
           miller_indices=miller_array.indices(),
           data=miller_array.data().slice(0))
       for i in xrange(1,4):
         self.add_column(
           label=label_decorator.hendrickson_lattman(column_root_label, i),
           type=column_types[i]).set_reals(
             mtz_reflection_indices=mtz_reflection_indices,
             data=miller_array.data().slice(i))
     else:
       raise RuntimeError("Fatal programming error.")
   else:
     asu, matches = miller_array.match_bijvoet_mates()
     if (default_col_types == "FQDQY"):
       _ = matches.pairs_hemisphere_selection
       selpp = _("+")
       selpm = _("-")
       _ = matches.singles_hemisphere_selection
       selsp = _("+")
       selsm = _("-")
       _ = asu.data()
       fp = _.select(selpp)
       fm = _.select(selpm)
       fs = _.select(selsp)
       fs.extend(_.select(selsm))
       # http://www.ccp4.ac.uk/dist/html/mtzMADmod.html
       f = 0.5 * (fp + fm)
       d = fp - fm
       _ = asu.sigmas()
       sp = _.select(selpp)
       sm = _.select(selpm)
       ss = _.select(selsp)
       ss.extend(_.select(selsm))
       sd = flex.sqrt(sp**2 + sm**2)
       sf = 0.5 * sd
       f.extend(fs)
       sf.extend(ss)
       _ = asu.indices()
       hd = _.select(selpp)
       hf = hd.concatenate(_.select(selsp))
       hf.extend(-_.select(selsm))
       isym = flex.double(selpp.size(), 0)       # both F+ and F-
       isym.resize(selpp.size()+selsp.size(), 1) # only F+
       isym.resize(hf.size(), 2)                 # only F-
       isym.set_selected(miller_array.space_group().is_centric(hf) , 0)
       label_group = [
         column_root_label,
         label_decorator.sigmas(column_root_label),
         label_decorator.delta_anomalous(column_root_label),
         label_decorator.delta_anomalous_sigmas(column_root_label),
         label_decorator.delta_anomalous_isym(column_root_label)]
       for i,(mi,data) in enumerate([(hf,f),(hf,sf),(hd,d),(hd,sd),(hf,isym)]):
         self.add_column(
           label=label_group[i],
           type=column_types[i]).set_reals(miller_indices=mi, data=data)
     else:
       for anomalous_sign in ("+","-"):
         sel = matches.pairs_hemisphere_selection(anomalous_sign)
         sel.extend(matches.singles_hemisphere_selection(anomalous_sign))
         if (anomalous_sign == "+"):
           indices = asu.indices().select(sel)
         else:
           indices = -asu.indices().select(sel)
         data = asu.data().select(sel)
         if (default_col_types in ["GL", "KM"]):
           self._add_observations(
             data_label=label_decorator.anomalous(
               column_root_label, anomalous_sign),
             sigmas_label=label_decorator.sigmas(
               column_root_label, anomalous_sign),
             column_types=column_types,
             indices=indices,
             data=data,
             sigmas=asu.sigmas().select(sel))
         elif (default_col_types == "GP"):
           self._add_complex(
             amplitudes_label=label_decorator.anomalous(
               column_root_label, anomalous_sign),
             phases_label=label_decorator.phases(
               column_root_label, anomalous_sign),
             column_types=column_types,
             indices=indices,
             data=data)
         elif (default_col_types in ["G", "K"]):
           self.add_column(
             label=label_decorator.anomalous(
               column_root_label, anomalous_sign),
             type=column_types).set_reals(
               miller_indices=indices,
               data=data)
         elif (default_col_types == "I"):
           self.add_column(
             label=label_decorator.anomalous(
               column_root_label, anomalous_sign),
             type=column_types).set_reals(
               miller_indices=indices,
               data=data.as_double())
         elif (default_col_types == "AAAA"):
           mtz_reflection_indices = self.add_column(
             label=label_decorator.hendrickson_lattman(
               column_root_label, 0, anomalous_sign),
             type=column_types[0]).set_reals(
               miller_indices=indices,
               data=data.slice(0))
           for i in xrange(1,4):
             self.add_column(
               label=label_decorator.hendrickson_lattman(
                 column_root_label, i, anomalous_sign),
               type=column_types[i]).set_reals(
                 mtz_reflection_indices=mtz_reflection_indices,
                 data=data.slice(i))
         else:
           raise RuntimeError("Fatal programming error.")
   return self
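
In practice add_miller_array() is usually reached through miller_array.as_mtz_dataset(), which builds the MTZ object and performs the column bookkeeping shown above. A minimal usage sketch, assuming a small amplitude array with sigmas:

from cctbx import crystal, miller
from cctbx.array_family import flex

cs = crystal.symmetry(unit_cell=(10, 10, 10, 90, 90, 90),
                      space_group_symbol="P1")
ms = miller.set(cs, flex.miller_index([(1, 0, 0), (0, 1, 0)]),
                anomalous_flag=False)
ma = ms.array(data=flex.double([2.0, 3.0]),
              sigmas=flex.double([0.1, 0.2]))
ma.set_observation_type_xray_amplitude()
# amplitudes + sigmas map to the "FQ" column-type branch exercised above
mtz_dataset = ma.as_mtz_dataset(column_root_label="FOBS")
mtz_dataset.mtz_object().write(file_name="sketch.mtz")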
Example No. 47
0
 def plot_stats(self, results, iparams):
     #retrieve stats from results and plot them
     if iparams.flag_plot or iparams.flag_output_verbose:
         #for plotting set n_bins = 5 to avoid empty bin
         n_bins_plot = 5
         #get expected f^2
         try:
             mxh = mx_handler()
             asu_contents = mxh.get_asu_contents(iparams.n_residues)
             observations_as_f = results[0].observations.as_amplitude_array(
             )
             binner_template_asu = observations_as_f.setup_binner(
                 n_bins=n_bins_plot)
             wp = statistics.wilson_plot(observations_as_f,
                                         asu_contents,
                                         e_statistics=True)
             expected_f_sq = wp.expected_f_sq
             mean_stol_sq = wp.mean_stol_sq
         except Exception:
             expected_f_sq = flex.double([0] * n_bins_plot)
             mean_stol_sq = flex.double(range(n_bins_plot))
             print "Warning: Wilson plot calculation in plot stats failed."
         #setup list
         params_array = np.array([[pres.R_init, pres.R_final, pres.R_xy_init, pres.R_xy_final, \
             pres.G, pres.B, pres.rotx*180/math.pi, pres.roty*180/math.pi, \
             pres.ry, pres.rz, pres.r0, pres.re, pres.voigt_nu, \
             pres.uc_params[0], pres.uc_params[1], pres.uc_params[2], \
             pres.uc_params[3], pres.uc_params[4], pres.uc_params[5], \
             pres.CC_final, pres.pickle_filename] for pres in results])
         params = ['Rinit','Rfinal','Rxyinit', 'Rxyfinal', \
             'G','B','rot_x','rot_y','gamma_y','gamma_z','gamma_0','gamma_e','voigtnu' , \
             'a','b','c','alpha','beta','gamma','CC','Filename']
     #keep parameter history if verbose is selected
     if iparams.flag_output_verbose:
         fileseq_list = flex.int()
         for file_in in os.listdir(iparams.run_no):
             if file_in.endswith('.paramhist'):
                 file_split = file_in.split('.')
                 fileseq_list.append(int(file_split[0]))
         if len(fileseq_list) == 0:
             new_fileseq = 0
         else:
             new_fileseq = flex.max(fileseq_list) + 1
         newfile_name = str(new_fileseq) + '.paramhist'
         txt_out_verbose = '\n'.join([' '.join(p) for p in params_array])
         f = open(iparams.run_no + '/' + newfile_name, 'w')
         f.write(txt_out_verbose)
         f.close()
     #plotting
     if iparams.flag_plot:
         n_rows = 3
         n_cols = int(math.ceil(len(params) / n_rows))
         num_bins = 10
         for i in xrange(len(params) - 1):
            tmp_params = params_array[:, i].astype(float)
             plt.subplot(n_rows, n_cols, i + 1)
             plt.hist(tmp_params,
                      num_bins,
                      normed=0,
                      facecolor='green',
                      alpha=0.5)
             plt.ylabel('Frequencies')
             plt.title(params[i] + '\nmu %5.1f med %5.1f sigma %5.1f' %
                       (np.mean(tmp_params), np.median(tmp_params),
                        np.std(tmp_params)))
         plt.show()
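
A stripped-down sketch of the histogram grid drawn above, using synthetic numpy data in place of post-refinement results (note that normed=0 dates the original to an older matplotlib; current releases spell it density=False):

import numpy as np
import matplotlib.pyplot as plt

data = {'G': np.random.normal(1.0, 0.1, 200),
        'B': np.random.normal(10.0, 2.0, 200)}
for i, (name, values) in enumerate(data.items()):
    plt.subplot(1, len(data), i + 1)
    plt.hist(values, 10, density=False, facecolor='green', alpha=0.5)
    plt.ylabel('Frequencies')
    plt.title('%s\nmu %5.1f med %5.1f sigma %5.1f' %
              (name, np.mean(values), np.median(values), np.std(values)))
plt.show()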
Example No. 48
0
 def __init__(self,
              pdb_hierarchy,
              crystal_symmetry,
              angular_difference_threshold_deg=5.,
              sequence_identity_threshold=90.,
              quiet=False):
     h = pdb_hierarchy
     superposition_threshold = 2 * sequence_identity_threshold - 100.
     n_atoms_all = h.atoms_size()
     s_str = "altloc ' ' and (protein or nucleotide)"
     h = h.select(h.atom_selection_cache().selection(s_str))
     h1 = iotbx.pdb.hierarchy.root()
     h1.append_model(h.models()[0].detached_copy())
     unit_cell = crystal_symmetry.unit_cell()
     result = {}
     if not quiet:
         print("Find groups of chains related by translational NCS")
     # double loop over chains to find matching pairs related by pure translation
     for c1 in h1.chains():
         c1.parent().remove_chain(c1)
         nchains = len(h1.models()[0].chains())
         if ([c1.is_protein(), c1.is_na()].count(True) == 0): continue
         r1 = list(c1.residues())
         c1_seq = "".join(c1.as_sequence())
         sc_1_tmp = c1.atoms().extract_xyz()
         h1_p1 = h1.expand_to_p1(crystal_symmetry=crystal_symmetry)
         for (ii, c2) in enumerate(h1_p1.chains()):
             orig_c2 = h1.models()[0].chains()[ii % nchains]
             r2 = list(c2.residues())
             c2_seq = "".join(c2.as_sequence())
             sites_cart_1, sites_cart_2 = None, None
             sc_2_tmp = c2.atoms().extract_xyz()
             # chains are identical
             if (c1_seq == c2_seq and sc_1_tmp.size() == sc_2_tmp.size()):
                 sites_cart_1 = sc_1_tmp
                 sites_cart_2 = sc_2_tmp
                 p_identity = 100.
             # chains are not identical, do alignment
             else:
                 align_obj = mmtbx.alignment.align(seq_a=c1_seq,
                                                   seq_b=c2_seq)
                 alignment = align_obj.extract_alignment()
                 matches = alignment.matches()
                 equal = matches.count("|")
                 total = len(alignment.a) - alignment.a.count("-")
                 p_identity = 100. * equal / max(1, total)
                 if (p_identity > superposition_threshold):
                     sites_cart_1 = flex.vec3_double()
                     sites_cart_2 = flex.vec3_double()
                     for i1, i2, match in zip(alignment.i_seqs_a,
                                              alignment.i_seqs_b, matches):
                         if (i1 is not None and i2 is not None
                                 and match == "|"):
                             r1i, r2i = r1[i1], r2[i2]
                             assert r1i.resname == r2i.resname, [
                                 r1i.resname, r2i.resname, i1, i2
                             ]
                             for a1 in r1i.atoms():
                                 for a2 in r2i.atoms():
                                     if (a1.name == a2.name):
                                         sites_cart_1.append(a1.xyz)
                                         sites_cart_2.append(a2.xyz)
                                         break
             # superpose two sequence-aligned chains
             if ([sites_cart_1, sites_cart_2].count(None) == 0):
                 lsq_fit_obj = superpose.least_squares_fit(
                     reference_sites=sites_cart_1, other_sites=sites_cart_2)
                 angle = lsq_fit_obj.r.rotation_angle()
                 t_frac = unit_cell.fractionalize(
                     (sites_cart_1 - sites_cart_2).mean())
                 t_frac = [math.modf(t)[0]
                           for t in t_frac]  # put into [-1,1]
                 radius = flex.sum(
                     flex.sqrt((sites_cart_1 - sites_cart_1.mean()
                                ).dot())) / sites_cart_1.size() * 4. / 3.
                 fracscat = min(c1.atoms_size(),
                                c2.atoms_size()) / n_atoms_all
                 result.setdefault(frozenset([c1, orig_c2]), []).append([
                     p_identity,
                     [lsq_fit_obj.r, t_frac, angle, radius, fracscat]
                 ])
             else:
                 result.setdefault(frozenset([c1, orig_c2]),
                                   []).append([p_identity, None])
     # Build graph
     g = graph.adjacency_list()
     vertex_handle = {}
     for key in result:
         seqid = result[key][0][0]
         sup = min(result[key],
                   key=lambda s: 0 if s[1] is None else s[1][2])[1]
         result[key] = [seqid, sup]
         if ((seqid > sequence_identity_threshold)
                 and (sup[2] < angular_difference_threshold_deg)):
             (c1, c2) = key
             if (c1 not in vertex_handle):
                 vertex_handle[c1] = g.add_vertex(label=c1)
             if (c2 not in vertex_handle):
                 vertex_handle[c2] = g.add_vertex(label=c2)
             g.add_edge(vertex1=vertex_handle[c1],
                        vertex2=vertex_handle[c2])
     # Do connected component analysis and compose final tNCS pairs object
     components = connected_component_algorithm.connected_components(g)
     import itertools
     self.ncs_pairs = []
     self.tncsresults = [0, "", [], 0.0]
     for (i, group) in enumerate(components):
         chains = [g.vertex_label(vertex=v) for v in group]
         fracscats = []
         radii = []
         for pair in itertools.combinations(chains, 2):
             sup = result[frozenset(pair)][1]
             fracscats.append(sup[-1])
             radii.append(sup[-2])
         fs = sum(fracscats) / len(fracscats)
         self.tncsresults[3] = fs  # store fracscat in array
         rad = sum(radii) / len(radii)
         #import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
         maxorder = 1
         vectors = []
         previous_id = next(itertools.combinations(chains, 2))[0].id
         for pair in itertools.combinations(chains, 2):
             sup = result[frozenset(pair)][1]
             ncs_pair = ext.pair(
                 r=sup[0],
                 t=sup[1],
                 radius=rad,
                 radius_estimate=rad,
                 fracscat=fs,
                 rho_mn=flex.double(
                 ),  # rho_mn undefined, needs to be set later
                 id=i)
             self.ncs_pairs.append(ncs_pair)
             # show tNCS pairs in group
             fmt = "group %d chains %s <> %s angle: %4.2f trans.vect.: (%s) fracscat: %5.3f"
             t = ",".join([("%6.3f" % t_).strip() for t_ in sup[1]]).strip()
             if not quiet:
                 print(fmt % (i, pair[0].id, pair[1].id, sup[2], t, fs))
             if pair[0].id == previous_id:
                 maxorder += 1
                 orthoxyz = unit_cell.orthogonalize(sup[1])
                 vectors.append((sup[1], orthoxyz, sup[2]))
             else:
                 previous_id = pair[0].id
                 maxorder = 1
                 vectors = []
             if maxorder > self.tncsresults[0]:
                 self.tncsresults[0] = maxorder
                 self.tncsresults[1] = previous_id
                 self.tncsresults[2] = vectors
     if not quiet:
         print("Largest TNCS order, peptide chain, fracvector, orthvector, angle, fracscat = ", \
          str(self.tncsresults))
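
The sequence-identity test buried in the chain double loop above can be isolated as follows, using the same mmtbx.alignment calls; the example sequences and the expected value (~87.5 for one mismatch in eight residues) assume a gapless alignment:

import mmtbx.alignment

def percent_identity(seq_a, seq_b):
    align_obj = mmtbx.alignment.align(seq_a=seq_a, seq_b=seq_b)
    alignment = align_obj.extract_alignment()
    matches = alignment.matches()
    equal = matches.count("|")
    total = len(alignment.a) - alignment.a.count("-")
    return 100. * equal / max(1, total)

print(percent_identity("GNDANSAW", "GNDSNSAW"))  # ~87.5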
Example No. 49
0
def exercise_miller_arrays_as_cif_block():
    from iotbx.cif import reader
    cif_model = reader(input_string=cif_miller_array,
                       builder=cif.builders.cif_model_builder()).model()
    ma_builder = cif.builders.miller_array_builder(cif_model['global'])
    ma1 = ma_builder.arrays()['_refln_F_squared_meas']
    mas_as_cif_block = cif.miller_arrays_as_cif_block(ma1,
                                                      array_type='meas',
                                                      format="corecif")
    mas_as_cif_block.add_miller_array(
        ma1.array(data=flex.complex_double([1 - 1j] * ma1.size())),
        array_type='calc')
    mas_as_cif_block.add_miller_array(
        ma1.array(data=flex.complex_double([1 - 2j] * ma1.size())),
        column_names=['_refln_A_calc', '_refln_B_calc'])
    for key in ('_refln_F_squared_meas', '_refln_F_squared_sigma',
                '_refln_F_calc', '_refln_phase_calc', '_refln_A_calc',
                '_refln_B_calc'):
        assert (key in mas_as_cif_block.cif_block.keys()), key
    #
    mas_as_cif_block = cif.miller_arrays_as_cif_block(ma1,
                                                      array_type='meas',
                                                      format="mmcif")
    mas_as_cif_block.add_miller_array(
        ma1.array(data=flex.complex_double([1 - 1j] * ma1.size())),
        array_type='calc')
    for key in ('_refln.F_squared_meas', '_refln.F_squared_sigma',
                '_refln.F_calc', '_refln.phase_calc',
                '_space_group_symop.operation_xyz', '_cell.length_a',
                '_refln.index_h'):
        assert key in mas_as_cif_block.cif_block.keys()
    #
    mas_as_cif_block = cif.miller_arrays_as_cif_block(
        ma1,
        column_names=[
            '_diffrn_refln_intensity_net', '_diffrn_refln_intensity_sigma'
        ],
        miller_index_prefix='_diffrn_refln')
    mas_as_cif_block.add_miller_array(
        ma1.array(data=flex.std_string(ma1.size(), 'om')),
        column_name='_diffrn_refln_intensity_u')
    for key in ('_diffrn_refln_intensity_net', '_diffrn_refln_intensity_sigma',
                '_diffrn_refln_intensity_u'):
        assert key in list(mas_as_cif_block.cif_block.keys())
    #
    try:
        reader(input_string=cif_global)
    except CifParserError as e:
        pass
    else:
        raise Exception_expected
    cif_model = reader(input_string=cif_global, strict=False).model()
    assert not show_diff(
        str(cif_model), """\
data_1
_c                                3
_d                                4
""")
    # exercise adding miller arrays with non-matching indices
    cs = crystal.symmetry(unit_cell=uctbx.unit_cell((10, 10, 10, 90, 90, 90)),
                          space_group_info=sgtbx.space_group_info(symbol="P1"))
    mi = flex.miller_index(((1, 0, 0), (1, 2, 3), (2, 3, 4)))
    ms1 = miller.set(cs, mi)
    ma1 = miller.array(ms1, data=flex.double((1, 2, 3)))
    mas_as_cif_block = cif.miller_arrays_as_cif_block(
        ma1, column_name="_refln.F_meas_au")
    ms2 = miller.set(cs, mi[:2])
    ma2 = miller.array(ms2, data=flex.complex_double([1 - 2j] * ms2.size()))
    mas_as_cif_block.add_miller_array(ma2,
                                      column_names=("_refln.F_calc_au",
                                                    "_refln.phase_calc")),
    ms3 = miller.set(cs, flex.miller_index(((1, 0, 0), (5, 6, 7), (2, 3, 4))))
    ma3 = miller.array(ms3, data=flex.double((4, 5, 6)))
    mas_as_cif_block.add_miller_array(ma3, column_name="_refln.F_squared_meas")
    ms4 = miller.set(
        cs,
        flex.miller_index(
            ((1, 2, 3), (5, 6, 7), (1, 1, 1), (1, 0, 0), (2, 3, 4))))
    ma4 = ms4.d_spacings()
    mas_as_cif_block.add_miller_array(ma4, column_name="_refln.d_spacing")
    # extract arrays from cif block and make sure we get back what we started with
    arrays = cif.builders.miller_array_builder(
        mas_as_cif_block.cif_block).arrays()
    recycled_arrays = (arrays['_refln.F_meas_au'], arrays['_refln.F_calc_au'],
                       arrays['_refln.F_squared_meas'],
                       arrays['_refln.d_spacing'])
    for orig, recycled in zip((ma1, ma2, ma3, ma4), recycled_arrays):
        assert orig.size() == recycled.size()
        recycled = recycled.customized_copy(
            anomalous_flag=orig.anomalous_flag())
        orig, recycled = orig.common_sets(recycled)
        assert orig.indices().all_eq(recycled.indices())
        assert approx_equal(orig.data(), recycled.data(), eps=1e-5)
    #
    cif_model = reader(input_string=r3adrsf,
                       builder=cif.builders.cif_model_builder()).model()
    cs = cif.builders.crystal_symmetry_builder(
        cif_model["r3adrsf"]).crystal_symmetry

    ma_builder = cif.builders.miller_array_builder(
        cif_model['r3adrAsf'],
        base_array_info=miller.array_info(crystal_symmetry_from_file=cs))
    miller_arrays = list(ma_builder.arrays().values())
    assert len(miller_arrays) == 4
    mas_as_cif_block = cif.miller_arrays_as_cif_block(
        miller_arrays[0].map_to_asu(),
        column_names=miller_arrays[0].info().labels,
        format="corecif")
    for array in miller_arrays[1:]:
        labels = array.info().labels
        if len(labels) > 1:
            for label in labels:
                if label.startswith("wavelength_id"):
                    labels.remove(label)
        mas_as_cif_block.add_miller_array(array=array.map_to_asu(),
                                          column_names=array.info().labels)
    s = StringIO()
    print(mas_as_cif_block.refln_loop, file=s)
    assert not show_diff(
        s.getvalue(), """\
loop_
  _refln_index_h
  _refln_index_k
  _refln_index_l
  _refln.crystal_id
  _refln.scale_group_code
  _refln.wavelength_id
  _refln.pdbx_I_plus
  _refln.pdbx_I_plus_sigma
  _refln.pdbx_I_minus
  _refln.pdbx_I_minus_sigma
  -87  5  46  1  1  3   40.2  40.4    6.7  63.9
  -87  5  45  1  1  3   47.8  29.7   35.1  30.5
  -87  5  44  1  1  3   18.1  33.2    0.5  34.6
  -87  5  43  1  1  3    6.1  45.4   12.9  51.6
  -87  5  42  1  1  3   -6.6  45.6  -15.5  55.8
  -87  7  37  1  1  3    6.3  43.4      ?     ?
  -87  7  36  1  1  3  -67.2  55.4      ?     ?
  -88  2  44  1  1  3      0    -1     35  38.5
  -88  2  43  1  1  3      0    -1   57.4  41.5
  -88  4  45  1  1  3     -1  46.1   -9.1  45.6
  -88  4  44  1  1  3  -19.8  49.2    0.3  34.7
  -88  6  44  1  1  3   -1.8  34.8      ?     ?

""")
Example No. 50
0
 def prepare_output(self, results, iparams, avg_mode):
     if avg_mode == 'average':
         cc_thres = 0
     else:
         cc_thres = iparams.frame_accept_min_cc
     std_filter = iparams.sigma_rejection
     if iparams.flag_weak_anomalous:
         if avg_mode == 'final':
             target_anomalous_flag = iparams.target_anomalous_flag
         else:
             target_anomalous_flag = False
     else:
         target_anomalous_flag = iparams.target_anomalous_flag
     pr_params_mean, pr_params_med, pr_params_std = self.calc_mean_postref_parameters(
         results)
     G_mean, B_mean, ry_mean, rz_mean, re_mean, r0_mean, voigt_nu_mean, rotx_mean, roty_mean, R_mean, R_xy_mean, SE_mean = pr_params_mean
     G_med, B_med, ry_med, rz_med, re_med, r0_med, voigt_nu_med, rotx_med, roty_med, R_med, R_xy_med, SE_med = pr_params_med
     G_std, B_std, ry_std, rz_std, re_std, r0_std, voigt_nu_std, rotx_std, roty_std, R_std, R_xy_std, SE_std = pr_params_std
     #prepare data for merging
     miller_indices_all = flex.miller_index()
     miller_indices_ori_all = flex.miller_index()
     I_all = flex.double()
     sigI_all = flex.double()
     G_all = flex.double()
     B_all = flex.double()
     p_all = flex.double()
     rx_all = flex.double()
     rs_all = flex.double()
     rh_all = flex.double()
     SE_all = flex.double()
     sin_sq_all = flex.double()
     wavelength_all = flex.double()
     detector_distance_set = flex.double()
     R_init_all = flex.double()
     R_final_all = flex.double()
     R_xy_init_all = flex.double()
     R_xy_final_all = flex.double()
     pickle_filename_all = flex.std_string()
     filtered_results = []
     cn_good_frame, cn_bad_frame_SE, cn_bad_frame_uc, cn_bad_frame_cc, cn_bad_frame_G, cn_bad_frame_re = (
         0, 0, 0, 0, 0, 0)
     crystal_orientation_dict = {}
     for pres in results:
         if pres is not None:
             pickle_filepath = pres.pickle_filename.split('/')
             img_filename = pickle_filepath[len(pickle_filepath) - 1]
             flag_pres_ok = True
             #check SE, CC, UC, G, B, gamma_e
             if math.isnan(pres.G):
                 flag_pres_ok = False
             if math.isnan(pres.SE) or np.isinf(pres.SE):
                 flag_pres_ok = False
             if flag_pres_ok and SE_std > 0:
                 if abs(pres.SE - SE_med) / SE_std > std_filter:
                     flag_pres_ok = False
                     cn_bad_frame_SE += 1
             if flag_pres_ok and pres.CC_final < cc_thres:
                 flag_pres_ok = False
                 cn_bad_frame_cc += 1
             if flag_pres_ok:
                 if G_std > 0:
                     if abs(pres.G - G_med) / G_std > std_filter:
                         flag_pres_ok = False
                         cn_bad_frame_G += 1
             if flag_pres_ok:
                 if re_std > 0:
                     if abs(pres.re - re_med) / re_std > std_filter:
                         flag_pres_ok = False
                         cn_bad_frame_re += 1
             if flag_pres_ok and not good_unit_cell(
                     pres.uc_params, iparams, iparams.merge.uc_tolerance):
                 flag_pres_ok = False
                 cn_bad_frame_uc += 1
             data_size = pres.observations.size()
             if flag_pres_ok:
                 cn_good_frame += 1
                 filtered_results.append(pres)
                 R_init_all.append(pres.R_init)
                 R_final_all.append(pres.R_final)
                 R_xy_init_all.append(pres.R_xy_init)
                 R_xy_final_all.append(pres.R_xy_final)
                 miller_indices_all.extend(pres.observations.indices())
                 miller_indices_ori_all.extend(
                     pres.observations_original.indices())
                 I_all.extend(pres.observations.data())
                 sigI_all.extend(pres.observations.sigmas())
                 G_all.extend(flex.double([pres.G] * data_size))
                 B_all.extend(flex.double([pres.B] * data_size))
                 p_all.extend(pres.partiality)
                 rs_all.extend(pres.rs_set)
                 rh_all.extend(pres.rh_set)
                 sin_sq_all.extend(
                     pres.observations.two_theta(wavelength=pres.wavelength)
                     .sin_theta_over_lambda_sq().data())
                 SE_all.extend(flex.double([pres.SE] * data_size))
                 wavelength_all.extend(
                     flex.double([pres.wavelength] * data_size))
                 detector_distance_set.append(pres.detector_distance_mm)
                 pickle_filename_all.extend(
                     flex.std_string([pres.pickle_filename] * data_size))
                 crystal_orientation_dict[
                     pres.pickle_filename] = pres.crystal_orientation
     #plot stats
     self.plot_stats(filtered_results, iparams)
     #write out updated crystal orientation as a pickle file
     if not iparams.flag_hush:
         pickle.dump(crystal_orientation_dict,
                     open(iparams.run_no + '/' + "crystal.o", "wb"),
                     pickle.HIGHEST_PROTOCOL)
     #calculate average unit cell
     uc_mean, uc_med, uc_std = self.calc_mean_unit_cell(filtered_results)
     unit_cell_mean = unit_cell(tuple(uc_mean))
     #recalculate stats for pr parameters
     pr_params_mean, pr_params_med, pr_params_std = self.calc_mean_postref_parameters(
         filtered_results)
     G_mean, B_mean, ry_mean, rz_mean, re_mean, r0_mean, voigt_nu_mean, rotx_mean, roty_mean, R_mean, R_xy_mean, SE_mean = pr_params_mean
     G_med, B_med, ry_med, rz_med, re_med, r0_med, voigt_nu_med, rotx_med, roty_med, R_med, R_xy_med, SE_med = pr_params_med
     G_std, B_std, ry_std, rz_std, re_std, r0_std, voigt_nu_std, rotx_std, roty_std, R_std, R_xy_std, SE_std = pr_params_std
     #from all observations merge them
     crystal_symmetry = crystal.symmetry(
         unit_cell=tuple(uc_mean),
         space_group_symbol=iparams.target_space_group)
     miller_set_all = miller.set(crystal_symmetry=crystal_symmetry,
                                 indices=miller_indices_all,
                                 anomalous_flag=target_anomalous_flag)
     miller_array_all = miller_set_all.array(
         data=I_all, sigmas=sigI_all).set_observation_type_xray_intensity()
     #sort reflections according to asymmetric-unit symmetry hkl
     perm = miller_array_all.sort_permutation(by_value="packed_indices")
     miller_indices_all_sort = miller_array_all.indices().select(perm)
     miller_indices_ori_all_sort = miller_indices_ori_all.select(perm)
     I_obs_all_sort = miller_array_all.data().select(perm)
     sigI_obs_all_sort = miller_array_all.sigmas().select(perm)
     G_all_sort = G_all.select(perm)
     B_all_sort = B_all.select(perm)
     p_all_sort = p_all.select(perm)
     rs_all_sort = rs_all.select(perm)
     wavelength_all_sort = wavelength_all.select(perm)
     sin_sq_all_sort = sin_sq_all.select(perm)
     SE_all_sort = SE_all.select(perm)
     pickle_filename_all_sort = pickle_filename_all.select(perm)
     miller_array_uniq = miller_array_all.merge_equivalents().array(
     ).complete_array(d_min=iparams.merge.d_min, d_max=iparams.merge.d_max)
     matches_uniq = miller.match_multi_indices(
         miller_indices_unique=miller_array_uniq.indices(),
         miller_indices=miller_indices_all_sort)
     pair_0 = flex.int([pair[0] for pair in matches_uniq.pairs()])
     pair_1 = flex.int([pair[1] for pair in matches_uniq.pairs()])
     group_id_list = flex.int(
         [pair_0[pair_1[i]] for i in range(len(matches_uniq.pairs()))])
     tally = Counter()
     for elem in group_id_list:
         tally[elem] += 1
     cn_group = len(tally)
      #prepare txt out stat
     txt_out = 'Summary of refinement and merging\n'
     txt_out += ' No. good frames:          %12.0f\n' % (cn_good_frame)
     txt_out += ' No. bad cc frames:        %12.0f\n' % (cn_bad_frame_cc)
      txt_out += ' No. bad G frames:         %12.0f\n' % (cn_bad_frame_G)
     txt_out += ' No. bad unit cell frames: %12.0f\n' % (cn_bad_frame_uc)
     txt_out += ' No. bad gamma_e frames:   %12.0f\n' % (cn_bad_frame_re)
     txt_out += ' No. bad SE:               %12.0f\n' % (cn_bad_frame_SE)
     txt_out += ' No. observations:         %12.0f\n' % (
         len(I_obs_all_sort))
     txt_out += 'Mean target value (BEFORE: Mean Median (Std.))\n'
     txt_out += ' post-refinement:          %12.2f %12.2f (%9.2f)\n' % (
         np.mean(R_init_all), np.median(R_init_all), np.std(R_init_all))
     txt_out += ' (x,y) restraints:         %12.2f %12.2f (%9.2f)\n' % (
         np.mean(R_xy_init_all), np.median(R_xy_init_all),
         np.std(R_xy_init_all))
     txt_out += 'Mean target value (AFTER: Mean Median (Std.))\n'
     txt_out += ' post-refinement:          %12.2f %12.2f (%9.2f)\n' % (
         np.mean(R_final_all), np.median(R_final_all), np.std(R_final_all))
     txt_out += ' (x,y) restraints:         %12.2f %12.2f (%9.2f)\n' % (
         np.mean(R_xy_final_all), np.median(R_xy_final_all),
         np.std(R_xy_final_all))
     txt_out += ' SE:                       %12.2f %12.2f (%9.2f)\n' % (
         SE_mean, SE_med, SE_std)
     txt_out += ' G:                        %12.3e %12.3e (%9.2e)\n' % (
         G_mean, G_med, G_std)
     txt_out += ' B:                        %12.2f %12.2f (%9.2f)\n' % (
         B_mean, B_med, B_std)
     txt_out += ' Rot.x:                    %12.2f %12.2f (%9.2f)\n' % (
         rotx_mean * 180 / math.pi, rotx_med * 180 / math.pi,
         rotx_std * 180 / math.pi)
     txt_out += ' Rot.y:                    %12.2f %12.2f (%9.2f)\n' % (
         roty_mean * 180 / math.pi, roty_med * 180 / math.pi,
         roty_std * 180 / math.pi)
     txt_out += ' gamma_y:                  %12.5f %12.5f (%9.5f)\n' % (
         ry_mean, ry_med, ry_std)
     txt_out += ' gamma_z:                  %12.5f %12.5f (%9.5f)\n' % (
         rz_mean, rz_med, rz_std)
     txt_out += ' gamma_0:                  %12.5f %12.5f (%9.5f)\n' % (
         r0_mean, r0_med, r0_std)
     txt_out += ' gamma_e:                  %12.5f %12.5f (%9.5f)\n' % (
         re_mean, re_med, re_std)
     txt_out += ' voigt_nu:                 %12.5f %12.5f (%9.5f)\n' % (
         voigt_nu_mean, voigt_nu_med, voigt_nu_std)
     txt_out += ' unit cell\n'
     txt_out += '   a:                      %12.2f %12.2f (%9.2f)\n' % (
         uc_mean[0], uc_med[0], uc_std[0])
     txt_out += '   b:                      %12.2f %12.2f (%9.2f)\n' % (
         uc_mean[1], uc_med[1], uc_std[1])
     txt_out += '   c:                      %12.2f %12.2f (%9.2f)\n' % (
         uc_mean[2], uc_med[2], uc_std[2])
     txt_out += '   alpha:                  %12.2f %12.2f (%9.2f)\n' % (
         uc_mean[3], uc_med[3], uc_std[3])
     txt_out += '   beta:                   %12.2f %12.2f (%9.2f)\n' % (
         uc_mean[4], uc_med[4], uc_std[4])
     txt_out += '   gamma:                  %12.2f %12.2f (%9.2f)\n' % (
         uc_mean[5], uc_med[5], uc_std[5])
      txt_out += 'Parameters from integration (not refined)\n'
     txt_out += '  Wavelength:              %12.5f %12.5f (%9.5f)\n' % (
         np.mean(wavelength_all), np.median(wavelength_all),
         np.std(wavelength_all))
     txt_out += '  Detector distance:       %12.5f %12.5f (%9.5f)\n' % (
         np.mean(detector_distance_set), np.median(detector_distance_set),
         np.std(detector_distance_set))
     txt_out += '* (standard deviation)\n'
     #write out stat. pickle
     if not iparams.flag_hush:
         stat_dict = {"n_frames_good": [cn_good_frame], \
                      "n_frames_bad_cc": [cn_bad_frame_cc], \
                      "n_frames_bad_G": [cn_bad_frame_G], \
                      "n_frames_bad_uc": [cn_bad_frame_uc], \
                      "n_frames_bad_gamma_e": [cn_bad_frame_re], \
                      "n_frames_bad_SE": [cn_bad_frame_SE], \
                      "n_observations": [len(I_obs_all_sort)], \
                      "R_start": [np.mean(R_init_all)], \
                      "R_end": [np.mean(R_final_all)], \
                      "R_xy_start": [np.mean(R_xy_init_all)], \
                      "R_xy_end": [np.mean(R_xy_final_all)], \
                      "mean_gamma_y": [ry_mean], \
                      "std_gamma_y": [ry_std], \
                      "mean_gamma_z": [rz_mean], \
                      "std_gamma_z": [rz_std], \
                      "mean_gamma_0": [r0_mean], \
                      "std_gamma_0": [r0_std], \
                      "mean_gamma_e": [re_mean], \
                      "std_gamma_e": [re_std], \
                      "mean_voigt_nu": [voigt_nu_mean], \
                      "std_voigt_nu": [voigt_nu_std], \
                      "mean_a": [uc_mean[0]], \
                      "std_a": [uc_std[0]], \
                      "mean_b": [uc_mean[1]], \
                      "std_b": [uc_std[1]], \
                      "mean_c": [uc_mean[2]], \
                      "std_c": [uc_std[2]], \
                      "mean_alpha": [uc_mean[3]], \
                      "std_alpha": [uc_std[3]], \
                      "mean_beta": [uc_mean[4]], \
                      "std_beta": [uc_std[4]], \
                      "mean_gamma": [uc_mean[5]], \
                      "std_gamma": [uc_std[5]]}
         self.write_stat_pickle(iparams, stat_dict)
     return cn_group, group_id_list, miller_indices_all_sort, miller_indices_ori_all_sort, \
            I_obs_all_sort, sigI_obs_all_sort,G_all_sort, B_all_sort, \
            p_all_sort, rs_all_sort, wavelength_all_sort, sin_sq_all_sort, SE_all_sort, uc_mean, \
            np.mean(wavelength_all), pickle_filename_all_sort, txt_out
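
Each of the frame filters above applies the same robust rejection rule: keep a frame only when its statistic lies within std_filter standard deviations of the median. A compact sketch:

def accept(value, med, std, std_filter):
  # Mirrors the per-frame tests above; a non-positive std disables the test.
  if std <= 0:
    return True
  return abs(value - med) / std <= std_filter

assert accept(1.2, 1.0, 0.1, 3)      # 2 sigma from the median: kept
assert not accept(1.5, 1.0, 0.1, 3)  # 5 sigma from the median: rejected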
Example No. 51
0
def french_wilson_scale(miller_array,
                        params=None,
                        sigma_iobs_rejection_criterion=None,
                        merge=False,
                        min_bin_size=40,
                        max_bins=60,
                        log=None):
    from cctbx.array_family import flex
    if not miller_array.is_xray_intensity_array():
        raise Sorry("Input array appears to be amplitudes. " +
                    "This method is only appropriate for input intensities.")
    if miller_array.unit_cell() is None:
        raise Sorry(
            "No unit cell information found. Please supply unit cell data.")
    if miller_array.crystal_symmetry() is None:
        raise Sorry("No crystal symmetry information found. Please supply " +
                    "crystal symmetry data.")
    if miller_array.sigmas() is None:
        raise Sorry(
            "Input array does not contain sigma values. " +
            "This method requires input intensities with associated sigmas.")
    if (not miller_array.is_unique_set_under_symmetry()):
        if (merge):
            miller_array = miller_array.merge_equivalents().array()
        else:
            raise Sorry("Unmerged data not allowed - please merge " +
                        "symmetry-equivalent reflections first.")
    if (miller_array.data().all_eq(miller_array.data()[0])):
        # XXX some Scalepack files (and possibly others) crash the routine if this
        # check is not performed.  presumably an HKL2000 bug?
        raise Sorry((
            "The input intensities have uniform values (%g); this is probably "
            + "a bug in one of the data processing and/or conversion programs."
        ) % miller_array.data()[0])
    # Phil defaults are set in master_phil above - they should be kept in sync with the
    # default arguments for this function
    if params and params.max_bins:
        max_bins = params.max_bins
    if params and params.min_bin_size:
        min_bin_size = params.min_bin_size
    if log is None:
        log = sys.stdout
    if (sigma_iobs_rejection_criterion is None):
        sigma_iobs_rejection_criterion = -4.0
    elif (sigma_iobs_rejection_criterion == 0.0):
        libtbx.warn(
            "For French and Wilson scaling, sigma_iobs_rejection_criterion " +
            "must be a value between -4.0 and -1.0, or None. " +
            "Setting sigma_iobs_rejection_criteriont to -4.0.")
        sigma_iobs_rejection_criterion = -4.0
    elif ((sigma_iobs_rejection_criterion < -4.0)
          or (sigma_iobs_rejection_criterion > -1.0)):
        raise Sorry(
            "For French and Wilson scaling, sigma_iobs_rejection_criterion " +
            "must be a value between -4.0 and -1.0, or None.")
    rejected = []
    make_sub_header("Scaling input intensities via French-Wilson Method",
                    out=log)
    print("Trying %d bins..." % max_bins, file=log)
    try:
        f_w_binning(miller_array=miller_array,
                    max_bins=max_bins,
                    min_bin_size=min_bin_size,
                    log=log)
    except ValueError:
        try:
            miller_array.setup_binner_counting_sorted(reflections_per_bin=5)
        except AssertionError:
            print(
                "Too few reflections for accurate binning.\n"
                "** Skipping French-Wilson scaling **",
                file=log)
            return None
    print("Number of bins = %d" % miller_array.binner().n_bins_used(),
          file=log)
    new_I = flex.double()
    new_sigma_I = flex.double()
    new_F = flex.double()
    new_sigma_F = flex.double()
    new_indices = flex.miller_index()
    bin_mean_intensities = miller_array.mean(use_binning=True).data
    d_mean_intensities = \
      calculate_mean_intensities(miller_array=miller_array, log=log)
    assert len(d_mean_intensities) == miller_array.data().size()
    for i_bin in miller_array.binner().range_all():
        sel = miller_array.binner().selection(i_bin)
        bin = miller_array.select(sel)
        if bin.size() > 0:
            #bin_mean_intensity = bin_mean_intensities[i_bin]
            cen = bin.select_centric()
            acen = bin.select_acentric()
            for I, sigma_I, index in zip(cen.data(), cen.sigmas(),
                                         cen.indices()):
                mean_intensity = d_mean_intensities[index]
                if (mean_intensity == 0):
                    # XXX is this the appropriate way to handle this?
                    rejected.append((index, I, sigma_I, mean_intensity))
                elif (sigma_I <= 0):
                    if I <= 0 or sigma_I < 0:
                        rejected.append((index, I, sigma_I, mean_intensity))
                        continue
                    else:
                        J = I
                        sigma_J = sigma_I
                        F = math.sqrt(I)
                        sigma_F = sigma_I
                else:
                    J, sigma_J, F, sigma_F = fw_centric(
                                               I=I,
                                               sigma_I=sigma_I,
                                               mean_intensity=mean_intensity,
                                               sigma_iobs_rejection_criterion=\
                                               sigma_iobs_rejection_criterion)
                if J >= 0:
                    assert sigma_J >= 0 and F >= 0 and sigma_F >= 0
                    new_I.append(J)
                    new_indices.append(index)
                    new_sigma_I.append(sigma_J)
                    new_F.append(F)
                    new_sigma_F.append(sigma_F)
                else:
                    rejected.append((index, I, sigma_I, mean_intensity))
            for I, sigma_I, index in zip(acen.data(), acen.sigmas(),
                                         acen.indices()):
                mean_intensity = d_mean_intensities[index]
                if (mean_intensity == 0):
                    rejected.append((index, I, sigma_I, mean_intensity))
                elif (sigma_I <= 0):
                    if I <= 0 or sigma_I < 0:
                        rejected.append((index, I, sigma_I, mean_intensity))
                        continue
                    else:
                        J = I
                        sigma_J = sigma_I
                        F = math.sqrt(I)
                        sigma_F = sigma_I
                else:
                    J, sigma_J, F, sigma_F = fw_acentric(
                                               I=I,
                                               sigma_I=sigma_I,
                                               mean_intensity=mean_intensity,
                                               sigma_iobs_rejection_criterion=\
                                               sigma_iobs_rejection_criterion)
                if J >= 0:
                    assert sigma_J >= 0 and F >= 0 and sigma_F >= 0
                    new_I.append(J)
                    new_indices.append(index)
                    new_sigma_I.append(sigma_J)
                    new_F.append(F)
                    new_sigma_F.append(sigma_F)
                else:
                    rejected.append((index, I, sigma_I, mean_intensity))
    f_obs = miller_array.customized_copy(indices=new_indices,
                                         data=new_F,
                                         sigmas=new_sigma_F)
    f_obs.set_observation_type_xray_amplitude()
    show_rejected_summary(rejected=rejected, log=log)
    return f_obs
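
A hedged usage sketch of the function defined above: feed it a merged X-ray intensity array with sigmas and it returns amplitudes, or None when binning fails. Here i_obs stands for any such array (e.g. from an MTZ reader) and is not defined in this snippet:

import sys

f_obs = french_wilson_scale(miller_array=i_obs, log=sys.stdout)
if f_obs is not None:
    assert f_obs.is_xray_amplitude_array()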
Example No. 52
0
def exercise_mmcif_structure_factors():
    miller_arrays = cif.reader(input_string=r3adrsf).as_miller_arrays()
    assert len(miller_arrays) == 16
    hl_coeffs = find_miller_array_from_labels(
        miller_arrays, ','.join([
            'scale_group_code=1', 'crystal_id=2', 'wavelength_id=3',
            '_refln.pdbx_HL_A_iso', '_refln.pdbx_HL_B_iso',
            '_refln.pdbx_HL_C_iso', '_refln.pdbx_HL_D_iso'
        ]))
    assert hl_coeffs.is_hendrickson_lattman_array()
    assert hl_coeffs.size() == 2
    mas_as_cif_block = cif.miller_arrays_as_cif_block(
        hl_coeffs,
        column_names=('_refln.pdbx_HL_A_iso', '_refln.pdbx_HL_B_iso',
                      '_refln.pdbx_HL_C_iso', '_refln.pdbx_HL_D_iso'))
    abcd = []
    for key in ('_refln.pdbx_HL_A_iso', '_refln.pdbx_HL_B_iso',
                '_refln.pdbx_HL_C_iso', '_refln.pdbx_HL_D_iso'):
        assert key in list(mas_as_cif_block.cif_block.keys())
        abcd.append(flex.double(mas_as_cif_block.cif_block[key]))
    hl_coeffs_from_cif_block = flex.hendrickson_lattman(*abcd)
    assert approx_equal(hl_coeffs.data(), hl_coeffs_from_cif_block)
    f_meas_au = find_miller_array_from_labels(
        miller_arrays, ','.join([
            'scale_group_code=1', 'crystal_id=1', 'wavelength_id=1',
            '_refln.F_meas_au', '_refln.F_meas_sigma_au'
        ]))
    assert f_meas_au.is_xray_amplitude_array()
    assert f_meas_au.size() == 5
    assert f_meas_au.sigmas() is not None
    assert (f_meas_au.space_group_info().symbol_and_number()
            == 'C 1 2 1 (No. 5)')
    assert approx_equal(f_meas_au.unit_cell().parameters(),
                        (163.97, 45.23, 110.89, 90.0, 131.64, 90.0))
    pdbx_I_plus_minus = find_miller_array_from_labels(
        miller_arrays, ','.join([
            '_refln.pdbx_I_plus', '_refln.pdbx_I_plus_sigma',
            '_refln.pdbx_I_minus', '_refln.pdbx_I_minus_sigma'
        ]))
    assert pdbx_I_plus_minus.is_xray_intensity_array()
    assert pdbx_I_plus_minus.anomalous_flag()
    assert pdbx_I_plus_minus.size() == 21
    assert pdbx_I_plus_minus.unit_cell() is None  # no symmetry information in
    assert pdbx_I_plus_minus.space_group() is None  # this CIF block
    #
    miller_arrays = cif.reader(input_string=r3ad7sf).as_miller_arrays()
    assert len(miller_arrays) == 11
    f_calc = find_miller_array_from_labels(
        miller_arrays, ','.join([
            'crystal_id=2', 'wavelength_id=1', '_refln.F_calc',
            '_refln.phase_calc'
        ]))
    assert f_calc.is_complex_array()
    assert f_calc.size() == 4
    #
    miller_arrays = cif.reader(
        input_string=integer_observations).as_miller_arrays()
    assert len(miller_arrays) == 2
    assert isinstance(miller_arrays[0].data(), flex.double)
    assert isinstance(miller_arrays[0].sigmas(), flex.double)
    #
    miller_arrays = cif.reader(input_string=r3v56sf).as_miller_arrays()
    assert len(miller_arrays) == 2
    for ma in miller_arrays:
        assert ma.is_complex_array()
    assert miller_arrays[0].info().labels == [
        'r3v56sf', '_refln.pdbx_DELFWT', '_refln.pdbx_DELPHWT'
    ]
    assert miller_arrays[1].info().labels == [
        'r3v56sf', '_refln.pdbx_FWT', '_refln.pdbx_PHWT'
    ]
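
For readers unfamiliar with the entry point exercised above, here is a minimal sketch of cif.reader on an inline reflection block. The two-reflection CIF text is invented for illustration and is not one of the test inputs (r3adrsf, r3ad7sf, r3v56sf).

from iotbx import cif

demo_cif = """\
data_demo
loop_
_refln.index_h
_refln.index_k
_refln.index_l
_refln.F_meas_au
_refln.F_meas_sigma_au
1 0 0 12.3 0.4
0 1 1  8.7 0.3
"""

miller_arrays = cif.reader(input_string=demo_cif).as_miller_arrays()
for ma in miller_arrays:
    # with no cell/symmetry items in the block, unit_cell() and
    # space_group() are None, as asserted for pdbx_I_plus_minus above
    print(ma.info().labels, ma.size(), ma.unit_cell())
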
Exemplo n.º 53
0
    def __init__(self, **kwargs):
        group_args.__init__(self, **kwargs)
        # required kwargs: Dij (distance matrix) and d_c (distance cutoff)
        P = Profiler("2. calculate rho density")
        print("finished Dij, now calculating rho_i, the density")
        from xfel.clustering import Rodriguez_Laio_clustering_2014
        # alternative clustering algorithms: see http://scikit-learn.org/stable/modules/clustering.html
        # also see https://cran.r-project.org/web/packages/dbscan/vignettes/hdbscan.html
        # see also https://en.wikipedia.org/wiki/Hausdorff_dimension

        R = Rodriguez_Laio_clustering_2014(distance_matrix=self.Dij,
                                           d_c=self.d_c)
        self.rho = rho = R.get_rho()
        ave_rho = flex.mean(rho.as_double())
        NN = self.Dij.focus()[0]
        print("The average rho_i is %5.2f, or %4.1f%%" %
              (ave_rho, 100 * ave_rho / NN))
        i_max = flex.max_index(rho)

        P = Profiler("3.transition")
        print("the index with the highest density is %d" % (i_max))
        delta_i_max = flex.max(
            flex.double([self.Dij[i_max, j] for j in range(NN)]))
        print("delta_i_max", delta_i_max)
        rho_order = flex.sort_permutation(rho, reverse=True)
        rho_order_list = list(rho_order)

        P = Profiler("4. delta")
        self.delta = delta = R.get_delta(rho_order=rho_order,
                                         delta_i_max=delta_i_max)

        P = Profiler("5. find cluster maxima")
        #---- Now hunting for clusters --- lots of room for improvement (or simplification) here!
        cluster_id = flex.int(NN, -1)  # default -1 means no cluster
        delta_order = flex.sort_permutation(delta, reverse=True)
        N_CLUST = 10  # consider at most 10 points as candidate cluster centers
        #MAX_PERCENTILE_DELTA = 0.99 # cluster centers would also have to be in the top percentile of delta
        MAX_PERCENTILE_RHO = 0.99  # cluster centers have to be within the top 99% of the rho ranking
        n_cluster = 0
        #max_n_delta = min(N_CLUST, int(MAX_PERCENTILE_DELTA*NN))
        for ic in range(NN):
            # test the density, rho
            item_idx = delta_order[ic]
            if delta[item_idx] > 100:  # debug output for large-delta points
                print("A: iteration", ic, "delta", delta[item_idx],
                      delta[item_idx] < 0.25 * delta[delta_order[0]])
            if delta[item_idx] < 0.25 * delta[delta_order[0]]:
                continue  # delta too low (another heuristic!)
            item_rho_order = rho_order_list.index(item_idx)
            if delta[item_idx] > 100:  # debug output for large-delta points
                print("B: iteration", ic, item_rho_order, item_rho_order / NN,
                      MAX_PERCENTILE_RHO)
            if item_rho_order / NN < MAX_PERCENTILE_RHO:
                cluster_id[item_idx] = n_cluster
                print(ic, item_idx, item_rho_order, cluster_id[item_idx])
                n_cluster += 1
        print("Found %d clusters" % n_cluster)
        for x in range(NN):
            if cluster_id[x] >= 0:
                print("XC", x, cluster_id[x], rho[x], delta[x])
        self.cluster_id_maxima = cluster_id.deep_copy()

        P = Profiler("6. assign all points")
        R.cluster_assignment(rho_order, cluster_id)

        self.cluster_id_full = cluster_id.deep_copy()

        # assign the halos
        P = Profiler("7. assign halos")
        halo = flex.bool(NN, False)
        border = R.get_border(cluster_id=cluster_id)

        for ic in range(n_cluster):  # loop through all border regions; find the highest density
            print("cluster", ic, "in border", border.count(True))
            this_border = (cluster_id == ic) & border
            print(len(this_border), this_border.count(True))
            if this_border.count(True) > 0:
                highest_density = flex.max(rho.select(this_border))
                halo_selection = (rho < highest_density) & this_border
                if halo_selection.count(True) > 0:
                    cluster_id.set_selected(halo_selection, -1)
                core_selection = (cluster_id == ic) & ~halo_selection
                highest_density = flex.max(rho.select(core_selection))
                too_sparse = core_selection & (
                    rho.as_double() < highest_density / 10.
                )  # another heuristic
                if too_sparse.count(True) > 0:
                    cluster_id.set_selected(too_sparse, -1)
        self.cluster_id_final = cluster_id.deep_copy()
        print("%d in the excluded halo" % ((cluster_id == -1).count(True)))
Exemplo n.º 54
0
 def combine_pre_merge(self, result, iparams):
     mi_all = flex.miller_index()
     mio_all = flex.miller_index()
     I_all = flex.double()
     sigI_all = flex.double()
     G_all = flex.double()
     B_all = flex.double()
     p_all = flex.double()
     rs_all = flex.double()
     wavelength_all = flex.double()
     sin_all = flex.double()
     SE_all = flex.double()
     uc_mean_set = []
     wavelength_mean_set = []
     pickle_filename_all = flex.std_string()
     for res in result:
         for prep_output in res:
             _, _, mi, mio, I, sigI, G, B, p, rs, wavelength, sin, SE, uc_mean, wavelength_mean, pickle_filename_set, txt_out = prep_output
             mi_all.extend(mi)
             mio_all.extend(mio)
             I_all.extend(I)
             sigI_all.extend(sigI)
             G_all.extend(G)
             B_all.extend(B)
             p_all.extend(p)
             rs_all.extend(rs)
             wavelength_all.extend(wavelength)
             sin_all.extend(sin)
             SE_all.extend(SE)
             uc_mean_set.extend(uc_mean)
             wavelength_mean_set.append(wavelength_mean)
             pickle_filename_all.extend(pickle_filename_set)
     uc_mean = np.mean(np.array(uc_mean_set).reshape(-1, 6), axis=0)
     wavelength_mean = np.mean(wavelength_mean_set)
     ms_template = crystal.symmetry(
         unit_cell=tuple(uc_mean),
         space_group_symbol=iparams.target_space_group).build_miller_set(
             anomalous_flag=iparams.target_anomalous_flag,
             d_min=iparams.merge.d_min)
     ma_all = ms_template.array().customized_copy(indices=mi_all,
                                                  data=I_all,
                                                  sigmas=sigI_all)
     # sort reflections according to asymmetric-unit hkl (packed indices)
     perm = ma_all.sort_permutation(by_value="packed_indices")
     mi_all_sort = mi_all.select(perm)
     mio_all_sort = mio_all.select(perm)
     I_all_sort = I_all.select(perm)
     sigI_all_sort = sigI_all.select(perm)
     G_all_sort = G_all.select(perm)
     B_all_sort = B_all.select(perm)
     p_all_sort = p_all.select(perm)
     rs_all_sort = rs_all.select(perm)
     wavelength_all_sort = wavelength_all.select(perm)
     sin_all_sort = sin_all.select(perm)
     SE_all_sort = SE_all.select(perm)
     pickle_filename_all_sort = pickle_filename_all.select(perm)
     ma_uniq = ma_all.merge_equivalents().array().complete_array(
         d_min=iparams.merge.d_min, d_max=iparams.merge.d_max)
     matches_uniq = miller.match_multi_indices(
         miller_indices_unique=ma_uniq.indices(),
         miller_indices=mi_all_sort)
     pair_0 = flex.int([pair[0] for pair in matches_uniq.pairs()])
     pair_1 = flex.int([pair[1] for pair in matches_uniq.pairs()])
     group_id_list = flex.int(
         [pair_0[pair_1[i]] for i in range(len(matches_uniq.pairs()))])
     tally = Counter(group_id_list)
     cn_group = len(tally)
     return cn_group, group_id_list, mi_all_sort, mio_all_sort, \
            I_all_sort, sigI_all_sort, G_all_sort, B_all_sort, \
            p_all_sort, rs_all_sort, wavelength_all_sort, sin_all_sort, SE_all_sort, uc_mean, \
            wavelength_mean, pickle_filename_all_sort, ""
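
A tiny sketch of the select(perm) pattern used above: one permutation from flex.sort_permutation re-orders any number of parallel flex arrays consistently.

from scitbx.array_family import flex

values = flex.double([3.0, 1.0, 2.0])
labels = flex.std_string(["c", "a", "b"])
perm = flex.sort_permutation(values)
print(list(values.select(perm)))  # [1.0, 2.0, 3.0]
print(list(labels.select(perm)))  # ['a', 'b', 'c']
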
Exemplo n.º 55
0
    def add_cells_and_files(self, cells, symm_str):
        self.cells = cells
        # Table
        table_str = ""
        for idx, xac in enumerate(cells):
            cell = cells[xac]
            table_str += "<tr>\n"
            table_str += " <td>%.4d</td><td>%s</td>" % (idx+1, xac) # idx, file
            table_str += "".join(map(lambda x: "<td>%.2f</td>"%x, cell))
            table_str += "\n</tr>\n"

        # Hist
        cellconstr = CellConstraints(sgtbx.space_group_info(symm_str).group())
        show_flags = (True, not cellconstr.is_b_equal_a(), not cellconstr.is_c_equal_a_b(),
                      not cellconstr.is_angle_constrained("alpha"),
                      not cellconstr.is_angle_constrained("beta"),
                      not cellconstr.is_angle_constrained("gamma"))
        names = ("a", "b", "c", "&alpha;", "&beta;", "&gamma;")

        hist_str = ""
        label1 = ""
        for i, (name, show) in enumerate(zip(names, show_flags)):
            tmp = ""
            if i in (0,3): tmp += "<tr>"
            if show: tmp += "<th>%s</th>" % name
            if i in (2,5): tmp += "</tr>"

            if i < 3: hist_str += tmp
            else: label1 += tmp

        hist_str += "\n<tr>\n"

        for idx, (name, show) in enumerate(zip(names, show_flags)):
            if idx==3: hist_str += "</tr>" + label1 + "<tr>"
            if not show: continue
            vals = flex.double([x[idx] for x in cells.values()])
            if len(vals) == 0: continue
            nslots = max(30, int((max(vals) - min(vals)) / 0.5))
            hist = flex.histogram(vals, n_slots=nslots)
            x_vals = [hist.data_min() + hist.slot_width() * (i + .5)
                      for i in range(len(hist.slots()))]
            y_vals = hist.slots()
            hist_str += """
<td>
<div id="chartdiv_cell%(idx)d" style="width: 500px; height: 400px;"></div>
<script>
 var chart_cell%(idx)d = AmCharts.makeChart("chartdiv_cell%(idx)d", {
    "type": "serial",
    "theme": "none",  
    "legend": {
        "useGraphSettings": true,
        "markerSize":12,
        "valueWidth":0,
        "verticalGap":0
    },
    "dataProvider": [%(data)s],
    "valueAxes": [{
        "minorGridAlpha": 0.08,
        "minorGridEnabled": true,
        "position": "top",
        "axisAlpha":0
    }],
    "graphs": [{
        "balloonText": "[[category]]: [[value]]",
        "title": "%(name)s",
        "type": "column",
        "fillAlphas": 0.8,
        "valueField": "yval"
    }],
    "rotate": false,
    "categoryField": "xval",
    "categoryAxis": {
        "gridPosition": "start",
        "title": ""
    }
});
</script>
</td>
""" % dict(idx=idx, name=name,
           data=",".join(map(lambda x: '{"xval":%.2f,"yval":%d}'%x, zip(x_vals,y_vals)))
           )

        hist_str += "</tr>"

        self.html_inputfiles = """
<h2>Input files</h2>
%d files for merging in %s symmetry

<h3>Unit cell histogram</h3>
<table>
%s
</table>

<h3>Files</h3>
<a href="#" onClick="toggle_show('div-input-files'); return false;">Show/Hide</a>
<div id="div-input-files" style="display:none;">
<table class="cells">
<tr>
 <th>idx</th> <th>file</th> <th>a</th> <th>b</th> <th>c</th> <th>&alpha;</th> <th>&beta;</th> <th>&gamma;</th>
</tr>
%s
</table>
</div>
""" % (len(cells), symm_str, hist_str, table_str)
        self.write_html()
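
The histogram cells above can be reproduced without the AmCharts markup; a standalone sketch of the same flex.histogram calls, printed as text:

from scitbx.array_family import flex

vals = flex.double([77.1, 77.3, 77.2, 78.0, 77.4])
hist = flex.histogram(vals, n_slots=4)
for i, n in enumerate(hist.slots()):
    centre = hist.data_min() + hist.slot_width() * (i + 0.5)
    print("%8.2f %s" % (centre, "#" * n))
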
Exemplo n.º 56
0
 def __init__(O,
              sites_cart,
              density_map,
              gradients_method,
              weight_map=None,
              unit_cell=None,
              selection_variable=None,
              selection_variable_real_space=None,
              geometry_restraints_manager=None,
              energies_sites_flags=None,
              real_space_target_weight=1,
              real_space_gradients_delta=None,
              local_standard_deviations_radius=None,
              weight_map_scale_factor=None,
              lbfgs_termination_params=None,
              lbfgs_exception_handling_params=None,
              states_collector=None):
     assert [unit_cell, geometry_restraints_manager].count(None) == 1
     assert real_space_gradients_delta is not None
      if (unit_cell is None):
          unit_cell = geometry_restraints_manager.crystal_symmetry.unit_cell()
     if (selection_variable_real_space is not None):
         assert selection_variable_real_space.size() == sites_cart.size()
     else:
         selection_variable_real_space = flex.bool(sites_cart.size(), True)
     O.gradients_method = gradients_method
     O.x_previous = None
     O.states_collector = states_collector
     O.density_map = density_map
     O.weight_map = weight_map
     O.unit_cell = unit_cell
     O.sites_cart = sites_cart
     O.geometry_restraints_manager = geometry_restraints_manager
     O.energies_sites_flags = energies_sites_flags
     O.real_space_target_weight = real_space_target_weight
     O.real_space_gradients_delta = real_space_gradients_delta
     O.local_standard_deviations_radius = local_standard_deviations_radius
     O.selection_variable_real_space = selection_variable_real_space
     if (O.local_standard_deviations_radius is None):
         O.site_radii = None
     else:
         O.site_radii = flex.double(O.sites_cart.size(),
                                    O.local_standard_deviations_radius)
     O.weight_map_scale_factor = weight_map_scale_factor
     O.selection_variable = selection_variable
     if (O.selection_variable is None):
         O.sites_cart = sites_cart
         O.x = sites_cart.as_double()
     else:
         O.sites_cart = sites_cart.deep_copy()
         O.x = sites_cart.select(O.selection_variable).as_double()
     O.number_of_function_evaluations = -1
     O.f_start, O.g_start = O.compute_functional_and_gradients()
     O.minimizer = scitbx.lbfgs.run(
         target_evaluator=O,
         termination_params=lbfgs_termination_params,
         exception_handling_params=lbfgs_exception_handling_params)
     O.f_final, O.g_final = O.compute_functional_and_gradients()
     del O.x
     del O.site_radii
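
The minimizer above relies on the scitbx.lbfgs evaluator protocol: the target object exposes a flex.double attribute x and a compute_functional_and_gradients() method returning (functional, gradients). A toy sketch minimising (x - 3)^2 under the same protocol:

import scitbx.lbfgs
from scitbx.array_family import flex

class toy_target(object):
    def __init__(self):
        self.x = flex.double([0.0])          # parameter vector, updated in place
        scitbx.lbfgs.run(target_evaluator=self)

    def compute_functional_and_gradients(self):
        f = (self.x[0] - 3.0) ** 2
        g = flex.double([2.0 * (self.x[0] - 3.0)])
        return f, g

t = toy_target()
print(list(t.x))  # close to [3.0]
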
Exemplo n.º 57
0
  def fit_side_chain(self, clusters):
    rotamer_iterator = \
      mmtbx.refinement.real_space.fit_residue.get_rotamer_iterator(
        mon_lib_srv = self.mon_lib_srv,
        residue     = self.residue)
    if(rotamer_iterator is None): return
    selection_clash = self.co.clash_eval_selection
    selection_rsr   = self.co.rsr_eval_selection
    if(self.target_map is not None):
      start_target_value = self.get_target_value(
        sites_cart = self.residue.atoms().extract_xyz(),
        selection  = selection_rsr)
    sites_cart_start = self.residue.atoms().extract_xyz()
    sites_cart_first_rotamer = list(rotamer_iterator)[0][1]
    # From this point on, the coordinates in residue are set to the first rotamer!
    self.residue.atoms().set_xyz(sites_cart_first_rotamer)
    axes = []
    atr = []
    for i, angle in enumerate(self.chi_angles[0]):
      cl = clusters[i]
      axes.append(flex.size_t(cl.axis))
      atr.append(flex.size_t(cl.atoms_to_rotate))
    #
    if(self.target_map is not None and self.xyzrad_bumpers is not None):
      # Get reference map values
      ref_map_vals = flex.double()
      for a in self.residue.atoms():
        key = "%s_%s_%s"%(
          a.parent().parent().parent().id, a.parent().resname,
          a.name.strip())
        ref_map_vals.append(self.cmv[key])
      # Get radii
      radii = mmtbx.refinement.real_space.get_radii(
        residue = self.residue, vdw_radii = self.vdw_radii)
      # Exclude rotatable H from clash calculation
      tmp = flex.size_t()
      for i in selection_clash:
        if(self.rotatable_hd[self.residue.atoms()[i].i_seq]): continue
        tmp.append(i)
      selection_clash = tmp[:]
      # Ad hoc: S or SE have larger peaks!
      if(self.residue.resname in ["MET","MSE"]): scale=100
      else:                                      scale=3
      moving = ext.moving(
        sites_cart       = self.residue.atoms().extract_xyz(),
        sites_cart_start = sites_cart_start,
        radii            = radii,
        weights          = self.weights,
        bonded_pairs     = self.pairs,
        ref_map_max      = ref_map_vals * scale,
        ref_map_min      = ref_map_vals / 10)
      #
      ro = ext.fit(
        fixed                    = self.xyzrad_bumpers,
        axes                     = axes,
        rotatable_points_indices = atr,
        angles_array             = self.chi_angles,
        density_map              = self.target_map,
        moving                   = moving,
        unit_cell                = self.unit_cell,
        selection_clash          = selection_clash,
        selection_rsr            = selection_rsr, # select atoms to compute map target
        sin_table                = self.sin_cos_table.sin_table,
        cos_table                = self.sin_cos_table.cos_table,
        step                     = self.sin_cos_table.step,
        n                        = self.sin_cos_table.n)
    elif(self.target_map is not None and self.xyzrad_bumpers is None):
      ro = ext.fit(
        target_value             = start_target_value,
        axes                     = axes,
        rotatable_points_indices = atr,
        angles_array             = self.chi_angles,
        density_map              = self.target_map,
        all_points               = self.residue.atoms().extract_xyz(),
        unit_cell                = self.unit_cell,
        selection                = selection_rsr,
        sin_table                = self.sin_cos_table.sin_table,
        cos_table                = self.sin_cos_table.cos_table,
        step                     = self.sin_cos_table.step,
        n                        = self.sin_cos_table.n)
    else:
      ro = ext.fit(
        sites_cart_start         = sites_cart_start.deep_copy(),
        axes                     = axes,
        rotatable_points_indices = atr,
        angles_array             = self.chi_angles,
        all_points               = self.residue.atoms().extract_xyz(),
        sin_table                = self.sin_cos_table.sin_table,
        cos_table                = self.sin_cos_table.cos_table,
        step                     = self.sin_cos_table.step,
        n                        = self.sin_cos_table.n)
    sites_cart_result = ro.result()
    if(sites_cart_result.size()>0):
      dist = None
      if(self.accept_only_if_max_shift_is_smaller_than is not None):
        dist = flex.max(flex.sqrt((sites_cart_start - sites_cart_result).dot()))
      if(dist is None or
         dist < self.accept_only_if_max_shift_is_smaller_than):
        self.residue.atoms().set_xyz(sites_cart_result)
      else:
        self.residue.atoms().set_xyz(sites_cart_start)
    else:
      self.residue.atoms().set_xyz(sites_cart_start)
    if(self.m): self.m.add(residue = self.residue, state = "fitting")
    # tune up
    if(self.target_map is not None):
      tune_up(
        target_map           = self.target_map,
        residue              = self.residue,
        mon_lib_srv          = self.mon_lib_srv,
        rotamer_manager      = self.rotamer_manager.rotamer_evaluator,
        unit_cell            = self.unit_cell,
        monitor = self.m,
        torsion_search_start = -30,
        torsion_search_stop  = 30,
        torsion_search_step  = 1)
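
Each cluster above pairs a rotation axis with the atom indices it moves; the geometric core of the chi-angle search is a rotation of points about a bond vector. A plain-numpy sketch of that rotation (Rodrigues formula; assumes angles in degrees, and is an illustration rather than the ext.fit implementation):

import numpy as np

def rotate_about_axis(points, a, b, angle_deg):
    # rotate an (N, 3) array of points about the axis through a and b
    k = (b - a) / np.linalg.norm(b - a)
    t = np.radians(angle_deg)
    p = points - a
    rotated = (p * np.cos(t)
               + np.cross(k, p) * np.sin(t)
               + np.outer(p @ k, k) * (1.0 - np.cos(t)))
    return rotated + a
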
Exemplo n.º 58
0
def run_detail(show_plot, save_plot):
    P = Profiler("0. Read data")
    import sys
    file_name = sys.argv[1]
    from xfel.clustering.singleframe import CellOnlyFrame
    from cctbx import crystal
    cells = []
    for line in open(file_name, "r"):
        tokens = line.strip().split()
        unit_cell = tuple(float(x) for x in tokens[0:6])
        space_group_symbol = tokens[6]
        crystal_symmetry = crystal.symmetry(
            unit_cell=unit_cell, space_group_symbol=space_group_symbol)
        cells.append(CellOnlyFrame(crystal_symmetry, path=None))
    MM = [c.mm for c in cells]  # get all metrical matrices
    MM_double = flex.double()
    for i in range(len(MM)):
        Tup = MM[i]
        for j in range(6):
            MM_double.append(Tup[j])

    print(("There are %d cells" % (len(MM))))
    coord_x = flex.double([c.uc[0] for c in cells])
    coord_y = flex.double([c.uc[1] for c in cells])
    if show_plot or save_plot:
        import matplotlib
        if not show_plot:
            # http://matplotlib.org/faq/howto_faq.html#generate-images-without-having-a-window-appear
            matplotlib.use('Agg')  # use a non-interactive backend
        from matplotlib import pyplot as plt
        plt.plot([c.uc[0] for c in cells], [c.uc[1] for c in cells],
                 "k.",
                 markersize=3.)
        plt.axes().set_aspect("equal")
        if save_plot:
            # plot_name is expected to be defined at module scope
            plt.gcf().set_size_inches(10, 10)
            plt.savefig(plot_name,
                        dpi=300,
                        bbox_inches='tight')
        if show_plot:
            plt.show()

    print("Now constructing a Dij matrix.")
    P = Profiler("1. compute Dij matrix")
    NN = len(MM)

    import omptbx
    omptbx.omp_set_num_threads(64)
    from cctbx.uctbx.determine_unit_cell import NCDist_matrix, NCDist_flatten
    #Dij = NCDist_matrix(MM_double) # double loop, less efficient evaluation of upper triangle
    Dij = NCDist_flatten(MM_double)  # loop is flattened

    #from cctbx.uctbx.determine_unit_cell import NCDist # can this be refactored with MPI?
    #Dij = flex.double(flex.grid(NN,NN))
    #for i in xrange(NN):
    #  for j in xrange(i+1,NN):
    #    Dij[i,j] = NCDist(MM[i], MM[j])
    del P

    d_c = 10  # the distance cutoff, such that average item neighbors 1-2% of all items
    CM = clustering_manager(Dij=Dij, d_c=d_c)

    # Summarize the results here
    n_cluster = 1 + flex.max(CM.cluster_id_final)
    print("%d cells have been analyzed" % len(cells))
    print("# ------------   %d CLUSTERS  ----------------" % n_cluster)
    for i in range(n_cluster):
        item = flex.first_index(CM.cluster_id_maxima, i)
        print("Cluster %d.  Central unit cell: item %d" % (i, item))
        cells[item].crystal_symmetry.show_summary()
        print("Cluster has %d items, or %d after trimming borders" %
              ((CM.cluster_id_full == i).count(True),
               (CM.cluster_id_final == i).count(True)))
        print()

    appcolors = [
        'b', 'r', '#ff7f0e', '#2ca02c', '#9467bd', '#8c564b', '#e377c2',
        '#7f7f7f', '#bcbd22', '#17becf'
    ]
    if show_plot:
        #Decision graph
        from matplotlib import pyplot as plt

        plt.plot(CM.rho, CM.delta, "r.", markersize=3.)
        for x in range(NN):
            if CM.cluster_id_maxima[x] >= 0:
                plt.plot([CM.rho[x]], [CM.delta[x]], "ro")
        plt.show()

        #No-halo plot
        from matplotlib import pyplot as plt
        colors = [appcolors[i % 10] for i in CM.cluster_id_full]

        plt.scatter(coord_x,
                    coord_y,
                    marker='o',
                    color=colors,
                    linewidths=0.4,
                    edgecolor='k')
        for i in range(n_cluster):
            item = flex.first_index(CM.cluster_id_maxima, i)
            plt.plot([cells[item].uc[0]], [cells[item].uc[1]], 'y.')
        plt.axes().set_aspect("equal")
        plt.show()

        #Final plot
        halo = (CM.cluster_id_final == -1)
        core = ~halo
        plt.plot(coord_x.select(halo), coord_y.select(halo), "k.")
        colors = [appcolors[i % 10] for i in CM.cluster_id_final.select(core)]
        plt.scatter(coord_x.select(core),
                    coord_y.select(core),
                    marker="o",
                    color=colors,
                    linewidths=0.4,
                    edgecolor='k')
        for i in range(n_cluster):
            item = flex.first_index(CM.cluster_id_maxima, i)
            plt.plot([cells[item].uc[0]], [cells[item].uc[1]], 'y.')
        plt.axes().set_aspect("equal")
        plt.show()
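
The Dij computation above operates on metrical matrices (c.mm). For reference, a short sketch of building the symmetric metrical matrix G from unit-cell parameters (angles in degrees); the helper name is made up for illustration:

import math

def metrical_matrix(a, b, c, alpha, beta, gamma):
    # G = [[a^2, ab cos(gamma), ac cos(beta)],
    #      [ab cos(gamma), b^2, bc cos(alpha)],
    #      [ac cos(beta), bc cos(alpha), c^2]]
    ca, cb, cg = (math.cos(math.radians(x)) for x in (alpha, beta, gamma))
    return [[a * a,      a * b * cg, a * c * cb],
            [a * b * cg, b * b,      b * c * ca],
            [a * c * cb, b * c * ca, c * c]]
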
Exemplo n.º 59
0
def exercise_Compute_phifom_from_abcd_interface(args):
    miller_arrays = reflection_file_converter.run(
        args=args, simply_return_all_miller_arrays=True)
    complex_input = None
    for miller_array in miller_arrays:
        if (miller_array.is_complex_array()):
            complex_input = miller_array
            break
    if (complex_input is not None):
        print("complex_input.info():", complex_input.info())
    fom_input = None
    for miller_array in miller_arrays:
        if (miller_array.is_real_array()
                and str(miller_array.info()).lower().find("fom") >= 0):
            fom_input = miller_array
            break
    if (fom_input is not None):
        print("fom_input.info():", fom_input.info())
    for miller_array in miller_arrays:
        if (isinstance(miller_array.data(), flex.hendrickson_lattman)):
            print("Hendrickson-Lattman coefficients:", miller_array.info())
            n_bad_figures_of_merit = 0
            n_bad_phases = 0
            if (miller_array.anomalous_flag()):
                print("Clipper cannot currently handle anomalous arrays" \
                    + " with Hendrickson-Lattman coefficients.")
                continue
            phi_fom = clipper.Compute_phifom_from_abcd_interface(
                unit_cell=miller_array.unit_cell(),
                space_group=miller_array.space_group(),
                miller_indices=miller_array.indices(),
                phase_probabilities=miller_array.data())
            phi_clipper = phi_fom.centroid_phases() * 180 / math.pi
            fom_clipper = phi_fom.figures_of_merit()
            if (fom_input is not None):
                fom_deltas = fom_input.data() - fom_clipper
                perm = flex.sort_permutation(flex.abs(fom_deltas),
                                             reverse=True)
                fom_deltas_sorted = fom_deltas.select(perm)
                fom_clipper_sorted = fom_clipper.select(perm)
                fom_input_sorted = fom_input.data().select(perm)
                pp_sorted = miller_array.data().select(perm)
                indices_sorted = miller_array.indices().select(perm)
                centric_flags_sorted = fom_input.centric_flags().data().select(
                    perm)
                print("FOM comparison")
                for f, i, c, d, p, ix in zip(centric_flags_sorted,
                                             fom_input_sorted,
                                             fom_clipper_sorted,
                                             fom_deltas_sorted, pp_sorted,
                                             indices_sorted):
                    print(f, "%.3f %.3f %.3f" % (i, c, d), end=' ')
                    if (abs(d) > 0.01):
                        print("LOOK", "%.2f %.2f %.2f %.2f" % p, ix, end=' ')
                        n_bad_figures_of_merit += 1
                    print()
                print()
            if (complex_input is not None):
                phi_input = complex_input.phases(deg=True).data()
                phi_deltas = flex.double()
                for phi1, phi2 in zip(phi_input, phi_clipper):
                    phi_deltas.append(
                        scitbx.math.signed_phase_error(phi1, phi2, deg=True))
                perm = flex.sort_permutation(flex.abs(phi_deltas),
                                             reverse=True)
                phi_deltas_sorted = phi_deltas.select(perm)
                phi_clipper_sorted = phi_clipper.select(perm)
                phi_input_sorted = phi_input.select(perm)
                pp_sorted = miller_array.data().select(perm)
                indices_sorted = miller_array.indices().select(perm)
                centric_flags_sorted = \
                    complex_input.centric_flags().data().select(perm)
                fom_clipper_sorted = fom_clipper.select(perm)
                print("PHI comparison")
                for f, i, c, d, m, p, ix in zip(centric_flags_sorted,
                                                phi_input_sorted,
                                                phi_clipper_sorted,
                                                phi_deltas_sorted,
                                                fom_clipper_sorted, pp_sorted,
                                                indices_sorted):
                    if (m < 0.01): continue
                    print(f, "%.3f %.3f %.3f" % (i, c, d), end=' ')
                    if (abs(d) > 3 and (max(p) < 1000 or abs(d) > 10)):
                        print("LOOK",
                              "%.2f %.2f %.2f %.2f" % p,
                              "fom=%.3f" % m,
                              ix,
                              end=' ')
                        n_bad_phases += 1
                    print()
                print()
            assert n_bad_figures_of_merit == 0
            assert n_bad_phases == 0
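
What the clipper interface computes, in miniature: Hendrickson-Lattman coefficients define P(phi) proportional to exp(A cos phi + B sin phi + C cos 2phi + D sin 2phi), and the centroid phase and figure of merit are the argument and modulus of the circular mean of that distribution. A numerical sketch (unnormalised, coarse grid, suitable only for modest coefficients; not the clipper implementation):

import cmath, math

def phi_fom_from_abcd(A, B, C, D, n=360):
    # circular mean of exp(i phi) weighted by the HL probability
    s = 0j
    w = 0.0
    for k in range(n):
        phi = 2.0 * math.pi * k / n
        p = math.exp(A * math.cos(phi) + B * math.sin(phi)
                     + C * math.cos(2 * phi) + D * math.sin(2 * phi))
        s += p * cmath.exp(1j * phi)
        w += p
    centroid = s / w
    return math.degrees(cmath.phase(centroid)), abs(centroid)
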
Exemplo n.º 60
0
 def __init__(self,
              residue,
              mon_lib_srv,
              rotamer_manager,
              sin_cos_table,
              cmv,
              unit_cell,
              rotatable_hd=None,
              vdw_radii=None,
              xyzrad_bumpers=None,
              target_map=None,
              target_map_for_cb=None,
              backbone_sample=False,
              accept_only_if_max_shift_is_smaller_than=None,
              log=None):
   adopt_init_args(self, locals())
   if(self.log is None): self.log = sys.stdout
   self.co = mmtbx.refinement.real_space.aa_residue_axes_and_clusters(
     residue         = self.residue,
     mon_lib_srv     = self.mon_lib_srv,
     backbone_sample = True,
     log             = self.log)
   self.m = None
   if(self.target_map is not None and len(self.co.clusters)>0):
     # Set weights
     AN = {"S":16, "O":8, "N":7, "C":6, "SE":34, "H":1, "D":5}
     #AN = {"S":1, "O":1, "N":1, "C":1, "SE":1, "H":1}
     self.weights = flex.double()
     for atom in self.residue.atoms():
       self.weights.append(AN[atom.element.strip().upper()])
     # Bonded pairs
     exclude = ["C","N","O","CA"]
     reference = exclude + ["CB"]
     atoms = self.residue.atoms()
     self.pairs = []
      for i, ai in enumerate(atoms):
        if(ai.name.strip() in reference):
          # map value at the reference atom (currently not used further)
          mv = self.target_map.eight_point_interpolation(
            self.unit_cell.fractionalize(ai.xyz))
       if(ai.name.strip() in exclude): continue
       if(ai.element.strip().upper() in ["H","S","SE"]): continue
       for j, aj in enumerate(atoms):
         if i==j: continue
         if(aj.name.strip() in exclude): continue
         if(aj.element.strip().upper() in ["H","S","SE"]): continue
         d = ai.distance(aj)
         if d < 1.6:
           pair = [i,j]
           pair.sort()
           if(not pair in self.pairs):
             self.pairs.append(pair)
     # Set monitor
     id_str=""
      if(self.residue.parent() is not None and
         self.residue.parent().parent() is not None):
        id_str+="chain: %s"%(self.residue.parent().parent().id)
     id_str+=" residue: %s %s"%(self.residue.resname, self.residue.resseq.strip())
     if(len(self.co.clusters)>1):
        msel = flex.size_t(flatten(self.co.clusters[1].vector))
     else:
       msel = flex.size_t()
     self.m = monitor(
       id_str    = id_str,
       selection = msel,
       map_data  = self.target_map,
       unit_cell = self.unit_cell,
       weights   = self.weights,
       pairs     = self.pairs,
       cmv       = self.cmv,
       rotamer_evaluator = self.rotamer_manager.rotamer_evaluator,
       log       = self.log)
     self.m.add(residue = self.residue, state = "start")
   if(self.target_map is None):
     assert not backbone_sample
   # Actual calculations
   self.chi_angles = self.rotamer_manager.get_chi_angles(
     resname = self.residue.resname)
   if(len(self.co.clusters)>0):
     if(backbone_sample):
       self.fit_c_beta(c_beta_rotation_cluster = self.co.clusters[0])
     self.fit_side_chain(clusters = self.co.clusters[1:])
   if(self.m is not None):
     self.m.finalize(residue = self.residue)
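
The pair-building loop in the constructor above is an O(N^2) distance screen with a 1.6 A cutoff. The same idea in isolation, on plain coordinate tuples (requires Python 3.8+ for math.dist; the helper name is made up for illustration):

import math

def bonded_pairs(sites, cutoff=1.6):
    # naive all-pairs screen: every pair closer than cutoff is "bonded"
    pairs = []
    for i in range(len(sites)):
        for j in range(i + 1, len(sites)):
            if math.dist(sites[i], sites[j]) < cutoff:
                pairs.append([i, j])
    return pairs
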