Example #1
def extract_estimated_i_obs(O, partiality_threshold):
  # O is the owning object (cctbx-style "self"); collect_estis is defined
  # elsewhere in the same module.
  from cctbx.array_family import flex
  indices = flex.miller_index()
  data = flex.double()
  mimmi = O.miller_image_map.miller_indices
  indices.reserve(mimmi.size())
  data.reserve(mimmi.size())
  for h, iiis in zip(mimmi, O.miller_image_map.map):
    estis = collect_estis(O.array, iiis, partiality_threshold)
    if estis.size() != 0:
      indices.append(h)
      data.append(flex.mean(estis))
  return (indices, data)
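A minimal sketch of the flex pattern this example leans on (parallel flex.miller_index / flex.double arrays with reserve/append, averaged with flex.mean); the data below are made up:

from cctbx.array_family import flex

indices = flex.miller_index()
data = flex.double()
indices.reserve(3)          # pre-size to avoid repeated reallocation
data.reserve(3)
for h, estis in [((1, 0, 0), flex.double([10.0, 12.0])),
                 ((0, 2, 0), flex.double([])),
                 ((0, 0, 3), flex.double([4.0]))]:
    if estis.size() != 0:   # skip reflections with no surviving estimates
        indices.append(h)
        data.append(flex.mean(estis))
print(list(indices), list(data))  # [(1, 0, 0), (0, 0, 3)] [11.0, 4.0]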
Example #3
import h5py
import numpy as np
from cctbx import sgtbx, crystal, miller
from cctbx.array_family import flex


def load_sfall(fname, data_are_complex=True):
    """
    Load the structure factor file generated by write_multichannel_sfall().
    :param fname: file generated by the write_multichannel_sfall method above.
    :return: mil_ar, energies
        mil_ar: dict of miller arrays
        energies: array of xray energies in electron volts
        such that mil_ar[0] is Fhkl at energy energies[0]
    """

    f = h5py.File(fname, "r")
    data = f["data"][()]
    indices_data = f["indices"][()]
    indices_for_flex = []  # make a list for flex constructor
    for hkl in indices_data:
        h, k, l = list(map(int, hkl))  # cast to python int for Python 3
        indices_for_flex.append((h, k, l))
    hall = f["hall_symbol"][()]
    if isinstance(hall, bytes):  # h5py >= 3 returns stored strings as bytes
        hall = hall.decode()
    ucell_param = tuple(f["ucell_tuple"][()])
    energies = f["energies"][()]
    sg = sgtbx.space_group(hall)
    Symm = crystal.symmetry(unit_cell=ucell_param, space_group=sg)
    #indices_flex = tuple(map(tuple, indices))
    mil_idx = flex.miller_index(indices_for_flex)
    mil_set = miller.set(crystal_symmetry=Symm,
                         indices=mil_idx,
                         anomalous_flag=True)

    mil_ar = {}  # load a dict of "sfall at each energy"
    for i_chan, data_chan in enumerate(data):
        if data_are_complex:
            data_flex = flex.complex_double(np.ascontiguousarray(data_chan))
        else:
            data_flex = flex.double(np.ascontiguousarray(data_chan))
        mil_ar[i_chan] = miller.array(mil_set, data=data_flex)

    return mil_ar, energies
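A hedged usage sketch; the file name below is hypothetical and must point at a file produced by the matching write_multichannel_sfall():

mil_ar, energies = load_sfall("sfall_multichannel.h5")  # hypothetical path
print("%d energy channels" % len(energies))
mil_ar[0].show_summary()  # Fhkl at energies[0]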
Example #4
def offset_miller_indices(miller_indices, offset):
    # Shift every (h, k, l) by the given integer offset vector; go via
    # vec3_double so the addition and rounding are array-wise.
    from dials.array_family import flex
    return flex.miller_index(*[
        mi.iround()
        for mi in (miller_indices.as_vec3_double() + offset).parts()
    ])
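A quick check of the helper above with made-up indices; the offset is applied component-wise and rounded back to integers:

from dials.array_family import flex

mi = flex.miller_index([(1, 2, 3), (4, 5, 6)])
print(list(offset_miller_indices(mi, (0, 0, 1))))  # [(1, 2, 4), (4, 5, 7)]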
Example #6
def run(args):
  phil = iotbx.phil.process_command_line(
    args = args, master_string = master_phil)
  work_params = phil.work.extract()
  if ("--help" in args) :
    libtbx.phil.parse(master_phil.show())
    return

  if ((work_params.d_min is None) or
      (work_params.data is None) or
      ((work_params.model is None) and
       work_params.scaling.algorithm != "mark1")):
    raise Usage("cxi.merge "
                "d_min=4.0 "
                "data=~/scratch/r0220/006/strong/ "
                "model=3bz1_3bz2_core.pdb")
  
  if (work_params.rescale_with_average_cell and
      not work_params.set_average_unit_cell):
    raise Usage("If rescale_with_average_cell=True, you must also specify "+
      "set_average_unit_cell=True.")
  
  miller_set = symmetry(
      unit_cell = work_params.target_unit_cell,
      space_group_info = work_params.target_space_group
    ).build_miller_set(
      anomalous_flag = not work_params.merge_anomalous,
      d_min = work_params.d_min)
  from xfel.cxi.merging.general_fcalc import random_structure
  i_model = random_structure(work_params)

# ---- Augment this code with any special procedures for x scaling
  scaler = xscaling_manager(
    miller_set = miller_set,
    i_model = i_model,
    params = work_params)
  
  scaler.read_all()
  sg = miller_set.space_group()
  pg = sg.build_derived_laue_group()
  rational_ops = []
  for symop in pg:
    rational_ops.append((matrix.sqr(symop.r().transpose().as_rational()),
                         symop.r().as_hkl()))

  # miller_set.show_summary()
    
  uc = work_params.target_unit_cell
    
  hkl_asu = scaler.observations["hkl_id"]
  imageno = scaler.observations["frame_id"]
  intensi = scaler.observations["i"]
  sigma_i = scaler.observations["sigi"]
  lookup = scaler.millers["merged_asu_hkl"]
  origH = scaler.observations["H"]
  origK = scaler.observations["K"]
  origL = scaler.observations["L"]

  from cctbx.miller import map_to_asu
  sgtype = miller_set.space_group_info().type()
  aflag = miller_set.anomalous_flag()
  from cctbx.array_family import flex

  # FIXME in here perform the mapping to ASU for both the original and other
  # index as an array-wise manipulation to make things a bunch faster...
  # however this also uses a big chunk of RAM... FIXME also in here use
  # cb_op.apply(indices) to get the indices reindexed...

  original_indices = flex.miller_index()
  for x in range(len(scaler.observations["hkl_id"])):
    original_indices.append(lookup[hkl_asu[x]])

  from cctbx.sgtbx import change_of_basis_op

  I23 = change_of_basis_op('k, -h, l')

  other_indices = I23.apply(original_indices)

  map_to_asu(sgtype, aflag, original_indices)
  map_to_asu(sgtype, aflag, other_indices)

  # FIXME would be useful in here to have a less expensive way of finding the
  # symmetry operation which gave the map to the ASU - perhaps best way is to
  # make a new C++ map_to_asu which records this.
  
  # FIXME in here recover the original frame structure of the data to
  # logical frame objects - N.B. the frame will need to be augmented to test
  # alternative indexings

  # construct table of start / end indices for frames: now using Python
  # range indexing

  starts = [0]
  ends = []
    
  for x in range(1, len(scaler.observations["hkl_id"])):
    if imageno[x] != imageno[x - 1]:
      ends.append(x)
      starts.append(x)
            
  ends.append(len(scaler.observations["hkl_id"]))
  
  keep_start = []
  keep_end = []
  
  for j, se in enumerate(zip(starts, ends)):
    print('processing frame %d: %d to %d' % (j, se[0], se[1]))
    s, e = se
    isig = sum(i / sig for i, sig in zip(intensi[s:e], sigma_i[s:e])) / (e - s)
    dmin = 100.0
    for x in range(s, e):
      d = uc.d(lookup[hkl_asu[x]])
      if d < dmin:
        dmin = d
    if isig > 6.0 and dmin < 3.2:
      keep_start.append(s)
      keep_end.append(e)

  starts = keep_start
  ends = keep_end

  print('Keeping %d frames' % len(starts))

  # then start running the comparison code

  frames = []

  for s, e in zip(starts, ends):
    # FIXME need this from remap to ASU
    misym = [0 for x in range(s, e)]
    indices = [original_indices[x] for x in range(s, e)]
    other = [other_indices[x] for x in range(s, e)]
    intensities = intensi[s:e]
    sigmas = sigma_i[s:e]

    frames.append(Frame(uc, indices, other, intensities, sigmas))

  reference = FrameFromReferenceMTZ()

  fout = open('cc_reference.log', 'w')

  for j, f in enumerate(frames):
    _cc = reference.cc(f)
    _oo = reference.cc_other(f)
    print('%d %d %d %d %f %d %f' % (j, starts[j], ends[j], _cc[0], _cc[1],
                                    _oo[0], _oo[1]))

    fout.write('%d %d %d %d %f %d %f\n' % (j, starts[j], ends[j],
                                           _cc[0], _cc[1], _oo[0], _oo[1]))

  fout.close()

  return
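The start/end bookkeeping above slices the observation table into per-frame half-open ranges; a minimal sketch of the same logic on a toy frame_id column:

imageno = [0, 0, 0, 1, 1, 2]          # toy frame_id column
starts, ends = [0], []
for x in range(1, len(imageno)):
    if imageno[x] != imageno[x - 1]:  # frame boundary
        ends.append(x)
        starts.append(x)
ends.append(len(imageno))
print(list(zip(starts, ends)))        # [(0, 3), (3, 5), (5, 6)]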
Example #7
def run(args):
  phil = iotbx.phil.process_command_line(
    args = args, master_string = master_phil)
  work_params = phil.work.extract()
  if ("--help" in args) :
    libtbx.phil.parse(master_phil.show())
    return

  if ((work_params.d_min is None) or
      (work_params.data is None) or
      ((work_params.model is None) and
       work_params.scaling.algorithm != "mark1")):
    raise Usage("cxi.merge "
                "d_min=4.0 "
                "data=~/scratch/r0220/006/strong/ "
                "model=3bz1_3bz2_core.pdb")
  
  if (work_params.rescale_with_average_cell and
      not work_params.set_average_unit_cell):
    raise Usage("If rescale_with_average_cell=True, you must also specify "+
      "set_average_unit_cell=True.")
  
  miller_set = symmetry(
      unit_cell = work_params.target_unit_cell,
      space_group_info = work_params.target_space_group
    ).build_miller_set(
      anomalous_flag = not work_params.merge_anomalous,
      d_min = work_params.d_min)
  from xfel.cxi.merging.general_fcalc import random_structure
  i_model = random_structure(work_params)

# ---- Augment this code with any special procedures for x scaling
  scaler = xscaling_manager(
    miller_set = miller_set,
    i_model = i_model,
    params = work_params)
  
  scaler.read_all()
  sg = miller_set.space_group()
  pg = sg.build_derived_laue_group()
  rational_ops = []
  for symop in pg:
    rational_ops.append((matrix.sqr(symop.r().transpose().as_rational()),
                         symop.r().as_hkl()))

  # miller_set.show_summary()
    
  uc = work_params.target_unit_cell
    
  hkl_asu = scaler.observations["hkl_id"]
  imageno = scaler.observations["frame_id"]
  intensi = scaler.observations["i"]
  sigma_i = scaler.observations["sigi"]
  lookup = scaler.millers["merged_asu_hkl"]
  origH = scaler.observations["H"]
  origK = scaler.observations["K"]
  origL = scaler.observations["L"]

  from cctbx.miller import map_to_asu
  sgtype = miller_set.space_group_info().type()
  aflag = miller_set.anomalous_flag()
  from cctbx.array_family import flex

  # FIXME in here perform the mapping to ASU for both the original and other
  # index as an array-wise manipulation to make things a bunch faster...
  # however this also uses a big chunk of RAM... FIXME also in here use
  # cb_op.apply(indices) to get the indices reindexed...

  original_indices = flex.miller_index()
  for x in range(len(scaler.observations["hkl_id"])):
    original_indices.append(lookup[hkl_asu[x]])

  from cctbx.sgtbx import change_of_basis_op

  I23 = change_of_basis_op('k, -h, l')

  other_indices = I23.apply(original_indices)

  map_to_asu(sgtype, aflag, original_indices)
  map_to_asu(sgtype, aflag, other_indices)

  # FIXME would be useful in here to have a less expensive way of finding the
  # symmetry operation which gave the map to the ASU - perhaps best way is to
  # make a new C++ map_to_asu which records this.
  
  # FIXME in here recover the original frame structure of the data to
  # logical frame objects - N.B. the frame will need to be augmented to test
  # alternative indexings

  # construct table of start / end indices for frames: now using Python
  # range indexing

  starts = [0]
  ends = []
    
  for x in range(1, len(scaler.observations["hkl_id"])):
    if imageno[x] != imageno[x - 1]:
      ends.append(x)
      starts.append(x)
            
  ends.append(len(scaler.observations["hkl_id"]))
  
  keep_start = []
  keep_end = []
  
  for j, se in enumerate(zip(starts, ends)):
    print('processing frame %d: %d to %d' % (j, se[0], se[1]))
    s, e = se
    isig = sum(i / sig for i, sig in zip(intensi[s:e], sigma_i[s:e])) / (e - s)
    dmin = 100.0
    for x in range(s, e):
      d = uc.d(lookup[hkl_asu[x]])
      if d < dmin:
        dmin = d
    if isig > 6.0 and dmin < 3.2:
      keep_start.append(s)
      keep_end.append(e)

  starts = keep_start
  ends = keep_end

  print('Keeping %d frames' % len(starts))

  # then start running the comparison code

  frames = []

  for s, e in zip(starts, ends):
    # FIXME need this from remap to ASU
    misym = [0 for x in range(s, e)]
    indices = [original_indices[x] for x in range(s, e)]
    other = [other_indices[x] for x in range(s, e)]
    intensities = intensi[s:e]
    sigmas = sigma_i[s:e]

    frames.append(Frame(uc, indices, other, intensities, sigmas))

  cycle = 0

  total_nref = sum([len(f.get_indices()) for f in frames])

  # pre-scale the data - first determine average ln(k), B; then apply

  kbs = [f.kb() for f in frames]

  mn_k = sum([kb[0] for kb in kbs]) / len(kbs)
  mn_B = sum([kb[1] for kb in kbs]) / len(kbs)

  for f in frames:
    f.scale_to_kb(mn_k, mn_B)
    
  while True:

    print('Analysing %d frames' % len(frames))
    print('Cycle %d' % cycle)
    cycle += 1

    print('Power spectrum')
    fn = frame_numbers(frames)
    for j in sorted(fn):
      print('%4d %4d' % (j, fn[j]))
            
    nref_cycle = sum([len(f.get_indices()) for f in frames])
    assert nref_cycle == total_nref

    # first work on the original indices

    import numpy

    common_reflections = numpy.zeros((len(frames), len(frames)),
                                     dtype = numpy.short)
    
    obs = { } 

    # for other hand add -j

    for j, f in enumerate(frames):
      indices = set(f.get_indices())
      for i in indices:
        _i = tuple(i)
        if _i not in obs:
          obs[_i] = []
        obs[_i].append(j)

    for hkl in obs:
      obs[hkl].sort()
      for j, f1 in enumerate(obs[hkl][:-1]):
        for f2 in obs[hkl][j + 1:]:
          if f1 * f2 > 0:
            common_reflections[(abs(f1), abs(f2))] += 1

    cmn_rfl_list = []

    for f1 in range(len(frames)):
      for f2 in range(f1 + 1, len(frames)):
        if common_reflections[(f1, f2)] > 10:
          cmn_rfl_list.append((common_reflections[(f1, f2)], f1, f2))

    cmn_rfl_list.sort()
    cmn_rfl_list.reverse()
    
    joins = []
    used = []
    
    for n, f1, f2 in cmn_rfl_list:
      
      if f1 in used or f2 in used:
        continue
            
      _cc = frames[f1].cc(frames[f2])

      # really only need to worry about f2 which will get merged...
      # merging multiple files together should be OK provided they are
      # correctly sorted (though the order should not matter anyhow?)
      # anyhow they are sorted anyway... ah as f2 > f1 then just sorting
      # the list by f2 will make sure the data cascade correctly.

      # p-value small (3% ish) for cc > 0.6 for > 10 observations -
      # necessary as will be correlated due to Wilson curves though
      # with B factor < 10 this is less of an issue

      if _cc[0] > 10 and _cc[1] > 0.6:
        print('%4d %.3f' % _cc, f1, f2)
        joins.append((f2, f1))
        used.append(f2)

    if not joins:
      print('No pairs found')
      break

    joins.sort()
    joins.reverse()
        
    for j2, j1 in joins:
      rmerge = frames[j1].merge(frames[j2])
      if rmerge:
        print('R: %4d %4d %6.3f' % (j1, j2, rmerge))
      else:
        print('R: %4d %4d ------' % (j1, j2))

    all_joins = [j for j in joins]

    # then do the same for the alternative indices

    other_reflections = numpy.zeros((len(frames), len(frames)),
                                    dtype = numpy.short)

    obs = { } 

    # for other hand add -j

    for j, f in enumerate(frames):
      indices = set(f.get_indices())
      for i in indices:
        _i = tuple(i)
        if _i not in obs:
          obs[_i] = []
        obs[_i].append(j)

      indices = set(f.get_other())
      for i in indices:
        _i = tuple(i)
        if _i not in obs:
          obs[_i] = []
        obs[_i].append(-j)

    for hkl in obs:
      obs[hkl].sort()
      for j, f1 in enumerate(obs[hkl][:-1]):
        for f2 in obs[hkl][j + 1:]:
          if f1 * f2 < 0:
            other_reflections[(abs(f1), abs(f2))] += 1

    oth_rfl_list = []

    for f1 in range(len(frames)):
      for f2 in range(f1 + 1, len(frames)):
        if other_reflections[(f1, f2)] > 10:
          oth_rfl_list.append((other_reflections[(f1, f2)], f1, f2))
    
    joins = []

    oth_rfl_list.sort()
    oth_rfl_list.reverse()
        
    for n, f1, f2 in oth_rfl_list:
      
      if f1 in used or f2 in used:
        continue
            
      _cc = frames[f1].cc_other(frames[f2])

      # really only need to worry about f2 which will get merged...
      # merging multiple files together should be OK provided they are
      # correctly sorted (though the order should not matter anyhow?)
      # anyhow they are sorted anyway... ah as f2 > f1 then just sorting
      # the list by f2 will make sure the data cascade correctly.

      # p-value small (3% ish) for cc > 0.6 for > 10 observations -
      # necessary as will be correlated due to Wilson curves though
      # with B factor < 10 this is less of an issue

      if _cc[0] > 10 and _cc[1] > 0.6:
        print('%4d %.3f' % _cc, f1, f2)
        joins.append((f2, f1))
        used.append(f2)

    all_joins += joins

    if not all_joins:
      break
      
    joins.sort()
    joins.reverse()
        
    for j2, j1 in joins:
      frames[j2].reindex()
      rmerge = frames[j1].merge(frames[j2])
      if rmerge:
        print('R: %4d %4d %6.3f' % (j1, j2, rmerge))
      else:
        print('R: %4d %4d ------' % (j1, j2))
        
    continue

  frames.sort()

  print('Biggest few: #frames; #unique refl')
  j = -1
  while frames[j].get_frames() > 1:
    print(frames[j].get_frames(), frames[j].get_unique_indices())
    frames[j].output_as_scalepack(sg, 'scalepack-%d.sca' % j)
    j -= 1

  return
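The pairing step above hinges on the per-pair common-reflection tally; a toy numpy sketch (the sign convention, +j for original-hand and -j for alternative-hand observations, is what the f1 * f2 tests select on):

import numpy as np

obs = {(1, 0, 0): [1, 2, 3], (0, 1, 0): [2, 3]}  # hkl -> observing frames
n_frames = 4
common = np.zeros((n_frames, n_frames), dtype=np.short)
for hkl in obs:
    obs[hkl].sort()
    for j, f1 in enumerate(obs[hkl][:-1]):
        for f2 in obs[hkl][j + 1:]:
            if f1 * f2 > 0:  # both observations on the same indexing hand
                common[abs(f1), abs(f2)] += 1
print(common[1, 2], common[2, 3])  # 1 2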
Example #8
def simdata_pipeline():
    # Module-level context assumed: `args` from argparse, the cxid9114
    # helpers gen_data / eigen_solver, numpy as np, and time.
    data = gen_data.gen_data(args.i, load_hkl=False, Nshot_max=args.N)

    if args.resoshuff:
        hmap = np.load(args.i)["hkl_map"][()]
        # NB: this generic binning is immediately overridden by the explicit
        # resolution-shell edges below
        reso_bins = [24, 6.5] + list(np.linspace(2, 6.5, 25)[::-1])
        reso_bins = [
            24.9840, 4.3025, 4.3025, 3.4178, 3.4178, 2.9866, 2.9866, 2.7139,
            2.7139, 2.5195, 2.5195, 2.3711, 2.3711, 2.2524, 2.2524, 2.1545,
            2.1545, 2.0716, 2.0716, 2.0001
        ]
    else:
        hmap = None
        reso_bins = None

    guesses = gen_data.guess_data(data,
                                  use_Iguess=False,
                                  gain_guess=False,
                                  perturbate=True,
                                  perturbate_factor=.5,
                                  hmap=hmap,
                                  reso_bins=reso_bins)
    truth = gen_data.guess_data(data, perturbate=False)
    print("Loaded")

    # t1_lb = time.time()
    # lbfgs_solver = solvers.LogIsolverCurve(
    #    use_curvatures=True,
    #    data=data,
    #    guess=guesses, truth=truth)
    # t2_lb = time.time()
    # embed()

    if args.weights:
        weights = data['weights']
    else:
        weights = None
    t1_eig = time.time()
    ES = eigen_solver(data=data,
                      guess=guesses,
                      truth=truth,
                      lbfgs=False,
                      conj_grad=True,
                      plot=True,
                      plot_truth=True,
                      weights=weights,
                      sovlerization_maximus=args.solverize)
    t2_eig = time.time()
    if args.plot:
        IB = np.exp(ES.helper.x[ES.Nhkl:2 * ES.Nhkl].as_numpy_array())
        IA = np.exp(ES.helper.x[:ES.Nhkl].as_numpy_array())

        IAtru = np.exp(ES.IAprm_truth.as_numpy_array())
        IBtru = np.exp(ES.IBprm_truth.as_numpy_array())
        import pylab as plt
        plt.plot(IA, IAtru, '.')
        plt.plot(IB, IBtru, '.')
        ax = plt.gca()
        ax.set_yscale('log')
        ax.set_xscale('log')
        plt.xlim(1e-1, 1e7)
        plt.ylim(1e-1, 1e7)
        plt.plot(IAtru, IBtru, '.')
        plt.show()

    from cctbx import sgtbx, crystal
    from cctbx.array_family import flex
    from cctbx import miller
    from cxid9114 import utils

    Nh = ES.Nhkl
    IA = ES.helper.x[:Nh].as_numpy_array()
    IB = ES.helper.x[Nh:2 * Nh].as_numpy_array()

    #IA = ES.IAprm_truth.as_numpy_array()
    #IB = ES.IBprm_truth.as_numpy_array()
    #IA = ES.x_init[:Nh].as_numpy_array()
    #IB = ES.x_init[Nh:2*Nh].as_numpy_array()

    dataA = np.load(args.i)
    hkl_map = dataA["hkl_map"][()]
    hkl_map2 = {v: k for k, v in hkl_map.items()}
    Nhkl = len(hkl_map)
    assert (Nh == Nhkl)
    hout, Aout, Bout = [], [], []
    for i in range(Nhkl):
        h = hkl_map2[i]
        valA = IA[i]
        valB = IB[i]
        hout.append(h)
        Aout.append(np.sqrt(np.exp(valA)))
        Bout.append(np.sqrt(np.exp(valB)))

    sigA = np.ones(len(Aout))  #np.sqrt(Aout)
    sigB = np.ones(len(Bout))  #np.sqrt(Bout)

    sg = sgtbx.space_group(" P 4nw 2abw")
    Symm = crystal.symmetry(unit_cell=(79, 79, 38, 90, 90, 90), space_group=sg)
    hout = tuple(hout)
    mil_idx = flex.miller_index(hout)
    mil_set = miller.set(crystal_symmetry=Symm,
                         indices=mil_idx,
                         anomalous_flag=True)
    Aout_flex = flex.double(np.ascontiguousarray(Aout))
    Bout_flex = flex.double(np.ascontiguousarray(Bout))
    sigA_flex = flex.double(np.ascontiguousarray(sigA))
    sigB_flex = flex.double(np.ascontiguousarray(sigB))
    mil_arA = miller.array(
        mil_set, data=Aout_flex,
        sigmas=sigA_flex).set_observation_type_xray_amplitude()
    mil_arB = miller.array(
        mil_set, data=Bout_flex,
        sigmas=sigB_flex).set_observation_type_xray_amplitude()
    #mil_arA = miller.array(mil_set, data=Aout_flex).set_observation_type_xray_amplitude()
    #mil_arB = miller.array(mil_set, data=Bout_flex).set_observation_type_xray_amplitude()

    from cxid9114.parameters import ENERGY_CONV
    waveA = ENERGY_CONV / 8944.
    waveB = ENERGY_CONV / 9034.
    ucell = mil_arA.unit_cell()
    sgi = mil_arA.space_group_info()
    from iotbx import mtz
    mtz_handle = mtz.object()
    mtz_handle.set_title(title="MAD_MTZ")
    mtz_handle.set_space_group_info(space_group_info=sgi)
    #mtz_handle.set_hkl_base(unit_cell=ucell)
    #mtz_cr = mtz_handle.crystals()[0]
    mtz_cr = mtz_handle.add_crystal(name="Crystal",
                                    project_name="project",
                                    unit_cell=ucell)

    dsetA = mtz_cr.add_dataset(name="datasetA", wavelength=waveA)
    _ = dsetA.add_miller_array(miller_array=mil_arA, column_root_label="FAobs")

    dsetB = mtz_cr.add_dataset(name="datasetB", wavelength=waveB)
    _ = dsetB.add_miller_array(miller_array=mil_arB, column_root_label="FBobs")

    mtz_handle.show_summary()
    mtz_handle.write(args.o)

    for i in range(10):
        print(ES.IAprm_truth[i], np.log(ES.guess['IAprm'][i]), ES.helper.x[i])
        print(ES.IBprm_truth[i], np.log(ES.guess['IBprm'][i]),
              ES.helper.x[ES.Nhkl + i])
        print(ES.Gprm_truth[i], ES.guess["Gprm"][i],
              ES.helper.x[2 * ES.Nhkl + i])
        print()
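A hedged read-back sketch for the two-wavelength MTZ written above; the path is hypothetical (whatever was passed as args.o):

from iotbx import mtz

m = mtz.object(file_name="mad.mtz")  # hypothetical output path
for cr in m.crystals():
    for dset in cr.datasets():
        print(dset.name(), dset.wavelength())
print([col.label() for col in m.columns()])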
Example #9
def load_4bs7_sf():
    import os
    from cctbx.array_family import flex
    from cctbx import miller
    from iotbx.reflection_file_reader import any_reflection_file
    sf_path = os.path.dirname(__file__)
    sf_file = os.path.join(sf_path, "4bs7-sf.cif")

    f = any_reflection_file(file_name=sf_file)
    reader = f.file_content()
    if reader is None:
        raise ValueError(
            "Be sure to install git lfs and pull in the actual file with 4bs7-sf.cif"
        )
    F = reader.build_miller_arrays()["r4bs7sf"]['_refln.F_meas_au_1']
    Symm = F.crystal_symmetry()
    Fhkl = {h: val for h, val in zip(F.indices(), F.data())}

    Fd = reader.build_miller_arrays(
    )['r4bs7sf']['_refln.pdbx_anom_difference_1']
    Fdiff = {h: val for h, val in zip(Fd.indices(), Fd.data())}

    hcommon = set(Fhkl.keys()).intersection(set(Fdiff.keys()))
    Fpos = []
    Fneg = []
    hpos = []
    hneg = []
    for H in hcommon:
        fneg = Fhkl[H] - Fdiff[H]
        if fneg < 0:
            continue

        H_neg = -H[0], -H[1], -H[2]
        Fpos.append(Fhkl[H])
        hpos.append(H)
        Fneg.append(fneg)
        hneg.append(H_neg)

        #val_low = vals_anom[H_neg][0] + diff[H][0]

        #val_high = vals_anom[H_neg][0]   # + .5*diff[H][0]

        #if val_low <= 0:
        #    val_low = .1
        #    #from IPython import embed
        #    #embed()
        #assert val_high >= 0

        #vals_anom[H_neg][0] = val_low
        #vals_anom[H][0] = val_high
        ##if val_low < 0:
        ##    offset = abs(val_low)
        ##    vals_anom[H_neg][0] = val_low + offset*2
        ##    vals_anom[H][0] = val_high + offset*2
        ## propagate the error
        #vals_anom[H_neg][1] = np.sqrt(vals_anom[H_neg][1]**2 + diff[H][1]**2)
        #vals_anom[H][1] = np.sqrt(vals_anom[H][1] ** 2 + diff[H][1] ** 2)

    #hout = tuple(vals_anom.keys())

    Fflex = flex.double(Fpos + Fneg)
    hflex = flex.miller_index(hpos + hneg)

    mset = miller.set(crystal_symmetry=Symm,
                      indices=hflex,
                      anomalous_flag=True)

    #Fdata = flex.double([vals_anom[h][0] for h in hout])
    #Fsigmas = flex.double([vals_anom[h][1] for h in hout])
    #Fhkl_anom = miller.array(mset, data=Fdata, sigmas=Fsigmas).set_observation_type_xray_amplitude()
    Fhkl_anom = miller.array(mset,
                             data=Fflex).set_observation_type_xray_amplitude()

    return Fhkl_anom
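A toy sketch of the Friedel-mate reconstruction above: F(-h) is estimated as F(h) minus the tabulated anomalous difference, and reflections whose estimate goes negative are dropped:

Fhkl = {(1, 2, 3): 100.0, (2, 0, 0): 5.0}   # made-up F_meas values
Fdiff = {(1, 2, 3): 12.5, (2, 0, 0): 8.0}   # made-up anomalous differences
for H in set(Fhkl) & set(Fdiff):
    f_neg = Fhkl[H] - Fdiff[H]
    if f_neg < 0:  # skip non-physical negative amplitudes
        continue
    H_neg = (-H[0], -H[1], -H[2])
    print(H, Fhkl[H], H_neg, f_neg)  # (1, 2, 3) 100.0 (-1, -2, -3) 87.5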
Example #10
    def _index_finish(self):
        """Perform the indexer post-processing as required."""

        # ok, in here now ask if this solution was sensible!

        if not self.get_indexer_user_input_lattice():

            lattice = self._indxr_lattice
            cell = self._indxr_cell

            lattice2, cell2 = xds_check_indexer_solution(
                os.path.join(self.get_working_directory(), "XPARM.XDS"),
                os.path.join(self.get_working_directory(), "SPOT.XDS"),
            )

            Debug.write("Centring analysis: %s => %s" % (lattice, lattice2))

            doubled_lattice = False
            for j in range(3):
                if int(round(cell2[j] / cell[j])) == 2:
                    doubled_lattice = True
                    axes = "A", "B", "C"
                    Debug.write("Lattice axis doubled: %s" % axes[j])

            if (self._idxref_subtree_problem and
                (lattice2 != lattice)) or doubled_lattice:

                # hmm.... looks like we don't agree on the correct result...
                # update the putative correct result as input

                Debug.write("Detected pseudocentred lattice")
                Debug.write("Inserting solution: %s " % lattice2 +
                            "%6.2f %6.2f %6.2f %6.2f %6.2f %6.2f" % cell2)

                self._indxr_replace(lattice2, cell2)

                Debug.write("Set lattice: %s" % lattice2)
                Debug.write("Set cell: %f %f %f %f %f %f" % cell2)

                # then rerun

                self.set_indexer_done(False)
                return

        # finally read through SPOT.XDS and XPARM.XDS to get an estimate
        # of the low resolution limit - this should be pretty straightforward
        # since what I want is the resolution of the lowest resolution indexed
        # spot..

        spot_file = os.path.join(self.get_working_directory(), "SPOT.XDS")

        experiment = self.get_indexer_experiment_list()[0]
        crystal_model = experiment.crystal

        from iotbx.xds import spot_xds

        spot_xds_handle = spot_xds.reader()
        spot_xds_handle.read_file(spot_file)

        from cctbx.array_family import flex

        miller_indices = flex.miller_index(spot_xds_handle.miller_index)

        # only those reflections that were actually indexed
        miller_indices = miller_indices.select(miller_indices != (0, 0, 0))

        from scitbx import matrix

        ub = matrix.sqr(crystal_model.get_A())
        dmax = 1.05 * flex.max(
            1 / (ub.elems * miller_indices.as_vec3_double()).norms())

        Debug.write("Low resolution limit assigned as: %.2f" % dmax)
        self._indxr_low_resolution = dmax
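A minimal sketch of the low-resolution estimate at the end: each indexed spot has d = 1 / |UB h|, and the largest d (lowest resolution), padded by 5%, becomes dmax. The UB matrix below is a made-up diagonal example:

from scitbx import matrix
from cctbx.array_family import flex

ub = matrix.sqr((0.01, 0.0, 0.0,   # toy UB for 100 A, 100 A, 50 A axes
                 0.0, 0.01, 0.0,
                 0.0, 0.0, 0.02))
miller_indices = flex.miller_index([(1, 0, 0), (0, 0, 1)])
dmax = 1.05 * flex.max(1 / (ub.elems * miller_indices.as_vec3_double()).norms())
print(dmax)  # 105.0: the (1, 0, 0) spot at d = 100 A, padded by 5%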
Example #11
  def _index_finish(self):
    '''Perform the indexer post-processing as required.'''

    # ok, in here now ask if this solution was sensible!

    if not self.get_indexer_user_input_lattice():

      lattice = self._indxr_lattice
      cell = self._indxr_cell

      lattice2, cell2 = xds_check_indexer_solution(
          os.path.join(self.get_working_directory(), 'XPARM.XDS'),
          os.path.join(self.get_working_directory(), 'SPOT.XDS'))

      Debug.write('Centring analysis: %s => %s' % \
                  (lattice, lattice2))

      doubled_lattice = False
      for j in range(3):
        if int(round(cell2[j] / cell[j])) == 2:
          doubled_lattice = True
          axes = 'A', 'B', 'C'
          Debug.write('Lattice axis doubled: %s' % axes[j])

      if (self._idxref_subtree_problem and (lattice2 != lattice)) or \
             doubled_lattice:

        # hmm.... looks like we don't agree on the correct result...
        # update the putative correct result as input

        Debug.write('Detected pseudocentred lattice')
        Debug.write('Inserting solution: %s ' % lattice2 +
                    '%6.2f %6.2f %6.2f %6.2f %6.2f %6.2f' % cell2)

        self._indxr_replace(lattice2, cell2)

        Debug.write('Set lattice: %s' % lattice2)
        Debug.write('Set cell: %f %f %f %f %f %f' % \
                    cell2)

        # then rerun

        self.set_indexer_done(False)
        return

    # finally read through SPOT.XDS and XPARM.XDS to get an estimate
    # of the low resolution limit - this should be pretty straightforward
    # since what I want is the resolution of the lowest resolution indexed
    # spot..

    spot_file = os.path.join(self.get_working_directory(), 'SPOT.XDS')

    experiment = self.get_indexer_experiment_list()[0]
    detector = experiment.detector
    beam = experiment.beam
    goniometer = experiment.goniometer
    scan = experiment.scan
    crystal_model = experiment.crystal

    from iotbx.xds import spot_xds
    spot_xds_handle = spot_xds.reader()
    spot_xds_handle.read_file(spot_file)

    from cctbx.array_family import flex
    centroids_px = flex.vec3_double(spot_xds_handle.centroid)
    miller_indices = flex.miller_index(spot_xds_handle.miller_index)

    # only those reflections that were actually indexed
    centroids_px = centroids_px.select(miller_indices != (0,0,0))
    miller_indices = miller_indices.select(miller_indices != (0,0,0))

    from scitbx import matrix
    ub = matrix.sqr(crystal_model.get_A())  # get_A() returns a 9-tuple in newer dxtbx
    dmax = 1.05 * flex.max(1/(ub.elems * miller_indices.as_vec3_double()).norms())

    Debug.write('Low resolution limit assigned as: %.2f' % dmax)
    self._indxr_low_resolution = dmax

    return