예제 #1
0
 def get_miller_index_i_seqs(i_img, parallel=True):
     """Simulate diffraction image number i_img and return the
     miller_index_i_seqs of the spots recorded on it.

     i_img offsets the random seed, so each image gets its own
     reproducible crystal orientation.  If parallel is True the result
     is converted with copy_to_byte_str(), presumably for transfer
     between processes -- TODO confirm against the caller.

     Uses work_params, i_calc, flex, image_simple and
     kirian_delta_vs_ewald_proximity from the enclosing scope.
     """
     # Reproducible per-image random orientation.
     mt = flex.mersenne_twister(seed=work_params.noise.random_seed + i_img)
     crystal_rotation = mt.random_double_r3_rotation_matrix_arvo_1992()
     # Optional diagnostic comparing the Kirian delta criterion with the
     # Ewald proximity criterion.
     if (work_params.kirian_delta_vs_ewald_proximity):
         kirian_delta_vs_ewald_proximity(
             unit_cell=i_calc.p1_anom.unit_cell(),
             miller_indices=i_calc.p1_anom.indices(),
             crystal_rotation_matrix=crystal_rotation,
             ewald_radius=1 / work_params.wavelength,
             d_min=work_params.d_min,
             detector_distance=work_params.detector.distance,
             detector_size=work_params.detector.size,
             detector_pixels=work_params.detector.pixels)
     img = image_simple(
         store_miller_index_i_seqs=True, store_signals=True).compute(
             unit_cell=i_calc.p1_anom.unit_cell(),
             miller_indices=i_calc.p1_anom.indices(),
             spot_intensity_factors=None,
             crystal_rotation_matrix=crystal_rotation,
             ewald_radius=1 / work_params.wavelength,
             ewald_proximity=work_params.ewald_proximity,
             signal_max=1,
             detector_distance=work_params.detector.distance,
             detector_size=work_params.detector.size,
             detector_pixels=work_params.detector.pixels,
             point_spread=work_params.point_spread,
             gaussian_falloff_scale=work_params.gaussian_falloff_scale)
     result = img.miller_index_i_seqs
     # Keep only spots whose signal exceeds the partiality threshold.
     if (work_params.usable_partiality_threshold is not None):
         result = result.select(
             img.signals > work_params.usable_partiality_threshold)
     if (parallel):
         return result.copy_to_byte_str()
     return result
 def get_miller_index_i_seqs(i_img, parallel=True):
   mt = flex.mersenne_twister(seed=work_params.noise.random_seed+i_img)
   crystal_rotation = mt.random_double_r3_rotation_matrix_arvo_1992()
   if (work_params.kirian_delta_vs_ewald_proximity):
     kirian_delta_vs_ewald_proximity(
       unit_cell=i_calc.p1_anom.unit_cell(),
       miller_indices=i_calc.p1_anom.indices(),
       crystal_rotation_matrix=crystal_rotation,
       ewald_radius=1/work_params.wavelength,
       d_min=work_params.d_min,
       detector_distance=work_params.detector.distance,
       detector_size=work_params.detector.size,
       detector_pixels=work_params.detector.pixels)
   img = image_simple(
       store_miller_index_i_seqs=True,
       store_signals=True).compute(
     unit_cell=i_calc.p1_anom.unit_cell(),
     miller_indices=i_calc.p1_anom.indices(),
     spot_intensity_factors=None,
     crystal_rotation_matrix=crystal_rotation,
     ewald_radius=1/work_params.wavelength,
     ewald_proximity=work_params.ewald_proximity,
     signal_max=1,
     detector_distance=work_params.detector.distance,
     detector_size=work_params.detector.size,
     detector_pixels=work_params.detector.pixels,
     point_spread=work_params.point_spread,
     gaussian_falloff_scale=work_params.gaussian_falloff_scale)
   result = img.miller_index_i_seqs
   if (work_params.usable_partiality_threshold is not None):
     result = result.select(
       img.signals > work_params.usable_partiality_threshold)
   if (parallel):
     return result.copy_to_byte_str()
   return result
예제 #3
0
def exercise_pair_registry_adp_iso():
  """Exercise pair_registry.adp_iso_residual_sum against the Python
  reference implementations for a range of registry sizes."""
  rng = flex.mersenne_twister(seed=0)
  weight = 2.134
  average_power = 0.589
  for n_seq in xrange(2, 20):
    registry = ncs.restraints.pair_registry(n_seq=n_seq, n_ncs=n_seq)
    # Pair every position 1..n_seq-1 with position 0.
    for j_seq in xrange(1, n_seq):
      assert registry.enter(i_seq=0, j_seq=j_seq, j_ncs=j_seq) == (0, 0)
    selection_pairs = registry.selection_pairs()
    for j in xrange(1, n_seq):
      assert zip(*selection_pairs[j-1]) == [(0,j)]
    u_isos = rng.random_double(size=n_seq) + 1.e-3
    gradients_in = rng.random_double(size=n_seq)
    gradients = gradients_in.deep_copy()
    registry_residual_sum = registry.adp_iso_residual_sum(
      weight=weight,
      average_power=average_power,
      u_isos=u_isos,
      u_average_min=1.e-6,
      gradients=gradients)
    # adp_iso_residual_sum accumulates into gradients; isolate its
    # contribution by subtracting the starting values.
    gradients -= gradients_in
    expected_residual = adp_iso_residual_sum(
      weight=weight, average_power=average_power, u_isos=u_isos)
    assert approx_equal(registry_residual_sum, expected_residual)
    expected_gradients = adp_iso_analytical_gradients(
      weight=weight, average_power=average_power, u_isos=u_isos)
    assert approx_equal(gradients, expected_gradients)
예제 #4
0
def exercise_space_group_contains():
    """Exercise sgtbx.space_group.contains() on hand-picked operations,
    on all tabulated space-group settings, and on randomly sampled
    symmetry operations of the centric bravais types."""
    g = sgtbx.space_group("P 2")
    # Operations of P2 (unit translations allowed) must be contained.
    for s in ["x,y,z", "-x,-y,z", "-x+1,-y-2,z+3"]:
        assert g.contains(sgtbx.rt_mx(s))
    # Operations not in P2 must be rejected.
    for s in ["x,y,-z", "x+1/2,y,z"]:
        assert not g.contains(sgtbx.rt_mx(s))
    # Every tabulated group contains each of its own elements.
    for symbols in sgtbx.space_group_symbol_iterator():
        g = sgtbx.space_group(symbols.hall())
        for s in g:
            assert g.contains(s)
    rnd = flex.mersenne_twister(seed=0)
    n_c = 0   # count of contained operations checked
    n_nc = 0  # count of not-contained operations checked
    for symbol in sgtbx.bravais_types.centric:
        g = sgtbx.space_group_info(symbol=symbol,
                                   space_group_t_den=144).group()
        for s in g.change_basis(
                sgtbx.change_of_basis_op("x+1/12,y-1/12,z+1/12")):
            if (rnd.random_double() < 0.9): continue  # avoid long runtime
            # If adding s does not grow the group, s was already in it.
            gc = sgtbx.space_group(g)
            gc.expand_smx(s)
            if (gc.order_z() == g.order_z()):
                assert g.contains(s)
                n_c += 1
            else:
                assert not g.contains(s)
                n_nc += 1
    # Counts are fixed by the seeded random sampling above.
    assert n_c == 11, n_c
    assert n_nc == 53, n_nc
예제 #5
0
def exercise_space_group_contains():
  """Exercise sgtbx.space_group.contains() on hand-picked symops, on all
  tabulated settings, and on randomly sampled symops of centric groups."""
  grp = sgtbx.space_group("P 2")
  # Operations of P2 (allowing unit translations) are contained.
  for smx in ["x,y,z", "-x,-y,z", "-x+1,-y-2,z+3"]:
    assert grp.contains(sgtbx.rt_mx(smx))
  # Operations not in P2 are rejected.
  for smx in ["x,y,-z", "x+1/2,y,z"]:
    assert not grp.contains(sgtbx.rt_mx(smx))
  # Every group contains each of its own elements.
  for symbols in sgtbx.space_group_symbol_iterator():
    grp = sgtbx.space_group(symbols.hall())
    for smx in grp:
      assert grp.contains(smx)
  rnd = flex.mersenne_twister(seed=0)
  n_c = 0
  n_nc = 0
  for symbol in sgtbx.bravais_types.centric:
    grp = sgtbx.space_group_info(
      symbol=symbol, space_group_t_den=144).group()
    cb_op = sgtbx.change_of_basis_op("x+1/12,y-1/12,z+1/12")
    for smx in grp.change_basis(cb_op):
      if (rnd.random_double() < 0.9): continue # avoid long runtime
      # If expanding by smx does not grow the group, smx was in it.
      expanded = sgtbx.space_group(grp)
      expanded.expand_smx(smx)
      if (expanded.order_z() == grp.order_z()):
        assert grp.contains(smx)
        n_c += 1
      else:
        assert not grp.contains(smx)
        n_nc += 1
  assert n_c == 11, n_c
  assert n_nc == 53, n_nc
예제 #6
0
 def plot_samples(O, stage):
   """Plot samples for the given stage, as configured by
   params.plot_samples (O is this codebase's spelling of self).

   Depending on plot_samples.ix / ix_auto this plots one explicit
   parameter index, all indices, or a random selection per scattering
   type.
   """
   p = O.params.plot_samples
   if (stage not in p.stages):
     return
   if (p.ix is not None):
     # One explicitly requested parameter index.
     O.plot_samples_ix(stage, p.ix)
   elif (p.ix_auto == "all"):
     for ix in xrange(O.x.size()):
       O.plot_samples_ix(stage, ix)
   elif (p.ix_auto == "random"):
     assert p.ix_random.samples_each_scattering_type is not None
     assert p.ix_random.samples_each_scattering_type > 0
     assert p.ix_random.random_seed is not None
     mt = flex.mersenne_twister(seed=p.ix_random.random_seed)
     # Scatterer index groups, one group per scattering type.
     i_seqs_grouped = O.xray_structure.scatterers() \
       .extract_scattering_types().i_seqs_by_value().values()
     i_seqs_selected = flex.bool(O.x.size(), False)
     for i_seqs in i_seqs_grouped:
       ps = i_seqs.size()
       # At most samples_each_scattering_type picks per group.
       ss = min(ps, p.ix_random.samples_each_scattering_type)
       isel = mt.random_selection(population_size=ps, sample_size=ss)
       i_seqs_selected.set_selected(i_seqs.select(isel), True)
     # Plot parameters belonging to the selected scatterers.
     for ix,(i_sc,_) in enumerate(O.x_info):
       if (i_seqs_selected[i_sc]):
         O.plot_samples_ix(stage, ix)
   else:
     raise RuntimeError("Unknown plot_samples.ix_auto = %s" % p.ix_auto)
예제 #7
0
def exercise_pair_registry_adp_iso():
    """Exercise ncs.restraints.pair_registry.adp_iso_residual_sum against
    the pure-Python references adp_iso_residual_sum and
    adp_iso_analytical_gradients for a range of registry sizes."""
    mersenne_twister = flex.mersenne_twister(seed=0)
    for n_seq in xrange(2, 20):
        registry = ncs.restraints.pair_registry(n_seq=n_seq, n_ncs=n_seq)
        # Pair every position 1..n_seq-1 with position 0.
        for j_seq in xrange(1, n_seq):
            assert registry.enter(i_seq=0, j_seq=j_seq, j_ncs=j_seq) == (0, 0)
        selection_pairs = registry.selection_pairs()
        for j in xrange(1, n_seq):
            assert zip(*selection_pairs[j - 1]) == [(0, j)]
        weight = 2.134
        average_power = 0.589
        u_isos = mersenne_twister.random_double(size=n_seq) + 1.e-3
        gradients_in = mersenne_twister.random_double(size=n_seq)
        gradients = gradients_in.deep_copy()
        registry_residual_sum = registry.adp_iso_residual_sum(
            weight=weight,
            average_power=average_power,
            u_isos=u_isos,
            u_average_min=1.e-6,
            gradients=gradients)
        # adp_iso_residual_sum accumulates into gradients; subtract the
        # starting values to isolate its contribution.
        gradients -= gradients_in
        assert approx_equal(
            registry_residual_sum,
            adp_iso_residual_sum(weight=weight,
                                 average_power=average_power,
                                 u_isos=u_isos))
        assert approx_equal(
            gradients,
            adp_iso_analytical_gradients(weight=weight,
                                         average_power=average_power,
                                         u_isos=u_isos))
예제 #8
0
 def plot_samples(O, stage):
   """Plot samples for the given stage, as configured by
   params.plot_samples (O is this codebase's spelling of self).

   Depending on plot_samples.ix / ix_auto this plots one explicit
   parameter index, all indices, or a random selection per scattering
   type.
   """
   p = O.params.plot_samples
   if (stage not in p.stages):
     return
   if (p.ix is not None):
     # One explicitly requested parameter index.
     O.plot_samples_ix(stage, p.ix)
   elif (p.ix_auto == "all"):
     for ix in xrange(O.x.size()):
       O.plot_samples_ix(stage, ix)
   elif (p.ix_auto == "random"):
     assert p.ix_random.samples_each_scattering_type is not None
     assert p.ix_random.samples_each_scattering_type > 0
     assert p.ix_random.random_seed is not None
     mt = flex.mersenne_twister(seed=p.ix_random.random_seed)
     # Scatterer index groups, one group per scattering type.
     i_seqs_grouped = O.xray_structure.scatterers() \
       .extract_scattering_types().i_seqs_by_value().values()
     i_seqs_selected = flex.bool(O.x.size(), False)
     for i_seqs in i_seqs_grouped:
       ps = i_seqs.size()
       # At most samples_each_scattering_type picks per group.
       ss = min(ps, p.ix_random.samples_each_scattering_type)
       isel = mt.random_selection(population_size=ps, sample_size=ss)
       i_seqs_selected.set_selected(i_seqs.select(isel), True)
     # Plot parameters belonging to the selected scatterers.
     for ix,(i_sc,_) in enumerate(O.x_info):
       if (i_seqs_selected[i_sc]):
         O.plot_samples_ix(stage, ix)
   else:
     raise RuntimeError("Unknown plot_samples.ix_auto = %s" % p.ix_auto)
def run(args, label="R-free-flags", convert_fraction=0.5, random_seed=0):
  """Convert a fraction of the R-free test reflections of an MTZ file
  back into work reflections.

  args must hold exactly one MTZ file name.  The modified flags are
  written to "less_free_" + basename of the input file.

  label: column label of the R-free flags (0 = work, nonzero = free).
  convert_fraction: fraction of free reflections converted to work.
  random_seed: seed for picking which free reflections are converted.
  """
  assert len(args) == 1
  input_file_name = args[0]
  output_file_name = "less_free_"+os.path.basename(input_file_name)
  print "Reading file:", input_file_name
  mtz_obj = iotbx.mtz.object(file_name=input_file_name)
  column = mtz_obj.get_column(label=label)
  selection_valid = column.selection_valid()
  flags = column.extract_values()
  def get_and_report(what):
    # Consistency check: every valid reflection must be either work (0)
    # or free (nonzero).
    free_indices = ((flags != 0) & selection_valid).iselection()
    work_indices = ((flags == 0) & selection_valid).iselection()
    if (  free_indices.size()
        + work_indices.size() != selection_valid.count(True)):
      raise RuntimeError("""\
Unexpected array of R-free flags:
  Expected: 0 for work reflections, 1 for test reflections.""")
    print what, "number of free reflections:", free_indices.size()
    print what, "number of work reflections:", work_indices.size()
    return free_indices
  free_indices = get_and_report("Input")
  # Shuffle the free set and flip the first n_convert entries to work.
  mt = flex.mersenne_twister(seed=random_seed)
  permuted_indices = free_indices.select(
    mt.random_permutation(size=free_indices.size()))
  n_convert = int(permuted_indices.size() * convert_fraction + 0.5)
  print "Number of reflections converted from free to work:", n_convert
  flags.set_selected(permuted_indices[:n_convert], 0)
  get_and_report("Output")
  column.set_values(values=flags, selection_valid=selection_valid)
  print "Writing file:", output_file_name
  mtz_obj.write(file_name=output_file_name)
def run(args, label="R-free-flags", convert_fraction=0.5, random_seed=0):
    """Convert a fraction of the R-free test reflections of an MTZ file
    back into work reflections.

    args must contain exactly one MTZ file name; the result is written
    to "less_free_" + basename of the input.

    label: column label holding the flags (0 = work, nonzero = free).
    convert_fraction: fraction of the free set converted to work.
    random_seed: seed used to pick which free reflections to convert.
    """
    assert len(args) == 1
    input_file_name = args[0]
    output_file_name = "less_free_" + os.path.basename(input_file_name)
    print "Reading file:", input_file_name
    mtz_obj = iotbx.mtz.object(file_name=input_file_name)
    column = mtz_obj.get_column(label=label)
    selection_valid = column.selection_valid()
    flags = column.extract_values()

    def get_and_report(what):
        # Consistency check: every valid reflection must be either work
        # (0) or free (nonzero).
        free_indices = ((flags != 0) & selection_valid).iselection()
        work_indices = ((flags == 0) & selection_valid).iselection()
        if (free_indices.size() + work_indices.size() !=
                selection_valid.count(True)):
            raise RuntimeError("""\
Unexpected array of R-free flags:
  Expected: 0 for work reflections, 1 for test reflections.""")
        print what, "number of free reflections:", free_indices.size()
        print what, "number of work reflections:", work_indices.size()
        return free_indices

    free_indices = get_and_report("Input")
    # Shuffle the free set and flip the first n_convert entries to work.
    mt = flex.mersenne_twister(seed=random_seed)
    permuted_indices = free_indices.select(
        mt.random_permutation(size=free_indices.size()))
    n_convert = int(permuted_indices.size() * convert_fraction + 0.5)
    print "Number of reflections converted from free to work:", n_convert
    flags.set_selected(permuted_indices[:n_convert], 0)
    get_and_report("Output")
    column.set_values(values=flags, selection_valid=selection_valid)
    print "Writing file:", output_file_name
    mtz_obj.write(file_name=output_file_name)
예제 #11
0
def exercise_isotropic_adp():
  """Exercise the isotropic_adp restraint, its proxy, and the shared
  proxy aggregate functions, then check that the residual is invariant
  under rotation of the u_cart tensor."""
  i_seqs = (0,)
  weight = 2
  u_cart = ((1,2,3,5,2,8),)
  u_iso = (0,)
  use_u_aniso = (True,)
  p = adp_restraints.isotropic_adp_proxy(
    i_seqs=i_seqs,
    weight=weight)
  assert p.i_seqs == i_seqs
  assert approx_equal(p.weight, weight)
  i = adp_restraints.isotropic_adp(u_cart=u_cart[0], weight=weight)
  # Expected values for the u_cart tuple above.
  expected_deltas = (-1, 0, 1, 5, 2, 8)
  expected_gradients = (-4, 0, 4, 40, 16, 64)
  assert approx_equal(i.weight, weight)
  assert approx_equal(i.deltas(), expected_deltas)
  assert approx_equal(i.rms_deltas(), 4.5704364002673632)
  assert approx_equal(i.residual(), 376.0)
  assert approx_equal(i.gradients(), expected_gradients)
  gradients_aniso_cart = flex.sym_mat3_double(1, (0,0,0,0,0,0))
  # (Removed unused local gradients_iso: it was never passed to any of
  # the aggregate functions below.)
  proxies = adp_restraints.shared_isotropic_adp_proxy([p,p])
  u_cart = flex.sym_mat3_double(u_cart)
  u_iso = flex.double(u_iso)
  use_u_aniso = flex.bool(use_u_aniso)
  params = adp_restraint_params(u_cart=u_cart, u_iso=u_iso, use_u_aniso=use_u_aniso)
  # Aggregate functions over the shared proxies (same proxy twice).
  residuals = adp_restraints.isotropic_adp_residuals(params, proxies=proxies)
  assert approx_equal(residuals, (i.residual(),i.residual()))
  deltas_rms = adp_restraints.isotropic_adp_deltas_rms(params, proxies=proxies)
  assert approx_equal(deltas_rms, (i.rms_deltas(),i.rms_deltas()))
  residual_sum = adp_restraints.isotropic_adp_residual_sum(
    params,
    proxies=proxies,
    gradients_aniso_cart=gradients_aniso_cart
  )
  assert approx_equal(residual_sum, 752.0)
  # Finite-difference check of the accumulated analytical gradients.
  fd_grads_aniso, fd_grads_iso = finite_difference_gradients(
    restraint_type=adp_restraints.isotropic_adp,
    proxy=p,
    u_cart=u_cart,
    u_iso=u_iso,
    use_u_aniso=use_u_aniso
  )
  for g,e in zip(gradients_aniso_cart, fd_grads_aniso):
    # Factor 2: gradients were accumulated over two identical proxies.
    assert approx_equal(g, matrix.col(e)*2)
  #
  # check frame invariance of residual
  #
  u_cart = matrix.sym(sym_mat3=(0.1,0.2,0.05,0.03,0.02,0.01))
  a = adp_restraints.isotropic_adp(
    u_cart=u_cart.as_sym_mat3(), weight=1)
  expected_residual = a.residual()
  gen = flex.mersenne_twister()
  for i in range(20):
    R = matrix.rec(gen.random_double_r3_rotation_matrix(),(3,3))
    u_cart_rot = R * u_cart * R.transpose()
    a = adp_restraints.isotropic_adp(
      u_cart=u_cart_rot.as_sym_mat3(), weight=1)
    assert approx_equal(a.residual(), expected_residual)
예제 #12
0
def exercise_systematic(verbose):
  """Round-trip a tiny anomalous data set through MTZ and compare the
  human-readable column dump with pinned expected output, before and
  after marking the array as a reconstructed amplitude array."""
  from cctbx import miller
  from cctbx import crystal
  from cctbx.array_family import flex
  cs = crystal.symmetry(
    unit_cell=(13,15,14,90,90,100),
    space_group_symbol="P112")
  # Small index set including a Friedel pair (0,0,1)/(0,0,-1) and, as
  # asserted below, exactly one centric reflection.
  ms = miller.set(
    crystal_symmetry=cs,
    indices=flex.miller_index([
      (0,0,1),(0,0,-1),
      (0,1,1),
      (1,0,0),
      (-1,-1,-1)]),
    anomalous_flag=True).map_to_asu()
  cf = ms.centric_flags().data()
  assert cf.count(True) == 1
  mt = flex.mersenne_twister(seed=0)
  # Reproducible random data/sigmas, offset away from zero.
  ma = ms.array(
    data=mt.random_double(size=5)+0.1,
    sigmas=mt.random_double(size=5)+0.1)
  def recycle(expected_column_data):
    # Write ma to an MTZ object, compare the human-readable column dump
    # with the expectation, then read back and compare data/sigmas.
    mtz_obj = ma.as_mtz_dataset(column_root_label="X").mtz_object()
    sio = StringIO()
    mtz_obj.show_column_data_human_readable(out=sio)
    from libtbx.test_utils import show_diff
    if (verbose): sys.stdout.write(sio.getvalue())
    assert not show_diff(sio.getvalue(), expected_column_data)
    ma_2 = only_element(mtz_obj.as_miller_arrays())
    assert_equal_data_and_sigmas(ma, ma_2)
  recycle("""\
Column data:
-------------------------------------------------------------------------------
                    X(+)         SIGX(+)            X(-)         SIGX(-)

 0  0  1        0.517022        0.192339        0.820324         0.28626
 0  1  1        0.100114        0.445561            None            None
 1  0  0        0.402333        0.496767            None            None
 1  1  1            None            None        0.246756        0.638817
-------------------------------------------------------------------------------
""")
  from cctbx.xray import observation_types
  ma.set_observation_type(observation_types.reconstructed_amplitude())
  recycle("""\
Column data:
-------------------------------------------------------------------------------
                       X            SIGX           DANOX        SIGDANOX
                   ISYMX

 0  0  1        0.668673        0.172438       -0.303302        0.344875
                       0
 0  1  1        0.100114        0.445561            None            None
                       1
 1  0  0        0.402333        0.496767            None            None
                       0
 1  1  1        0.246756        0.638817            None            None
                       2
-------------------------------------------------------------------------------
""")
예제 #13
0
def exercise_isotropic_adp():
  """Exercise the isotropic_adp restraint, its proxy, and the shared
  proxy aggregate functions, then check that the residual is invariant
  under rotation of the u_cart tensor."""
  i_seqs = (0,)
  weight = 2
  u_cart = ((1,2,3,5,2,8),)
  u_iso = (0,)
  use_u_aniso = (True,)
  p = adp_restraints.isotropic_adp_proxy(
    i_seqs=i_seqs,
    weight=weight)
  assert p.i_seqs == i_seqs
  assert approx_equal(p.weight, weight)
  i = adp_restraints.isotropic_adp(u_cart=u_cart[0], weight=weight)
  # Expected values for the u_cart tuple above.
  expected_deltas = (-1, 0, 1, 5, 2, 8)
  expected_gradients = (-4, 0, 4, 40, 16, 64)
  assert approx_equal(i.weight, weight)
  assert approx_equal(i.deltas(), expected_deltas)
  assert approx_equal(i.rms_deltas(), 4.5704364002673632)
  assert approx_equal(i.residual(), 376.0)
  assert approx_equal(i.gradients(), expected_gradients)
  gradients_aniso_cart = flex.sym_mat3_double(1, (0,0,0,0,0,0))
  # (Removed unused local gradients_iso: it was never passed to any of
  # the aggregate functions below.)
  proxies = adp_restraints.shared_isotropic_adp_proxy([p,p])
  u_cart = flex.sym_mat3_double(u_cart)
  u_iso = flex.double(u_iso)
  use_u_aniso = flex.bool(use_u_aniso)
  params = adp_restraint_params(u_cart=u_cart, u_iso=u_iso, use_u_aniso=use_u_aniso)
  # Aggregate functions over the shared proxies (same proxy twice).
  residuals = adp_restraints.isotropic_adp_residuals(params, proxies=proxies)
  assert approx_equal(residuals, (i.residual(),i.residual()))
  deltas_rms = adp_restraints.isotropic_adp_deltas_rms(params, proxies=proxies)
  assert approx_equal(deltas_rms, (i.rms_deltas(),i.rms_deltas()))
  residual_sum = adp_restraints.isotropic_adp_residual_sum(
    params,
    proxies=proxies,
    gradients_aniso_cart=gradients_aniso_cart
  )
  assert approx_equal(residual_sum, 752.0)
  # Finite-difference check of the accumulated analytical gradients.
  fd_grads_aniso, fd_grads_iso = finite_difference_gradients(
    restraint_type=adp_restraints.isotropic_adp,
    proxy=p,
    u_cart=u_cart,
    u_iso=u_iso,
    use_u_aniso=use_u_aniso
  )
  for g,e in zip(gradients_aniso_cart, fd_grads_aniso):
    # Factor 2: gradients were accumulated over two identical proxies.
    assert approx_equal(g, matrix.col(e)*2)
  #
  # check frame invariance of residual
  #
  u_cart = matrix.sym(sym_mat3=(0.1,0.2,0.05,0.03,0.02,0.01))
  a = adp_restraints.isotropic_adp(
    u_cart=u_cart.as_sym_mat3(), weight=1)
  expected_residual = a.residual()
  gen = flex.mersenne_twister()
  for i in range(20):
    R = matrix.rec(gen.random_double_r3_rotation_matrix(),(3,3))
    u_cart_rot = R * u_cart * R.transpose()
    a = adp_restraints.isotropic_adp(
      u_cart=u_cart_rot.as_sym_mat3(), weight=1)
    assert approx_equal(a.residual(), expected_residual)
예제 #14
0
def recycle_dano_miller_array(miller_array):
  """Shuffle miller_array, write it to an MTZ object, read it back, and
  check that data and sigmas survive the round trip."""
  assert miller_array.is_xray_reconstructed_amplitude_array()
  from cctbx.array_family import flex
  rng = flex.mersenne_twister(seed=0)
  n = miller_array.indices().size()
  shuffled = miller_array.select(rng.random_permutation(size=n))
  mtz_obj = shuffled.as_mtz_dataset(column_root_label="X").mtz_object()
  recovered = only_element(mtz_obj.as_miller_arrays())
  assert str(recovered.info()) == "ccp4_mtz:X,SIGX,DANOX,SIGDANOX,ISYMX"
  assert_equal_data_and_sigmas(shuffled, recovered)
예제 #15
0
def alignment_by_embedding(reports, plot=False):
    """Resolve coset assignments consistently across tranches.

    reports is a list of tranch results, one list item per composite
    tranch.  Each item is a list over cosets (e.g. two elements for a
    merohedral twinning operator), and each element is itself a list of
    uuid's assigned to that coset.

    Returns a list with one report per input tranch, holding the coset
    ordering selected by the embedding minimization.
    """
    n_tranches = len(reports)
    reports = copy.deepcopy(reports)
    # Amend the reports list so that it has a new section for each
    # permutation of cosets.
    for itranch in range(len(reports)):
        cache_permutations = list(permutations(reports[itranch]))
        reports.append(cache_permutations[1])
        # NOTE: this code will have to be rewritten if there is more
        # than one symmetry operator XXX FIXME

    rij, wij = get_proposal_score(reports)
    mt = flex.mersenne_twister(seed=0)
    NN = len(reports)
    # Random but reproducible starting coordinates for the embedding.
    xcoord = mt.random_double(size=NN)
    ycoord = mt.random_double(size=NN)
    if plot:
        from matplotlib import pyplot as plt
        plt.plot(xcoord, ycoord, "g.")
        plt.show()

    from cctbx.merging.brehm_diederichs import minimize as mz, minimize_divide
    M = mz(xcoord, ycoord, rij, wij, verbose=True)
    coord_x = M.x[0:NN]
    coord_y = M.x[NN:2 * NN]
    # Split the embedded points into two clusters; the "plus" cluster
    # marks the consistent coset assignments.
    P = minimize_divide(coord_x, coord_y)
    selection = P.plus_minus()
    if plot:
        plt.plot(coord_x.select(selection),
                 coord_y.select(selection),
                 "r.",
                 markersize=2.)
        plt.plot(coord_x.select(~selection),
                 coord_y.select(~selection),
                 "k.",
                 markersize=3.)
        plt.show()

    print(list(selection))
    # Output should have as many reports as tranches.
    reformed_reports = [[] for i in range(n_tranches)]
    for iflag, select_flag in enumerate(selection):
        if select_flag:
            itranch = iflag % n_tranches
            reformed_reports[itranch] = reports[iflag]

    assert [] not in reformed_reports  # all tranches must have coset assignments
    return reformed_reports
예제 #16
0
def run(args):
    assert len(args) < 3
    arg_vals = [int(arg) for arg in args]
    arg_vals = arg_vals + [3, 2][len(arg_vals):]
    n_refl, n_trials = arg_vals
    assert n_refl > 0
    assert n_trials > 0
    mt = flex.mersenne_twister(seed=0)
    for i_trial in xrange(n_trials):
        exercise(mt, n_refl)
    print "OK"
예제 #17
0
def run(args):
  assert len(args) < 3
  arg_vals = [int(arg) for arg in args]
  arg_vals = arg_vals + [3, 2][len(arg_vals):]
  n_refl, n_trials = arg_vals
  assert n_refl > 0
  assert n_trials > 0
  mt = flex.mersenne_twister(seed=0)
  for i_trial in xrange(n_trials):
    exercise(mt, n_refl)
  print "OK"
예제 #18
0
def exercise_adp_iso_analytical():
  """Check analytical adp_iso gradients against finite differences over
  a grid of weights, average powers and sizes."""
  rng = flex.mersenne_twister(seed=0)
  for weight in [1.234, 2.134]:
    for average_power in [0.345, 0.589]:
      for size in xrange(2, 20):
        for _ in xrange(10):
          # Offset away from zero to keep u_isos strictly positive.
          u_isos = rng.random_double(size=size) + 1.e-3
          analytical = adp_iso_analytical_gradients(
            weight=weight, average_power=average_power, u_isos=u_isos)
          numerical = adp_iso_finite_difference_gradients(
            weight=weight, average_power=average_power, u_isos=u_isos)
          assert approx_equal(analytical, numerical)
예제 #19
0
def exercise_random(n_trials=10, n_refl=30):
  """Check analytical gradients and hessians of the ls and cc targets
  against finite differences, for F and I observation types, on
  randomly generated inputs."""
  rng = flex.mersenne_twister(seed=0)
  for target_type in ["ls", "cc"]:
    for i_trial in xrange(n_trials):
      for obs_type in ["F", "I"]:
        inputs = random_inputs(
          mt=rng, n_refl=n_refl, target_type=target_type,
          obs_type=obs_type)
        target = inputs.get(derivatives_depth=2)
        grad_analytical = target.gradients_work()
        grad_fd = inputs.gradients_work_fd()
        assert approx_equal(grad_analytical, grad_fd)
        hess_analytical = target.hessians_work()
        hess_fd = inputs.hessians_work_fd()
        assert approx_equal(hess_analytical, hess_fd)
예제 #20
0
def exercise_adp_iso_analytical():
    """Compare analytical adp_iso gradients with finite differences over
    a grid of weights, average powers and sizes."""
    twister = flex.mersenne_twister(seed=0)
    for weight in [1.234, 2.134]:
        for average_power in [0.345, 0.589]:
            for size in xrange(2, 20):
                for repeat in xrange(10):
                    # Offset keeps u_isos strictly positive.
                    u_isos = twister.random_double(size=size) + 1.e-3
                    analytical = adp_iso_analytical_gradients(
                        weight=weight,
                        average_power=average_power,
                        u_isos=u_isos)
                    finite_diff = adp_iso_finite_difference_gradients(
                        weight=weight,
                        average_power=average_power,
                        u_isos=u_isos)
                    assert approx_equal(analytical, finite_diff)
예제 #21
0
def exercise_random(n_trials=10, n_refl=30):
    """Validate analytical derivatives of the ls/cc targets against
    finite differences for F and I observation types."""
    twister = flex.mersenne_twister(seed=0)
    for target_type in ["ls", "cc"]:
        for trial in xrange(n_trials):
            for obs_type in ["F", "I"]:
                ri = random_inputs(mt=twister,
                                   n_refl=n_refl,
                                   target_type=target_type,
                                   obs_type=obs_type)
                tg = ri.get(derivatives_depth=2)
                grad_analytical = tg.gradients_work()
                grad_fd = ri.gradients_work_fd()
                assert approx_equal(grad_analytical, grad_fd)
                hess_analytical = tg.hessians_work()
                hess_fd = ri.hessians_work_fd()
                assert approx_equal(hess_analytical, hess_fd)
예제 #22
0
def run(args):
  """Command-line driver: refine COD entries stored as pickle files.

  args holds phil assignments (key=value) mixed with directories and/or
  pickle file names.  Directories are scanned for *.pickle files and
  for "qi_<cod_id>" quick-info files used for optional sorting.  Each
  pickle file is passed to process(); exceptions are caught and counted
  so one bad entry does not stop the run.
  """
  from iotbx.option_parser import option_parser as iotbx_option_parser
  import libtbx.utils
  show_times = libtbx.utils.show_times(time_start="now")
  command_call = ["iotbx.python", __file__]
  command_line = (iotbx_option_parser(
    usage=" ".join(command_call) + " [options] directory|file...")
    .enable_chunk(easy_all=True)
    .enable_multiprocessing()
  ).process(args=args, min_nargs=1)
  # When multiprocessing applies, this re-invokes the script in chunks
  # and returns True in the parent, which then only reports timings.
  if (command_line.run_multiprocessing_chunks_if_applicable(
        command_call=command_call)):
    show_times()
    return
  co = command_line.options  # NOTE(review): co appears unused below
  #
  print "TIME BEGIN cod_refine:", date_and_time()
  print
  #
  # Split remaining arguments into phil assignments and path arguments.
  master_phil = get_master_phil()
  argument_interpreter = master_phil.command_line_argument_interpreter()
  phil_objects = []
  remaining_args = []
  for arg in command_line.args:
    if (arg.find("=") >= 0):
      phil_objects.append(argument_interpreter.process(arg=arg))
    else:
      remaining_args.append(arg)
  work_phil = master_phil.fetch(sources=phil_objects)
  work_phil.show()
  print
  params = work_phil.extract()
  #
  # Collect pickle files and quick-info records (files named qi_XXXXXXX
  # containing a single eval-able line).
  qi_dict = {}
  all_pickles = []
  for arg in remaining_args:
    if (op.isdir(arg)):
      for node in sorted(os.listdir(arg)):
        if (node.endswith(".pickle")):
          all_pickles.append(op.join(arg, node))
        elif (node.startswith("qi_") and len(node) == 10):
          qi = open(op.join(arg, node)).read().splitlines()
          if (len(qi) == 1):
            cod_id = node[3:]
            quick_info = eval(qi[0])
            assert cod_id not in qi_dict
            qi_dict[cod_id] = quick_info
    elif (op.isfile(arg)):
      all_pickles.append(arg)
    else:
      raise RuntimeError("Not a file or directory: %s" % arg)
  print "Number of pickle files:", len(all_pickles)
  print "Number of quick_infos:", len(qi_dict)
  sort_choice = params.sorting_of_pickle_files
  if (len(qi_dict) != 0 and sort_choice is not None):
    print "Sorting pickle files by n_atoms * n_refl:", sort_choice
    assert sort_choice in ["down", "up"]
    def sort_pickle_files():
      # Sort by problem size; entries without quick info sort last.
      if (sort_choice == "down"): i_sign = -1
      else:                       i_sign = 1
      buffer = []
      for i,path in enumerate(all_pickles):
        cod_id = op.basename(path).split(".",1)[0]
        qi = qi_dict.get(cod_id)
        if (qi is None): nn = 2**31
        else:            nn = qi[0] * qi[1] * qi[2]
        buffer.append((nn, i_sign*i, path))
      buffer.sort()
      if (i_sign < 0):
        buffer.reverse()
      result = []
      for elem in buffer:
        result.append(elem[-1])
      return result
    all_pickles = sort_pickle_files()
  print
  #
  # Optionally restrict the run to a random subset of the pickle files.
  rss = params.random_subset.size
  if (rss is not None and rss > 0):
    seed = params.random_subset.seed
    print "Selecting subset of %d pickle files using random seed %d" % (
      rss, seed)
    mt = flex.mersenne_twister(seed=seed)
    perm = mt.random_permutation(size=len(all_pickles))[:rss]
    flags = flex.bool(len(all_pickles), False).set_selected(perm, True)
    all_pickles = flex.select(all_pickles, permutation=flags.iselection())
    print
  #
  from libtbx.path import makedirs_race
  if (params.wdir_root is not None):
    makedirs_race(path=params.wdir_root)
  if (params.pickle_refined_dir is not None):
    makedirs_race(path=params.pickle_refined_dir)
  #
  # Main loop: process each pickle file assigned to this chunk.
  n_caught = 0
  for i_pickle,pickle_file_name in enumerate(all_pickles):
    if (i_pickle % command_line.chunk.n != command_line.chunk.i): continue
    tm = user_plus_sys_time()
    try:
      process(params, pickle_file_name)
    except KeyboardInterrupt:
      print >> sys.stderr, "CAUGHT EXCEPTION: KeyboardInterrupt"
      traceback.print_exc()
      print >> sys.stderr
      sys.stderr.flush()
      return
    except Exception:
      # Keep going on failures; the count is reported at the end.
      sys.stdout.flush()
      print >> sys.stderr, "CAUGHT EXCEPTION: %s" % pickle_file_name
      traceback.print_exc()
      print >> sys.stderr
      sys.stderr.flush()
      n_caught += 1
    else:
      print "done_with: %s (%.2f seconds)" % (pickle_file_name, tm.elapsed())
      print
      sys.stdout.flush()
  print
  print "Number of exceptions caught:", n_caught
  #
  show_times()
  print
  print "TIME END cod_refine:", date_and_time()
예제 #23
0
def run(args):
    """Regression test for cctbx geometry-restraints LBFGS minimization.

    Builds bond/angle restraints for the five-membered ribose ring
    (C1*-C2*-C3*-C4*-O4*), minimizes from randomized starting coordinates,
    and counts how often each of the four reference pucker conformations
    (3', 2', A, B) is recovered.  If iotbx is available, the four
    conformers are also written to "puckers.pdb".

    args: [] or ["--verbose"]; with --verbose the restraint/rmsd output is
    echoed to stdout instead of being discarded via null_out().
    """
    assert args in [[], ["--verbose"]]
    if (len(args) != 0):
        cout = sys.stdout
    else:
        cout = null_out()
    # Ring connectivity as index pairs into atom_names
    # (atom_names / sites_cart_* are module-level fixtures defined
    # elsewhere in this file).
    edge_list_bonds = [(0, 1), (0, 4), (1, 2), (2, 3), (3, 4)]
    # Ideal bond distances (Angstrom) for the ring edges.
    bond_list = [(("C1*", "C2*"), 1.529), (("C1*", "O4*"), 1.412),
                 (("C2*", "C3*"), 1.526), (("C3*", "C4*"), 1.520),
                 (("C4*", "O4*"), 1.449)]
    # Ideal ring angles (degrees).
    angle_list = [
        (("C1*", "C2*", "C3*"), 101.3), (("C2*", "C3*", "C4*"), 102.3),
        (("C3*", "C4*", "O4*"), 104.2), (("C4*", "O4*", "C1*"), 110.0)
    ]
    sites_cart, geo_manager = cctbx.geometry_restraints.manager \
      .construct_non_crystallographic_conserving_bonds_and_angles(
        sites_cart=sites_cart_3p,
        edge_list_bonds=edge_list_bonds,
        edge_list_angles=[])
    # Overwrite the auto-derived bond ideals with the target values
    # (they must already be close, otherwise the fixture is broken).
    for bond_atom_names, distance_ideal in bond_list:
        i, j = [atom_names.index(atom_name) for atom_name in bond_atom_names]
        bond_params = geo_manager.bond_params_table[i][j]
        assert approx_equal(bond_params.distance_ideal,
                            distance_ideal,
                            eps=1.e-2)
        bond_params.distance_ideal = distance_ideal
        bond_params.weight = 1 / 0.02**2  # i.e. sigma = 0.02 A
    assert geo_manager.angle_proxies is None
    geo_manager.angle_proxies = cctbx.geometry_restraints.shared_angle_proxy()
    for angle_atom_names, angle_ideal in angle_list:
        i_seqs = [
            atom_names.index(atom_name) for atom_name in angle_atom_names
        ]
        geo_manager.angle_proxies.append(
            cctbx.geometry_restraints.angle_proxy(i_seqs=i_seqs,
                                                  angle_ideal=angle_ideal,
                                                  weight=1 / 3**2))  # sigma = 3 deg
    geo_manager.show_sorted(site_labels=atom_names,
                            sites_cart=sites_cart,
                            f=cout)

    def lbfgs(sites_cart):
        # Minimize sites_cart in place; restart a few times to confirm
        # the target stays at (essentially) zero.
        for i_lbfgs_restart in range(3):
            minimized = cctbx.geometry_restraints.lbfgs.lbfgs(
                sites_cart=sites_cart, geometry_restraints_manager=geo_manager)
            assert is_below_limit(value=minimized.final_target_value,
                                  limit=1e-10)
        return minimized

    # Both reference conformations must already satisfy the restraints.
    lbfgs(sites_cart=sites_cart_3p)
    lbfgs(sites_cart=sites_cart_2p)
    conformer_counts = [0] * 4
    sites_cart = sites_cart.deep_copy()
    mt = flex.mersenne_twister(seed=0)
    for i_trial in range(20):
        while True:
            # Random start: put each site on the unit sphere; retry on
            # pathological geometries rejected by the minimizer.
            for i in range(sites_cart.size()):
                sites_cart[i] = mt.random_double_point_on_sphere()
            try:
                lbfgs(sites_cart=sites_cart)
            except RuntimeError as e:
                # Only swallow the known "unreasonable bond" failure;
                # anything else is a real error.
                if (not str(e).startswith(
                        "Bond distance > max_reasonable_bond_distance: ")):
                    raise
            else:
                break
        # The minimized result must match one of the four known conformers
        # after least-squares superposition.
        rmsd_list = flex.double()
        for reference_sites in [
                sites_cart_3p, sites_cart_2p, sites_cart_a, sites_cart_b
        ]:
            sup = scitbx.math.superpose.least_squares_fit(
                reference_sites=reference_sites, other_sites=sites_cart)
            rmsd = reference_sites.rms_difference(sup.other_sites_best_fit())
            rmsd_list.append(rmsd)
        oline = " ".join(["%.3f" % rmsd for rmsd in rmsd_list])
        print(oline, file=cout)
        assert is_below_limit(min(rmsd_list), 1e-3)
        conformer_counts[flex.min_index(rmsd_list)] += 1
    print("conformer_counts:", conformer_counts)
    #
    if (libtbx.env.has_module("iotbx")):
        import iotbx.pdb.hierarchy
        hierarchy = iotbx.pdb.hierarchy.root()
        model = iotbx.pdb.hierarchy.model(id="")
        chain = iotbx.pdb.hierarchy.chain(id="A")
        model.append_chain(chain)
        hierarchy.append_model(model)
        #
        # Superpose each conformer onto a regular pentagon and stack them
        # along z (1.5 A apart) as four residues of one PDB chain.
        sites_cart_pentagon = pentagon_sites_cart()
        for i_stack, sites_cart in enumerate(
            [sites_cart_3p, sites_cart_2p, sites_cart_a, sites_cart_b]):
            atom_group = iotbx.pdb.hierarchy.atom_group(resname="  U",
                                                        altloc="")
            sup = scitbx.math.superpose.least_squares_fit(
                reference_sites=sites_cart_pentagon, other_sites=sites_cart)
            sites_cart_out = sup.other_sites_best_fit()
            for site_label, site_cart in zip(atom_names, sites_cart_out):
                atom = iotbx.pdb.hierarchy.atom()
                atom.name = " %-3s" % site_label
                atom.xyz = matrix.col(site_cart) + matrix.col(
                    (0, 0, i_stack * 1.5))
                atom.occ = 1
                atom.b = 20
                atom.element = " " + site_label[0]
                atom_group.append_atom(atom)
            residue_group = iotbx.pdb.hierarchy.residue_group(resseq="%4d" %
                                                              (i_stack + 1),
                                                              icode=" ")
            residue_group.append_atom_group(atom_group)
            chain.append_residue_group(residue_group)
        hierarchy.atoms().reset_serial()
        pdb_str = hierarchy.as_pdb_string(append_end=True)
        file_name = "puckers.pdb"
        print("Writing file:", file_name)
        open(file_name, "w").write("""\
REMARK random_puckers.py
REMARK 1 = 3'
REMARK 2 = 2'
REMARK 3 = A
REMARK 4 = B
""" + pdb_str)
    #
    print("OK")
Example #24
def run(args):
    """Command-line driver for cod_refine (Python 2).

    Collects pickle files from the given directories/files, optionally
    sorts them by problem size and/or selects a random subset, then calls
    process() on the chunk of files assigned to this process.

    args: phil "name=value" assignments mixed with directory/file paths;
    directories are scanned for *.pickle files and qi_<cod_id> quick-info
    files.
    """
    from iotbx.option_parser import option_parser as iotbx_option_parser
    import libtbx.utils
    show_times = libtbx.utils.show_times(time_start="now")
    command_call = ["iotbx.python", __file__]
    # Parse chunking/multiprocessing options; at least one path required.
    command_line = (iotbx_option_parser(
        usage=" ".join(command_call) +
        " [options] directory|file...").enable_chunk(
            easy_all=True).enable_multiprocessing()).process(args=args,
                                                             min_nargs=1)
    # When acting as the multiprocessing parent, the children do the work.
    if (command_line.run_multiprocessing_chunks_if_applicable(
            command_call=command_call)):
        show_times()
        return
    co = command_line.options  # NOTE(review): appears unused below -- confirm
    #
    print "TIME BEGIN cod_refine:", date_and_time()
    print
    #
    # Split remaining arguments into phil assignments (contain "=") and
    # file/directory paths.
    master_phil = get_master_phil()
    argument_interpreter = master_phil.command_line_argument_interpreter()
    phil_objects = []
    remaining_args = []
    for arg in command_line.args:
        if (arg.find("=") >= 0):
            phil_objects.append(argument_interpreter.process(arg=arg))
        else:
            remaining_args.append(arg)
    work_phil = master_phil.fetch(sources=phil_objects)
    work_phil.show()
    print
    params = work_phil.extract()
    #
    # Collect pickle files plus qi_<cod_id> quick-info records (a single
    # line holding an eval-able tuple).
    qi_dict = {}
    all_pickles = []
    for arg in remaining_args:
        if (op.isdir(arg)):
            for node in sorted(os.listdir(arg)):
                if (node.endswith(".pickle")):
                    all_pickles.append(op.join(arg, node))
                elif (node.startswith("qi_") and len(node) == 10):
                    qi = open(op.join(arg, node)).read().splitlines()
                    if (len(qi) == 1):
                        cod_id = node[3:]
                        # NOTE(review): eval of file contents -- assumes the
                        # qi_ files are locally generated and trusted.
                        quick_info = eval(qi[0])
                        assert cod_id not in qi_dict
                        qi_dict[cod_id] = quick_info
        elif (op.isfile(arg)):
            all_pickles.append(arg)
        else:
            raise RuntimeError("Not a file or directory: %s" % arg)
    print "Number of pickle files:", len(all_pickles)
    print "Number of quick_infos:", len(qi_dict)
    sort_choice = params.sorting_of_pickle_files
    if (len(qi_dict) != 0 and sort_choice is not None):
        print "Sorting pickle files by n_atoms * n_refl:", sort_choice
        assert sort_choice in ["down", "up"]

        def sort_pickle_files():
            # Sort by the product of the three quick-info numbers; files
            # without quick-info sort last (sentinel key 2**31).  The
            # i_sign*i tie-breaker keeps equal keys in original order for
            # both sort directions.
            if (sort_choice == "down"): i_sign = -1
            else: i_sign = 1
            buffer = []
            for i, path in enumerate(all_pickles):
                cod_id = op.basename(path).split(".", 1)[0]
                qi = qi_dict.get(cod_id)
                if (qi is None): nn = 2**31
                else: nn = qi[0] * qi[1] * qi[2]
                buffer.append((nn, i_sign * i, path))
            buffer.sort()
            if (i_sign < 0):
                buffer.reverse()
            result = []
            for elem in buffer:
                result.append(elem[-1])
            return result

        all_pickles = sort_pickle_files()
    print
    #
    # Optional reproducible random subset selection.
    rss = params.random_subset.size
    if (rss is not None and rss > 0):
        seed = params.random_subset.seed
        print "Selecting subset of %d pickle files using random seed %d" % (
            rss, seed)
        mt = flex.mersenne_twister(seed=seed)
        perm = mt.random_permutation(size=len(all_pickles))[:rss]
        flags = flex.bool(len(all_pickles), False).set_selected(perm, True)
        all_pickles = flex.select(all_pickles, permutation=flags.iselection())
        print
    #
    from libtbx.path import makedirs_race
    if (params.wdir_root is not None):
        makedirs_race(path=params.wdir_root)
    if (params.pickle_refined_dir is not None):
        makedirs_race(path=params.pickle_refined_dir)
    #
    # Process this chunk's share of the files.  Keep going on ordinary
    # exceptions (count them); stop cleanly on KeyboardInterrupt.
    n_caught = 0
    for i_pickle, pickle_file_name in enumerate(all_pickles):
        if (i_pickle % command_line.chunk.n != command_line.chunk.i): continue
        tm = user_plus_sys_time()
        try:
            process(params, pickle_file_name)
        except KeyboardInterrupt:
            print >> sys.stderr, "CAUGHT EXCEPTION: KeyboardInterrupt"
            traceback.print_exc()
            print >> sys.stderr
            sys.stderr.flush()
            return
        except Exception:
            sys.stdout.flush()
            print >> sys.stderr, "CAUGHT EXCEPTION: %s" % pickle_file_name
            traceback.print_exc()
            print >> sys.stderr
            sys.stderr.flush()
            n_caught += 1
        else:
            print "done_with: %s (%.2f seconds)" % (pickle_file_name,
                                                    tm.elapsed())
            print
            sys.stdout.flush()
    print
    print "Number of exceptions caught:", n_caught
    #
    show_times()
    print
    print "TIME END cod_refine:", date_and_time()
Example #25
def process_continue(params, cod_id, c_obs, i_obs, f_obs, structure_prep):
  """Prepare observations for one COD entry and run the configured
  refinement engines (omz.dev, smtbx least-squares, shelxl, shelx76).

  params         : extracted phil parameters controlling the run
  cod_id         : COD identifier (used in messages and output file names)
  c_obs          : observations as read (intensity or amplitude array)
  i_obs, f_obs   : intensity and amplitude versions of the observations
  structure_prep : prepared starting xray structure (Python 2 code)
  """
  p = params.f_calc_options
  f_calc = f_obs.structure_factors_from_scatterers(
    xray_structure=structure_prep,
    algorithm=p.algorithm,
    cos_sin_table=p.cos_sin_table).f_calc()
  # Detect f_obs/f_calc fan outliers; optionally remove them from the data.
  sel = f_obs.f_obs_f_calc_fan_outlier_selection(f_calc=f_calc)
  assert sel is not None
  n_outliers = sel.count(True)
  if (n_outliers != 0):
    action = params.f_obs_f_calc_fan_outliers
    print "INFO: f_obs_f_calc_fan_outliers = %s: %d" % (action, n_outliers)
    if (action == "remove"):
      i_obs = i_obs.select(~sel)
      f_obs = f_obs.select(~sel)
  # Work with merged (non-anomalous) data from here on.
  if (f_obs.anomalous_flag()):
    print "INFO: converting anomalous i+f_obs to non-anomalous."
    i_obs = i_obs.average_bijvoet_mates()
    f_obs = f_obs.average_bijvoet_mates()
  # Drop reflections whose datum and sigma are both exactly zero.
  sel = ((i_obs.data() == 0) & (i_obs.sigmas() == 0)) \
      | ((f_obs.data() == 0) & (f_obs.sigmas() == 0))
  n_zero_d_and_s = sel.count(True)
  if (n_zero_d_and_s != 0):
    print "INFO: removing reflections with i+f_obs=0 and sigma=0:", \
      n_zero_d_and_s
    i_obs = i_obs.select(~sel)
    f_obs = f_obs.select(~sel)
  p = params.f_calc_options
  # Recompute f_calc for the (possibly pruned) reflection set.
  f_calc = f_obs.structure_factors_from_scatterers(
    xray_structure=structure_prep,
    algorithm=p.algorithm,
    cos_sin_table=p.cos_sin_table).f_calc()
  if (params.use_f_calc_as_f_obs):
    # Synthetic-data mode: replace observations by f_calc with small
    # constant sigmas.
    print "INFO: using f_calc as i+f_obs"
    i_obs = f_calc.intensities().customized_copy(
      sigmas=flex.double(f_calc.indices().size(), 0.01))
    f_obs = f_calc.amplitudes().customized_copy(
      sigmas=flex.double(f_calc.indices().size(), 0.01))
  else:
    # scaling applied so that the data written in shelx hklf format
    # have sufficient significant digits, and FVAR is 1 (shelx76 seems
    # to be especially sensitive to FVAR >> 1)
    k = f_obs.scale_factor(f_calc=f_calc)
    assert k != 0
    s = 1/k**2
    print "INFO: scaling i_obs to f_calc by multiplying i_obs with: %.6g" % s
    i_obs = i_obs.apply_scaling(factor=s)
    s = 1/k
    print "INFO: scaling f_obs to f_calc by multiplying f_obs with: %.6g" % s
    f_obs = f_obs.apply_scaling(factor=s)
  def show(obs):
    # Summarize whichever array matches the original observation type.
    obs.show_comprehensive_summary()
    from cif_refine import \
      report_fraction_of_negative_observations_if_any as _
    _(cod_id, obs)
  if (c_obs.is_xray_intensity_array()):
    show(i_obs)
  else:
    show(f_obs)
  print "."*79
  #
  # Refinement operates on a hydrogen-free copy of the structure.
  structure_work = structure_prep.deep_copy_scatterers()
  sel = structure_work.hd_selection()
  print "Removing hydrogen atoms:", sel.count(True)
  structure_work = structure_work.select(selection=~sel)
  sdt = params.show_distances_threshold
  if (sdt > 0):
    print "Distances smaller than %.6g A:" % sdt
    structure_work.show_distances(distance_cutoff=sdt)
    print "."*79
  #
  if (params.tardy_samples.iq is not None):
    from cctbx.omz import tardy_adaptor
    print
    # NOTE(review): edge_list is not defined in this function; presumably
    # a module-level name -- confirm, otherwise this branch raises
    # NameError.
    tardy_adaptor.sample_e_pot(
      id_code=cod_id,
      f_obs=f_obs,
      xray_structure=structure_prep,
      edge_list=edge_list,
      params=params.tardy_samples)
    print
    return
  #
  from iotbx.shelx import fvar_encoding
  fvars, encoded_sites = fvar_encoding.dev_build_shelx76_fvars(structure_work)
  print "Number of FVARs for special position constraints:", len(fvars)-1
  print "."*79
  #
  show_cc_r1(params, "prep", f_obs, structure_prep)
  def cc_r1(label):
    # Report correlation/R1 for the current working structure.
    show_cc_r1(params, label, f_obs, structure_work)
  cc_r1("no_h")
  structure_work.convert_to_isotropic()
  cc_r1("iso")
  structure_iso = structure_work.deep_copy_scatterers()
  #
  # Optional perturbations of the starting model (u_iso reset, site shake).
  if (params.reset_u_iso is not None):
    structure_work.set_u_iso(value=params.reset_u_iso)
    cc_r1("setu")
  if (params.shake_sites_rmsd is not None):
    mt = flex.mersenne_twister(seed=0)
    structure_work.shift_sites_in_place(
      shift_length=params.shake_sites_rmsd,
      mersenne_twister=mt)
    print "rms difference after shift_sites_in_place: %.3f" \
      % structure_iso.rms_difference(structure_work)
    cc_r1("shift_xyz")
  #
  if (params.max_atoms is not None):
    n = structure_work.scatterers().size()
    if (n > params.max_atoms):
      print "Skipping refinement of large model: %d atoms COD %s" % (
        n, cod_id)
      return
  #
  # Refine site and isotropic u gradients only.
  structure_work.scatterers().flags_set_grads(state=False)
  for sc in structure_work.scatterers():
    sc.flags.set_grad_site(True)
    assert sc.flags.use_u_iso_only()
    sc.flags.set_grad_u_iso(True)
  n_refinable_parameters = structure_work.n_parameters(
    considering_site_symmetry_constraints=True)
  print "Number of refinable parameters:", n_refinable_parameters
  #
  if (params.iteration_limit < 1):
    return
  #
  if ("dev" not in params.optimizers):
    structure_dev = None
  else:
    structure_dev = structure_work.deep_copy_scatterers()
    omz.dev.refinement(
      i_obs=i_obs,
      f_obs=f_obs,
      xray_structure=structure_dev,
      params=params,
      reference_structure=structure_iso,
      expected_n_refinable_parameters=n_refinable_parameters,
      plot_samples_id=cod_id)
    show_cc_r1(params, "dev", f_obs, structure_dev)
    if (params.export_refined):
      file_name = "dev_%s_%s_%s.pdb" % (
        params.target_type, params.target_obs_type.lower(), cod_id)
      open(file_name, "w").write(structure_dev.as_pdb_file(
        remarks=[file_name]))
    if (params.pickle_refined_dir is not None):
      easy_pickle.dump(
        file_name=op.join(params.pickle_refined_dir, cod_id+".pickle"),
        obj=(c_obs, structure_dev, None))
      # Also write the qi_<cod_id> quick-info file next to the pickle.
      print >> open("%s/qi_%s" % (params.pickle_refined_dir, cod_id), "w"), (
        structure_dev.scatterers().size(),
        c_obs.space_group().order_p(),
        c_obs.indices().size(),
        c_obs.d_min())
  #
  def use_smtbx_ls(mode):
    # Run smtbx least-squares in the given mode if requested/available;
    # returns the refined structure or None.
    if ("ls_"+mode not in params.optimizers):
      return None
    if (not libtbx.env.has_module(name="smtbx")):
      print "INFO: smtbx not available: refinement skipped."
      return None
    result = structure_work.deep_copy_scatterers()
    run_smtbx_ls(
      mode=mode,
      cod_id=cod_id,
      i_obs=i_obs,
      f_obs=f_obs,
      xray_structure=result,
      params=params)
    show_cc_r1(params, "ls_"+mode, f_obs, result)
    return result
  structure_ls_simple = use_smtbx_ls("simple")
  structure_ls_lm = use_smtbx_ls("lm")
  #
  def use_shelxl(mode):
    # Run shelxl in the given mode ("fm"/"cg") if requested; returns the
    # refined structure or None.
    if ("shelxl_"+mode not in params.optimizers):
      return None
    result = structure_work.deep_copy_scatterers()
    run_shelxl(
      mode=mode,
      cod_id=cod_id,
      i_obs=i_obs,
      f_obs=f_obs,
      xray_structure=result,
      params=params,
      reference_structure=structure_iso,
      expected_n_refinable_parameters=n_refinable_parameters)
    if (params.export_refined):
      file_name = "shelxl_%s_%s.pdb" % (mode, cod_id)
      open(file_name, "w").write(result.as_pdb_file(
        remarks=[file_name]))
    return result
  structure_shelxl_fm = use_shelxl("fm")
  structure_shelxl_cg = use_shelxl("cg")
  #
  if ("shelx76" not in params.optimizers):
    structure_shelx76 = None
  else:
    structure_shelx76 = structure_work.deep_copy_scatterers()
    run_shelx76(
      cod_id=cod_id,
      f_obs=f_obs,
      xray_structure=structure_shelx76,
      fvars=fvars,
      encoded_sites=encoded_sites,
      params=params,
      reference_structure=structure_iso)
    if (params.export_refined):
      file_name = "shelx76_%s.pdb" % cod_id
      open(file_name, "w").write(structure_shelx76.as_pdb_file(
        remarks=[file_name]))
Example #26
def run(args):
    """Exercise cctbx.miller.reindexing.assistant.

    For several lattice-group/intensity-group combinations, checks that
    the stored change-of-basis permutations agree with direct index
    matching, that the summary text is exactly as expected, and that the
    i/j multiplication tables have the expected values.
    """
    assert len(args) == 0
    from cctbx import miller
    import cctbx.miller.reindexing
    from cctbx import uctbx
    from cctbx import sgtbx
    from cctbx.array_family import flex
    unit_cell = uctbx.unit_cell((11, 11, 11, 81, 81, 81))
    mset = unit_cell.complete_miller_set_with_lattice_symmetry(
        anomalous_flag=True, d_min=3)
    asst = miller.reindexing.assistant(
        lattice_group=mset.space_group(),
        intensity_group=sgtbx.space_group_info(symbol="P 1").group(),
        miller_indices=mset.expand_to_p1().indices())
    rng = flex.mersenne_twister(seed=0)

    def verify_cb_op_perm(cb_op, perm):
        # The stored permutation for cb_op must agree with what direct
        # index matching produces for a random pair of indices.
        mapped_full = cb_op.apply(asst.miller_indices)
        picked = flex.random_permutation(size=asst.miller_indices.size())[:2]
        mapped = cb_op.apply(asst.miller_indices.select(picked))
        pairs = miller.match_indices(mapped, asst.miller_indices).pairs()
        assert pairs.column(0).all_eq(flex.size_t_range(mapped.size()))
        assert perm.select(picked).all_eq(pairs.column(1))

    def verify_all():
        # Check forward and inverse permutations for every ambiguity op.
        for cb_op, perm, inv_perm in zip(asst.cb_ops, asst.perms,
                                         asst.inv_perms):
            verify_cb_op_perm(cb_op, perm)
            verify_cb_op_perm(cb_op.inverse(), inv_perm)

    verify_all()
    expected = [[0, 1, 2, 3, 4, 5],
                [1, 2, 0, 4, 5, 3],
                [2, 0, 1, 5, 3, 4],
                [3, 5, 4, 0, 2, 1],
                [4, 3, 5, 1, 0, 2],
                [5, 4, 3, 2, 1, 0]]
    assert asst.i_j_multiplication_table == expected
    expected = [[0, 1, 2, 3, 4, 5],
                [2, 0, 1, 5, 3, 4],
                [1, 2, 0, 4, 5, 3],
                [3, 5, 4, 0, 2, 1],
                [4, 3, 5, 1, 0, 2],
                [5, 4, 3, 2, 1, 0]]
    assert asst.i_inv_j_multiplication_table == expected
    expected = [[0, 2, 1, 3, 4, 5],
                [1, 0, 2, 4, 5, 3],
                [2, 1, 0, 5, 3, 4],
                [3, 4, 5, 0, 2, 1],
                [4, 5, 3, 1, 0, 2],
                [5, 3, 4, 2, 1, 0]]
    assert asst.i_j_inv_multiplication_table == expected
    from libtbx.test_utils import show_diff
    from six.moves import cStringIO as StringIO
    buf = StringIO()
    assert asst.show_summary(out=buf, prefix=": ") is asst
    assert not show_diff(
        buf.getvalue(), """\
: Lattice symmetry: R 3 2 :R (No. 155)
: Intensity symmetry: P 1 (No. 1)
:
: Indexing ambiguities:
:   k,l,h         3-fold    invariants:    4
:   l,h,k         3-fold    invariants:    4
:   -k,-h,-l      2-fold    invariants:    4
:   -l,-k,-h      2-fold    invariants:    4
:   -h,-l,-k      2-fold    invariants:    4
""")
    #
    # Intensity group equal to the lattice group: no ambiguity remains.
    asst = miller.reindexing.assistant(
        lattice_group=mset.space_group(),
        intensity_group=mset.space_group(),
        miller_indices=asst.miller_indices)
    verify_all()
    buf = StringIO()
    assert asst.show_summary(out=buf) is asst
    assert not show_diff(
        buf.getvalue(), """\
Lattice symmetry: R 3 2 :R (No. 155)
Intensity symmetry: R 3 2 :R (No. 155)

No indexing ambiguity.
""")
    assert asst.i_j_multiplication_table == [[0]]
    assert asst.i_inv_j_multiplication_table == [[0]]
    assert asst.i_j_inv_multiplication_table == [[0]]
    #
    # Intensity group R 3 :R: a single two-fold ambiguity.
    asst = miller.reindexing.assistant(
        lattice_group=mset.space_group(),
        intensity_group=sgtbx.space_group_info(symbol="R 3 :R").group(),
        miller_indices=asst.miller_indices)
    verify_all()
    buf = StringIO()
    assert asst.show_summary(out=buf) is asst
    assert not show_diff(
        buf.getvalue(), """\
Lattice symmetry: R 3 2 :R (No. 155)
Intensity symmetry: R 3 :R (No. 146)

Indexing ambiguity:
  -h,-l,-k      2-fold    invariants:    4
""")
    assert asst.i_j_multiplication_table == [[0, 1], [1, 0]]
    assert asst.i_inv_j_multiplication_table == [[0, 1], [1, 0]]
    assert asst.i_j_inv_multiplication_table == [[0, 1], [1, 0]]
    #
    # Rhombohedral cell with all angles acos(-1/3): cubic lattice symmetry.
    import math
    rhombo_angle = math.acos(-1 / 3) * 180 / math.pi
    unit_cell = uctbx.unit_cell(
        (11, 11, 11, rhombo_angle, rhombo_angle, rhombo_angle))
    mset = unit_cell.complete_miller_set_with_lattice_symmetry(
        anomalous_flag=True, d_min=3)
    asst = miller.reindexing.assistant(
        lattice_group=mset.space_group(),
        intensity_group=sgtbx.space_group_info(
            symbol="I 4 (y+z,x+z,x+y)").group(),
        miller_indices=mset.expand_to_p1().indices())
    verify_all()
    buf = StringIO()
    assert asst.show_summary(out=buf) is asst
    assert not show_diff(
        buf.getvalue(), """\
Lattice symmetry: I 4 3 2 (y+z,x+z,x+y) (No. 211)
Intensity symmetry: I 4 (y+z,x+z,x+y) (No. 79)

Indexing ambiguities:
  k,l,h         3-fold    invariants:    2
  -l,-k,-h      2-fold    invariants:    4
  -h,-l,-k      2-fold    invariants:    4
  l,h,k         3-fold    invariants:    2
  -k,-h,-l      2-fold    invariants:    4
""")
    expected = [[0, 1, 2, 3, 4, 5],
                [1, 4, 3, 5, 0, 2],
                [2, 5, 0, 4, 3, 1],
                [3, 2, 1, 0, 5, 4],
                [4, 0, 5, 2, 1, 3],
                [5, 3, 4, 1, 2, 0]]
    assert asst.i_j_multiplication_table == expected
    expected = [[0, 1, 2, 3, 4, 5],
                [4, 0, 5, 2, 1, 3],
                [2, 5, 0, 4, 3, 1],
                [3, 2, 1, 0, 5, 4],
                [1, 4, 3, 5, 0, 2],
                [5, 3, 4, 1, 2, 0]]
    assert asst.i_inv_j_multiplication_table == expected
    expected = [[0, 4, 2, 3, 1, 5],
                [1, 0, 3, 5, 4, 2],
                [2, 3, 0, 4, 5, 1],
                [3, 5, 1, 0, 2, 4],
                [4, 1, 5, 2, 0, 3],
                [5, 2, 4, 1, 3, 0]]
    assert asst.i_j_inv_multiplication_table == expected
    #
    print("OK")
Example #27
def exercise_masks():
  """Exercise smtbx solvent masking on the YAKRUY structure, both with
  and without its acetonitrile solvent molecule, then refine against
  mask-corrected data and check the solvent-free model is recovered.
  """
  mt = flex.mersenne_twister(seed=0)
  xs_ref = structure.from_shelx(
    file=cStringIO.StringIO(YAKRUY_ins))
  mi = xs_ref.crystal_symmetry().build_miller_set(
    d_min=0.5, anomalous_flag=False)
  # Synthetic amplitudes computed directly from the reference structure.
  fo = mi.structure_factors_from_scatterers(
    xs_ref, algorithm="direct").f_calc().as_amplitude_array()
  # Random overall scale in [0.05, 10.05) applied to the synthetic data.
  k = 0.05 + 10 * mt.random_double()
  fo = fo.customized_copy(data=fo.data()*k)
  fo2 = fo.f_as_f_sq()
  # Copy of the structure without the acetonitrile solvent molecule.
  acetonitrile_sel = xs_ref.label_selection(
    'N4', 'C20', 'C21', 'H211', 'H212', 'H213')
  xs_no_sol = xs_ref.deep_copy_scatterers().select(
    acetonitrile_sel, negate=True)
  # check what happens when no voids are found
  mask = masks.mask(xs_ref, fo2)
  # NOTE(review): 1/2 and 1/3 below must be true division (0.5, 0.333...);
  # if this module runs under Python 2, confirm it has
  # "from __future__ import division" at the top.
  mask.compute(solvent_radius=1.2,
               shrink_truncation_radius=1.2,
               resolution_factor=1/2,
               atom_radii_table={'C':1.70, 'B':1.63, 'N':1.55, 'O':1.52})
  assert mask.structure_factors() is None
  assert mask.n_voids() == 0
  assert mask.n_solvent_grid_points() == 0
  assert mask.f_mask() is None
  assert mask.f_model() is None
  assert mask.modified_intensities() is None
  assert mask.f_000 is None
  s = cStringIO.StringIO()
  mask.show_summary(log=s)
  assert not show_diff(s.getvalue(), """\
use_set_completion: False
solvent_radius: 1.20
shrink_truncation_radius: 1.20
van der Waals radii:
    B     C     H     N     O
 1.63  1.70  1.20  1.55  1.52

Total solvent accessible volume / cell = 0.0 Ang^3 [0.0%]

gridding: (30,45,54)
""")
  # and now with some voids
  fo2_complete = fo2.sort()
  fo2_missing_1 = fo2.select_indices(flex.miller_index([(0,0,1),
                                                        ]), negate=True)
  mt = flex.mersenne_twister(seed=0)
  # Randomly discard ~5% of the data to simulate an incomplete set.
  fo2_incomplete = fo2.select(mt.random_bool(fo2.size(), 0.95))

  # Exercise complete, missing-one-reflection and incomplete data; set
  # completion is enabled whenever the data are not complete.
  for fo2, use_space_group_symmetry in zip(
    (fo2_complete, fo2_complete, fo2_missing_1, fo2_incomplete),
    (True, False, True, True)):
    if fo2 is fo2_complete: use_set_completion=False
    else: use_set_completion=True
    mask = masks.mask(xs_no_sol, fo2, use_set_completion=use_set_completion)
    mask.compute(solvent_radius=1.2,
                 shrink_truncation_radius=1.2,
                 resolution_factor=1/3,
                 #atom_radii_table={'C':1.70, 'B':1.63, 'N':1.55, 'O':1.52},
                 use_space_group_symmetry=use_space_group_symmetry)
    # Mask grid values: 0/1 for protein/solvent regions, >=2 label voids.
    n_voids = flex.max(mask.mask.data) - 1
    f_mask = mask.structure_factors()
    f_model = mask.f_model()
    modified_fo = mask.modified_intensities().as_amplitude_array()
    # Difference map of (descaled) fo against f_model.
    f_obs_minus_f_model = fo.common_set(f_model).f_obs_minus_f_calc(f_obs_factor=1/k, f_calc=f_model)
    diff_map = miller.fft_map(mask.crystal_gridding, f_obs_minus_f_model)
    diff_map.apply_volume_scaling()
    stats = diff_map.statistics()
    assert n_voids == 2
    assert approx_equal(n_voids, mask.n_voids())
    assert mask.n_solvent_grid_points() == 42148
    if fo2 is fo2_complete:
      # check the difference map has no large peaks/holes
      assert max(stats.max(), abs(stats.min())) < 0.11
    # expected electron count: 44
    assert approx_equal(mask.f_000_s, 44, eps=1)
    assert modified_fo.r1_factor(mask.f_calc.common_set(modified_fo), k) < 0.006
    assert fo.common_set(fo2).r1_factor(f_model.common_set(fo2), k) < 0.006

  s = cStringIO.StringIO()
  mask.show_summary(log=s)
  assert not show_diff(s.getvalue(), """\
use_set_completion: True
solvent_radius: 1.20
shrink_truncation_radius: 1.20
van der Waals radii:
    C     H     N     O
 1.77  1.20  1.50  1.45

Total solvent accessible volume / cell = 146.5 Ang^3 [16.3%]
Total electron count / cell = 43.2

gridding: (45,72,80)
Void #Grid points Vol/A^3 Vol/%  Centre of mass (frac)   Eigenvectors (frac)
   1        21074    73.3   8.1  ( 0.267, 0.461, 0.672)  1  ( 0.982, 0.126, 0.142)
                                                         2  (-0.166, 0.206, 0.964)
                                                         3  (-0.092, 0.970,-0.223)
   2        21074    73.3   8.1  (-0.267, 0.539, 0.328)  1  ( 0.982, 0.126, 0.142)
                                                         2  (-0.166, 0.206, 0.964)
                                                         3  (-0.092, 0.970,-0.223)

Void  Vol/Ang^3  #Electrons
   1       73.3         21.6
   2       73.3         21.6
""")
  cif_block = mask.as_cif_block()

  fo2 = fo.f_as_f_sq()
  # this bit is necessary until we have constraints, as
  # otherwise the hydrogens just disappear into the ether.
  xs = xs_no_sol.deep_copy_scatterers()
  h_selection = xs.element_selection('H')
  orig_flags = xs.scatterer_flags()
  flags = orig_flags.deep_copy()
  for flag, is_h in zip(flags, h_selection):
    if is_h:
      flag.set_grads(False)
  xs.set_scatterer_flags(flags)

  # first refine with no mask
  xs = exercise_least_squares(xs, fo2, mask=None)
  xs.set_scatterer_flags(orig_flags)
  for i in range(1):
    # compute improved mask/f_mask
    mask = masks.mask(xs, fo2)
    mask.compute(solvent_radius=1.2,
                 shrink_truncation_radius=1.2,
                 atom_radii_table={'C':1.70, 'B':1.63, 'N':1.55, 'O':1.52},
                 resolution_factor=1/3)
    mask.structure_factors()
    xs = exercise_least_squares(xs, fo2, mask)
  # again exclude hydrogens from tests because of lack of constraints
  emma_ref = xs_no_sol.select(h_selection, negate=True).as_emma_model()
  match = emma.model_matches(emma_ref, xs.select(
    h_selection, negate=True).as_emma_model()).refined_matches[0]
  assert approx_equal(match.rms, 0, eps=1e-3)
Example #28 — file: tst_masks.py, project: dials/cctbx
def exercise_masks():
    """Exercise solvent-mask computation (masks.mask) on the YAKRUY test
    structure: the no-void case, void detection with several data-completeness
    scenarios, mask-assisted least-squares refinement, and the text/CIF output.

    Relies on module-level helpers/constants: YAKRUY_ins, masks, structure,
    exercise_least_squares, emma, approx_equal, show_diff.
    """
    mt = flex.mersenne_twister(seed=0)
    xs_ref = structure.from_shelx(file=StringIO(YAKRUY_ins))
    mi = xs_ref.crystal_symmetry().build_miller_set(d_min=0.5,
                                                    anomalous_flag=False)
    fo = mi.structure_factors_from_scatterers(
        xs_ref, algorithm="direct").f_calc().as_amplitude_array()
    # apply a random overall scale k so the test also covers rescaling below
    k = 0.05 + 10 * mt.random_double()
    fo = fo.customized_copy(data=fo.data() * k)
    fo2 = fo.f_as_f_sq()
    # the acetonitrile solvent molecule; removing it creates the voids
    acetonitrile_sel = xs_ref.label_selection('N4', 'C20', 'C21', 'H211',
                                              'H212', 'H213')
    xs_no_sol = xs_ref.deep_copy_scatterers().select(acetonitrile_sel,
                                                     negate=True)
    # check what happens when no voids are found
    mask = masks.mask(xs_ref, fo2)
    mask.compute(solvent_radius=1.2,
                 shrink_truncation_radius=1.2,
                 resolution_factor=1 / 2,
                 atom_radii_table={
                     'C': 1.70,
                     'B': 1.63,
                     'N': 1.55,
                     'O': 1.52
                 })
    # with no voids every derived quantity must be None/zero
    assert mask.structure_factors() is None
    assert mask.n_voids() == 0
    assert mask.n_solvent_grid_points() == 0
    assert mask.f_mask() is None
    assert mask.f_model() is None
    assert mask.modified_intensities() is None
    assert mask.f_000 is None
    s = StringIO()
    mask.show_summary(log=s)
    assert not show_diff(
        s.getvalue(), """\
use_set_completion: False
solvent_radius: 1.20
shrink_truncation_radius: 1.20
van der Waals radii:
    B     C     H     N     O
 1.63  1.70  1.20  1.55  1.52

Total solvent accessible volume / cell = 0.0 Ang^3 [0.0%]

gridding: (30,45,54)
""")
    # and now with some voids
    fo2_complete = fo2.sort()
    fo2_missing_1 = fo2.select_indices(flex.miller_index([
        (0, 0, 1),
    ]),
                                       negate=True)
    mt = flex.mersenne_twister(seed=0)
    # randomly drop ~5% of reflections to exercise set completion
    fo2_incomplete = fo2.select(mt.random_bool(fo2.size(), 0.95))

    for fo2, use_space_group_symmetry in zip(
        (fo2_complete, fo2_complete, fo2_missing_1, fo2_incomplete),
        (True, False, True, True)):
        if fo2 is fo2_complete: use_set_completion = False
        else: use_set_completion = True
        mask = masks.mask(xs_no_sol,
                          fo2,
                          use_set_completion=use_set_completion)
        mask.compute(
            solvent_radius=1.2,
            shrink_truncation_radius=1.2,
            resolution_factor=1 / 3,
            #atom_radii_table={'C':1.70, 'B':1.63, 'N':1.55, 'O':1.52},
            use_space_group_symmetry=use_space_group_symmetry)
        # mask.mask.data labels grid points: 0/1 = not void, >1 = void index
        n_voids = flex.max(mask.mask.data) - 1
        f_mask = mask.structure_factors()
        f_model = mask.f_model()
        modified_fo = mask.modified_intensities().as_amplitude_array()
        # difference map Fo-Fc (undoing the random scale k applied above)
        f_obs_minus_f_model = fo.common_set(f_model).f_obs_minus_f_calc(
            f_obs_factor=1 / k, f_calc=f_model)
        diff_map = miller.fft_map(mask.crystal_gridding, f_obs_minus_f_model)
        diff_map.apply_volume_scaling()
        stats = diff_map.statistics()
        assert n_voids == 2
        assert approx_equal(n_voids, mask.n_voids())
        assert mask.n_solvent_grid_points() == 42148
        if fo2 is fo2_complete:
            # check the difference map has no large peaks/holes
            assert max(stats.max(), abs(stats.min())) < 0.11
        # expected electron count: 44
        assert approx_equal(mask.f_000_s, 44, eps=1)
        assert modified_fo.r1_factor(mask.f_calc.common_set(modified_fo),
                                     k) < 0.006
        assert fo.common_set(fo2).r1_factor(f_model.common_set(fo2), k) < 0.006

    s = StringIO()
    mask.show_summary(log=s)
    assert not show_diff(
        s.getvalue(), """\
use_set_completion: True
solvent_radius: 1.20
shrink_truncation_radius: 1.20
van der Waals radii:
    C     H     N     O
 1.77  1.20  1.50  1.45

Total solvent accessible volume / cell = 146.5 Ang^3 [16.3%]
Total electron count / cell = 43.2

gridding: (45,72,80)
Void #Grid points Vol/A^3 Vol/%  Centre of mass (frac)   Eigenvectors (frac)
   1        21074    73.3   8.1  ( 0.267, 0.461, 0.672)  1  ( 0.982, 0.126, 0.142)
                                                         2  (-0.166, 0.206, 0.964)
                                                         3  (-0.092, 0.970,-0.223)
   2        21074    73.3   8.1  (-0.267, 0.539, 0.328)  1  ( 0.982, 0.126, 0.142)
                                                         2  (-0.166, 0.206, 0.964)
                                                         3  (-0.092, 0.970,-0.223)

Void  Vol/Ang^3  #Electrons
   1       73.3         21.6
   2       73.3         21.6
""")
    # smoke test only: result of CIF export is not inspected further
    cif_block = mask.as_cif_block()

    fo2 = fo.f_as_f_sq()
    # this bit is necessary until we have constraints, as
    # otherwise the hydrogens just disappear into the ether.
    xs = xs_no_sol.deep_copy_scatterers()
    h_selection = xs.element_selection('H')
    orig_flags = xs.scatterer_flags()
    flags = orig_flags.deep_copy()
    for flag, is_h in zip(flags, h_selection):
        if is_h:
            flag.set_grads(False)
    xs.set_scatterer_flags(flags)

    # first refine with no mask
    xs = exercise_least_squares(xs, fo2, mask=None)
    xs.set_scatterer_flags(orig_flags)
    for i in range(1):
        # compute improved mask/f_mask
        mask = masks.mask(xs, fo2)
        mask.compute(solvent_radius=1.2,
                     shrink_truncation_radius=1.2,
                     atom_radii_table={
                         'C': 1.70,
                         'B': 1.63,
                         'N': 1.55,
                         'O': 1.52
                     },
                     resolution_factor=1 / 3)
        mask.structure_factors()
        xs = exercise_least_squares(xs, fo2, mask)
    # again exclude hydrogens from tests because of lack of constraints
    emma_ref = xs_no_sol.select(h_selection, negate=True).as_emma_model()
    match = emma.model_matches(
        emma_ref,
        xs.select(h_selection, negate=True).as_emma_model()).refined_matches[0]
    assert approx_equal(match.rms, 0, eps=1e-3)
예제 #29
0
def exercise_adp_similarity():
  """Regression checks for adp_restraints.adp_similarity.

  Covers: direct construction for aniso/aniso and aniso/iso pairs, proxy
  construction for all four aniso/iso pairings, the vectorized
  residuals/deltas/residual-sum helpers, analytical vs finite-difference
  gradients, and frame invariance of the residual under rotation.
  """
  # two anisotropic scatterers
  u_cart = ((1,3,2,4,3,6),(2,4,2,6,5,1))
  u_iso = (-1,-1)
  use_u_aniso = (True, True)
  weight = 1
  a = adp_restraints.adp_similarity(
    u_cart=u_cart,
    weight=weight)
  assert approx_equal(a.use_u_aniso, use_u_aniso)
  assert a.weight == weight
  assert approx_equal(a.residual(), 68)
  assert approx_equal(a.gradients2(),
    ((-2.0, -2.0, 0.0, -8.0, -8.0, 20.0), (2.0, 2.0, -0.0, 8.0, 8.0, -20.0)))
  assert approx_equal(a.deltas(), (-1.0, -1.0, 0.0, -2.0, -2.0, 5.0))
  assert approx_equal(a.rms_deltas(), 2.7487370837451071)
  #
  # one anisotropic and one isotropic scatterer
  u_cart = ((1,3,2,4,3,6),(-1,-1,-1,-1,-1,-1))
  u_iso = (-1,2)
  use_u_aniso = (True, False)
  a = adp_restraints.adp_similarity(
    u_cart[0], u_iso[1], weight=weight)
  assert approx_equal(a.use_u_aniso, use_u_aniso)
  assert a.weight == weight
  assert approx_equal(a.residual(), 124)
  assert approx_equal(a.gradients2(),
    ((-2, 2, 0, 16, 12, 24), (2, -2, 0, -16, -12, -24)))
  assert approx_equal(a.deltas(), (-1, 1, 0, 4, 3, 6))
  assert approx_equal(a.rms_deltas(), 3.711842908553348)
  #
  # proxies covering every aniso(())/iso(o) pairing; scatterers 1,2 are
  # anisotropic and 0,3 isotropic (see use_u_aniso below)
  i_seqs_aa = (1,2) # () - ()
  i_seqs_ai = (1,0) # () - o
  i_seqs_ia = (3,2) #  o - ()
  i_seqs_ii = (0,3) #  o - o
  p_aa = adp_restraints.adp_similarity_proxy(i_seqs=i_seqs_aa,weight=weight)
  p_ai = adp_restraints.adp_similarity_proxy(i_seqs=i_seqs_ai,weight=weight)
  p_ia = adp_restraints.adp_similarity_proxy(i_seqs=i_seqs_ia,weight=weight)
  p_ii = adp_restraints.adp_similarity_proxy(i_seqs=i_seqs_ii,weight=weight)
  assert p_aa.i_seqs == i_seqs_aa
  assert p_aa.weight == weight
  u_cart = flex.sym_mat3_double(((-1,-1,-1,-1,-1,-1),
                                 (1,2,2,4,3,6),
                                 (2,4,2,6,5,1),
                                 (-1,-1,-1,-1,-1,-1)))
  u_iso = flex.double((1,-1,-1,2))
  use_u_aniso = flex.bool((False, True,True,False))
  for p in (p_aa,p_ai,p_ia,p_ii):
    params = adp_restraint_params(u_cart=u_cart, u_iso=u_iso, use_u_aniso=use_u_aniso)
    a = adp_restraints.adp_similarity(params, proxy=p)
    assert approx_equal(a.weight, weight)
    #
    # vectorized helpers must agree with the single-restraint values
    gradients_aniso_cart = flex.sym_mat3_double(u_cart.size(), (0,0,0,0,0,0))
    gradients_iso = flex.double(u_cart.size(), 0)
    proxies = adp_restraints.shared_adp_similarity_proxy([p,p])
    residuals = adp_restraints.adp_similarity_residuals(params, proxies=proxies)
    assert approx_equal(residuals, (a.residual(),a.residual()))
    deltas_rms = adp_restraints.adp_similarity_deltas_rms(params, proxies=proxies)
    assert approx_equal(deltas_rms, (a.rms_deltas(),a.rms_deltas()))
    residual_sum = adp_restraints.adp_similarity_residual_sum(
      params,
      proxies=proxies,
      gradients_aniso_cart=gradients_aniso_cart,
      gradients_iso=gradients_iso)
    assert approx_equal(residual_sum, 2 * a.residual())
    # gradients accumulated for two identical proxies, hence the factor 2
    # when comparing with finite differences of a single restraint
    fd_grads_aniso, fd_grads_iso = finite_difference_gradients(
      restraint_type=adp_restraints.adp_similarity,
      proxy=p,
      u_cart=u_cart,
      u_iso=u_iso,
      use_u_aniso=use_u_aniso)
    for g,e in zip(gradients_aniso_cart, fd_grads_aniso):
      assert approx_equal(g,  matrix.col(e)*2)
    for g,e in zip(gradients_iso, fd_grads_iso):
      assert approx_equal(g, e*2)
  #
  # check frame invariance of residual
  #
  u_cart_1 = matrix.sym(sym_mat3=(0.1,0.2,0.05,0.03,0.02,0.01))
  u_cart_2 = matrix.sym(sym_mat3=(0.21,0.32,0.11,0.02,0.02,0.07))
  u_cart = (u_cart_1.as_sym_mat3(),u_cart_2.as_sym_mat3())
  u_iso = (-1, -1)
  use_u_aniso = (True, True)
  a = adp_restraints.adp_similarity(u_cart, weight=1)
  expected_residual = a.residual()
  gen = flex.mersenne_twister()
  for i in range(20):
    # rotate both tensors by the same random rotation: residual is unchanged
    R = matrix.rec(gen.random_double_r3_rotation_matrix(),(3,3))
    u_cart_1_rot = R * u_cart_1 * R.transpose()
    u_cart_2_rot = R * u_cart_2 * R.transpose()
    u_cart = (u_cart_1_rot.as_sym_mat3(),u_cart_2_rot.as_sym_mat3())
    a = adp_restraints.adp_similarity(u_cart, weight=1)
    assert approx_equal(a.residual(), expected_residual)
예제 #30
0
def run(args):
  """Build a 5-membered ribose-like ring restraint system and verify that
  LBFGS minimization from random starting coordinates always converges to
  one of the four known conformers.

  NOTE: Python 2 syntax (xrange, ``except E, e``, ``print >>``); relies on
  module-level sites_cart_3p/_2p/_a/_b, atom_names, null_out, is_below_limit.

  :param args: [] for quiet output, ["--verbose"] to print to stdout.
  """
  assert args in [[], ["--verbose"]]
  if (len(args) != 0):
    cout = sys.stdout
  else:
    cout = null_out()
  # ring connectivity C1*-C2*-C3*-C4*-O4*-C1* as index pairs into atom_names
  edge_list_bonds = [(0,1),(0,4),(1,2),(2,3),(3,4)]
  # target bond lengths (Angstrom) for the ring bonds
  bond_list = [
    (("C1*", "C2*"), 1.529),
    (("C1*", "O4*"), 1.412),
    (("C2*", "C3*"), 1.526),
    (("C3*", "C4*"), 1.520),
    (("C4*", "O4*"), 1.449)]
  # target bond angles (degrees)
  angle_list = [
    (("C1*", "C2*", "C3*"), 101.3),
    (("C2*", "C3*", "C4*"), 102.3),
    (("C3*", "C4*", "O4*"), 104.2),
    (("C4*", "O4*", "C1*"), 110.0)]
  sites_cart, geo_manager = cctbx.geometry_restraints.manager \
    .construct_non_crystallographic_conserving_bonds_and_angles(
      sites_cart=sites_cart_3p,
      edge_list_bonds=edge_list_bonds,
      edge_list_angles=[])
  # replace the auto-derived ideal bond values with the tabulated ones
  for bond_atom_names,distance_ideal in bond_list:
    i,j = [atom_names.index(atom_name) for atom_name in bond_atom_names]
    bond_params = geo_manager.bond_params_table[i][j]
    assert approx_equal(bond_params.distance_ideal, distance_ideal, eps=1.e-2)
    bond_params.distance_ideal = distance_ideal
    bond_params.weight = 1/0.02**2
  assert geo_manager.angle_proxies is None
  geo_manager.angle_proxies = cctbx.geometry_restraints.shared_angle_proxy()
  for angle_atom_names,angle_ideal in angle_list:
    i_seqs = [atom_names.index(atom_name) for atom_name in angle_atom_names]
    geo_manager.angle_proxies.append(cctbx.geometry_restraints.angle_proxy(
      i_seqs=i_seqs,
      angle_ideal=angle_ideal,
      weight=1/3**2))
  geo_manager.show_sorted(
    site_labels=atom_names, sites_cart=sites_cart, f=cout)
  def lbfgs(sites_cart):
    # minimize in place; restart twice to confirm a stable minimum
    for i_lbfgs_restart in xrange(3):
      minimized = cctbx.geometry_restraints.lbfgs.lbfgs(
        sites_cart=sites_cart,
        geometry_restraints_manager=geo_manager)
      assert is_below_limit(value=minimized.final_target_value, limit=1e-10)
    return minimized
  lbfgs(sites_cart=sites_cart_3p)
  lbfgs(sites_cart=sites_cart_2p)
  conformer_counts = [0] * 4
  sites_cart = sites_cart.deep_copy()
  mt = flex.mersenne_twister(seed=0)
  for i_trial in xrange(20):
    # retry random starts until minimization does not blow up with
    # unreasonable bond distances
    while True:
      for i in xrange(sites_cart.size()):
        sites_cart[i] = mt.random_double_point_on_sphere()
      try:
        lbfgs(sites_cart=sites_cart)
      except RuntimeError, e:
        if (not str(e).startswith(
              "Bond distance > max_reasonable_bond_distance: ")):
          raise
      else:
        break
    # superpose the minimized ring onto each reference conformer and
    # require a near-exact match with at least one of them
    rmsd_list = flex.double()
    for reference_sites in [
          sites_cart_3p,
          sites_cart_2p,
          sites_cart_a,
          sites_cart_b]:
      sup = scitbx.math.superpose.least_squares_fit(
        reference_sites=reference_sites,
        other_sites=sites_cart)
      rmsd = reference_sites.rms_difference(sup.other_sites_best_fit())
      rmsd_list.append(rmsd)
    oline = " ".join(["%.3f" % rmsd for rmsd in rmsd_list])
    print >> cout, oline
    assert is_below_limit(min(rmsd_list), 1e-3)
    conformer_counts[flex.min_index(rmsd_list)] += 1
예제 #31
0
def run(args):
  """Check angle-restraint derivatives (check_derivs) for random geometries,
  permuted degenerate sites, and exactly-linear configurations, then write
  gnuplot-style plot files.

  NOTE: Python 2 syntax (xrange, ``print >>``, print statement); relies on
  module-level angle, angle_delta_deg, check_derivs, write_plots, null_out.

  :param args: [] for quiet output, ["--verbose"] to print to stdout.
  """
  assert args in [[], ["--verbose"]]
  if (len(args) == 0):
    out = null_out()
  else:
    out = sys.stdout
  #
  mt = flex.mersenne_twister(seed=0)
  #
  # random bond lengths/angle: build three sites realizing a known
  # angle_model, then confirm the restraint recovers it (mod sign/360)
  for i_trial in xrange(10):
    l0 = mt.random_double() + 0.5
    l1 = mt.random_double() + 0.5
    l2 = mt.random_double() + 0.5
    angle_model = mt.random_double() * 178 + 1 \
                + 180 * (mt.random_size_t() % 3 - 1)
    v = matrix.col(mt.random_double_point_on_sphere())
    axis = v.ortho()
    site1 = v * l1
    site0 = site1 + v * l0
    r = axis.axis_and_angle_as_r3_rotation_matrix(angle=angle_model, deg=True)
    site2 = site1 + (r * v) * l2
    a = angle(
      sites=[site0, site1, site2],
      angle_ideal=mt.random_double() * 720 - 360,
      weight=mt.random_double() * 10 + 0.1)
    assert approx_equal(min(
      abs(angle_delta_deg(angle_1=a.angle_model, angle_2= angle_model)),
      abs(angle_delta_deg(angle_1=a.angle_model, angle_2=-angle_model))), 0)
    check_derivs(out=out, a=a)
  #
  # right-angle configurations under all site permutations
  for site2 in [(0,2.3,0), (0,0,2.5)]:
    perm = flex.size_t([0,1,2])
    while True:
      a = angle(
        sites=tuple(flex.vec3_double(
          [(1.2,0,0), (0,0,0), site2]).select(perm)),
        angle_ideal=mt.random_double() * 720 - 360,
        weight=mt.random_double() * 10 + 0.1)
      check_derivs(out=out, a=a)
      if (not perm.next_permutation()):
        break
  #
  # exactly linear (180 degree) configurations: derivatives are singular,
  # so finite-difference comparison is expected to fail
  for site0 in [(1,0,0),(0,1,0),(0,0,1),(1,1,1)]:
    perm = flex.size_t([0,1,2])
    while True:
      a = angle(
        sites=tuple(flex.vec3_double(
          [site0, (0,0,0), -matrix.col(site0)]).select(perm)),
        angle_ideal=180,
        weight=1.3)
      check_derivs(out=out, a=a, expect_failure=True)
      if (not perm.next_permutation()):
        break
  #
  # emit plot data plus a helper command file for converting to pdf
  plot_file_names = []
  for method in ["ana", "fin"]:
    plot_file_names.extend(write_plots(method=method))
  f = open("angle_xy_as_pdf_commands", "w")
  for file_name in plot_file_names:
    print >> f, "ppdf %s > %s" % (file_name, file_name.replace(".xy", ".pdf"))
  f.close()
  #
  print "OK"
예제 #32
0
def run(args):
  """Exercise miller.reindexing.assistant: indexing-ambiguity detection,
  change-of-basis/permutation consistency, multiplication tables, and the
  show_summary output for rhombohedral and pseudo-cubic lattices.

  NOTE: Python 2 syntax (print statement, cStringIO).

  :param args: must be empty.
  """
  assert len(args) == 0
  from cctbx import miller
  import cctbx.miller.reindexing
  from cctbx import uctbx
  from cctbx import sgtbx
  from cctbx.array_family import flex
  # rhombohedral-ish cell: lattice symmetry R 3 2 :R
  uc = uctbx.unit_cell((11,11,11,81,81,81))
  ms = uc.complete_miller_set_with_lattice_symmetry(
    anomalous_flag=True,
    d_min=3)
  ra = miller.reindexing.assistant(
    lattice_group=ms.space_group(),
    intensity_group=sgtbx.space_group_info(symbol="P 1").group(),
    miller_indices=ms.expand_to_p1().indices())
  mt = flex.mersenne_twister(seed=0)
  def check_cb_op_perm(cb_op, perm):
    # a cb_op applied to a subset of indices must land where the stored
    # permutation says it does
    mi_cb = cb_op.apply(ra.miller_indices)
    miis = flex.random_permutation(size=ra.miller_indices.size())[:2]
    k = cb_op.apply(ra.miller_indices.select(miis))
    matches = miller.match_indices(k, ra.miller_indices)
    pairs = matches.pairs()
    assert pairs.column(0).all_eq(flex.size_t_range(k.size()))
    miis_cb = pairs.column(1)
    assert perm.select(miis).all_eq(miis_cb)
  def check_ra():
    # forward and inverse permutations for every ambiguity operator
    for cb_op, perm, inv_perm in zip(ra.cb_ops, ra.perms, ra.inv_perms):
      check_cb_op_perm(cb_op, perm)
      check_cb_op_perm(cb_op.inverse(), inv_perm)
  check_ra()
  assert ra.i_j_multiplication_table == [
    [0, 1, 2, 3, 4, 5],
    [1, 2, 0, 4, 5, 3],
    [2, 0, 1, 5, 3, 4],
    [3, 5, 4, 0, 2, 1],
    [4, 3, 5, 1, 0, 2],
    [5, 4, 3, 2, 1, 0]]
  assert ra.i_inv_j_multiplication_table == [
    [0, 1, 2, 3, 4, 5],
    [2, 0, 1, 5, 3, 4],
    [1, 2, 0, 4, 5, 3],
    [3, 5, 4, 0, 2, 1],
    [4, 3, 5, 1, 0, 2],
    [5, 4, 3, 2, 1, 0]]
  assert ra.i_j_inv_multiplication_table == [
    [0, 2, 1, 3, 4, 5],
    [1, 0, 2, 4, 5, 3],
    [2, 1, 0, 5, 3, 4],
    [3, 4, 5, 0, 2, 1],
    [4, 5, 3, 1, 0, 2],
    [5, 3, 4, 2, 1, 0]]
  from libtbx.test_utils import show_diff
  from cStringIO import StringIO
  sio = StringIO()
  assert ra.show_summary(out=sio, prefix=": ") is ra
  assert not show_diff(sio.getvalue(), """\
: Lattice symmetry: R 3 2 :R (No. 155)
: Intensity symmetry: P 1 (No. 1)
:
: Indexing ambiguities:
:   k,l,h         3-fold    invariants:    4
:   l,h,k         3-fold    invariants:    4
:   -k,-h,-l      2-fold    invariants:    4
:   -l,-k,-h      2-fold    invariants:    4
:   -h,-l,-k      2-fold    invariants:    4
""")
  #
  # intensity group == lattice group: no ambiguity remains
  ra = miller.reindexing.assistant(
    lattice_group=ms.space_group(),
    intensity_group=ms.space_group(),
    miller_indices=ra.miller_indices)
  check_ra()
  sio = StringIO()
  assert ra.show_summary(out=sio) is ra
  assert not show_diff(sio.getvalue(), """\
Lattice symmetry: R 3 2 :R (No. 155)
Intensity symmetry: R 3 2 :R (No. 155)

No indexing ambiguity.
""")
  assert ra.i_j_multiplication_table == [[0]]
  assert ra.i_inv_j_multiplication_table == [[0]]
  assert ra.i_j_inv_multiplication_table == [[0]]
  #
  # intermediate subgroup R 3 :R: exactly one two-fold ambiguity
  ra = miller.reindexing.assistant(
    lattice_group=ms.space_group(),
    intensity_group=sgtbx.space_group_info(symbol="R 3 :R").group(),
    miller_indices=ra.miller_indices)
  check_ra()
  sio = StringIO()
  assert ra.show_summary(out=sio) is ra
  assert not show_diff(sio.getvalue(), """\
Lattice symmetry: R 3 2 :R (No. 155)
Intensity symmetry: R 3 :R (No. 146)

Indexing ambiguity:
  -h,-l,-k      2-fold    invariants:    4
""")
  assert ra.i_j_multiplication_table == [[0, 1], [1, 0]]
  assert ra.i_inv_j_multiplication_table == [[0, 1], [1, 0]]
  assert ra.i_j_inv_multiplication_table == [[0, 1], [1, 0]]
  #
  # cell angle acos(-1/3) gives pseudo-cubic lattice symmetry I 4 3 2
  import math
  ta = math.acos(-1/3) * 180 / math.pi
  uc = uctbx.unit_cell((11,11,11,ta,ta,ta))
  ms = uc.complete_miller_set_with_lattice_symmetry(
    anomalous_flag=True,
    d_min=3)
  ra = miller.reindexing.assistant(
    lattice_group=ms.space_group(),
    intensity_group=sgtbx.space_group_info(symbol="I 4 (y+z,x+z,x+y)").group(),
    miller_indices=ms.expand_to_p1().indices())
  check_ra()
  sio = StringIO()
  assert ra.show_summary(out=sio) is ra
  assert not show_diff(sio.getvalue(), """\
Lattice symmetry: I 4 3 2 (y+z,x+z,x+y) (No. 211)
Intensity symmetry: I 4 (y+z,x+z,x+y) (No. 79)

Indexing ambiguities:
  k,l,h         3-fold    invariants:    2
  -l,-k,-h      2-fold    invariants:    4
  -h,-l,-k      2-fold    invariants:    4
  l,h,k         3-fold    invariants:    2
  -k,-h,-l      2-fold    invariants:    4
""")
  assert ra.i_j_multiplication_table == [
    [0, 1, 2, 3, 4, 5],
    [1, 4, 3, 5, 0, 2],
    [2, 5, 0, 4, 3, 1],
    [3, 2, 1, 0, 5, 4],
    [4, 0, 5, 2, 1, 3],
    [5, 3, 4, 1, 2, 0]]
  assert ra.i_inv_j_multiplication_table == [
    [0, 1, 2, 3, 4, 5],
    [4, 0, 5, 2, 1, 3],
    [2, 5, 0, 4, 3, 1],
    [3, 2, 1, 0, 5, 4],
    [1, 4, 3, 5, 0, 2],
    [5, 3, 4, 1, 2, 0]]
  assert ra.i_j_inv_multiplication_table == [
    [0, 4, 2, 3, 1, 5],
    [1, 0, 3, 5, 4, 2],
    [2, 3, 0, 4, 5, 1],
    [3, 5, 1, 0, 2, 4],
    [4, 1, 5, 2, 0, 3],
    [5, 2, 4, 1, 3, 0]]
  #
  print "OK"
예제 #33
0
def exercise_adp_similarity():
  """Regression checks for adp_restraints.adp_similarity.

  Covers: direct construction for aniso/aniso and aniso/iso pairs,
  proxy-based construction for all four aniso/iso pairings, the
  vectorized residual/delta/gradient helpers against finite differences,
  and rotational frame invariance of the residual.
  """
  weight = 1
  # --- two anisotropic scatterers ---
  restraint = adp_restraints.adp_similarity(
    u_cart=((1,3,2,4,3,6),(2,4,2,6,5,1)),
    weight=weight)
  assert approx_equal(restraint.use_u_aniso, (True, True))
  assert restraint.weight == weight
  assert approx_equal(restraint.residual(), 68)
  assert approx_equal(restraint.gradients2(),
    ((-2.0, -2.0, 0.0, -8.0, -8.0, 20.0), (2.0, 2.0, -0.0, 8.0, 8.0, -20.0)))
  assert approx_equal(restraint.deltas(), (-1.0, -1.0, 0.0, -2.0, -2.0, 5.0))
  assert approx_equal(restraint.rms_deltas(), 2.7487370837451071)
  # --- one anisotropic and one isotropic scatterer ---
  restraint = adp_restraints.adp_similarity(
    (1,3,2,4,3,6), 2, weight=weight)
  assert approx_equal(restraint.use_u_aniso, (True, False))
  assert restraint.weight == weight
  assert approx_equal(restraint.residual(), 124)
  assert approx_equal(restraint.gradients2(),
    ((-2, 2, 0, 16, 12, 24), (2, -2, 0, -16, -12, -24)))
  assert approx_equal(restraint.deltas(), (-1, 1, 0, 4, 3, 6))
  assert approx_equal(restraint.rms_deltas(), 3.711842908553348)
  # --- proxies covering every aniso/iso pairing ---
  # scatterers 1,2 are anisotropic; 0,3 isotropic (see flags below)
  pairings = [
    (1,2),  # aniso - aniso
    (1,0),  # aniso - iso
    (3,2),  # iso   - aniso
    (0,3),  # iso   - iso
  ]
  proxy_list = [
    adp_restraints.adp_similarity_proxy(i_seqs=ij, weight=weight)
    for ij in pairings]
  assert proxy_list[0].i_seqs == pairings[0]
  assert proxy_list[0].weight == weight
  all_u_cart = flex.sym_mat3_double(((-1,-1,-1,-1,-1,-1),
                                     (1,2,2,4,3,6),
                                     (2,4,2,6,5,1),
                                     (-1,-1,-1,-1,-1,-1)))
  all_u_iso = flex.double((1,-1,-1,2))
  all_aniso = flex.bool((False, True, True, False))
  for proxy in proxy_list:
    params = adp_restraint_params(
      u_cart=all_u_cart, u_iso=all_u_iso, use_u_aniso=all_aniso)
    restraint = adp_restraints.adp_similarity(params, proxy=proxy)
    assert approx_equal(restraint.weight, weight)
    # vectorized helpers must agree with the single-restraint values
    grad_aniso_cart = flex.sym_mat3_double(all_u_cart.size(), (0,0,0,0,0,0))
    grad_iso = flex.double(all_u_cart.size(), 0)
    proxies = adp_restraints.shared_adp_similarity_proxy([proxy, proxy])
    assert approx_equal(
      adp_restraints.adp_similarity_residuals(params, proxies=proxies),
      (restraint.residual(), restraint.residual()))
    assert approx_equal(
      adp_restraints.adp_similarity_deltas_rms(params, proxies=proxies),
      (restraint.rms_deltas(), restraint.rms_deltas()))
    residual_sum = adp_restraints.adp_similarity_residual_sum(
      params,
      proxies=proxies,
      gradients_aniso_cart=grad_aniso_cart,
      gradients_iso=grad_iso)
    assert approx_equal(residual_sum, 2 * restraint.residual())
    # gradients were accumulated for two identical proxies, hence the
    # factor 2 against finite differences of a single restraint
    fd_aniso, fd_iso = finite_difference_gradients(
      restraint_type=adp_restraints.adp_similarity,
      proxy=proxy,
      u_cart=all_u_cart,
      u_iso=all_u_iso,
      use_u_aniso=all_aniso)
    for analytical, numerical in zip(grad_aniso_cart, fd_aniso):
      assert approx_equal(analytical, matrix.col(numerical)*2)
    for analytical, numerical in zip(grad_iso, fd_iso):
      assert approx_equal(analytical, numerical*2)
  # --- residual must be invariant under rotation of the frame ---
  u1 = matrix.sym(sym_mat3=(0.1,0.2,0.05,0.03,0.02,0.01))
  u2 = matrix.sym(sym_mat3=(0.21,0.32,0.11,0.02,0.02,0.07))
  expected_residual = adp_restraints.adp_similarity(
    (u1.as_sym_mat3(), u2.as_sym_mat3()), weight=1).residual()
  rng = flex.mersenne_twister()
  for _ in range(20):
    rot = matrix.rec(rng.random_double_r3_rotation_matrix(), (3,3))
    rotated = ((rot * u1 * rot.transpose()).as_sym_mat3(),
               (rot * u2 * rot.transpose()).as_sym_mat3())
    assert approx_equal(
      adp_restraints.adp_similarity(rotated, weight=1).residual(),
      expected_residual)
예제 #34
0
def process_continue(params, cod_id, c_obs, i_obs, f_obs, structure_prep):
    """Continue processing of one COD entry: clean up the observations,
    scale them to f_calc, strip hydrogens, then run each refinement engine
    selected in params.optimizers (dev, smtbx ls, shelxl, shelx76).

    NOTE: Python 2 syntax (print statements, ``print >>``).

    :param params: phil-style parameter object (f_calc_options, optimizers,
        export_refined, ... as referenced below).
    :param cod_id: COD identifier string, used in messages and file names.
    :param c_obs: original observation array (intensity or amplitude).
    :param i_obs: intensity observations.
    :param f_obs: amplitude observations.
    :param structure_prep: prepared xray structure to refine.
    """
    p = params.f_calc_options
    f_calc = f_obs.structure_factors_from_scatterers(
        xray_structure=structure_prep,
        algorithm=p.algorithm,
        cos_sin_table=p.cos_sin_table).f_calc()
    # outliers in the f_obs-vs-f_calc fan plot
    sel = f_obs.f_obs_f_calc_fan_outlier_selection(f_calc=f_calc)
    assert sel is not None
    n_outliers = sel.count(True)
    if (n_outliers != 0):
        action = params.f_obs_f_calc_fan_outliers
        print "INFO: f_obs_f_calc_fan_outliers = %s: %d" % (action, n_outliers)
        if (action == "remove"):
            i_obs = i_obs.select(~sel)
            f_obs = f_obs.select(~sel)
    if (f_obs.anomalous_flag()):
        print "INFO: converting anomalous i+f_obs to non-anomalous."
        i_obs = i_obs.average_bijvoet_mates()
        f_obs = f_obs.average_bijvoet_mates()
    # drop reflections with both datum and sigma equal to zero
    sel = ((i_obs.data() == 0) & (i_obs.sigmas() == 0)) \
        | ((f_obs.data() == 0) & (f_obs.sigmas() == 0))
    n_zero_d_and_s = sel.count(True)
    if (n_zero_d_and_s != 0):
        print "INFO: removing reflections with i+f_obs=0 and sigma=0:", \
          n_zero_d_and_s
        i_obs = i_obs.select(~sel)
        f_obs = f_obs.select(~sel)
    p = params.f_calc_options
    # recompute f_calc after the filtering above
    f_calc = f_obs.structure_factors_from_scatterers(
        xray_structure=structure_prep,
        algorithm=p.algorithm,
        cos_sin_table=p.cos_sin_table).f_calc()
    if (params.use_f_calc_as_f_obs):
        print "INFO: using f_calc as i+f_obs"
        i_obs = f_calc.intensities().customized_copy(
            sigmas=flex.double(f_calc.indices().size(), 0.01))
        f_obs = f_calc.amplitudes().customized_copy(
            sigmas=flex.double(f_calc.indices().size(), 0.01))
    else:
        # scaling applied so that the data written in shelx hklf format
        # have sufficient significant digits, and FVAR is 1 (shelx76 seems
        # to be especially sensitive to FVAR >> 1)
        k = f_obs.scale_factor(f_calc=f_calc)
        assert k != 0
        s = 1 / k**2
        print "INFO: scaling i_obs to f_calc by multiplying i_obs with: %.6g" % s
        i_obs = i_obs.apply_scaling(factor=s)
        s = 1 / k
        print "INFO: scaling f_obs to f_calc by multiplying f_obs with: %.6g" % s
        f_obs = f_obs.apply_scaling(factor=s)

    def show(obs):
        # summary of the observation array that matches c_obs's type
        obs.show_comprehensive_summary()
        from cif_refine import \
          report_fraction_of_negative_observations_if_any as _
        _(cod_id, obs)

    if (c_obs.is_xray_intensity_array()):
        show(i_obs)
    else:
        show(f_obs)
    print "." * 79
    #
    structure_work = structure_prep.deep_copy_scatterers()
    sel = structure_work.hd_selection()
    print "Removing hydrogen atoms:", sel.count(True)
    structure_work = structure_work.select(selection=~sel)
    sdt = params.show_distances_threshold
    if (sdt > 0):
        print "Distances smaller than %.6g A:" % sdt
        structure_work.show_distances(distance_cutoff=sdt)
        print "." * 79
    #
    if (params.tardy_samples.iq is not None):
        from cctbx.omz import tardy_adaptor
        print
        # NOTE(review): edge_list is not defined in this function or its
        # parameters -- presumably a module-level global; verify.
        tardy_adaptor.sample_e_pot(id_code=cod_id,
                                   f_obs=f_obs,
                                   xray_structure=structure_prep,
                                   edge_list=edge_list,
                                   params=params.tardy_samples)
        print
        return
    #
    from iotbx.shelx import fvar_encoding
    fvars, encoded_sites = fvar_encoding.dev_build_shelx76_fvars(
        structure_work)
    print "Number of FVARs for special position constraints:", len(fvars) - 1
    print "." * 79
    #
    show_cc_r1(params, "prep", f_obs, structure_prep)

    def cc_r1(label):
        # convenience wrapper: cc/r1 report for the current structure_work
        show_cc_r1(params, label, f_obs, structure_work)

    cc_r1("no_h")
    structure_work.convert_to_isotropic()
    cc_r1("iso")
    # keep the isotropic model as reference for the refinement engines
    structure_iso = structure_work.deep_copy_scatterers()
    #
    if (params.reset_u_iso is not None):
        structure_work.set_u_iso(value=params.reset_u_iso)
        cc_r1("setu")
    if (params.shake_sites_rmsd is not None):
        # perturb coordinates so refinement has real work to do
        mt = flex.mersenne_twister(seed=0)
        structure_work.shift_sites_in_place(
            shift_length=params.shake_sites_rmsd, mersenne_twister=mt)
        print "rms difference after shift_sites_in_place: %.3f" \
          % structure_iso.rms_difference(structure_work)
        cc_r1("shift_xyz")
    #
    if (params.max_atoms is not None):
        n = structure_work.scatterers().size()
        if (n > params.max_atoms):
            print "Skipping refinement of large model: %d atoms COD %s" % (
                n, cod_id)
            return
    #
    # refine sites and isotropic u only
    structure_work.scatterers().flags_set_grads(state=False)
    for sc in structure_work.scatterers():
        sc.flags.set_grad_site(True)
        assert sc.flags.use_u_iso_only()
        sc.flags.set_grad_u_iso(True)
    n_refinable_parameters = structure_work.n_parameters(
        considering_site_symmetry_constraints=True)
    print "Number of refinable parameters:", n_refinable_parameters
    #
    if (params.iteration_limit < 1):
        return
    #
    if ("dev" not in params.optimizers):
        structure_dev = None
    else:
        structure_dev = structure_work.deep_copy_scatterers()
        omz.dev.refinement(
            i_obs=i_obs,
            f_obs=f_obs,
            xray_structure=structure_dev,
            params=params,
            reference_structure=structure_iso,
            expected_n_refinable_parameters=n_refinable_parameters,
            plot_samples_id=cod_id)
        show_cc_r1(params, "dev", f_obs, structure_dev)
        if (params.export_refined):
            file_name = "dev_%s_%s_%s.pdb" % (
                params.target_type, params.target_obs_type.lower(), cod_id)
            open(file_name,
                 "w").write(structure_dev.as_pdb_file(remarks=[file_name]))
        if (params.pickle_refined_dir is not None):
            easy_pickle.dump(file_name=op.join(params.pickle_refined_dir,
                                               cod_id + ".pickle"),
                             obj=(c_obs, structure_dev, None))
            print >> open("%s/qi_%s" % (params.pickle_refined_dir, cod_id),
                          "w"), (structure_dev.scatterers().size(),
                                 c_obs.space_group().order_p(),
                                 c_obs.indices().size(), c_obs.d_min())
    #
    def use_smtbx_ls(mode):
        # run smtbx least-squares refinement if selected; None otherwise
        if ("ls_" + mode not in params.optimizers):
            return None
        if (not libtbx.env.has_module(name="smtbx")):
            print "INFO: smtbx not available: refinement skipped."
            return None
        result = structure_work.deep_copy_scatterers()
        run_smtbx_ls(mode=mode,
                     cod_id=cod_id,
                     i_obs=i_obs,
                     f_obs=f_obs,
                     xray_structure=result,
                     params=params)
        show_cc_r1(params, "ls_" + mode, f_obs, result)
        return result

    structure_ls_simple = use_smtbx_ls("simple")
    structure_ls_lm = use_smtbx_ls("lm")

    #
    def use_shelxl(mode):
        # run shelxl refinement if selected; None otherwise
        if ("shelxl_" + mode not in params.optimizers):
            return None
        result = structure_work.deep_copy_scatterers()
        run_shelxl(mode=mode,
                   cod_id=cod_id,
                   i_obs=i_obs,
                   f_obs=f_obs,
                   xray_structure=result,
                   params=params,
                   reference_structure=structure_iso,
                   expected_n_refinable_parameters=n_refinable_parameters)
        if (params.export_refined):
            file_name = "shelxl_%s_%s.pdb" % (mode, cod_id)
            open(file_name, "w").write(result.as_pdb_file(remarks=[file_name]))
        return result

    structure_shelxl_fm = use_shelxl("fm")
    structure_shelxl_cg = use_shelxl("cg")
    #
    if ("shelx76" not in params.optimizers):
        structure_shelx76 = None
    else:
        structure_shelx76 = structure_work.deep_copy_scatterers()
        run_shelx76(cod_id=cod_id,
                    f_obs=f_obs,
                    xray_structure=structure_shelx76,
                    fvars=fvars,
                    encoded_sites=encoded_sites,
                    params=params,
                    reference_structure=structure_iso)
        if (params.export_refined):
            file_name = "shelx76_%s.pdb" % cod_id
            open(file_name,
                 "w").write(structure_shelx76.as_pdb_file(remarks=[file_name]))
예제 #35
0
def exercise_rigid_bond():
  """Exercise the adp_restraints rigid-bond restraint API.

  Covers: proxy construction, residual/gradient values against
  hand-computed expectations, agreement between the cartesian
  (rigid_bond) and fractional (rigid_bond_pair) code paths,
  shared-proxy residual/delta/sum helpers, finite-difference gradient
  checks, and rotational frame invariance of the residual.
  """
  i_seqs = (1,2)
  weight = 1
  p = adp_restraints.rigid_bond_proxy(i_seqs=i_seqs,weight=weight)
  assert p.i_seqs == i_seqs
  assert p.weight == weight
  # Two-site case with hand-computed delta_z, residual and gradients.
  sites = ((1,2,3),(2,3,4))
  u_cart = ((1,2,3,4,5,6), (3,4,5,6,7,8))
  expected_gradients = ((-4, -4, -4, -8, -8, -8), (4, 4, 4, 8, 8, 8))
  r = adp_restraints.rigid_bond(sites=sites, u_cart=u_cart, weight=weight)
  assert r.weight == weight
  assert approx_equal(r.delta_z(), -6)
  assert approx_equal(r.residual(), 36)
  assert approx_equal(r.gradients(), expected_gradients)
  # Restraint built from a proxy over a 3-site model; the proxy selects
  # sites 1 and 2.
  sites_cart = flex.vec3_double(((1,2,3),(2,5,4),(3,4,5)))
  u_cart = flex.sym_mat3_double(((1,2,3,4,5,6),
                                 (2,3,3,5,7,7),
                                 (3,4,5,3,7,8)))
  r = adp_restraints.rigid_bond(
    adp_restraint_params(sites_cart=sites_cart, u_cart=u_cart),
    proxy=p)
  assert approx_equal(r.weight, weight)
  # The fractional-coordinate rigid_bond_pair must agree with the
  # cartesian restraint (delta_z is unsigned in the pair variant).
  unit_cell = uctbx.unit_cell([15,25,30,90,90,90])
  sites_frac = unit_cell.fractionalize(sites_cart=sites_cart)
  u_star = flex.sym_mat3_double([
    adptbx.u_cart_as_u_star(unit_cell, u_cart_i)
    for u_cart_i in u_cart])
  pair = adp_restraints.rigid_bond_pair(sites_frac[1],
                                     sites_frac[2],
                                     u_star[1],
                                     u_star[2],
                                     unit_cell)
  assert approx_equal(pair.delta_z(), abs(r.delta_z()))
  assert approx_equal(pair.z_12(), r.z_12())
  assert approx_equal(pair.z_21(), r.z_21())
  #
  # Shared-proxy helpers: two identical proxies double the residual sum
  # and the accumulated gradients.
  gradients_aniso_cart = flex.sym_mat3_double(sites_cart.size(), (0,0,0,0,0,0))
  proxies = adp_restraints.shared_rigid_bond_proxy([p,p])
  params = adp_restraint_params(sites_cart=sites_cart, u_cart=u_cart)
  residuals = adp_restraints.rigid_bond_residuals(params, proxies=proxies)
  assert approx_equal(residuals, (r.residual(),r.residual()))
  deltas = adp_restraints.rigid_bond_deltas(params, proxies=proxies)
  assert approx_equal(deltas, (r.delta_z(),r.delta_z()))
  residual_sum = adp_restraints.rigid_bond_residual_sum(
    params=params,
    proxies=proxies,
    gradients_aniso_cart=gradients_aniso_cart)
  assert approx_equal(residual_sum, 2 * r.residual())
  for g,e in zip(gradients_aniso_cart[1:3], r.gradients()):
    assert approx_equal(g, matrix.col(e)*2)
  # Finite-difference check of the analytical aniso gradients (the iso
  # gradients returned by the helper are not needed here).
  fd_grads_aniso = finite_difference_gradients(
    restraint_type=adp_restraints.rigid_bond,
    proxy=p,
    sites_cart=sites_cart,
    u_cart=u_cart)[0]
  for g,e in zip(gradients_aniso_cart, fd_grads_aniso):
    assert approx_equal(g, matrix.col(e)*2)
  #
  # Frame invariance: the residual must be unchanged under any rigid
  # rotation applied to both the sites and the U tensors.
  #
  u_cart_1 = matrix.sym(sym_mat3=(0.1,0.2,0.05,0.03,0.02,0.01))
  u_cart_2 = matrix.sym(sym_mat3=(0.21,0.32,0.11,0.02,0.02,0.07))
  u_cart = (u_cart_1.as_sym_mat3(),u_cart_2.as_sym_mat3())
  site_cart_1 = matrix.col((1,2,3))
  site_cart_2 = matrix.col((3,1,4.2))
  sites = (tuple(site_cart_1),tuple(site_cart_2))
  a = adp_restraints.rigid_bond(sites=sites, u_cart=u_cart, weight=1)
  expected_residual = a.residual()
  gen = flex.mersenne_twister()
  for i in range(20):
    R = matrix.rec(gen.random_double_r3_rotation_matrix(),(3,3))
    # Rotate tensors as R * U * R^T and sites as R * x.
    u_cart_1_rot = R * u_cart_1 * R.transpose()
    u_cart_2_rot = R * u_cart_2 * R.transpose()
    u_cart = (u_cart_1_rot.as_sym_mat3(),u_cart_2_rot.as_sym_mat3())
    site_cart_1_rot = R * site_cart_1
    site_cart_2_rot = R * site_cart_2
    sites = (tuple(site_cart_1_rot),tuple(site_cart_2_rot))
    a = adp_restraints.rigid_bond(
      sites=sites, u_cart=u_cart,
      weight=1)
    assert approx_equal(a.residual(), expected_residual)
예제 #36
0
def exercise_rigid_bond():
  """Exercise the adp_restraints rigid-bond restraint API.

  Checks, in order: proxy attributes; residual, delta_z and gradients
  against hand-computed values; consistency between rigid_bond and the
  fractional rigid_bond_pair variant; the shared-proxy residuals/deltas/
  residual-sum helpers; finite-difference validation of the analytical
  gradients; and invariance of the residual under rigid rotations.
  """
  i_seqs = (1,2)
  weight = 1
  p = adp_restraints.rigid_bond_proxy(i_seqs=i_seqs,weight=weight)
  assert p.i_seqs == i_seqs
  assert p.weight == weight
  # Hand-computed reference values for a simple two-site configuration.
  sites = ((1,2,3),(2,3,4))
  u_cart = ((1,2,3,4,5,6), (3,4,5,6,7,8))
  expected_gradients = ((-4, -4, -4, -8, -8, -8), (4, 4, 4, 8, 8, 8))
  r = adp_restraints.rigid_bond(sites=sites, u_cart=u_cart, weight=weight)
  assert r.weight == weight
  assert approx_equal(r.delta_z(), -6)
  assert approx_equal(r.residual(), 36)
  assert approx_equal(r.gradients(), expected_gradients)
  # Same restraint driven through a proxy over a 3-site model
  # (the proxy picks sites 1 and 2).
  sites_cart = flex.vec3_double(((1,2,3),(2,5,4),(3,4,5)))
  u_cart = flex.sym_mat3_double(((1,2,3,4,5,6),
                                 (2,3,3,5,7,7),
                                 (3,4,5,3,7,8)))
  r = adp_restraints.rigid_bond(
    adp_restraint_params(sites_cart=sites_cart, u_cart=u_cart),
    proxy=p)
  assert approx_equal(r.weight, weight)
  # rigid_bond_pair works in fractional coordinates / u_star and must
  # agree with the cartesian restraint (its delta_z is unsigned).
  unit_cell = uctbx.unit_cell([15,25,30,90,90,90])
  sites_frac = unit_cell.fractionalize(sites_cart=sites_cart)
  u_star = flex.sym_mat3_double([
    adptbx.u_cart_as_u_star(unit_cell, u_cart_i)
    for u_cart_i in u_cart])
  pair = adp_restraints.rigid_bond_pair(sites_frac[1],
                                     sites_frac[2],
                                     u_star[1],
                                     u_star[2],
                                     unit_cell)
  assert approx_equal(pair.delta_z(), abs(r.delta_z()))
  assert approx_equal(pair.z_12(), r.z_12())
  assert approx_equal(pair.z_21(), r.z_21())
  #
  # Two copies of the same proxy: residuals/deltas repeat, the sum and
  # the accumulated gradients double.
  gradients_aniso_cart = flex.sym_mat3_double(sites_cart.size(), (0,0,0,0,0,0))
  proxies = adp_restraints.shared_rigid_bond_proxy([p,p])
  params = adp_restraint_params(sites_cart=sites_cart, u_cart=u_cart)
  residuals = adp_restraints.rigid_bond_residuals(params, proxies=proxies)
  assert approx_equal(residuals, (r.residual(),r.residual()))
  deltas = adp_restraints.rigid_bond_deltas(params, proxies=proxies)
  assert approx_equal(deltas, (r.delta_z(),r.delta_z()))
  residual_sum = adp_restraints.rigid_bond_residual_sum(
    params=params,
    proxies=proxies,
    gradients_aniso_cart=gradients_aniso_cart)
  assert approx_equal(residual_sum, 2 * r.residual())
  for g,e in zip(gradients_aniso_cart[1:3], r.gradients()):
    assert approx_equal(g, matrix.col(e)*2)
  # Analytical vs finite-difference aniso gradients; the iso gradients
  # from the helper are unused in this exercise.
  fd_grads_aniso = finite_difference_gradients(
    restraint_type=adp_restraints.rigid_bond,
    proxy=p,
    sites_cart=sites_cart,
    u_cart=u_cart)[0]
  for g,e in zip(gradients_aniso_cart, fd_grads_aniso):
    assert approx_equal(g, matrix.col(e)*2)
  #
  # Frame invariance: rotating sites (R * x) and tensors (R * U * R^T)
  # together must leave the residual unchanged.
  #
  u_cart_1 = matrix.sym(sym_mat3=(0.1,0.2,0.05,0.03,0.02,0.01))
  u_cart_2 = matrix.sym(sym_mat3=(0.21,0.32,0.11,0.02,0.02,0.07))
  u_cart = (u_cart_1.as_sym_mat3(),u_cart_2.as_sym_mat3())
  site_cart_1 = matrix.col((1,2,3))
  site_cart_2 = matrix.col((3,1,4.2))
  sites = (tuple(site_cart_1),tuple(site_cart_2))
  a = adp_restraints.rigid_bond(sites=sites, u_cart=u_cart, weight=1)
  expected_residual = a.residual()
  gen = flex.mersenne_twister()
  for i in range(20):
    R = matrix.rec(gen.random_double_r3_rotation_matrix(),(3,3))
    u_cart_1_rot = R * u_cart_1 * R.transpose()
    u_cart_2_rot = R * u_cart_2 * R.transpose()
    u_cart = (u_cart_1_rot.as_sym_mat3(),u_cart_2_rot.as_sym_mat3())
    site_cart_1_rot = R * site_cart_1
    site_cart_2_rot = R * site_cart_2
    sites = (tuple(site_cart_1_rot),tuple(site_cart_2_rot))
    a = adp_restraints.rigid_bond(
      sites=sites, u_cart=u_cart,
      weight=1)
    assert approx_equal(a.residual(), expected_residual)