def get_optimisation_test_set(n_grp, n_dst, n_atm, residual_amplitude=1.0):
  """Generate a test set of random uijs for optimisation"""

  assert n_grp > 0
  assert n_grp <= len(TLS_MATRICES)
  assert n_dst > 0
  assert n_atm > 3

  target_uijs = numpy.zeros((n_dst, n_atm, 6))

  # Default to equal weights
  target_weights = flex.double(flex.grid((n_dst, n_atm)), 1.0)

  # Create residual base values and per-atom residual amplitudes
  residual_values_numpy = rran(n_atm*6).reshape((n_atm,6))
  real_residual_amps = residual_amplitude * rran(n_atm)
  residual_base = flex.sym_mat3_double(residual_values_numpy)

  # Create residual total and add to target
  residual_uijs = real_residual_amps.reshape((n_atm,1)) * residual_values_numpy
  for i_dst in xrange(n_dst):
    target_uijs[i_dst] = residual_uijs

  # Generate a random set of amplitudes
  real_group_amps = rran(n_grp*n_dst).reshape((n_grp,n_dst))

  # Output lists of base uijs and atoms they are associated with
  base_uijs = []
  base_sels = []
  # Mapping of elements to datasets
  dataset_hash = flex.size_t(range(n_dst) * n_grp)

  for i_grp in xrange(n_grp):
    # Number of atoms in this group -- at least 2
    if n_grp == 1:
      n_atm_this = n_atm
    else:
      n_atm_this = max(2,iran(n_atm))
    # Which atoms are covered by this group
    i_sel = iran(n_atm, size=n_atm_this, replace=False)
    # Generate some uijs for this
    tls_m = TLSMatrices(TLS_MATRICES[i_grp])
    for i_dst in xrange(n_dst):
      # Random coordinates in a box of side 50 centred on the origin
      x_ = 50.0*(-0.5+rran(n_atm_this*3).reshape((n_atm_this,3)))
      x_ = flex.vec3_double(x_)
      # Generate the base elements
      u_ = tls_m.uijs(x_, origin=(0.,0.,0.))
      base_uijs.append(flex.sym_mat3_double(u_))
      base_sels.append(flex.size_t(i_sel))
      # Generate the amplitude-multiplied equivalent
      u_m = (real_group_amps[i_grp,i_dst]*tls_m).uijs(x_, origin=(0.,0.,0.))
      target_uijs[i_dst, i_sel, :] += numpy.array(u_m)
  # Reshape
  target_uijs = flex.sym_mat3_double(target_uijs.reshape((n_dst*n_atm,6)))
  target_uijs.reshape(flex.grid((n_dst,n_atm)))

  return (target_uijs, target_weights,
          base_uijs, base_sels, dataset_hash,
          residual_base,
          real_group_amps.flatten(), real_residual_amps)
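# Hypothetical sanity check of the generated test set (not part of the original
# listing). Assumes the helpers used above: `rran`/`iran` behaving like
# numpy.random.random/numpy.random.choice and `flex` from scitbx.array_family.
def _check_optimisation_test_set(n_grp=2, n_dst=3, n_atm=8):
  t_uijs, t_wgts, b_uijs, b_sels, dst_hash, resid_base, grp_amps, resid_amps = \
      get_optimisation_test_set(n_grp, n_dst, n_atm)
  assert t_uijs.all() == (n_dst, n_atm)   # one sym_mat3 per dataset/atom
  assert t_wgts.all() == (n_dst, n_atm)
  assert len(b_uijs) == len(b_sels) == n_grp * n_dst
  assert len(dst_hash) == n_grp * n_dst
  assert len(resid_base) == n_atm
  assert grp_amps.shape == (n_grp * n_dst,)
  assert resid_amps.shape == (n_atm,)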
def set_ladp(xray_structure, axes_and_atoms_i_seqs, value, depth,
             enable_recursion=True):
  sc = (math.pi/180)
  sites_cart = xray_structure.sites_cart()
  scatterers = xray_structure.scatterers()
  all_selections = flex.size_t()
  u_carts = flex.sym_mat3_double(sites_cart.size(), [0,0,0,0,0,0])
  # Build the selection of atoms that will be passed to set_u_cart below
  for i_seq, aaa_ in enumerate(axes_and_atoms_i_seqs):
    if(enable_recursion): query = i_seq >= depth
    else: query = i_seq == depth
    if(query): all_selections.extend(aaa_[0][1])
  # Accumulate a libration-style u_cart for each moving atom about each axis
  for i_seq, r in enumerate(axes_and_atoms_i_seqs):
    if(enable_recursion): query = i_seq >= depth
    else: query = i_seq == depth
    if(query):
      for aaai in r:
        # The rotation axis runs from G1 to G2; l is its unit direction
        G1 = flex.double(sites_cart[aaai[0][0]])
        G2 = flex.double(sites_cart[aaai[0][1]])
        g = G2-G1
        dg = math.sqrt(g[0]**2+g[1]**2+g[2]**2)
        lx,ly,lz = g/dg
        l = [lx,ly,lz]
        # L = l l^T, the rank-1 tensor for libration about the axis
        L = matrix.sqr((lx**2,lx*ly,lx*lz, lx*ly,ly**2,ly*lz, lx*lz,ly*lz,lz**2))
        for i_seq_moving in aaai[1]:
          site_cart = sites_cart[i_seq_moving]
          delta = flex.double(site_cart) - G1
          # A is the skew-symmetric matrix of the lever arm (site - G1)
          A = matrix.sqr(
            (0,delta[2],-delta[1], -delta[2],0,delta[0], delta[1],-delta[0],0))
          # u_cart = value * sc * A L A^T, accumulated per atom below
          u_cart = (value * A * L * A.transpose() * sc).as_sym_mat3()
          check_u_cart(axis = l, u_cart = u_cart)
          scatterers[i_seq_moving].flags.set_use_u_aniso(True)
          u_carts[i_seq_moving] = list(flex.double(u_carts[i_seq_moving]) +
            flex.double(u_cart))
  xray_structure.set_u_cart(u_cart = u_carts, selection = all_selections)
  return xray_structure
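# Illustrative numpy re-derivation (not part of the original listing) of the
# per-atom u_cart built in set_ladp: for lever arm d = site - G1 about a unit
# axis l, u_cart = value * (pi/180) * A L A^T with L = l l^T and A the
# skew-symmetric matrix of d. Function name is hypothetical.
def _single_atom_libration_u_cart(site, axis_point, axis_direction, value):
  import numpy
  l = numpy.asarray(axis_direction, dtype=float)
  l = l / numpy.linalg.norm(l)            # unit libration axis
  L = numpy.outer(l, l)                   # rank-1 tensor l l^T
  d = numpy.asarray(site, dtype=float) - numpy.asarray(axis_point, dtype=float)
  A = numpy.array([[ 0.0,  d[2], -d[1]],  # skew matrix of the lever arm
                   [-d[2],  0.0,  d[0]],
                   [ d[1], -d[0],  0.0]])
  u = value * (numpy.pi / 180.0) * A.dot(L).dot(A.T)
  # sym_mat3 order: (u11, u22, u33, u12, u13, u23)
  return (u[0, 0], u[1, 1], u[2, 2], u[0, 1], u[0, 2], u[1, 2])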
def time_ellipsoid(n=1000000):
  from gltbx.quadrics import time_ellipsoid_to_sphere_transform
  import timeit
  u = flex.sym_mat3_double(n, (0.0008, 0.0004, 0.0002,
                               0.0001, 0.00015, 0.00005))
  timer = timeit.Timer(lambda: time_ellipsoid_to_sphere_transform(u))
  # \xb5s is unicode for mu
  print (u"%i ellipsoid --> sphere transforms: %.3g \xb5s per transform"
         % (n, timer.timeit(1)/n*1e6))
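# Minimal driver for the benchmark above (hypothetical, not part of the
# original listing); run directly to time the transform.
if __name__ == "__main__":
  time_ellipsoid(n=100000)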
def ru(crystal_symmetry, u_scale=1, u_min=0.1):
    """Return a random u_cart (as a one-element flex.sym_mat3_double) consistent
    with the ADP symmetry constraints of the crystal's point group."""
    from cctbx import sgtbx
    symbol = crystal_symmetry.space_group().type().lookup_symbol()
    point_group = sgtbx.space_group_info(
        symbol=symbol).group().build_derived_point_group()
    adp_constraints = sgtbx.tensor_rank_2_constraints(space_group=point_group,
                                                      reciprocal_space=True)
    u_star = adptbx.u_cart_as_u_star(
        crystal_symmetry.unit_cell(),
        adptbx.random_u_cart(u_scale=u_scale, u_min=u_min))
    u_indep = adp_constraints.independent_params(all_params=u_star)
    u_star = adp_constraints.all_params(independent_params=u_indep)
    r = flex.sym_mat3_double()
    r.append(adptbx.u_star_as_u_cart(crystal_symmetry.unit_cell(), u_star))
    return r
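# Hypothetical usage sketch for ru (not part of the original listing); assumes
# `flex` and `adptbx` are the scitbx/cctbx modules used throughout this listing.
def _example_ru():
    from cctbx import crystal
    cs = crystal.symmetry(unit_cell=(10, 12, 14, 90, 90, 90),
                          space_group_symbol="P 21 21 21")
    # a single random u_cart, projected onto the symmetry-allowed subspace
    u_carts = ru(cs, u_scale=1, u_min=0.1)
    assert len(u_carts) == 1
    return u_carts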
def scale_uij_to_target_by_selection(hierarchy,
                                     selections,
                                     target=1.0,
                                     tolerance=1e-12):
    """Change the scale of groups of uij so that the maximum eigenvalue of each selection is `target` (normally for visualisation only). also scale B by the same value."""
    hierarchy = hierarchy.deep_copy()
    cache = hierarchy.atom_selection_cache()
    all_uij = hierarchy.atoms().extract_uij()
    # Pull out copies of the uijs and b for scaling
    out_u = hierarchy.atoms().extract_uij()
    out_b = hierarchy.atoms().extract_b()
    # Iterate through the selections
    for sel_str in selections:
        # Convert to boolean selection
        sel = cache.selection(sel_str)
        # Extract uijs to calculate scale factor
        uijs = numpy.array(all_uij.select(sel))
        assert uijs.shape[1] == 6
        # Extract the maximum axis length of each uij
        eigs = numpy.apply_along_axis(uij_eigenvalues, axis=1, arr=uijs)
        maxs = numpy.max(eigs, axis=1)
        # Calculate average of the maxima
        mean_max = numpy.mean(maxs)
        # Scale to zero if the mean maximum eigenvalue is approximately zero
        if mean_max < tolerance:
            mult = 0.0
        else:
            # Scale factor that maps the mean maximum eigenvalue onto target
            mult = float(target / mean_max)
        # Apply the scaling to the uij and b values of this selection
        out_u.set_selected(
            sel, flex.sym_mat3_double(out_u.select(sel).as_double() * mult))
        out_b.set_selected(sel, out_b.select(sel) * mult)
    # Apply the scaled values to the hierarchy
    hierarchy.atoms().set_uij(out_u)
    hierarchy.atoms().set_b(out_b)
    return hierarchy
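# Hypothetical usage sketch (not part of the original listing); assumes
# iotbx.pdb is available and a file "model.pdb" exists. The chain selections
# below are examples only.
def _example_scale_uij(pdb_file="model.pdb"):
    import iotbx.pdb
    hierarchy = iotbx.pdb.input(file_name=pdb_file).construct_hierarchy()
    scaled = scale_uij_to_target_by_selection(
        hierarchy,
        selections=["chain A", "chain B"],
        target=1.0)
    scaled.write_pdb_file(file_name="model_scaled_uij.pdb")
    return scaled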
def tst_functional_gradient_calculator_invalid_arguments():
    """Check errors are raised as expected"""

    n_grp = 3
    n_dst = 5
    n_atm = 10

    target_uijs, target_weights, \
      base_uijs, base_sels, dataset_hash, \
      atomic_base, \
      real_group_amps, real_atomic_amps \
        = get_optimisation_test_set(n_grp, n_dst, n_atm)

    ########################################################
    # Check expected error messages are raised
    ########################################################

    # Starting values
    base_amplitudes_start = flex.double(n_grp * n_dst, 1.0)

    wgt_kw_args = dict(
        weight_sum_of_amplitudes=0.0,
        weight_sum_of_amplitudes_squared=0.0,
        weight_sum_of_squared_amplitudes=0.0,
    )

    # Should not error
    f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
        target_uijs=target_uijs,
        target_weights=target_weights,
        base_amplitudes=base_amplitudes_start,
        base_uijs=base_uijs,
        base_atom_indices=base_sels,
        base_dataset_hash=dataset_hash,
        atomic_uijs=atomic_base,
        **wgt_kw_args)
    f, g = f_g_calculator.compute_functional_and_gradients()

    # target_uijs

    msg = "invalid target_uijs: must be 2-dimensional flex array (currently 3)"
    with raises(Exception) as e:
        f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
            target_uijs=flex.sym_mat3_double(flex.grid((n_dst - 1, n_atm, 5)),
                                             (1., 1., 1., 0., 0., 0.)),
            target_weights=target_weights,
            base_amplitudes=base_amplitudes_start,
            base_uijs=base_uijs,
            base_atom_indices=base_sels,
            base_dataset_hash=dataset_hash,
            atomic_uijs=atomic_base,
            **wgt_kw_args)
    assert msg == str(e.value), '"{}" does not match "{}"'.format(
        msg, str(e.value))

    # target_weights

    msg = "invalid dimension of target_weights (dimension 3): must be same dimension as target_uijs (dimension 2)"
    with raises(Exception) as e:
        f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
            target_uijs=target_uijs,
            target_weights=flex.double(flex.grid((n_dst, n_atm, 5)), 1.0),
            base_amplitudes=base_amplitudes_start,
            base_uijs=base_uijs,
            base_atom_indices=base_sels,
            base_dataset_hash=dataset_hash,
            atomic_uijs=atomic_base,
            **wgt_kw_args)
    assert msg == str(e.value)

    msg = "incompatible dimension of target_weights (axis 0): must be same size as target_uijs ({} != {})".format(
        n_dst, n_dst - 1)
    with raises(Exception) as e:
        f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
            target_uijs=flex.sym_mat3_double(flex.grid((n_dst - 1, n_atm)),
                                             (1., 1., 1., 0., 0., 0.)),
            target_weights=target_weights,
            base_amplitudes=base_amplitudes_start,
            base_uijs=base_uijs,
            base_atom_indices=base_sels,
            base_dataset_hash=dataset_hash,
            atomic_uijs=atomic_base,
            **wgt_kw_args)
    assert msg == str(e.value)

    msg = "incompatible dimension of target_weights (axis 1): must be same size as target_uijs ({} != {})".format(
        n_atm, n_atm + 1)
    with raises(Exception) as e:
        f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
            target_uijs=flex.sym_mat3_double(flex.grid((n_dst, n_atm + 1)),
                                             (1., 1., 1., 0., 0., 0.)),
            target_weights=target_weights,
            base_amplitudes=base_amplitudes_start,
            base_uijs=base_uijs,
            base_atom_indices=base_sels,
            base_dataset_hash=dataset_hash,
            atomic_uijs=atomic_base,
            **wgt_kw_args)
    assert msg == str(e.value)

    # base components

    msg = "invalid input base components. base_amplitudes (length {}), base_uijs (length {}) and base_atom_indices (length {}) must all be the same length"
    with raises(Exception) as e:
        f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
            target_uijs=target_uijs,
            target_weights=target_weights,
            base_amplitudes=flex.double(n_grp * n_dst - 1, 1.0),
            base_uijs=base_uijs,
            base_atom_indices=base_sels,
            base_dataset_hash=dataset_hash,
            atomic_uijs=atomic_base,
            **wgt_kw_args)
    assert msg.format(n_grp * n_dst - 1, n_grp * n_dst,
                      n_grp * n_dst) == str(e.value)
    with raises(Exception) as e:
        f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
            target_uijs=target_uijs,
            target_weights=target_weights,
            base_amplitudes=base_amplitudes_start,
            base_uijs=base_uijs[:-1],
            base_atom_indices=base_sels[:-1],
            base_dataset_hash=dataset_hash,
            atomic_uijs=atomic_base,
            **wgt_kw_args)
    assert msg.format(n_grp * n_dst, n_grp * n_dst - 1,
                      n_grp * n_dst - 1) == str(e.value)
    with raises(Exception) as e:
        f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
            target_uijs=target_uijs,
            target_weights=target_weights,
            base_amplitudes=base_amplitudes_start,
            base_uijs=base_uijs[:-1],
            base_atom_indices=base_sels,
            base_dataset_hash=dataset_hash,
            atomic_uijs=atomic_base,
            **wgt_kw_args)
    assert msg.format(n_grp * n_dst, n_grp * n_dst - 1,
                      n_grp * n_dst) == str(e.value)
    with raises(Exception) as e:
        f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
            target_uijs=target_uijs,
            target_weights=target_weights,
            base_amplitudes=base_amplitudes_start,
            base_uijs=base_uijs,
            base_atom_indices=base_sels[:-1],
            base_dataset_hash=dataset_hash,
            atomic_uijs=atomic_base,
            **wgt_kw_args)
    assert msg.format(n_grp * n_dst, n_grp * n_dst,
                      n_grp * n_dst - 1) == str(e.value)

    msg = "incompatible pair (element 2) in base_uijs/base_atom_indices: pairwise elements must be the same length ({} and {})".format(
        n_atm + 1, len(base_uijs[2]))
    with raises(Exception) as e:
        f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
            target_uijs=target_uijs,
            target_weights=target_weights,
            base_amplitudes=base_amplitudes_start,
            base_uijs=base_uijs[:2] + [flex.sym_mat3_double(n_atm + 1)] +
            base_uijs[3:],
            base_atom_indices=base_sels,
            base_dataset_hash=dataset_hash,
            atomic_uijs=atomic_base,
            **wgt_kw_args)
    assert msg == str(e.value)

    msg = "incompatible pair (element 2) in base_uijs/base_atom_indices: pairwise elements must be the same length ({} and {})".format(
        len(base_sels[2]), n_atm + 1)
    with raises(Exception) as e:
        f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
            target_uijs=target_uijs,
            target_weights=target_weights,
            base_amplitudes=base_amplitudes_start,
            base_uijs=base_uijs,
            base_atom_indices=base_sels[:2] + [flex.size_t_range(n_atm + 1)] +
            base_sels[3:],
            base_dataset_hash=dataset_hash,
            atomic_uijs=atomic_base,
            **wgt_kw_args)
    assert msg == str(e.value)

    msg = "invalid selection in base_atom_indices ({}): attempting to select atom outside of array (size {})".format(
        n_atm, n_atm)
    with raises(Exception) as e:
        f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
            target_uijs=target_uijs,
            target_weights=target_weights,
            base_amplitudes=base_amplitudes_start,
            base_uijs=base_uijs[:2] + [flex.sym_mat3_double(n_atm + 1)] +
            base_uijs[3:],
            base_atom_indices=base_sels[:2] + [flex.size_t_range(n_atm + 1)] +
            base_sels[3:],
            base_dataset_hash=dataset_hash,
            atomic_uijs=atomic_base,
            **wgt_kw_args)
    assert msg == str(e.value)

    # dataset_hash

    msg = "invalid base_dataset_hash (length {}): must be same length as base_amplitudes, base_uijs & base_atom_indices (length {})".format(
        len(dataset_hash) - 1, len(dataset_hash))
    with raises(Exception) as e:
        f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
            target_uijs=target_uijs,
            target_weights=target_weights,
            base_amplitudes=base_amplitudes_start,
            base_uijs=base_uijs,
            base_atom_indices=base_sels,
            base_dataset_hash=flex.size_t(list(dataset_hash)[:-1]),
            atomic_uijs=atomic_base,
            **wgt_kw_args)
    assert msg == str(e.value), (msg, str(e.value))

    msg = "invalid value in base_dataset_hash ({}): attempts to select element outside range of target_uijs (size {})".format(
        n_dst - 1, n_dst - 1)
    with raises(Exception) as e:
        f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
            target_uijs=flex.sym_mat3_double(flex.grid((n_dst - 1, n_atm)),
                                             (1., 1., 1., 0., 0., 0.)),
            target_weights=flex.double(flex.grid((n_dst - 1, n_atm)),
                                       1.0),  # need to resize this also
            base_amplitudes=base_amplitudes_start,
            base_uijs=base_uijs,
            base_atom_indices=base_sels,
            base_dataset_hash=dataset_hash,
            atomic_uijs=atomic_base,
            **wgt_kw_args)
    assert msg == str(e.value), (msg, str(e.value))

    msg = "Dataset index {} is not present in base_dataset_hash -- this dataset has no base elements associated with it.".format(
        n_dst)
    with raises(Exception) as e:
        f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
            target_uijs=flex.sym_mat3_double(flex.grid((n_dst + 1, n_atm)),
                                             (1., 1., 1., 0., 0., 0.)),
            target_weights=flex.double(flex.grid((n_dst + 1, n_atm)),
                                       1.0),  # need to resize this also
            base_amplitudes=base_amplitudes_start,
            base_uijs=base_uijs,
            base_atom_indices=base_sels,
            base_dataset_hash=dataset_hash,
            atomic_uijs=atomic_base,
            **wgt_kw_args)
    assert msg == str(e.value), (msg, str(e.value))

    # atomic uijs

    msg = "invalid size of atomic_uijs ({}): must match 2nd dimension of target_uijs ({})".format(
        n_atm - 1, n_atm)
    with raises(Exception) as e:
        f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
            target_uijs=target_uijs,
            target_weights=target_weights,
            base_amplitudes=base_amplitudes_start,
            base_uijs=base_uijs,
            base_atom_indices=base_sels,
            base_dataset_hash=dataset_hash,
            atomic_uijs=flex.sym_mat3_double(n_atm - 1),
            **wgt_kw_args)
    assert msg == str(e.value), (msg, str(e.value))

    # atomic mask

    f_g_calculator = MultiGroupMultiDatasetUijAmplitudeFunctionalAndGradientCalculator(
        target_uijs=target_uijs,
        target_weights=target_weights,
        base_amplitudes=base_amplitudes_start,
        base_uijs=base_uijs,
        base_atom_indices=base_sels,
        base_dataset_hash=dataset_hash,
        atomic_uijs=atomic_base,
        **wgt_kw_args)
    # should not error
    f_g_calculator.set_atomic_optimisation_mask(flex.bool(n_dst, True))
    f_g_calculator.set_atomic_optimisation_mask(flex.bool(n_dst, False))
    # should error
    msg = "Input array (size {}) must be the same length as number of datasets ({})".format(
        n_dst - 1, n_dst)
    with raises(Exception) as e:
        f_g_calculator.set_atomic_optimisation_mask(flex.bool(n_dst - 1, True))
    assert msg == str(e.value)
    msg = "Input array (size {}) must be the same length as number of datasets ({})".format(
        n_dst + 1, n_dst)
    with raises(Exception) as e:
        f_g_calculator.set_atomic_optimisation_mask(flex.bool(n_dst + 1, True))
    assert msg == str(e.value)

    # setting amplitudes

    # should not error
    f_g_calculator.set_current_amplitudes(flex.double(n_grp * n_dst + n_atm))
    # should error
    msg = "Input array (size {}) must be the same length as current_amplitudes (size {})".format(
        n_grp * n_dst + n_atm - 1, n_grp * n_dst + n_atm)
    with raises(Exception) as e:
        f_g_calculator.set_current_amplitudes(
            flex.double(n_grp * n_dst + n_atm - 1))
    assert msg == str(e.value)

    print('OK')