Example #1
def exercise_term_gradients_d_ab(term, x_max=1., n_points=50):
  for i in xrange(n_points+1):
    x = x_max * i / n_points
    grad_finite = term_finite_gradient_d_ab_at_x(term, x)
    grad_analytical = term.gradients_d_ab_at_x_sq(x*x)
    assert eps_eq(grad_finite.a, grad_analytical.a)
    assert eps_eq(grad_finite.b, grad_analytical.b)
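The test above cross-checks the analytical parameter gradients of a single Gaussian term a*exp(-b*x^2) against central finite differences. Below is a self-contained sketch of the same idea in plain Python; term_at_x, the gradient helpers, and the tolerance are stand-ins for the cctbx gaussian.term API and eps_eq, not the actual implementations.

import math

def term_at_x(a, b, x):
    # Single Gaussian term a * exp(-b * x^2).
    return a * math.exp(-b * x * x)

def analytical_grad_d_ab(a, b, x):
    # d/da = exp(-b*x^2); d/db = -a * x^2 * exp(-b*x^2)
    e = math.exp(-b * x * x)
    return e, -a * x * x * e

def finite_grad_d_ab(a, b, x, eps=1.e-6):
    # Central finite differences with respect to a and b.
    ga = (term_at_x(a + eps, b, x) - term_at_x(a - eps, b, x)) / (2 * eps)
    gb = (term_at_x(a, b + eps, x) - term_at_x(a, b - eps, x)) / (2 * eps)
    return ga, gb

for i in range(51):
    x = i / 50.
    for finite, analytical in zip(finite_grad_d_ab(2., 3., x),
                                  analytical_grad_d_ab(2., 3., x)):
        assert abs(finite - analytical) < 1.e-6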
Example #2
def recycle():
    for n, first, last in [[(5, 3, 4), (0, 0, 0), (3, 5, 6)],
                           [(4, 3, 5), (-1, -3, 4), (6, 4, 5)],
                           [(3, 4, 5), (-2, 3, 0), (-2, 3, 0)],
                           [(3, 4, 5), (-2, 3, 0), (-2, 3, 3)],
                           [(3, 4, 5), (-2, 3, 0), (-2, 8, 0)],
                           [(3, 4, 5), (-2, 3, 0), (-2, 9, 0)],
                           [(3, 4, 5), (-2, 3, 0), (3, 3, 0)],
                           [(3, 4, 5), (-2, 3, 0), (4, 3, 0)]]:
        gridding = iotbx.xplor.map.gridding(n=n, first=first, last=last)
        flex_grid = gridding.as_flex_grid()
        data = 20000 * flex.random_double(size=flex_grid.size_1d()) - 10000
        data.resize(flex_grid)
        stats = maptbx.statistics(data)
        iotbx.xplor.map.writer(file_name="tmp.map",
                               title_lines=["regression test"],
                               unit_cell=uctbx.unit_cell(
                                   (10, 20, 30, 80, 90, 100)),
                               gridding=gridding,
                               data=data,
                               average=stats.mean(),
                               standard_deviation=stats.sigma())
        read = iotbx.xplor.map.reader(file_name="tmp.map")
        assert read.title_lines == ["regression test"]
        assert read.gridding.n == n
        assert read.gridding.first == first
        assert read.gridding.last == last
        assert read.unit_cell.is_similar_to(
            uctbx.unit_cell((10, 20, 30, 80, 90, 100)))
        assert eps_eq(read.average, stats.mean(), eps=1.e-4)
        assert eps_eq(read.standard_deviation, stats.sigma(), eps=1.e-4)
        assert read.data.origin() == first
        assert read.data.last(False) == last
        assert read.data.focus() == data.focus()
        assert eps_eq(read.data, data, eps=1.e-4)
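recycle() writes random map values to an X-PLOR format file and asserts that everything read back matches, using a 1.e-4 tolerance on the data because the format stores values with limited decimal precision. A minimal stand-alone sketch of that write/read/compare-within-tolerance pattern, using a plain text file instead of iotbx.xplor.map (the file format and names below are made up):

import os
import random
import tempfile

values = [20000 * random.random() - 10000 for _ in range(100)]

# Write with limited precision, as a fixed-format map file would.
fd, path = tempfile.mkstemp(suffix=".txt")
with os.fdopen(fd, "w") as f:
    for v in values:
        f.write("%12.5E\n" % v)

# Read back and compare within a tolerance matching the stored precision.
with open(path) as f:
    read_back = [float(line) for line in f]
os.remove(path)

assert len(read_back) == len(values)
for orig, rec in zip(values, read_back):
    assert abs(orig - rec) <= 1.e-4 * max(1., abs(orig))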
Example #3
def exercise_term_gradients_d_ab(term, x_max=1., n_points=50):
    for i in range(n_points + 1):
        x = x_max * i / n_points
        grad_finite = term_finite_gradient_d_ab_at_x(term, x)
        grad_analytical = term.gradients_d_ab_at_x_sq(x * x)
        assert eps_eq(grad_finite.a, grad_analytical.a)
        assert eps_eq(grad_finite.b, grad_analytical.b)
Example #4
def exercise_integral_dx(gaussian, x_max=1., n_points=1000):
  numerical_integral = 0
  x_step = x_max / n_points
  for i in xrange(n_points+1):
    x = x_max * i / n_points
    new_value = gaussian.at_x(x)
    if (i):
      numerical_integral += (prev_value + new_value) * .5
    prev_value = new_value
    analytical_integral = gaussian.integral_dx_at_x(x, 1.e-3)
    assert eps_eq(analytical_integral, gaussian.integral_dx_at_x(x))
    assert eps_eq(numerical_integral*x_step, analytical_integral, eps=1.e-5)
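exercise_integral_dx accumulates a trapezoidal approximation of the integral of gaussian.at_x from 0 to x and compares it with the analytical integral at every grid point. For a single term a*exp(-b*x^2) the analytical integral can be written with the error function; a plain-Python version of the same cross-check (not the cctbx API, and assuming b > 0) might look like this:

import math

a, b = 2., 3.

def at_x(x):
    return a * math.exp(-b * x * x)

def integral_dx_at_x(x):
    # Integral of a*exp(-b*t^2) from 0 to x, for b > 0.
    return a * math.sqrt(math.pi / (4 * b)) * math.erf(math.sqrt(b) * x)

n_points = 1000
x_max = 1.
x_step = x_max / n_points
numerical_integral = 0.
prev_value = at_x(0.)
for i in range(1, n_points + 1):
    x = x_max * i / n_points
    new_value = at_x(x)
    numerical_integral += (prev_value + new_value) * .5
    prev_value = new_value
    assert abs(numerical_integral * x_step - integral_dx_at_x(x)) < 1.e-5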
Example #5
def exercise_integral_dx(gaussian, x_max=1., n_points=1000):
    numerical_integral = 0
    x_step = x_max / n_points
    for i in range(n_points + 1):
        x = x_max * i / n_points
        new_value = gaussian.at_x(x)
        if (i):
            numerical_integral += (prev_value + new_value) * .5
        prev_value = new_value
        analytical_integral = gaussian.integral_dx_at_x(x, 1.e-3)
        assert eps_eq(analytical_integral, gaussian.integral_dx_at_x(x))
        assert eps_eq(numerical_integral * x_step,
                      analytical_integral,
                      eps=1.e-5)
Example #6
def exercise_fft_map_as_xplor_map(space_group_info, n_elements=10, d_min=3):
  structure = random_structure.xray_structure(
    space_group_info,
    elements=["Si"]*n_elements,
    volume_per_atom=1000,
    min_distance=3.,
    general_positions_only=False)
  f_calc = structure.structure_factors(
    d_min=d_min, anomalous_flag=False).f_calc()
  fft_map = f_calc.fft_map()
  fft_map.as_xplor_map(
    file_name="tmp.map",
    gridding_last=[n-1 for n in fft_map.n_real()])
  read = iotbx.xplor.map.reader(file_name="tmp.map")
  assert read.title_lines == ["cctbx.miller.fft_map"]
  assert read.gridding.n == fft_map.n_real()
  assert approx_equal(flex.linear_correlation(
    read.data.as_1d(),
    fft_map.real_map_unpadded(in_place=False).as_1d()).coefficient(), 1)
  for first,last in [[(0,0,0),(3,5,6)],
                     [(-1,-3,4),(6,4,5)],
                     [(-2,3,0),(-2,3,0)],
                     [(-2,3,0),(-2,3,3)],
                     [(-2,3,0),(-2,8,0)],
                     [(-2,3,0),(-2,9,0)],
                     [(-2,3,0),(3,3,0)],
                     [(-2,3,0),(4,3,0)]]:
    fft_map.as_xplor_map(
      file_name="tmp.map",
      gridding_first=first,
      gridding_last=last)
    read = iotbx.xplor.map.reader(file_name="tmp.map")
    assert read.title_lines == ["cctbx.miller.fft_map"]
    assert read.gridding.n == fft_map.n_real()
    assert read.gridding.first == first
    assert read.gridding.last == last
    real_map = fft_map.real_map()
    first_p1 = [i%n for i,n in zip(first, fft_map.n_real())]
    assert eps_eq(read.data[first], real_map[first_p1], eps=1.e-4)
    last_p1 = [i%n for i,n in zip(last, fft_map.n_real())]
    assert eps_eq(read.data[last], real_map[last_p1], eps=1.e-4)
    for x in xrange(1,10):
      point = [iround(f+(l-f)*x/10.) for f,l in zip(first,last)]
      point_p1 = [i%n for i,n in zip(point, fft_map.n_real())]
      assert eps_eq(read.data[point], real_map[point_p1], eps=1.e-4)
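The gridding windows requested above (first, last) may extend outside the unit-cell grid, so the test maps each requested index back into the 0..n-1 range with i % n before comparing against the P1 real map. A small sketch of that periodic index wrapping (grid dimensions and points below are made up):

n_real = (3, 4, 5)   # hypothetical unit-cell grid dimensions
first = (-2, 3, 0)   # requested window start (may be negative)
last = (4, 3, 0)     # requested window end (may exceed n - 1)

def wrap(point, n_real):
    # Map a grid point with indices outside [0, n) back into the cell.
    return tuple(i % n for i, n in zip(point, n_real))

assert wrap(first, n_real) == (1, 3, 0)
assert wrap(last, n_real) == (1, 3, 0)
# Points that differ by whole grid periods wrap to the same cell point.
assert wrap((-2 + 3, 3 + 4, 0 + 5), n_real) == wrap((-2, 3, 0), n_real)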
Example #7
def exercise_fft_map_as_xplor_map(space_group_info, n_elements=10, d_min=3):
    structure = random_structure.xray_structure(space_group_info,
                                                elements=["Si"] * n_elements,
                                                volume_per_atom=1000,
                                                min_distance=3.,
                                                general_positions_only=False)
    f_calc = structure.structure_factors(d_min=d_min,
                                         anomalous_flag=False).f_calc()
    fft_map = f_calc.fft_map()
    fft_map.as_xplor_map(file_name="tmp.map",
                         gridding_last=[n - 1 for n in fft_map.n_real()])
    read = iotbx.xplor.map.reader(file_name="tmp.map")
    assert read.title_lines == ["cctbx.miller.fft_map"]
    assert read.gridding.n == fft_map.n_real()
    assert approx_equal(
        flex.linear_correlation(
            read.data.as_1d(),
            fft_map.real_map_unpadded(in_place=False).as_1d()).coefficient(),
        1)
    for first, last in [[(0, 0, 0), (3, 5, 6)], [(-1, -3, 4), (6, 4, 5)],
                        [(-2, 3, 0), (-2, 3, 0)], [(-2, 3, 0), (-2, 3, 3)],
                        [(-2, 3, 0), (-2, 8, 0)], [(-2, 3, 0), (-2, 9, 0)],
                        [(-2, 3, 0), (3, 3, 0)], [(-2, 3, 0), (4, 3, 0)]]:
        fft_map.as_xplor_map(file_name="tmp.map",
                             gridding_first=first,
                             gridding_last=last)
        read = iotbx.xplor.map.reader(file_name="tmp.map")
        assert read.title_lines == ["cctbx.miller.fft_map"]
        assert read.gridding.n == fft_map.n_real()
        assert read.gridding.first == first
        assert read.gridding.last == last
        real_map = fft_map.real_map()
        first_p1 = [i % n for i, n in zip(first, fft_map.n_real())]
        assert eps_eq(read.data[first], real_map[first_p1], eps=1.e-4)
        last_p1 = [i % n for i, n in zip(last, fft_map.n_real())]
        assert eps_eq(read.data[last], real_map[last_p1], eps=1.e-4)
        for x in range(1, 10):
            point = [
                iround(f + (l - f) * x / 10.) for f, l in zip(first, last)
            ]
            point_p1 = [i % n for i, n in zip(point, fft_map.n_real())]
            assert eps_eq(read.data[point], real_map[point_p1], eps=1.e-4)
Example #8
def recycle():
  for n,first,last in [[(5,3,4),(0,0,0),(3,5,6)],
                       [(4,3,5),(-1,-3,4),(6,4,5)],
                       [(3,4,5),(-2,3,0),(-2,3,0)],
                       [(3,4,5),(-2,3,0),(-2,3,3)],
                       [(3,4,5),(-2,3,0),(-2,8,0)],
                       [(3,4,5),(-2,3,0),(-2,9,0)],
                       [(3,4,5),(-2,3,0),(3,3,0)],
                       [(3,4,5),(-2,3,0),(4,3,0)]]:
    gridding = iotbx.xplor.map.gridding(
      n=n, first=first, last=last)
    flex_grid = gridding.as_flex_grid()
    data = 20000*flex.random_double(size=flex_grid.size_1d())-10000
    data.resize(flex_grid)
    stats = maptbx.statistics(data)
    iotbx.xplor.map.writer(
      file_name="tmp.map",
      title_lines=["regression test"],
      unit_cell=uctbx.unit_cell((10,20,30,80,90,100)),
      gridding=gridding,
      data=data,
      average=stats.mean(),
      standard_deviation=stats.sigma())
    read = iotbx.xplor.map.reader(file_name="tmp.map")
    assert read.title_lines == ["regression test"]
    assert read.gridding.n == n
    assert read.gridding.first == first
    assert read.gridding.last == last
    assert read.unit_cell.is_similar_to(
      uctbx.unit_cell((10,20,30,80,90,100)))
    assert eps_eq(read.average, stats.mean(), eps=1.e-4)
    assert eps_eq(read.standard_deviation, stats.sigma(), eps=1.e-4)
    assert read.data.origin() == first
    assert read.data.last(False) == last
    assert read.data.focus() == data.focus()
    assert eps_eq(read.data, data, eps=1.e-4)
Example #9
def exercise_term():
  t = gaussian.term(2,3)
  assert approx_equal(t.a, 2)
  assert approx_equal(t.b, 3)
  assert approx_equal(t.at_x_sq(4), 2*math.exp(-3*4))
  assert approx_equal(t.at_x(2), 2*math.exp(-3*4))
  eps = 1.e-5
  for ix in xrange(10):
    x = ix/10.
    assert eps_eq((t.at_x(x+eps)-t.at_x(x-eps))/(2*eps), t.gradient_dx_at_x(x))
  for f in [1,-1]:
    for t in [gaussian.term(f*2,3),
              gaussian.term(f*3,0),
              gaussian.term(f*4,1.e-4),
              gaussian.term(f*5,-1)]:
      exercise_gradient_dx(t)
      exercise_integral_dx(t)
      exercise_term_gradients_d_ab(t)
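exercise_term also compares t.gradient_dx_at_x against a central difference in x. For a term a*exp(-b*x^2) the derivative with respect to x is -2*a*b*x*exp(-b*x^2); a quick stand-alone version of that check (helper names here are hypothetical, not the cctbx API):

import math

def at_x(a, b, x):
    return a * math.exp(-b * x * x)

def gradient_dx_at_x(a, b, x):
    # d/dx of a*exp(-b*x^2)
    return -2. * a * b * x * math.exp(-b * x * x)

eps = 1.e-5
for ix in range(10):
    x = ix / 10.
    finite = (at_x(2., 3., x + eps) - at_x(2., 3., x - eps)) / (2 * eps)
    assert abs(finite - gradient_dx_at_x(2., 3., x)) < 1.e-6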
Example #10
def exercise_term():
    t = gaussian.term(2, 3)
    assert approx_equal(t.a, 2)
    assert approx_equal(t.b, 3)
    assert approx_equal(t.at_x_sq(4), 2 * math.exp(-3 * 4))
    assert approx_equal(t.at_x(2), 2 * math.exp(-3 * 4))
    eps = 1.e-5
    for ix in range(10):
        x = ix / 10.
        assert eps_eq((t.at_x(x + eps) - t.at_x(x - eps)) / (2 * eps),
                      t.gradient_dx_at_x(x))
    for f in [1, -1]:
        for t in [
                gaussian.term(f * 2, 3),
                gaussian.term(f * 3, 0),
                gaussian.term(f * 4, 1.e-4),
                gaussian.term(f * 5, -1)
        ]:
            exercise_gradient_dx(t)
            exercise_integral_dx(t)
            exercise_term_gradients_d_ab(t)
Example #11
def driver1(use_fortran_library=False):
    n = 25
    nbd = flex.int(n)
    x = flex.double(n)
    l = flex.double(n)
    u = flex.double(n)
    g = flex.double(n)
    if ("--Verbose" in sys.argv[1:]):
        iprint = 1000
    else:
        iprint = -1
    for i in range(0, n, 2):
        nbd[i] = 2
        l[i] = 1.0e0
        u[i] = 1.0e2
    for i in range(1, n, 2):
        nbd[i] = 2
        l[i] = -1.0e2
        u[i] = 1.0e2
    for i in range(n):
        x[i] = 3.0e0
    minimizer = lbfgsb.minimizer(n=n,
                                 m=5,
                                 l=l,
                                 u=u,
                                 nbd=nbd,
                                 factr=1.0e+7,
                                 pgtol=1.0e-5,
                                 iprint=iprint)
    f = 0
    while True:
        if (minimizer.process(x, f, g, use_fortran_library)):
            f = .25e0 * (x[0] - 1.e0)**2
            for i in range(1, n):
                f = f + (x[i] - x[i - 1]**2)**2
            f = 4.e0 * f
            t1 = x[1] - x[0]**2
            g[0] = 2.e0 * (x[0] - 1.e0) - 1.6e1 * x[0] * t1
            for i in range(1, n - 1):
                t2 = t1
                t1 = x[i + 1] - x[i]**2
                g[i] = 8.e0 * t2 - 1.6e1 * x[i] * t1
            g[n - 1] = 8.e0 * t1
        elif (minimizer.is_terminated()):
            break
    assert minimizer.task() \
        == "CONVERGENCE: REL_REDUCTION_OF_F <= FACTR*EPSMCH"
    assert minimizer.f_list().size() == minimizer.n_iteration() + 1
    assert minimizer.f_list()[-1] == minimizer.f()
    assert minimizer.f_list()[-2] == minimizer.f_previous_iteration()
    assert not minimizer.initial_x_replaced_by_projection()
    assert minimizer.is_constrained()
    assert minimizer.is_fully_constrained()
    if ("--soft" not in sys.argv[1:]):
        assert minimizer.n_fg_evaluations_total() == 27
        assert minimizer.n_fg_evaluations_iter() == 1
        assert minimizer.n_intervals_explored_cauchy_search_total() == 48
        assert minimizer.n_intervals_explored_cauchy_search_iter() == 1
        assert minimizer.n_skipped_bfgs_updates_total() == 0
        assert minimizer.n_bfgs_updates_total() == 22
        assert minimizer.subspace_argmin_is_within_box() == 1
        assert minimizer.n_free_variables() == 25
        assert minimizer.n_active_constraints() == 0
        assert minimizer.n_variables_leaving_active_set() == 0
        assert minimizer.n_variables_entering_active_set() == 0
        assert eps_eq(minimizer.theta_bfgs_matrix_current(), 23.2674689856)
        assert minimizer.f_previous_iteration() >= 0
        assert minimizer.floating_point_epsilon() \
            == scitbx.math.floating_point_epsilon_double_get()
        assert eps_eq(minimizer.factr_times_floating_point_epsilon(),
                      minimizer.factr() * minimizer.floating_point_epsilon())
        assert eps_eq(minimizer.two_norm_line_search_direction_vector(),
                      1.56259327735e-05)
        assert eps_eq(minimizer.two_norm_line_search_direction_vector_sq(),
                      minimizer.two_norm_line_search_direction_vector()**2)
        assert minimizer.accumulated_time_cauchy_search() > -0.02
        assert minimizer.accumulated_time_subspace_minimization() > -0.02
        assert minimizer.accumulated_time_line_search() > -0.02
        assert eps_eq(minimizer.slope_line_search_function_current(),
                      9.31496613169e-10)
        assert eps_eq(minimizer.slope_line_search_function_start(),
                      -3.6762390377e-09)
        assert eps_eq(minimizer.maximum_relative_step_length(), 1.37484166517)
        assert eps_eq(minimizer.relative_step_length_line_search(), 1.0)
        assert eps_eq(minimizer.infinity_norm_projected_gradient(),
                      0.000143369780806)
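driver1 minimizes the classic test problem from the original L-BFGS-B drivers, f(x) = 4*[0.25*(x0 - 1)^2 + sum_i (x_i - x_{i-1}^2)^2], with bounds on every variable. The block executed when minimizer.process() requests f and g computes exactly this objective and its gradient; a standalone version of that objective, spot-checked against numerical derivatives (pure Python, no scitbx), is sketched below.

def f_and_g(x):
    # Objective and gradient of the chained Rosenbrock-like test function.
    n = len(x)
    f = .25 * (x[0] - 1.) ** 2
    for i in range(1, n):
        f += (x[i] - x[i - 1] ** 2) ** 2
    f *= 4.
    g = [0.] * n
    t1 = x[1] - x[0] ** 2
    g[0] = 2. * (x[0] - 1.) - 16. * x[0] * t1
    for i in range(1, n - 1):
        t2 = t1
        t1 = x[i + 1] - x[i] ** 2
        g[i] = 8. * t2 - 16. * x[i] * t1
    g[n - 1] = 8. * t1
    return f, g

x = [3.] * 25
f, g = f_and_g(x)
eps = 1.e-6
for i in (0, 12, 24):  # spot-check a few gradient components
    xp = list(x); xp[i] += eps
    xm = list(x); xm[i] -= eps
    numerical = (f_and_g(xp)[0] - f_and_g(xm)[0]) / (2 * eps)
    assert abs(numerical - g[i]) < 1.e-4 * max(1., abs(g[i]))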
Example #12
def exercise_two_models_with_holes(processed_pdb):
    selection_strings = ["chain A", "chain B", "chain C", "chain D"]
    group = ncs.restraints.group.from_atom_selections(
        processed_pdb=processed_pdb,
        reference_selection_string=None,
        selection_strings=selection_strings,
        coordinate_sigma=0.05,
        b_factor_weight=0.4321,
        special_position_warnings_only=False)
    sites_cart = processed_pdb.all_chain_proxies.pdb_atoms.extract_xyz()
    ncs_operators = group.operators(sites_cart=sites_cart)
    out = StringIO()
    ncs_operators.show(sites_cart=sites_cart, out=out, prefix="{*")
    assert not show_diff(
        out.getvalue(), """\
{*NCS operator 1:
{*  Reference selection: "chain A"
{*      Other selection: "chain B"
{*  Number of atom pairs: 22
{*  Rotation={{-0.925533, 0.322815, -0.197938},
{*            {0.329616, 0.429511, -0.840758},
{*            {-0.186393, -0.843393, -0.503931}}
{*  Translation={{163.62}, {-13.0292}, {44.8533}}
{*  Histogram of differences:
{*    0.092573 - 0.238983: 1
{*    0.238983 - 0.385393: 2
{*    0.385393 - 0.531803: 7
{*    0.531803 - 0.678214: 3
{*    0.678214 - 0.824624: 4
{*    0.824624 - 0.971034: 5
{*  RMS difference with respect to the reference: 0.653687
{*NCS operator 2:
{*  Reference selection: "chain A"
{*      Other selection: "chain C"
{*  Number of atom pairs: 32
{*  Rotation={{-0.988874, -0.139883, -0.0506023},
{*            {0.0139383, -0.425808, 0.904706},
{*            {-0.1481, 0.893935, 0.423021}}
{*  Translation={{177.315}, {18.2319}, {1.24026}}
{*  Histogram of differences:
{*    0.291817 - 0.418479: 2
{*    0.418479 - 0.545141: 9
{*    0.545141 - 0.671802: 5
{*    0.671802 - 0.798464: 10
{*    0.798464 - 0.925126: 3
{*    0.925126 - 1.051787: 3
{*  RMS difference with respect to the reference: 0.677436
{*NCS operator 3:
{*  Reference selection: "chain A"
{*      Other selection: "chain D"
{*  Number of atom pairs: 24
{*  Rotation={{0.950594, -0.191857, 0.244055},
{*            {-0.192933, -0.981014, -0.0197252},
{*            {0.243205, -0.0283355, -0.969561}}
{*  Translation={{6.77797}, {56.2476}, {-5.96327}}
{*  Histogram of differences:
{*    0.270982 - 0.414517: 3
{*    0.414517 - 0.558053: 3
{*    0.558053 - 0.701588: 7
{*    0.701588 - 0.845124: 6
{*    0.845124 - 0.988659: 2
{*    0.988659 - 1.132195: 3
{*  RMS difference with respect to the reference: 0.724248
""")
    energies_sites_no_gradients = ncs_operators.energies_sites(
        sites_cart=sites_cart, compute_gradients=False)
    assert energies_sites_no_gradients.number_of_restraints == 110
    assert eps_eq(energies_sites_no_gradients.residual_sum, 7014.03969257)
    assert eps_eq(energies_sites_no_gradients.target, 7014.03969257)
    assert energies_sites_no_gradients.gradients is None
    assert eps_eq(energies_sites_no_gradients.rms_with_respect_to_average, [
        0.41226641576521778, 0.38139080907663186, 0.39748408968570492,
        0.40001937328488651
    ])
    energies_sites = ncs_operators.energies_sites(sites_cart=sites_cart)
    assert energies_sites_no_gradients.number_of_restraints \
        == energies_sites.number_of_restraints
    assert energies_sites_no_gradients.residual_sum \
        == energies_sites.residual_sum
    assert energies_sites_no_gradients.target \
        == energies_sites.target
    assert eps_eq(energies_sites.gradients.norm(), 3349.99455344)
    assert eps_eq(energies_sites.rms_with_respect_to_average,
                  energies_sites_no_gradients.rms_with_respect_to_average)
    site_labels = [
        '"' + atom.pdb_label_columns() + '"'
        for atom in processed_pdb.all_chain_proxies.pdb_atoms
    ]
    out = StringIO()
    energies_sites.show_distances_to_average(site_labels=site_labels,
                                             out=out,
                                             prefix="#^")
    assert not show_diff(out.getvalue(),
                         """\
#^NCS selection: "chain A"
#^                     Distance to NCS average
#^  " N   GLN A   1 ":   0.4263
#^  " CA  GLN A   1 ":   0.2141
#^  " C   GLN A   1 ":   0.4052
...
#^  " CA  THR B   6 ":   0.4001
#^  " C   THR B   6 ":   0.6281
#^NCS selection: "chain C"
#^                     Distance to NCS average
#^  " N   GLN C   1 ":   0.4135
#^  " CA  GLN C   1 ":   0.5070
...
#^  " C   SER D   4 ":   0.6943
#^  " CA BSER D   4 ":   0.4444
#^  " N   THR D   6 ":   0.3724
#^  " CA  THR D   6 ":   0.4129
#^  " C   THR D   6 ":   0.4017
""",
                         selections=[range(5),
                                     range(56, 62),
                                     range(-5, 0)])
    for ag, fg in zip(
            energies_sites.gradients,
            finite_difference_site_gradients(
                ncs_operators=ncs_operators,
                sites_cart=sites_cart,
                sites_average=energies_sites.sites_average)):
        assert eps_eq(ag, fg)
    #
    u_isos = processed_pdb.xray_structure().extract_u_iso_or_u_equiv()
    eng = group.energies_adp_iso(u_isos=u_isos,
                                 average_power=1,
                                 compute_gradients=False)
    energies_adp_iso_no_gradients = eng
    assert eng.number_of_restraints == 110
    assert eps_eq(eng.residual_sum, 1.11021057745)
    assert eps_eq(eng.target, eng.residual_sum)
    assert eng.gradients is None
    assert eps_eq(eng.rms_with_respect_to_average, [
        3.8233537528289001, 4.4894247897900934, 3.71150443476373,
        4.0839849076232442
    ])
    energies_adp_iso = group.energies_adp_iso(u_isos=u_isos, average_power=1)
    assert energies_adp_iso.number_of_restraints == eng.number_of_restraints
    assert eps_eq(energies_adp_iso.residual_sum, eng.residual_sum)
    assert eps_eq(energies_adp_iso.target, eng.target)
    assert eps_eq(energies_adp_iso.gradients.norm(), 4.50764745473)
    assert eps_eq(energies_adp_iso.rms_with_respect_to_average,
                  eng.rms_with_respect_to_average)
    out = StringIO()
    energies_adp_iso.show_differences_to_average(site_labels=site_labels,
                                                 out=out,
                                                 prefix="Y$")
    assert not show_diff(out.getvalue().replace(" 7.66 = ", " 7.65 = "),
                         """\
Y$NCS selection: "chain A"
Y$                       B-iso   NCS ave  Difference
Y$  " N   GLN A   1 ":   11.77 -   12.08 =  -0.3133
Y$  " CA  GLN A   1 ":    9.09 -    9.28 =  -0.1933
Y$  " C   GLN A   1 ":   15.40 -   12.05 =   3.3500
...
Y$  " CA  THR A   6 ":    3.98 -    7.65 =  -3.6750
Y$  " C   THR A   6 ":   14.64 -   10.99 =   3.6475
Y$NCS selection: "chain B"
Y$                       B-iso   NCS ave  Difference
Y$  " N   GLU B   2 ":   12.87 -   10.35 =   2.5225
...
Y$  " C   SER D   4 ":    7.29 -   10.45 =  -3.1575
Y$  " CA BSER D   4 ":    5.23 -    7.00 =  -1.7667
Y$  " N   THR D   6 ":    4.55 -    8.69 =  -4.1425
Y$  " CA  THR D   6 ":    8.78 -    7.65 =   1.1250
Y$  " C   THR D   6 ":   10.80 -   10.99 =  -0.1925
""",
                         selections=[range(5),
                                     range(32, 37),
                                     range(-5, 0)])
    for average_power in [1, 0.69, 0.35]:
        finite_difference_gradients = flex.double()
        eps = 1.e-6
        for i_u_iso in range(u_isos.size()):
            rs = []
            for signed_eps in [eps, -eps]:
                u_isos_eps = u_isos.deep_copy()
                u_isos_eps[i_u_iso] += signed_eps
                energies = group.energies_adp_iso(u_isos=u_isos_eps,
                                                  average_power=average_power,
                                                  compute_gradients=False)
                rs.append(energies.residual_sum)
            finite_difference_gradients.append((rs[0] - rs[1]) / (2 * eps))
        energies_adp_iso = group.energies_adp_iso(u_isos=u_isos,
                                                  average_power=average_power)
        assert eps_eq(energies_adp_iso.gradients, finite_difference_gradients)
    #
    groups = ncs.restraints.groups()
    groups.members.append(group)
    for coordinate_sigma, b_factor_weight in [(None, 1.234), (0.1, None)]:
        groups.members.append(
            ncs.restraints.group.from_atom_selections(
                processed_pdb=processed_pdb,
                reference_selection_string=None,
                selection_strings=selection_strings,
                coordinate_sigma=coordinate_sigma,
                b_factor_weight=b_factor_weight,
                special_position_warnings_only=False))
    energies_adp_iso = groups.energies_adp_iso(u_isos=u_isos, average_power=1)
    assert energies_adp_iso.number_of_restraints == 220
    assert eps_eq(energies_adp_iso.residual_sum, 4.2807726061)
    assert eps_eq(energies_adp_iso.target, energies_adp_iso.residual_sum)
    assert eps_eq(energies_adp_iso.gradients.norm(), 17.3806790658)
    energies_adp_iso = groups.energies_adp_iso(u_isos=u_isos,
                                               average_power=1,
                                               normalization=True)
    assert energies_adp_iso.number_of_restraints == 220
    assert eps_eq(energies_adp_iso.residual_sum, 4.2807726061)
    assert eps_eq(energies_adp_iso.target, energies_adp_iso.residual_sum / 220)
    assert eps_eq(energies_adp_iso.gradients.norm(), 17.3806790658 / 220)
    for rms in energies_adp_iso.rms_with_respect_to_averages:
        if (rms is not None):
            assert eps_eq(
                rms, energies_adp_iso_no_gradients.rms_with_respect_to_average)
    assert energies_adp_iso.rms_with_respect_to_averages[2] is None
    energies_sites = groups.energies_sites(sites_cart=sites_cart)
    assert energies_sites.number_of_restraints == 220
    assert eps_eq(energies_sites.residual_sum, 8767.54961571)
    assert eps_eq(energies_sites.target, energies_sites.residual_sum)
    assert eps_eq(energies_sites.gradients.norm(), 4187.49319181)
    energies_sites = groups.energies_sites(sites_cart=sites_cart,
                                           normalization=True)
    assert energies_sites.number_of_restraints == 220
    assert eps_eq(energies_sites.residual_sum, 8767.54961571)
    assert eps_eq(energies_sites.target, energies_sites.residual_sum / 220)
    assert eps_eq(energies_sites.gradients.norm(), 4187.49319181 / 220)
    for rms in energies_sites.rms_with_respect_to_averages:
        if (rms is not None):
            assert eps_eq(
                rms, energies_sites_no_gradients.rms_with_respect_to_average)
    assert energies_sites.rms_with_respect_to_averages[1] is None
    out = StringIO()
    groups.show_adp_iso_differences_to_average(u_isos=u_isos,
                                               site_labels=site_labels,
                                               out=out,
                                               prefix="W@")
    assert not show_diff(out.getvalue(),
                         """\
W@NCS restraint group 1:
W@  weight: 0.4321
W@  NCS selection: "chain A"
W@                         B-iso   NCS ave  Difference
W@    " N   GLN A   1 ":   11.77 -   12.08 =  -0.3133
...
W@    " C   THR D   6 ":   10.80 -   10.99 =  -0.1925
W@NCS restraint group 3:
W@  b_factor_weight: None  =>  restraints disabled
""",
                         selections=[range(5), range(-3, 0)])
    out = StringIO()
    groups.show_operators(sites_cart=sites_cart, out=out, prefix="K&")
    assert not show_diff(out.getvalue(),
                         """\
K&NCS restraint group 1:
K&  NCS operator 1:
K&    Reference selection: "chain A"
...
K&      0.824624 - 0.971034: 5
K&    RMS difference with respect to the reference: 0.653687
K&  NCS operator 2:
K&    Reference selection: "chain A"
K&        Other selection: "chain C"
K&    Number of atom pairs: 32
...
K&      0.845124 - 0.988659: 2
K&      0.988659 - 1.132195: 3
K&    RMS difference with respect to the reference: 0.724248
""",
                         selections=[range(3),
                                     range(15, 21),
                                     range(-3, 0)])
    out = StringIO()
    groups.show_sites_distances_to_average(sites_cart=sites_cart,
                                           site_labels=site_labels,
                                           out=out,
                                           prefix="[")
    assert not show_diff(out.getvalue(),
                         """\
[NCS restraint group 1:
[  coordinate_sigma: 0.05
[  weight:  400
[  NCS selection: "chain A"
[                       Distance to NCS average
[    " N   GLN A   1 ":   0.4263
...
[    " C   THR D   6 ":   0.4017
[NCS restraint group 2:
[  coordinate_sigma: None  =>  restraints disabled
[NCS restraint group 3:
[  coordinate_sigma: 0.1
[  weight:  100
[  NCS selection: "chain A"
[                       Distance to NCS average
[    " N   GLN A   1 ":   0.4263
...
[    " C   THR D   6 ":   0.4017
""",
                         selections=[range(6),
                                     range(120, 129),
                                     range(-1, 0)])
    #
    selection = groups.selection_restrained()
    assert selection.size() == 132
    assert selection.count(True) == 110
    out = StringIO()
    processed_pdb.show_atoms_without_ncs_restraints(
        ncs_restraints_groups=groups, out=out, prefix="%&")
    assert not show_diff(
        out.getvalue(), """\
%&Atoms without NCS restraints:
%&MODEL        1
%&ATOM     20  N   ALA B   3     111.517   7.175  -8.669  1.00 10.73           N
%&ATOM     21  CA  ALA B   3     112.152   8.103  -8.026  1.00 16.28           C
%&ATOM     22  C   ALA B   3     111.702   8.243  -5.903  1.00  9.19           C
%&ATOM     24  CA  SER B   4     111.797   9.689  -4.364  1.00  8.91           C
%&TER
%&ATOM     38  N   ALA C   3     109.043  27.391  28.663  1.00 15.05           N
%&ATOM     39  CA  ALA C   3     109.073  26.531  28.433  1.00  3.14           C
%&ATOM     40  C   ALA C   3     108.930  26.867  26.637  1.00 15.22           C
%&TER
%&ATOM     57  N   ALA D   3      65.439   9.903 -12.471  1.00  7.59           N
%&ATOM     58  CA  ALA D   3      65.019   9.566 -11.201  1.00 15.62           C
%&ATOM     59  C   ALA D   3      65.679  11.045 -11.097  1.00  2.65           C
%&ATOM     61  CA CSER D   4      65.657  12.870  -8.333  1.00  5.84           C
%&TER
%&ENDMDL
%&MODEL        2
%&ATOM     86  N   ALA B   3     111.984   7.364  -8.288  1.00  3.16           N
%&ATOM     87  CA  ALA B   3     112.389   8.456  -7.544  1.00 12.73           C
%&ATOM     88  C   ALA B   3     111.615   8.267  -6.238  1.00  7.17           C
%&ATOM     90  CA  SER B   4     111.707   9.131  -4.317  1.00 13.83           C
%&TER
%&ATOM    104  N   ALA C   3     109.237  27.879  29.334  1.00  6.45           N
%&ATOM    105  CA  ALA C   3     109.043  26.399  28.156  1.00 15.21           C
%&ATOM    106  C   ALA C   3     108.983  26.760  27.178  1.00 11.73           C
%&TER
%&ATOM    123  N   ALA D   3      65.188  10.335 -12.959  1.00  6.94           N
%&ATOM    124  CA  ALA D   3      65.543   9.614 -11.242  1.00  5.18           C
%&ATOM    125  C   ALA D   3      65.064  11.402 -10.648  1.00 16.01           C
%&ATOM    127  CA CSER D   4      65.127  12.969  -9.218  1.00  9.03           C
%&TER
%&ENDMDL
""")
    #
    for group in groups.members:
        assert group.registry.number_of_additional_isolated_sites == 0
    groups.register_additional_isolated_sites(number=10)
    for group in groups.members:
        assert group.registry.number_of_additional_isolated_sites == 10
    groups.register_additional_isolated_sites(number=3)
    for group in groups.members:
        assert group.registry.number_of_additional_isolated_sites == 13
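Several assertions above check rms_with_respect_to_average, i.e. for each NCS-related copy the RMS deviation of its restrained quantities from the average over all copies. A toy illustration of that quantity for scalar values such as isotropic B-factors (the numbers below are made up, and the averaging details of the real ncs.restraints code may differ):

import math

# Rows: NCS copies (e.g. chains A-D); columns: equivalent restrained atoms.
copies = [
    [11.77, 9.09, 15.40],
    [12.87, 9.51, 10.12],
    [12.02, 8.95, 11.30],
    [11.66, 9.57, 11.38],
]

n_atoms = len(copies[0])
averages = [sum(c[j] for c in copies) / len(copies) for j in range(n_atoms)]

rms_with_respect_to_average = []
for c in copies:
    mean_sq = sum((v - a) ** 2 for v, a in zip(c, averages)) / n_atoms
    rms_with_respect_to_average.append(math.sqrt(mean_sq))

print(rms_with_respect_to_average)  # one RMS deviation per copy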
Example #13
def driver1(use_fortran_library=False):
    n = 25
    nbd = flex.int(n)
    x = flex.double(n)
    l = flex.double(n)
    u = flex.double(n)
    g = flex.double(n)
    if "--Verbose" in sys.argv[1:]:
        iprint = 1000
    else:
        iprint = -1
    for i in xrange(0, n, 2):
        nbd[i] = 2
        l[i] = 1.0e0
        u[i] = 1.0e2
    for i in xrange(1, n, 2):
        nbd[i] = 2
        l[i] = -1.0e2
        u[i] = 1.0e2
    for i in xrange(n):
        x[i] = 3.0e0
    minimizer = lbfgsb.minimizer(n=n, m=5, l=l, u=u, nbd=nbd, factr=1.0e7, pgtol=1.0e-5, iprint=iprint)
    f = 0
    while True:
        if minimizer.process(x, f, g, use_fortran_library):
            f = 0.25e0 * (x[0] - 1.0e0) ** 2
            for i in xrange(1, n):
                f = f + (x[i] - x[i - 1] ** 2) ** 2
            f = 4.0e0 * f
            t1 = x[1] - x[0] ** 2
            g[0] = 2.0e0 * (x[0] - 1.0e0) - 1.6e1 * x[0] * t1
            for i in xrange(1, n - 1):
                t2 = t1
                t1 = x[i + 1] - x[i] ** 2
                g[i] = 8.0e0 * t2 - 1.6e1 * x[i] * t1
            g[n - 1] = 8.0e0 * t1
        elif minimizer.is_terminated():
            break
    assert minimizer.task() == "CONVERGENCE: REL_REDUCTION_OF_F <= FACTR*EPSMCH"
    assert minimizer.f_list().size() == minimizer.n_iteration() + 1
    assert minimizer.f_list()[-1] == minimizer.f()
    assert minimizer.f_list()[-2] == minimizer.f_previous_iteration()
    assert not minimizer.initial_x_replaced_by_projection()
    assert minimizer.is_constrained()
    assert minimizer.is_fully_constrained()
    if "--soft" not in sys.argv[1:]:
        assert minimizer.n_fg_evaluations_total() == 27
        assert minimizer.n_fg_evaluations_iter() == 1
        assert minimizer.n_intervals_explored_cauchy_search_total() == 48
        assert minimizer.n_intervals_explored_cauchy_search_iter() == 1
        assert minimizer.n_skipped_bfgs_updates_total() == 0
        assert minimizer.n_bfgs_updates_total() == 22
        assert minimizer.subspace_argmin_is_within_box() == 1
        assert minimizer.n_free_variables() == 25
        assert minimizer.n_active_constraints() == 0
        assert minimizer.n_variables_leaving_active_set() == 0
        assert minimizer.n_variables_entering_active_set() == 0
        assert eps_eq(minimizer.theta_bfgs_matrix_current(), 23.2674689856)
        assert minimizer.f_previous_iteration() >= 0
        assert minimizer.floating_point_epsilon() == scitbx.math.floating_point_epsilon_double_get()
        assert eps_eq(
            minimizer.factr_times_floating_point_epsilon(), minimizer.factr() * minimizer.floating_point_epsilon()
        )
        assert eps_eq(minimizer.two_norm_line_search_direction_vector(), 1.56259327735e-05)
        assert eps_eq(
            minimizer.two_norm_line_search_direction_vector_sq(), minimizer.two_norm_line_search_direction_vector() ** 2
        )
        assert minimizer.accumulated_time_cauchy_search() > -0.02
        assert minimizer.accumulated_time_subspace_minimization() > -0.02
        assert minimizer.accumulated_time_line_search() > -0.02
        assert eps_eq(minimizer.slope_line_search_function_current(), 9.31496613169e-10)
        assert eps_eq(minimizer.slope_line_search_function_start(), -3.6762390377e-09)
        assert eps_eq(minimizer.maximum_relative_step_length(), 1.37484166517)
        assert eps_eq(minimizer.relative_step_length_line_search(), 1.0)
        assert eps_eq(minimizer.infinity_norm_projected_gradient(), 0.000143369780806)
Example #14
def exercise_fit():
    x = flex.double((0.1, 0.2, 0.5))
    y = flex.double((3, 2, 1))
    sigmas = flex.double((0.04, 0.02, 0.01))
    gf = gaussian.fit(x, y, sigmas, gaussian.sum((1, 2), (4, 5)))
    assert approx_equal(gf.array_of_a(), (1, 2))
    assert approx_equal(gf.array_of_b(), (4, 5))
    assert approx_equal(gf.c(), 0)
    assert not gf.use_c()
    assert approx_equal(gf.table_x(), x)
    assert approx_equal(gf.table_y(), y)
    assert approx_equal(gf.table_sigmas(), sigmas)
    assert approx_equal(
        gf.fitted_values(),
        [2.8632482881537511, 2.4896052951221748, 0.94088903489182252])
    reference_gaussian = gaussian.sum((1, 2, 3), (4, 5, 6))
    gf = gaussian.fit(x, reference_gaussian, sigmas,
                      gaussian.sum((1, 2), (4, 5)))
    assert approx_equal(gf.array_of_a(), (1, 2))
    assert approx_equal(gf.array_of_b(), (4, 5))
    assert approx_equal(gf.c(), 0)
    assert approx_equal(gf.table_x(), x)
    assert approx_equal(gf.table_y(), reference_gaussian.at_x(x))
    assert approx_equal(gf.table_sigmas(), sigmas)
    assert isinstance(gf.sort(), gaussian.fit)
    assert gf.sort().table_x() == gf.table_x()
    assert gf.sort().table_y() == gf.table_y()
    assert gf.sort().table_sigmas() == gf.table_sigmas()
    assert approx_equal(gf.differences(),
                        gf.at_x(x) - reference_gaussian.at_x(x))
    c_fit = gaussian.fit(
        flex.double([
            0.0, 0.066666666666666666, 0.13333333333333333, 0.2,
            0.26666666666666666
        ]),
        gaussian.sum((2.657506, 1.078079, 1.490909, -4.2410698, 0.71379101),
                     (14.780758, 0.776775, 42.086842, -0.000294, 0.239535),
                     4.2979832), flex.double(5, 0.0005),
        gaussian.sum((1.1423916, 4.1728425, 0.61716694),
                     (0.50733125, 14.002512, 41.978928)))
    differences = flex.double([
        -0.064797341823577881, 0.003608505180995536, 0.098159179757290715,
        0.060724224581695019, -0.10766283796372011
    ])
    assert approx_equal(c_fit.differences(), differences)
    assert approx_equal(
        c_fit.significant_relative_errors(),
        [0.0107212, 0.0005581, 0.0213236, 0.0169304, 0.0385142])
    gf = gaussian.fit(x, reference_gaussian, flex.double(x.size(), 1),
                      gaussian.sum((1, 2), (4, 5)))
    assert list(gf.bound_flags(False, False)) == [False, False, False, False]
    assert list(gf.bound_flags(True, False)) == [True, False, True, False]
    assert list(gf.bound_flags(False, True)) == [False, True, False, True]
    sgf = gf.apply_shifts(flex.double((3, -3, 4, 6)), True)
    assert approx_equal(sgf.array_of_a(), (1 + 3, 2 + 4))
    assert approx_equal(sgf.array_of_b(),
                        ((math.sqrt(4) - 3)**2, (math.sqrt(5) + 6)**2))
    assert approx_equal(sgf.c(), 0)
    assert not sgf.use_c()
    sgf = gf.apply_shifts(flex.double((3, -3, 4, 6)), False)
    assert approx_equal(sgf.array_of_a(), (1 + 3, 2 + 4))
    assert approx_equal(sgf.array_of_b(), (4 - 3, 5 + 6))
    assert approx_equal(sgf.c(), 0)
    assert not sgf.use_c()
    differences = sgf.differences()
    for use_sigmas in [False, True]:
        assert approx_equal(sgf.target_function(2, use_sigmas, differences),
                            25.0320634)
        assert approx_equal(sgf.target_function(4, use_sigmas, differences),
                            256.2682575)
        assert approx_equal(sgf.gradients_d_abc(2, use_sigmas, differences),
                            [15.6539271, -4.1090114, 10.4562306, -1.6376781])
    gfc = gaussian.fit(x, reference_gaussian, flex.double(x.size(), 1),
                       gaussian.sum((1, 2), (4, 5), 6))
    assert list(gfc.bound_flags(False,
                                False)) == [False, False, False, False, False]
    assert list(gfc.bound_flags(True,
                                False)) == [True, False, True, False, True]
    assert list(gfc.bound_flags(False,
                                True)) == [False, True, False, True, False]
    sgfc = gfc.apply_shifts(flex.double((3, -3, 4, 6, -5)), True)
    assert approx_equal(sgfc.array_of_a(), (1 + 3, 2 + 4))
    assert approx_equal(sgfc.array_of_b(),
                        ((math.sqrt(4) - 3)**2, (math.sqrt(5) + 6)**2))
    assert approx_equal(sgfc.c(), 6 - 5)
    assert sgfc.use_c()
    sgfc = gfc.apply_shifts(flex.double((3, -3, 4, 6, -5)), False)
    assert approx_equal(sgfc.array_of_a(), (1 + 3, 2 + 4))
    assert approx_equal(sgfc.array_of_b(), (4 - 3, 5 + 6))
    assert approx_equal(sgfc.c(), 6 - 5)
    assert sgfc.use_c()
    differences = sgfc.differences()
    for use_sigmas in [False, True]:
        assert approx_equal(sgfc.target_function(2, use_sigmas, differences),
                            44.8181444)
        assert approx_equal(sgfc.target_function(4, use_sigmas, differences),
                            757.3160329)
        assert approx_equal(
            sgfc.gradients_d_abc(2, use_sigmas, differences),
            [21.1132071, -6.0532695, 13.6638274, -2.2460994, 22.7860809])
    differences = c_fit.differences()
    gabc = c_fit.gradients_d_abc(2, False, differences)
    assert approx_equal(gabc, [
        -0.016525391425206391, 0.0074465239375589107, 0.020055876723667564,
        0.00054794635257838251, -0.018754011379726425, -0.0011194004809549143
    ])
    assert approx_equal(
        c_fit.gradients_d_shifts(flex.double((0.1, 0.4, 0.2, 0.5, 0.3, 0.6)),
                                 gabc),
        [-0.0165254, 0.01656512, 0.0200559, 0.0046488, -0.0187540, -0.0158487])
    g5c = gaussian.sum(
        (2.657505989074707, 1.0780789852142334, 1.4909089803695679,
         -4.2410697937011719, 0.71379101276397705),
        (14.780757904052734, 0.77677500247955322, 42.086841583251953,
         -0.00029399999766610563, 0.23953500390052795), 4.2979831695556641)
    for include_constant_term in (False, True):
        a = flex.double(g5c.array_of_a())
        b = flex.double(g5c.array_of_b())
        permutation = flex.sort_permutation(data=flex.abs(a), reverse=True)[:4]
        gf = gaussian.fit(
            flex.double([0]), g5c, flex.double(1, 1),
            gaussian.sum(iter(a.select(permutation)),
                         iter(b.select(permutation)), 0,
                         include_constant_term))
        assert approx_equal(gf.differences(), [-5.01177418232])
        shifts = flex.double(8, -1)
        if (include_constant_term): shifts.append(-.2)
        sgf = gf.apply_shifts(shifts, False)
        assert approx_equal(sgf.array_of_a(),
                            [-5.2410698, 1.657506, 0.49090898, 0.078078985])
        assert approx_equal(sgf.array_of_b(),
                            [-1.0002940, 13.780758, 41.086842, -0.223225])
        if (include_constant_term):
            assert approx_equal(sgf.c(), -.2)
        expected_gradients = [1, 0, 1, 0, 1, 0, 1, 0]
        if (include_constant_term): expected_gradients.append(1)
        assert approx_equal(fit_finite_diff_gradients(sgf, 0),
                            expected_gradients,
                            eps=1.e-4)
        for i in range(10):
            gf = gaussian.fit(flex.double([i / 10.]), g5c, flex.double(1, 1),
                              sgf)
            differences = flex.double([0.5])
            assert approx_equal(gf.gradients_d_abc(2, False, differences),
                                fit_finite_diff_gradients(gf,
                                                          gf.table_x()[0]),
                                eps=1.e-3)
            for sigma in [0.04, 0.02, 0.01]:
                gf = gaussian.fit(flex.double([i / 20.]), g5c,
                                  flex.double([sigma]), sgf)
                for power in [2, 4]:
                    for use_sigmas in [False, True]:
                        differences = gf.differences()
                        an = gf.gradients_d_abc(power, use_sigmas, differences)
                        fi = fit_finite_diff_target_gradients(
                            gf, power, use_sigmas)
                        assert eps_eq(an, fi, eps=1.e-3)
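The apply_shifts(..., True) calls above expect the b shifts to be applied in sqrt(b) space, i.e. the new exponent is (sqrt(b) + shift)^2, which keeps it non-negative; with the flag False the shifts are added directly. A small sketch of that reparametrization, with the shifts interleaved per term as the bound_flags ordering suggests (this mirrors the assertions above, not the actual cctbx implementation):

import math

def apply_shifts(a_values, b_values, shifts, sqrt_b_space):
    # shifts are interleaved per term: (da_1, db_1, da_2, db_2, ...).
    new_a, new_b = [], []
    for i, (a, b) in enumerate(zip(a_values, b_values)):
        da, db = shifts[2 * i], shifts[2 * i + 1]
        new_a.append(a + da)
        if sqrt_b_space:
            # Shift sqrt(b), then square: the refined exponent stays >= 0.
            new_b.append((math.sqrt(b) + db) ** 2)
        else:
            new_b.append(b + db)
    return new_a, new_b

a, b = apply_shifts([1, 2], [4, 5], [3, -3, 4, 6], True)
assert a == [1 + 3, 2 + 4]
assert abs(b[0] - (math.sqrt(4) - 3) ** 2) < 1.e-12
assert abs(b[1] - (math.sqrt(5) + 6) ** 2) < 1.e-12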
Example #15
def exercise_gradient_dx(gaussian, x_max=1., n_points=50):
    for i in range(n_points + 1):
        x = x_max * i / n_points
        grad_finite = finite_gradient_dx_at_x(gaussian, x)
        grad_analytical = gaussian.gradient_dx_at_x(x)
        assert eps_eq(grad_finite, grad_analytical)
Example #16
def exercise_minimizer_interface():
    n = 25
    m = 5
    l = flex.double(n, -1)
    u = flex.double(n, 1)
    nbd = flex.int(n)
    factr = 1.0e+7
    pgtol = 1.0e-5
    iprint = -1
    for enable_stp_init in [False, True]:
        minimizer = lbfgsb.ext.minimizer(n, m, l, u, nbd, enable_stp_init,
                                         factr, pgtol, iprint)
        assert minimizer.n() == n
        assert minimizer.m() == m
        assert minimizer.l().id() == l.id()
        assert minimizer.u().id() == u.id()
        assert minimizer.nbd().id() == nbd.id()
        assert minimizer.enable_stp_init() == enable_stp_init
        assert eps_eq(minimizer.factr(), factr)
        assert eps_eq(minimizer.pgtol(), pgtol)
        assert eps_eq(minimizer.iprint(), iprint)
        assert not minimizer.requests_f_and_g()
        assert not minimizer.is_terminated()
        assert minimizer.task() == "START"
        x = flex.double(n, 0)
        f = 1
        g = flex.double(n, -1)
        assert minimizer.process(x, f, g, False)
        assert minimizer.task() == "FG_START"
        assert minimizer.f() == 0
        if (not enable_stp_init):
            try:
                minimizer.requests_stp_init()
            except RuntimeError as e:
                assert str(e).endswith(
                    ": SCITBX_ASSERT(enable_stp_init()) failure.")
            else:
                raise Exception_expected
        else:
            assert not minimizer.process(x, f, g)
            assert minimizer.requests_stp_init()
            assert approx_equal(minimizer.relative_step_length_line_search(),
                                0.2)
            assert approx_equal(minimizer.current_search_direction(), [1] * n)
            minimizer.set_relative_step_length_line_search(value=0.3)
            assert approx_equal(minimizer.relative_step_length_line_search(),
                                0.3)
        assert minimizer.process(x, f, g)
        assert minimizer.f() == 1
        assert minimizer.task() == "FG_LNSRCH"
        assert not minimizer.is_terminated()
        if (not enable_stp_init):
            assert approx_equal(x, [0.2] * n)
        else:
            assert approx_equal(x, [0.3] * n)
        minimizer.request_stop()
        assert not minimizer.process(x, f, g)
        assert minimizer.task() == "STOP: NO RESTORE"
        minimizer.request_stop_with_restore()
        assert minimizer.task() == "STOP: CPU"
        minimizer.request_restart()
        assert not minimizer.requests_f_and_g()
        assert not minimizer.is_terminated()
        assert minimizer.task() == "START"
        minimizer = lbfgsb.minimizer(n=n)
        assert minimizer.l().size() == n
        assert minimizer.u().size() == n
        assert minimizer.nbd().size() == n
        assert minimizer.nbd().all_eq(0)
Example #17
def recycle(miller_array, column_root_label, column_types=None, verbose=0):
    original_dataset = to_mtz(miller_array, column_root_label, column_types)
    label_decorator = mtz.label_decorator()
    written = original_dataset.mtz_object()
    if 0 or verbose:
        written.show_summary()
    original_dataset.mtz_object().write(file_name="tmp_iotbx_mtz.mtz")
    restored = mtz.object(file_name="tmp_iotbx_mtz.mtz")
    if 0 or verbose:
        restored.show_summary()
    assert restored.title() == written.title()
    assert [line.rstrip() for line in restored.history()] == list(written.history())
    assert restored.space_group_name() == written.space_group_name()
    assert restored.space_group_number() == written.space_group_number()
    assert restored.space_group() == written.space_group()
    assert restored.point_group_name() == written.point_group_name()
    assert restored.lattice_centring_type() == written.lattice_centring_type()
    assert restored.n_batches() == written.n_batches()
    assert restored.n_reflections() == written.n_reflections()
    assert eps_eq(restored.max_min_resolution(), written.max_min_resolution(), eps=1.0e-5)
    assert restored.n_crystals() == written.n_crystals()
    assert restored.n_active_crystals() == written.n_active_crystals()
    assert restored.n_crystals() == 2
    for rx, wx in zip(restored.crystals(), written.crystals()):
        assert rx.name() == wx.name()
        assert rx.project_name() == wx.project_name()
        assert rx.unit_cell().is_similar_to(wx.unit_cell())
        assert rx.n_datasets() == wx.n_datasets()
        for rd, wd in zip(rx.datasets(), wx.datasets()):
            assert rd.name() == wd.name()
            assert rd.wavelength() == wd.wavelength()
            assert rd.n_columns() == wd.n_columns()
    miller_set = restored.crystals()[1].miller_set()
    assert miller_set.indices().size() == restored.n_reflections()
    crystal_symmetry = restored.crystals()[1].crystal_symmetry()
    restored_dataset = restored.crystals()[1].datasets()[0]
    if not miller_array.anomalous_flag():
        if miller_array.sigmas() is None:
            if miller_array.is_complex_array():
                assert restored_dataset.n_columns() == 3 + 2
                group = restored.extract_complex(
                    column_label_ampl=column_root_label, column_label_phi=label_decorator.phases(column_root_label)
                )
            elif miller_array.is_hendrickson_lattman_array():
                assert restored_dataset.n_columns() == 3 + 4
                deco = label_decorator.hendrickson_lattman
                group = restored.extract_hendrickson_lattman(
                    column_label_a=deco(column_root_label, 0),
                    column_label_b=deco(column_root_label, 1),
                    column_label_c=deco(column_root_label, 2),
                    column_label_d=deco(column_root_label, 3),
                )
            else:
                assert restored_dataset.n_columns() == 3 + 1
                group = restored.extract_reals(column_label=column_root_label)
            r = miller.array(
                miller_set=miller.set(crystal_symmetry=crystal_symmetry, indices=group.indices, anomalous_flag=False),
                data=group.data,
            )
        else:
            assert restored_dataset.n_columns() == 3 + 2
            group = restored.extract_observations(
                column_label_data=column_root_label, column_label_sigmas=label_decorator.sigmas(column_root_label)
            )
            r = miller.array(
                miller_set=miller.set(crystal_symmetry=crystal_symmetry, indices=group.indices, anomalous_flag=False),
                data=group.data,
                sigmas=group.sigmas,
            )
    else:
        if miller_array.sigmas() is None:
            if miller_array.is_complex_array():
                assert restored_dataset.n_columns() == 3 + 4
                group = restored.extract_complex_anomalous(
                    column_label_ampl_plus=label_decorator.anomalous(column_root_label, "+"),
                    column_label_phi_plus=label_decorator.phases(column_root_label, "+"),
                    column_label_ampl_minus=label_decorator.anomalous(column_root_label, "-"),
                    column_label_phi_minus=label_decorator.phases(column_root_label, "-"),
                )
            elif miller_array.is_hendrickson_lattman_array():
                assert restored_dataset.n_columns() == 3 + 8
                deco = label_decorator.hendrickson_lattman
                group = restored.extract_hendrickson_lattman_anomalous(
                    column_label_a_plus=deco(column_root_label, 0, "+"),
                    column_label_b_plus=deco(column_root_label, 1, "+"),
                    column_label_c_plus=deco(column_root_label, 2, "+"),
                    column_label_d_plus=deco(column_root_label, 3, "+"),
                    column_label_a_minus=deco(column_root_label, 0, "-"),
                    column_label_b_minus=deco(column_root_label, 1, "-"),
                    column_label_c_minus=deco(column_root_label, 2, "-"),
                    column_label_d_minus=deco(column_root_label, 3, "-"),
                )
            else:
                assert restored_dataset.n_columns() == 3 + 2
                group = restored.extract_reals_anomalous(
                    column_label_plus=label_decorator.anomalous(column_root_label, "+"),
                    column_label_minus=label_decorator.anomalous(column_root_label, "-"),
                )
            r = miller.array(
                miller_set=miller.set(crystal_symmetry=crystal_symmetry, indices=group.indices, anomalous_flag=True),
                data=group.data,
            )
        else:
            assert restored_dataset.n_columns() == 3 + 4
            group = restored.extract_observations_anomalous(
                column_label_data_plus=label_decorator.anomalous(column_root_label, "+"),
                column_label_sigmas_plus=label_decorator.sigmas(column_root_label, "+"),
                column_label_data_minus=label_decorator.anomalous(column_root_label, "-"),
                column_label_sigmas_minus=label_decorator.sigmas(column_root_label, "-"),
            )
            r = miller.array(
                miller_set=miller.set(crystal_symmetry=crystal_symmetry, indices=group.indices, anomalous_flag=True),
                data=group.data,
                sigmas=group.sigmas,
            )
    verify_miller_arrays(miller_array, r)
    restored_miller_arrays = restored.as_miller_arrays()
    assert len(restored_miller_arrays) == 1
    thff = restored_miller_arrays[0].info().type_hints_from_file
    assert thff is not None
    assert miller_array.is_hendrickson_lattman_array() == (thff == "hendrickson_lattman")
    verify_miller_arrays(miller_array, restored_miller_arrays[0])
    mtz_object = miller_array.as_mtz_dataset(column_root_label=column_root_label).mtz_object()
    restored_miller_arrays = mtz_object.as_miller_arrays()
    assert len(restored_miller_arrays) == 1
    verify_miller_arrays(miller_array, restored_miller_arrays[0])
    if miller_array.is_bool_array() or miller_array.is_integer_array() or miller_array.is_real_array():
        cb_op = miller_array.change_of_basis_op_to_niggli_cell()
        mtz_object.change_basis_in_place(cb_op=cb_op)
        cb_array = miller_array.change_basis(cb_op=cb_op)
        assert mtz_object.space_group() == cb_array.space_group()
        for mtz_crystal in mtz_object.crystals():
            assert mtz_crystal.unit_cell().is_similar_to(cb_array.unit_cell())
        restored_miller_arrays = mtz_object.as_miller_arrays()
        assert len(restored_miller_arrays) == 1
        verify_miller_arrays(cb_array, restored_miller_arrays[0])
        mtz_object.change_basis_in_place(cb_op=cb_op.inverse())
        assert mtz_object.space_group() == miller_array.space_group()
        for mtz_crystal in mtz_object.crystals():
            assert mtz_crystal.unit_cell().is_similar_to(miller_array.unit_cell())
        restored_miller_arrays = mtz_object.as_miller_arrays()
        assert len(restored_miller_arrays) == 1
        verify_miller_arrays(miller_array, restored_miller_arrays[0])
Example #18
def exercise_two_models_with_holes(processed_pdb):
  selection_strings=["chain A", "chain B", "chain C", "chain D"]
  group = ncs.restraints.group.from_atom_selections(
    processed_pdb=processed_pdb,
    reference_selection_string=None,
    selection_strings=selection_strings,
    coordinate_sigma=0.05,
    b_factor_weight=0.4321,
    special_position_warnings_only=False)
  sites_cart = processed_pdb.all_chain_proxies.pdb_atoms.extract_xyz()
  ncs_operators = group.operators(sites_cart=sites_cart)
  out = StringIO()
  ncs_operators.show(sites_cart=sites_cart, out=out, prefix="{*")
  assert not show_diff(out.getvalue(), """\
{*NCS operator 1:
{*  Reference selection: "chain A"
{*      Other selection: "chain B"
{*  Number of atom pairs: 22
{*  Rotation={{-0.925533, 0.322815, -0.197938},
{*            {0.329616, 0.429511, -0.840758},
{*            {-0.186393, -0.843393, -0.503931}}
{*  Translation={{163.62}, {-13.0292}, {44.8533}}
{*  Histogram of differences:
{*    0.092573 - 0.238983: 1
{*    0.238983 - 0.385393: 2
{*    0.385393 - 0.531803: 7
{*    0.531803 - 0.678214: 3
{*    0.678214 - 0.824624: 4
{*    0.824624 - 0.971034: 5
{*  RMS difference with respect to the reference: 0.653687
{*NCS operator 2:
{*  Reference selection: "chain A"
{*      Other selection: "chain C"
{*  Number of atom pairs: 32
{*  Rotation={{-0.988874, -0.139883, -0.0506023},
{*            {0.0139383, -0.425808, 0.904706},
{*            {-0.1481, 0.893935, 0.423021}}
{*  Translation={{177.315}, {18.2319}, {1.24026}}
{*  Histogram of differences:
{*    0.291817 - 0.418479: 2
{*    0.418479 - 0.545141: 9
{*    0.545141 - 0.671802: 5
{*    0.671802 - 0.798464: 10
{*    0.798464 - 0.925126: 3
{*    0.925126 - 1.051787: 3
{*  RMS difference with respect to the reference: 0.677436
{*NCS operator 3:
{*  Reference selection: "chain A"
{*      Other selection: "chain D"
{*  Number of atom pairs: 24
{*  Rotation={{0.950594, -0.191857, 0.244055},
{*            {-0.192933, -0.981014, -0.0197252},
{*            {0.243205, -0.0283355, -0.969561}}
{*  Translation={{6.77797}, {56.2476}, {-5.96327}}
{*  Histogram of differences:
{*    0.270982 - 0.414517: 3
{*    0.414517 - 0.558053: 3
{*    0.558053 - 0.701588: 7
{*    0.701588 - 0.845124: 6
{*    0.845124 - 0.988659: 2
{*    0.988659 - 1.132195: 3
{*  RMS difference with respect to the reference: 0.724248
""")
  energies_sites_no_gradients = ncs_operators.energies_sites(
    sites_cart=sites_cart, compute_gradients=False)
  assert energies_sites_no_gradients.number_of_restraints == 110
  assert eps_eq(energies_sites_no_gradients.residual_sum, 7014.03969257)
  assert eps_eq(energies_sites_no_gradients.target, 7014.03969257)
  assert energies_sites_no_gradients.gradients is None
  assert eps_eq(energies_sites_no_gradients.rms_with_respect_to_average,
    [0.41226641576521778, 0.38139080907663186,
     0.39748408968570492, 0.40001937328488651])
  energies_sites = ncs_operators.energies_sites(sites_cart=sites_cart)
  assert energies_sites_no_gradients.number_of_restraints \
      == energies_sites.number_of_restraints
  assert energies_sites_no_gradients.residual_sum \
      == energies_sites.residual_sum
  assert energies_sites_no_gradients.target \
      == energies_sites.target
  assert eps_eq(energies_sites.gradients.norm(), 3349.99455344)
  assert eps_eq(energies_sites.rms_with_respect_to_average,
   energies_sites_no_gradients.rms_with_respect_to_average)
  site_labels = [
    '"'+atom.pdb_label_columns()+'"' for atom in
      processed_pdb.all_chain_proxies.pdb_atoms]
  out = StringIO()
  energies_sites.show_distances_to_average(
    site_labels=site_labels, out=out, prefix="#^")
  assert not show_diff(out.getvalue(), """\
#^NCS selection: "chain A"
#^                     Distance to NCS average
#^  " N   GLN A   1 ":   0.4263
#^  " CA  GLN A   1 ":   0.2141
#^  " C   GLN A   1 ":   0.4052
...
#^  " CA  THR B   6 ":   0.4001
#^  " C   THR B   6 ":   0.6281
#^NCS selection: "chain C"
#^                     Distance to NCS average
#^  " N   GLN C   1 ":   0.4135
#^  " CA  GLN C   1 ":   0.5070
...
#^  " C   SER D   4 ":   0.6943
#^  " CA BSER D   4 ":   0.4444
#^  " N   THR D   6 ":   0.3724
#^  " CA  THR D   6 ":   0.4129
#^  " C   THR D   6 ":   0.4017
""", selections=[range(5),range(56,62),range(-5,0)])
  for ag,fg in zip(energies_sites.gradients,
                   finite_difference_site_gradients(
                     ncs_operators=ncs_operators,
                     sites_cart=sites_cart,
                     sites_average=energies_sites.sites_average)):
    assert eps_eq(ag, fg)
  #
  u_isos = processed_pdb.xray_structure().extract_u_iso_or_u_equiv()
  eng = group.energies_adp_iso(
    u_isos=u_isos, average_power=1, compute_gradients=False)
  energies_adp_iso_no_gradients = eng
  assert eng.number_of_restraints == 110
  assert eps_eq(eng.residual_sum, 1.11021057745)
  assert eps_eq(eng.target, eng.residual_sum)
  assert eng.gradients is None
  assert eps_eq(eng.rms_with_respect_to_average,
    [3.8233537528289001, 4.4894247897900934,
     3.71150443476373, 4.0839849076232442])
  energies_adp_iso = group.energies_adp_iso(u_isos=u_isos, average_power=1)
  assert energies_adp_iso.number_of_restraints == eng.number_of_restraints
  assert eps_eq(energies_adp_iso.residual_sum, eng.residual_sum)
  assert eps_eq(energies_adp_iso.target, eng.target)
  assert eps_eq(energies_adp_iso.gradients.norm(), 4.50764745473)
  assert eps_eq(energies_adp_iso.rms_with_respect_to_average,
                eng.rms_with_respect_to_average)
  out = StringIO()
  energies_adp_iso.show_differences_to_average(
    site_labels=site_labels, out=out, prefix="Y$")
  assert not show_diff(out.getvalue().replace(" 7.66 = ", " 7.65 = "), """\
Y$NCS selection: "chain A"
Y$                       B-iso   NCS ave  Difference
Y$  " N   GLN A   1 ":   11.77 -   12.08 =  -0.3133
Y$  " CA  GLN A   1 ":    9.09 -    9.28 =  -0.1933
Y$  " C   GLN A   1 ":   15.40 -   12.05 =   3.3500
...
Y$  " CA  THR A   6 ":    3.98 -    7.65 =  -3.6750
Y$  " C   THR A   6 ":   14.64 -   10.99 =   3.6475
Y$NCS selection: "chain B"
Y$                       B-iso   NCS ave  Difference
Y$  " N   GLU B   2 ":   12.87 -   10.35 =   2.5225
...
Y$  " C   SER D   4 ":    7.29 -   10.45 =  -3.1575
Y$  " CA BSER D   4 ":    5.23 -    7.00 =  -1.7667
Y$  " N   THR D   6 ":    4.55 -    8.69 =  -4.1425
Y$  " CA  THR D   6 ":    8.78 -    7.65 =   1.1250
Y$  " C   THR D   6 ":   10.80 -   10.99 =  -0.1925
""", selections=[range(5),range(32,37),range(-5,0)])
  for average_power in [1, 0.69, 0.35]:
    finite_difference_gradients = flex.double()
    eps = 1.e-6
    for i_u_iso in xrange(u_isos.size()):
      rs = []
      for signed_eps in [eps, -eps]:
        u_isos_eps = u_isos.deep_copy()
        u_isos_eps[i_u_iso] += signed_eps
        energies = group.energies_adp_iso(
          u_isos=u_isos_eps,
          average_power=average_power,
          compute_gradients=False)
        rs.append(energies.residual_sum)
      finite_difference_gradients.append((rs[0]-rs[1])/(2*eps))
    energies_adp_iso = group.energies_adp_iso(
      u_isos=u_isos, average_power=average_power)
    assert eps_eq(energies_adp_iso.gradients, finite_difference_gradients)
  #
  groups = ncs.restraints.groups()
  groups.members.append(group)
  for coordinate_sigma,b_factor_weight in [
        (None,1.234),
        (0.1,None)]:
    groups.members.append(
      ncs.restraints.group.from_atom_selections(
        processed_pdb=processed_pdb,
        reference_selection_string=None,
        selection_strings=selection_strings,
        coordinate_sigma=coordinate_sigma,
        b_factor_weight=b_factor_weight,
        special_position_warnings_only=False))
  energies_adp_iso = groups.energies_adp_iso(u_isos=u_isos, average_power=1)
  assert energies_adp_iso.number_of_restraints == 220
  assert eps_eq(energies_adp_iso.residual_sum, 4.2807726061)
  assert eps_eq(energies_adp_iso.target, energies_adp_iso.residual_sum)
  assert eps_eq(energies_adp_iso.gradients.norm(), 17.3806790658)
  energies_adp_iso = groups.energies_adp_iso(
    u_isos=u_isos, average_power=1, normalization=True)
  assert energies_adp_iso.number_of_restraints == 220
  assert eps_eq(energies_adp_iso.residual_sum, 4.2807726061)
  assert eps_eq(energies_adp_iso.target, energies_adp_iso.residual_sum/220)
  assert eps_eq(energies_adp_iso.gradients.norm(), 17.3806790658/220)
  for rms in energies_adp_iso.rms_with_respect_to_averages:
    if (rms is not None):
      assert eps_eq(
        rms, energies_adp_iso_no_gradients.rms_with_respect_to_average)
  assert energies_adp_iso.rms_with_respect_to_averages[2] is None
  energies_sites = groups.energies_sites(sites_cart=sites_cart)
  assert energies_sites.number_of_restraints == 220
  assert eps_eq(energies_sites.residual_sum, 8767.54961571)
  assert eps_eq(energies_sites.target, energies_sites.residual_sum)
  assert eps_eq(energies_sites.gradients.norm(), 4187.49319181)
  energies_sites = groups.energies_sites(
    sites_cart=sites_cart, normalization=True)
  assert energies_sites.number_of_restraints == 220
  assert eps_eq(energies_sites.residual_sum, 8767.54961571)
  assert eps_eq(energies_sites.target, energies_sites.residual_sum/220)
  assert eps_eq(energies_sites.gradients.norm(), 4187.49319181/220)
  for rms in energies_sites.rms_with_respect_to_averages:
    if (rms is not None):
      assert eps_eq(
        rms, energies_sites_no_gradients.rms_with_respect_to_average)
  assert energies_sites.rms_with_respect_to_averages[1] is None
  out = StringIO()
  groups.show_adp_iso_differences_to_average(
    u_isos=u_isos, site_labels=site_labels, out=out, prefix="W@")
  assert not show_diff(out.getvalue(), """\
W@NCS restraint group 1:
W@  weight: 0.4321
W@  NCS selection: "chain A"
W@                         B-iso   NCS ave  Difference
W@    " N   GLN A   1 ":   11.77 -   12.08 =  -0.3133
...
W@    " C   THR D   6 ":   10.80 -   10.99 =  -0.1925
W@NCS restraint group 3:
W@  b_factor_weight: None  =>  restraints disabled
""", selections=[range(5),range(-3,0)])
  out = StringIO()
  groups.show_operators(sites_cart=sites_cart, out=out, prefix="K&")
  assert not show_diff(out.getvalue(), """\
K&NCS restraint group 1:
K&  NCS operator 1:
K&    Reference selection: "chain A"
...
K&      0.824624 - 0.971034: 5
K&    RMS difference with respect to the reference: 0.653687
K&  NCS operator 2:
K&    Reference selection: "chain A"
K&        Other selection: "chain C"
K&    Number of atom pairs: 32
...
K&      0.845124 - 0.988659: 2
K&      0.988659 - 1.132195: 3
K&    RMS difference with respect to the reference: 0.724248
""", selections=[range(3),range(15,21),range(-3,0)])
  out = StringIO()
  groups.show_sites_distances_to_average(
    sites_cart=sites_cart, site_labels=site_labels, out=out, prefix="[")
  assert not show_diff(out.getvalue(), """\
[NCS restraint group 1:
[  coordinate_sigma: 0.05
[  weight:  400
[  NCS selection: "chain A"
[                       Distance to NCS average
[    " N   GLN A   1 ":   0.4263
...
[    " C   THR D   6 ":   0.4017
[NCS restraint group 2:
[  coordinate_sigma: None  =>  restraints disabled
[NCS restraint group 3:
[  coordinate_sigma: 0.1
[  weight:  100
[  NCS selection: "chain A"
[                       Distance to NCS average
[    " N   GLN A   1 ":   0.4263
...
[    " C   THR D   6 ":   0.4017
""", selections=[range(6),range(120,129),range(-1,0)])
  #
  selection = groups.selection_restrained()
  assert selection.size() == 132
  assert selection.count(True) == 110
  out = StringIO()
  processed_pdb.show_atoms_without_ncs_restraints(
    ncs_restraints_groups=groups, out=out, prefix="%&")
  assert not show_diff(out.getvalue(), """\
%&Atoms without NCS restraints:
%&MODEL        1
%&ATOM     20  N   ALA B   3     111.517   7.175  -8.669  1.00 10.73           N
%&ATOM     21  CA  ALA B   3     112.152   8.103  -8.026  1.00 16.28           C
%&ATOM     22  C   ALA B   3     111.702   8.243  -5.903  1.00  9.19           C
%&ATOM     24  CA  SER B   4     111.797   9.689  -4.364  1.00  8.91           C
%&TER
%&ATOM     38  N   ALA C   3     109.043  27.391  28.663  1.00 15.05           N
%&ATOM     39  CA  ALA C   3     109.073  26.531  28.433  1.00  3.14           C
%&ATOM     40  C   ALA C   3     108.930  26.867  26.637  1.00 15.22           C
%&TER
%&ATOM     57  N   ALA D   3      65.439   9.903 -12.471  1.00  7.59           N
%&ATOM     58  CA  ALA D   3      65.019   9.566 -11.201  1.00 15.62           C
%&ATOM     59  C   ALA D   3      65.679  11.045 -11.097  1.00  2.65           C
%&ATOM     61  CA CSER D   4      65.657  12.870  -8.333  1.00  5.84           C
%&TER
%&ENDMDL
%&MODEL        2
%&ATOM     86  N   ALA B   3     111.984   7.364  -8.288  1.00  3.16           N
%&ATOM     87  CA  ALA B   3     112.389   8.456  -7.544  1.00 12.73           C
%&ATOM     88  C   ALA B   3     111.615   8.267  -6.238  1.00  7.17           C
%&ATOM     90  CA  SER B   4     111.707   9.131  -4.317  1.00 13.83           C
%&TER
%&ATOM    104  N   ALA C   3     109.237  27.879  29.334  1.00  6.45           N
%&ATOM    105  CA  ALA C   3     109.043  26.399  28.156  1.00 15.21           C
%&ATOM    106  C   ALA C   3     108.983  26.760  27.178  1.00 11.73           C
%&TER
%&ATOM    123  N   ALA D   3      65.188  10.335 -12.959  1.00  6.94           N
%&ATOM    124  CA  ALA D   3      65.543   9.614 -11.242  1.00  5.18           C
%&ATOM    125  C   ALA D   3      65.064  11.402 -10.648  1.00 16.01           C
%&ATOM    127  CA CSER D   4      65.127  12.969  -9.218  1.00  9.03           C
%&TER
%&ENDMDL
""")
  #
  for group in groups.members:
    assert group.registry.number_of_additional_isolated_sites == 0
  groups.register_additional_isolated_sites(number=10)
  for group in groups.members:
    assert group.registry.number_of_additional_isolated_sites == 10
  groups.register_additional_isolated_sites(number=3)
  for group in groups.members:
    assert group.registry.number_of_additional_isolated_sites == 13
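The finite-difference loop near the end of this example (shifting each u_iso by +/-eps and forming (rs[0]-rs[1])/(2*eps)) is an ordinary central-difference gradient check. Below is a minimal, self-contained sketch of the same idea with a toy residual standing in for the NCS restraints group; the names here are illustrative only and are not part of the original test.

def central_difference_gradient(target, x, eps=1.e-6):
    # approximate d(target)/dx_i by evaluating target at x_i +/- eps, one parameter at a time
    gradients = []
    for i in range(len(x)):
        x_plus = list(x); x_plus[i] += eps
        x_minus = list(x); x_minus[i] -= eps
        gradients.append((target(x_plus) - target(x_minus)) / (2 * eps))
    return gradients

def toy_residual_sum(u_isos):
    # stand-in for group.energies_adp_iso(u_isos=...).residual_sum
    return sum(u * u for u in u_isos)

u_isos = [0.2, 0.5, 0.8]
analytical = [2 * u for u in u_isos]   # exact gradient of the toy target
numerical = central_difference_gradient(toy_residual_sum, u_isos)
assert all(abs(a - n) < 1.e-6 for a, n in zip(analytical, numerical))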
Ejemplo n.º 19
0
def exercise_minimizer_interface():
    n = 25
    m = 5
    l = flex.double(n, -1)
    u = flex.double(n, 1)
    nbd = flex.int(n)
    factr = 1.0e7
    pgtol = 1.0e-5
    iprint = -1
    for enable_stp_init in [False, True]:
        minimizer = lbfgsb.ext.minimizer(
            n, m, l, u, nbd, enable_stp_init, factr, pgtol, iprint)
        assert minimizer.n() == n
        assert minimizer.m() == m
        assert minimizer.l().id() == l.id()
        assert minimizer.u().id() == u.id()
        assert minimizer.nbd().id() == nbd.id()
        assert minimizer.enable_stp_init() == enable_stp_init
        assert eps_eq(minimizer.factr(), factr)
        assert eps_eq(minimizer.pgtol(), pgtol)
        assert eps_eq(minimizer.iprint(), iprint)
        assert not minimizer.requests_f_and_g()
        assert not minimizer.is_terminated()
        assert minimizer.task() == "START"
        x = flex.double(n, 0)
        f = 1
        g = flex.double(n, -1)
        assert minimizer.process(x, f, g, False)
        assert minimizer.task() == "FG_START"
        assert minimizer.f() == 0
        if not enable_stp_init:
            try:
                minimizer.requests_stp_init()
            except RuntimeError as e:
                assert str(e).endswith(": SCITBX_ASSERT(enable_stp_init()) failure.")
            else:
                raise Exception_expected
        else:
            assert not minimizer.process(x, f, g)
            assert minimizer.requests_stp_init()
            assert approx_equal(minimizer.relative_step_length_line_search(), 0.2)
            assert approx_equal(minimizer.current_search_direction(), [1] * n)
            minimizer.set_relative_step_length_line_search(value=0.3)
            assert approx_equal(minimizer.relative_step_length_line_search(), 0.3)
        assert minimizer.process(x, f, g)
        assert minimizer.f() == 1
        assert minimizer.task() == "FG_LNSRCH"
        assert not minimizer.is_terminated()
        if not enable_stp_init:
            assert approx_equal(x, [0.2] * n)
        else:
            assert approx_equal(x, [0.3] * n)
        minimizer.request_stop()
        assert not minimizer.process(x, f, g)
        assert minimizer.task() == "STOP: NO RESTORE"
        minimizer.request_stop_with_restore()
        assert minimizer.task() == "STOP: CPU"
        minimizer.request_restart()
        assert not minimizer.requests_f_and_g()
        assert not minimizer.is_terminated()
        assert minimizer.task() == "START"
        minimizer = lbfgsb.minimizer(n=n)
        assert minimizer.l().size() == n
        assert minimizer.u().size() == n
        assert minimizer.nbd().size() == n
        assert minimizer.nbd().all_eq(0)
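The assertions above exercise the reverse-communication protocol of the L-BFGS-B wrapper: process() returns True while the minimizer still wants a fresh target value and gradient at the current x, and is_terminated() signals convergence or a stop request. The following driver loop is a minimal sketch built only on those calls; it is an assumption about typical usage, not part of the original test, and requires scitbx.

from scitbx.array_family import flex
from scitbx import lbfgsb

def minimize_sum_of_squares(n=25):
    # lbfgsb.minimizer(n=n) as above: all bounds disabled (nbd entries are zero)
    minimizer = lbfgsb.minimizer(n=n)
    x = flex.double(n, 3)
    f = 0
    g = flex.double(n, 0)
    while True:
        if minimizer.process(x, f, g):
            # the minimizer asks for the target and gradient at the current x
            f = flex.sum(x * x)
            g = x * 2
        elif minimizer.is_terminated():
            break
    return x, f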
Ejemplo n.º 20
0
def exercise_gradient_dx(gaussian, x_max=1., n_points=50):
  for i in xrange(n_points+1):
    x = x_max * i / n_points
    grad_finite = finite_gradient_dx_at_x(gaussian, x)
    grad_analytical = gaussian.gradient_dx_at_x(x)
    assert eps_eq(grad_finite, grad_analytical)
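This check compares gaussian.gradient_dx_at_x against a finite difference. Assuming the conventional sum-of-Gaussians form sum_i a_i*exp(-b_i*x^2) + c used for these objects, the analytical derivative is sum_i -2*b_i*x*a_i*exp(-b_i*x^2). A pure-Python illustration of the same comparison follows; it is illustrative only and does not use the library.

import math

def gaussian_sum_at_x(a, b, c, x):
    # assumed functional form: sum_i a_i*exp(-b_i*x^2) + c
    return sum(ai * math.exp(-bi * x * x) for ai, bi in zip(a, b)) + c

def gaussian_sum_gradient_dx(a, b, x):
    # d/dx of the assumed form above
    return sum(-2 * bi * x * ai * math.exp(-bi * x * x) for ai, bi in zip(a, b))

a, b, c, x, h = (1, 2), (4, 5), 0, 0.3, 1.e-5
finite = (gaussian_sum_at_x(a, b, c, x + h) - gaussian_sum_at_x(a, b, c, x - h)) / (2 * h)
assert abs(finite - gaussian_sum_gradient_dx(a, b, x)) < 1.e-6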
Ejemplo n.º 21
0
def recycle(miller_array, column_root_label, column_types=None, verbose=0):
    original_dataset = to_mtz(miller_array, column_root_label, column_types)
    label_decorator = mtz.label_decorator()
    written = original_dataset.mtz_object()
    if (0 or verbose):
        written.show_summary()
    original_dataset.mtz_object().write(file_name="tmp_iotbx_mtz.mtz")
    restored = mtz.object(file_name="tmp_iotbx_mtz.mtz")
    if (0 or verbose):
        restored.show_summary()
    assert restored.title() == written.title()
    assert [line.rstrip() for line in restored.history()] \
        == list(written.history())
    assert restored.space_group_name() == written.space_group_name()
    assert restored.space_group_number() == written.space_group_number()
    assert restored.space_group() == written.space_group()
    assert restored.point_group_name() == written.point_group_name()
    assert restored.lattice_centring_type() == written.lattice_centring_type()
    assert restored.n_batches() == written.n_batches()
    assert restored.n_reflections() == written.n_reflections()
    assert eps_eq(restored.max_min_resolution(),
                  written.max_min_resolution(),
                  eps=1.e-5)
    assert restored.n_crystals() == written.n_crystals()
    assert restored.n_active_crystals() == written.n_active_crystals()
    assert restored.n_crystals() == 2
    for rx, wx in zip(restored.crystals(), written.crystals()):
        assert rx.name() == wx.name()
        assert rx.project_name() == wx.project_name()
        assert rx.unit_cell().is_similar_to(wx.unit_cell())
        assert rx.n_datasets() == wx.n_datasets()
        for rd, wd in zip(rx.datasets(), wx.datasets()):
            assert rd.name() == wd.name()
            assert rd.wavelength() == wd.wavelength()
            assert rd.n_columns() == wd.n_columns()
    miller_set = restored.crystals()[1].miller_set()
    assert miller_set.indices().size() == restored.n_reflections()
    crystal_symmetry = restored.crystals()[1].crystal_symmetry()
    restored_dataset = restored.crystals()[1].datasets()[0]
    if (not miller_array.anomalous_flag()):
        if (miller_array.sigmas() is None):
            if (miller_array.is_complex_array()):
                assert restored_dataset.n_columns() == 3 + 2
                group = restored.extract_complex(
                    column_label_ampl=column_root_label,
                    column_label_phi=label_decorator.phases(column_root_label))
            elif (miller_array.is_hendrickson_lattman_array()):
                assert restored_dataset.n_columns() == 3 + 4
                deco = label_decorator.hendrickson_lattman
                group = restored.extract_hendrickson_lattman(
                    column_label_a=deco(column_root_label, 0),
                    column_label_b=deco(column_root_label, 1),
                    column_label_c=deco(column_root_label, 2),
                    column_label_d=deco(column_root_label, 3))
            else:
                assert restored_dataset.n_columns() == 3 + 1
                group = restored.extract_reals(column_label=column_root_label)
            r = miller.array(miller_set=miller.set(
                crystal_symmetry=crystal_symmetry,
                indices=group.indices,
                anomalous_flag=False),
                             data=group.data)
        else:
            assert restored_dataset.n_columns() == 3 + 2
            group = restored.extract_observations(
                column_label_data=column_root_label,
                column_label_sigmas=label_decorator.sigmas(column_root_label))
            r = miller.array(miller_set=miller.set(
                crystal_symmetry=crystal_symmetry,
                indices=group.indices,
                anomalous_flag=False),
                             data=group.data,
                             sigmas=group.sigmas)
    else:
        if (miller_array.sigmas() is None):
            if (miller_array.is_complex_array()):
                assert restored_dataset.n_columns() == 3 + 4
                group = restored.extract_complex_anomalous(
                    column_label_ampl_plus=label_decorator.anomalous(
                        column_root_label, "+"),
                    column_label_phi_plus=label_decorator.phases(
                        column_root_label, "+"),
                    column_label_ampl_minus=label_decorator.anomalous(
                        column_root_label, "-"),
                    column_label_phi_minus=label_decorator.phases(
                        column_root_label, "-"))
            elif (miller_array.is_hendrickson_lattman_array()):
                assert restored_dataset.n_columns() == 3 + 8
                deco = label_decorator.hendrickson_lattman
                group = restored.extract_hendrickson_lattman_anomalous(
                    column_label_a_plus=deco(column_root_label, 0, "+"),
                    column_label_b_plus=deco(column_root_label, 1, "+"),
                    column_label_c_plus=deco(column_root_label, 2, "+"),
                    column_label_d_plus=deco(column_root_label, 3, "+"),
                    column_label_a_minus=deco(column_root_label, 0, "-"),
                    column_label_b_minus=deco(column_root_label, 1, "-"),
                    column_label_c_minus=deco(column_root_label, 2, "-"),
                    column_label_d_minus=deco(column_root_label, 3, "-"))
            else:
                assert restored_dataset.n_columns() == 3 + 2
                group = restored.extract_reals_anomalous(
                    column_label_plus=label_decorator.anomalous(
                        column_root_label, "+"),
                    column_label_minus=label_decorator.anomalous(
                        column_root_label, "-"))
            r = miller.array(miller_set=miller.set(
                crystal_symmetry=crystal_symmetry,
                indices=group.indices,
                anomalous_flag=True),
                             data=group.data)
        else:
            assert restored_dataset.n_columns() == 3 + 4
            group = restored.extract_observations_anomalous(
                column_label_data_plus=label_decorator.anomalous(
                    column_root_label, "+"),
                column_label_sigmas_plus=label_decorator.sigmas(
                    column_root_label, "+"),
                column_label_data_minus=label_decorator.anomalous(
                    column_root_label, "-"),
                column_label_sigmas_minus=label_decorator.sigmas(
                    column_root_label, "-"))
            r = miller.array(miller_set=miller.set(
                crystal_symmetry=crystal_symmetry,
                indices=group.indices,
                anomalous_flag=True),
                             data=group.data,
                             sigmas=group.sigmas)
    verify_miller_arrays(miller_array, r)
    restored_miller_arrays = restored.as_miller_arrays()
    assert len(restored_miller_arrays) == 1
    thff = restored_miller_arrays[0].info().type_hints_from_file
    assert thff is not None
    assert miller_array.is_hendrickson_lattman_array() \
        == (thff == "hendrickson_lattman")
    verify_miller_arrays(miller_array, restored_miller_arrays[0])
    mtz_object = miller_array.as_mtz_dataset(
        column_root_label=column_root_label).mtz_object()
    restored_miller_arrays = mtz_object.as_miller_arrays()
    assert len(restored_miller_arrays) == 1
    verify_miller_arrays(miller_array, restored_miller_arrays[0])
    if (miller_array.is_bool_array() or miller_array.is_integer_array()
            or miller_array.is_real_array()):
        cb_op = miller_array.change_of_basis_op_to_niggli_cell()
        mtz_object.change_basis_in_place(cb_op=cb_op)
        cb_array = miller_array.change_basis(cb_op=cb_op)
        assert mtz_object.space_group() == cb_array.space_group()
        for mtz_crystal in mtz_object.crystals():
            assert mtz_crystal.unit_cell().is_similar_to(cb_array.unit_cell())
        restored_miller_arrays = mtz_object.as_miller_arrays()
        assert len(restored_miller_arrays) == 1
        verify_miller_arrays(cb_array, restored_miller_arrays[0])
        mtz_object.change_basis_in_place(cb_op=cb_op.inverse())
        assert mtz_object.space_group() == miller_array.space_group()
        for mtz_crystal in mtz_object.crystals():
            assert mtz_crystal.unit_cell().is_similar_to(
                miller_array.unit_cell())
        restored_miller_arrays = mtz_object.as_miller_arrays()
        assert len(restored_miller_arrays) == 1
        verify_miller_arrays(miller_array, restored_miller_arrays[0])
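The core of the round trip above is: wrap a Miller array in an MTZ dataset, write it to disk, reopen the file, and convert the columns back to Miller arrays. Below is a minimal sketch of just that core, using only calls that appear in this example; the helper function is hypothetical and assumes iotbx is installed.

from iotbx import mtz

def write_and_read_back(miller_array, column_root_label, file_name="tmp_roundtrip.mtz"):
    # write: wrap the array in a new MTZ dataset and dump it to disk
    mtz_dataset = miller_array.as_mtz_dataset(column_root_label=column_root_label)
    mtz_dataset.mtz_object().write(file_name=file_name)
    # read back: reopen the file and convert its columns to Miller arrays
    restored = mtz.object(file_name=file_name).as_miller_arrays()
    assert len(restored) == 1
    return restored[0]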
Ejemplo n.º 22
0
def exercise_fit():
  x = flex.double((0.1, 0.2, 0.5))
  y = flex.double((3,2,1))
  sigmas = flex.double((0.04,0.02,0.01))
  gf = gaussian.fit(
    x, y, sigmas,
    gaussian.sum((1,2), (4,5)))
  assert approx_equal(gf.array_of_a(), (1,2))
  assert approx_equal(gf.array_of_b(), (4,5))
  assert approx_equal(gf.c(), 0)
  assert not gf.use_c()
  assert approx_equal(gf.table_x(), x)
  assert approx_equal(gf.table_y(), y)
  assert approx_equal(gf.table_sigmas(), sigmas)
  assert approx_equal(gf.fitted_values(),
    [2.8632482881537511, 2.4896052951221748, 0.94088903489182252])
  reference_gaussian = gaussian.sum((1,2,3), (4,5,6))
  gf = gaussian.fit(
    x, reference_gaussian, sigmas,
    gaussian.sum((1,2), (4,5)))
  assert approx_equal(gf.array_of_a(), (1,2))
  assert approx_equal(gf.array_of_b(), (4,5))
  assert approx_equal(gf.c(), 0)
  assert approx_equal(gf.table_x(), x)
  assert approx_equal(gf.table_y(), reference_gaussian.at_x(x))
  assert approx_equal(gf.table_sigmas(), sigmas)
  assert isinstance(gf.sort(), gaussian.fit)
  assert gf.sort().table_x() == gf.table_x()
  assert gf.sort().table_y() == gf.table_y()
  assert gf.sort().table_sigmas() == gf.table_sigmas()
  assert approx_equal(gf.differences(), gf.at_x(x)-reference_gaussian.at_x(x))
  c_fit = gaussian.fit(
    flex.double([0.0, 0.066666666666666666, 0.13333333333333333,
                 0.2, 0.26666666666666666]),
    gaussian.sum(
      (2.657506, 1.078079, 1.490909, -4.2410698, 0.71379101),
      (14.780758, 0.776775, 42.086842, -0.000294, 0.239535),
      4.2979832),
    flex.double(5, 0.0005),
    gaussian.sum(
      (1.1423916, 4.1728425, 0.61716694),
      (0.50733125, 14.002512, 41.978928)))
  differences = flex.double([-0.064797341823577881, 0.003608505180995536,
    0.098159179757290715, 0.060724224581695019, -0.10766283796372011])
  assert approx_equal(c_fit.differences(), differences)
  assert approx_equal(c_fit.significant_relative_errors(),
    [0.0107212, 0.0005581, 0.0213236, 0.0169304, 0.0385142])
  gf = gaussian.fit(
    x, reference_gaussian, flex.double(x.size(), 1),
    gaussian.sum((1,2), (4,5)))
  assert list(gf.bound_flags(False, False)) == [False,False,False,False]
  assert list(gf.bound_flags(True, False)) == [True,False,True,False]
  assert list(gf.bound_flags(False, True)) == [False,True,False,True]
  sgf = gf.apply_shifts(flex.double((3,-3,4,6)), True)
  assert approx_equal(sgf.array_of_a(), (1+3,2+4))
  assert approx_equal(sgf.array_of_b(),
    ((math.sqrt(4)-3)**2,(math.sqrt(5)+6)**2))
  assert approx_equal(sgf.c(), 0)
  assert not sgf.use_c()
  sgf = gf.apply_shifts(flex.double((3,-3,4,6)), False)
  assert approx_equal(sgf.array_of_a(), (1+3,2+4))
  assert approx_equal(sgf.array_of_b(), (4-3,5+6))
  assert approx_equal(sgf.c(), 0)
  assert not sgf.use_c()
  differences = sgf.differences()
  for use_sigmas in [False, True]:
    assert approx_equal(sgf.target_function(2, use_sigmas, differences),
      25.0320634)
    assert approx_equal(sgf.target_function(4, use_sigmas, differences),
      256.2682575)
    assert approx_equal(
      sgf.gradients_d_abc(2, use_sigmas, differences),
      [15.6539271, -4.1090114, 10.4562306, -1.6376781])
  gfc = gaussian.fit(
    x, reference_gaussian, flex.double(x.size(), 1),
    gaussian.sum((1,2), (4,5), 6))
  assert list(gfc.bound_flags(False, False)) == [False,False,False,False,False]
  assert list(gfc.bound_flags(True, False)) == [True,False,True,False,True]
  assert list(gfc.bound_flags(False, True)) == [False,True,False,True,False]
  sgfc = gfc.apply_shifts(flex.double((3,-3,4,6,-5)), True)
  assert approx_equal(sgfc.array_of_a(), (1+3,2+4))
  assert approx_equal(sgfc.array_of_b(),
    ((math.sqrt(4)-3)**2,(math.sqrt(5)+6)**2))
  assert approx_equal(sgfc.c(), 6-5)
  assert sgfc.use_c()
  sgfc = gfc.apply_shifts(flex.double((3,-3,4,6,-5)), False)
  assert approx_equal(sgfc.array_of_a(), (1+3,2+4))
  assert approx_equal(sgfc.array_of_b(), (4-3,5+6))
  assert approx_equal(sgfc.c(), 6-5)
  assert sgfc.use_c()
  differences = sgfc.differences()
  for use_sigmas in [False, True]:
    assert approx_equal(sgfc.target_function(2, use_sigmas, differences),
      44.8181444)
    assert approx_equal(sgfc.target_function(4, use_sigmas, differences),
      757.3160329)
    assert approx_equal(
      sgfc.gradients_d_abc(2, use_sigmas, differences),
      [21.1132071, -6.0532695, 13.6638274, -2.2460994, 22.7860809])
  differences = c_fit.differences()
  gabc = c_fit.gradients_d_abc(2, False, differences)
  assert approx_equal(
    gabc,
    [-0.016525391425206391, 0.0074465239375589107, 0.020055876723667564,
     0.00054794635257838251, -0.018754011379726425, -0.0011194004809549143])
  assert approx_equal(
    c_fit.gradients_d_shifts(flex.double((0.1,0.4,0.2,0.5,0.3,0.6)), gabc),
    [-0.0165254, 0.01656512, 0.0200559, 0.0046488, -0.0187540, -0.0158487])
  g5c = gaussian.sum(
    (2.657505989074707, 1.0780789852142334, 1.4909089803695679,
     -4.2410697937011719, 0.71379101276397705),
    (14.780757904052734, 0.77677500247955322, 42.086841583251953,
     -0.00029399999766610563, 0.23953500390052795),
    4.2979831695556641)
  for include_constant_term in (False, True):
    a = flex.double(g5c.array_of_a())
    b = flex.double(g5c.array_of_b())
    permutation = flex.sort_permutation(data=flex.abs(a), reverse=True)[:4]
    gf = gaussian.fit(
      flex.double([0]),
      g5c,
      flex.double(1, 1),
      gaussian.sum(
        iter(a.select(permutation)),
        iter(b.select(permutation)), 0, include_constant_term))
    assert approx_equal(gf.differences(), [-5.01177418232])
    shifts = flex.double(8,-1)
    if (include_constant_term): shifts.append(-.2)
    sgf = gf.apply_shifts(shifts, False)
    assert approx_equal(sgf.array_of_a(),
                        [-5.2410698, 1.657506, 0.49090898, 0.078078985])
    assert approx_equal(sgf.array_of_b(),
                        [-1.0002940, 13.780758, 41.086842, -0.223225])
    if (include_constant_term):
      assert approx_equal(sgf.c(), -.2)
    expected_gradients = [1,0,1,0,1,0,1,0]
    if (include_constant_term): expected_gradients.append(1)
    assert approx_equal(
      fit_finite_diff_gradients(sgf, 0),
      expected_gradients,
      eps=1.e-4)
    for i in xrange(10):
      gf = gaussian.fit(
        flex.double([i / 10.]),
        g5c,
        flex.double(1, 1),
        sgf)
      differences = flex.double([0.5])
      assert approx_equal(
        gf.gradients_d_abc(2, False, differences),
        fit_finite_diff_gradients(gf, gf.table_x()[0]),
        eps=1.e-3)
      for sigma in [0.04,0.02,0.01]:
        gf = gaussian.fit(
          flex.double([i / 20.]),
          g5c,
          flex.double([sigma]),
          sgf)
        for power in [2,4]:
          for use_sigmas in [False, True]:
            differences = gf.differences()
            an=gf.gradients_d_abc(power, use_sigmas, differences)
            fi=fit_finite_diff_target_gradients(gf, power, use_sigmas)
            assert eps_eq(an, fi, eps=1.e-3)
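The apply_shifts assertions above imply two conventions for shifting the b coefficients: with the flag set to True the shift is applied to sqrt(b) and the result squared back (so the new b cannot be negative), otherwise the shift is added to b directly. The pure-Python illustration below restates that rule; it is an inference from the assertions, not the library code.

import math

def shift_b(b, shift, apply_to_sqrt):
    if apply_to_sqrt:
        # mirrors gf.apply_shifts(..., True): b -> (sqrt(b) + shift)**2
        return (math.sqrt(b) + shift) ** 2
    # mirrors gf.apply_shifts(..., False): b -> b + shift
    return b + shift

# matches array_of_b() == ((sqrt(4)-3)**2, (sqrt(5)+6)**2) above
assert abs(shift_b(4, -3, True) - (math.sqrt(4) - 3) ** 2) < 1e-12
assert abs(shift_b(5, 6, True) - (math.sqrt(5) + 6) ** 2) < 1e-12
# matches array_of_b() == (4-3, 5+6) above
assert shift_b(4, -3, False) == 1 and shift_b(5, 6, False) == 11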