Code Example #1
  def exercise_ls_cycles(self):
    xs = self.xray_structure.deep_copy_scatterers()
    xs.shake_adp() # it must happen before the reparametrisation is constructed
                   # because the ADP values are read then and only then.
    connectivity_table = smtbx.utils.connectivity_table(xs)
    reparametrisation = constraints.reparametrisation(
      structure=xs,
      constraints=[],
      connectivity_table=connectivity_table)
    ls = least_squares.crystallographic_ls(
      self.fo_sq.as_xray_observations(), reparametrisation,
      weighting_scheme=least_squares.unit_weighting(),
      origin_fixing_restraints_type=oop.null())

    cycles = normal_eqns_solving.naive_iterations(
      ls,
      n_max_iterations=10,
      track_all=True)

    assert approx_equal(ls.scale_factor(), 1, eps=1e-4)
    assert approx_equal(ls.objective(), 0)
    # skip next-to-last one to allow for no progress and rounding error
    n = len(cycles.objective_history)
    assert cycles.objective_history[0] > cycles.objective_history[n-1],\
           cycles.objective_history
    assert approx_equal(cycles.gradient_norm_history[-1], 0, eps=1e-6)

    for sc0, sc1 in zip(self.xray_structure.scatterers(), xs.scatterers()):
      assert approx_equal(sc0.u_star, sc1.u_star)
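
Note: across these examples the calling pattern for normal_eqns_solving.naive_iterations is essentially the same: build a least-squares object (a crystallographic_ls or a custom non_linear_ls helper), then iterate under some combination of n_max_iterations, gradient_threshold, step_threshold and track_all. For orientation only, here is a from-scratch NumPy sketch of the kind of Gauss-Newton loop those arguments suggest, fitting a made-up exponential model; it is not the scitbx implementation.

import numpy as np

# Illustrative only: a from-scratch Gauss-Newton loop mimicking the stopping
# criteria used above (max iterations, gradient-norm threshold, step-norm
# threshold, optional history tracking). Model and data are made up.

def residuals_and_jacobian(params, x, y):
    a, b = params
    model = a * np.exp(b * x)
    r = model - y                                 # residual vector
    J = np.column_stack([np.exp(b * x),           # d(model)/da
                         a * x * np.exp(b * x)])  # d(model)/db
    return r, J

def naive_gauss_newton(params, x, y, n_max_iterations=10,
                       gradient_threshold=1e-12, step_threshold=1e-7,
                       track_all=True):
    objective_history, gradient_norm_history = [], []
    for _ in range(n_max_iterations):
        r, J = residuals_and_jacobian(params, x, y)
        grad = J.T @ r                    # gradient of 0.5*||r||^2
        normal_matrix = J.T @ J           # normal equations built each cycle
        step = np.linalg.solve(normal_matrix, -grad)
        params = params + step
        if track_all:
            objective_history.append(0.5 * float(r @ r))
            gradient_norm_history.append(float(np.linalg.norm(grad)))
        if np.linalg.norm(grad) < gradient_threshold:
            break
        if np.linalg.norm(step) < step_threshold:
            break
    return params, objective_history, gradient_norm_history

# toy usage: recover a=2, b=-1 from noise-free synthetic data
x = np.linspace(0.0, 3.0, 20)
y = 2.0 * np.exp(-1.0 * x)
fit, obj_hist, grad_hist = naive_gauss_newton(np.array([1.0, -0.5]), x, y)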
Code Example #2
  def exercise_ls_cycles(self):
    xs = self.xray_structure.deep_copy_scatterers()
    connectivity_table = smtbx.utils.connectivity_table(xs)
    emma_ref = xs.as_emma_model()
    # shaking must happen before the reparametrisation is constructed,
    # otherwise the original values will prevail
    xs.shake_sites_in_place(rms_difference=0.1)
    reparametrisation = constraints.reparametrisation(
      structure=xs,
      constraints=[],
      connectivity_table=connectivity_table)
    ls = least_squares.crystallographic_ls(
      self.fo_sq.as_xray_observations(), reparametrisation,
      weighting_scheme=least_squares.mainstream_shelx_weighting(a=0),
      origin_fixing_restraints_type=
      origin_fixing_restraints.atomic_number_weighting)

    cycles = normal_eqns_solving.naive_iterations(
      ls,
      gradient_threshold=1e-12,
      step_threshold=1e-7,
      track_all=True)

    assert approx_equal(ls.scale_factor(), 1, eps=1e-5), ls.scale_factor()
    assert approx_equal(ls.objective(), 0), ls.objective()

    match = emma.model_matches(emma_ref, xs.as_emma_model()).refined_matches[0]
    assert match.rt.r == matrix.identity(3)
    for pair in match.pairs:
      assert approx_equal(match.calculate_shortest_dist(pair), 0, eps=1e-4)
Code Example #3
  def exercise_ls_cycles(self):
    xs = self.xray_structure.deep_copy_scatterers()
    connectivity_table = smtbx.utils.connectivity_table(xs)
    emma_ref = xs.as_emma_model()
    # shaking must happen before the reparametrisation is constructed,
    # otherwise the original values will prevail
    xs.shake_sites_in_place(rms_difference=0.1)
    reparametrisation = constraints.reparametrisation(
      structure=xs,
      constraints=[],
      connectivity_table=connectivity_table)
    ls = least_squares.crystallographic_ls(
      self.fo_sq.as_xray_observations(), reparametrisation,
      weighting_scheme=least_squares.unit_weighting(),
      origin_fixing_restraints_type=
      origin_fixing_restraints.atomic_number_weighting)

    cycles = normal_eqns_solving.naive_iterations(
      ls,
      n_max_iterations=5,
      track_all=True)

    assert approx_equal(ls.scale_factor(), 1, eps=1e-5)
    assert approx_equal(ls.objective(), 0)
    # skip next-to-last one to allow for no progress and rounding error
    assert (
      cycles.objective_history[0]
      >= cycles.objective_history[1]
      >= cycles.objective_history[3]), numstr(cycles.objective_history)
    assert approx_equal(cycles.gradient_norm_history[-1], 0, eps=5e-8)

    match = emma.model_matches(emma_ref, xs.as_emma_model()).refined_matches[0]
    assert match.rt.r == matrix.identity(3)
    for pair in match.pairs:
      assert approx_equal(match.calculate_shortest_dist(pair), 0, eps=1e-4)
Code Example #4
    def run_plain(self):
        self.MINI = lbfgs_minimizer_derivatives(
            current_x=self.rs2_current,
            parameterization=self.rs2_parameterization_class,
            refinery=self.rs2_refinery,
            out=self.out)

        self.refined_mini = self.MINI
        values = self.rs2_parameterization_class(self.MINI.x)
        self.nave1_current = flex.double([
            values.G, values.BFACTOR, values.RS, values.thetax * 1000.,
            values.thetay * 1000.
        ])
        self.nave1_parameterization_class = nave1_parameterization
        self.MINI2 = per_frame_helper(
            current_x=self.nave1_current,
            parameterization=self.nave1_parameterization_class,
            refinery=self.nave1_refinery,
            out=self.out)
        print >> self.out, "Trying Lev-Mar2"
        iterations = normal_eqns_solving.naive_iterations(
            non_linear_ls=self.MINI2,
            step_threshold=0.0001,
            gradient_threshold=1.E-10)
        self.refined_mini = self.MINI2
        self.refinery = self.nave1_refinery  # used elsewhere, not private interface
        self.parameterization_class = nave1_parameterization
Code Example #6
 def run_minimzer(self, values, sels, **kwargs):
     # base class uses non-squared values, but lbfgs version refines the squares.
     values = self.parameterization(values.reference**2)
     self.refinery = sdfac_refinery(self.scaler.ISIGI,
                                    self.scaler.miller_set.indices(), sels,
                                    self.log)
     self.helper = sdfac_helper(current_x=values.reference,
                                parameterization=self.parameterization,
                                refinery=self.refinery,
                                out=self.log)
     self.iterations = normal_eqns_solving.naive_iterations(
         non_linear_ls=self.helper,
         step_threshold=0.0001,
         gradient_threshold=1.E-10)
     return self
Code Example #7
File: __init__.py Project: dials/cctbx
    def __init__(self,
                 xray_structure,
                 obs_,
                 exti=None,
                 connectivity_table=None):
        if exti is None:
            exti = xray.dummy_extinction_correction()
        adopt_init_args(self, locals())
        assert obs_.fo_sq.anomalous_flag()
        assert not (obs_.twin_fractions and obs_.merohedral_components)

        xray_structure = xray_structure.deep_copy_scatterers()
        for sc in xray_structure.scatterers():
            f = xray.scatterer_flags()
            f.set_use_u_aniso(sc.flags.use_u_aniso())
            f.set_use_u_iso(sc.flags.use_u_iso())
            f.set_use_fp_fdp(True)
            sc.flags = f

        twin_fractions = ()
        it = xray.twin_component(sgtbx.rot_mx((-1, 0, 0, 0, -1, 0, 0, 0, -1)),
                                 0.2, True)
        twin_components = (it, )
        obs = observations.customized_copy(obs_, twin_fractions,
                                           twin_components)
        # reparameterisation needs all fractions
        twin_fractions += twin_components
        if connectivity_table is None:
            connectivity_table = smtbx.utils.connectivity_table(xray_structure)
        reparametrisation = constraints.reparametrisation(
            xray_structure, [],
            connectivity_table,
            twin_fractions=twin_fractions,
            extinction=exti)
        normal_eqns = least_squares.crystallographic_ls(obs, reparametrisation)
        cycles = normal_eqns_solving.naive_iterations(normal_eqns,
                                                      n_max_iterations=10,
                                                      gradient_threshold=1e-7,
                                                      step_threshold=1e-4)
        self.flack_x = it.value
        self.sigma_x = math.sqrt(
            normal_eqns.covariance_matrix(
                jacobian_transpose=reparametrisation.
                jacobian_transpose_matching(
                    reparametrisation.mapping_to_grad_fc_independent_scalars))
            [0])
Code Example #8
File: tst_masks.py Project: dials/cctbx
def exercise_least_squares(xray_structure, fo_sq, mask=None):
    from smtbx.refinement import least_squares
    fo_sq = fo_sq.customized_copy(sigmas=flex.double(fo_sq.size(), 1.))
    xs = xray_structure.deep_copy_scatterers()
    if mask is not None:
        f_mask = mask.f_mask()
    else:
        f_mask = None
    connectivity_table = smtbx.utils.connectivity_table(xs)
    reparametrisation = constraints.reparametrisation(
        structure=xs, constraints=[], connectivity_table=connectivity_table)
    obs = fo_sq.as_xray_observations()
    ls = least_squares.crystallographic_ls(obs,
                                           reparametrisation,
                                           f_mask=f_mask,
                                           weighting_scheme="default")
    cycles = normal_eqns_solving.naive_iterations(ls, n_max_iterations=3)
    return xs
Code Example #9
  def run_plain(self):
    self.MINI = lbfgs_minimizer_derivatives( current_x = self.rs2_current,
        parameterization = self.rs2_parameterization_class, refinery = self.rs2_refinery,
        out = self.out )

    self.refined_mini = self.MINI
    values = self.rs2_parameterization_class(self.MINI.x)
    self.nave1_current = flex.double(
      [values.G, values.BFACTOR, values.RS, values.thetax*1000., values.thetay*1000.])
    self.nave1_parameterization_class = nave1_parameterization
    self.MINI2 = per_frame_helper( current_x = self.nave1_current,
        parameterization = self.nave1_parameterization_class, refinery = self.nave1_refinery,
        out = self.out )
    print >>self.out, "Trying Lev-Mar2"
    iterations = normal_eqns_solving.naive_iterations(non_linear_ls = self.MINI2,
        step_threshold = 0.0001,
        gradient_threshold = 1.E-10)
    self.refined_mini = self.MINI2
    self.refinery = self.nave1_refinery # used elsewhere, not private interface
Code Example #10
File: __init__.py Project: cctbx/cctbx-playground
  def __init__(self, xray_structure, obs_, exti=None, connectivity_table=None):
    if exti is None:
      exti = xray.dummy_extinction_correction()
    adopt_init_args(self, locals())
    assert obs_.fo_sq.anomalous_flag()
    xray_structure = xray_structure.deep_copy_scatterers()
    flags = xray_structure.scatterer_flags()
    for sc in xray_structure.scatterers():
      f = xray.scatterer_flags()
      f.set_use_u_aniso(sc.flags.use_u_aniso())
      f.set_use_u_iso(sc.flags.use_u_iso())
      f.set_use_fp_fdp(True)
      sc.flags = f

    twin_fractions = obs_.twin_fractions
    twin_components = obs_.merohedral_components
    for tw in twin_fractions: tw.grad = False
    for tc in twin_components: tc.grad = False

    it = xray.twin_component(sgtbx.rot_mx((-1,0,0,0,-1,0,0,0,-1)), 0.2, True)
    twin_components += (it,)
    obs = observations.customized_copy(obs_, twin_fractions, twin_components)
    # reparameterisation needs all fractions
    twin_fractions += twin_components
    if connectivity_table is None:
      connectivity_table = smtbx.utils.connectivity_table(xray_structure)
    reparametrisation = constraints.reparametrisation(
      xray_structure, [], connectivity_table,
      twin_fractions=twin_fractions,
      extinction=exti
    )
    normal_eqns = least_squares.crystallographic_ls(obs,
      reparametrisation)
    cycles = normal_eqns_solving.naive_iterations(
      normal_eqns, n_max_iterations=10,
      gradient_threshold=1e-7,
      step_threshold=1e-4)
    self.flack_x = it.value
    self.sigma_x = math.sqrt(normal_eqns.covariance_matrix(
      jacobian_transpose=reparametrisation.jacobian_transpose_matching(
        reparametrisation.mapping_to_grad_fc_independent_scalars))[0])
Code Example #11
def run():
    import libtbx.utils
    libtbx.utils.show_times_at_exit()
    import sys
    from libtbx.option_parser import option_parser
    command_line = (option_parser().option(
        None, "--normal_eqns_solving_method",
        default='naive').option(None,
                                "--fix_random_seeds",
                                action='store_true',
                                default=False)).process(args=sys.argv[1:])
    opts = command_line.options
    if opts.fix_random_seeds:
        import random
        random.seed(1)
        flex.set_random_seed(1)
    gradient_threshold = 1e-8
    step_threshold = 1e-8
    if opts.normal_eqns_solving_method == 'naive':
        m = lambda eqns: normal_eqns_solving.naive_iterations(
            eqns,
            gradient_threshold=gradient_threshold,
            step_threshold=step_threshold)
    elif opts.normal_eqns_solving_method == 'levenberg-marquardt':
        m = lambda eqns: normal_eqns_solving.levenberg_marquardt_iterations(
            eqns,
            gradient_threshold=gradient_threshold,
            step_threshold=step_threshold,
            tau=1e-7)
    else:
        raise RuntimeError("Unknown method %s" %
                           opts.normal_eqns_solving_method)
    for t in [
            saturated_test_case(m),
            sucrose_test_case(m),
            symmetry_equivalent_test_case(m),
            fpfdp_test_case(m),
            constrained_fpfdp_test_case(m),
            scalar_scaled_adp_test_case(m),
    ]:
        t.run()
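
Note: this example switches between naive_iterations and levenberg_marquardt_iterations; the extra tau argument is assumed here to scale the initial damping of the normal matrix. For orientation only, a deliberately simplified Levenberg-Marquardt loop in plain NumPy, to contrast with the Gauss-Newton sketch earlier; the accept/reject scheme is minimal and this is not the scitbx implementation.

import numpy as np

# Illustrative only: simplified Levenberg-Marquardt iterations.
# fun(params) must return (residual_vector, jacobian_matrix), e.g. the
# residuals_and_jacobian helper from the earlier sketch with its data bound in.

def levenberg_marquardt(params, fun, n_max_iterations=50,
                        gradient_threshold=1e-8, step_threshold=1e-8, tau=1e-3):
    r, J = fun(params)
    A, g = J.T @ J, J.T @ r
    mu = tau * float(np.max(np.diag(A)))     # initial damping derived from tau
    for _ in range(n_max_iterations):
        step = np.linalg.solve(A + mu * np.eye(A.shape[0]), -g)
        if np.linalg.norm(step) < step_threshold:
            break
        trial = params + step
        r_trial, J_trial = fun(trial)
        if r_trial @ r_trial < r @ r:        # objective decreased: accept, relax damping
            params, r, J = trial, r_trial, J_trial
            A, g = J.T @ J, J.T @ r
            mu *= 0.5
            if np.linalg.norm(g) < gradient_threshold:
                break
        else:                                # objective increased: reject, damp harder
            mu *= 2.0
    return params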
Code Example #12
    def exercise_ls_cycles(self):
        xs = self.xray_structure.deep_copy_scatterers()
        xs.shake_adp()  # it must happen before the reparametrisation is constructed
        # because the ADP values are read then and only then.
        connectivity_table = smtbx.utils.connectivity_table(xs)
        reparametrisation = constraints.reparametrisation(
            structure=xs,
            constraints=[],
            connectivity_table=connectivity_table)
        ls = least_squares.crystallographic_ls(
            self.fo_sq.as_xray_observations(),
            reparametrisation,
            weighting_scheme=least_squares.mainstream_shelx_weighting(a=0),
            origin_fixing_restraints_type=oop.null())

        try:
            cycles = normal_eqns_solving.naive_iterations(
                ls, gradient_threshold=1e-12, track_all=True)

            assert approx_equal(ls.scale_factor(), 1, eps=1e-4)
            assert approx_equal(ls.objective(), 0)
            assert cycles.gradient_norm_history[-1] < cycles.gradient_threshold

            for sc0, sc1 in zip(self.xray_structure.scatterers(),
                                xs.scatterers()):
                assert approx_equal(sc0.u_star, sc1.u_star)
        except RuntimeError, err:
            import re
            m = re.search(
                r'^cctbx::adptbx::debye_waller_factor_exp: \s* arg_limit \s+ exceeded'
                '.* arg \s* = \s* ([\d.eE+-]+)', str(err), re.X)
            assert m is not None, err
            print "Warning: refinement of ADP's diverged"
            print '         argument to debye_waller_factor_exp reached %s' % m.group(
                1)
            print 'Here is the failing structure'
            xs.show_summary()
            xs.show_scatterers()
            raise self.refinement_diverged()
Code Example #13
File: tst_masks.py Project: cctbx/cctbx-playground
def exercise_least_squares(xray_structure, fo_sq, mask=None):
  from smtbx.refinement import least_squares
  fo_sq = fo_sq.customized_copy(sigmas=flex.double(fo_sq.size(),1.))
  xs = xray_structure.deep_copy_scatterers()
  if mask is not None:
    f_mask = mask.f_mask()
  else:
    f_mask = None
  connectivity_table = smtbx.utils.connectivity_table(xs)
  reparametrisation = constraints.reparametrisation(
    structure=xs,
    constraints=[],
    connectivity_table=connectivity_table)
  obs = fo_sq.as_xray_observations()
  ls = least_squares.crystallographic_ls(
    obs,
    reparametrisation,
    f_mask=f_mask,
    weighting_scheme="default")
  cycles = normal_eqns_solving.naive_iterations(ls,
                                                n_max_iterations=3)
  return xs
Code Example #14
  def exercise_ls_cycles(self):
    xs = self.xray_structure.deep_copy_scatterers()
    xs.shake_adp() # it must happen before the reparametrisation is constructed
                   # because the ADP values are read then and only then.
    connectivity_table = smtbx.utils.connectivity_table(xs)
    reparametrisation = constraints.reparametrisation(
      structure=xs,
      constraints=[],
      connectivity_table=connectivity_table)
    ls = least_squares.crystallographic_ls(
      self.fo_sq.as_xray_observations(), reparametrisation,
      weighting_scheme=least_squares.mainstream_shelx_weighting(a=0),
      origin_fixing_restraints_type=oop.null())

    try:
      cycles = normal_eqns_solving.naive_iterations(
        ls,
        gradient_threshold=1e-12,
        track_all=True)

      assert approx_equal(ls.scale_factor(), 1, eps=1e-4)
      assert approx_equal(ls.objective(), 0)
      assert cycles.gradient_norm_history[-1] < cycles.gradient_threshold

      for sc0, sc1 in zip(self.xray_structure.scatterers(), xs.scatterers()):
        assert approx_equal(sc0.u_star, sc1.u_star)
    except RuntimeError, err:
      import re
      m = re.search(
        r'^cctbx::adptbx::debye_waller_factor_exp: \s* arg_limit \s+ exceeded'
        '.* arg \s* = \s* ([\d.eE+-]+)', str(err), re.X)
      assert m is not None, err
      print "Warning: refinement of ADP's diverged"
      print '         argument to debye_waller_factor_exp reached %s' % m.group(1)
      print 'Here is the failing structure'
      xs.show_summary()
      xs.show_scatterers()
      raise self.refinement_diverged()
Code Example #15
def run():
  import libtbx.utils
  libtbx.utils.show_times_at_exit()
  import sys
  from libtbx.option_parser import option_parser
  command_line = (option_parser()
    .option(None, "--normal_eqns_solving_method",
            default='naive')
    .option(None, "--fix_random_seeds",
            action='store_true',
            default=False)
  ).process(args=sys.argv[1:])
  opts = command_line.options
  if opts.fix_random_seeds:
    import random
    random.seed(1)
    flex.set_random_seed(1)
  gradient_threshold=1e-8
  step_threshold=1e-8
  if opts.normal_eqns_solving_method == 'naive':
    m = lambda eqns: normal_eqns_solving.naive_iterations(
      eqns,
      gradient_threshold=gradient_threshold,
      step_threshold=step_threshold)
  elif opts.normal_eqns_solving_method == 'levenberg-marquardt':
    m = lambda eqns: normal_eqns_solving.levenberg_marquardt_iterations(
      eqns,
      gradient_threshold=gradient_threshold,
      step_threshold=step_threshold,
      tau=1e-7)
  else:
    raise RuntimeError("Unknown method %s" % opts.normal_eqns_solving_method)
  for t in [
    saturated_test_case(m),
    sucrose_test_case(m),
    symmetry_equivalent_test_case(m),
    ]:
    t.run()
Code Example #16
def exercise_restrained_refinement(options):
    import random
    random.seed(1)
    flex.set_random_seed(1)
    xs0 = smtbx.development.random_xray_structure(
        sgtbx.space_group_info('P1'),
        n_scatterers=options.n_scatterers,
        elements="random")
    for sc in xs0.scatterers():
        sc.flags.set_grad_site(True)
    sc0 = xs0.scatterers()
    uc = xs0.unit_cell()

    mi = xs0.build_miller_set(anomalous_flag=False, d_min=options.resolution)
    fo_sq = mi.structure_factors_from_scatterers(
        xs0, algorithm="direct").f_calc().norm()
    fo_sq = fo_sq.customized_copy(sigmas=flex.double(fo_sq.size(), 1))

    i, j, k, l = random.sample(xrange(options.n_scatterers), 4)
    bond_proxies = geometry_restraints.shared_bond_simple_proxy()
    w = 1e9
    d_ij = uc.distance(sc0[i].site, sc0[j].site) * 0.8
    bond_proxies.append(
        geom.bond_simple_proxy(i_seqs=(i, j), distance_ideal=d_ij, weight=w))
    d_jk = uc.distance(sc0[j].site, sc0[k].site) * 0.85
    bond_proxies.append(
        geom.bond_simple_proxy(i_seqs=(j, k), distance_ideal=d_jk, weight=w))
    d_ki = min(
        uc.distance(sc0[k].site, sc0[i].site) * 0.9, (d_ij + d_jk) * 0.8)
    bond_proxies.append(
        geom.bond_simple_proxy(i_seqs=(k, i), distance_ideal=d_ki, weight=w))
    d_jl = uc.distance(sc0[j].site, sc0[l].site) * 0.9
    bond_proxies.append(
        geom.bond_simple_proxy(i_seqs=(j, l), distance_ideal=d_jl, weight=w))
    d_lk = min(
        uc.distance(sc0[l].site, sc0[k].site) * 0.8, 0.75 * (d_jk + d_jl))
    bond_proxies.append(
        geom.bond_simple_proxy(i_seqs=(l, k), distance_ideal=d_lk, weight=w))
    restraints_manager = restraints.manager(bond_proxies=bond_proxies)

    xs1 = xs0.deep_copy_scatterers()
    xs1.shake_sites_in_place(rms_difference=0.1)

    def ls_problem():
        xs = xs1.deep_copy_scatterers()
        reparametrisation = constraints.reparametrisation(
            structure=xs,
            constraints=[],
            connectivity_table=smtbx.utils.connectivity_table(xs),
            temperature=20)
        return least_squares.crystallographic_ls(
            fo_sq.as_xray_observations(),
            reparametrisation=reparametrisation,
            restraints_manager=restraints_manager)

    gradient_threshold, step_threshold = 1e-6, 1e-6
    eps = 5e-3

    ls = ls_problem()
    t = wall_clock_time()
    cycles = normal_eqns_solving.naive_iterations(
        ls,
        gradient_threshold=gradient_threshold,
        step_threshold=step_threshold,
        track_all=True)
    if options.verbose:
        print "%i %s steps in %.6f s" % (cycles.n_iterations, cycles,
                                         t.elapsed())
    sc = ls.xray_structure.scatterers()
    for p in bond_proxies:
        d = uc.distance(*[sc[i_pair].site for i_pair in p.i_seqs])
        assert approx_equal(d, p.distance_ideal, eps)

    ls = ls_problem()
    t = wall_clock_time()
    cycles = normal_eqns_solving.levenberg_marquardt_iterations(
        ls,
        gradient_threshold=gradient_threshold,
        step_threshold=step_threshold,
        tau=1e-3,
        track_all=True)
    if options.verbose:
        print "%i %s steps in %.6f s" % (cycles.n_iterations, cycles,
                                         t.elapsed())
    sc = ls.xray_structure.scatterers()
    for p in bond_proxies:
        d = uc.distance(*[sc[i].site for i in p.i_seqs])
        assert approx_equal(d, p.distance_ideal, eps)
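
Note: this example feeds a restraints.manager of bond_simple_proxy objects, each with an ideal distance and a large weight, into crystallographic_ls. Conceptually, such a harmonic restraint just contributes one extra weighted residual and one extra Jacobian row to the least-squares problem. A rough NumPy sketch of that idea, not the smtbx restraints machinery:

import numpy as np

# Illustrative only: each bond restraint appends one weighted residual
# sqrt(w) * (d_model - d_ideal), plus the matching row of derivatives, to the
# data residuals and Jacobian.

def append_bond_restraint(r, J, sites, i, j, d_ideal, weight):
    # sites: (n_atoms, 3) Cartesian coordinates; the refined parameters are
    # assumed to be the flattened site coordinates, so J has 3*n_atoms columns.
    diff = sites[i] - sites[j]
    d_model = float(np.linalg.norm(diff))
    grad = diff / d_model                  # d(d_model)/d(sites[i]); opposite sign for j
    row = np.zeros(J.shape[1])
    row[3 * i:3 * i + 3] = np.sqrt(weight) * grad
    row[3 * j:3 * j + 3] = -np.sqrt(weight) * grad
    r_new = np.append(r, np.sqrt(weight) * (d_model - d_ideal))
    J_new = np.vstack([J, row])
    return r_new, J_new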
Code Example #17
  def exercise(self):
    xs0 = self.structure
    xs = xs0.deep_copy_scatterers()
    xs.shake_sites_in_place(rms_difference=0.15)
    xs.shake_adp()

    for sc in xs.scatterers():
      sc.flags.set_use_u_iso(False).set_use_u_aniso(True)
      sc.flags.set_grad_site(True).set_grad_u_aniso(True)
    connectivity_table = smtbx.utils.connectivity_table(xs)
    if self.twin_fractions[0] < 0.5:
      twin_fractions = self.twin_fractions.deep_copy() + 0.1
    else:
      twin_fractions = self.twin_fractions.deep_copy() - 0.1
    twin_components = tuple(
      [xray.twin_component(law, fraction, grad=True)
       for law, fraction in zip(self.twin_laws, twin_fractions)])
    reparametrisation = constraints.reparametrisation(
      structure=xs,
      constraints=[],
      connectivity_table=connectivity_table,
      twin_fractions=twin_components)
    obs = self.fo_sq.as_xray_observations(twin_components=twin_components)
    normal_eqns = least_squares.crystallographic_ls(
      obs, reparametrisation,
      weighting_scheme=least_squares.unit_weighting(),
      origin_fixing_restraints_type=
      origin_fixing_restraints.atomic_number_weighting)
    cycles = normal_eqns_solving.naive_iterations(
      normal_eqns,
      n_max_iterations=10,
      track_all=True)
    assert approx_equal(
      [twin.value for twin in normal_eqns.twin_fractions],
      self.twin_fractions, eps=1e-2)
    assert approx_equal(normal_eqns.objective(), 0, eps=1e-5)
    assert normal_eqns.n_parameters == 64
    # now with fixed twin fraction
    xs.shake_sites_in_place(rms_difference=0.15)
    xs.shake_adp()
    twin_components = tuple(
      [xray.twin_component(law, fraction, grad=False)
       for law, fraction in zip(self.twin_laws, twin_fractions)])
    #change the twin_components of the observations...
    obs = observations.customized_copy(obs, twin_components=twin_components)
    reparametrisation = constraints.reparametrisation(
      structure=xs,
      constraints=[],
      connectivity_table=connectivity_table,
      twin_fractions=twin_components)
    normal_eqns = least_squares.crystallographic_ls(
      obs, reparametrisation,
      weighting_scheme=least_squares.unit_weighting(),
      origin_fixing_restraints_type=
      origin_fixing_restraints.atomic_number_weighting)
    cycles = normal_eqns_solving.naive_iterations(
      normal_eqns,
      n_max_iterations=10,
      track_all=True)
    assert approx_equal(
      [twin.value for twin in normal_eqns.twin_fractions],
      twin_fractions)
    assert normal_eqns.objective() != 0 # since the twin fraction is not refined
    assert normal_eqns.n_parameters == 63
Code Example #18
def exercise_restrained_refinement(options):
  import random
  random.seed(1)
  flex.set_random_seed(1)
  xs0 = smtbx.development.random_xray_structure(
    sgtbx.space_group_info('P1'),
    n_scatterers=options.n_scatterers,
    elements="random")
  for sc in xs0.scatterers():
    sc.flags.set_grad_site(True)
  sc0 = xs0.scatterers()
  uc = xs0.unit_cell()

  mi = xs0.build_miller_set(anomalous_flag=False, d_min=options.resolution)
  fo_sq = mi.structure_factors_from_scatterers(
    xs0, algorithm="direct").f_calc().norm()
  fo_sq = fo_sq.customized_copy(sigmas=flex.double(fo_sq.size(), 1))

  i, j, k, l = random.sample(xrange(options.n_scatterers), 4)
  bond_proxies = geometry_restraints.shared_bond_simple_proxy()
  w = 1e9
  d_ij = uc.distance(sc0[i].site, sc0[j].site)*0.8
  bond_proxies.append(geom.bond_simple_proxy(
    i_seqs=(i, j),
    distance_ideal=d_ij,
    weight=w))
  d_jk = uc.distance(sc0[j].site, sc0[k].site)*0.85
  bond_proxies.append(geom.bond_simple_proxy(
    i_seqs=(j, k),
    distance_ideal=d_jk,
    weight=w))
  d_ki = min(uc.distance(sc0[k].site, sc0[i].site)*0.9, (d_ij + d_jk)*0.8)
  bond_proxies.append(geom.bond_simple_proxy(
    i_seqs=(k, i),
    distance_ideal=d_ki,
    weight=w))
  d_jl = uc.distance(sc0[j].site, sc0[l].site)*0.9
  bond_proxies.append(geom.bond_simple_proxy(
    i_seqs=(j, l),
    distance_ideal=d_jl,
    weight=w))
  d_lk = min(uc.distance(sc0[l].site, sc0[k].site)*0.8, 0.75*(d_jk + d_jl))
  bond_proxies.append(geom.bond_simple_proxy(
    i_seqs=(l, k),
    distance_ideal=d_lk,
    weight=w))
  restraints_manager = restraints.manager(bond_proxies=bond_proxies)

  xs1 = xs0.deep_copy_scatterers()
  xs1.shake_sites_in_place(rms_difference=0.1)

  def ls_problem():
    xs = xs1.deep_copy_scatterers()
    reparametrisation = constraints.reparametrisation(
      structure=xs,
      constraints=[],
      connectivity_table=smtbx.utils.connectivity_table(xs),
      temperature=20)
    return least_squares.crystallographic_ls(
      fo_sq.as_xray_observations(),
      reparametrisation=reparametrisation,
      restraints_manager=restraints_manager)

  gradient_threshold, step_threshold = 1e-6, 1e-6
  eps = 5e-3

  ls = ls_problem()
  t = wall_clock_time()
  cycles = normal_eqns_solving.naive_iterations(
    ls,
    gradient_threshold=gradient_threshold,
    step_threshold=step_threshold,
    track_all=True)
  if options.verbose:
    print "%i %s steps in %.6f s" % (cycles.n_iterations, cycles, t.elapsed())
  sc = ls.xray_structure.scatterers()
  for p in bond_proxies:
    d = uc.distance(*[ sc[i_pair].site for i_pair in p.i_seqs ])
    assert approx_equal(d, p.distance_ideal, eps)

  ls = ls_problem()
  t = wall_clock_time()
  cycles = normal_eqns_solving.levenberg_marquardt_iterations(
    ls,
    gradient_threshold=gradient_threshold,
    step_threshold=step_threshold,
    tau=1e-3,
    track_all=True)
  if options.verbose:
    print "%i %s steps in %.6f s" % (cycles.n_iterations, cycles, t.elapsed())
  sc = ls.xray_structure.scatterers()
  for p in bond_proxies:
    d = uc.distance(*[ sc[i].site for i in p.i_seqs ])
    assert approx_equal(d, p.distance_ideal, eps)
Code Example #19
  def refine_rotx_roty2(OO,enable_rotational_target=True):

      helper = OO.per_frame_helper_factory()
      helper.restart()

      if enable_rotational_target:
        print "Trying least squares minimization of excursions",
        from scitbx.lstbx import normal_eqns_solving
        iterations = normal_eqns_solving.naive_iterations(
          non_linear_ls = helper,
          gradient_threshold = 1.E-10)

      results =  helper.x

      print "with %d reflections"%len(OO.parent.indexed_pairs),
      print "result %6.2f degrees"%(results[1]*180./math.pi),
      print "result %6.2f degrees"%(results[0]*180./math.pi)

      if False: # Excursion histogram
        print "The input mosaicity is %7.3f deg full width"%OO.parent.inputai.getMosaicity()
        # final histogram
        if OO.pvr_fix:
          final = 360.* helper.fvec_callable_pvr(results)
        else:
          final = 360.* helper.fvec_callable_NOT_USED_AFTER_BUGFIX(results)

        rmsdexc = math.sqrt(flex.mean(final*final))
        from matplotlib import pyplot as plt
        nbins = len(final)//20
        n,bins,patches = plt.hist(final,
          nbins, normed=0, facecolor="orange", alpha=0.75)
        plt.xlabel("Rotation on e1 axis, rmsd %7.3f deg"%rmsdexc)
        plt.title("Histogram of cctbx.xfel misorientation")
        plt.axis([-0.5,0.5,0,100])
        plt.plot([rmsdexc],[18],"b|")
        plt.show()

      # Determine optimal mosaicity and domain size model (monochromatic)
      if OO.pvr_fix:
        final = 360.* helper.fvec_callable_pvr(results)
      else:
        final = 360.* helper.fvec_callable_NOT_USED_AFTER_BUGFIX(results)
      #Guard against misindexing -- seen in simulated data, with zone nearly perfectly aligned
      guard_stats = flex.max(final), flex.min(final)
      if False and REMOVETEST_KILLING_LEGITIMATE_EXCURSIONS (guard_stats[0] > 2.0 or guard_stats[1] < -2.0):
        raise Exception("Misindexing diagnosed by meaningless excursion angle (bandpass_gaussian model)");
      print "The mean excursion is %7.3f degrees"%(flex.mean(final))

      two_thetas = helper.last_set_orientation.unit_cell().two_theta(OO.reserve_indices,OO.central_wavelength_ang,deg=True)
      dspacings = helper.last_set_orientation.unit_cell().d(OO.reserve_indices)
      dspace_sq = dspacings * dspacings
      excursion_rad = final * math.pi/ 180.

      #  First -- try to get a reasonable envelope for the observed excursions.
          ## minimum of three regions; maximum of 50 measurements in each bin
      print "fitting parameters on %d spots"%len(excursion_rad)
      n_bins = min(max(3, len(excursion_rad)//25),50)
      bin_sz = len(excursion_rad)//n_bins
      print "nbins",n_bins,"bin_sz",bin_sz
      order = flex.sort_permutation(two_thetas)
      two_thetas_env = flex.double()
      dspacings_env = flex.double()
      excursion_rads_env = flex.double()
      for x in xrange(0,n_bins):
        subset = order[x*bin_sz:(x+1)*bin_sz]
        two_thetas_env.append( flex.mean(two_thetas.select(subset)) )
        dspacings_env.append( flex.mean(dspacings.select(subset)))
        excursion_rads_env.append( flex.max( flex.abs( excursion_rad.select(subset))))

      #  Second -- parameter fit
          ## solve the normal equations
      sum_inv_u_sq = flex.sum(dspacings_env * dspacings_env)
      sum_inv_u    = flex.sum(dspacings_env)
      sum_te_u     = flex.sum(dspacings_env * excursion_rads_env)
      sum_te       = flex.sum(excursion_rads_env)
      Normal_Mat   = sqr((sum_inv_u_sq, sum_inv_u, sum_inv_u, len(dspacings_env)))
      Vector       = col((sum_te_u, sum_te))
      solution     = Normal_Mat.inverse() * Vector
      s_ang = 1./(2*solution[0])
      print "Best LSQ fit Scheerer domain size is %9.2f ang"%(
        s_ang)
      tan_phi_rad = helper.last_set_orientation.unit_cell().d(OO.reserve_indices) / (2. * s_ang)
      tan_phi_deg = tan_phi_rad * 180./math.pi
      k_degrees = solution[1]* 180./math.pi
      print "The LSQ full mosaicity is %8.5f deg; half-mosaicity %9.5f"%(2*k_degrees, k_degrees)
      tan_outer_deg = tan_phi_deg + k_degrees

      if OO.mosaic_refinement_target=="ML":
        from xfel.mono_simulation.max_like import minimizer
        print "input", s_ang,2. * solution[1]*180/math.pi
        # coerce the estimates to be positive for max-likelihood
        lower_limit_domain_size = math.pow(
         helper.last_set_orientation.unit_cell().volume(),
         1./3.)*20 # 10-unit cell block size minimum reasonable domain

        d_estimate = max(s_ang, lower_limit_domain_size)
        M = minimizer(d_i = dspacings, psi_i = excursion_rad, eta_rad = abs(2. * solution[1]),
                      Deff = d_estimate)
        print "output",1./M.x[0], M.x[1]*180./math.pi
        tan_phi_rad_ML = helper.last_set_orientation.unit_cell().d(OO.reserve_indices) / (2. / M.x[0])
        tan_phi_deg_ML = tan_phi_rad_ML * 180./math.pi
        # bugfix: Need factor of 0.5 because the plot shows half mosaicity (displacement from the center point defined as zero)
        tan_outer_deg_ML = tan_phi_deg_ML + 0.5*M.x[1]*180./math.pi

      if OO.parent.horizons_phil.integration.mosaic.enable_polychromatic:
        # add code here to perform polychromatic modeling.
        """
        get miller indices DONE
        get model-predicted mono-wavelength centroid S1 vectors
        back-convert S1vec, with mono-wavelength, to detector-plane position, factoring in subpixel correction
        compare with spot centroid measured position
        compare with locus of bodypixels
        """
        print list(OO.reserve_indices)
        print len(OO.reserve_indices), len(two_thetas)
        positions = [
              OO.ucbp3.simple_forward_calculation_spot_position(
              wavelength = OO.central_wavelength_ang,
              observation_no = obsno).position
              for obsno in xrange(len(OO.parent.indexed_pairs))]
        print len(positions)
        print positions # model-predicted positions
        print len(OO.parent.spots)
        print OO.parent.indexed_pairs
        print OO.parent.spots
        print len(OO.parent.spots)
        meas_spots = [OO.parent.spots[pair["spot"]] for pair in OO.parent.indexed_pairs]
  #      for xspot in meas_spots:
  #        xspot.ctr_mass_x(),xspot.ctr_mass_y()
  #        xspot.max_pxl_x()
  #        xspot.bodypixels
  #        xspot.ctr_mass_x()

        # Do some work to calculate an rmsd
        diff_vecs = flex.vec3_double()
        for p,xspot in zip(positions, meas_spots):
          diff_vecs.append((p[0]-xspot.ctr_mass_y(), p[1]-xspot.ctr_mass_x(), 0.0))
        # could use diff_vecs.rms_length()
        diff_vecs_sq = diff_vecs.dot(diff_vecs)
        mean_diff_vec_sq = flex.mean(diff_vecs_sq)
        rmsd = math.sqrt(mean_diff_vec_sq)
        print "mean obs-pred diff vec on %d spots is %6.2f pixels"%(len(positions),rmsd)

        positions_to_fictitious = [
              OO.ucbp3.simple_forward_calculation_spot_position(
              wavelength = OO.central_wavelength_ang,
              observation_no = obsno).position_to_fictitious
              for obsno in xrange(len(OO.parent.indexed_pairs))]
        # Do some work to calculate an rmsd
        diff_vecs = flex.vec3_double()
        for p,xspot in zip(positions_to_fictitious, meas_spots):
          diff_vecs.append((p[0]-xspot.ctr_mass_y(), p[1]-xspot.ctr_mass_x(), 0.0))
        rmsd = diff_vecs.rms_length()
        print "mean obs-pred_to_fictitious diff vec on %d spots is %6.2f pixels"%(len(positions),rmsd)

        """
        actually, it might be better if the entire set of experimental observations
        is transformed into the ideal detector plane, for the purposes of poly_treatment.


        start here.  Now it would be good to actually implement probability of observing a body pixel given the model.
        We have everything needed right here.
        """
        if OO.parent.horizons_phil.integration.mosaic.enable_AD14F7B:
          # Image plot: obs and predicted positions + bodypixels
          from matplotlib import pyplot as plt
          plt.plot( [p[0] for p in positions_to_fictitious], [p[1] for p in positions_to_fictitious], "r.")
          plt.plot( [xspot.ctr_mass_y() for xspot in meas_spots],
                    [xspot.ctr_mass_x() for xspot in meas_spots], "g.")
          bodypx = []
          for xspot in meas_spots:
            for body in xspot.bodypixels:
              bodypx.append(body)
          plt.plot( [b.y for b in bodypx], [b.x for b in bodypx], "b.")
          plt.axes().set_aspect("equal")
          plt.show()

      print "MEAN excursion",flex.mean(final),
      if OO.mosaic_refinement_target=="ML":
        print "mosaicity deg FW=",M.x[1]*180./math.pi
      else:
        print
      if OO.parent.horizons_phil.integration.mosaic.enable_AD14F7B: # Excursion vs resolution fit
        AD1TF7B_MAX2T = 30.
        AD1TF7B_MAXDP = 1.
        from matplotlib import pyplot as plt
        fig = plt.figure()
        plt.plot(two_thetas, final, "bo")
        mean = flex.mean(final)
        minplot = flex.min(two_thetas)
        plt.plot([0,minplot],[mean,mean],"k-")
        LR = flex.linear_regression(two_thetas, final)
        #LR.show_summary()
        model_y = LR.slope()*two_thetas + LR.y_intercept()
        plt.plot(two_thetas, model_y, "k-")
        print helper.last_set_orientation.unit_cell()
        #for sdp,tw in zip (dspacings,two_thetas):
          #print sdp,tw
        if OO.mosaic_refinement_target=="ML":
          plt.title("ML: mosaicity FW=%4.2f deg, Dsize=%5.0fA on %d spots"%(M.x[1]*180./math.pi, 2./M.x[0], len(two_thetas)))
          plt.plot(two_thetas, tan_phi_deg_ML, "r.")
          plt.plot(two_thetas, -tan_phi_deg_ML, "r.")
          plt.plot(two_thetas, tan_outer_deg_ML, "g.")
          plt.plot(two_thetas, -tan_outer_deg_ML, "g.")
        else:
          plt.plot(two_thetas_env, excursion_rads_env *180./math.pi, "r|")
          plt.plot(two_thetas_env, -excursion_rads_env *180./math.pi, "r|")
          plt.plot(two_thetas_env, excursion_rads_env *180./math.pi, "r-")
          plt.plot(two_thetas_env, -excursion_rads_env *180./math.pi, "r-")
          plt.plot(two_thetas, tan_phi_deg, "r.")
          plt.plot(two_thetas, -tan_phi_deg, "r.")
          plt.plot(two_thetas, tan_outer_deg, "g.")
          plt.plot(two_thetas, -tan_outer_deg, "g.")
        plt.xlim([0,AD1TF7B_MAX2T])
        plt.ylim([-AD1TF7B_MAXDP,AD1TF7B_MAXDP])
        OO.parent.show_figure(plt,fig,"psi")
        plt.close()

      from xfel.mono_simulation.util import green_curve_area,ewald_proximal_volume
      if OO.mosaic_refinement_target=="ML":
        OO.parent.green_curve_area = green_curve_area(two_thetas, tan_outer_deg_ML)
        OO.parent.inputai.setMosaicity(M.x[1]*180./math.pi) # full width, degrees
        OO.parent.ML_half_mosaicity_deg = M.x[1]*180./(2.*math.pi)
        OO.parent.ML_domain_size_ang = 1./M.x[0]
        OO.parent.ewald_proximal_volume = ewald_proximal_volume(
            wavelength_ang = OO.central_wavelength_ang,
            resolution_cutoff_ang = OO.parent.horizons_phil.integration.mosaic.ewald_proximal_volume_resolution_cutoff,
            domain_size_ang = 1./M.x[0],
            full_mosaicity_rad = M.x[1])
        return results, helper.last_set_orientation,1./M.x[0] # full width domain size, angstroms
      else:
        assert OO.mosaic_refinement_target=="LSQ"
        OO.parent.green_curve_area = green_curve_area(two_thetas, tan_outer_deg)
        OO.parent.inputai.setMosaicity(2*k_degrees) # full width
        OO.parent.ML_half_mosaicity_deg = k_degrees
        OO.parent.ML_domain_size_ang = s_ang
        OO.parent.ewald_proximal_volume = ewald_proximal_volume(
            wavelength_ang = OO.central_wavelength_ang,
            resolution_cutoff_ang = OO.parent.horizons_phil.integration.mosaic.ewald_proximal_volume_resolution_cutoff,
            domain_size_ang = s_ang,
            full_mosaicity_rad = 2*k_degrees*math.pi/180.)
        return results, helper.last_set_orientation,s_ang # full width domain size, angstroms
Code Example #20
  def exercise(self):
    xs0 = self.structure
    xs = xs0.deep_copy_scatterers()
    k1, s1, li1, o1, o2 = xs.scatterers()
    self.shake_point_group_3(k1)
    self.shake_point_group_3(s1)
    self.shake_point_group_3(li1)
    self.shake_point_group_3(o1)
    o2.site = tuple(
      [ x*(1 + random.uniform(-self.delta_site, self.delta_site))
        for x in o2.site])
    o2.u_star = tuple(
      [ u*(1 + random.uniform(-self.delta_u_star, self.delta_u_star))
        for u in o2.u_star])

    for sc in xs.scatterers():
      sc.flags.set_use_u_iso(False).set_use_u_aniso(True)
      sc.flags.set_grad_site(True).set_grad_u_aniso(True)
    connectivity_table = smtbx.utils.connectivity_table(xs)
    reparametrisation = constraints.reparametrisation(
      structure=xs,
      constraints=[],
      connectivity_table=connectivity_table)
    ls = least_squares.crystallographic_ls(
      self.fo_sq.as_xray_observations(), reparametrisation,
      weighting_scheme=least_squares.unit_weighting(),
      origin_fixing_restraints_type=
      origin_fixing_restraints.atomic_number_weighting)

    cycles = normal_eqns_solving.naive_iterations(
      ls,
      gradient_threshold=1e-6,
      step_threshold=1e-6,
      track_all=True)

    ## Test whether refinement brought back the shaken structure to its
    ## original state
    match = emma.model_matches(xs0.as_emma_model(),
                               xs.as_emma_model()).refined_matches[0]
    assert match.rt.r == matrix.identity(3)
    assert not match.singles1 and not match.singles2
    assert match.rms < 1e-6

    delta_u_carts= (   xs.scatterers().extract_u_cart(xs.unit_cell())
                    - xs0.scatterers().extract_u_cart(xs.unit_cell())).norms()
    assert flex.abs(delta_u_carts) < 1e-6

    assert approx_equal(ls.scale_factor(), 1, eps=1e-4)

    ## Test covariance matrix
    jac_tr = reparametrisation.jacobian_transpose_matching_grad_fc()
    cov = ls.covariance_matrix(
      jacobian_transpose=jac_tr, normalised_by_goof=False)\
        .matrix_packed_u_as_symmetric()
    m, n = cov.accessor().focus()
    # x,y for point group 3 sites are fixed: no variance or correlation
    for i in (0, 9, 18, 27,):
      assert cov.matrix_copy_block(i, 0, 2, n) == 0

    # u_star coefficients u13 and u23 for point group 3 sites are fixed
    # to 0: again no variance or correlation with any other param
    for i in (7, 16, 25, 34,):
      assert cov.matrix_copy_block(i, 0, 2, n).as_1d()\
             .all_approx_equal(0., 1e-20)

    # u_star coefficients u11, u22 and u12 for point group 3 sites
    # are totally correlated, with variances in ratios 1:1:1/2
    for i in (3, 12, 21, 30,):
      assert cov[i, i] != 0
      assert approx_equal(cov[i, i], cov[i+1, i+1], eps=1e-15)
      assert approx_equal(cov[i, i+1]/cov[i, i], 1, eps=1e-12)
      assert approx_equal(cov[i, i+3]/cov[i, i], 0.5, eps=1e-12)
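
Note: the covariance checks above come down to the textbook estimate cov(p) ~ goof^2 * inverse(J^T J), with the jacobian_transpose argument supplying the reparametrisation's chain-rule factor and normalised_by_goof=False presumably dropping the goof^2 scaling. A plain NumPy sketch of that relation, assuming unit weights and ignoring the reparametrisation step:

import numpy as np

# Illustrative only: covariance of least-squares parameters from the inverse
# normal matrix, optionally scaled by the goodness of fit (unit weights).

def covariance_estimate(J, r, normalised_by_goof=True):
    n_obs, n_params = J.shape
    cov = np.linalg.inv(J.T @ J)                     # inverse normal matrix
    if normalised_by_goof:
        goof_sq = float(r @ r) / (n_obs - n_params)  # reduced chi-square
        cov = goof_sq * cov
    return cov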
Code Example #21
    def refine_rotx_roty2(OO, enable_rotational_target=True):

        helper = OO.per_frame_helper_factory()
        helper.restart()

        if enable_rotational_target:
            print "Trying least squares minimization of excursions",
            from scitbx.lstbx import normal_eqns_solving
            iterations = normal_eqns_solving.naive_iterations(
                non_linear_ls=helper, gradient_threshold=1.E-10)

        results = helper.x

        print "with %d reflections" % len(OO.parent.indexed_pairs),
        print "result %6.2f degrees" % (results[1] * 180. / math.pi),
        print "result %6.2f degrees" % (results[0] * 180. / math.pi)

        if False:  # Excursion histogram
            print "The input mosaicity is %7.3f deg full width" % OO.parent.inputai.getMosaicity(
            )
            # final histogram
            if OO.pvr_fix:
                final = 360. * helper.fvec_callable_pvr(results)
            else:
                final = 360. * helper.fvec_callable_NOT_USED_AFTER_BUGFIX(
                    results)

            rmsdexc = math.sqrt(flex.mean(final * final))
            from matplotlib import pyplot as plt
            nbins = len(final) // 20
            n, bins, patches = plt.hist(final,
                                        nbins,
                                        normed=0,
                                        facecolor="orange",
                                        alpha=0.75)
            plt.xlabel("Rotation on e1 axis, rmsd %7.3f deg" % rmsdexc)
            plt.title("Histogram of cctbx.xfel misorientation")
            plt.axis([-0.5, 0.5, 0, 100])
            plt.plot([rmsdexc], [18], "b|")
            plt.show()

        # Determine optimal mosaicity and domain size model (monochromatic)
        if OO.pvr_fix:
            final = 360. * helper.fvec_callable_pvr(results)
        else:
            final = 360. * helper.fvec_callable_NOT_USED_AFTER_BUGFIX(results)
        #Guard against misindexing -- seen in simulated data, with zone nearly perfectly aligned
        guard_stats = flex.max(final), flex.min(final)
        if False and REMOVETEST_KILLING_LEGITIMATE_EXCURSIONS(
                guard_stats[0] > 2.0 or guard_stats[1] < -2.0):
            raise Exception(
                "Misindexing diagnosed by meaningless excursion angle (bandpass_gaussian model)"
            )
        print "The mean excursion is %7.3f degrees" % (flex.mean(final))

        two_thetas = helper.last_set_orientation.unit_cell().two_theta(
            OO.reserve_indices, OO.central_wavelength_ang, deg=True)
        dspacings = helper.last_set_orientation.unit_cell().d(
            OO.reserve_indices)
        dspace_sq = dspacings * dspacings
        excursion_rad = final * math.pi / 180.

        #  First -- try to get a reasonable envelope for the observed excursions.
        ## minimum of three regions; maximum of 50 measurements in each bin
        print "fitting parameters on %d spots" % len(excursion_rad)
        n_bins = min(max(3, len(excursion_rad) // 25), 50)
        bin_sz = len(excursion_rad) // n_bins
        print "nbins", n_bins, "bin_sz", bin_sz
        order = flex.sort_permutation(two_thetas)
        two_thetas_env = flex.double()
        dspacings_env = flex.double()
        excursion_rads_env = flex.double()
        for x in range(0, n_bins):
            subset = order[x * bin_sz:(x + 1) * bin_sz]
            two_thetas_env.append(flex.mean(two_thetas.select(subset)))
            dspacings_env.append(flex.mean(dspacings.select(subset)))
            excursion_rads_env.append(
                flex.max(flex.abs(excursion_rad.select(subset))))

        #  Second -- parameter fit
        ## solve the normal equations
        sum_inv_u_sq = flex.sum(dspacings_env * dspacings_env)
        sum_inv_u = flex.sum(dspacings_env)
        sum_te_u = flex.sum(dspacings_env * excursion_rads_env)
        sum_te = flex.sum(excursion_rads_env)
        Normal_Mat = sqr(
            (sum_inv_u_sq, sum_inv_u, sum_inv_u, len(dspacings_env)))
        Vector = col((sum_te_u, sum_te))
        solution = Normal_Mat.inverse() * Vector
        s_ang = 1. / (2 * solution[0])
        print "Best LSQ fit Scheerer domain size is %9.2f ang" % (s_ang)
        tan_phi_rad = helper.last_set_orientation.unit_cell().d(
            OO.reserve_indices) / (2. * s_ang)
        tan_phi_deg = tan_phi_rad * 180. / math.pi
        k_degrees = solution[1] * 180. / math.pi
        print "The LSQ full mosaicity is %8.5f deg; half-mosaicity %9.5f" % (
            2 * k_degrees, k_degrees)
        tan_outer_deg = tan_phi_deg + k_degrees

        if OO.mosaic_refinement_target == "ML":
            from xfel.mono_simulation.max_like import minimizer
            print "input", s_ang, 2. * solution[1] * 180 / math.pi
            # coerce the estimates to be positive for max-likelihood
            lower_limit_domain_size = math.pow(
                helper.last_set_orientation.unit_cell().volume(), 1. /
                3.) * 20  # 10-unit cell block size minimum reasonable domain

            d_estimate = max(s_ang, lower_limit_domain_size)
            M = minimizer(d_i=dspacings,
                          psi_i=excursion_rad,
                          eta_rad=abs(2. * solution[1]),
                          Deff=d_estimate)
            print "output", 1. / M.x[0], M.x[1] * 180. / math.pi
            tan_phi_rad_ML = helper.last_set_orientation.unit_cell().d(
                OO.reserve_indices) / (2. / M.x[0])
            tan_phi_deg_ML = tan_phi_rad_ML * 180. / math.pi
            # bugfix: Need factor of 0.5 because the plot shows half mosaicity (displacement from the center point defined as zero)
            tan_outer_deg_ML = tan_phi_deg_ML + 0.5 * M.x[1] * 180. / math.pi

        if OO.parent.horizons_phil.integration.mosaic.enable_polychromatic:
            # add code here to perform polychromatic modeling.
            """
        get miller indices DONE
        get model-predicted mono-wavelength centroid S1 vectors
        back-convert S1vec, with mono-wavelength, to detector-plane position, factoring in subpixel correction
        compare with spot centroid measured position
        compare with locus of bodypixels
        """
            print list(OO.reserve_indices)
            print len(OO.reserve_indices), len(two_thetas)
            positions = [
                OO.ucbp3.simple_forward_calculation_spot_position(
                    wavelength=OO.central_wavelength_ang,
                    observation_no=obsno).position
                for obsno in range(len(OO.parent.indexed_pairs))
            ]
            print len(positions)
            print positions  # model-predicted positions
            print len(OO.parent.spots)
            print OO.parent.indexed_pairs
            print OO.parent.spots
            print len(OO.parent.spots)
            meas_spots = [
                OO.parent.spots[pair["spot"]]
                for pair in OO.parent.indexed_pairs
            ]
            #      for xspot in meas_spots:
            #        xspot.ctr_mass_x(),xspot.ctr_mass_y()
            #        xspot.max_pxl_x()
            #        xspot.bodypixels
            #        xspot.ctr_mass_x()

            # Do some work to calculate an rmsd
            diff_vecs = flex.vec3_double()
            for p, xspot in zip(positions, meas_spots):
                diff_vecs.append((p[0] - xspot.ctr_mass_y(),
                                  p[1] - xspot.ctr_mass_x(), 0.0))
            # could use diff_vecs.rms_length()
            diff_vecs_sq = diff_vecs.dot(diff_vecs)
            mean_diff_vec_sq = flex.mean(diff_vecs_sq)
            rmsd = math.sqrt(mean_diff_vec_sq)
            print "mean obs-pred diff vec on %d spots is %6.2f pixels" % (
                len(positions), rmsd)

            positions_to_fictitious = [
                OO.ucbp3.simple_forward_calculation_spot_position(
                    wavelength=OO.central_wavelength_ang,
                    observation_no=obsno).position_to_fictitious
                for obsno in range(len(OO.parent.indexed_pairs))
            ]
            # Do some work to calculate an rmsd
            diff_vecs = flex.vec3_double()
            for p, xspot in zip(positions_to_fictitious, meas_spots):
                diff_vecs.append((p[0] - xspot.ctr_mass_y(),
                                  p[1] - xspot.ctr_mass_x(), 0.0))
            rmsd = diff_vecs.rms_length()
            print "mean obs-pred_to_fictitious diff vec on %d spots is %6.2f pixels" % (
                len(positions), rmsd)
            """
        actually, it might be better if the entire set of experimental observations
        is transformed into the ideal detector plane, for the purposes of poly_treatment.


        start here.  Now it would be good to actually implement probability of observing a body pixel given the model.
        We have everything needed right here.
        """
            if OO.parent.horizons_phil.integration.mosaic.enable_AD14F7B:
                # Image plot: obs and predicted positions + bodypixels
                from matplotlib import pyplot as plt
                plt.plot([p[0] for p in positions_to_fictitious],
                         [p[1] for p in positions_to_fictitious], "r.")
                plt.plot([xspot.ctr_mass_y() for xspot in meas_spots],
                         [xspot.ctr_mass_x() for xspot in meas_spots], "g.")
                bodypx = []
                for xspot in meas_spots:
                    for body in xspot.bodypixels:
                        bodypx.append(body)
                plt.plot([b.y for b in bodypx], [b.x for b in bodypx], "b.")
                plt.axes().set_aspect("equal")
                plt.show()

        print "MEAN excursion", flex.mean(final),
        if OO.mosaic_refinement_target == "ML":
            print "mosaicity deg FW=", M.x[1] * 180. / math.pi
        else:
            print
        if OO.parent.horizons_phil.integration.mosaic.enable_AD14F7B:  # Excursion vs resolution fit
            AD1TF7B_MAX2T = 30.
            AD1TF7B_MAXDP = 1.
            from matplotlib import pyplot as plt
            fig = plt.figure()
            plt.plot(two_thetas, final, "bo")
            mean = flex.mean(final)
            minplot = flex.min(two_thetas)
            plt.plot([0, minplot], [mean, mean], "k-")
            LR = flex.linear_regression(two_thetas, final)
            #LR.show_summary()
            model_y = LR.slope() * two_thetas + LR.y_intercept()
            plt.plot(two_thetas, model_y, "k-")
            print helper.last_set_orientation.unit_cell()
            #for sdp,tw in zip (dspacings,two_thetas):
            #print sdp,tw
            if OO.mosaic_refinement_target == "ML":
                plt.title(
                    "ML: mosaicity FW=%4.2f deg, Dsize=%5.0fA on %d spots" %
                    (M.x[1] * 180. / math.pi, 2. / M.x[0], len(two_thetas)))
                plt.plot(two_thetas, tan_phi_deg_ML, "r.")
                plt.plot(two_thetas, -tan_phi_deg_ML, "r.")
                plt.plot(two_thetas, tan_outer_deg_ML, "g.")
                plt.plot(two_thetas, -tan_outer_deg_ML, "g.")
            else:
                plt.plot(two_thetas_env, excursion_rads_env * 180. / math.pi,
                         "r|")
                plt.plot(two_thetas_env, -excursion_rads_env * 180. / math.pi,
                         "r|")
                plt.plot(two_thetas_env, excursion_rads_env * 180. / math.pi,
                         "r-")
                plt.plot(two_thetas_env, -excursion_rads_env * 180. / math.pi,
                         "r-")
                plt.plot(two_thetas, tan_phi_deg, "r.")
                plt.plot(two_thetas, -tan_phi_deg, "r.")
                plt.plot(two_thetas, tan_outer_deg, "g.")
                plt.plot(two_thetas, -tan_outer_deg, "g.")
            plt.xlim([0, AD1TF7B_MAX2T])
            plt.ylim([-AD1TF7B_MAXDP, AD1TF7B_MAXDP])
            OO.parent.show_figure(plt, fig, "psi")
            plt.close()

        from xfel.mono_simulation.util import green_curve_area, ewald_proximal_volume
        if OO.mosaic_refinement_target == "ML":
            OO.parent.green_curve_area = green_curve_area(
                two_thetas, tan_outer_deg_ML)
            OO.parent.inputai.setMosaicity(M.x[1] * 180. /
                                           math.pi)  # full width, degrees
            OO.parent.ML_half_mosaicity_deg = M.x[1] * 180. / (2. * math.pi)
            OO.parent.ML_domain_size_ang = 1. / M.x[0]
            OO.parent.ewald_proximal_volume = ewald_proximal_volume(
                wavelength_ang=OO.central_wavelength_ang,
                resolution_cutoff_ang=OO.parent.horizons_phil.integration.
                mosaic.ewald_proximal_volume_resolution_cutoff,
                domain_size_ang=1. / M.x[0],
                full_mosaicity_rad=M.x[1])
            return results, helper.last_set_orientation, 1. / M.x[
                0]  # full width domain size, angstroms
        else:
            assert OO.mosaic_refinement_target == "LSQ"
            OO.parent.green_curve_area = green_curve_area(
                two_thetas, tan_outer_deg)
            OO.parent.inputai.setMosaicity(2 * k_degrees)  # full width
            OO.parent.ML_half_mosaicity_deg = k_degrees
            OO.parent.ML_domain_size_ang = s_ang
            OO.parent.ewald_proximal_volume = ewald_proximal_volume(
                wavelength_ang=OO.central_wavelength_ang,
                resolution_cutoff_ang=OO.parent.horizons_phil.integration.
                mosaic.ewald_proximal_volume_resolution_cutoff,
                domain_size_ang=s_ang,
                full_mosaicity_rad=2 * k_degrees * math.pi / 180.)
            return results, helper.last_set_orientation, s_ang  # full width domain size, angstroms