Example #1
    def __init__(self,
                 uaniso,
                 x_initial,
                 adp_nma,
#                 weights,
                 n_modes,
                 zero_mode_flag,
                 max_iterations):
        adopt_init_args(self, locals())
#        assert self.uaniso.size() == self.weights.size()
        self.x     = self.x_initial
        self.x_min = self.x_initial
        self.n = self.x.size()
        t1 = time.time()
        self.minimizer = lbfgs.run(
                                  target_evaluator = self,
                                  termination_params = lbfgs.termination_parameters(
                                      max_iterations = max_iterations,
                                      max_calls = int(max_iterations*1.5)),
                                  exception_handling_params = 
                                  lbfgs.exception_handling_parameters(
                                      ignore_line_search_failed_step_at_lower_bound = False,
                                      ignore_line_search_failed_step_at_upper_bound = True,
                                      ignore_line_search_failed_maxfev              = False)
                                  )
        self.compute_functional_and_gradients()
        t2 = time.time()
        print(t2 - t1)  # elapsed wall-clock time for the minimization
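All of these constructors follow the same contract: the object passed as target_evaluator must expose a flex.double attribute x and a compute_functional_and_gradients() method returning the functional value and its gradient. A minimal, self-contained sketch of that pattern (a toy quadratic, not taken from any of the examples):

# Minimal sketch of the evaluator contract assumed by scitbx.lbfgs.run():
# a toy quadratic with its analytic gradient (illustrative only).
from scitbx.array_family import flex
import scitbx.lbfgs

class toy_quadratic(object):
  def __init__(self, n=5):
    self.x = flex.double(n, 10.0)                  # starting parameters
    self.minimizer = scitbx.lbfgs.run(target_evaluator=self)

  def compute_functional_and_gradients(self):
    d = self.x - 3.0                               # residuals against the target value 3
    return flex.sum(d * d), d * 2.0                # functional, gradient

tq = toy_quadratic()
print(list(tq.x))                                  # each entry converges to ~3.0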
Example #2
 def __init__(self,
              fmodels,
              constrained_groups_selections,
              selections,
              par_initial,
              max_number_of_iterations):
   adopt_init_args(self, locals())
   self.fmodels.create_target_functors()
   self.fmodels.prepare_target_functors_for_minimization()
   from phenix.refinement import weight_xray_chem
   self.weights = weight_xray_chem.weights(wx       = 1,
                                           wx_scale = 1,
                                           angle_x  = None,
                                           wn       = 1,
                                           wn_scale = 1,
                                           angle_n  = None,
                                           w        = 0,
                                           wxn      = 1) # XXX
   self.par_min = self.par_initial.deep_copy()
   self.x = self.pack(self.par_min)
   self.n = self.x.size()
   self.minimizer = lbfgs.run(
   target_evaluator = self,
   termination_params = lbfgs.termination_parameters(
     max_iterations = max_number_of_iterations),
   exception_handling_params = lbfgs.exception_handling_parameters(
     ignore_line_search_failed_step_at_lower_bound = True,
     ignore_line_search_failed_step_at_upper_bound = True))
   self.compute_functional_and_gradients()
   del self.x
Example #3
 def __init__(self,
              uiso,
              T_initial,
              L_initial,
              S_initial,
              refine_T,
              refine_L,
              refine_S,
              origin,
              sites,
              max_iterations):
   adopt_init_args(self, locals())
   assert uiso.size() == sites.size()
   self.dim_T = len(self.T_initial)
   self.dim_L = len(self.L_initial)
   self.dim_S = len(self.S_initial)
   assert self.dim_T == 1 and self.dim_S == 3 and self.dim_L == 6
   self.T_min = self.T_initial
   self.L_min = self.L_initial
   self.S_min = self.S_initial
   self.x = self.pack(self.T_min, self.L_min, self.S_min)
   self.n = self.x.size()
   self.minimizer = lbfgs.run(
     target_evaluator = self,
     termination_params = lbfgs.termination_parameters(
       max_iterations = max_iterations,
       max_calls      = int(max_iterations*1.5)),
       exception_handling_params =
       lbfgs.exception_handling_parameters(
         ignore_line_search_failed_step_at_lower_bound = True,
         ignore_line_search_failed_step_at_upper_bound = True,
         ignore_line_search_failed_maxfev              = True)
       )
   self.compute_functional_and_gradients()
   del self.x
Example #4
  def __init__(self, d_i, psi_i, eta_rad, Deff):
    import sys
    self.safelog = -1. + math.log(sys.float_info.max)
    self.S = StringIO.StringIO()
    pickle.dump([d_i, psi_i, eta_rad, Deff],self.S,0)
    assert len(d_i) == len(psi_i)
    self.d_i = d_i
    self.psi_i = psi_i
    self.Nobs = len(d_i)
    self.escalate = 10. # 10 is a soft switch; 50-100 a hard switch
    self.x = flex.double([log(2./Deff), log(eta_rad)]) # parameters alpha, eta
    self.minimizer = run(
      target_evaluator=self,
      core_params=core_parameters(
        gtol=0.1
        # increasing the accuracy of the line search technique (default=0.9)
        # as suggested by source code.  Otherwise Deff is set unreasonably high
        # and the exponential blows up.
      ),
      termination_params = termination_parameters(
        traditional_convergence_test=False,
        drop_convergence_test_max_drop_eps=1.e-5,
        min_iterations=0,
        max_iterations = 100,
        max_calls=200),
      exception_handling_params=exception_handling_parameters(
         ignore_line_search_failed_rounding_errors=True,
         ignore_line_search_failed_step_at_lower_bound=True,#the only change from default
         ignore_line_search_failed_step_at_upper_bound=False,
         ignore_line_search_failed_maxfev=False,
         ignore_line_search_failed_xtol=False,
         ignore_search_direction_not_descent=False)
      )

    self.x=flex.exp(self.x)
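Example #4 departs from the defaults in two places: core_parameters(gtol=0.1) tightens the line search, and the drop convergence test replaces the traditional one. A hedged, self-contained sketch (toy evaluator, illustrative values) of the same parameter objects, plus how the returned minimizer can be queried once run() comes back:

from scitbx.array_family import flex
from scitbx.lbfgs import (run, core_parameters, termination_parameters,
  exception_handling_parameters)

class toy(object):
  def __init__(self):
    self.x = flex.double([5.0, -3.0])
    self.minimizer = run(
      target_evaluator=self,
      core_params=core_parameters(gtol=0.1),          # tighter line search, as above
      termination_params=termination_parameters(
        traditional_convergence_test=False,
        drop_convergence_test_max_drop_eps=1.e-5,
        max_iterations=100,
        max_calls=200),
      exception_handling_params=exception_handling_parameters(
        ignore_line_search_failed_step_at_lower_bound=True))

  def compute_functional_and_gradients(self):
    return flex.sum(self.x * self.x), self.x * 2.0    # f = |x|^2, g = 2x

t = toy()
print(t.minimizer.iter(), t.minimizer.nfun())         # iterations and function evaluations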
Example #5
 def __init__(
       self,
       fmodel,
       sc_start,
       selections,
       par_initial,
       refine_adp,
       refine_occ,
       max_number_of_iterations,
       run_finite_differences_test = False,
       restraints_weight = None,
       restraints_manager = None):
   adopt_init_args(self, locals())
   self.target_functor = fmodel.target_functor()
   self.target_functor.prepare_for_minimization()
   self.counter=0
   assert len(self.selections) == len(self.par_initial)
   self.par_min = copy.deepcopy(self.par_initial)
   self.x = self.pack(self.par_min)
   self.n = self.x.size()
   self.weight = restraints_weight
   if(self.restraints_manager is not None and self.weight is None):
     gx = self.target_functor(
       compute_gradients=True).gradients_wrt_atomic_parameters(
         u_iso     = refine_adp,
         occupancy = refine_occ)
     rtg = self.restraints_manager.target_and_gradients(
       xray_structure    = self.fmodel.xray_structure,
       to_compute_weight = True)
     gx_norm = gx.norm()
     if(gx_norm != 0):
       self.weight = rtg.gradients.norm()/gx_norm
     else: self.weight = 1.0
   if(self.weight is not None):
     assert self.restraints_manager is not None
   if(run_finite_differences_test):
     self.buffer_ana = flex.double()
     self.buffer_fin = flex.double()
   self.minimizer = lbfgs.run(
     target_evaluator = self,
     termination_params = lbfgs.termination_parameters(
       max_iterations = max_number_of_iterations),
     exception_handling_params = lbfgs.exception_handling_parameters(
       ignore_line_search_failed_step_at_lower_bound = True,
       ignore_line_search_failed_step_at_upper_bound = True))
   self.compute_functional_and_gradients()
   del self.x
   self.tested = 0
   if(run_finite_differences_test):
     for a,f in zip(self.buffer_ana, self.buffer_fin):
        print(a, f)
     diff = flex.abs(self.buffer_ana - self.buffer_fin)
     s = diff < 1.e-3
     if(s.size()>0 and s.count(True)*100./s.size()>50):
       self.tested += 1
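The run_finite_differences_test branch above compares analytic gradients (buffer_ana) against finite-difference estimates (buffer_fin). A generic sketch of how such an estimate can be produced for any evaluator of this kind (central differences; the step size h is illustrative):

from scitbx.array_family import flex

def finite_difference_gradients(evaluator, h=1.e-5):
  # Central-difference estimate of d(f)/d(x[i]) for every parameter, to be
  # compared against the analytic gradient returned by the evaluator itself.
  g_fin = flex.double()
  for i in range(evaluator.x.size()):
    x_orig = evaluator.x[i]
    evaluator.x[i] = x_orig + h
    f_plus, _ = evaluator.compute_functional_and_gradients()
    evaluator.x[i] = x_orig - h
    f_minus, _ = evaluator.compute_functional_and_gradients()
    evaluator.x[i] = x_orig
    g_fin.append((f_plus - f_minus) / (2.0 * h))
  return g_fin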
Example #6
 def __init__(self,
              fmodel,
              tlsos_initial,
              refine_T,
              refine_L,
              refine_S,
              selections,
              selections_1d,
              max_iterations,
              run_finite_differences_test = False,
              correct_adp = True):
   adopt_init_args(self, locals())
   fmodel.xray_structure.scatterers().flags_set_grads(state=False)
   xray.set_scatterer_grad_flags(scatterers = fmodel.xray_structure.scatterers(),
                                 u_aniso     = True)
   if(self.run_finite_differences_test): self.correct_adp = False
   self.fmodel_copy = self.fmodel.deep_copy()
   self.target_functor = self.fmodel_copy.target_functor()
   self.run_finite_differences_test_counter = 0
   self.T_initial = []
   self.L_initial = []
   self.S_initial = []
   self.origins   = []
   for tlso_ in tlsos_initial:
     self.T_initial.append(tlso_.t)
     self.L_initial.append(tlso_.l)
     self.S_initial.append(tlso_.s)
     self.origins.append(tlso_.origin)
   self.counter = 0
   self.n_groups = len(self.T_initial)
   self.dim_T = len(self.T_initial[0])
   self.dim_L = len(self.L_initial[0])
   self.dim_S = len(self.S_initial[0])
   self.T_min = self.T_initial
   self.L_min = self.L_initial
   self.S_min = self.S_initial
   self.x = self.pack(self.T_min, self.L_min, self.S_min)
   self.minimizer = lbfgs.run(
     target_evaluator = self,
     core_params = lbfgs.core_parameters(maxfev = 10),
     termination_params = lbfgs.termination_parameters(
       min_iterations = max_iterations,
       max_calls = int(max_iterations*1.5)),
       exception_handling_params = lbfgs.exception_handling_parameters(
         ignore_line_search_failed_step_at_lower_bound = True,
         ignore_line_search_failed_step_at_upper_bound = True,
         ignore_line_search_failed_maxfev              = True))
   self.compute_functional_and_gradients()
   del self.x
   self.tlsos_result = generate_tlsos(
     selections     = self.selections,
     xray_structure = self.fmodel.xray_structure,
     T              = self.T_min,
     L              = self.L_min,
     S              = self.S_min)
Example #7
 def __init__(self, MI, Fr, n_frames, eps):
   self.counter=0
   self.MI = MI
   self.Fr = Fr
   self.n_frames=n_frames
   self.x = flex.double(self.n_frames,1)
   termination_params = lbfgs.termination_parameters(
                     traditional_convergence_test=True,
                      traditional_convergence_test_eps=eps)
   self.minimizer = lbfgs.run(target_evaluator=self, termination_params=termination_params)
   print "End of minimization: Converged"
Example #8
    def __init__(self, *args, **kwargs):

        Refinery.__init__(self, *args, **kwargs)

        self._termination_params = lbfgs.termination_parameters(max_iterations=self._max_iterations)

        import cStringIO

        self._log_string = cStringIO.StringIO

        return
Example #9
 def __init__(self, orient, constraint='triclinic',
              min_iterations=25, max_calls=1000):
   self.constraint=constraint
   adopt_init_args(self, locals())
   self.n = 9
   self.x = flex.double(orient.direct_matrix())
   self.minimizer = lbfgs.run(
     target_evaluator=self,
     termination_params=lbfgs.termination_parameters(
        traditional_convergence_test=False,
       min_iterations=min_iterations,
       max_calls=max_calls))
   del self.g
Example #10
 def __init__(self, I, weight, hkl, frames, G, Ih, eps):
   self.counter=0
   self.I = flex.double(I)
   self.weight = flex.double(weight)
   self.frames = flex.size_t(frames)
   self.hkl = flex.size_t(hkl)
   self.Ih = flex.double(Ih)
   self.x = flex.double(G)
   termination_params = lbfgs.termination_parameters(
                     traditional_convergence_test=True,
                      traditional_convergence_test_eps=eps)
   self.minimizer = lbfgs.run(target_evaluator=self, termination_params=termination_params)
   print "End of minimization: Converged after", self.counter, "steps"
Example #11
    def __init__(self,
                 fmodel,
                 model,
                 xs_initial,
                 adp_nmas,
                 selections,
                 selections_1d,
                 max_iterations,
                 n_modes,
                 weight_nmre = 1.0,
                 run_finite_differences_test = False,
                 correct_adp = True,
                 zero_mode_flag = True):
        adopt_init_args(self, locals())
        fmodel.xray_structure.scatterers().flags_set_grads(state=False)
        xray.set_scatterer_grad_flags(scatterers = fmodel.xray_structure.scatterers(),
                                      u_aniso = True)
        if(self.run_finite_differences_test): self.correct_adp = False
        self.fmodel_copy = self.fmodel.deep_copy()
        self.target_functor = self.fmodel_copy.target_functor()
        self.run_finite_differences_test_counter = 0
        self.counter = 0
        self.n_groups = len(self.xs_initial)
        self.dim_x    = len(self.xs_initial[0])
        self.xs_min    = self.xs_initial
        self.x = self.pack(self.xs_min)
#        for adp_nma_selected, selection in zip(adp_nmas, selections):
#            weights_selected = self.fmodel.xray_structure.atomic_weights.select(selection)
#            modes1d_selected = selected_modes_to_1D(modes = self.modes, n_modes = self.n_modes,
#                                                selection = self.selection)
#            assert len(modes1d_selected)/n_modes == len(weights_selected)
#            adp_nma_selected = init_nm_adp(modes = modes1d_selected,
#                              weights = weights_selected,
#                              n_modes = n_modes,
#                              zero_mode_flag = zero_mode_flag)
#            self.adp_nmas.append(adp_nma_selected)
        self.minimizer = lbfgs.run(
                target_evaluator = self,
                core_params = lbfgs.core_parameters(),
                termination_params = lbfgs.termination_parameters(
                    min_iterations = max_iterations,
                    max_calls = int(max_iterations*1.5)),
                exception_handling_params = lbfgs.exception_handling_parameters(
                    ignore_line_search_failed_step_at_lower_bound = False,
                    ignore_line_search_failed_step_at_upper_bound = True,
                    ignore_line_search_failed_maxfev              = False))
        self.compute_functional_and_gradients()
        del self.x
        self.xs_result = self.xs_min
Example #12
 def __init__(self,
              fmodel,
              selections,
              r_initial,
              t_initial,
              refine_r,
              refine_t,
              max_iterations,
              euler_angle_convention,
              lbfgs_maxfev):
   adopt_init_args(self, locals())
   self.fmodel_copy = self.fmodel.deep_copy()
   self.target_functor = self.fmodel_copy.target_functor()
   self.target_functor.prepare_for_minimization()
   self.atomic_weights = self.fmodel.xray_structure.atomic_weights()
   self.sites_cart = self.fmodel.xray_structure.sites_cart()
   self.sites_frac = self.fmodel.xray_structure.sites_frac()
   self.n_groups = len(self.selections)
   assert self.n_groups > 0
   self.counter=0
   assert len(self.r_initial)  == len(self.t_initial)
   assert len(self.selections) == len(self.t_initial)
   self.dim_r = 3
   self.dim_t = 3
   self.r_min = copy.deepcopy(self.r_initial)
   self.t_min = copy.deepcopy(self.t_initial)
    for i in range(len(self.r_min)):
       self.r_min[i] = tuple(self.r_min[i])
       self.t_min[i] = tuple(self.t_min[i])
   self.x = self.pack(self.r_min, self.t_min)
   self.n = self.x.size()
   self.minimizer = lbfgs.run(
     target_evaluator = self,
     core_params = lbfgs.core_parameters(
          maxfev = lbfgs_maxfev),
     termination_params = lbfgs.termination_parameters(
          max_iterations = max_iterations),
     exception_handling_params = lbfgs.exception_handling_parameters(
          ignore_line_search_failed_step_at_lower_bound = True,
          ignore_line_search_failed_step_at_upper_bound = True)
                    )
   self.compute_functional_and_gradients(suppress_gradients=True)
   del self.x
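The rigid-body minimizer above relies on pack/unpack methods (not shown) to flatten the per-group rotation and translation triples into the single flex.double that L-BFGS works on. A hypothetical sketch of that round trip, illustrating the convention rather than reproducing the actual implementation:

from scitbx.array_family import flex

def pack(r_list, t_list):
  # three rotation parameters followed by three translation parameters per group
  v = []
  for r, t in zip(r_list, t_list):
    v.extend(r)
    v.extend(t)
  return flex.double(v)

def unpack(x, n_groups):
  r_list, t_list = [], []
  for i in range(n_groups):
    base = i * 6
    r_list.append(tuple(x[base + k] for k in range(3)))
    t_list.append(tuple(x[base + 3 + k] for k in range(3)))
  return r_list, t_list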
Example #13
 def __init__(self,
              fmodel_core_data,
              f_obs,
              u_initial=[0,0,0,0,0,0],
              refine_u=True,
              min_iterations=500,
              max_iterations=500,
              symmetry_constraints_on_b_cart = True,
              u_min_max = 500.,
              u_min_min =-500.):
   adopt_init_args(self, locals())
   self.u_min = self.u_initial
   self.u_factor = self.fmodel_core_data.uc.volume()**(2/3.)
   if(self.symmetry_constraints_on_b_cart):
     self.adp_constraints = self.f_obs.space_group().adp_constraints()
     u_star = self.f_obs.space_group().average_u_star(u_star = self.u_initial)
     self.dim_u = self.adp_constraints.n_independent_params()
     assert self.dim_u <= 6
     independent_params = self.adp_constraints.independent_params(u_star)
     self.x = self.pack(
       u=independent_params,
       u_factor=self.u_factor)
   else:
     self.dim_u = len(self.u_initial)
     assert self.dim_u == 6
     self.x = self.pack(
       u=flex.double(self.u_min),
       u_factor=self.u_factor)
   lbfgs_exception_handling_params = lbfgs.exception_handling_parameters(
     ignore_line_search_failed_step_at_lower_bound = True,
     ignore_line_search_failed_step_at_upper_bound = True,
     ignore_line_search_failed_maxfev              = True)
   self.minimizer = lbfgs.run(
     target_evaluator = self,
     core_params = lbfgs.core_parameters(),
     termination_params = lbfgs.termination_parameters(
       min_iterations            = min_iterations,
       max_iterations            = max_iterations),
       exception_handling_params = lbfgs_exception_handling_params)
   self.compute_functional_and_gradients()
   del self.x
Example #14
 def __init__(self,
       fmodel,
       groups,
       call_back_after_minimizer_cycle=None,
       number_of_minimizer_cycles=3,
       lbfgs_max_iterations=20,
       number_of_finite_difference_tests=0):
   adopt_init_args(self, locals())
   self.x = flex.double()
   for group in groups:
     if (group.refine_f_prime): self.x.append(group.f_prime)
     if (group.refine_f_double_prime): self.x.append(group.f_double_prime)
   fmodel.xray_structure.scatterers().flags_set_grads(state=False)
   for group in groups:
     if (group.refine_f_prime):
       fmodel.xray_structure.scatterers().flags_set_grad_fp(
         iselection=group.iselection)
     if (group.refine_f_double_prime):
       fmodel.xray_structure.scatterers().flags_set_grad_fdp(
         iselection=group.iselection)
   self.target_functor = fmodel.target_functor()
   self.target_functor.prepare_for_minimization()
    for self.i_cycle in range(number_of_minimizer_cycles):
     self.lbfgs = lbfgs.run(
       target_evaluator=self,
       termination_params=lbfgs.termination_parameters(
         max_iterations=lbfgs_max_iterations),
       exception_handling_params=lbfgs.exception_handling_parameters(
         ignore_line_search_failed_step_at_lower_bound = True))
     if (call_back_after_minimizer_cycle is not None):
       self.unpack()
       if (not call_back_after_minimizer_cycle(minimizer=self)):
         break
   if (call_back_after_minimizer_cycle is None):
     self.unpack()
   del self.i_cycle
   del self.lbfgs
   del self.x
   del self.target_functor
   del self.fmodel
   del self.groups
Example #15
 def __init__(self, current_x=None, args=None,
              min_iterations=0, max_iterations=None, max_calls=1000, max_drop_eps=1.e-5):
   self.n = current_x.size()
   self.x = current_x
   self.args = args
   self.minimizer = lbfgs.run(
           target_evaluator=self,
           termination_params=lbfgs.termination_parameters(
             traditional_convergence_test=False,
             drop_convergence_test_max_drop_eps=max_drop_eps,
             min_iterations=min_iterations,
             max_iterations=max_iterations,
             max_calls=max_calls),
           exception_handling_params=lbfgs.exception_handling_parameters(
              ignore_line_search_failed_rounding_errors=True,
              ignore_line_search_failed_step_at_lower_bound=True,
              ignore_line_search_failed_step_at_upper_bound=False,
              ignore_line_search_failed_maxfev=False,
              ignore_line_search_failed_xtol=False,
              ignore_search_direction_not_descent=False)
           )
Example #16
 def __init__(self, current_x=None, args=None,
              min_iterations=0, max_iterations=None, max_calls=1000, max_drop_eps=1.e-5):
   self.n = current_x.size()
   self.x = current_x
   self.args = args
   self.minimizer = lbfgs.run(
           target_evaluator=self,
           termination_params=lbfgs.termination_parameters(
             traditional_convergence_test=False,
             drop_convergence_test_max_drop_eps=max_drop_eps,
             min_iterations=min_iterations,
             max_iterations=max_iterations,
             max_calls=max_calls),
           exception_handling_params=lbfgs.exception_handling_parameters(
              ignore_line_search_failed_rounding_errors=True,
              ignore_line_search_failed_step_at_lower_bound=True,
              ignore_line_search_failed_step_at_upper_bound=False,
              ignore_line_search_failed_maxfev=False,
              ignore_line_search_failed_xtol=False,
              ignore_search_direction_not_descent=False)
           )
Example #17
 def __init__(self, current_x=None, parameterization=None, refinery=None, out=None,
              min_iterations=0, max_calls=1000, max_drop_eps=1.e-5):
   adopt_init_args(self, locals())
   self.n = current_x.size()
   self.x = current_x
   from scitbx import lbfgs
   self.minimizer = lbfgs.run(
     target_evaluator=self,
     termination_params=lbfgs.termination_parameters(
       traditional_convergence_test=True,
       drop_convergence_test_max_drop_eps=max_drop_eps,
       min_iterations=min_iterations,
       max_iterations = None,
       max_calls=max_calls),
     exception_handling_params=lbfgs.exception_handling_parameters(
        ignore_line_search_failed_rounding_errors=True,
        ignore_line_search_failed_step_at_lower_bound=True,#the only change from default
        ignore_line_search_failed_step_at_upper_bound=False,
        ignore_line_search_failed_maxfev=False,
        ignore_line_search_failed_xtol=False,
        ignore_search_direction_not_descent=False)
     )
Example #18
  def __init__(self, current_x=None, parameterization=None, refinery=None, out=None,
              min_iterations=0, max_calls=1000, max_drop_eps=1.e-5):
   adopt_init_args(self, locals())
   self.n = current_x.size()
   self.x = current_x
   from scitbx import lbfgs
   self.minimizer = lbfgs.run(
     target_evaluator=self,
     termination_params=lbfgs.termination_parameters(
       traditional_convergence_test=False,
       drop_convergence_test_max_drop_eps=max_drop_eps,
       min_iterations=min_iterations,
       max_iterations = None,
       max_calls=max_calls),
     exception_handling_params=lbfgs.exception_handling_parameters(
        ignore_line_search_failed_rounding_errors=True,
        ignore_line_search_failed_step_at_lower_bound=True,#the only change from default
        ignore_line_search_failed_step_at_upper_bound=False,
        ignore_line_search_failed_maxfev=False,
        ignore_line_search_failed_xtol=False,
        ignore_search_direction_not_descent=False)
     )
Example #19
 def __init__(self, uiso, T_initial, L_initial, S_initial, refine_T,
              refine_L, refine_S, origin, sites, max_iterations):
     adopt_init_args(self, locals())
     assert uiso.size() == sites.size()
     self.dim_T = len(self.T_initial)
     self.dim_L = len(self.L_initial)
     self.dim_S = len(self.S_initial)
     assert self.dim_T == 1 and self.dim_S == 3 and self.dim_L == 6
     self.T_min = self.T_initial
     self.L_min = self.L_initial
     self.S_min = self.S_initial
     self.x = self.pack(self.T_min, self.L_min, self.S_min)
     self.n = self.x.size()
     self.minimizer = lbfgs.run(
         target_evaluator=self,
         termination_params=lbfgs.termination_parameters(
             max_iterations=max_iterations,
             max_calls=int(max_iterations * 1.5)),
         exception_handling_params=lbfgs.exception_handling_parameters(
             ignore_line_search_failed_step_at_lower_bound=True,
             ignore_line_search_failed_step_at_upper_bound=True,
             ignore_line_search_failed_maxfev=True))
     self.compute_functional_and_gradients()
     del self.x
Example #20
    def __init__(self, d_i, psi_i, eta_rad, Deff):
        import sys
        self.safelog = -1. + math.log(sys.float_info.max)
        self.S = StringIO.StringIO()
        pickle.dump([d_i, psi_i, eta_rad, Deff], self.S, 0)
        assert len(d_i) == len(psi_i)
        self.d_i = d_i
        self.psi_i = psi_i
        self.Nobs = len(d_i)
        self.escalate = 10.  # 10 is a soft switch; 50-100 a hard switch
        self.x = flex.double([log(2. / Deff),
                              log(eta_rad)])  # parameters alpha, eta
        self.minimizer = run(
            target_evaluator=self,
            core_params=core_parameters(
                gtol=0.1
                # increasing the accuracy of the line search technique (default=0.9)
                # as suggested by source code.  Otherwise Deff is set unreasonably high
                # and the exponential blows up.
            ),
            termination_params=termination_parameters(
                traditional_convergence_test=False,
                drop_convergence_test_max_drop_eps=1.e-5,
                min_iterations=0,
                max_iterations=100,
                max_calls=200),
            exception_handling_params=exception_handling_parameters(
                ignore_line_search_failed_rounding_errors=True,
                ignore_line_search_failed_step_at_lower_bound=
                True,  #the only change from default
                ignore_line_search_failed_step_at_upper_bound=False,
                ignore_line_search_failed_maxfev=False,
                ignore_line_search_failed_xtol=False,
                ignore_search_direction_not_descent=False))

        self.x = flex.exp(self.x)
Example #21
    def refine(self):
        '''Actually perform the parameter refinement.'''

        tp = lbfgs.termination_parameters(max_iterations=1000)
        r = lbfgs.run(target_evaluator=self, termination_params=tp)
        return r
Example #22
 def __init__(self,
              fmodel_core_data,
              f_obs,
              k_initial,
              b_initial,
              u_initial,
              refine_k,
              refine_b,
              refine_u,
              min_iterations,
              max_iterations,
              fmodel_core_data_twin = None,
              twin_fraction = None,
              symmetry_constraints_on_b_cart = True,
              u_min_max = 500.,
              u_min_min =-500.,
              k_sol_max = 10.,
              k_sol_min =-10.,
              b_sol_max = 500.,
              b_sol_min =-500.):
   adopt_init_args(self, locals())
   if(twin_fraction == 0):
     twin_fraction = None
     self.twin_fraction = None
     fmodel_core_data_twin=None
     self.fmodel_core_data_twin=None
   assert [fmodel_core_data_twin,twin_fraction].count(None) in [0,2]
   self.n_shells = self.fmodel_core_data.data.n_shells()
   if not self.fmodel_core_data_twin is None:
     assert self.fmodel_core_data_twin.data.n_shells() == self.n_shells
   assert self.n_shells > 0  and self.n_shells <= 10
   self.k_min = self.k_initial
   assert len(self.k_min) == self.n_shells
   self.b_min = self.b_initial
   self.u_min = self.u_initial
   self.u_factor = self.fmodel_core_data.uc.volume()**(2/3.)
   if(self.symmetry_constraints_on_b_cart):
     self.adp_constraints = self.f_obs.space_group().adp_constraints()
     u_star = self.f_obs.space_group().average_u_star(u_star = self.u_initial)
     self.dim_u = self.adp_constraints.n_independent_params()
     assert self.dim_u <= 6
     independent_params = self.adp_constraints.independent_params(u_star)
     self.x = self.pack(
       u=independent_params,
       k=self.k_min,
       b=self.b_min,
       u_factor=self.u_factor)
   else:
     self.dim_u = len(self.u_initial)
     assert self.dim_u == 6
     self.x = self.pack(
       u=flex.double(self.u_min),
       k=self.k_min,
       b=self.b_min,
       u_factor=self.u_factor)
   lbfgs_exception_handling_params = lbfgs.exception_handling_parameters(
     ignore_line_search_failed_step_at_lower_bound = True,
     ignore_line_search_failed_step_at_upper_bound = True,
     ignore_line_search_failed_maxfev              = True)
   self.minimizer = lbfgs.run(
     target_evaluator = self,
     core_params = lbfgs.core_parameters(),
     termination_params = lbfgs.termination_parameters(
       min_iterations            = min_iterations,
       max_iterations            = max_iterations),
       exception_handling_params = lbfgs_exception_handling_params)
   self.compute_functional_and_gradients()
   del self.x
Example #23
  def __init__(self,height_list=None,xyz_list=None,shape="parabola",
               max_iterations=25):

    self.height_list = height_list
    self.xyz_list = xyz_list

    # pick shape
    if (shape == "parabola"):
      self.n = parabola().n
      self.fit = parabola
      self.check_lengths(height_list=height_list,xyz_list=xyz_list)

      # construct vector and matrix for initial guess
      b_elements = []
      A_elements = []
      for i in range(self.n):
        b_elements.append(height_list[i])
        A_elements.append(xyz_list[i][0]*xyz_list[i][0])
        A_elements.append(xyz_list[i][1]*xyz_list[i][1])
        A_elements.append(xyz_list[i][2]*xyz_list[i][2])
        A_elements.append(xyz_list[i][0])
        A_elements.append(xyz_list[i][1])
        A_elements.append(xyz_list[i][2])
        A_elements.append(1.0)
      A = matrix.sqr(A_elements)
      b = matrix.col(b_elements)

    elif ((shape == "quadratic") or (shape == "gaussian")):
      self.n = quadratic().n
      if (shape == "quadratic"):
        self.fit = quadratic
      else:
        self.fit = gaussian
      self.check_lengths(height_list=height_list,xyz_list=xyz_list)

      # construct vector and matrix for initial guess
      b_elements = []
      A_elements = []
      for i in range(self.n):
        b_elements.append(height_list[i])
        A_elements.append(xyz_list[i][0]*xyz_list[i][0])
        A_elements.append(xyz_list[i][1]*xyz_list[i][1])
        A_elements.append(xyz_list[i][2]*xyz_list[i][2])
        A_elements.append(xyz_list[i][0])
        A_elements.append(xyz_list[i][1])
        A_elements.append(xyz_list[i][2])
        A_elements.append(xyz_list[i][0]*xyz_list[i][1])
        A_elements.append(xyz_list[i][0]*xyz_list[i][2])
        A_elements.append(xyz_list[i][1]*xyz_list[i][2])
        A_elements.append(1.0)
      A = matrix.sqr(A_elements)
      b = matrix.col(b_elements)

    else:
      print "fit_peak error: shape not valid"
      exit()

    # get initial guess (solve Ax = b)
    self.x = flex.double(A.inverse()*b)

    # if there are more points, minimize using the LBFGS minimizer
    if (len(height_list) > self.n):
      self.minimizer = lbfgs.run(\
        target_evaluator=self,termination_params=\
        lbfgs.termination_parameters(max_iterations=max_iterations))

    # finalize fit
    answer = self.fit(parameters=self.x)
    self.vertex = answer.vertex
    x_max = xyz_list[1][0]  # these min and max values are based on the order
    x_min = xyz_list[2][0]
    y_max = xyz_list[3][1]
    y_min = xyz_list[4][1]
    z_max = xyz_list[5][2]
    z_min = xyz_list[6][2]
    assert(x_max > x_min)
    assert(y_max > y_min)
    assert(z_max > z_min)
    if ((self.vertex[0] < x_min) or (self.vertex[0] > x_max)):
      self.vertex[0] = xyz_list[0][0]
    if ((self.vertex[1] < y_min) or (self.vertex[1] > y_max)):
      self.vertex[1] = xyz_list[0][1]
    if ((self.vertex[2] < z_min) or (self.vertex[2] > z_max)):
      self.vertex[2] = xyz_list[0][2]
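For reference, the "parabola" branch above builds rows of the form (x*x, y*y, z*z, x, y, z, 1), so it presumably fits the seven-parameter model f(x,y,z) = a*x**2 + b*y**2 + c*z**2 + d*x + e*y + f*z + g, whose vertex is where each partial derivative vanishes. A small sketch of that model (the parabola class itself is not shown in the example):

def parabola_value(p, xyz):
  a, b, c, d, e, f, g = p
  x, y, z = xyz
  return a*x*x + b*y*y + c*z*z + d*x + e*y + f*z + g

def parabola_vertex(p):
  # df/dx = 2*a*x + d = 0, and similarly for y and z
  a, b, c, d, e, f, _ = p
  return (-d / (2.0 * a), -e / (2.0 * b), -f / (2.0 * c))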
Example #24
  def refine(self):
    '''Actually perform the parameter refinement.'''

    tp = lbfgs.termination_parameters(max_iterations=1000)
    r = lbfgs.run(target_evaluator = self, termination_params=tp)
    return r
Example #25
    def __init__(self,
                 height_list=None,
                 xyz_list=None,
                 shape="parabola",
                 max_iterations=25):

        self.height_list = height_list
        self.xyz_list = xyz_list

        # pick shape
        if (shape == "parabola"):
            self.n = parabola().n
            self.fit = parabola
            self.check_lengths(height_list=height_list, xyz_list=xyz_list)

            # construct vector and matrix for initial guess
            b_elements = []
            A_elements = []
            for i in range(self.n):
                b_elements.append(height_list[i])
                A_elements.append(xyz_list[i][0] * xyz_list[i][0])
                A_elements.append(xyz_list[i][1] * xyz_list[i][1])
                A_elements.append(xyz_list[i][2] * xyz_list[i][2])
                A_elements.append(xyz_list[i][0])
                A_elements.append(xyz_list[i][1])
                A_elements.append(xyz_list[i][2])
                A_elements.append(1.0)
            A = matrix.sqr(A_elements)
            b = matrix.col(b_elements)

        elif ((shape == "quadratic") or (shape == "gaussian")):
            self.n = quadratic().n
            if (shape == "quadratic"):
                self.fit = quadratic
            else:
                self.fit = gaussian
            self.check_lengths(height_list=height_list, xyz_list=xyz_list)

            # construct vector and matrix for initial guess
            b_elements = []
            A_elements = []
            for i in range(self.n):
                b_elements.append(height_list[i])
                A_elements.append(xyz_list[i][0] * xyz_list[i][0])
                A_elements.append(xyz_list[i][1] * xyz_list[i][1])
                A_elements.append(xyz_list[i][2] * xyz_list[i][2])
                A_elements.append(xyz_list[i][0])
                A_elements.append(xyz_list[i][1])
                A_elements.append(xyz_list[i][2])
                A_elements.append(xyz_list[i][0] * xyz_list[i][1])
                A_elements.append(xyz_list[i][0] * xyz_list[i][2])
                A_elements.append(xyz_list[i][1] * xyz_list[i][2])
                A_elements.append(1.0)
            A = matrix.sqr(A_elements)
            b = matrix.col(b_elements)

        else:
            print("fit_peak error: shape not valid")
            exit()

        # get initial guess (solve Ax = b)
        self.x = flex.double(A.inverse() * b)

        # if there are more points, minimize using the LBFGS minimizer
        if (len(height_list) > self.n):
            self.minimizer = lbfgs.run(\
              target_evaluator=self,termination_params=\
              lbfgs.termination_parameters(max_iterations=max_iterations))

        # finalize fit
        answer = self.fit(parameters=self.x)
        self.vertex = answer.vertex
        x_max = xyz_list[1][
            0]  # these min and max values are based on the order
        x_min = xyz_list[2][0]
        y_max = xyz_list[3][1]
        y_min = xyz_list[4][1]
        z_max = xyz_list[5][2]
        z_min = xyz_list[6][2]
        assert (x_max > x_min)
        assert (y_max > y_min)
        assert (z_max > z_min)
        if ((self.vertex[0] < x_min) or (self.vertex[0] > x_max)):
            self.vertex[0] = xyz_list[0][0]
        if ((self.vertex[1] < y_min) or (self.vertex[1] > y_max)):
            self.vertex[1] = xyz_list[0][1]
        if ((self.vertex[2] < z_min) or (self.vertex[2] > z_max)):
            self.vertex[2] = xyz_list[0][2]
Example #26
def mpi_split_evaluator_run(target_evaluator,
                    termination_params=None,
                    core_params=None,
                    exception_handling_params=None,
                    log=None,
                    #---> Insertion starts
                    gradient_only=False,
                    line_search=True):
                    #<--- Insertion ends
  """The supported scenario is that each MPI worker rank has a target evaluator
  that has part of the data.  Each rank calculates a bit of the functional and
  gradients, but then mpi reduce is used to sum them all up.  There has been
  no low-level redesign to support MPI.  In particular, the ext.minimizer is
  run (wastefully) by every worker rank, using the same data.  It is assumed that
  the calculation of compute_functional_and_gradients() is overwhelmingly the rate
  limiting step, and that is what MPI parallelism is intended to distribute here."""
  from libtbx.mpi4py import MPI
  comm = MPI.COMM_WORLD
  rank = comm.Get_rank()
  size = comm.Get_size()

  if (termination_params is None):
    termination_params = termination_parameters()
  if (core_params is None):
    core_params = core_parameters()
  if (exception_handling_params is None):
    exception_handling_params = exception_handling_parameters()
  x = target_evaluator.x
  if (log is not None):
    print >> log, "lbfgs minimizer():"
    print >> log, "  x.size():", x.size()
    print >> log, "  m:", core_params.m
    print >> log, "  maxfev:", core_params.maxfev
    print >> log, "  gtol:", core_params.gtol
    print >> log, "  xtol:", core_params.xtol
    print >> log, "  stpmin:", core_params.stpmin
    print >> log, "  stpmax:", core_params.stpmax
    print >> log, "lbfgs traditional_convergence_test:", \
      termination_params.traditional_convergence_test
  minimizer = ext.minimizer(
    x.size(),
    core_params.m,
    core_params.maxfev,
    core_params.gtol,
    core_params.xtol,
    core_params.stpmin,
    core_params.stpmax)
  if (termination_params.traditional_convergence_test):
    is_converged = ext.traditional_convergence_test(
      x.size(),
      termination_params.traditional_convergence_test_eps)
  else:
    is_converged = ext.drop_convergence_test(
      n_test_points=termination_params.drop_convergence_test_n_test_points,
      max_drop_eps=termination_params.drop_convergence_test_max_drop_eps,
      iteration_coefficient
        =termination_params.drop_convergence_test_iteration_coefficient)
  callback_after_step = getattr(target_evaluator, "callback_after_step", None)
  diag_mode = getattr(target_evaluator, "diag_mode", None)
  if (diag_mode is not None): assert diag_mode in ["once", "always"]
  f_min, x_min = None, None
  f, g = None, None
  try:
    while 1:
      if (diag_mode is None):
        #XXX Only the diag_mode==None case is currently implemented, just as example
        f_term, g_term = target_evaluator.compute_functional_and_gradients()
        f_total = comm.reduce(f_term, MPI.SUM, 0)
        g_total = comm.reduce(g_term, MPI.SUM, 0)
        if rank==0: transmit = (f_total,g_total)
        else: transmit = None
        f, g = comm.bcast(transmit, root=0)
        if False and rank==0: # for debug
          print ("%s %10.4f"%("MPI stp",f),"["," ".join(["%10.4f"%a for a in x]),"]")
        d = None
      else:
        f, g, d = target_evaluator.compute_functional_gradients_diag()
        if (diag_mode == "once"):
          diag_mode = None
      if (f_min is None):
        if (not termination_params.traditional_convergence_test):
          is_converged(f)
        f_min, x_min = f, x.deep_copy()
      elif (f_min > f):
        f_min, x_min = f, x.deep_copy()
      if (log is not None):
        print >> log, "lbfgs minimizer.run():" \
          " f=%.6g, |g|=%.6g, x_min=%.6g, x_mean=%.6g, x_max=%.6g" % (
          f, g.norm(), flex.min(x), flex.mean(x), flex.max(x))
      if (d is None):
        #---> Insertion starts
        if (minimizer.run(x, f, g, gradient_only,line_search)): continue
        #<--- Insertion ends
      else:
        #---> Insertion starts
        if (minimizer.run(x, f, g, d, gradient_only,line_search)): continue
        #<--- Insertion ends
      if (log is not None):
        print >> log, "lbfgs minimizer step"
      if (callback_after_step is not None):
        if (callback_after_step(minimizer) is True):
          if (log is not None):
            print >> log, "lbfgs minimizer stop: callback_after_step is True"
          break
      if (termination_params.traditional_convergence_test):
        if (    minimizer.iter() >= termination_params.min_iterations
            and is_converged(x, g)):
          if (log is not None):
            print >> log, "lbfgs minimizer stop: traditional_convergence_test"
          break
      else:
        if (is_converged(f)):
          if (log is not None):
            print >> log, "lbfgs minimizer stop: drop_convergence_test"
          break
      if (    termination_params.max_iterations is not None
          and minimizer.iter() >= termination_params.max_iterations):
        if (log is not None):
          print >> log, "lbfgs minimizer stop: max_iterations"
        break
      if (    termination_params.max_calls is not None
          and minimizer.nfun() > termination_params.max_calls):
        if (log is not None):
          print >> log, "lbfgs minimizer stop: max_calls"
        break
      if (d is None):
        #---> Insertion starts
        if (not minimizer.run(x, f, g, gradient_only,line_search)): break
        #<--- Insertion ends
      else:
        #---> Insertion starts
        if (not minimizer.run(x, f, g, d, gradient_only,line_search)): break
        #<--- Insertion ends
  except RuntimeError as e:
    minimizer.error = str(e)
    if (log is not None):
      print >> log, "lbfgs minimizer exception:", str(e)
    if (x_min is not None):
      x.clear()
      x.extend(x_min)
    error_classification = exception_handling_params.filter(
      minimizer.error, x.size(), x, g)
    if (error_classification > 0):
      raise
    elif (error_classification < 0):
      minimizer.is_unusual_error = True
    else:
      minimizer.is_unusual_error = False
  else:
    minimizer.error = None
    minimizer.is_unusual_error = None
  if (log is not None):
    print >> log, "lbfgs minimizer done."
  return minimizer
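A hedged sketch of the per-rank evaluator mpi_split_evaluator_run expects (the data layout and the least-squares target are illustrative assumptions, not part of the code above): every rank holds its own slice of the observations, returns only its share of f and g, and the reduce/bcast inside the runner combines the shares. It assumes mpi4py and scitbx are available, that mpi_split_evaluator_run is in scope, and that the script is launched with mpirun.

from libtbx.mpi4py import MPI
from scitbx.array_family import flex

class partial_mean_evaluator(object):
  """Fit a single scalar x[0] to all observations by least squares."""
  def __init__(self, local_data):
    self.local_data = local_data            # flex.double, this rank's slice only
    self.x = flex.double([0.0])             # parameter vector, identical on every rank

  def compute_functional_and_gradients(self):
    diff = self.local_data - self.x[0]
    f_term = flex.sum(diff * diff)          # this rank's share of the functional
    g_term = flex.double([-2.0 * flex.sum(diff)])  # and of the gradient
    return f_term, g_term                   # summed across ranks by the runner

if __name__ == "__main__":
  comm = MPI.COMM_WORLD
  # round-robin split of a pretend global data set 0..99 across the ranks
  local = flex.double([float(v) for v in range(comm.rank, 100, comm.size)])
  evaluator = partial_mean_evaluator(local)
  mpi_split_evaluator_run(target_evaluator=evaluator)
  if comm.rank == 0:
    print("fitted value:", evaluator.x[0])  # approaches the mean of 0..99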