Example #1
 def solve(self, maxiter=200, thresh=1e-4):
     self.helper.restart()
     try:
         _ = normal_eqns_solving.levenberg_marquardt_iterations_encapsulated_eqns(
             non_linear_ls=self.helper,
             n_max_iterations=maxiter,
             track_all=True,
             step_threshold=thresh)
     except KeyboardInterrupt:
         # allow a manual stop of a long refinement; the helper keeps its current state
         pass
     print("End of minimization: Converged", self.helper.counter, "cycles")
     print(self.helper.get_eigen_summary())
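Every example on this page follows the same pattern: construct a helper object implementing the non-linear least-squares interface, call restart(), hand it to levenberg_marquardt_iterations_encapsulated_eqns, and read the refined parameters back from helper.x. A minimal sketch of that driver, assuming the usual cctbx import path scitbx.lstbx.normal_eqns_solving (the excerpts do not show their imports) and an already-constructed helper:

from scitbx.lstbx import normal_eqns_solving  # import path assumed; not shown in the excerpts

def run_refinement(helper, maxiter=200, thresh=1e-4):
    # helper is expected to provide the non_linear_ls interface used above
    helper.restart()
    try:
        normal_eqns_solving.levenberg_marquardt_iterations_encapsulated_eqns(
            non_linear_ls=helper,
            n_max_iterations=maxiter,
            track_all=True,
            step_threshold=thresh)
    except KeyboardInterrupt:
        # as in Example #1: permit an early manual stop
        pass
    return helper.x  # refined parameter vector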
Example #2
  def __init__(self,x_obs,y_obs,w_obs,initial):
    self.counter = 0
    self.x = initial.deep_copy()
    self.helper = eigen_helper(initial_estimates = self.x)
    self.helper.set_cpp_data(x_obs,y_obs,w_obs)
    self.helper.restart()
    iterations = normal_eqns_solving.levenberg_marquardt_iterations_encapsulated_eqns(
               non_linear_ls = self.helper,
               n_max_iterations = 5000,
               track_all=True,
               step_threshold = 0.0001,
    )
    # get ESDs from the diagonal of the SVD-based inverse of the normal matrix
    self.helper.build_up()
    NM = sqr(self.get_helper_normal_matrix())
    from scitbx.linalg.svd import inverse_via_svd
    svd_inverse, sigma = inverse_via_svd(NM.as_flex_double_matrix())
    IA = sqr(svd_inverse)
    self.error_diagonal = flex.double([IA(i, i) for i in range(self.helper.x.size())])

    print("End of minimization: Converged", self.helper.counter, "cycles")
Example #4
    def __init__(self, Ibase, Gbase, FSIM, curvatures=False, **kwargs):
        # For backward compatibility handle the case where phil is undefined
        if "params" in kwargs.keys():
            self.params = kwargs["params"]
        else:
            from xfel.command_line.cxi_merge import master_phil
            phil = iotbx.phil.process_command_line(
                args=[], master_string=master_phil).show()
            self.params = phil.work.extract()
            self.params.levmar.parameter_flags.append(
                "Bfactor")  # default example refines Bfactor

        self.counter = 0

        self.x = flex.double(list(Ibase) + list(Gbase))
        self.N_I = len(Ibase)
        self.N_G = len(Gbase)
        self.N_raw_obs = FSIM.raw_obs.size()
        print "# structure factors:", self.N_I, "# frames:", self.N_G, "(Visited set; refined parameters)"

        step_threshold = self.params.levmar.termination.step_threshold
        objective_decrease_threshold = self.params.levmar.termination.objective_decrease_threshold
        if "Bfactor" in self.params.levmar.parameter_flags:
            self.x = self.x.concatenate(flex.double(len(Gbase), 0.0))
        if "Deff" in self.params.levmar.parameter_flags:
            D_values = flex.double(
                [2. * e.crystal.domain_size for e in kwargs["experiments"]])
            self.x = self.x.concatenate(D_values)
        if "Rxy" in self.params.levmar.parameter_flags:
            self.x = self.x.concatenate(flex.double(2 * len(Gbase), 0.0))

        levenberg_helper = choice_as_helper_base(
            self.params.levmar.parameter_flags)
        self.helper = levenberg_helper(initial_estimates=self.x)
        self.helper.set_cpp_data(FSIM, self.N_I, self.N_G)
        if "experiments" in kwargs:
            self.helper.set_wavelength(
                [e.beam.get_wavelength() for e in kwargs["experiments"]])
            self.helper.set_domain_size([
                2. * e.crystal.get_domain_size_ang()
                for e in kwargs["experiments"]
            ])  #ad hoc factor of 2
            self.helper.set_Astar_matrix(
                [e.crystal.get_A() for e in kwargs["experiments"]])

        bitflags = choice_as_bitflag(self.params.levmar.parameter_flags)
        self.helper.set_parameter_flags(bitflags)
        self.helper.restart()

        iterations = normal_eqns_solving.levenberg_marquardt_iterations_encapsulated_eqns(
            non_linear_ls=self.helper,
            n_max_iterations=5000,
            track_all=True,
            step_threshold=step_threshold,
            objective_decrease_threshold=objective_decrease_threshold,
            verbose_iterations=True,
        )
        if "Deff" in self.params.levmar.parameter_flags:
            newDeff = self.helper.x[self.N_I + self.N_G:]  # XXX specific
            Dstats = flex.mean_and_variance(newDeff)
            print "Refined Deff mean & standard deviation:",
            print Dstats.mean(), Dstats.unweighted_sample_standard_deviation()
        if "Rxy" in self.params.levmar.parameter_flags:
            AX = self.helper.x[self.N_I + self.N_G:self.N_I +
                               2 * self.N_G]  # XXX specific
            AY = self.helper.x[self.N_I + 2 * self.N_G:self.N_I +
                               3 * self.N_G]  # XXX specific
            stats = flex.mean_and_variance(AX)
            print "Rx rotational increments in degrees: %8.6f +/- %8.6f" % (
                stats.mean(), stats.unweighted_sample_standard_deviation())
            stats = flex.mean_and_variance(AY)
            print "Ry rotational increments in degrees: %8.6f +/- %8.6f" % (
                stats.mean(), stats.unweighted_sample_standard_deviation())

        print "End of minimisation: Converged", self.helper.counter, "cycles"
        chi_squared = self.helper.objective() * 2.
        print "obj", chi_squared
        print "# of obs:", FSIM.raw_obs.size()
        dof = FSIM.raw_obs.size() - (len(self.x))
        print "degrees of freedom =", dof
        print "chisq/dof: %7.3f" % (chi_squared / dof)
        print
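Example #4 (and Example #5 below, a close variant of the same constructor) packs one flat parameter vector: N_I structure-factor terms, then N_G per-frame scale factors, with optional Bfactor, Deff and Rxy blocks appended in that order; the slices marked "XXX specific" assume a particular flag combination. A small sketch (hypothetical helper, not from the source) that decodes the blocks for any combination, following the concatenation order in __init__:

def parameter_blocks(x, N_I, N_G, parameter_flags):
    # x is the refined helper.x vector; block order mirrors the concatenate() calls
    blocks = {"I": x[:N_I], "G": x[N_I:N_I + N_G]}
    offset = N_I + N_G
    if "Bfactor" in parameter_flags:
        blocks["Bfactor"] = x[offset:offset + N_G]
        offset += N_G
    if "Deff" in parameter_flags:
        # assumes one effective domain size per frame/experiment
        blocks["Deff"] = x[offset:offset + N_G]
        offset += N_G
    if "Rxy" in parameter_flags:
        blocks["Rx"] = x[offset:offset + N_G]
        blocks["Ry"] = x[offset + N_G:offset + 2 * N_G]
    return blocks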
Example #5
  def __init__(self,Ibase,Gbase,FSIM,curvatures=False,**kwargs):
    # For backward compatibility handle the case where phil is undefined
    if "params" in kwargs.keys():
      self.params = kwargs["params"]
    else:
      from xfel.command_line.cxi_merge import master_phil
      phil = iotbx.phil.process_command_line(args=[], master_string=master_phil).show()
      self.params = phil.work.extract()
      self.params.levmar.parameter_flags.append("Bfactor") # default example refines Bfactor

    self.counter = 0

    self.x = flex.double(list(Ibase) + list(Gbase))
    self.N_I = len(Ibase)
    self.N_G = len(Gbase)
    self.N_raw_obs = FSIM.raw_obs.size()
    print "# structure factors:",self.N_I, "# frames:",self.N_G, "(Visited set; refined parameters)"

    step_threshold = self.params.levmar.termination.step_threshold
    objective_decrease_threshold = self.params.levmar.termination.objective_decrease_threshold
    if "Bfactor" in self.params.levmar.parameter_flags:
        self.x = self.x.concatenate(flex.double(len(Gbase),0.0))
    if "Deff" in self.params.levmar.parameter_flags:
        D_values = flex.double([2.*e.crystal.domain_size for e in kwargs["experiments"]])
        self.x = self.x.concatenate(D_values)
    if "Rxy" in self.params.levmar.parameter_flags:
        self.x = self.x.concatenate(flex.double(2*len(Gbase),0.0))

    levenberg_helper = choice_as_helper_base(self.params.levmar.parameter_flags)
    self.helper = levenberg_helper(initial_estimates = self.x)
    self.helper.set_cpp_data(FSIM, self.N_I, self.N_G)
    if "experiments" in kwargs:
      self.helper.set_wavelength([e.beam.get_wavelength() for e in kwargs["experiments"]])
      self.helper.set_domain_size([2.*e.crystal.domain_size for e in kwargs["experiments"]])#ad hoc factor of 2
      self.helper.set_Astar_matrix([e.crystal.get_A() for e in kwargs["experiments"]])

    bitflags = choice_as_bitflag(self.params.levmar.parameter_flags)
    self.helper.set_parameter_flags(bitflags)
    self.helper.restart()

    iterations = normal_eqns_solving.levenberg_marquardt_iterations_encapsulated_eqns(
               non_linear_ls = self.helper,
               n_max_iterations = 5000,
               track_all=True,
               step_threshold = step_threshold,
               objective_decrease_threshold = objective_decrease_threshold
    )
    if "Deff" in self.params.levmar.parameter_flags:
      newDeff = self.helper.x[self.N_I+self.N_G:] # XXX specific
      Dstats=flex.mean_and_variance(newDeff)
      print "Refined Deff mean & standard deviation:",
      print Dstats.mean(),Dstats.unweighted_sample_standard_deviation()
    if "Rxy" in self.params.levmar.parameter_flags:
      AX = self.helper.x[self.N_I+self.N_G:self.N_I+2*self.N_G] # XXX specific
      AY = self.helper.x[self.N_I+2*self.N_G:self.N_I+3*self.N_G] # XXX specific
      stats=flex.mean_and_variance(AX)
      print "Rx rotational increments in degrees: %8.6f +/- %8.6f"%(
           stats.mean(),stats.unweighted_sample_standard_deviation())
      stats=flex.mean_and_variance(AY)
      print "Ry rotational increments in degrees: %8.6f +/- %8.6f"%(
           stats.mean(),stats.unweighted_sample_standard_deviation())

    print "End of minimisation: Converged", self.helper.counter,"cycles"
    chi_squared = self.helper.objective() * 2.
    print "obj",chi_squared
    print "# of obs:",FSIM.raw_obs.size()
    dof = FSIM.raw_obs.size() - ( len(self.x) )
    print "degrees of freedom =",dof
    print "chisq/dof: %7.3f"%(chi_squared / dof)
    print
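Both Example #4 and Example #5 finish by reporting fit quality: the lstbx objective is half the weighted sum of squared residuals, so chi-squared is twice the objective, as computed above. A compact sketch of that report with assumed names, not taken from the source:

def report_fit_statistics(helper, n_obs, n_params):
    chi_squared = helper.objective() * 2.0  # objective is 0.5 * weighted sum of squares
    dof = n_obs - n_params                  # degrees of freedom
    print("chi-squared:", chi_squared)
    print("degrees of freedom =", dof)
    print("chisq/dof: %7.3f" % (chi_squared / dof))
    return chi_squared, dof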
Example #6
    def __init__(self,
                 conj_grad=True,
                 weights=None,
                 plot_truth=False,
                 plot=False,
                 sovlerization_maximus=True,
                 *args,
                 **kwargs):
        solvers.LBFGSsolver.__init__(self, *args,
                                     **kwargs)  # NOTE: do it with lbfgs=False
        # ^ brings in Yobs, LA, LB, PA, PB, Nhkl, Ns, Nmeas,   Aidx, Gidx

        # correct because working with logs
        if self.IAprm_truth is not None:
            self.IAprm_truth = flex.log(self.IAprm_truth)
            self.IBprm_truth = flex.log(self.IBprm_truth)
            self.Gprm_truth = self.Gprm_truth
            self.x_truth = (self.IAprm_truth.concatenate(
                self.IBprm_truth)).concatenate(self.Gprm_truth)

        self.x_init = flex.double(np.ascontiguousarray(
            self.guess["IAprm"])).concatenate(
                flex.double(np.ascontiguousarray(
                    self.guess["IBprm"]))).concatenate(
                        flex.double(np.ascontiguousarray(self.guess["Gprm"])))
        assert (len(self.x_init) == self.Nhkl * 2 + self.Ns)

        IAx = flex.log(self.x_init[:self.Nhkl])
        IBx = flex.log(self.x_init[self.Nhkl:2 * self.Nhkl])
        Gx = self.x_init[2 * self.Nhkl:]

        self.x_init = IAx.concatenate(IBx)
        self.x_init = self.x_init.concatenate(Gx)

        self.counter = 0

        # set dummy weights for now
        if weights is None:
            self.Wobs = flex.double(np.ones(len(self.Yobs)))
        else:
            self.Wobs = weights

        if plot_truth:
            try:
                truth = self.x_truth
            except AttributeError as error:
                print(error)
                truth = None
        else:
            truth = None

        self.helper = eigen_helper(initial_estimates=self.x_init,
                                   Nhkl=self.Nhkl,
                                   plot=plot,
                                   truth=truth)
        self.helper.eigen_wrapper.conj_grad = conj_grad
        self.helper.set_basic_data(self.Yobs, self.Wobs, self.Aidx, self.Gidx,
                                   self.PA, self.PB, self.LA, self.LB,
                                   self.Nhkl, self.Ns)

        self.helper.restart()

        if sovlerization_maximus:
            try:
                _ = normal_eqns_solving.levenberg_marquardt_iterations_encapsulated_eqns(
                    non_linear_ls=self.helper,
                    n_max_iterations=10000,
                    track_all=True,
                    step_threshold=0.0001)
            except (KeyboardInterrupt, AssertionError):
                pass
            print "End of minimization: Converged", self.helper.counter, "cycles"
            print self.helper.get_eigen_summary()
            print "Converged functional: ", self.helper.functional_basic(
                self.helper.x)
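Example #6 refines the IA and IB blocks in log space (keeping the intensities positive) while the gain block G stays linear. A brief sketch (layout inferred from the concatenation above, not code from the source) of recovering linear-scale values from the refined vector:

from scitbx.array_family import flex

def unpack_log_solution(x, Nhkl):
    IA = flex.exp(x[:Nhkl])            # back-transform log(IA)
    IB = flex.exp(x[Nhkl:2 * Nhkl])    # back-transform log(IB)
    G = x[2 * Nhkl:]                   # gains were refined on a linear scale
    return IA, IB, G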