def unpack_stddev(self):
  # the data-to-parameter ratio controls which method is used to estimate e.s.d.'s
  data_to_parameter = float(self.N_raw_obs) / self.helper.x.size()
  self.helper.build_up()
  if data_to_parameter <= 4. and self.helper.x.size() < 500:
    # estimate standard deviations by singular value decomposition
    norm_mat_packed_upper = self.helper.get_normal_matrix()
    norm_mat_all_elems = self.packed_to_all(norm_mat_packed_upper)
    NM = sqr(norm_mat_all_elems)
    from scitbx.linalg.svd import inverse_via_svd
    svd_inverse, sigma = inverse_via_svd(NM.as_flex_double_matrix())
    IA = sqr(svd_inverse)
    estimated_stddev = flex.double(
      [math.sqrt(IA(i, i)) for i in range(self.helper.x.size())])
  else:
    # estimate standard deviations by normal matrix curvatures
    diagonal_curvatures = self.helper.get_normal_matrix_diagonal()
    estimated_stddev = flex.sqrt(1. / diagonal_curvatures)
  return self.fitted_as_annotated(estimated_stddev)
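As a standalone illustration of the two routes above (not part of the original class), the following minimal sketch uses numpy in place of scitbx.flex/scitbx.linalg: invert an illustrative normal matrix via SVD and take the square roots of its diagonal, or approximate the e.s.d.'s from the reciprocal diagonal curvatures alone. The matrix values are invented for demonstration.

import numpy as np

# Illustrative 3x3 normal matrix (symmetric positive definite); values are made up.
NM = np.array([[4.0, 1.0, 0.5],
               [1.0, 3.0, 0.2],
               [0.5, 0.2, 2.0]])

# Route 1: full inverse via SVD; e.s.d.'s are the square roots of its diagonal.
U, s, Vt = np.linalg.svd(NM)
NM_inverse = (Vt.T / s) @ U.T            # V * diag(1/s) * U^T
esd_svd = np.sqrt(np.diag(NM_inverse))

# Route 2: cheap approximation from the diagonal curvatures only.
esd_curvature = np.sqrt(1.0 / np.diag(NM))

print("SVD route:      ", esd_svd)
print("curvature route:", esd_curvature)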
Example #2
  def __init__(self, x_obs, y_obs, w_obs, initial):
    self.counter = 0
    self.x = initial.deep_copy()
    self.helper = eigen_helper(initial_estimates = self.x)
    self.helper.set_cpp_data(x_obs, y_obs, w_obs)
    self.helper.restart()
    # refine by Levenberg-Marquardt on the encapsulated normal equations
    iterations = normal_eqns_solving.levenberg_marquardt_iterations_encapsulated_eqns(
               non_linear_ls = self.helper,
               n_max_iterations = 5000,
               track_all = True,
               step_threshold = 0.0001,
    )
    # estimate e.s.d.'s from the SVD inverse of the normal matrix
    self.helper.build_up()
    NM = sqr(self.get_helper_normal_matrix())
    from scitbx.linalg.svd import inverse_via_svd
    svd_inverse, sigma = inverse_via_svd(NM.as_flex_double_matrix())
    IA = sqr(svd_inverse)
    self.error_diagonal = flex.double([IA(i, i) for i in range(self.helper.x.size())])

    print("End of minimization: Converged", self.helper.counter, "cycles")
Example #3
    def esd_plot(self):
        print "OK esd"

        ### working on the esd problem:
        self.helper.build_up()
        norm_mat_packed_upper = self.helper.get_normal_matrix()
        norm_mat_all_elems = self.packed_to_all(norm_mat_packed_upper)
        diagonal_curvatures = self.helper.get_normal_matrix_diagonal()
        NM = sqr(norm_mat_all_elems)

        print "The normal matrix is:"
        self.pretty(NM)

        from scitbx.linalg.svd import inverse_via_svd
        svd_inverse, sigma = inverse_via_svd(NM.as_flex_double_matrix())

        print "ia", len(svd_inverse), len(sigma)
        IA = sqr(svd_inverse)
        for i in range(self.helper.x.size()):
            if i == self.N_I or i == self.N_I + self.N_G: print
            print "%2d %10.4f %10.4f %10.4f" % (
                i, self.helper.x[i], math.sqrt(
                    1. / diagonal_curvatures[i]), math.sqrt(IA(i, i)))

        from matplotlib import pyplot as plt
        plt.plot(flex.sqrt(flex.double([IA(i, i) for i in range(self.N_I)])),
                 flex.sqrt(1. / diagonal_curvatures[:self.N_I]), "r.")
        plt.title(
            "Structure factor e.s.d's from normal matrix curvatures vs. SVD variance diagonal"
        )
        plt.gca().set_aspect("equal")
        plt.show()
        return
        # everything below this early return is exploratory and never executed:
        # additional work to validate the Cholesky factorization and investigate stability
        identity = IA * NM

        print "verify identity:"
        self.pretty(identity, max_col=58, format="%7.1g")
        # we can fool ourselves that the SVD gave us a perfect inverse:
        self.pretty(identity, max_col=72, format="%4.0f")

        ### figure out stuff about permutation matrices
        self.helper.build_up()

        ordering = self.helper.get_eigen_permutation_ordering()
        print "ordering:", list(ordering)
        matcode = self.permutation_ordering_to_matrix(ordering)

        print "matcode:"
        self.pretty(matcode, max_col=72, format="%1d", zformat="%1d")

        permuted_normal_matrix = (matcode.inverse()) * NM * matcode
        print "product"
        self.pretty(permuted_normal_matrix)

        ### Now work with the Cholesky factorization
        cholesky_fac_packed_lower = self.helper.get_cholesky_lower()
        Lower = self.lower_triangular_packed_to_matrix(
            cholesky_fac_packed_lower)
        print "lower:"
        self.pretty(Lower, max_col=59, format="%7.0g", zformat="%7.0g")

        Transpose = Lower.transpose()
        print "transpose"
        self.pretty(Transpose, max_col=59, format="%7.0g", zformat="%7.0g")

        diagonal_factor = self.helper.get_cholesky_diagonal()
        Diag = self.diagonal_vector_to_matrix(diagonal_factor)
        print "diagonal:"
        self.pretty(Diag, max_col=59, format="%7.0g", zformat="%7.0g")

        Composed = Lower * Diag * Transpose
        print "composed"
        self.pretty(Composed, max_col=67, format="%6.0g", zformat="%6.0g")

        Diff = Composed - permuted_normal_matrix
        print "diff"
        self.prettynz(Diff, max_col=67, format="%6.0g", zformat="%6.0g")
        # OK, this proves that L * D * L^T = P * A * P^-1,
        # i.e. Eigen has correctly factored the permuted normal matrix
        ############

        Variance_diagonal = self.unstable_matrix_inversion_diagonal(
            Lower, Diag, Transpose)

        for i in range(self.helper.x.size()):
            if i == self.N_I or i == self.N_I + self.N_G:
                print()
            print("%2d %10.4f %10.4f %10.4f" % (
                i, self.helper.x[i],
                math.sqrt(1. / diagonal_curvatures[i]), math.sqrt(IA(i, i))), end=" ")
            print("svd err diag: %10.4f" % (IA(i, i)),
                  "eigen: %15.4f" % (Variance_diagonal[ordering[i]]))