from math import pi, sqrt

from scitbx import matrix
from scitbx.array_family import flex
from scitbx.math import angle_derivative_wrt_vectors
from scitbx.math.lefebvre import matrix_inverse_error_propagation

# perturb_mat, create_Bmat and calc_monte_carlo_population_covariances are
# helper functions assumed to be defined alongside these tests.


def test_lefebvre():
    """Run the test presented in part 4 of the paper Lefebvre et al. (1999),
    http://arxiv.org/abs/hep-ex/9909031."""

    # Simple test following the paper. First the MC simulation
    mat = matrix.sqr((0.7, 0.2, 0.4, 0.6))
    sig_mat = mat * 0.01
    p_mats = [perturb_mat(mat) for i in range(10000)]
    inv_p_mats = [m.inverse() for m in p_mats]

    # Here use the more exact formula for population covariance, as we know the
    # true means, rather than the sample covariance, where the means would be
    # calculated from the data
    cov_inv_mat_MC = calc_monte_carlo_population_covariances(
        inv_p_mats, mean_matrix=mat.inverse()
    )

    # Now the analytical formula
    cov_mat = matrix.diag([e**2 for e in sig_mat])
    cov_inv_mat = matrix_inverse_error_propagation(mat, cov_mat)
    cov_inv_mat = cov_inv_mat.as_flex_double_matrix()

    # Get fractional differences
    frac = (cov_inv_mat_MC - cov_inv_mat) / cov_inv_mat_MC

    # Assert all fractional errors are within 5%. When the random seed is
    # allowed to vary this fails about 7 times in every 100 runs. This is
    # mainly due to the random variation in the MC method itself, as with just
    # 10000 samples one MC estimate compared to another MC estimate regularly
    # has some fractional errors greater than 5%.
    assert all(abs(e) < 0.05 for e in frac)
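# A minimal, self-contained sketch (using numpy rather than scitbx) of the
# first-order propagation that matrix_inverse_error_propagation is assumed to
# perform, following Lefebvre et al. (1999): for Y = A^-1 the derivative is
# dY_ij/dA_kl = -Y_ik * Y_lj, so cov(vec Y) = J cov(vec A) J^T with J built
# from those derivatives. The function name below is hypothetical and is for
# illustration only.
def _sketch_inverse_error_propagation(A, cov_A):
    """Propagate cov_A (n^2 x n^2, row-major element order) through A -> A^-1."""
    import numpy as np

    A = np.asarray(A, dtype=float)
    Y = np.linalg.inv(A)
    n = A.shape[0]
    J = np.zeros((n * n, n * n))
    for i in range(n):
        for j in range(n):
            for k in range(n):
                for l in range(n):
                    # dY_ij/dA_kl = -Y_ik * Y_lj
                    J[i * n + j, k * n + l] = -Y[i, k] * Y[l, j]
    return J @ np.asarray(cov_A, dtype=float) @ J.T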
def test_B_matrix():
    """Test errors in an inverted B matrix, when the errors in B are known
    (and are independent)."""

    Bmat = create_Bmat()
    inv_Bmat = Bmat.inverse()

    # Note that elements in the strict upper triangle of B are zero, so their
    # perturbed values will be zero too and the errors in these elements will
    # also be zero (this is what we want).

    # Calculate the covariance of elements of inv_B by Monte Carlo simulation,
    # assuming that each element of B has an independent normal error given by
    # a sigma of 1% of the element value
    perturbed_Bmats = [perturb_mat(Bmat, fraction_sd=0.01) for i in range(10000)]
    invBs = [m.inverse() for m in perturbed_Bmats]
    cov_invB_MC = calc_monte_carlo_population_covariances(invBs, inv_Bmat)

    # Now calculate using the analytical formula. First we need the covariance
    # matrix of B itself. This is the diagonal matrix of the errors applied in
    # the simulation.
    n = Bmat.n_rows()
    sig_B = Bmat * 0.01
    cov_B = matrix.diag([e**2 for e in sig_B])

    # Now we can use the analytical formula
    cov_invB = matrix_inverse_error_propagation(Bmat, cov_B)
    cov_invB = cov_invB.as_flex_double_matrix()

    # Get fractional differences
    frac = flex.double(flex.grid(n**2, n**2), 0.0)
    for i in range(frac.all()[0]):
        for j in range(frac.all()[1]):
            e1 = cov_invB_MC[i, j]
            e2 = cov_invB[i, j]
            if e1 < 1e-10:
                continue  # avoid divide-by-zero errors below
            if e2 < 1e-10:
                continue  # avoid elements that are supposed to be zero
            frac[i, j] = (e1 - e2) / e1

    # Assert all fractional errors are within 5%
    assert all(abs(e) < 0.05 for e in frac)
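# Sketch of the Monte Carlo estimate used in the tests above, assuming that
# calc_monte_carlo_population_covariances averages outer products of
# deviations from the known true mean (the population formula, divisor N)
# rather than deviations from the sample mean. numpy is used here purely for
# illustration; the function name is hypothetical.
def _sketch_mc_population_covariances(mats, mean_mat):
    """Estimate the covariance of the flattened matrix elements from MC samples."""
    import numpy as np

    mean_vec = np.asarray(mean_mat, dtype=float).ravel()
    devs = np.array([np.asarray(m, dtype=float).ravel() - mean_vec for m in mats])
    # Population covariance: divide by N because the true mean is known
    return devs.T @ devs / len(mats)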
def _calc_cell_parameter_sd(self):

    # self._cov_B is the covariance matrix of elements of the B matrix. We
    # need to construct the covariance matrix of elements of the
    # transpose of B. The vector of elements of B is related to the
    # vector of elements of its transpose by a permutation, P.
    P = matrix.sqr(
        (
            1, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 1, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 1, 0, 0,
            0, 1, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 1, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 1, 0,
            0, 0, 1, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 1, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 1,
        )
    )

    # We can use P to replace var_cov with the covariance matrix of
    # elements of the transpose of B.
    var_cov = P * self._cov_B * P

    # From B = (O^-1)^T we can convert this to the covariance matrix of
    # the real space orthogonalisation matrix
    Bt = self._B.transpose()
    O = Bt.inverse()  # noqa: E741, is name of the matrix
    cov_O = matrix_inverse_error_propagation(Bt, var_cov)

    # The real space unit cell vectors are given by
    vec_a = O * matrix.col((1, 0, 0))
    vec_b = O * matrix.col((0, 1, 0))
    vec_c = O * matrix.col((0, 0, 1))

    # So the unit cell parameters are
    a, b, c = vec_a.length(), vec_b.length(), vec_c.length()
    # alpha = acos(vec_b.dot(vec_c) / (b * c))
    # beta = acos(vec_a.dot(vec_c) / (a * c))
    # gamma = acos(vec_a.dot(vec_b) / (a * b))

    # The estimated errors are calculated by error propagation from cov_O. In
    # each case we define a function F(O) that converts the matrix O into the
    # unit cell parameter of interest. To do error propagation to get the
    # variance of that cell parameter we need the Jacobian of the function.
    # This is a 1*9 matrix of derivatives in the order of the elements of O
    #
    #     / dF   dF   dF   dF   dF   dF   dF   dF   dF  \
    # J = | ---, ---, ---, ---, ---, ---, ---, ---, --- |
    #     \ da1  db1  dc1  da2  db2  dc2  da3  db3  dc3 /
    #

    # For cell length |a|, F = sqrt(a1^2 + a2^2 + a3^2)
    jacobian = matrix.rec(
        (vec_a[0] / a, 0, 0, vec_a[1] / a, 0, 0, vec_a[2] / a, 0, 0), (1, 9)
    )
    var_a = (jacobian * cov_O * jacobian.transpose())[0]

    # For cell length |b|, F = sqrt(b1^2 + b2^2 + b3^2)
    jacobian = matrix.rec(
        (0, vec_b[0] / b, 0, 0, vec_b[1] / b, 0, 0, vec_b[2] / b, 0), (1, 9)
    )
    var_b = (jacobian * cov_O * jacobian.transpose())[0]

    # For cell length |c|, F = sqrt(c1^2 + c2^2 + c3^2)
    jacobian = matrix.rec(
        (0, 0, vec_c[0] / c, 0, 0, vec_c[1] / c, 0, 0, vec_c[2] / c), (1, 9)
    )
    var_c = (jacobian * cov_O * jacobian.transpose())[0]

    # For cell volume (a X b).c,
    # F = c1(a2*b3 - b2*a3) + c2(a3*b1 - b3*a1) + c3(a1*b2 - b1*a2)
    a1, a2, a3 = vec_a
    b1, b2, b3 = vec_b
    c1, c2, c3 = vec_c
    jacobian = matrix.rec(
        (
            c3 * b2 - c2 * b3,
            c1 * b3 - c3 * b1,
            c2 * b1 - c1 * b2,
            c2 * a3 - c3 * a2,
            c3 * a1 - c1 * a3,
            c1 * a2 - c2 * a1,
            a2 * b3 - b2 * a3,
            a3 * b1 - b3 * a1,
            a1 * b2 - b1 * a2,
        ),
        (1, 9),
    )
    var_V = (jacobian * cov_O * jacobian.transpose())[0]
    self._cell_volume_sd = sqrt(var_V)

    # For the unit cell angles we need to calculate derivatives of the angles
    # with respect to the elements of O
    dalpha_db, dalpha_dc = angle_derivative_wrt_vectors(vec_b, vec_c)
    dbeta_da, dbeta_dc = angle_derivative_wrt_vectors(vec_a, vec_c)
    dgamma_da, dgamma_db = angle_derivative_wrt_vectors(vec_a, vec_b)

    # For angle alpha, F = acos( b.c / |b||c|)
    jacobian = matrix.rec(
        (
            0,
            dalpha_db[0],
            dalpha_dc[0],
            0,
            dalpha_db[1],
            dalpha_dc[1],
            0,
            dalpha_db[2],
            dalpha_dc[2],
        ),
        (1, 9),
    )
    var_alpha = (jacobian * cov_O * jacobian.transpose())[0]

    # For angle beta, F = acos( a.c / |a||c|)
    jacobian = matrix.rec(
        (
            dbeta_da[0],
            0,
            dbeta_dc[0],
            dbeta_da[1],
            0,
            dbeta_dc[1],
            dbeta_da[2],
            0,
            dbeta_dc[2],
        ),
        (1, 9),
    )
    var_beta = (jacobian * cov_O * jacobian.transpose())[0]

    # For angle gamma, F = acos( a.b / |a||b|)
    jacobian = matrix.rec(
        (
            dgamma_da[0],
            dgamma_db[0],
            0,
            dgamma_da[1],
            dgamma_db[1],
            0,
            dgamma_da[2],
            dgamma_db[2],
            0,
        ),
        (1, 9),
    )
    var_gamma = (jacobian * cov_O * jacobian.transpose())[0]

    # Symmetry constraints may mean variances of the angles should be zero.
    # Floating point error could lead to negative variances. Ensure these are
    # caught before taking their square root!
    var_alpha = max(0, var_alpha)
    var_beta = max(0, var_beta)
    var_gamma = max(0, var_gamma)

    rad2deg = 180.0 / pi
    self._cell_sd = (
        sqrt(var_a),
        sqrt(var_b),
        sqrt(var_c),
        sqrt(var_alpha) * rad2deg,
        sqrt(var_beta) * rad2deg,
        sqrt(var_gamma) * rad2deg,
    )
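# Sketch of the angle derivative assumed to be returned by
# angle_derivative_wrt_vectors: for theta = acos(u.v / (|u||v|)),
# dtheta/du = (cos(theta) * u / |u|^2 - v / (|u||v|)) / sin(theta), and the
# symmetric expression for dtheta/dv. Written with numpy for illustration
# only; the real helper lives in scitbx.math.
def _sketch_angle_derivative_wrt_vectors(u, v):
    import numpy as np

    u = np.asarray(u, dtype=float)
    v = np.asarray(v, dtype=float)
    nu, nv = np.linalg.norm(u), np.linalg.norm(v)
    cos_t = np.dot(u, v) / (nu * nv)
    sin_t = np.sqrt(1.0 - cos_t * cos_t)  # undefined for (anti)parallel vectors
    dtheta_du = (cos_t * u / nu**2 - v / (nu * nv)) / sin_t
    dtheta_dv = (cos_t * v / nv**2 - u / (nu * nv)) / sin_t
    return dtheta_du, dtheta_dv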