def run(pdb_file_name, n_models, log, eps=1.e-7, output_file_name="ensemble.pdb"):
  """Decompose the single TLS group of a PDB file and write an ensemble.

  Reads the model and its REMARK 3 TLS records, validates that T and L are
  positive definite (within eps), converts T/L/S into working units, runs the
  TLS decomposition and writes n_models structures to output_file_name.
  Raises Sorry when the file does not contain exactly one TLS group or when
  T or L fails the positive-definiteness check.
  """
  inp = iotbx.pdb.input(file_name=pdb_file_name)
  hierarchy = inp.construct_hierarchy()
  xray_structure = hierarchy.extract_xray_structure(
    crystal_symmetry=inp.crystal_symmetry_from_cryst1())
  extract = mmtbx.tls.tools.tls_from_pdb_inp(
    remark_3_records=inp.extract_remark_iii_records(3),
    pdb_hierarchy=hierarchy)
  groups = extract.tls_params
  if(len(groups) != 1):
    raise Sorry("Only one TLS group per PDB is currently supported.")
  group = groups[0]  # XXX one group only
  # Sanity check before converting units.
  if(not adptbx.is_positive_definite(group.t, eps)):
    raise Sorry("T matrix is not positive definite.")
  if(not adptbx.is_positive_definite(group.l, eps)):
    raise Sorry("L matrix is not positive definite.")
  scale = math.pi/180  # Units: T[A], L[deg**2], S[A*deg]
  T = matrix.sym(sym_mat3=group.t)
  L = matrix.sym(sym_mat3=group.l)*(scale**2)
  S = matrix.sqr(group.s)*scale
  decomposition = analysis.run(T=T, L=L, S=S, log=log)
  ensemble_generator(
    decompose_tls_object = decomposition,
    pdb_hierarchy        = hierarchy,
    xray_structure       = xray_structure,
    n_models             = n_models,
    log                  = log).write_pdb_file(file_name=output_file_name)
def f(self):
  """Sum exp(mtps * h^T (op u op^T) h) over the operations in self.ops."""
  # Quantities that do not change across operations are built once.
  u_sym = matrix.sym(sym_mat3=self.u)
  hkl_row = matrix.row(self.hkl)
  hkl_col = matrix.col(self.hkl)
  acc = 0
  for op in self.ops:
    u_rot = (op * u_sym * op.transpose()).as_sym_mat3()
    huh = (hkl_row * matrix.sym(sym_mat3=u_rot)).dot(hkl_col)
    acc += math.exp(mtps * huh)
  return acc
def exercise_covariance():
  """Exercise covariance extraction/orthogonalization for sites and ADPs.

  Fix: the final approx_equal(...) result was discarded (missing assert),
  so that check was a silent no-op.
  """
  xs = xray.structure(
    crystal_symmetry=crystal.symmetry(
      (5.01,5.01,5.47,90,90,120), "P6222"),
    scatterers=flex.xray_scatterer([
      xray.scatterer("Si", (1/2.,1/2.,1/3.)),
      xray.scatterer("O", (0.197,-0.197,0.83333))]))
  uc = xs.unit_cell()
  flags = xs.scatterer_flags()
  for f in flags:
    f.set_grad_site(True)
  xs.set_scatterer_flags(flags)
  # Packed-upper covariance for the 6 site parameters of the two scatterers.
  cov = flex.double((1e-8,1e-9,2e-9,3e-9,4e-9,5e-9,
                          2e-8,1e-9,2e-9,3e-9,4e-9,
                               3e-8,1e-9,2e-9,3e-9,
                                    2e-8,1e-9,2e-9,
                                         3e-8,1e-9,
                                              4e-8))
  param_map = xs.parameter_map()
  # Extracting for all scatterers must return the full matrix unchanged.
  assert approx_equal(cov, covariance.extract_covariance_matrix_for_sites(
    flex.size_t([0,1]), cov, param_map))
  cov_cart = covariance.orthogonalize_covariance_matrix(cov, uc, param_map)
  O = matrix.sqr(uc.orthogonalization_matrix())
  # Per-scatterer check: cart covariance == O * frac covariance * O^T.
  for i in range(param_map.n_scatterers):
    cov_i = covariance.extract_covariance_matrix_for_sites(
      flex.size_t([i]), cov, param_map)
    cov_i_cart = covariance.extract_covariance_matrix_for_sites(
      flex.size_t([i]), cov_cart, param_map)
    assert approx_equal(
      O * matrix.sym(sym_mat3=cov_i) * O.transpose(),
      matrix.sym(sym_mat3=cov_i_cart).as_mat3())
  # Switch to ADP gradients: scatterer 0 aniso, scatterer 1 iso.
  for f in flags:
    f.set_grads(False)
  flags[0].set_grad_u_aniso(True)
  flags[0].set_use_u_aniso(True)
  flags[1].set_grad_u_iso(True)
  flags[1].set_use_u_iso(True)
  xs.set_scatterer_flags(flags)
  param_map = xs.parameter_map()
  cov = flex.double(7*7, 0)
  cov.reshape(flex.grid(7,7))
  cov.matrix_diagonal_set_in_place(flex.double([i for i in range(7)]))
  cov = cov.matrix_symmetric_as_packed_u()
  assert approx_equal([i for i in range(6)],
    covariance.extract_covariance_matrix_for_u_aniso(
      0, cov, param_map).matrix_packed_u_diagonal())
  assert covariance.variance_for_u_iso(1, cov, param_map) == 6
  # Mismatched parameter kinds must raise.
  try: covariance.variance_for_u_iso(0, cov, param_map)
  except RuntimeError: pass
  else: raise Exception_expected
  try: covariance.extract_covariance_matrix_for_u_aniso(1, cov, param_map)
  except RuntimeError: pass
  else: raise Exception_expected
  # Site parameters are not refined now, so their covariance is zero.
  assert approx_equal(covariance.extract_covariance_matrix_for_sites(
    flex.size_t([1]), cov, param_map), (0,0,0,0,0,0))
def d_u(self):
  """Gradient of f() with respect to the six u components, summed over ops."""
  h, k, l = self.hkl
  # d(h^T u h)/d(u) for the packed sym_mat3 ordering.
  d_exp_huh_d_u = matrix.col([h**2, k**2, l**2, 2*h*k, 2*h*l, 2*k*l])
  u_sym = matrix.sym(sym_mat3=self.u)
  hkl_row = matrix.row(self.hkl)
  hkl_col = matrix.col(self.hkl)
  grad = flex.double(6, 0)
  for op in self.ops:
    u_rot = (op * u_sym * op.transpose()).as_sym_mat3()
    huh = (hkl_row * matrix.sym(sym_mat3=u_rot)).dot(hkl_col)
    term = math.exp(mtps * huh) * mtps * d_exp_huh_d_u
    # Transform the per-op gradient back to the original u basis.
    gtmx = tensor_rank_2_gradient_transform_matrix(op)
    grad += gtmx.matrix_multiply(flex.double(term))
  return grad
def d2_u(self):
  """6x6 second-derivative matrix of f() with respect to u, summed over ops."""
  h, k, l = self.hkl
  d_exp_huh_d_u = flex.double([h**2, k**2, l**2, 2*h*k, 2*h*l, 2*k*l])
  # Outer product gives the curvature of the exponent in packed order.
  d2_exp_huh_d_uu = d_exp_huh_d_u.matrix_outer_product(d_exp_huh_d_u)
  u_sym = matrix.sym(sym_mat3=self.u)
  hkl_row = matrix.row(self.hkl)
  hkl_col = matrix.col(self.hkl)
  hessian = flex.double(flex.grid(6,6), 0)
  for op in self.ops:
    u_rot = (op * u_sym * op.transpose()).as_sym_mat3()
    huh = (hkl_row * matrix.sym(sym_mat3=u_rot)).dot(hkl_col)
    term = math.exp(mtps * huh) * mtps**2 * d2_exp_huh_d_uu
    # Conjugate by the gradient-transform matrix: G * H * G^T.
    gtmx = tensor_rank_2_gradient_transform_matrix(op)
    hessian += gtmx.matrix_multiply(term).matrix_multiply(
      gtmx.matrix_transpose())
  return hessian
def step_h(self, V_L, b_o):
  """Three uncorrelated translations."""
  print_step("Step h:", self.log)
  # Transform V from the L basis into the M basis.
  V_M = b_o.R_PL * V_L * b_o.R_PL.transpose()
  self.show_matrix(x=V_M, title="V_M ")
  es = self.eigen_system_default_handler(m=V_M)
  v_x, v_y, v_z = es.x, es.y, es.z
  lam_u, lam_v, lam_w = es.vals
  for title, vec in (("v_x", v_x), ("v_y", v_y), ("v_z", v_z)):
    self.show_vector(x=vec, title=title)
  # The eigenvectors must be mutually orthogonal.
  for a, b in ((v_x, v_y), (v_y, v_z), (v_z, v_x)):
    assert approx_equal(a.dot(b), 0)
  # Columns of R_MV are the eigenvectors.
  R_MV = matrix.sqr([
    v_x[0], v_y[0], v_z[0],
    v_x[1], v_y[1], v_z[1],
    v_x[2], v_y[2], v_z[2]])
  self.show_matrix(x=R_MV, title="R_MV")
  V_V = matrix.sym(sym_mat3=[lam_u, lam_v, lam_w, 0, 0, 0])
  self.show_matrix(x=V_V, title="V_V")
  assert approx_equal(V_V, R_MV.transpose() * V_M * R_MV)  # formula (20)
  return group_args(
    v_x  = v_x,
    v_y  = v_y,
    v_z  = v_z,
    V_M  = V_M,
    V_V  = V_V,
    R_MV = R_MV)
def exercise_flood_fill():
  """Exercise masks.flood_fill on small maps (void labelling + geometry).

  Fix: removed the dead `uc = uctbx.unit_cell(...)` assignment that was
  immediately shadowed by the for-loop variable.
  """
  for uc in (uctbx.unit_cell('10 10 10 90 90 90'),
             uctbx.unit_cell('9 10 11 87 91 95')):
    gridding = maptbx.crystal_gridding(
      unit_cell=uc,
      pre_determined_n_real=(5,5,5))
    corner_cube = (0,4,20,24,100,104,120,124) # cube across all 8 corners
    channel = (12,37,38,39,42,43,62,63,67,68,87,112)
    data = flex.int(flex.grid(gridding.n_real()))
    for i in (corner_cube + channel):
      data[i] = 1
    flood_fill = masks.flood_fill(data, uc)
    assert data.count(0) == 105
    # Each void gets its own label (2, 3, ...).
    for i in corner_cube:
      assert data[i] == 2
    for i in channel:
      assert data[i] == 3
    assert approx_equal(flood_fill.centres_of_mass(),
                        ((-0.5, -0.5, -0.5), (-2.5, 7/3, 2.5)))
    assert approx_equal(flood_fill.centres_of_mass_frac(),
                        ((-0.1, -0.1, -0.1), (-0.5, 7/15, 0.5)))
    assert approx_equal(flood_fill.centres_of_mass_cart(),
                        uc.orthogonalize(flood_fill.centres_of_mass_frac()))
    assert flood_fill.n_voids() == 2
    assert approx_equal(flood_fill.grid_points_per_void(), (8, 12))
    if 0:  # manual visual inspection only
      from crys3d import wx_map_viewer
      wx_map_viewer.display(raw_map=data.as_double(), unit_cell=uc,
                            wires=False)
    #
    gridding = maptbx.crystal_gridding(
      unit_cell=uc,
      pre_determined_n_real=(10,10,10))
    data = flex.int(flex.grid(gridding.n_real()))
    # parallelogram
    points = [(2,4,5),(3,4,5),(4,4,5),(5,4,5),(6,4,5),
              (3,5,5),(4,5,5),(5,5,5),(6,5,5),(7,5,5),
              (4,6,5),(5,6,5),(6,6,5),(7,6,5),(8,6,5)]
    points_frac = flex.vec3_double()
    for p in points:
      data[p] = 1
      points_frac.append([p[i]/gridding.n_real()[i] for i in range(3)])
    points_cart = uc.orthogonalize(points_frac)
    flood_fill = masks.flood_fill(data, uc)
    assert data.count(2) == 15
    assert approx_equal(flood_fill.centres_of_mass_frac(), ((0.5,0.5,0.5),))
    # Compare flood-fill geometry against an independent inertia computation.
    pai_cart = math.principal_axes_of_inertia(
      points=points_cart, weights=flex.double(points_cart.size(), 1.0))
    F = matrix.sqr(uc.fractionalization_matrix())
    O = matrix.sqr(uc.orthogonalization_matrix())
    assert approx_equal(
      pai_cart.center_of_mass(), flood_fill.centres_of_mass_cart()[0])
    assert approx_equal(
      flood_fill.covariance_matrices_cart()[0],
      (F.transpose() * matrix.sym(
        sym_mat3=flood_fill.covariance_matrices_frac()[0]) * F).as_sym_mat3())
    assert approx_equal(
      pai_cart.inertia_tensor(), flood_fill.inertia_tensors_cart()[0])
    assert approx_equal(pai_cart.eigensystem().vectors(),
                        flood_fill.eigensystems_cart()[0].vectors())
    assert approx_equal(pai_cart.eigensystem().values(),
                        flood_fill.eigensystems_cart()[0].values())
  return
def f(self):
  """Structure factor: sum over scatterers and space-group operations."""
  tphkl = 2 * math.pi * matrix.col(self.hkl)
  hkl_row = matrix.row(self.hkl)
  hkl_col = matrix.col(self.hkl)
  total = 0
  for scatterer in self.scatterers:
    w = scatterer.weight()
    if (not scatterer.flags.use_u_aniso()):
      # Isotropic Debye-Waller factor is identical for every symmetry copy.
      dw = math.exp(mtps * scatterer.u_iso * self.d_star_sq)
    gaussian = self.scattering_type_registry.gaussian_not_optional(
      scattering_type=scatterer.scattering_type)
    f0 = gaussian.at_d_star_sq(self.d_star_sq)
    ff = (f0 + scatterer.fp) + 1j * scatterer.fdp
    for s in self.space_group:
      alpha = matrix.col(s * scatterer.site).dot(tphkl)
      if (scatterer.flags.use_u_aniso()):
        # Anisotropic factor depends on the rotation part of each op.
        r = s.r().as_rational().as_float()
        s_u_star_s = r * matrix.sym(sym_mat3=scatterer.u_star) * r.transpose()
        dw = math.exp(mtps * (hkl_row * s_u_star_s).dot(hkl_col))
      total += w * dw * ff * cmath.exp(1j*alpha)
  return total
def exercise_isotropic_adp():
  """Exercise isotropic_adp restraint: proxies, residuals, gradients,
  finite-difference agreement and frame invariance of the residual.

  Fix: removed the unused local `gradients_iso` (never passed anywhere).
  """
  i_seqs = (0,)
  weight = 2
  u_cart = ((1,2,3,5,2,8),)
  u_iso = (0,)
  use_u_aniso = (True,)
  p = adp_restraints.isotropic_adp_proxy(
    i_seqs=i_seqs,
    weight=weight)
  assert p.i_seqs == i_seqs
  assert approx_equal(p.weight, weight)
  i = adp_restraints.isotropic_adp(u_cart=u_cart[0], weight=weight)
  expected_deltas = (-1, 0, 1, 5, 2, 8)
  expected_gradients = (-4, 0, 4, 40, 16, 64)
  assert approx_equal(i.weight, weight)
  assert approx_equal(i.deltas(), expected_deltas)
  assert approx_equal(i.rms_deltas(), 4.5704364002673632)
  assert approx_equal(i.residual(), 376.0)
  assert approx_equal(i.gradients(), expected_gradients)
  gradients_aniso_cart = flex.sym_mat3_double(1, (0,0,0,0,0,0))
  proxies = adp_restraints.shared_isotropic_adp_proxy([p,p])
  u_cart = flex.sym_mat3_double(u_cart)
  u_iso = flex.double(u_iso)
  use_u_aniso = flex.bool(use_u_aniso)
  params = adp_restraint_params(
    u_cart=u_cart, u_iso=u_iso, use_u_aniso=use_u_aniso)
  residuals = adp_restraints.isotropic_adp_residuals(params, proxies=proxies)
  assert approx_equal(residuals, (i.residual(), i.residual()))
  deltas_rms = adp_restraints.isotropic_adp_deltas_rms(params, proxies=proxies)
  assert approx_equal(deltas_rms, (i.rms_deltas(), i.rms_deltas()))
  residual_sum = adp_restraints.isotropic_adp_residual_sum(
    params,
    proxies=proxies,
    gradients_aniso_cart=gradients_aniso_cart)
  assert approx_equal(residual_sum, 752.0)
  # Analytical gradients vs finite differences (factor 2: two proxies).
  fd_grads_aniso, fd_grads_iso = finite_difference_gradients(
    restraint_type=adp_restraints.isotropic_adp,
    proxy=p,
    u_cart=u_cart,
    u_iso=u_iso,
    use_u_aniso=use_u_aniso)
  for g, e in zip(gradients_aniso_cart, fd_grads_aniso):
    assert approx_equal(g, matrix.col(e)*2)
  #
  # check frame invariance of residual
  #
  u_cart = matrix.sym(sym_mat3=(0.1,0.2,0.05,0.03,0.02,0.01))
  a = adp_restraints.isotropic_adp(
    u_cart=u_cart.as_sym_mat3(), weight=1)
  expected_residual = a.residual()
  gen = flex.mersenne_twister()
  for i in range(20):
    # Random rotation must leave the residual unchanged.
    R = matrix.rec(gen.random_double_r3_rotation_matrix(), (3,3))
    u_cart_rot = R * u_cart * R.transpose()
    a = adp_restraints.isotropic_adp(
      u_cart=u_cart_rot.as_sym_mat3(), weight=1)
    assert approx_equal(a.residual(), expected_residual)
def exercise_cholesky():
  """Exercise cholesky_decomposition / cholesky_solve on identity, random
  positive-diagonal, fixed, rotated and random SPD matrices; also checks
  that decomposition of a non-positive-definite matrix returns None.

  NOTE: the seeded mersenne_twister stream makes statement order significant.
  """
  mt = flex.mersenne_twister(seed=0)
  # Identity, then random positive diagonal, for sizes 1..9.
  for n in xrange(1,10):
    a = flex.double(n*n,0)
    a.resize(flex.grid(n, n))
    for i in xrange(n):
      a[(i,i)] = 1
    c = cholesky_decomposition(a)
    assert c is not None
    assert approx_equal(c.matrix_multiply(c.matrix_transpose()), a)
    b = mt.random_double(size=n, factor=4)-2
    x = cholesky_solve(c, b)
    assert approx_equal(a.matrix_multiply(x), b)
    d = flex.random_size_t(size=n, modulus=10)
    for i in xrange(n):
      a[(i,i)] = d[i]+1
    c = cholesky_decomposition(a)
    assert c is not None
    assert approx_equal(c.matrix_multiply(c.matrix_transpose()), a)
    b = mt.random_double(size=n, factor=4)-2
    x = cholesky_solve(c, b)
    assert approx_equal(a.matrix_multiply(x), b)
  # Fixed 3x3 SPD matrix with a known factorization.
  a = flex.double([8, -6, 0, -6, 9, -2, 0, -2, 8])
  a.resize(flex.grid(3,3))
  c = cholesky_decomposition(a)
  assert c is not None
  assert approx_equal(c.matrix_multiply(c.matrix_transpose()), a)
  assert approx_equal(c, [
    2.828427125, 0, 0,
    -2.121320344, 2.121320343, 0,
    0., -0.9428090418, 2.666666667])
  # Random rotations of a fixed SPD matrix; solve for several rhs vectors.
  a0 = matrix.sym(sym_mat3=[3,5,7,1,2,-1])
  for i_trial in xrange(100):
    r = scitbx.math.euler_angles_as_matrix(
      mt.random_double(size=3,factor=360), deg=True)
    a = flex.double(r * a0 * r.transpose())
    a.resize(flex.grid(3,3))
    c = cholesky_decomposition(a)
    assert c is not None
    assert approx_equal(c.matrix_multiply(c.matrix_transpose()), a)
    for b in [(0.1,-0.5,2), (-0.3,0.7,-1), (1.3,2.9,4), (-10,-20,17)]:
      b = flex.double(b)
      x = cholesky_solve(c, b)
      assert approx_equal(a.matrix_multiply(x), b)
  # Random SPD (r*r^T); then flipping a diagonal element must break PD-ness.
  for n in xrange(1,10):
    for i in xrange(10):
      r = mt.random_double(size=n*n, factor=10)-5
      r.resize(flex.grid(n,n))
      a = r.matrix_multiply(r.matrix_transpose())
      c = cholesky_decomposition(a)
      assert c is not None
      b = mt.random_double(size=n, factor=4)-2
      x = cholesky_solve(c, b)
      assert approx_equal(a.matrix_multiply(x), b)
      a[(i%n,i%n)] *= -1
      c = cholesky_decomposition(a)
      assert c is None
def exercise_basic():
  """Spot-check element sums of the basic featherstone (fs) helpers."""
  cases = [
    (fs.Xrotx(0.1), 5.98001666111),
    (fs.Xroty(0.2), 5.92026631136),
    (fs.Xrotz(0.3), 5.8213459565),
    (fs.Xrot((1, 2, 3, 4, 5, 6, 7, 8, 9)), 90),
    (fs.Xtrans((1, 2, 3)), 6),
    (fs.crm((1, 2, 3, 4, 5, 6)), 0),
    (fs.crf((1, 2, 3, 4, 5, 6)), 0),
    (fs.mcI(m=1.234,
            c=matrix.col((1, 2, 3)),
            I=matrix.sym(sym_mat3=(2, 3, 4, 0.1, 0.2, 0.3))), 21.306),
  ]
  for obj, expected in cases:
    assert approx_equal(sum(obj), expected)
def run(pdb_file_name, tls_object, crystal_info, n_models=1000,
        output_file_name='ensemble.pdb', eps=1.e-7):
  """Generate a TLS ensemble for pdb_file_name from an externally supplied
  TLS object (instead of parsing REMARK 3 records) and write it to
  output_file_name.

  Fix: removed large blocks of commented-out dead code (REMARK 3 parsing
  and the positive-definiteness checks); intent preserved as notes below.
  """
  import sys
  log = sys.stdout
  pdb_inp = iotbx.pdb.input(file_name=pdb_file_name)
  pdb_hierarchy = pdb_inp.construct_hierarchy()
  xrs = pdb_hierarchy.extract_xray_structure(crystal_symmetry=crystal_info)
  # The TLS parameters are passed in directly by the caller.
  tlso = tls_object
  deg_to_rad_scale = math.pi/180
  # Units: T[A], L[deg**2], S[A*deg]
  T = matrix.sym(sym_mat3=tlso.t)
  L = matrix.sym(sym_mat3=tlso.l)*(deg_to_rad_scale**2)
  S = matrix.sqr(tlso.s)*deg_to_rad_scale
  # NOTE(review): the positive-definiteness sanity checks on T and L are
  # intentionally disabled here; they should be replaced with a positive
  # SEMI-definite check (eps is kept in the signature for that purpose).
  r = decompose_tls(T=T, L=L, S=S, log=log)
  ensemble_generator(
    decompose_tls_object = r,
    pdb_hierarchy        = pdb_hierarchy,
    xray_structure       = xrs,
    n_models             = n_models,
    log                  = log).write_pdb_file(file_name=output_file_name)
def df_d_params(self):
  """First derivatives of the structure factor with respect to each
  scatterer's parameters (site, u_iso or u_star, occupancy, fp, fdp).

  Returns a list with one `gradients` object per scatterer; gradients are
  accumulated over all space-group operations. Only "const" scattering
  type is supported.
  """
  result = []
  tphkl = 2 * math.pi * matrix.col(self.hkl)
  h,k,l = self.hkl
  # d(h^T u* h)/d(u*) in packed sym_mat3 ordering.
  d_exp_huh_d_u_star = matrix.col([h**2, k**2, l**2, 2*h*k, 2*h*l, 2*k*l])
  for scatterer in self.scatterers:
    assert scatterer.scattering_type == "const"
    w = scatterer.occupancy
    if (not scatterer.flags.use_u_aniso()):
      # Isotropic Debye-Waller factor: same for every symmetry copy.
      huh = scatterer.u_iso * self.d_star_sq
      dw = math.exp(mtps * huh)
    ffp = 1 + scatterer.fp
    fdp = scatterer.fdp
    ff = ffp + 1j * fdp
    # Accumulators; exactly one of d_u_iso / d_u_star is active.
    d_site = matrix.col([0,0,0])
    if (not scatterer.flags.use_u_aniso()):
      d_u_iso = 0
      d_u_star = None
    else:
      d_u_iso = None
      d_u_star = matrix.col([0,0,0,0,0,0])
    d_occ = 0
    d_fp = 0
    d_fdp = 0
    for s in self.space_group:
      r = s.r().as_rational().as_float()
      s_site = s * scatterer.site
      alpha = matrix.col(s_site).dot(tphkl)
      if (scatterer.flags.use_u_aniso()):
        # Anisotropic factor depends on the rotation part of each op.
        s_u_star_s = r*matrix.sym(sym_mat3=scatterer.u_star)*r.transpose()
        huh = (matrix.row(self.hkl) * s_u_star_s).dot(matrix.col(self.hkl))
        dw = math.exp(mtps * huh)
      e = cmath.exp(1j*alpha)
      # Chain rule through the symmetry op: gradients transform with r^T.
      site_gtmx = r.transpose()
      d_site += site_gtmx * (
        w * dw * ff * e * 1j * tphkl)
      if (not scatterer.flags.use_u_aniso()):
        d_u_iso += w * dw * ff * e * mtps * self.d_star_sq
      else:
        u_star_gtmx = matrix.sqr(tensor_rank_2_gradient_transform_matrix(r))
        d_u_star += u_star_gtmx * (
          w * dw * ff * e * mtps * d_exp_huh_d_u_star)
      d_occ += dw * ff * e
      d_fp += w * dw * e
      d_fdp += w * dw * e * 1j
    result.append(gradients(
      site=d_site,
      u_iso=d_u_iso,
      u_star=d_u_star,
      occupancy=d_occ,
      fp=d_fp,
      fdp=d_fdp))
  return result
def exercise_eigen_core(diag):
  """Check that the eigenvalues of a randomly rotated ellipsoid with the
  given diagonal recover the diagonal, for both the eigenvalue-only and
  the full-eigensystem code paths.
  """
  # Random rotation of the diagonal tensor (off-diagonals start at zero).
  u = adptbx.random_rotate_ellipsoid(diag + [0.0, 0.0, 0.0])
  ev = list(adptbx.eigenvalues(u))
  diag.sort()
  ev.sort()
  for i in xrange(3):
    check_eigenvalue(u, ev[i])
  for i in xrange(3):
    assert abs(diag[i] - ev[i]) < 1.0e-4
  if adptbx.is_positive_definite(ev):
    es = adptbx.eigensystem(u)
    ev = list(es.values())
    ev.sort()
    for i in xrange(3):
      check_eigenvalue(u, ev[i])
    for i in xrange(3):
      assert abs(diag[i] - ev[i]) < 1.0e-4
    evec = []
    for i in xrange(3):
      check_eigenvector(u, es.values()[i], es.vectors(i))
      evec.extend(es.vectors(i))
  return # XXX following tests disabled for the moment
         # sometimes fail if eigenvalues are very similar but not identical
  # Unreachable: square-root factorization round-trip checks.
  sqrt_eval = matrix.diag(flex.sqrt(flex.double(es.values())))
  evec = matrix.sqr(evec).transpose()
  sqrt_u = evec * sqrt_eval * evec.transpose()
  u_full = matrix.sym(sym_mat3=u).elems
  assert approx_equal(u_full, (sqrt_u.transpose() * sqrt_u).elems,
                      eps=1.0e-3)
  assert approx_equal(u_full, (sqrt_u * sqrt_u.transpose()).elems,
                      eps=1.0e-3)
  assert approx_equal(u_full, (sqrt_u * sqrt_u).elems, eps=1.0e-3)
  # Unreachable: products of perturbed square roots stay non-negative.
  sqrt_u_plus_shifts = matrix.sym(
    sym_mat3=[x + 10 * (random.random() - 0.5)
              for x in sqrt_u.as_sym_mat3()])
  sts = (sqrt_u_plus_shifts.transpose() * sqrt_u_plus_shifts).as_sym_mat3()
  ev = adptbx.eigenvalues(sts)
  assert min(ev) >= 0
  sts = (sqrt_u_plus_shifts * sqrt_u_plus_shifts.transpose()).as_sym_mat3()
  ev = adptbx.eigenvalues(sts)
  assert min(ev) >= 0
  sts = (sqrt_u_plus_shifts * sqrt_u_plus_shifts).as_sym_mat3()
  ev = adptbx.eigenvalues(sts)
  assert min(ev) >= 0
def cmd_driver(pdb_file_name): pdb_inp = iotbx.pdb.input(file_name = pdb_file_name) pdb_hierarchy = pdb_inp.construct_hierarchy() xrs = pdb_hierarchy.extract_xray_structure( crystal_symmetry=pdb_inp.crystal_symmetry_from_cryst1()) tls_extract = mmtbx.tls.tools.tls_from_pdb_inp( remark_3_records = pdb_inp.extract_remark_iii_records(3), pdb_hierarchy = pdb_hierarchy) tlsos = tls_extract.tls_params for i_seq, tlso in enumerate(tlsos, 1): log = set_log(prefix=os.path.basename(pdb_file_name), i_current=i_seq, i_total=len(tlsos)) deg_to_rad_scale = math.pi/180 # Units: T[A], L[deg**2], S[A*deg] T = matrix.sym(sym_mat3=tlso.t) L = matrix.sym(sym_mat3=tlso.l)*(deg_to_rad_scale**2) S = matrix.sqr(tlso.s)*deg_to_rad_scale try: r = run(T=T, L=L, S=S, log=log) except Exception, e: print >> log, str(e) log.close()
def step_g(self, T_L, C_LW, e_o, S_C):
  """
  Calculate translation contribution C_LS of rotation due to screw
  components and resulting V_L matrix.
  """
  print_step("Step g:", self.log)
  # Diagonal screw contribution from the screw pitches and S diagonal.
  C_LS = matrix.sym(sym_mat3=[
    e_o.sx_bar*S_C[0],
    e_o.sy_bar*S_C[4],
    e_o.sz_bar*S_C[8],
    0, 0, 0])
  C_L = C_LW + C_LS
  V_L = T_L - C_L
  for m, title in ((C_LS, "C_LS"), (C_L, "C_L "), (V_L, "V_L ")):
    self.show_matrix(x=m, title=title)
  return group_args(
    V_L  = V_L,
    C_L  = C_L,
    C_LS = C_LS)
def step_f(self, c_o, T_L, L_L):
  """
  Calculate translational contribution C_LW of rotations due to axis
  displacement.
  """
  print_step("Step f:", self.log)
  elems = L_L.as_sym_mat3()
  Lxx, Lyy, Lzz = elems[0], elems[1], elems[2]
  # Assemble the symmetric contribution directly from the axis offsets.
  C_LW = matrix.sym(sym_mat3=[
    c_o.wz_ly**2*Lyy + c_o.wy_lz**2*Lzz,
    c_o.wz_lx**2*Lxx + c_o.wx_lz**2*Lzz,
    c_o.wy_lx**2*Lxx + c_o.wx_ly**2*Lyy,
    -c_o.wx_lz*c_o.wy_lz*Lzz,
    -c_o.wx_ly*c_o.wz_ly*Lyy,
    -c_o.wy_lx*c_o.wz_lx*Lxx])
  self.show_matrix(x=C_LW, title="C_LW")
  return C_LW
def exercise_reference_impl_quick():
  """Quick checks of the featherstone reference implementation (fmri):
  inertia tensor agreement and energy bookkeeping over 100 dynamics steps
  for both acceleration modes.
  """
  sites_cart = fmri.create_triangle_with_center_of_mass_at_origin()
  assert approx_equal(flex.vec3_double(sites_cart).mean(), (0, 0, 0))
  # Two independent inertia computations must agree.
  inertia1 = fmri.body_inertia(sites_cart=sites_cart)
  inertia2 = matrix.sym(sym_mat3=scitbx.math.inertia_tensor(
    points=flex.vec3_double(sites_cart), pivot=(0, 0, 0)))
  assert approx_equal(inertia1, inertia2)
  #
  for use_classical_accel in [False, True]:
    sim = fmri.simulation()
    # Initial energies (potential, angular/linear kinetic, totals).
    assert approx_equal(
      [sim.e_pot, sim.e_kin_ang, sim.e_kin_lin, sim.e_kin, sim.e_tot],
      [0.64030878777041611, 0.012310594130384761, 0.02835,
       0.04066059413038476, 0.68096938190080092],
      )
    for i in xrange(100):
      sim.dynamics_step(delta_t=0.01,
                        use_classical_accel=use_classical_accel)
    # Expected energies after 100 steps, per acceleration mode.
    expected = [
      [0.028505221929112364, 0.091503230553568404, 0.56329655444242244,
       0.65479978499599079, 0.6833050069251031],
      [0.053276067541032097, 0.091503230553568404, 0.53805622991666513,
       0.62955946047023348, 0.68283552801126557],
      ][int(use_classical_accel)]
    assert approx_equal(
      [sim.e_pot, sim.e_kin_ang, sim.e_kin_lin, sim.e_kin, sim.e_tot],
      expected)
def f(self):
  """Structure factor for "const" scatterers: sum over scatterers and
  space-group operations."""
  tphkl = 2 * math.pi * matrix.col(self.hkl)
  hkl_row = matrix.row(self.hkl)
  hkl_col = matrix.col(self.hkl)
  total = 0
  for scatterer in self.scatterers:
    assert scatterer.scattering_type == "const"
    w = scatterer.occupancy
    if (not scatterer.flags.use_u_aniso()):
      # Isotropic Debye-Waller factor: identical for every symmetry copy.
      dw = math.exp(mtps * scatterer.u_iso * self.d_star_sq)
    ff = (1 + scatterer.fp) + 1j * scatterer.fdp
    for s in self.space_group:
      alpha = matrix.col(s * scatterer.site).dot(tphkl)
      if (scatterer.flags.use_u_aniso()):
        r = s.r().as_rational().as_float()
        s_u_star_s = r * matrix.sym(sym_mat3=scatterer.u_star) * r.transpose()
        dw = math.exp(mtps * (hkl_row * s_u_star_s).dot(hkl_col))
      total += w * dw * ff * cmath.exp(1j*alpha)
  return total
def d2f_d_params_diag(self):
  """Yield, per scatterer, the diagonal of the second-derivative matrix of
  the structure factor with respect to that scatterer's parameters
  (site, then u_iso or u_star), as an (np, 1) complex array.

  Special-position constraints are applied via the constraint gradient-sum
  matrices when the scatterer sits on a special position.
  """
  tphkl = 2 * math.pi * flex.double(self.hkl)
  tphkl_outer = tphkl.matrix_outer_product(tphkl) \
    .matrix_symmetric_as_packed_u()
  h, k, l = self.hkl
  # d(h^T u* h)/d(u*) and its outer product (curvature), packed-upper.
  d_exp_huh_d_u_star = flex.double(
    [h**2, k**2, l**2, 2 * h * k, 2 * h * l, 2 * k * l])
  d2_exp_huh_d_u_star_u_star = d_exp_huh_d_u_star.matrix_outer_product(
    d_exp_huh_d_u_star).matrix_symmetric_as_packed_u()
  for i_scatterer, scatterer in enumerate(self.scatterers):
    site_symmetry_ops = None
    if (self.site_symmetry_table.is_special_position(i_scatterer)):
      site_symmetry_ops = self.site_symmetry_table.get(i_scatterer)
      site_constraints = site_symmetry_ops.site_constraints()
      if (scatterer.flags.use_u_aniso()):
        adp_constraints = site_symmetry_ops.adp_constraints()
    w = scatterer.weight()
    if (not scatterer.flags.use_u_aniso()):
      # Isotropic Debye-Waller factor: same for every symmetry copy.
      huh = scatterer.u_iso * self.d_star_sq
      dw = math.exp(mtps * huh)
    gaussian = self.scattering_type_registry.gaussian_not_optional(
      scattering_type=scatterer.scattering_type)
    f0 = gaussian.at_d_star_sq(self.d_star_sq)
    ffp = f0 + scatterer.fp
    fdp = scatterer.fdp
    ff = (ffp + 1j * fdp)
    # Packed-upper accumulators for the site and ADP blocks.
    d2_site_site = flex.complex_double(3 * (3 + 1) // 2, 0j)
    if (not scatterer.flags.use_u_aniso()):
      d2_u_iso_u_iso = 0j
    else:
      d2_u_star_u_star = flex.complex_double(6 * (6 + 1) // 2, 0j)
    for s in self.space_group:
      r = s.r().as_rational().as_float()
      s_site = s * scatterer.site
      alpha = tphkl.dot(flex.double(s_site))
      if (scatterer.flags.use_u_aniso()):
        # Anisotropic factor depends on the rotation part of each op.
        s_u_star_s = r * matrix.sym(
          sym_mat3=scatterer.u_star) * r.transpose()
        huh = (matrix.row(self.hkl) * s_u_star_s).dot(
          matrix.col(self.hkl))
        dw = math.exp(mtps * huh)
      e = cmath.exp(1j * alpha)
      site_gtmx = flex.double(r.transpose())
      site_gtmx.reshape(flex.grid(3, 3))
      d2_site_site += (w * dw * ff * e * (-1)) * (
        site_gtmx.matrix_multiply_packed_u_multiply_lhs_transpose(
          tphkl_outer))
      if (not scatterer.flags.use_u_aniso()):
        d2_u_iso_u_iso += w * dw * ff * e * (mtps * self.d_star_sq)**2
      else:
        u_star_gtmx = tensor_rank_2_gradient_transform_matrix(r)
        d2_u_star_u_star += (w * dw * ff * e * mtps**2) \
          * u_star_gtmx.matrix_multiply_packed_u_multiply_lhs_transpose(
              d2_exp_huh_d_u_star_u_star)
    # Parameter layout: site params first, then the ADP parameter(s).
    if (site_symmetry_ops is None):
      i_u = 3
    else:
      i_u = site_constraints.n_independent_params()
    if (not scatterer.flags.use_u_aniso()):
      i_occ = i_u + 1
    elif (site_symmetry_ops is None):
      i_occ = i_u + 6
    else:
      i_occ = i_u + adp_constraints.n_independent_params()
    np = i_occ + 3
    # Reduce to the independent parameters on special positions.
    if (site_symmetry_ops is not None):
      gsm = site_constraints.gradient_sum_matrix()
      d2_site_site = gsm.matrix_multiply_packed_u_multiply_lhs_transpose(
        packed_u=d2_site_site)
      if (scatterer.flags.use_u_aniso()):
        gsm = adp_constraints.gradient_sum_matrix()
        d2_u_star_u_star = gsm \
          .matrix_multiply_packed_u_multiply_lhs_transpose(
            packed_u=d2_u_star_u_star)
    #
    dpd = flex.complex_double(flex.grid(np, 1), 0j)
    def paste(d, i):
      # Paste a column vector into the diagonal result at row offset i.
      d.reshape(flex.grid(d.size(), 1))
      dpd.matrix_paste_block_in_place(d, i, 0)
    paste(d2_site_site.matrix_packed_u_diagonal(), 0)
    if (not scatterer.flags.use_u_aniso()):
      dpd[i_u] = d2_u_iso_u_iso
    else:
      paste(d2_u_star_u_star.matrix_packed_u_diagonal(), i_u)
    yield dpd
def d2f_d_params(self):
  """Yield, per scatterer, the full (np, np) complex second-derivative
  matrix of the structure factor with respect to that scatterer's
  parameters, ordered: site, u_iso or u_star, occupancy, fp, fdp.

  Mixed blocks (site/u, site/occ, u/occ, occ/fp, ...) are accumulated over
  all space-group operations; special-position constraints are applied via
  the constraint gradient-sum matrices.
  """
  tphkl = 2 * math.pi * flex.double(self.hkl)
  tphkl_outer = tphkl.matrix_outer_product(tphkl) \
    .matrix_symmetric_as_packed_u()
  h, k, l = self.hkl
  # d(h^T u* h)/d(u*) and its outer product (curvature), packed-upper.
  d_exp_huh_d_u_star = flex.double(
    [h**2, k**2, l**2, 2 * h * k, 2 * h * l, 2 * k * l])
  d2_exp_huh_d_u_star_u_star = d_exp_huh_d_u_star.matrix_outer_product(
    d_exp_huh_d_u_star).matrix_symmetric_as_packed_u()
  for i_scatterer, scatterer in enumerate(self.scatterers):
    site_symmetry_ops = None
    if (self.site_symmetry_table.is_special_position(i_scatterer)):
      site_symmetry_ops = self.site_symmetry_table.get(i_scatterer)
      site_constraints = site_symmetry_ops.site_constraints()
      if (scatterer.flags.use_u_aniso()):
        adp_constraints = site_symmetry_ops.adp_constraints()
    w = scatterer.weight()
    # Weight without occupancy is needed for d/d(occupancy) cross terms.
    wwo = scatterer.weight_without_occupancy()
    if (not scatterer.flags.use_u_aniso()):
      # Isotropic Debye-Waller factor: same for every symmetry copy.
      huh = scatterer.u_iso * self.d_star_sq
      dw = math.exp(mtps * huh)
    gaussian = self.scattering_type_registry.gaussian_not_optional(
      scattering_type=scatterer.scattering_type)
    f0 = gaussian.at_d_star_sq(self.d_star_sq)
    ffp = f0 + scatterer.fp
    fdp = scatterer.fdp
    ff = (ffp + 1j * fdp)
    # Accumulators; exactly one of the u_iso / u_star families is active.
    d2_site_site = flex.complex_double(3 * (3 + 1) // 2, 0j)
    if (not scatterer.flags.use_u_aniso()):
      d2_site_u_iso = flex.complex_double(flex.grid(3, 1), 0j)
      d2_site_u_star = None
    else:
      d2_site_u_iso = None
      d2_site_u_star = flex.complex_double(flex.grid(3, 6), 0j)
    d2_site_occ = flex.complex_double(flex.grid(3, 1), 0j)
    d2_site_fp = flex.complex_double(flex.grid(3, 1), 0j)
    d2_site_fdp = flex.complex_double(flex.grid(3, 1), 0j)
    if (not scatterer.flags.use_u_aniso()):
      d2_u_iso_u_iso = 0j
      d2_u_iso_occ = 0j
      d2_u_iso_fp = 0j
      d2_u_iso_fdp = 0j
    else:
      d2_u_star_u_star = flex.complex_double(6 * (6 + 1) // 2, 0j)
      d2_u_star_occ = flex.complex_double(flex.grid(6, 1), 0j)
      d2_u_star_fp = flex.complex_double(flex.grid(6, 1), 0j)
      d2_u_star_fdp = flex.complex_double(flex.grid(6, 1), 0j)
    d2_occ_fp = 0j
    d2_occ_fdp = 0j
    for s in self.space_group:
      r = s.r().as_rational().as_float()
      s_site = s * scatterer.site
      alpha = tphkl.dot(flex.double(s_site))
      if (scatterer.flags.use_u_aniso()):
        # Anisotropic factor depends on the rotation part of each op.
        s_u_star_s = r * matrix.sym(
          sym_mat3=scatterer.u_star) * r.transpose()
        huh = (matrix.row(self.hkl) * s_u_star_s).dot(
          matrix.col(self.hkl))
        dw = math.exp(mtps * huh)
      e = cmath.exp(1j * alpha)
      # Chain rule through the symmetry op: gradients transform with r^T.
      site_gtmx = flex.double(r.transpose())
      site_gtmx.reshape(flex.grid(3, 3))
      d2_site_site += (w * dw * ff * e * (-1)) * (
        site_gtmx.matrix_multiply_packed_u_multiply_lhs_transpose(
          tphkl_outer))
      if (not scatterer.flags.use_u_aniso()):
        d2_site_u_iso += (w * dw * ff * e * 1j * mtps * self.d_star_sq) \
          * site_gtmx.matrix_multiply(tphkl)
      else:
        u_star_gtmx = tensor_rank_2_gradient_transform_matrix(r)
        d2_site_u_star += (w * dw * ff * e * 1j * mtps) \
          * site_gtmx.matrix_multiply(
              tphkl.matrix_outer_product(d_exp_huh_d_u_star)) \
            .matrix_multiply(u_star_gtmx.matrix_transpose())
      site_gtmx_tphkl = site_gtmx.matrix_multiply(tphkl)
      d2_site_occ += (wwo * dw * ff * e * 1j) * site_gtmx_tphkl
      d2_site_fp += (w * dw * e * 1j) * site_gtmx_tphkl
      d2_site_fdp += (w * dw * e * (-1)) * site_gtmx_tphkl
      if (not scatterer.flags.use_u_aniso()):
        d2_u_iso_u_iso += w * dw * ff * e * (mtps * self.d_star_sq)**2
        d2_u_iso_occ += wwo * dw * ff * e * mtps * self.d_star_sq
        d2_u_iso_fp += w * dw * e * mtps * self.d_star_sq
        d2_u_iso_fdp += 1j * w * dw * e * mtps * self.d_star_sq
      else:
        d2_u_star_u_star +=(w * dw * ff * e * mtps**2) \
          * u_star_gtmx.matrix_multiply_packed_u_multiply_lhs_transpose(
              d2_exp_huh_d_u_star_u_star)
        u_star_gtmx_d_exp_huh_d_u_star = u_star_gtmx.matrix_multiply(
          d_exp_huh_d_u_star)
        d2_u_star_occ += (wwo * dw * ff * e * mtps) \
          * u_star_gtmx_d_exp_huh_d_u_star
        d2_u_star_fp += (w * dw * e * mtps) \
          * u_star_gtmx_d_exp_huh_d_u_star
        d2_u_star_fdp += (w * dw * 1j * e * mtps) \
          * u_star_gtmx_d_exp_huh_d_u_star
      d2_occ_fp += wwo * dw * e
      d2_occ_fdp += wwo * dw * e * 1j
    # Parameter layout: site, ADP, occupancy, fp, fdp.
    if (site_symmetry_ops is None):
      i_u = 3
    else:
      i_u = site_constraints.n_independent_params()
    if (not scatterer.flags.use_u_aniso()):
      i_occ = i_u + 1
    elif (site_symmetry_ops is None):
      i_occ = i_u + 6
    else:
      i_occ = i_u + adp_constraints.n_independent_params()
    i_fp, i_fdp, np = i_occ + 1, i_occ + 2, i_occ + 3
    # Reduce to the independent parameters on special positions.
    if (site_symmetry_ops is not None):
      gsm = site_constraints.gradient_sum_matrix()
      d2_site_site = gsm.matrix_multiply_packed_u_multiply_lhs_transpose(
        packed_u=d2_site_site)
      if (not scatterer.flags.use_u_aniso()):
        d2_site_u_iso = gsm.matrix_multiply(d2_site_u_iso)
      else:
        d2_site_u_star = gsm.matrix_multiply(d2_site_u_star)
      d2_site_occ = gsm.matrix_multiply(d2_site_occ)
      d2_site_fp = gsm.matrix_multiply(d2_site_fp)
      d2_site_fdp = gsm.matrix_multiply(d2_site_fdp)
      if (scatterer.flags.use_u_aniso()):
        gsm = adp_constraints.gradient_sum_matrix()
        d2_site_u_star = d2_site_u_star.matrix_multiply(
          gsm.matrix_transpose())
        d2_u_star_u_star = gsm \
          .matrix_multiply_packed_u_multiply_lhs_transpose(
            packed_u=d2_u_star_u_star)
        d2_u_star_occ = gsm.matrix_multiply(d2_u_star_occ)
        d2_u_star_fp = gsm.matrix_multiply(d2_u_star_fp)
        d2_u_star_fdp = gsm.matrix_multiply(d2_u_star_fdp)
    # Assemble the symmetric (np, np) matrix block by block.
    dp = flex.complex_double(flex.grid(np, np), 0j)
    paste = dp.matrix_paste_block_in_place
    paste(d2_site_site.matrix_packed_u_as_symmetric(), 0, 0)
    if (not scatterer.flags.use_u_aniso()):
      paste(d2_site_u_iso, 0, i_u)
      paste(d2_site_u_iso.matrix_transpose(), i_u, 0)
    else:
      paste(d2_site_u_star, 0, i_u)
      paste(d2_site_u_star.matrix_transpose(), i_u, 0)
    paste(d2_site_occ, 0, i_occ)
    paste(d2_site_occ.matrix_transpose(), i_occ, 0)
    paste(d2_site_fp, 0, i_fp)
    paste(d2_site_fp.matrix_transpose(), i_fp, 0)
    paste(d2_site_fdp, 0, i_fdp)
    paste(d2_site_fdp.matrix_transpose(), i_fdp, 0)
    if (not scatterer.flags.use_u_aniso()):
      dp[i_u * np + i_u] = d2_u_iso_u_iso
      dp[i_u * np + i_occ] = d2_u_iso_occ
      dp[i_occ * np + i_u] = d2_u_iso_occ
      dp[i_u * np + i_fp] = d2_u_iso_fp
      dp[i_fp * np + i_u] = d2_u_iso_fp
      dp[i_u * np + i_fdp] = d2_u_iso_fdp
      dp[i_fdp * np + i_u] = d2_u_iso_fdp
    else:
      paste(d2_u_star_u_star.matrix_packed_u_as_symmetric(), i_u, i_u)
      paste(d2_u_star_occ, i_u, i_occ)
      paste(d2_u_star_occ.matrix_transpose(), i_occ, i_u)
      paste(d2_u_star_fp, i_u, i_fp)
      paste(d2_u_star_fp.matrix_transpose(), i_fp, i_u)
      paste(d2_u_star_fdp, i_u, i_fdp)
      paste(d2_u_star_fdp.matrix_transpose(), i_fdp, i_u)
    dp[i_occ * np + i_fp] = d2_occ_fp
    dp[i_fp * np + i_occ] = d2_occ_fp
    dp[i_occ * np + i_fdp] = d2_occ_fdp
    dp[i_fdp * np + i_occ] = d2_occ_fdp
    yield dp
def __init__(self, pdb_str, dx=0, dy=0, dz=0, sx=0, sy=0, sz=0,
             lx=[1, 0, 0], ly=[0, 1, 0], lz=[0, 0, 1],
             tx=0, ty=0, tz=0,
             vx=[1, 0, 0], vy=[0, 1, 0], vz=[0, 0, 1],
             w_M_lx=[0, 0, 0], w_M_ly=[0, 0, 0], w_M_lz=[0, 0, 0],
             origin=None, n_models=10000, assert_similarity=True,
             show=False, log=sys.stdout, write_pdb_files=False,
             smear_eps=0):
  """Compare ADPs predicted analytically from TLS motion parameters against
  ADPs estimated from an ensemble of models generated from the same motions.

  The model from pdb_str is expanded into n_models copies using the given
  librational (dx..dz, lx/ly/lz, sx..sz), vibrational (tx..tz, vx/vy/vz) and
  screw-correlation (w_M_*) parameters.  Per-atom U_cart from the TLS matrices
  (self.u_cart_tls) is compared with U_cart recovered from the ensemble
  scatter (self.u_cart_ens) via several agreement scores:
  self.R1/self.R2 (relative L1 differences, R2 after eigenvalue smearing by
  smear_eps), self.KL (symmetrized Kullback-Leibler-style trace measure) and
  self.CC (Merritt/Murshudov-style overlap).  self.r aliases self.R1.
  If assert_similarity, per-atom agreement to 1e-3 is asserted.
  """
  # NOTE: mutable list defaults are read-only here (wrapped in matrix.col),
  # so the shared-default pitfall does not bite.
  from mmtbx.tls import analysis, tls_as_xyz
  from scitbx import matrix
  from libtbx.utils import null_out
  if (show):
    print >> log, "INPUTS:", "-" * 73
    print >> log, "dx :", dx
    print >> log, "dy :", dy
    print >> log, "dz :", dz
    print >> log, "sx :", sx
    print >> log, "sy :", sy
    print >> log, "sz :", sz
    print >> log, "lx :", [i for i in lx]
    print >> log, "ly :", [i for i in ly]
    print >> log, "lz :", [i for i in lz]
    print >> log, "tx :", tx
    print >> log, "ty :", ty
    print >> log, "tz :", tz
    print >> log, "vx :", [i for i in vx]
    print >> log, "vy :", [i for i in vy]
    print >> log, "vz :", [i for i in vz]
    print >> log, "w_M_lx:", [i for i in w_M_lx]
    print >> log, "w_M_ly:", [i for i in w_M_ly]
    print >> log, "w_M_lz:", [i for i in w_M_lz]
    print >> log, "origin:", origin
    print >> log, "-" * 79
  #
  pdb_inp = iotbx.pdb.input(source_info=None, lines=pdb_str)
  ph = pdb_inp.construct_hierarchy()
  xrs = ph.extract_xray_structure(
    crystal_symmetry=pdb_inp.crystal_symmetry())
  sites_cart = xrs.sites_cart()
  ph.atoms().set_xyz(sites_cart)
  # Default origin: center of mass of the input coordinates.
  if (origin is None):
    origin = sites_cart.mean()
  #
  # Assemble the TLS-from-motions object from the individual motion inputs.
  o_tfm = analysis.tls_from_motions(dx=dx, dy=dy, dz=dz,
    l_x=matrix.col(lx), l_y=matrix.col(ly), l_z=matrix.col(lz),
    sx=sx, sy=sy, sz=sz, tx=tx, ty=ty, tz=tz,
    v_x=matrix.col(vx), v_y=matrix.col(vy), v_z=matrix.col(vz),
    w_M_lx=matrix.col(w_M_lx), w_M_ly=matrix.col(w_M_ly),
    w_M_lz=matrix.col(w_M_lz))
  #
  # Analytical per-atom U_cart from the TLS matrices.
  self.u_cart_tls = get_u_cart(o_tfm=o_tfm, origin=origin,
    sites_cart=sites_cart)
  tlso_ = tlso(t=o_tfm.T_M.as_sym_mat3(),
               l=o_tfm.L_M.as_sym_mat3(),
               s=o_tfm.S_M.as_mat3(),
               origin=origin)
  if (assert_similarity):
    # Round-trip: decompose the assembled T/L/S and self-check consistency.
    T = matrix.sym(sym_mat3=tlso_.t)
    L = matrix.sym(sym_mat3=tlso_.l)
    S = matrix.sqr(tlso_.s)
    o_tfm = analysis.run(T=T, L=L, S=S, log=null_out()).self_check()
  #
  # Generate the coordinate ensemble implied by the motions.
  r = tls_as_xyz.ensemble_generator(tls_from_motions_object=o_tfm,
    pdb_hierarchy=ph, xray_structure=xrs, n_models=n_models,
    origin=origin, use_states=write_pdb_files, log=null_out())
  if (write_pdb_files):
    r.write_pdb_file(file_name="ensemble_%s.pdb" % str(n_models))
  #
  xyz_all = r.sites_cart_ens
  n_atoms = xyz_all[0].size()
  ###
  # Regroup per-model coordinates into per-atom trajectories.
  xyz_atoms_all = all_vs_all(xyz_all=xyz_all)
  ###
  # U_cart estimated from the positional scatter of each atom.
  self.u_cart_ens = flex.sym_mat3_double()
  for i in xrange(n_atoms):
    self.u_cart_ens.append(
      u_cart_from_xyz(sites_cart=xyz_atoms_all[i]))
  # NOTE(review): u1/u2 below feed only the commented-out metrics; the loop
  # further down rebinds these names per atom pair.
  u1 = self.u_cart_tls.as_double()
  u2 = self.u_cart_ens.as_double()
  #self.r = flex.sum(flex.abs(u1-u2))/\
  #  flex.sum(flex.abs(flex.abs(u1)+flex.abs(u2)))*2
  # LS
  #diff = u1-u2
  #self.rLS = math.sqrt(flex.sum(diff*diff)/(9.*diff.size()))
  #
  # Merritt / Murshudov
  e = smear_eps
  eps = matrix.sqr([e, 0, 0, 0, e, 0, 0, 0, e])
  I = matrix.sqr([2, 0, 0, 0, 2, 0, 0, 0, 2])
  def add_const(u):
    # Add smear_eps to each eigenvalue of u (in its own principal frame),
    # so near-singular tensors become safely invertible for KL/CC below.
    es = eigensystem.real_symmetric(u)
    vecs = es.vectors()
    l_z = matrix.col((vecs[0], vecs[1], vecs[2]))
    l_y = matrix.col((vecs[3], vecs[4], vecs[5]))
    l_x = matrix.col((vecs[6], vecs[7], vecs[8]))
    #l_x = l_y.cross(l_z)
    u = matrix.sym(sym_mat3=u)
    R = matrix.sqr([
      l_x[0], l_y[0], l_z[0],
      l_x[1], l_y[1], l_z[1],
      l_x[2], l_y[2], l_z[2]])
    uD = R.transpose() * u * R
    result = R * (uD + eps) * R.transpose()
    # Sanity check: rotating to the eigenframe and back reproduces u.
    tmp = R * uD * R.transpose()
    for i in xrange(6):
      assert approx_equal(tmp[i], u[i])
    return R * (uD + eps) * R.transpose()
  self.KL = 0
  self.CC = 0
  n1, n2, d1, d2 = 0, 0, 0, 0
  for u1, u2 in zip(self.u_cart_tls, self.u_cart_ens):
    # R1 terms: raw tensors.
    for i in xrange(6):
      n1 += abs(u1[i] - u2[i])
      d1 += (abs(u1[i]) + abs(u2[i]))
    u1 = add_const(u=u1)
    u2 = add_const(u=u2)
    # R2 terms: smeared tensors.
    for i in xrange(6):
      n2 += abs(u1[i] - u2[i])
      d2 += (abs(u1[i]) + abs(u2[i]))
    iu1 = u1.inverse()
    iu2 = u2.inverse()
    # Symmetrized KL-style measure: trace(U1*U2^-1 + U2*U1^-1 - 2I).
    self.KL += (u1 * iu2 + u2 * iu1 - I).trace()
    diu1 = iu1.determinant()
    diu2 = iu2.determinant()
    den = (iu1 + iu2).determinant()
    self.CC += (diu1 * diu2)**0.25 / (den / 8)**0.5
  self.KL = self.KL / self.u_cart_ens.size()
  self.CC = self.CC / self.u_cart_ens.size()
  self.R1 = n1 / d1 * 2
  self.R2 = n2 / d2 * 2
  self.r = self.R1
  #
  ###
  for i in xrange(n_atoms):
    ut = ["%8.5f" % u for u in self.u_cart_tls[i]]
    ue = ["%8.5f" % u for u in self.u_cart_ens[i]]
    if (assert_similarity):
      # Compare formatted magnitudes to 1e-3 (sign-insensitive).
      for j in xrange(6):
        assert approx_equal(abs(float(ut[j])), abs(float(ue[j])), 1.e-3)
  #
  if (write_pdb_files):
    ph.atoms().set_uij(self.u_cart_tls)
    ph.write_pdb_file(file_name="u_from_tls.pdb",
      crystal_symmetry=xrs.crystal_symmetry())
    ph.atoms().set_uij(self.u_cart_ens)
    ph.write_pdb_file(file_name="u_from_ens.pdb",
      crystal_symmetry=xrs.crystal_symmetry())
def lebedev_2005_perturbation(self, reduced_cell): s = matrix.sym(sym_mat3=reduced_cell.metrical_matrix()) m = self.as_rational().as_float() r = m.transpose() * s * m sirms = s.inverse() * (r - s) return ((sirms * sirms).trace() / 12)**0.5
def fd_grads(self, proxy):
  """Central finite-difference gradients of an ADP restraint's deltas.

  Returns a list of flex.double gradient vectors (one per restraint row,
  each of length param_map.n_parameters).  Anisotropic scatterers are
  perturbed component-wise in u_star (converted to u_cart for evaluation);
  isotropic scatterers are perturbed in u_iso.  rigid_bond restraints use
  delta_z(), single-delta restraints use delta(), all others deltas()[n].
  """
  # Proxies whose row count depends on the proxy itself, not the class.
  dynamic_restraint_proxy_classes = (
    adp.adp_u_eq_similarity_proxy,
    adp.adp_volume_similarity_proxy,
  )
  if isinstance(proxy, (dynamic_restraint_proxy_classes)):
    n_restraints = len(proxy.i_seqs)
  else:
    n_restraints = rows_per_restraint.get(proxy.__class__, 1)
  grads = [flex.double(self.param_map.n_parameters)
           for i in range(n_restraints)]
  eps = 1e-8
  uc = self.xray_structure.unit_cell()
  xs = self.xray_structure
  u_cart = xs.scatterers().extract_u_cart(uc).deep_copy()
  u_star = xs.scatterers().extract_u_star().deep_copy()
  u_iso = xs.scatterers().extract_u_iso().deep_copy()
  # Restraint types exposing a single scalar delta().
  single_delta_classes = (
    adp.fixed_u_eq_adp,
  )
  for n in xrange(n_restraints):
    for i in xrange(self.param_map.n_scatterers):
      use_u_aniso = self.param_map[i].u_aniso > -1
      use_u_iso = self.param_map[i].u_iso > -1
      for j in range(6):
        if use_u_aniso:
          # Perturb component j of u_star by +eps then -2*eps for the
          # central difference.  NOTE(review): the net -eps perturbation is
          # never restored; at eps=1e-8 this is negligible for the checks.
          h = [0,0,0,0,0,0]
          h[j] = eps
          h = matrix.sym(sym_mat3=h)
          u_star[i]=list((matrix.sym(sym_mat3=u_star[i]) + h).as_sym_mat3())
          r = self.restraint(proxy, u_cart=flex.sym_mat3_double([
            adptbx.u_star_as_u_cart(uc, u) for u in u_star]))
          if isinstance(r, adp.rigid_bond): d1 = r.delta_z()
          elif isinstance(r, single_delta_classes): d1 = r.delta()
          else: d1 = r.deltas()[n]
          u_star[i]=list((matrix.sym(sym_mat3=u_star[i]) - 2*h).as_sym_mat3())
          r = self.restraint(proxy, u_cart=flex.sym_mat3_double([
            adptbx.u_star_as_u_cart(uc, u) for u in u_star]))
          if isinstance(r, adp.rigid_bond): d2 = r.delta_z()
          elif isinstance(r, single_delta_classes): d2 = r.delta()
          else: d2 = r.deltas()[n]
        elif use_u_iso:
          u_iso[i] += eps
          r = self.restraint(proxy, u_iso=u_iso)
          if isinstance(r, adp.rigid_bond): d1 = r.delta_z()
          elif isinstance(r, single_delta_classes): d1 = r.delta()
          else: d1 = r.deltas()[n]
          u_iso[i] -= 2*eps
          r = self.restraint(proxy, u_iso=u_iso)
          if isinstance(r, adp.rigid_bond): d2 = r.delta_z()
          elif isinstance(r, single_delta_classes): d2 = r.delta()
          else: d2 = r.deltas()[n]
        # NOTE(review): if a scatterer has neither aniso nor iso parameters,
        # d1/d2/r would be undefined here — presumably the param_map never
        # yields such a scatterer; confirm against callers.
        d_delta = (d1-d2)/(2*eps)
        if not isinstance(r, adp.rigid_bond) and j > 2:
          d_delta *= 2 # off diagonals count twice
        if use_u_aniso:
          grads[n][self.param_map[i].u_aniso+j] = d_delta
        elif use_u_iso:
          grads[n][self.param_map[i].u_iso] = d_delta
          # Only one isotropic parameter: no need to loop over 6 components.
          break
  return grads
def check_eigenvector(adp, x, v): v = matrix.col(v) assert approx_equal((matrix.sym(sym_mat3=adp) * v * (1. / x)).elems, v.elems, 1.e-4)
def exercise_rigid_bond():
  """Regression test for the rigid-bond ADP restraint (Hirshfeld test):
  proxy round-trips, reference values for delta_z/residual/gradients,
  agreement with the rigid_bond_pair implementation, shared-proxy array
  functions, finite-difference gradient checks, and frame invariance of
  the residual under random rotations.
  """
  i_seqs = (1,2)
  weight = 1
  p = adp_restraints.rigid_bond_proxy(i_seqs=i_seqs,weight=weight)
  assert p.i_seqs == i_seqs
  assert p.weight == weight
  # Hand-checked reference values for a simple two-site case.
  sites = ((1,2,3),(2,3,4))
  u_cart = ((1,2,3,4,5,6), (3,4,5,6,7,8))
  expected_gradients = ((-4, -4, -4, -8, -8, -8), (4, 4, 4, 8, 8, 8))
  r = adp_restraints.rigid_bond(sites=sites, u_cart=u_cart, weight=weight)
  assert r.weight == weight
  assert approx_equal(r.delta_z(), -6)
  assert approx_equal(r.residual(), 36)
  assert approx_equal(r.gradients(), expected_gradients)
  # Same restraint constructed via proxy + parameter pack.
  sites_cart = flex.vec3_double(((1,2,3),(2,5,4),(3,4,5)))
  u_cart = flex.sym_mat3_double(((1,2,3,4,5,6),
                                 (2,3,3,5,7,7),
                                 (3,4,5,3,7,8)))
  r = adp_restraints.rigid_bond(
    adp_restraint_params(sites_cart=sites_cart, u_cart=u_cart),
    proxy=p)
  assert approx_equal(r.weight, weight)
  # Cross-check against the fractional-space rigid_bond_pair implementation.
  unit_cell = uctbx.unit_cell([15,25,30,90,90,90])
  sites_frac = unit_cell.fractionalize(sites_cart=sites_cart)
  u_star = flex.sym_mat3_double([
    adptbx.u_cart_as_u_star(unit_cell, u_cart_i) for u_cart_i in u_cart])
  pair = adp_restraints.rigid_bond_pair(sites_frac[1], sites_frac[2],
                                        u_star[1], u_star[2], unit_cell)
  assert approx_equal(pair.delta_z(), abs(r.delta_z()))
  assert approx_equal(pair.z_12(), r.z_12())
  assert approx_equal(pair.z_21(), r.z_21())
  #
  # Array-level (shared proxy) functions; two identical proxies double sums.
  gradients_aniso_cart = flex.sym_mat3_double(sites_cart.size(), (0,0,0,0,0,0))
  gradients_iso = flex.double(sites_cart.size(), 0)
  proxies = adp_restraints.shared_rigid_bond_proxy([p,p])
  params = adp_restraint_params(sites_cart=sites_cart, u_cart=u_cart)
  residuals = adp_restraints.rigid_bond_residuals(params, proxies=proxies)
  assert approx_equal(residuals, (r.residual(),r.residual()))
  deltas = adp_restraints.rigid_bond_deltas(params, proxies=proxies)
  assert approx_equal(deltas, (r.delta_z(),r.delta_z()))
  residual_sum = adp_restraints.rigid_bond_residual_sum(
    params=params,
    proxies=proxies,
    gradients_aniso_cart=gradients_aniso_cart)
  assert approx_equal(residual_sum, 2 * r.residual())
  for g,e in zip(gradients_aniso_cart[1:3], r.gradients()):
    assert approx_equal(g, matrix.col(e)*2)
  # Analytical gradients vs central finite differences.
  fd_grads_aniso, fd_grads_iso = finite_difference_gradients(
    restraint_type=adp_restraints.rigid_bond,
    proxy=p,
    sites_cart=sites_cart,
    u_cart=u_cart)
  for g,e in zip(gradients_aniso_cart, fd_grads_aniso):
    assert approx_equal(g, matrix.col(e)*2)
  #
  # check frame invariance of residual
  #
  u_cart_1 = matrix.sym(sym_mat3=(0.1,0.2,0.05,0.03,0.02,0.01))
  u_cart_2 = matrix.sym(sym_mat3=(0.21,0.32,0.11,0.02,0.02,0.07))
  u_cart = (u_cart_1.as_sym_mat3(),u_cart_2.as_sym_mat3())
  site_cart_1 = matrix.col((1,2,3))
  site_cart_2 = matrix.col((3,1,4.2))
  sites = (tuple(site_cart_1),tuple(site_cart_2))
  a = adp_restraints.rigid_bond(sites=sites, u_cart=u_cart, weight=1)
  expected_residual = a.residual()
  gen = flex.mersenne_twister()
  for i in range(20):
    # Rotate sites and tensors by the same random rotation R; the residual
    # must be unchanged (U transforms as R U R^T, x as R x).
    R = matrix.rec(gen.random_double_r3_rotation_matrix(),(3,3))
    u_cart_1_rot = R * u_cart_1 * R.transpose()
    u_cart_2_rot = R * u_cart_2 * R.transpose()
    u_cart = (u_cart_1_rot.as_sym_mat3(),u_cart_2_rot.as_sym_mat3())
    site_cart_1_rot = R * site_cart_1
    site_cart_2_rot = R * site_cart_2
    sites = (tuple(site_cart_1_rot),tuple(site_cart_2_rot))
    a = adp_restraints.rigid_bond(
      sites=sites, u_cart=u_cart, weight=1)
    assert approx_equal(a.residual(), expected_residual)
def run(server_info, inp, status): print("<pre>") from scitbx import matrix p = p_from_string(string=inp.cb_expr) assert inp.p_or_q in ["P", "Q"] if (inp.p_or_q == "Q"): p = p.inverse() assert inp.p_transpose in ["off", "on"] if (inp.p_transpose == "on"): p = matrix.rt((p.r.transpose(), p.t)) print("P:") display_rt(p) print() q = p.inverse() print("Q:") display_rt(q) print() if (len(inp.obj_expr.strip()) != 0): if (inp.obj_type in ["xyz", "hkl"]): triple = xyz_from_string(string=inp.obj_expr) if (inp.obj_type == "xyz"): print("Transformation law: (Q,q) xyz") print() print(" xyz:", triple) print() print(" xyz':", ( q.r * matrix.col(triple) + q.t).elems) print() else: print("Transformation law: hkl P") print() print(" hkl:", triple) print() print(" hkl':", (matrix.row(triple) * p.r).elems) print() elif (inp.obj_type == "unit_cell"): from cctbx import uctbx uc = uctbx.unit_cell(inp.obj_expr) print("Transformation law: Pt G P") print() print("unit cell:", uc) print() g = matrix.sym(sym_mat3=uc.metrical_matrix()) print("metrical matrix:") display_r(g) print() gp = p.r.transpose() * g * p.r print("metrical matrix':") display_r(gp) print() ucp = uctbx.unit_cell(metrical_matrix=gp.as_sym_mat3()) print("unit cell':", ucp) print() elif (inp.obj_type == "Ww"): w = w_from_string(string=inp.obj_expr) print("Transformation law: (Q,q) (W,w) (P,p)") print() print("(W, w):") display_rt(w) print() wp = q * w * p print("(W, w)':") display_rt(wp) print() else: raise RuntimeError("Unknown obj_type: %s" % inp.obj_type) print("</pre>")
def exercise_covariance():
  """Regression test for parameter covariance-matrix utilities:
  site-covariance extraction, orthogonalization (fractional -> cartesian
  via O * cov * O^T per scatterer), u_aniso/u_iso extraction, and the
  expected RuntimeErrors when a scatterer lacks the requested parameters.
  """
  # High-symmetry quartz-like structure (P6222) with two scatterers.
  xs = xray.structure(crystal_symmetry=crystal.symmetry(
    (5.01, 5.01, 5.47, 90, 90, 120), "P6222"),
    scatterers=flex.xray_scatterer([
      xray.scatterer("Si", (1 / 2., 1 / 2., 1 / 3.)),
      xray.scatterer("O", (0.197, -0.197, 0.83333))
    ]))
  uc = xs.unit_cell()
  flags = xs.scatterer_flags()
  for f in flags:
    f.set_grad_site(True)
  xs.set_scatterer_flags(flags)
  # Packed-upper-triangle covariance over the 6 site parameters (2 x xyz).
  cov = flex.double(
    (1e-8, 1e-9, 2e-9, 3e-9, 4e-9, 5e-9,
     2e-8, 1e-9, 2e-9, 3e-9, 4e-9,
     3e-8, 1e-9, 2e-9, 3e-9,
     2e-8, 1e-9, 2e-9,
     3e-8, 1e-9,
     4e-8))
  param_map = xs.parameter_map()
  # Extracting for all scatterers returns the full matrix unchanged.
  assert approx_equal(
    cov,
    covariance.extract_covariance_matrix_for_sites(flex.size_t([0, 1]),
                                                   cov, param_map))
  cov_cart = covariance.orthogonalize_covariance_matrix(cov, uc, param_map)
  O = matrix.sqr(uc.orthogonalization_matrix())
  for i in range(param_map.n_scatterers):
    # Per-scatterer block must satisfy cov_cart = O * cov_frac * O^T.
    cov_i = covariance.extract_covariance_matrix_for_sites(
      flex.size_t([i]), cov, param_map)
    cov_i_cart = covariance.extract_covariance_matrix_for_sites(
      flex.size_t([i]), cov_cart, param_map)
    assert approx_equal(O * matrix.sym(sym_mat3=cov_i) * O.transpose(),
                        matrix.sym(sym_mat3=cov_i_cart).as_mat3())
  # Switch to ADP gradients: scatterer 0 anisotropic, scatterer 1 isotropic.
  for f in flags:
    f.set_grads(False)
  flags[0].set_grad_u_aniso(True)
  flags[0].set_use_u_aniso(True)
  flags[1].set_grad_u_iso(True)
  flags[1].set_use_u_iso(True)
  xs.set_scatterer_flags(flags)
  param_map = xs.parameter_map()
  # Diagonal 7x7 covariance (6 u_star + 1 u_iso) with entries 0..6.
  cov = flex.double(7 * 7, 0)
  cov.reshape(flex.grid(7, 7))
  cov.matrix_diagonal_set_in_place(flex.double([i for i in range(7)]))
  cov = cov.matrix_symmetric_as_packed_u()
  assert approx_equal([i for i in range(6)],
                      covariance.extract_covariance_matrix_for_u_aniso(
                        0, cov, param_map).matrix_packed_u_diagonal())
  assert covariance.variance_for_u_iso(1, cov, param_map) == 6
  # Requesting u_iso for the anisotropic scatterer must fail...
  try:
    covariance.variance_for_u_iso(0, cov, param_map)
  except RuntimeError:
    pass
  else:
    raise Exception_expected
  # ...and u_aniso for the isotropic one likewise.
  try:
    covariance.extract_covariance_matrix_for_u_aniso(1, cov, param_map)
  except RuntimeError:
    pass
  else:
    raise Exception_expected
  # NOTE(review): return value of approx_equal is not asserted here —
  # matches the original; confirm whether an assert was intended.
  approx_equal(
    covariance.extract_covariance_matrix_for_sites(flex.size_t([1]), cov,
                                                   param_map),
    (0, 0, 0, 0, 0, 0))
def check_eigenvector(adp, x, v): v = matrix.col(v) assert approx_equal((matrix.sym(sym_mat3=adp) * v * (1.0 / x)).elems, v.elems, 1.0e-4)
def exercise_flood_fill():
  """Regression test for masks.flood_fill on small integer grids:
  void labelling (labels start at 2), grid-point counts, centres of mass
  in grid/fractional/cartesian frames, and agreement of the second shape's
  covariance/inertia/eigensystem with principal_axes_of_inertia.
  """
  uc = uctbx.unit_cell('10 10 10 90 90 90')
  # Exercise both an orthogonal and a triclinic cell.
  for uc in (uctbx.unit_cell('10 10 10 90 90 90'),
             uctbx.unit_cell('9 10 11 87 91 95')):
    gridding = maptbx.crystal_gridding(unit_cell=uc,
                                       pre_determined_n_real=(5, 5, 5))
    corner_cube = (0, 4, 20, 24, 100, 104, 120, 124 ) # cube across all 8 corners
    channel = (12, 37, 38, 39, 42, 43, 62, 63, 67, 68, 87, 112)
    data = flex.int(flex.grid(gridding.n_real()))
    for i in (corner_cube + channel):
      data[i] = 1
    flood_fill = masks.flood_fill(data, uc)
    assert data.count(0) == 105
    # flood_fill relabels connected regions starting from 2.
    for i in corner_cube:
      assert data[i] == 2
    for i in channel:
      assert data[i] == 3
    assert approx_equal(flood_fill.centres_of_mass(),
                        ((-0.5, -0.5, -0.5), (-2.5, 7 / 3, 2.5)))
    assert approx_equal(flood_fill.centres_of_mass_frac(),
                        ((-0.1, -0.1, -0.1), (-0.5, 7 / 15, 0.5)))
    assert approx_equal(
      flood_fill.centres_of_mass_cart(),
      uc.orthogonalize(flood_fill.centres_of_mass_frac()))
    assert flood_fill.n_voids() == 2
    assert approx_equal(flood_fill.grid_points_per_void(), (8, 12))
    if 0:
      # Manual visual inspection only; disabled by default.
      from crys3d import wx_map_viewer
      wx_map_viewer.display(raw_map=data.as_double(), unit_cell=uc,
                            wires=False)
  #
  gridding = maptbx.crystal_gridding(unit_cell=uc,
                                     pre_determined_n_real=(10, 10, 10))
  data = flex.int(flex.grid(gridding.n_real()))
  # parallelogram
  points = [(2, 4, 5), (3, 4, 5), (4, 4, 5), (5, 4, 5), (6, 4, 5),
            (3, 5, 5), (4, 5, 5), (5, 5, 5), (6, 5, 5), (7, 5, 5),
            (4, 6, 5), (5, 6, 5), (6, 6, 5), (7, 6, 5), (8, 6, 5)]
  points_frac = flex.vec3_double()
  for p in points:
    data[p] = 1
    points_frac.append([p[i] / gridding.n_real()[i] for i in range(3)])
  points_cart = uc.orthogonalize(points_frac)
  flood_fill = masks.flood_fill(data, uc)
  assert data.count(2) == 15
  assert approx_equal(flood_fill.centres_of_mass_frac(), ((0.5, 0.5, 0.5), ))
  # Independent reference: principal axes of inertia of the same points.
  pai_cart = math.principal_axes_of_inertia(points=points_cart,
                                            weights=flex.double(
                                              points_cart.size(), 1.0))
  F = matrix.sqr(uc.fractionalization_matrix())
  # NOTE(review): O is assigned but not used below — confirm if leftover.
  O = matrix.sqr(uc.orthogonalization_matrix())
  assert approx_equal(pai_cart.center_of_mass(),
                      flood_fill.centres_of_mass_cart()[0])
  assert approx_equal(
    flood_fill.covariance_matrices_cart()[0],
    (F.transpose() *
     matrix.sym(sym_mat3=flood_fill.covariance_matrices_frac()[0]) *
     F).as_sym_mat3())
  assert approx_equal(pai_cart.inertia_tensor(),
                      flood_fill.inertia_tensors_cart()[0])
  assert approx_equal(pai_cart.eigensystem().vectors(),
                      flood_fill.eigensystems_cart()[0].vectors())
  assert approx_equal(pai_cart.eigensystem().values(),
                      flood_fill.eigensystems_cart()[0].values())
  return
def u_tls_vs_u_ens(
      pdb_str,
      dx=0,dy=0,dz=0,
      sx=0,sy=0,sz=0,
      lx=[1,0,0],ly=[0,1,0],lz=[0,0,1],
      tx=0,ty=0,tz=0,
      vx=[1,0,0],vy=[0,1,0],vz=[0,0,1],
      w_M_lx=[0,0,0],
      w_M_ly=[0,0,0],
      w_M_lz=[0,0,0],
      origin=None,
      n_models=10000,
      assert_similarity=True):
  """Compare per-atom U_cart computed analytically from TLS motion
  parameters with U_cart estimated from an n_models ensemble generated from
  the same motions; prints a relative R factor and (optionally) asserts
  per-component agreement to 1e-3.  Writes the ensemble to
  u_tls_vs_u_ens_ensemble.pdb.

  NOTE: mutable list defaults are read-only here (wrapped in matrix.col),
  so the shared-default pitfall does not apply.
  """
  from mmtbx.tls import analysis, tls_as_xyz
  from scitbx import matrix
  from libtbx.utils import null_out
  #
  print "INPUTS:","-"*73
  print "dx :", dx
  print "dy :", dy
  print "dz :", dz
  print "sx :", sx
  print "sy :", sy
  print "sz :", sz
  print "lx :", [i for i in lx]
  print "ly :", [i for i in ly]
  print "lz :", [i for i in lz]
  print "tx :", tx
  print "ty :", ty
  print "tz :", tz
  print "vx :", [i for i in vx]
  print "vy :", [i for i in vy]
  print "vz :", [i for i in vz]
  print "w_M_lx:", [i for i in w_M_lx]
  print "w_M_ly:", [i for i in w_M_ly]
  print "w_M_lz:", [i for i in w_M_lz]
  print "origin:", origin
  print "-"*79
  #
  #p1 = "dx"+str(dx)+"_"+"dy"+str(dy)+"_"+"dz"+str(dz)
  #p2 = "sx"+str(sx)+"_"+"sy"+str(sy)+"_"+"sz"+str(sz)
  #p3 = "lx"+"".join([str(i) for i in lx])+"_"+\
  #     "ly"+"".join([str(i) for i in ly])+"_"+\
  #     "lz"+"".join([str(i) for i in lz])
  #prefix = "_".join([p1,p2,p3])
  prefix="u_tls_vs_u_ens"
  #
  pdb_inp = iotbx.pdb.input(source_info=None, lines=pdb_str)
  xrs = pdb_inp.xray_structure_simple()
  sites_cart = xrs.sites_cart()
  xrs.set_sites_cart(sites_cart)
  ph = pdb_inp.construct_hierarchy()
  ph.atoms().set_xyz(sites_cart)
  # Default origin: center of mass of the input coordinates.
  if(origin is None):
    origin = sites_cart.mean()
  #
  # Assemble the TLS-from-motions object from the individual motion inputs.
  o_tfm = analysis.tls_from_motions(
    dx=dx,dy=dy,dz=dz,
    l_x=matrix.col(lx),l_y=matrix.col(ly),l_z=matrix.col(lz),
    sx=sx,sy=sy,sz=sz,
    tx=tx,ty=ty,tz=tz,
    v_x=matrix.col(vx),v_y=matrix.col(vy),v_z=matrix.col(vz),
    w_M_lx=matrix.col(w_M_lx),
    w_M_ly=matrix.col(w_M_ly),
    w_M_lz=matrix.col(w_M_lz))
  #
  # Analytical per-atom U_cart from the TLS matrices.
  u_cart_from_tls = get_u_cart(o_tfm=o_tfm, origin=origin,
    sites_cart=sites_cart)
  tlso_ = tlso(
    t = o_tfm.T_M.as_sym_mat3(),
    l = o_tfm.L_M.as_sym_mat3(),
    s = o_tfm.S_M.as_mat3(),
    origin = origin)
  if(assert_similarity):
    # Round-trip: decompose the assembled T/L/S and self-check consistency.
    T = matrix.sym(sym_mat3=tlso_.t)
    L = matrix.sym(sym_mat3=tlso_.l)
    S = matrix.sqr(tlso_.s)
    o_tfm = analysis.run(T=T, L=L, S=S, log=null_out()).self_check()
  #
  r = tls_as_xyz.ensemble_generator(
    tls_from_motions_object = o_tfm,
    pdb_hierarchy           = ph,
    xray_structure          = xrs,
    n_models                = n_models,
    origin                  = origin,
    log                     = null_out())
  r.write_pdb_file(file_name="%s_ensemble.pdb"%prefix)
  #
  # Collect per-model coordinates...
  xyz_all = []
  for m in r.states.root.models():
    xyz_all.append(m.atoms().extract_xyz())
  #
  # ...and regroup them into one trajectory per atom.
  n_atoms = xyz_all[0].size()
  xyz_atoms_all = []
  for i in xrange(n_atoms):
    xyz_atoms = flex.vec3_double()
    for xyzs in xyz_all:
      xyz_atoms.append(xyzs[i])
    xyz_atoms_all.append(xyz_atoms)
  ###
  # Relative R factor between TLS-derived and ensemble-derived U_cart.
  u1 = u_cart_from_tls.as_double()
  u2 = flex.double()
  for i in xrange(n_atoms):
    ui=flex.double(u_cart_from_xyz(sites_cart=xyz_atoms_all[i]))
    u2.extend(ui)
  r = flex.sum(flex.abs(u1-u2))/\
      flex.sum(flex.abs(flex.abs(u1)+flex.abs(u2)))*2
  print "R(U_tls,U_ens)=%6.4f"%(r)
  print "-"*79
  ###
  for i in xrange(n_atoms):
    print "atom %d:"%i
    ut=["%8.5f"%u for u in u_cart_from_tls[i]]
    ue=["%8.5f"%u for u in u_cart_from_xyz(sites_cart=xyz_atoms_all[i])]
    print " Ucart(from TLS):", ut
    print " Ucart(from ens):", ue
    if(assert_similarity):
      # Compare formatted magnitudes to 1e-3 (sign-insensitive).
      for j in xrange(6):
        assert approx_equal(abs(float(ut[j])), abs(float(ue[j])), 1.e-3)
def __init__(self, xray_structure, name='??', **kwds):
  """Set up a 3D viewer for an X-ray structure: thermal ellipsoids for each
  scatterer, symmetry-expanded neighbours, and bond geometry.

  Bonding is controlled by self.bonding (set by the superclass from kwds):
  "covalent" builds bonds from covalent radii with
  self.covalent_bond_tolerance; "all" bonds every pair within
  self.distance_cutoff (which must then be set).  H-H bonds are skipped.
  Symmetry copies outside the asymmetric unit get their own ellipsoid
  transforms and "[symop]" suffixed labels.
  """
  super(xray_structure_viewer, self).__init__(
    unit_cell=xray_structure.unit_cell(),
    orthographic=True,
    light_position=(-1, 1, 1, 0),
    **kwds)
  assert self.bonding in ("covalent", "all")
  assert self.bonding != "all" or self.distance_cutoff is not None
  self.xray_structure = xs = xray_structure
  self.setWindowTitle("%s in %s" % (name,
                                    xs.space_group().type().hall_symbol()))
  sites_frac = xs.sites_frac()
  self.set_extent(sites_frac.min(), sites_frac.max())
  self.is_unit_cell_shown = False
  sites_cart = self.sites_cart = xs.sites_cart()
  thermal_tensors = xs.extract_u_cart_plus_u_iso()
  # One shared transform container per element symbol, so all atoms of an
  # element can be drawn in a single pass.
  self.ellipsoid_to_sphere_transforms = {}
  self.scatterer_indices = flex.std_string()
  self.scatterer_labels = flex.std_string()
  for i, (sc, site, u_cart) in enumerate(itertools.izip(xs.scatterers(),
                                                        sites_cart,
                                                        thermal_tensors)):
    t = quadrics.ellipsoid_to_sphere_transform(site, u_cart)
    self.ellipsoid_to_sphere_transforms.setdefault(
      sc.element_symbol(),
      quadrics.shared_ellipsoid_to_sphere_transforms()).append(t)
    self.scatterer_indices.append("# %i" % i)
    self.scatterer_labels.append(sc.label)
  self.labels = None
  self.label_font = QtGui.QFont("Arial Black", pointSize=18)
  # Build the bond table according to the requested bonding mode.
  if self.bonding == "covalent":
    radii = [
      covalent_radii.table(elt).radius()
      for elt in xs.scattering_type_registry().type_index_pairs_as_dict() ]
    # Buffer must fit the longest possible bond plus tolerance.
    buffer_thickness = 2*max(radii) + self.covalent_bond_tolerance
    asu_mappings = xs.asu_mappings(buffer_thickness=buffer_thickness)
    bond_table = crystal.pair_asu_table(asu_mappings)
    bond_table.add_covalent_pairs(xs.scattering_types(),
                                  tolerance=self.covalent_bond_tolerance)
  elif self.bonding == "all":
    asu_mappings = xs.asu_mappings(buffer_thickness=self.distance_cutoff)
    bond_table = crystal.pair_asu_table(asu_mappings)
    bond_table.add_all_pairs(self.distance_cutoff)
  pair_sym_table = bond_table.extract_pair_sym_table(
    all_interactions_from_inside_asu=True)
  self.bonds = flex.vec3_double()
  self.bonds.reserve(len(xs.scatterers()))
  uc = self.xray_structure.unit_cell()
  frac = mat.rec(uc.fractionalization_matrix(), (3,3))
  inv_frac = frac.inverse()
  # NOTE(review): site_symms appears unused below — confirm if leftover.
  site_symms = xs.site_symmetry_table()
  scatt = self.xray_structure.scatterers()
  for i, neighbours in enumerate(pair_sym_table):
    x0 = sites_cart[i]
    sc0 = scatt[i]
    for j, ops in neighbours.items():
      sc1 = scatt[j]
      # Do not draw hydrogen-hydrogen bonds.
      if sc0.scattering_type == 'H' and sc1.scattering_type == 'H':
        continue
      for op in ops:
        if op.is_unit_mx():
          x1 = sites_cart[j]
        else:
          # Symmetry copy: transform the site and rotate the thermal tensor
          # into cartesian space (op_cart = F^-1 R F), then register the
          # extra displayed atom with a "[symop]" suffixed label.
          x1 = uc.orthogonalize(op*sites_frac[j])
          op_cart = inv_frac*mat.rec(op.r().as_double(), (3,3))*frac
          u1 = (op_cart
                *mat.sym(sym_mat3=thermal_tensors[j])
                *op_cart.transpose())
          t = quadrics.ellipsoid_to_sphere_transform(x1, u1.as_sym_mat3())
          self.ellipsoid_to_sphere_transforms[sc1.element_symbol()].append(t)
          self.sites_cart.append(x1)
          op_lbl = (" [%s]" % op).lower()
          self.scatterer_indices.append("# %i%s" % (j, op_lbl))
          self.scatterer_labels.append("%s%s" % (sc1.label, op_lbl))
        # Bonds are stored as flat endpoint pairs.
        self.bonds.append(x0)
        self.bonds.append(x1)
def fd_grads(self, proxy):
  """Central finite-difference gradients of an ADP restraint's deltas.

  Returns a list of flex.double gradient vectors (one per restraint row,
  each of length param_map.n_parameters).  Anisotropic scatterers are
  perturbed component-wise in u_star (converted to u_cart for evaluation);
  isotropic scatterers are perturbed in u_iso.  rigid_bond restraints use
  delta_z(), single-delta restraints use delta(), all others deltas()[n].
  """
  # Proxies whose row count depends on the proxy itself, not the class.
  dynamic_restraint_proxy_classes = (
    adp.adp_u_eq_similarity_proxy,
    adp.adp_volume_similarity_proxy,
  )
  if isinstance(proxy, (dynamic_restraint_proxy_classes)):
    n_restraints = len(proxy.i_seqs)
  else:
    n_restraints = rows_per_restraint.get(proxy.__class__, 1)
  grads = [
    flex.double(self.param_map.n_parameters) for i in range(n_restraints)
  ]
  eps = 1e-8
  uc = self.xray_structure.unit_cell()
  xs = self.xray_structure
  u_cart = xs.scatterers().extract_u_cart(uc).deep_copy()
  u_star = xs.scatterers().extract_u_star().deep_copy()
  u_iso = xs.scatterers().extract_u_iso().deep_copy()
  # Restraint types exposing a single scalar delta().
  single_delta_classes = (adp.fixed_u_eq_adp, )
  for n in range(n_restraints):
    for i in range(self.param_map.n_scatterers):
      use_u_aniso = self.param_map[i].u_aniso > -1
      use_u_iso = self.param_map[i].u_iso > -1
      for j in range(6):
        if use_u_aniso:
          # Perturb component j of u_star by +eps then -2*eps for the
          # central difference.  NOTE(review): the net -eps perturbation is
          # never restored; at eps=1e-8 this is negligible for the checks.
          h = [0, 0, 0, 0, 0, 0]
          h[j] = eps
          h = matrix.sym(sym_mat3=h)
          u_star[i] = list(
            (matrix.sym(sym_mat3=u_star[i]) + h).as_sym_mat3())
          r = self.restraint(proxy,
                             u_cart=flex.sym_mat3_double([
                               adptbx.u_star_as_u_cart(uc, u) for u in u_star
                             ]))
          if isinstance(r, adp.rigid_bond): d1 = r.delta_z()
          elif isinstance(r, single_delta_classes): d1 = r.delta()
          else: d1 = r.deltas()[n]
          u_star[i] = list((matrix.sym(sym_mat3=u_star[i]) -
                            2 * h).as_sym_mat3())
          r = self.restraint(proxy,
                             u_cart=flex.sym_mat3_double([
                               adptbx.u_star_as_u_cart(uc, u) for u in u_star
                             ]))
          if isinstance(r, adp.rigid_bond): d2 = r.delta_z()
          elif isinstance(r, single_delta_classes): d2 = r.delta()
          else: d2 = r.deltas()[n]
        elif use_u_iso:
          u_iso[i] += eps
          r = self.restraint(proxy, u_iso=u_iso)
          if isinstance(r, adp.rigid_bond): d1 = r.delta_z()
          elif isinstance(r, single_delta_classes): d1 = r.delta()
          else: d1 = r.deltas()[n]
          u_iso[i] -= 2 * eps
          r = self.restraint(proxy, u_iso=u_iso)
          if isinstance(r, adp.rigid_bond): d2 = r.delta_z()
          elif isinstance(r, single_delta_classes): d2 = r.delta()
          else: d2 = r.deltas()[n]
        # NOTE(review): if a scatterer has neither aniso nor iso parameters,
        # d1/d2/r would be undefined here — presumably the param_map never
        # yields such a scatterer; confirm against callers.
        d_delta = (d1 - d2) / (2 * eps)
        if not isinstance(r, adp.rigid_bond) and j > 2:
          d_delta *= 2  # off diagonals count twice
        if use_u_aniso:
          grads[n][self.param_map[i].u_aniso + j] = d_delta
        elif use_u_iso:
          grads[n][self.param_map[i].u_iso] = d_delta
          # Only one isotropic parameter: no need to loop over 6 components.
          break
  return grads
def exercise_adp_similarity():
  """Regression test for the ADP similarity restraint: reference values for
  aniso-aniso and aniso-iso pairs, proxy round-trips for all four
  aniso/iso pairings, shared-proxy array functions, finite-difference
  gradient checks, and frame invariance of the residual.
  """
  # Aniso-aniso pair with hand-checked reference values.
  u_cart = ((1,3,2,4,3,6),(2,4,2,6,5,1))
  u_iso = (-1,-1)
  use_u_aniso = (True, True)
  weight = 1
  a = adp_restraints.adp_similarity(
    u_cart=u_cart,
    weight=weight)
  assert approx_equal(a.use_u_aniso, use_u_aniso)
  assert a.weight == weight
  assert approx_equal(a.residual(), 68)
  assert approx_equal(a.gradients2(),
    ((-2.0, -2.0, 0.0, -8.0, -8.0, 20.0), (2.0, 2.0, -0.0, 8.0, 8.0, -20.0)))
  assert approx_equal(a.deltas(), (-1.0, -1.0, 0.0, -2.0, -2.0, 5.0))
  assert approx_equal(a.rms_deltas(), 2.7487370837451071)
  #
  # Mixed aniso-iso pair (second scatterer isotropic).
  u_cart = ((1,3,2,4,3,6),(-1,-1,-1,-1,-1,-1))
  u_iso = (-1,2)
  use_u_aniso = (True, False)
  a = adp_restraints.adp_similarity(
    u_cart[0], u_iso[1], weight=weight)
  assert approx_equal(a.use_u_aniso, use_u_aniso)
  assert a.weight == weight
  assert approx_equal(a.residual(), 124)
  assert approx_equal(a.gradients2(),
    ((-2, 2, 0, 16, 12, 24), (2, -2, 0, -16, -12, -24)))
  assert approx_equal(a.deltas(), (-1, 1, 0, 4, 3, 6))
  assert approx_equal(a.rms_deltas(), 3.711842908553348)
  #
  # Proxies covering every aniso ("()") / iso ("o") pairing.
  i_seqs_aa = (1,2) # () - ()
  i_seqs_ai = (1,0) # () - o
  i_seqs_ia = (3,2) # o - ()
  i_seqs_ii = (0,3) # o - o
  p_aa = adp_restraints.adp_similarity_proxy(i_seqs=i_seqs_aa,weight=weight)
  p_ai = adp_restraints.adp_similarity_proxy(i_seqs=i_seqs_ai,weight=weight)
  p_ia = adp_restraints.adp_similarity_proxy(i_seqs=i_seqs_ia,weight=weight)
  p_ii = adp_restraints.adp_similarity_proxy(i_seqs=i_seqs_ii,weight=weight)
  assert p_aa.i_seqs == i_seqs_aa
  assert p_aa.weight == weight
  # (-1,...) u_cart / -1 u_iso mark the unused representation per scatterer.
  u_cart = flex.sym_mat3_double(((-1,-1,-1,-1,-1,-1),
                                 (1,2,2,4,3,6),
                                 (2,4,2,6,5,1),
                                 (-1,-1,-1,-1,-1,-1)))
  u_iso = flex.double((1,-1,-1,2))
  use_u_aniso = flex.bool((False, True,True,False))
  for p in (p_aa,p_ai,p_ia,p_ii):
    params = adp_restraint_params(u_cart=u_cart, u_iso=u_iso,
      use_u_aniso=use_u_aniso)
    a = adp_restraints.adp_similarity(params, proxy=p)
    assert approx_equal(a.weight, weight)
    #
    # Array-level (shared proxy) functions; two identical proxies double sums.
    gradients_aniso_cart = flex.sym_mat3_double(u_cart.size(), (0,0,0,0,0,0))
    gradients_iso = flex.double(u_cart.size(), 0)
    proxies = adp_restraints.shared_adp_similarity_proxy([p,p])
    residuals = adp_restraints.adp_similarity_residuals(params,
      proxies=proxies)
    assert approx_equal(residuals, (a.residual(),a.residual()))
    deltas_rms = adp_restraints.adp_similarity_deltas_rms(params,
      proxies=proxies)
    assert approx_equal(deltas_rms, (a.rms_deltas(),a.rms_deltas()))
    residual_sum = adp_restraints.adp_similarity_residual_sum(
      params,
      proxies=proxies,
      gradients_aniso_cart=gradients_aniso_cart,
      gradients_iso=gradients_iso)
    assert approx_equal(residual_sum, 2 * a.residual())
    # Analytical gradients vs central finite differences.
    fd_grads_aniso, fd_grads_iso = finite_difference_gradients(
      restraint_type=adp_restraints.adp_similarity,
      proxy=p,
      u_cart=u_cart,
      u_iso=u_iso,
      use_u_aniso=use_u_aniso)
    for g,e in zip(gradients_aniso_cart, fd_grads_aniso):
      assert approx_equal(g, matrix.col(e)*2)
    for g,e in zip(gradients_iso, fd_grads_iso):
      assert approx_equal(g, e*2)
  #
  # check frame invariance of residual
  #
  u_cart_1 = matrix.sym(sym_mat3=(0.1,0.2,0.05,0.03,0.02,0.01))
  u_cart_2 = matrix.sym(sym_mat3=(0.21,0.32,0.11,0.02,0.02,0.07))
  u_cart = (u_cart_1.as_sym_mat3(),u_cart_2.as_sym_mat3())
  u_iso = (-1, -1)
  use_u_aniso = (True, True)
  a = adp_restraints.adp_similarity(u_cart, weight=1)
  expected_residual = a.residual()
  gen = flex.mersenne_twister()
  for i in range(20):
    # Rotate both tensors by the same random rotation; residual is invariant.
    R = matrix.rec(gen.random_double_r3_rotation_matrix(),(3,3))
    u_cart_1_rot = R * u_cart_1 * R.transpose()
    u_cart_2_rot = R * u_cart_2 * R.transpose()
    u_cart = (u_cart_1_rot.as_sym_mat3(),u_cart_2_rot.as_sym_mat3())
    a = adp_restraints.adp_similarity(u_cart, weight=1)
    assert approx_equal(a.residual(), expected_residual)
def d2f_d_params(self):
  """Yield, per scatterer, the matrix of second derivatives of the structure
  factor with respect to that scatterer's parameters.

  Parameter order per scatterer: site (3), then u_iso (1) or u_star (6),
  then occupancy, fp, fdp — giving a 7x7 matrix for isotropic and a 12x12
  matrix for anisotropic scatterers (as flex.complex_double).  The sum runs
  over all space-group symmetry operations; only "const" scattering factors
  are supported.
  """
  tphkl = 2 * math.pi * matrix.col(self.hkl)
  tphkl_outer = tphkl.outer_product()
  h,k,l = self.hkl
  # d(h^T u* h)/d(u*) in sym_mat3 order; off-diagonals carry factor 2.
  d_exp_huh_d_u_star = matrix.col([h**2, k**2, l**2, 2*h*k, 2*h*l, 2*k*l])
  d2_exp_huh_d_u_star_u_star = d_exp_huh_d_u_star.outer_product()
  for scatterer in self.scatterers:
    assert scatterer.scattering_type == "const"
    w = scatterer.occupancy
    if (not scatterer.flags.use_u_aniso()):
      # Isotropic Debye-Waller factor is symmetry-independent: compute once.
      huh = scatterer.u_iso * self.d_star_sq
      dw = math.exp(mtps * huh)
    ffp = 1 + scatterer.fp
    fdp = scatterer.fdp
    ff = (ffp + 1j * fdp)
    # Accumulators for each second-derivative block.
    d2_site_site = flex.complex_double(flex.grid(3,3), 0j)
    if (not scatterer.flags.use_u_aniso()):
      d2_site_u_iso = flex.complex_double(flex.grid(3,1), 0j)
      d2_site_u_star = None
    else:
      d2_site_u_iso = None
      d2_site_u_star = flex.complex_double(flex.grid(3,6), 0j)
    d2_site_occ = flex.complex_double(flex.grid(3,1), 0j)
    d2_site_fp = flex.complex_double(flex.grid(3,1), 0j)
    d2_site_fdp = flex.complex_double(flex.grid(3,1), 0j)
    if (not scatterer.flags.use_u_aniso()):
      d2_u_iso_u_iso = 0j
      d2_u_iso_occ = 0j
      d2_u_iso_fp = 0j
      d2_u_iso_fdp = 0j
    else:
      d2_u_star_u_star = flex.complex_double(flex.grid(6,6), 0j)
      d2_u_star_occ = flex.complex_double(flex.grid(6,1), 0j)
      d2_u_star_fp = flex.complex_double(flex.grid(6,1), 0j)
      d2_u_star_fdp = flex.complex_double(flex.grid(6,1), 0j)
    d2_occ_fp = 0j
    d2_occ_fdp = 0j
    # Sum contributions over all symmetry operations.
    for s in self.space_group:
      r = s.r().as_rational().as_float()
      s_site = s * scatterer.site
      alpha = matrix.col(s_site).dot(tphkl)
      if (scatterer.flags.use_u_aniso()):
        # Anisotropic Debye-Waller factor depends on the rotated u*.
        s_u_star_s = r*matrix.sym(sym_mat3=scatterer.u_star)*r.transpose()
        huh = (matrix.row(self.hkl) * s_u_star_s).dot(matrix.col(self.hkl))
        dw = math.exp(mtps * huh)
      e = cmath.exp(1j*alpha)
      # Chain-rule transform: gradients w.r.t. the rotated site map back
      # through r^T.
      site_gtmx = r.transpose()
      d2_site_site += flex.complex_double(
        site_gtmx * (w * dw * ff * e * (-1) * tphkl_outer)
        * site_gtmx.transpose())
      if (not scatterer.flags.use_u_aniso()):
        d2_site_u_iso += flex.complex_double(site_gtmx * (
          w * dw * ff * e * 1j * mtps * self.d_star_sq * tphkl))
      else:
        u_star_gtmx = matrix.sqr(tensor_rank_2_gradient_transform_matrix(r))
        d2_site_u_star += flex.complex_double(
          site_gtmx * ((w * dw * ff * e * 1j * tphkl).outer_product(
            mtps * d_exp_huh_d_u_star)) * u_star_gtmx.transpose())
      d2_site_occ += flex.complex_double(site_gtmx * (
        dw * ff * e * 1j * tphkl))
      d2_site_fp += flex.complex_double(site_gtmx * (
        w * dw * e * 1j * tphkl))
      d2_site_fdp += flex.complex_double(site_gtmx * (
        w * dw * e * (-1) * tphkl))
      if (not scatterer.flags.use_u_aniso()):
        d2_u_iso_u_iso += w * dw * ff * e * (mtps * self.d_star_sq)**2
        d2_u_iso_occ += dw * ff * e * mtps * self.d_star_sq
        d2_u_iso_fp += w * dw * e * mtps * self.d_star_sq
        d2_u_iso_fdp += 1j * w * dw * e * mtps * self.d_star_sq
      else:
        d2_u_star_u_star += flex.complex_double(
          u_star_gtmx
          * (w * dw * ff * e * mtps**2 * d2_exp_huh_d_u_star_u_star)
          * u_star_gtmx.transpose())
        d2_u_star_occ += flex.complex_double(u_star_gtmx * (
          dw * ff * e * mtps * d_exp_huh_d_u_star))
        d2_u_star_fp += flex.complex_double(u_star_gtmx * (
          w * dw * e * mtps * d_exp_huh_d_u_star))
        d2_u_star_fdp += flex.complex_double(u_star_gtmx * (
          w * dw * 1j * e * mtps * d_exp_huh_d_u_star))
      d2_occ_fp += dw * e
      d2_occ_fdp += dw * e * 1j
    # Assemble the full symmetric matrix.  Parameter indices: site = 0..2,
    # u block starts at 3; then occ/fp/fdp.
    if (not scatterer.flags.use_u_aniso()):
      i_occ, i_fp, i_fdp, np = 4, 5, 6, 7
    else:
      i_occ, i_fp, i_fdp, np = 9, 10, 11, 12
    dp = flex.complex_double(flex.grid(np,np), 0j)
    paste = dp.matrix_paste_block_in_place
    paste(d2_site_site, 0,0)
    if (not scatterer.flags.use_u_aniso()):
      paste(d2_site_u_iso, 0,3)
      paste(d2_site_u_iso.matrix_transpose(), 3,0)
    else:
      paste(d2_site_u_star, 0,3)
      paste(d2_site_u_star.matrix_transpose(), 3,0)
    paste(d2_site_occ, 0,i_occ)
    paste(d2_site_occ.matrix_transpose(), i_occ,0)
    paste(d2_site_fp, 0,i_fp)
    paste(d2_site_fp.matrix_transpose(), i_fp,0)
    paste(d2_site_fdp, 0,i_fdp)
    paste(d2_site_fdp.matrix_transpose(), i_fdp,0)
    if (not scatterer.flags.use_u_aniso()):
      # Scalar entries addressed by flat index row*np + col (np == 7 here).
      dp[3*7+3] = d2_u_iso_u_iso
      dp[3*7+4] = d2_u_iso_occ
      dp[4*7+3] = d2_u_iso_occ
      dp[3*7+5] = d2_u_iso_fp
      dp[5*7+3] = d2_u_iso_fp
      dp[3*7+6] = d2_u_iso_fdp
      dp[6*7+3] = d2_u_iso_fdp
    else:
      paste(d2_u_star_u_star, 3,3)
      paste(d2_u_star_occ, 3, 9)
      paste(d2_u_star_occ.matrix_transpose(), 9, 3)
      paste(d2_u_star_fp, 3, 10)
      paste(d2_u_star_fp.matrix_transpose(), 10, 3)
      paste(d2_u_star_fdp, 3, 11)
      paste(d2_u_star_fdp.matrix_transpose(), 11, 3)
    dp[i_occ*np+i_fp] = d2_occ_fp
    dp[i_fp*np+i_occ] = d2_occ_fp
    dp[i_occ*np+i_fdp] = d2_occ_fdp
    dp[i_fdp*np+i_occ] = d2_occ_fdp
    yield dp
def finite_difference_gradients(restraint_type,
                                proxy,
                                sites_cart=None,
                                u_cart=None,
                                u_iso=None,
                                use_u_aniso=None,
                                eps=1.e-8):
  """Central finite-difference gradients of an ADP restraint residual.

  For each scatterer i the residual is evaluated at +eps and -eps
  perturbations of either the six u_cart components (anisotropic case)
  or of u_iso (isotropic case), and the central difference
  (qp - qm) / (2*eps) is accumulated.

  The argument combination selects which adp_restraint_params overload
  is used: (sites_cart, u_cart), (u_cart,) or
  (u_cart, u_iso, use_u_aniso).

  Returns (result_aniso, result_iso):
    result_aniso[i] -- list of 6 d(residual)/d(u_cart[i][j]) values,
                       or the zero 6-tuple where scatterer i is isotropic
    result_iso[i]   -- d(residual)/d(u_iso[i]), or 0 where anisotropic

  Fix vs. previous revision: each perturbation is now restored after the
  backward evaluation, so subsequent components/scatterers are
  differentiated at the original point and the caller's u_cart/u_iso
  arrays are left unchanged.
  """
  def residual(restraint_type, proxy,
               sites_cart=None, u_cart=None, u_iso=None, use_u_aniso=None):
    # Evaluate the restraint residual for the current parameter values,
    # dispatching to the matching adp_restraint_params overload.
    if sites_cart is not None:
      return restraint_type(
        adp_restraint_params(sites_cart=sites_cart, u_cart=u_cart),
        proxy=proxy).residual()
    elif u_iso is None:
      return restraint_type(
        adp_restraint_params(u_cart=u_cart),
        proxy=proxy).residual()
    else:
      assert use_u_aniso is not None
      return restraint_type(
        adp_restraint_params(
          u_cart=u_cart, u_iso=u_iso, use_u_aniso=use_u_aniso),
        proxy=proxy).residual()
  result_aniso = [(0,0,0,0,0,0)]*len(u_cart)
  result_iso = [0] * len(u_cart)
  if sites_cart is not None:
    assert len(sites_cart) == len(u_cart)
  for i in range(len(u_cart)):
    if u_iso is None:
      # All scatterers anisotropic: differentiate with respect to each
      # of the six independent u_cart components in turn.
      result_aniso_i = []
      for j in range(6):
        h = [0,0,0,0,0,0]
        h[j] = eps
        h = matrix.sym(sym_mat3=h)
        u_cart[i] = list((matrix.sym(sym_mat3=u_cart[i]) + h).as_sym_mat3())
        qp = residual(restraint_type, proxy,
                      sites_cart=sites_cart, u_cart=u_cart)
        u_cart[i] = list((matrix.sym(sym_mat3=u_cart[i]) - 2*h).as_sym_mat3())
        qm = residual(restraint_type, proxy,
                      sites_cart=sites_cart, u_cart=u_cart)
        # Restore the unperturbed value before moving on.
        u_cart[i] = list((matrix.sym(sym_mat3=u_cart[i]) + h).as_sym_mat3())
        dq = (qp-qm)/2
        result_aniso_i.append(dq/(eps))
      result_aniso[i] = result_aniso_i
    else:
      if use_u_aniso[i]:
        # Mixed model, anisotropic scatterer.
        result_aniso_i = []
        for j in range(6):
          h = [0,0,0,0,0,0]
          h[j] = eps
          h = matrix.sym(sym_mat3=h)
          u_cart[i] = list((matrix.sym(sym_mat3=u_cart[i]) + h).as_sym_mat3())
          qp = residual(restraint_type, proxy, u_cart=u_cart,
                        u_iso=u_iso, use_u_aniso=use_u_aniso)
          u_cart[i] = list((matrix.sym(sym_mat3=u_cart[i]) - 2*h).as_sym_mat3())
          qm = residual(restraint_type, proxy, u_cart=u_cart,
                        u_iso=u_iso, use_u_aniso=use_u_aniso)
          # Restore the unperturbed value before moving on.
          u_cart[i] = list((matrix.sym(sym_mat3=u_cart[i]) + h).as_sym_mat3())
          dq = (qp-qm)/2
          result_aniso_i.append(dq/(eps))
        result_aniso[i] = result_aniso_i
      else:
        # Mixed model, isotropic scatterer: single derivative w.r.t. u_iso.
        u_iso[i] += eps
        qp = residual(restraint_type, proxy, u_cart=u_cart,
                      u_iso=u_iso, use_u_aniso=use_u_aniso)
        u_iso[i] -= 2*eps
        qm = residual(restraint_type, proxy, u_cart=u_cart,
                      u_iso=u_iso, use_u_aniso=use_u_aniso)
        # Restore the unperturbed value.
        u_iso[i] += eps
        dq = (qp-qm)/2
        result_iso[i] = dq/(eps)
  return result_aniso, result_iso
def run(server_info, inp, status):
  """CGI handler: apply a change-of-basis operator to a user-supplied
  object (coordinates, Miller indices, unit cell, or symmetry matrix)
  and print the transformed result as HTML preformatted text.

  inp fields used:
    cb_expr     -- change-of-basis expression, parsed by p_from_string
    p_or_q      -- "P" or "Q"; "Q" means the expression is the inverse
    p_transpose -- "on"/"off"; "on" transposes the rotation part
    obj_expr    -- the object to transform (may be empty)
    obj_type    -- "xyz", "hkl", "unit_cell" or "Ww"

  Raises RuntimeError for an unknown obj_type.
  """
  print "<pre>"
  from scitbx import matrix
  p = p_from_string(string=inp.cb_expr)
  assert inp.p_or_q in ["P", "Q"]
  if (inp.p_or_q == "Q"):
    # User supplied Q = P^-1; invert to obtain P.
    p = p.inverse()
  assert inp.p_transpose in ["off", "on"]
  if (inp.p_transpose == "on"):
    # Transpose only the rotation part, keeping the translation.
    p = matrix.rt((p.r.transpose(), p.t))
  print "P:"
  display_rt(p)
  print
  q = p.inverse()
  print "Q:"
  display_rt(q)
  print
  if (len(inp.obj_expr.strip()) != 0):
    if (inp.obj_type in ["xyz", "hkl"]):
      triple = xyz_from_string(string=inp.obj_expr)
      if (inp.obj_type == "xyz"):
        # Coordinates transform with the inverse operator (Q,q).
        print "Transformation law: (Q,q) xyz"
        print
        print " xyz:", triple
        print
        print " xyz':", ( q.r * matrix.col(triple) + q.t).elems
        print
      else:
        # Miller indices transform as a row vector times P.
        print "Transformation law: hkl P"
        print
        print " hkl:", triple
        print
        print " hkl':", (matrix.row(triple) * p.r).elems
        print
    elif (inp.obj_type == "unit_cell"):
      from cctbx import uctbx
      uc = uctbx.unit_cell(inp.obj_expr)
      # The metrical matrix G transforms as P^T G P.
      print "Transformation law: Pt G P"
      print
      print "unit cell:", uc
      print
      g = matrix.sym(sym_mat3=uc.metrical_matrix())
      print "metrical matrix:"
      display_r(g)
      print
      gp = p.r.transpose() * g * p.r
      print "metrical matrix':"
      display_r(gp)
      print
      ucp = uctbx.unit_cell(metrical_matrix=gp.as_sym_mat3())
      print "unit cell':", ucp
      print
    elif (inp.obj_type == "Ww"):
      # Symmetry operation (W,w) conjugates: (W,w)' = Q (W,w) P.
      w = w_from_string(string=inp.obj_expr)
      print "Transformation law: (Q,q) (W,w) (P,p)"
      print
      print "(W, w):"
      display_rt(w)
      print
      wp = q * w * p
      print "(W, w)':"
      display_rt(wp)
      print
    else:
      raise RuntimeError("Unknown obj_type: %s" % inp.obj_type)
  print "</pre>"
def run(pdb_file_name, n_models, log, output_file_name_prefix, eps=1.e-7):
  """Generate TLS-based structure ensembles, one per TLS group.

  For every TLS group found in the REMARK 3 records of pdb_file_name:
    - check that the T and L matrices are positive definite (tolerance
      eps), raising Sorry otherwise,
    - decompose (T, L, S) into elemental motions (analysis.run),
    - generate n_models structures sampling those motions and write
      them to <output_file_name_prefix>_ensemble_<i>.pdb,
    - compute atomic U tensors both directly from TLS and from the
      generated ensemble, print a per-atom comparison plus overall
      correlation and relative-difference metrics,
    - write both U sets as _u_from_tls_<i>.pdb / _u_from_ensemble_<i>.pdb.

  Returns the ensemble_generator object of the last processed group.
  """
  pdb_inp = iotbx.pdb.input(file_name=pdb_file_name)
  pdb_hierarchy = pdb_inp.construct_hierarchy()
  asc = pdb_hierarchy.atom_selection_cache()
  cs = pdb_inp.crystal_symmetry_from_cryst1()
  tls_extract = mmtbx.tls.tools.tls_from_pdb_inp(
    remark_3_records=pdb_inp.extract_remark_iii_records(3),
    pdb_hierarchy=pdb_hierarchy)
  # Loop-invariant conversion factor, hoisted out of the group loop:
  # REMARK 3 stores L in deg**2 and S in A*deg; decomposition uses radians.
  deg_to_rad_scale = math.pi / 180
  for i_group, tls_params_one_group in enumerate(tls_extract.tls_params):
    selection = asc.selection(tls_params_one_group.selection_string)
    pdb_hierarchy_sel = pdb_hierarchy.select(selection)
    xrs = pdb_hierarchy_sel.extract_xray_structure(crystal_symmetry=cs)
    # Units: T[A], L[deg**2], S[A*deg]
    T = matrix.sym(sym_mat3=tls_params_one_group.t)
    L = matrix.sym(sym_mat3=tls_params_one_group.l)
    S = matrix.sqr(tls_params_one_group.s)
    origin = tls_params_one_group.origin
    tlso = tools.tlso(t=T.as_sym_mat3(), l=L.as_sym_mat3(), s=S,
                      origin=origin)
    # sanity check
    if (not adptbx.is_positive_definite(tls_params_one_group.t, eps)):
      raise Sorry("T matrix is not positive definite.")
    if (not adptbx.is_positive_definite(tls_params_one_group.l, eps)):
      raise Sorry("L matrix is not positive definite.")
    r = analysis.run(T=T, L=L * (deg_to_rad_scale**2),
                     S=S * deg_to_rad_scale, log=log).self_check()
    ensemble_generator_obj = ensemble_generator(
      tls_from_motions_object=r,
      pdb_hierarchy=pdb_hierarchy_sel,
      xray_structure=xrs,
      n_models=n_models,
      origin=origin,
      log=log)
    ensemble_generator_obj.write_pdb_file(
      file_name=output_file_name_prefix + "_ensemble_%s.pdb" % str(i_group))
    # get U from TLS
    u_from_tls = tools.uaniso_from_tls_one_group(
      tlso=tlso, sites_cart=xrs.sites_cart(), zeroize_trace=False)
    # get U from ensemble
    pdb_hierarchy_from_tls = pdb_hierarchy_sel.deep_copy()
    pdb_hierarchy_from_ens = pdb_hierarchy_sel.deep_copy()
    u_from_ens = tools.u_cart_from_ensemble(
      models=ensemble_generator_obj.states.root.models())
    for i in range(xrs.sites_cart().size()):
      print("atom %d:" % i)
      print(" Ucart(from TLS):", ["%8.5f" % u for u in u_from_tls[i]])
      print(" Ucart(from ens):", ["%8.5f" % u for u in u_from_ens[i]])
    # Overall agreement: linear correlation and a relative L1 difference.
    u1, u2 = u_from_tls.as_double(), u_from_ens.as_double()
    cc = flex.linear_correlation(x=u1, y=u2).coefficient()
    r = flex.sum(flex.abs(u1-u2))/\
        flex.sum(flex.abs(flex.abs(u1)+flex.abs(u2)))*2
    print("%6.4f %6.4f" % (cc, r))
    #
    pdb_hierarchy_from_tls.atoms().set_uij(u_from_tls)
    pdb_hierarchy_from_ens.atoms().set_uij(u_from_ens)
    pdb_hierarchy_from_tls.write_pdb_file(
      file_name=output_file_name_prefix + "_u_from_tls_%s.pdb" % str(i_group),
      crystal_symmetry=cs)
    pdb_hierarchy_from_ens.write_pdb_file(
      file_name=output_file_name_prefix +
        "_u_from_ensemble_%s.pdb" % str(i_group),
      crystal_symmetry=cs)
  return ensemble_generator_obj
def __init__(self, xray_structure, name='??', **kwds):
  """Set up the viewer for an xray.structure: one ADP ellipsoid per
  scatterer, plus ellipsoids/sites for bonded symmetry mates, and the
  bond list used for drawing.

  name is used only for the window title. Extra keyword arguments are
  forwarded to the base viewer class.

  Requires self.bonding in ("covalent", "all"); for "all" a
  self.distance_cutoff must be set (both presumably provided by the
  base class / kwds -- TODO confirm).
  """
  super(xray_structure_viewer, self).__init__(
    unit_cell=xray_structure.unit_cell(),
    orthographic=True,
    light_position=(-1, 1, 1, 0),
    **kwds)
  assert self.bonding in ("covalent", "all")
  assert self.bonding != "all" or self.distance_cutoff is not None
  self.xray_structure = xs = xray_structure
  self.setWindowTitle("%s in %s" % (name,
                                    xs.space_group().type().hall_symbol()))
  sites_frac = xs.sites_frac()
  # Viewing extent from the bounding box of the fractional coordinates.
  self.set_extent(sites_frac.min(), sites_frac.max())
  self.is_unit_cell_shown = False
  sites_cart = self.sites_cart = xs.sites_cart()
  thermal_tensors = xs.extract_u_cart_plus_u_iso()
  # One shared transform container per element symbol, so all atoms of
  # an element can be drawn with the same material.
  self.ellipsoid_to_sphere_transforms = {}
  self.scatterer_indices = flex.std_string()
  self.scatterer_labels = flex.std_string()
  for i, (sc, site, u_cart) in enumerate(
      zip(xs.scatterers(), sites_cart, thermal_tensors)):
    t = quadrics.ellipsoid_to_sphere_transform(site, u_cart)
    self.ellipsoid_to_sphere_transforms.setdefault(
      sc.element_symbol(),
      quadrics.shared_ellipsoid_to_sphere_transforms()).append(t)
    self.scatterer_indices.append("# %i" % i)
    self.scatterer_labels.append(sc.label)
  self.labels = None
  self.label_font = QtGui.QFont("Arial Black", pointSize=18)
  # Build the bond table either from covalent radii or from a plain
  # distance cutoff.
  if self.bonding == "covalent":
    radii = [
      covalent_radii.table(elt).radius()
      for elt in xs.scattering_type_registry().type_index_pairs_as_dict() ]
    # Buffer must cover the longest possible covalent bond.
    buffer_thickness = 2 * max(radii) + self.covalent_bond_tolerance
    asu_mappings = xs.asu_mappings(buffer_thickness=buffer_thickness)
    bond_table = crystal.pair_asu_table(asu_mappings)
    bond_table.add_covalent_pairs(
      xs.scattering_types(), tolerance=self.covalent_bond_tolerance)
  elif self.bonding == "all":
    asu_mappings = xs.asu_mappings(
      buffer_thickness=self.distance_cutoff)
    bond_table = crystal.pair_asu_table(asu_mappings)
    bond_table.add_all_pairs(self.distance_cutoff)
  pair_sym_table = bond_table.extract_pair_sym_table(
    all_interactions_from_inside_asu=True)
  self.bonds = flex.vec3_double()
  self.bonds.reserve(len(xs.scatterers()))
  uc = self.xray_structure.unit_cell()
  frac = mat.rec(uc.fractionalization_matrix(), (3, 3))
  inv_frac = frac.inverse()
  # NOTE(review): site_symms appears unused below -- confirm before
  # removing.
  site_symms = xs.site_symmetry_table()
  scatt = self.xray_structure.scatterers()
  for i, neighbours in enumerate(pair_sym_table):
    x0 = sites_cart[i]
    sc0 = scatt[i]
    for j, ops in neighbours.items():
      sc1 = scatt[j]
      # Skip drawing H-H contacts.
      if sc0.scattering_type == 'H' and sc1.scattering_type == 'H':
        continue
      for op in ops:
        if op.is_unit_mx():
          x1 = sites_cart[j]
        else:
          # Symmetry mate: transform the site and its U tensor
          # (U' = R_cart U R_cart^T) and register a new drawable atom.
          x1 = uc.orthogonalize(op * sites_frac[j])
          op_cart = inv_frac * mat.rec(op.r().as_double(), (3, 3)) * frac
          u1 = (op_cart * mat.sym(sym_mat3=thermal_tensors[j])
                * op_cart.transpose())
          t = quadrics.ellipsoid_to_sphere_transform(
            x1, u1.as_sym_mat3())
          self.ellipsoid_to_sphere_transforms[
            sc1.element_symbol()].append(t)
          self.sites_cart.append(x1)
          op_lbl = (" [%s]" % op).lower()
          self.scatterer_indices.append("# %i%s" % (j, op_lbl))
          self.scatterer_labels.append("%s%s" % (sc1.label, op_lbl))
        # Bonds are stored as flat (start, end) point pairs.
        self.bonds.append(x0)
        self.bonds.append(x1)
def lebedev_2005_perturbation(self, reduced_cell):
  """Perturbation measure of Lebedev et al. (2005) for this
  change-of-basis operation applied to the given reduced cell.

  The metrical matrix G of the reduced cell is transformed as
  G' = M^T G M with M the floating-point form of this operation; the
  returned value is sqrt(trace((G^-1 (G' - G))^2) / 12).
  """
  g = matrix.sym(sym_mat3=reduced_cell.metrical_matrix())
  cb = self.as_rational().as_float()
  g_transformed = cb.transpose() * g * cb
  rel_change = g.inverse() * (g_transformed - g)
  return ((rel_change * rel_change).trace() / 12)**0.5
def run(pdb_file_name, n_models, log, output_file_name_prefix, eps=1.e-7):
  """Generate TLS-based structure ensembles, one per TLS group.

  For every TLS group found in the REMARK 3 records of pdb_file_name:
  validate that T and L are positive definite (tolerance eps, raising
  Sorry otherwise), decompose (T,L,S) into elemental motions, generate
  n_models structures sampling those motions, compare atomic U tensors
  computed directly from TLS against those recovered from the ensemble
  (per-atom printout plus correlation/relative-difference metrics), and
  write ensemble and U-tensor PDB files named from
  output_file_name_prefix.

  Returns the ensemble_generator object of the last processed group.
  """
  pdb_inp = iotbx.pdb.input(file_name = pdb_file_name)
  pdb_hierarchy = pdb_inp.construct_hierarchy()
  asc = pdb_hierarchy.atom_selection_cache()
  cs = pdb_inp.crystal_symmetry_from_cryst1()
  tls_extract = mmtbx.tls.tools.tls_from_pdb_inp(
    remark_3_records = pdb_inp.extract_remark_iii_records(3),
    pdb_hierarchy = pdb_hierarchy)
  for i_group, tls_params_one_group in enumerate(tls_extract.tls_params):
    selection = asc.selection(tls_params_one_group.selection_string)
    pdb_hierarchy_sel = pdb_hierarchy.select(selection)
    xrs = pdb_hierarchy_sel.extract_xray_structure(crystal_symmetry=cs)
    deg_to_rad_scale = math.pi/180
    # Units: T[A], L[deg**2], S[A*deg]
    T = matrix.sym(sym_mat3=tls_params_one_group.t)
    L = matrix.sym(sym_mat3=tls_params_one_group.l)
    S = matrix.sqr(tls_params_one_group.s)
    origin = tls_params_one_group.origin
    tlso = tools.tlso(
      t = T.as_sym_mat3(),
      l = L.as_sym_mat3(),
      s = S,
      origin = origin)
    # sanity check
    if(not adptbx.is_positive_definite(tls_params_one_group.t, eps)):
      raise Sorry("T matrix is not positive definite.")
    if(not adptbx.is_positive_definite(tls_params_one_group.l, eps)):
      raise Sorry("L matrix is not positive definite.")
    # Decompose into elemental motions; L and S converted to radians.
    r = analysis.run(
      T=T, L=L*(deg_to_rad_scale**2), S=S*deg_to_rad_scale,
      log=log).self_check()
    ensemble_generator_obj = ensemble_generator(
      tls_from_motions_object = r,
      pdb_hierarchy           = pdb_hierarchy_sel,
      xray_structure          = xrs,
      n_models                = n_models,
      origin                  = origin,
      log                     = log)
    ensemble_generator_obj.write_pdb_file(
      file_name=output_file_name_prefix+"_ensemble_%s.pdb"%str(i_group))
    # get U from TLS
    u_from_tls = tools.uaniso_from_tls_one_group(
      tlso          = tlso,
      sites_cart    = xrs.sites_cart(),
      zeroize_trace = False)
    # get U from ensemble
    pdb_hierarchy_from_tls = pdb_hierarchy_sel.deep_copy()
    pdb_hierarchy_from_ens = pdb_hierarchy_sel.deep_copy()
    u_from_ens = tools.u_cart_from_ensemble(
      models = ensemble_generator_obj.states.root.models())
    for i in xrange(xrs.sites_cart().size()):
      print "atom %d:"%i
      print " Ucart(from TLS):", ["%8.5f"%u for u in u_from_tls[i]]
      print " Ucart(from ens):", ["%8.5f"%u for u in u_from_ens[i]]
    # Overall agreement: linear correlation and a relative L1 difference.
    u1, u2 = u_from_tls.as_double(), u_from_ens.as_double()
    cc = flex.linear_correlation(x=u1, y=u2).coefficient()
    r = flex.sum(flex.abs(u1-u2))/\
        flex.sum(flex.abs(flex.abs(u1)+flex.abs(u2)))*2
    print "%6.4f %6.4f"%(cc, r)
    #
    pdb_hierarchy_from_tls.atoms().set_uij(u_from_tls)
    pdb_hierarchy_from_ens.atoms().set_uij(u_from_ens)
    pdb_hierarchy_from_tls.write_pdb_file(
      file_name = output_file_name_prefix+"_u_from_tls_%s.pdb"%str(i_group),
      crystal_symmetry = cs)
    pdb_hierarchy_from_ens.write_pdb_file(
      file_name = output_file_name_prefix+"_u_from_ensemble_%s.pdb"%str(i_group),
      crystal_symmetry = cs)
  return ensemble_generator_obj
def __init__(self,
             pdb_str,
             dx=0, dy=0, dz=0,
             sx=0, sy=0, sz=0,
             lx=[1, 0, 0], ly=[0, 1, 0], lz=[0, 0, 1],
             tx=0, ty=0, tz=0,
             vx=[1, 0, 0], vy=[0, 1, 0], vz=[0, 0, 1],
             w_M_lx=[0, 0, 0], w_M_ly=[0, 0, 0], w_M_lz=[0, 0, 0],
             origin=None,
             n_models=10000,
             assert_similarity=True,
             show=False,
             log=sys.stdout,
             write_pdb_files=False):
  """Round-trip test of TLS elemental motions.

  Builds a tls_from_motions object from the given elemental-motion
  parameters (librations dx/dy/dz about axes lx/ly/lz, screws sx/sy/sz,
  vibrations tx/ty/tz along vx/vy/vz, axis offsets w_M_*), computes
  atomic U tensors directly from TLS (self.u_cart_tls) and from an
  ensemble of n_models generated structures (self.u_cart_ens), and
  stores their relative difference in self.r. When assert_similarity
  is True the two U sets must agree component-wise to 1.e-3.

  NOTE(review): the list defaults (lx, ly, ...) are mutable default
  arguments; they are only read here (passed to matrix.col), but
  consider tuples to be safe.
  """
  from mmtbx.tls import analysis, tls_as_xyz
  from scitbx import matrix
  from libtbx.utils import null_out
  if (show):
    print >> log, "INPUTS:", "-" * 73
    print >> log, "dx :", dx
    print >> log, "dy :", dy
    print >> log, "dz :", dz
    print >> log, "sx :", sx
    print >> log, "sy :", sy
    print >> log, "sz :", sz
    print >> log, "lx :", [i for i in lx]
    print >> log, "ly :", [i for i in ly]
    print >> log, "lz :", [i for i in lz]
    print >> log, "tx :", tx
    print >> log, "ty :", ty
    print >> log, "tz :", tz
    print >> log, "vx :", [i for i in vx]
    print >> log, "vy :", [i for i in vy]
    print >> log, "vz :", [i for i in vz]
    print >> log, "w_M_lx:", [i for i in w_M_lx]
    print >> log, "w_M_ly:", [i for i in w_M_ly]
    print >> log, "w_M_lz:", [i for i in w_M_lz]
    print >> log, "origin:", origin
    print >> log, "-" * 79
  #
  pdb_inp = iotbx.pdb.input(source_info=None, lines=pdb_str)
  ph = pdb_inp.construct_hierarchy()
  xrs = ph.extract_xray_structure(
    crystal_symmetry=pdb_inp.crystal_symmetry())
  sites_cart = xrs.sites_cart()
  ph.atoms().set_xyz(sites_cart)
  # Default origin: center of mass of the coordinates.
  if (origin is None):
    origin = sites_cart.mean()
  #
  o_tfm = analysis.tls_from_motions(dx=dx, dy=dy, dz=dz,
    l_x=matrix.col(lx), l_y=matrix.col(ly), l_z=matrix.col(lz),
    sx=sx, sy=sy, sz=sz,
    tx=tx, ty=ty, tz=tz,
    v_x=matrix.col(vx), v_y=matrix.col(vy), v_z=matrix.col(vz),
    w_M_lx=matrix.col(w_M_lx),
    w_M_ly=matrix.col(w_M_ly),
    w_M_lz=matrix.col(w_M_lz))
  #
  self.u_cart_tls = get_u_cart(o_tfm=o_tfm, origin=origin,
                               sites_cart=sites_cart)
  tlso_ = tlso(t=o_tfm.T_M.as_sym_mat3(),
               l=o_tfm.L_M.as_sym_mat3(),
               s=o_tfm.S_M.as_mat3(),
               origin=origin)
  if (assert_similarity):
    # Re-derive the motions from the assembled T/L/S matrices; self_check
    # validates the decomposition round-trip.
    T = matrix.sym(sym_mat3=tlso_.t)
    L = matrix.sym(sym_mat3=tlso_.l)
    S = matrix.sqr(tlso_.s)
    o_tfm = analysis.run(T=T, L=L, S=S, log=null_out()).self_check()
  #
  r = tls_as_xyz.ensemble_generator(tls_from_motions_object=o_tfm,
    pdb_hierarchy=ph,
    xray_structure=xrs,
    n_models=n_models,
    origin=origin,
    use_states=write_pdb_files,
    log=null_out())
  if (write_pdb_files):
    r.write_pdb_file(file_name="ensemble_%s.pdb" % str(n_models))
  #
  xyz_all = r.sites_cart_ens
  n_atoms = xyz_all[0].size()
  ###
  # Re-group per-model coordinates into per-atom trajectories.
  xyz_atoms_all = all_vs_all(xyz_all=xyz_all)
  ###
  # U tensors estimated from the scatter of each atom over the ensemble.
  self.u_cart_ens = flex.sym_mat3_double()
  for i in xrange(n_atoms):
    self.u_cart_ens.append(
      u_cart_from_xyz(sites_cart=xyz_atoms_all[i]))
  # Relative L1 difference between TLS-derived and ensemble-derived U.
  u1 = self.u_cart_tls.as_double()
  u2 = self.u_cart_ens.as_double()
  self.r = flex.sum(flex.abs(u1-u2))/\
           flex.sum(flex.abs(flex.abs(u1)+flex.abs(u2)))*2
  ###
  for i in xrange(n_atoms):
    ut = ["%8.5f" % u for u in self.u_cart_tls[i]]
    ue = ["%8.5f" % u for u in self.u_cart_ens[i]]
    if (assert_similarity):
      # Component-wise agreement (absolute values) to 1.e-3.
      for j in xrange(6):
        assert approx_equal(abs(float(ut[j])), abs(float(ue[j])), 1.e-3)
  #
  if (write_pdb_files):
    ph.atoms().set_uij(self.u_cart_tls)
    ph.write_pdb_file(file_name="u_from_tls.pdb",
      crystal_symmetry=xrs.crystal_symmetry())
    ph.atoms().set_uij(self.u_cart_ens)
    ph.write_pdb_file(file_name="u_from_ens.pdb",
      crystal_symmetry=xrs.crystal_symmetry())