def apply_aniso_correction(self, f_array=None):
    """Strip the recorded overall anisotropy from f_array and re-apply the
    isotropic-equivalent component.

    Uses self.b_cart (full anisotropic scale) and self.b_cart_aniso_removed
    (the isotropic part to put back). Returns f_array unchanged when either
    is missing.
    """
    # No recorded correction -> pass the data through untouched.
    if self.b_cart is None or self.b_cart_aniso_removed is None:
        return f_array
    from mmtbx.scaling import absolute_scaling
    from cctbx import adptbx
    cell = f_array.unit_cell()
    u_star_full = adptbx.u_cart_as_u_star(cell, adptbx.b_as_u(self.b_cart))
    u_star_removed = adptbx.u_cart_as_u_star(
        cell, adptbx.b_as_u(self.b_cart_aniso_removed))
    # Step 1: divide out the full anisotropic scale.
    corrected = absolute_scaling.anisotropic_correction(
        f_array, 0.0, u_star_full, must_be_greater_than=-0.0001)
    # Step 2: re-apply the isotropic-equivalent part.
    corrected = absolute_scaling.anisotropic_correction(
        corrected, 0.0, u_star_removed, must_be_greater_than=-0.0001)
    return corrected.set_observation_type(f_array)
def run_00():
  """Exercise bulk_solvent.aniso_u_scaler over all acentric and centric
  Bravais-type space groups: apply a known, trace-free, symmetry-constrained
  anisotropic scale to calculated amplitudes and check that the scaler
  recovers it (constrained and unconstrained variants must agree).
  """
  time_aniso_u_scaler = 0
  for symbol in sgtbx.bravais_types.acentric + sgtbx.bravais_types.centric:
    #print symbol, "-"*50
    space_group_info = sgtbx.space_group_info(symbol=symbol)
    xrs = random_structure.xray_structure(
      space_group_info=space_group_info,
      elements=["N"] * 100,
      volume_per_atom=50.0,
      random_u_iso=True)
    # XXX ad a method to adptbx to do this
    point_group = sgtbx.space_group_info(
      symbol=symbol).group().build_derived_point_group()
    adp_constraints = sgtbx.tensor_rank_2_constraints(
      space_group=point_group, reciprocal_space=True)
    # Random u_cart, symmetrized via the independent-parameter round trip.
    u_star = adptbx.u_cart_as_u_star(
      xrs.unit_cell(), adptbx.random_u_cart(u_scale=1, u_min=0.1))
    u_indep = adp_constraints.independent_params(all_params=u_star)
    u_star = adp_constraints.all_params(independent_params=u_indep)
    b_cart_start = adptbx.u_as_b(
      adptbx.u_star_as_u_cart(xrs.unit_cell(), u_star))
    #
    # Subtract the trace so the scale is purely anisotropic.
    tr = (b_cart_start[0] + b_cart_start[1] + b_cart_start[2]) / 3
    b_cart_start = [
      b_cart_start[0] - tr, b_cart_start[1] - tr, b_cart_start[2] - tr,
      b_cart_start[3], b_cart_start[4], b_cart_start[5]
    ]
    # Recomputed trace should now be ~0 (used only by the debug print below).
    tr = (b_cart_start[0] + b_cart_start[1] + b_cart_start[2]) / 3
    #
    #print "Input b_cart :", " ".join(["%8.4f"%i for i in b_cart_start]), "tr:", tr
    F = xrs.structure_factors(d_min=2.0).f_calc()
    u_star = adptbx.u_cart_as_u_star(F.unit_cell(), adptbx.b_as_u(b_cart_start))
    fbc = mmtbx.f_model.ext.k_anisotropic(F.indices(), u_star)
    fc = F.structure_factors_from_scatterers(xray_structure=xrs).f_calc()
    # Synthetic "observations": |Fcalc| times the known anisotropic scale.
    f_obs = F.customized_copy(data=flex.abs(fc.data() * fbc))
    t0 = time.time()
    #
    # Constrained fit (uses the point-group ADP constraint matrix).
    obj = bulk_solvent.aniso_u_scaler(
      f_model_abs=flex.abs(fc.data()),
      f_obs=f_obs.data(),
      miller_indices=f_obs.indices(),
      adp_constraint_matrix=adp_constraints.gradient_sum_matrix())
    time_aniso_u_scaler += (time.time() - t0)
    b_cart_final = adptbx.u_as_b(
      adptbx.u_star_as_u_cart(
        f_obs.unit_cell(),
        adp_constraints.all_params(tuple(obj.u_star_independent))))
    #
    # Unconstrained fit must agree with the constrained one.
    obj = bulk_solvent.aniso_u_scaler(f_model_abs=flex.abs(fc.data()),
      f_obs=f_obs.data(),
      miller_indices=f_obs.indices())
    b_cart_final2 = adptbx.u_as_b(
      adptbx.u_star_as_u_cart(f_obs.unit_cell(), tuple(obj.u_star)))
    #
    assert approx_equal(b_cart_final, b_cart_final2)
    #print "Output b_cart:", " ".join(["%8.4f"%i for i in b_cart_final])
    assert approx_equal(b_cart_start, b_cart_final, 1.e-4)
  print("Time (aniso_u_scaler only): %6.4f" % time_aniso_u_scaler)
def run_00():
  """Exercise bulk_solvent.aniso_u_scaler over all acentric and centric
  Bravais-type space groups: apply a known, trace-free, symmetry-constrained
  anisotropic scale to calculated amplitudes and check that the scaler
  recovers it (constrained and unconstrained variants must agree).
  """
  time_aniso_u_scaler = 0
  for symbol in sgtbx.bravais_types.acentric + sgtbx.bravais_types.centric:
    space_group_info = sgtbx.space_group_info(symbol = symbol)
    xrs = random_structure.xray_structure(
      space_group_info = space_group_info,
      elements = ["N"]*100,
      volume_per_atom = 50.0,
      random_u_iso = True)
    # XXX ad a method to adptbx to do this
    point_group = sgtbx.space_group_info(
      symbol=symbol).group().build_derived_point_group()
    adp_constraints = sgtbx.tensor_rank_2_constraints(
      space_group=point_group, reciprocal_space=True)
    # Random u_cart, symmetrized via the independent-parameter round trip.
    u_star = adptbx.u_cart_as_u_star(xrs.unit_cell(),
      adptbx.random_u_cart(u_scale=1,u_min=0.1))
    u_indep = adp_constraints.independent_params(all_params=u_star)
    u_star = adp_constraints.all_params(independent_params=u_indep)
    b_cart_start=adptbx.u_as_b(adptbx.u_star_as_u_cart(xrs.unit_cell(), u_star))
    # Subtract the trace so the scale is purely anisotropic.
    tr = (b_cart_start[0]+b_cart_start[1]+b_cart_start[2])/3
    b_cart_start = [b_cart_start[0]-tr,b_cart_start[1]-tr,b_cart_start[2]-tr,
                    b_cart_start[3],b_cart_start[4],b_cart_start[5]]
    tr = (b_cart_start[0]+b_cart_start[1]+b_cart_start[2])/3  # now ~0
    # Synthetic "observations": |Fcalc| times the known anisotropic scale.
    F = xrs.structure_factors(d_min = 2.0).f_calc()
    u_star = adptbx.u_cart_as_u_star(
      F.unit_cell(), adptbx.b_as_u(b_cart_start))
    fbc = mmtbx.f_model.ext.k_anisotropic(F.indices(), u_star)
    fc = F.structure_factors_from_scatterers(xray_structure=xrs).f_calc()
    f_obs = F.customized_copy(data = flex.abs(fc.data()*fbc))
    t0 = time.time()
    # Constrained fit (uses the point-group ADP constraint matrix).
    obj = bulk_solvent.aniso_u_scaler(
      f_model_abs = flex.abs(fc.data()),
      f_obs = f_obs.data(),
      miller_indices = f_obs.indices(),
      adp_constraint_matrix = adp_constraints.gradient_sum_matrix())
    time_aniso_u_scaler += (time.time()-t0)
    b_cart_final = adptbx.u_as_b(adptbx.u_star_as_u_cart(f_obs.unit_cell(),
      adp_constraints.all_params(tuple(obj.u_star_independent))))
    # Unconstrained fit must agree with the constrained one.
    obj = bulk_solvent.aniso_u_scaler(
      f_model_abs = flex.abs(fc.data()),
      f_obs = f_obs.data(),
      miller_indices = f_obs.indices())
    b_cart_final2 = adptbx.u_as_b(adptbx.u_star_as_u_cart(f_obs.unit_cell(),
      tuple(obj.u_star)))
    assert approx_equal(b_cart_final, b_cart_final2)
    assert approx_equal(b_cart_start, b_cart_final, 1.e-4)
  # Fixed: Python-2 print statement -> print() (rest of file uses print()).
  print("Time (aniso_u_scaler only): %6.4f" % time_aniso_u_scaler)
def exercise_05_k_sol_b_sol_only(d_min=2.0):
  """Refine bulk-solvent parameters with anisotropic scaling disabled and
  verify that the known k_sol, b_sol and b_cart are recovered."""
  xray_structure = get_xray_structure_from_file()
  # Ground-truth parameters used to simulate the observations.
  k_sol, b_sol = 0.33, 34.0
  b_cart = [1, 2, 3, 0, 4, 0]
  f_obs, r_free_flags = get_f_obs_freer(
    d_min=d_min,
    k_sol=k_sol,
    b_sol=b_sol,
    b_cart=b_cart,
    xray_structure=xray_structure)
  fmodel = mmtbx.f_model.manager(
    r_free_flags=r_free_flags,
    f_obs=f_obs,
    xray_structure=xray_structure)
  params = bss.master_params.extract()
  params.anisotropic_scaling = False
  params.number_of_macro_cycles = 5
  u_star = adptbx.u_cart_as_u_star(
    fmodel.f_obs().unit_cell(), adptbx.b_as_u(b_cart))
  fmodel_kbu = mmtbx.f_model.manager_kbu(
    f_obs=fmodel.f_obs(),
    f_calc=fmodel.f_calc(),
    f_masks=fmodel.arrays.core.f_masks,
    f_part1=fmodel.arrays.core.f_part1,
    f_part2=fmodel.arrays.core.f_part2,
    ss=fmodel.ss,
    u_star=u_star)
  r_work_start = fmodel_kbu.r_factor()
  result = bss.bulk_solvent_and_scales(fmodel_kbu=fmodel_kbu, params=params)
  r_work = result.fmodel_kbu.r_factor() * 100.
  # The start model must be measurably wrong for the test to be meaningful.
  assert r_work_start > 0.05
  # assert approx_equal(r_work, 0.0, eps=1.e-4)
  assert approx_equal(result.k_sols()[0], k_sol, eps=1.e-4)
  assert approx_equal(result.b_sols()[0], b_sol, eps=1.e-4)
  assert approx_equal(result.b_cart(), b_cart, eps=1.e-4)
def get_sf(k_sol, b_sol, b_cart, xrs, miller_set=None, d_min=None,
           twin_law=None, sfg_params=None):
  """Simulate observed amplitudes for xrs with the given bulk-solvent
  (k_sol, b_sol) and overall anisotropic scale (b_cart), plus R-free flags.

  Exactly one of miller_set / d_min selects the reflection set. Random seeds
  are fixed so the result is reproducible.
  """
  random.seed(0)
  flex.set_random_seed(0)
  if miller_set is None:
    assert d_min is not None
    reference = abs(
      xrs.structure_factors(d_min=d_min, anomalous_flag=False).f_calc())
  else:
    assert d_min is None
    reference = miller_set
  r_free_flags = reference.generate_r_free_flags(fraction=0.1)
  fmodel = mmtbx.f_model.manager(
    r_free_flags=r_free_flags,
    f_obs=reference,
    sf_and_grads_accuracy_params=sfg_params,
    xray_structure=xrs,
    twin_law=twin_law)
  # (sin(theta)/lambda)^2 per reflection, used by the bulk-solvent model.
  ss = 1. / flex.pow2(r_free_flags.d_spacings().data()) / 4.
  k_mask = mmtbx.f_model.ext.k_mask(ss, k_sol, b_sol)
  u_star = adptbx.u_cart_as_u_star(xrs.unit_cell(), adptbx.b_as_u(b_cart))
  k_anisotropic = mmtbx.f_model.ext.k_anisotropic(
    r_free_flags.indices(), u_star)
  fmodel.update_xray_structure(
    xray_structure=xrs, update_f_calc=True, update_f_mask=True)
  fmodel.update_core(k_mask=k_mask, k_anisotropic=k_anisotropic)
  return abs(fmodel.f_model()), r_free_flags
def exercise_tensor_constraints_core(crystal_symmetry):
  """Verify the C++ rank-2 tensor symmetry constraints against the python
  reference implementation, and round-trip a symmetry-averaged ADP through
  the independent-parameter representation.
  """
  from cctbx import crystal
  from cctbx import adptbx
  from scitbx import matrix
  site_symmetry = crystal.special_position_settings(
    crystal_symmetry).site_symmetry(site=(0,0,0))
  unit_cell = crystal_symmetry.unit_cell()
  group = crystal_symmetry.space_group()
  # Sanity check on the special position at the origin.
  assert site_symmetry.n_matrices() == group.order_p()
  for reciprocal_space in [False, True]:
    # C++ constraints must match the python reference row-for-row.
    c_tensor_constraints = sgtbx.tensor_rank_2_constraints(
      space_group=group,
      reciprocal_space=reciprocal_space).row_echelon_form()
    p_tensor_constraints = python_tensor_constraints(
      self=group, reciprocal_space=reciprocal_space)
    assert c_tensor_constraints.all_eq(p_tensor_constraints)
  adp_constraints = group.adp_constraints()
  u_cart_p1 = adptbx.random_u_cart()
  u_star_p1 = adptbx.u_cart_as_u_star(unit_cell, u_cart_p1)
  u_star = site_symmetry.average_u_star(u_star_p1)
  # Scale factor to make the comparison numerically well conditioned.
  f = unit_cell.volume()**(2/3.)
  assert approx_equal(
    list(matrix.col(group.average_u_star(u_star=u_star_p1))*f),
    list(matrix.col(u_star)*f))
  independent_params = adp_constraints.independent_params(u_star)
  assert adp_constraints.n_independent_params() == len(independent_params)
  assert adp_constraints.n_independent_params() \
    + adp_constraints.n_dependent_params() == 6
  # Expanding the independent parameters must reproduce the original ADP.
  u_star_vfy = adp_constraints.all_params(independent_params)
  u_cart = adptbx.u_star_as_u_cart(unit_cell, u_star)
  u_cart_vfy = adptbx.u_star_as_u_cart(unit_cell, list(u_star_vfy))
  assert approx_equal(u_cart_vfy, u_cart)
def exercise_tensor_constraints_core(crystal_symmetry):
  """Check C++ tensor_rank_2 symmetry constraints against the python
  reference, then confirm that an averaged ADP survives the
  independent-parameters round trip unchanged.
  """
  from cctbx import crystal
  from cctbx import adptbx
  from scitbx import matrix
  site_symmetry = crystal.special_position_settings(
    crystal_symmetry).site_symmetry(site=(0, 0, 0))
  unit_cell = crystal_symmetry.unit_cell()
  group = crystal_symmetry.space_group()
  # Sanity check on the special position at the origin.
  assert site_symmetry.n_matrices() == group.order_p()
  for reciprocal_space in [False, True]:
    # C++ constraints must match the python reference row-for-row.
    c_tensor_constraints = sgtbx.tensor_rank_2_constraints(
      space_group=group,
      reciprocal_space=reciprocal_space).row_echelon_form()
    p_tensor_constraints = python_tensor_constraints(
      self=group, reciprocal_space=reciprocal_space)
    assert c_tensor_constraints.all_eq(p_tensor_constraints)
  adp_constraints = group.adp_constraints()
  u_cart_p1 = adptbx.random_u_cart()
  u_star_p1 = adptbx.u_cart_as_u_star(unit_cell, u_cart_p1)
  u_star = site_symmetry.average_u_star(u_star_p1)
  # Scale factor to make the comparison numerically well conditioned.
  f = unit_cell.volume()**(2 / 3.)
  assert approx_equal(
    list(matrix.col(group.average_u_star(u_star=u_star_p1)) * f),
    list(matrix.col(u_star) * f))
  independent_params = adp_constraints.independent_params(u_star)
  assert adp_constraints.n_independent_params() == len(independent_params)
  assert adp_constraints.n_independent_params() \
    + adp_constraints.n_dependent_params() == 6
  # Expanding the independent parameters must reproduce the original ADP.
  u_star_vfy = adp_constraints.all_params(independent_params)
  u_cart = adptbx.u_star_as_u_cart(unit_cell, u_star)
  u_cart_vfy = adptbx.u_star_as_u_cart(unit_cell, list(u_star_vfy))
  assert approx_equal(u_cart_vfy, u_cart)
def debye_waller_factors(self,
      miller_index=None,
      miller_indices=None,
      u_iso=None,
      b_iso=None,
      u_cart=None,
      b_cart=None,
      u_cif=None,
      u_star=None,
      exp_arg_limit=50,
      truncate_exp_arg=False):
  """Compute Debye-Waller factor(s) for one ADP given in any single
  convention (u_iso, b_iso, u_cart, b_cart, u_cif or u_star).

  Exactly one of miller_index/miller_indices and exactly one ADP argument
  must be supplied; all conversions go through cctbx.adptbx.
  """
  # Exactly one index argument and exactly one ADP parameterization.
  assert [miller_index, miller_indices].count(None) == 1
  assert [u_iso, b_iso, u_cart, b_cart, u_cif, u_star].count(None) == 5
  from cctbx import adptbx
  h = miller_indices if miller_index is None else miller_index
  # Isotropic route: everything reduces to b_iso.
  if u_iso is not None:
    b_iso = adptbx.u_as_b(u_iso)
  if b_iso is not None:
    return adptbx.debye_waller_factor_b_iso(
      self.stol_sq(h), b_iso, exp_arg_limit, truncate_exp_arg)
  # Anisotropic route: everything reduces to u_star.
  if b_cart is not None:
    u_cart = adptbx.b_as_u(b_cart)
  if u_cart is not None:
    u_star = adptbx.u_cart_as_u_star(self, u_cart)
  if u_cif is not None:
    u_star = adptbx.u_cif_as_u_star(self, u_cif)
  assert u_star is not None
  return adptbx.debye_waller_factor_u_star(
    h, u_star, exp_arg_limit, truncate_exp_arg)
def exercise_derivatives(space_group_info, out):
  """Check analytical Debye-Waller derivatives w.r.t. u_star, and w.r.t. the
  symmetry-independent subset, against finite differences and against
  hand-derived reference curvatures (p2_curv etc.) for selected groups.

  :param space_group_info: sgtbx.space_group_info to exercise
  :param out: writable stream for diagnostic output
  """
  crystal_symmetry = space_group_info.any_compatible_crystal_symmetry(
    volume=1000)
  space_group = space_group_info.group()
  adp_constraints = space_group.adp_constraints()
  m = adp_constraints.row_echelon_form()
  # Fixed: Python-2 "print >> out" statements -> print(..., file=out).
  print(matrix.rec(m, (m.size()//6, 6)).mathematica_form(
    one_row_per_line=True), file=out)
  print(list(adp_constraints.independent_indices), file=out)
  u_cart_p1 = adptbx.random_u_cart()
  u_star_p1 = adptbx.u_cart_as_u_star(crystal_symmetry.unit_cell(), u_cart_p1)
  u_star = space_group.average_u_star(u_star_p1)
  miller_set = miller.build_set(
    crystal_symmetry=crystal_symmetry,
    d_min=3,
    anomalous_flag=False)
  for h in miller_set.indices():
    # Unconstrained gradients and curvatures w.r.t. all six u_star elements.
    grads_fin = d_dw_d_u_star_finite(h=h, u_star=u_star)
    print("grads_fin:", list(grads_fin), file=out)
    grads_ana = d_dw_d_u_star_analytical(h=h, u_star=u_star)
    print("grads_ana:", list(grads_ana), file=out)
    compare_derivatives(grads_ana, grads_fin)
    curvs_fin = d2_dw_d_u_star_d_u_star_finite(h=h, u_star=u_star)
    print("curvs_fin:", list(curvs_fin), file=out)
    curvs_ana = d2_dw_d_u_star_d_u_star_analytical(h=h, u_star=u_star)
    print("curvs_ana:", list(curvs_ana), file=out)
    compare_derivatives(curvs_ana, curvs_fin)
    # Same checks restricted to the symmetry-independent parameters.
    u_indep = adp_constraints.independent_params(u_star)
    grads_indep_fin = d_dw_d_u_indep_finite(
      adp_constraints=adp_constraints, h=h, u_indep=u_indep)
    print("grads_indep_fin:", list(grads_indep_fin), file=out)
    grads_indep_ana = flex.double(adp_constraints.independent_gradients(
      all_gradients=list(grads_ana)))
    print("grads_indep_ana:", list(grads_indep_ana), file=out)
    compare_derivatives(grads_indep_ana, grads_indep_fin)
    curvs_indep_fin = d2_dw_d_u_indep_d_u_indep_finite(
      adp_constraints=adp_constraints, h=h, u_indep=u_indep)
    print("curvs_indep_fin:", list(curvs_indep_fin), file=out)
    curvs_indep_ana = adp_constraints.independent_curvatures(
      all_curvatures=curvs_ana)
    print("curvs_indep_ana:", list(curvs_indep_ana), file=out)
    compare_derivatives(curvs_indep_ana, curvs_indep_fin)
    # Cross-check against hand-derived curvatures where available.
    curvs_indep_mm = None
    if (str(space_group_info) == "P 1 2 1"):
      assert list(adp_constraints.independent_indices) == [0,1,2,4]
      curvs_indep_mm = p2_curv(h, u_star)
    elif (str(space_group_info) == "P 4"):
      assert list(adp_constraints.independent_indices) == [1,2]
      curvs_indep_mm = p4_curv(h, u_star)
    elif (str(space_group_info) in ["P 3", "P 6"]):
      assert list(adp_constraints.independent_indices) == [2,3]
      curvs_indep_mm = p3_curv(h, u_star)
    elif (str(space_group_info) == "P 2 3"):
      assert list(adp_constraints.independent_indices) == [2]
      curvs_indep_mm = p23_curv(h, u_star)
    if (curvs_indep_mm is not None):
      curvs_indep_mm = flex.double(
        curvs_indep_mm).matrix_symmetric_as_packed_u()
      print("curvs_indep_mm:", list(curvs_indep_mm), file=out)
      compare_derivatives(curvs_indep_ana, curvs_indep_mm)
def run_group(symbol):
  """Build a random structure in the given space group and compare
  analytical gradients/curvatures of the target w.r.t. k_sol, b_sol and
  u_star against finite-difference estimates.
  """
  group = space_group_info(symbol)
  # Fixed: Python-2 print statements -> print() throughout this function.
  print("\n==")
  elements = ('C', 'N', 'O', 'H')*11
  xrs = random_structure.xray_structure(
    space_group_info = group,
    volume_per_atom = 25.,
    general_positions_only = False,
    elements = elements,
    min_distance = 1.0)
  fo = abs(xrs.structure_factors(d_min=2).f_calc())
  fmodel = mmtbx.f_model.manager(
    f_obs = fo,
    xray_structure = xrs)
  # Ground-truth scale parameters (two masks).
  k_sol=flex.double([10.35,5.34])
  b_sol=flex.double([30.0, 24.0])
  b_cart = [10,20,30,40,50,60]
  u_star = flex.double(adptbx.b_as_u(
    adptbx.u_cart_as_u_star(xrs.unit_cell(), b_cart)))
  #
  TGO = cpp_tg(fmodel=fmodel)
  tg = TGO.get_tg(k_sol=k_sol, b_sol=b_sol, u_star=u_star)
  # k_sol: analytical vs finite differences
  gk_a=list(tg.grad_k_sols())
  ck_a=list(tg.curv_k_sols())
  gk_fd, ck_fd=fd(TGO=TGO, k_sol=k_sol, b_sol=b_sol, u_star=u_star,
    param="k_sol")
  # b_sol
  gb_a=list(tg.grad_b_sols())
  cb_a=list(tg.curv_b_sols())
  gb_fd, cb_fd=fd(TGO=TGO, k_sol=k_sol, b_sol=b_sol, u_star=u_star,
    param="b_sol")
  # u_star
  gu_a=list(tg.grad_u_star())
  gu_fd, junk=fd(TGO=TGO, k_sol=k_sol, b_sol=b_sol, u_star=u_star,
    param="u_star")
  print("u_star:", gu_a)
  print("u_star:", gu_fd)
  # u_star-only target object must give the same u_star gradients.
  TGO2 = cpp_tg_u_star_only(fmodel=fmodel)
  tg2 = TGO2.get_tg(k_sol=k_sol, b_sol=b_sol, u_star=u_star)
  gu_a2=list(tg2.grad_u_star())
  gu_fd2, junk=fd(TGO=TGO2, k_sol=k_sol, b_sol=b_sol, u_star=u_star,
    param="u_star")
  print("u_star:", gu_a2)
  print("u_star:", gu_fd2)
  #
  print("k_sol:", gk_a, ck_a)
  print("k_sol:", gk_fd, ck_fd)
  print("b_sol:", gb_a, cb_a)
  print("b_sol:", gb_fd, cb_fd)
  #
  assert approx_equal(gk_a, gk_fd, eps=1.e-4)
  assert approx_equal(gb_a, gb_fd, eps=1.e-4)
  assert approx_equal(ck_a, ck_fd, eps=1.e-4)
  assert approx_equal(cb_a, cb_fd, eps=1.e-4)
  assert approx_equal(gu_a, gu_fd, eps=1.e-4)
  assert approx_equal(gu_a2, gu_fd2, eps=1.e-6)
def exercise_factor_u_star_u_iso():
  """Check adptbx.factor_u_star_u_iso against two independent python
  reference implementations on random positive-definite ADPs."""
  # Fixed: Python-2 xrange -> range (file already uses Python-3 print()).
  for i_trial in range(100):
    # Random 3x3 matrix; a^T * a is always positive-definite.
    a = flex.random_double(size=9, factor=3)
    a.resize(flex.grid(3, 3))
    u = a.matrix_transpose().matrix_multiply(a)  # always positive-definite
    u_cart = [u[0], u[4], u[8], u[1], u[2], u[5]]
    unit_cell = uctbx.unit_cell((3, 5, 7, 80, 100, 110))
    u_star = adptbx.u_cart_as_u_star(unit_cell, u_cart)
    # Two independent reference computations must agree...
    airlie = u_star_minus_u_iso_airlie(unit_cell, u_star)
    ralf = u_star_minus_u_iso_ralf(unit_cell, u_star)
    assert approx_equal(ralf, airlie, 1.0e-10)
    # ...and so must the C++ implementation.
    f = adptbx.factor_u_star_u_iso(unit_cell=unit_cell, u_star=u_star)
    assert approx_equal(f.u_iso, adptbx.u_cart_as_u_iso(u_cart))
    assert approx_equal(f.u_star_minus_u_iso, airlie, 1.0e-10)
def exercise_factor_u_star_u_iso():
  """Check adptbx.factor_u_star_u_iso against two independent python
  reference implementations on random positive-definite ADPs."""
  # Fixed: Python-2 xrange -> range.
  for i_trial in range(100):
    # Random 3x3 matrix; a^T * a is always positive-definite.
    a = flex.random_double(size=9, factor=3)
    a.resize(flex.grid(3,3))
    u = a.matrix_transpose().matrix_multiply(a) # always positive-definite
    u_cart = [u[0],u[4],u[8],u[1],u[2],u[5]]
    unit_cell = uctbx.unit_cell((3,5,7,80,100,110))
    u_star = adptbx.u_cart_as_u_star(unit_cell, u_cart)
    # Two independent reference computations must agree...
    airlie = u_star_minus_u_iso_airlie(unit_cell, u_star)
    ralf = u_star_minus_u_iso_ralf(unit_cell, u_star)
    assert approx_equal(ralf, airlie, 1.e-10)
    # ...and so must the C++ implementation.
    f = adptbx.factor_u_star_u_iso(unit_cell=unit_cell, u_star=u_star)
    assert approx_equal(f.u_iso, adptbx.u_cart_as_u_iso(u_cart))
    assert approx_equal(f.u_star_minus_u_iso, airlie, 1.e-10)
def ru(crystal_symmetry, u_scale=1, u_min=0.1):
  """Return a one-element flex.sym_mat3_double holding a random u_cart that
  obeys the ADP symmetry constraints of crystal_symmetry's point group."""
  from cctbx import sgtbx
  cell = crystal_symmetry.unit_cell()
  lookup = crystal_symmetry.space_group().type().lookup_symbol()
  pg = sgtbx.space_group_info(
    symbol=lookup).group().build_derived_point_group()
  constraints = sgtbx.tensor_rank_2_constraints(
    space_group=pg, reciprocal_space=True)
  raw_u_star = adptbx.u_cart_as_u_star(
    cell, adptbx.random_u_cart(u_scale=u_scale, u_min=u_min))
  # Project onto the independent parameters and expand back -> symmetrized.
  indep = constraints.independent_params(all_params=raw_u_star)
  sym_u_star = constraints.all_params(independent_params=indep)
  result = flex.sym_mat3_double()
  result.append(adptbx.u_star_as_u_cart(cell, sym_u_star))
  return result
def ru(crystal_symmetry, u_scale=1, u_min=0.1):
  """Generate one random symmetry-constrained u_cart for crystal_symmetry
  and return it wrapped in a flex.sym_mat3_double of length one."""
  from cctbx import sgtbx
  space_group_symbol = crystal_symmetry.space_group().type().lookup_symbol()
  derived_pg = sgtbx.space_group_info(
    symbol=space_group_symbol).group().build_derived_point_group()
  adp_constraints = sgtbx.tensor_rank_2_constraints(
    space_group=derived_pg,
    reciprocal_space=True)
  unit_cell = crystal_symmetry.unit_cell()
  u_star_raw = adptbx.u_cart_as_u_star(
    unit_cell, adptbx.random_u_cart(u_scale=u_scale, u_min=u_min))
  # Round trip through the independent parameters enforces the symmetry.
  u_star_sym = adp_constraints.all_params(
    independent_params=adp_constraints.independent_params(
      all_params=u_star_raw))
  out = flex.sym_mat3_double()
  out.append(adptbx.u_star_as_u_cart(unit_cell, u_star_sym))
  return out
def __init__(self):
  """Build a random anisotropic test structure from the subclass-provided
  Hall symbol and scatterers, then compute squared-amplitude
  pseudo-observations with unit sigmas.

  Relies on attributes/methods declared by the subclass: self.hall,
  self.scatterers(), self.random_u_cart_scale -- TODO confirm against the
  concrete subclasses.
  """
  sgi = sgtbx.space_group_info("Hall: %s" % self.hall)
  cs = sgi.any_compatible_crystal_symmetry(volume=1000)
  xs = xray.structure(crystal.special_position_settings(cs))
  for i, sc in enumerate(self.scatterers()):
    # Refine anisotropic ADPs only: u_iso off, u_aniso + its gradients on.
    sc.flags.set_use_u_iso(False).set_use_u_aniso(True)\
      .set_grad_u_aniso(True)
    xs.add_scatterer(sc)
    site_symm = xs.site_symmetry_table().get(i)
    # Random u_cart, symmetrized to the scatterer's site symmetry.
    u_cart = adptbx.random_u_cart(u_scale=self.random_u_cart_scale)
    u_star = adptbx.u_cart_as_u_star(cs.unit_cell(), u_cart)
    xs.scatterers()[-1].u_star = site_symm.average_u_star(u_star)
  self.xray_structure = xs
  mi = cs.build_miller_set(d_min=0.5, anomalous_flag=False)
  ma = mi.structure_factors_from_scatterers(xs, algorithm="direct").f_calc()
  # |F|^2 "observations" with all sigmas set to 1.
  self.fo_sq = ma.norm().customized_copy(
    sigmas=flex.double(ma.size(), 1.))
def anisotropic_correction(self):
  """Estimate the overall anisotropy of self.f_obs by ML absolute scaling
  and remove its trace-free part from the data in place.

  No-op when params.anisotropic_correction is disabled. Stores the scaling
  object on self.aniso_scale_and_b and replaces self.f_obs with the
  corrected array.
  """
  if not self.params.anisotropic_correction:
    return
  self.aniso_scale_and_b = None
  contents = self.params.asu_contents
  order_z = self.f_obs.space_group().order_z()
  multiplier = order_z * contents.n_copies_per_asu
  self.aniso_scale_and_b = absolute_scaling.ml_aniso_absolute_scaling(
    miller_array=self.f_obs,
    n_residues=contents.n_residues * multiplier,
    n_bases=contents.n_bases * multiplier)
  self.aniso_scale_and_b.show(out=self.log)
  # Keep only the trace-free (purely anisotropic) part of b_cart.
  b_cart = self.aniso_scale_and_b.b_cart
  trace = sum(b_cart[:3]) / 3
  b_cart_trace_free = [
    b_cart[0] - trace, b_cart[1] - trace, b_cart[2] - trace,
    b_cart[3], b_cart[4], b_cart[5]]
  u_star = adptbx.u_cart_as_u_star(
    self.f_obs.unit_cell(), adptbx.b_as_u(b_cart_trace_free))
  self.f_obs = absolute_scaling.anisotropic_correction(
    self.f_obs, 0.0, u_star).set_observation_type(self.f_obs)
def exercise_02_b_cart_sym_constr(d_min = 2.0, tolerance = 1.e-6):
  """For every Bravais-type space group, refine the overall anisotropic
  scale with and without symmetry constraints on b_cart and check the
  recovered values against unconstrained (b_cart_1) and symmetry-averaged
  (b_cart_2) ground truth.
  """
  for symbol in sgtbx.bravais_types.acentric + sgtbx.bravais_types.centric:
    space_group_info = sgtbx.space_group_info(symbol = symbol)
    xray_structure = get_xray_structure_random(space_group_info)
    sg = xray_structure.space_group()
    uc = xray_structure.unit_cell()
    # b_cart_1: arbitrary; b_cart_2: symmetrized to the space group.
    u_cart_p1 = adptbx.random_u_cart(u_scale=5, u_min=5)
    u_star_p1 = adptbx.u_cart_as_u_star(uc, u_cart_p1)
    b_cart_1 = adptbx.u_star_as_u_cart(uc, u_star_p1)
    b_cart_2 = adptbx.u_star_as_u_cart(uc, sg.average_u_star(u_star = u_star_p1))
    for b_cart in (b_cart_1, b_cart_2):
      # Observations simulated without bulk solvent (k_sol = b_sol = 0).
      f_obs, r_free_flags = \
        get_f_obs_freer(d_min = d_min, k_sol = 0, b_sol = 0, b_cart = b_cart,
          xray_structure = xray_structure)
      fmodel = mmtbx.f_model.manager(
        r_free_flags = r_free_flags,
        f_obs = f_obs,
        xray_structure = xray_structure)
      for flag in (True, False):
        # flag toggles symmetry constraints on b_cart during minimization.
        params = bss.master_params.extract()
        params.bulk_solvent = False
        params.anisotropic_scaling = True
        params.k_sol_b_sol_grid_search = False
        params.minimization_k_sol_b_sol = False
        params.minimization_b_cart = True
        params.symmetry_constraints_on_b_cart = flag
        params.max_iterations = 50
        params.min_iterations = 50
        result = bss.bulk_solvent_and_scales(
          fmodel_kbu = fmodel.fmodel_kbu(),
          params = params)
        # Exact recovery expected whenever the constraint setting is
        # compatible with the b_cart used to simulate the data.
        if(flag == False and approx_equal(b_cart, b_cart_1, out=None)):
          assert approx_equal(result.b_cart(), b_cart, tolerance)
        if(flag == True and approx_equal(b_cart, b_cart_2, out=None)):
          assert approx_equal(result.b_cart(), b_cart, tolerance)
        if(flag == False and approx_equal(b_cart, b_cart_2, out=None)):
          assert approx_equal(result.b_cart(), b_cart, tolerance)
        if(flag == True and approx_equal(b_cart, b_cart_1, out=None)):
          # Constrained refinement against unconstrained truth: elements
          # forced to zero by symmetry must come out as zero.
          for u2, ufm in zip(b_cart_2, result.b_cart()):
            if(abs(u2) < 1.e-6):
              assert approx_equal(ufm, 0.0, tolerance)
def combine_tls_and_u_local(xray_structure, tls_selections, tls_groups):
  """Add the TLS-derived ADP contribution to the individual anisotropic
  ADPs of the scatterers covered by each TLS selection (in place)."""
  assert len(tls_selections) == len(tls_groups)
  for selection in tls_selections:
    xray_structure.convert_to_anisotropic(selection=selection)
  tlsos = [
    tlso(t=group.t, l=group.l, s=group.s, origin=group.origin)
    for group in tls_groups]
  u_cart_tls = u_cart_from_tls(
    sites_cart=xray_structure.sites_cart(),
    selections=tls_selections,
    tlsos=tlsos)
  cell = xray_structure.unit_cell()
  for i_seq, scatterer in enumerate(xray_structure.scatterers()):
    contribution = u_cart_tls[i_seq]
    if contribution == (0, 0, 0, 0, 0, 0):
      continue  # atom outside all TLS groups: nothing to add
    assert scatterer.flags.use_u_aniso()
    u_star_tls = adptbx.u_cart_as_u_star(cell, tuple(contribution))
    scatterer.u_star = tuple(
      flex.double(scatterer.u_star) + flex.double(u_star_tls))
def combine_tls_and_u_local(xray_structure, tls_selections, tls_groups):
  """In-place sum of TLS-derived and local anisotropic ADPs: for every
  scatterer with a non-zero TLS contribution, add it to scatterer.u_star."""
  assert len(tls_selections) == len(tls_groups)
  # All selected atoms must carry anisotropic ADPs before summation.
  for sel in tls_selections:
    xray_structure.convert_to_anisotropic(selection=sel)
  tlsos = []
  for g in tls_groups:
    tlsos.append(tlso(t=g.t, l=g.l, s=g.s, origin=g.origin))
  u_cart_tls = u_cart_from_tls(
    sites_cart=xray_structure.sites_cart(),
    selections=tls_selections,
    tlsos=tlsos)
  unit_cell = xray_structure.unit_cell()
  for i_seq, sc in enumerate(xray_structure.scatterers()):
    if u_cart_tls[i_seq] != (0, 0, 0, 0, 0, 0):
      assert sc.flags.use_u_aniso()
      u_star_tls = adptbx.u_cart_as_u_star(
        unit_cell, tuple(u_cart_tls[i_seq]))
      summed = flex.double(sc.u_star) + flex.double(u_star_tls)
      sc.u_star = tuple(summed)
def exercise_02_b_cart_sym_constr(d_min=2.0, tolerance=1.e-6):
  """Variant of the b_cart symmetry-constraint exercise that always runs
  with symmetry_constraints_on_b_cart enabled (flag fixed to True) and
  checks recovery against the symmetrized ground truth b_cart_2.
  """
  for symbol in sgtbx.bravais_types.acentric + sgtbx.bravais_types.centric:
    space_group_info = sgtbx.space_group_info(symbol=symbol)
    xray_structure = get_xray_structure_random(space_group_info)
    sg = xray_structure.space_group()
    uc = xray_structure.unit_cell()
    # b_cart_1: arbitrary; b_cart_2: symmetrized to the space group.
    u_cart_p1 = adptbx.random_u_cart(u_scale=5, u_min=5)
    u_star_p1 = adptbx.u_cart_as_u_star(uc, u_cart_p1)
    b_cart_1 = adptbx.u_star_as_u_cart(uc, u_star_p1)
    b_cart_2 = adptbx.u_star_as_u_cart(uc, sg.average_u_star(u_star=u_star_p1))
    for b_cart in (b_cart_1, b_cart_2):
      # Observations simulated without bulk solvent (k_sol = b_sol = 0).
      f_obs, r_free_flags = \
        get_f_obs_freer(d_min = d_min, k_sol = 0, b_sol = 0, b_cart = b_cart,
          xray_structure = xray_structure)
      fmodel = mmtbx.f_model.manager(
        r_free_flags=r_free_flags,
        f_obs=f_obs,
        xray_structure=xray_structure)
      flag = True
      params = bss.master_params.extract()
      params.number_of_macro_cycles = 3
      params.bulk_solvent = False
      params.anisotropic_scaling = True
      params.k_sol_b_sol_grid_search = False
      params.minimization_k_sol_b_sol = False
      params.minimization_b_cart = True
      params.symmetry_constraints_on_b_cart = flag
      params.max_iterations = 50
      params.min_iterations = 50
      result = bss.bulk_solvent_and_scales(
        fmodel_kbu=fmodel.fmodel_kbu(),
        params=params)
      # Exact recovery when the data were simulated with symmetrized b_cart.
      if (flag == True and approx_equal(b_cart, b_cart_2, out=None)):
        assert approx_equal(result.b_cart(), b_cart, tolerance)
      if (flag == True and approx_equal(b_cart, b_cart_1, out=None)):
        # Unconstrained truth: elements that symmetry forces to zero must
        # refine to zero.
        for u2, ufm in zip(b_cart_2, result.b_cart()):
          if (abs(u2) < 1.e-6):
            assert approx_equal(ufm, 0.0, tolerance)
def apply_back_trace_of_overall_exp_scale_matrix(self, xray_structure=None):
  """Move the isotropic component (trace/3) of the overall anisotropic
  scale b_cart onto the atomic ADPs and b_sol, leaving b_cart trace-free.

  Returns a group_args with the updated xray_structure, the applied shift
  b_adj and the new b_sol/b_cart; returns None when xray_structure is None.
  Also updates self.fmodel_kbu with the new b_sol/b_cart.
  """
  if (xray_structure is None): return None
  k_sol, b_sol, b_cart = self.k_sols(), self.b_sols(), self.b_cart()
  assert len(k_sol) == 1  # XXX Only one mask!
  k_sol = k_sol[0]
  b_sol = b_sol[0]
  #
  xrs = xray_structure
  # NOTE(review): redundant -- xray_structure was already checked above.
  if (xrs is None): return
  b_min = min(b_sol, xrs.min_u_cart_eigenvalue() * adptbx.u_as_b(1.))
  if (b_min < 0): xrs.tidy_us()
  b_iso = (b_cart[0] + b_cart[1] + b_cart[2]) / 3.0
  b_test = b_min + b_iso
  # Enlarge the shift just enough to keep all B values non-negative.
  if (b_test < 0.0): b_adj = b_iso + abs(b_test) + 0.001
  else: b_adj = b_iso
  b_cart_new = [
    b_cart[0] - b_adj, b_cart[1] - b_adj, b_cart[2] - b_adj,
    b_cart[3], b_cart[4], b_cart[5]
  ]
  b_sol_new = b_sol + b_adj
  xrs.shift_us(b_shift=b_adj)
  b_min = min(b_sol_new, xrs.min_u_cart_eigenvalue() * adptbx.u_as_b(1.))
  assert b_min >= 0.0
  xrs.tidy_us()
  #
  assert self.fmodel_kbu
  # NOTE(review): k_masks and k_anisotropic are computed but never used or
  # stored here -- possibly dead code; confirm before removing.
  k_masks = [ext.k_mask(self.fmodel_kbu.ss, k_sol, b_sol_new)]
  u_star = adptbx.u_cart_as_u_star(self.fmodel_kbu.f_obs.unit_cell(),
                                   adptbx.b_as_u(b_cart_new))
  k_anisotropic = ext.k_anisotropic(self.fmodel_kbu.f_obs.indices(), u_star)
  self.fmodel_kbu = self.fmodel_kbu.update(b_sols=[b_sol_new],
                                           b_cart=b_cart_new)
  return group_args(xray_structure=xrs,
                    b_adj=b_adj,
                    b_sol=b_sol_new,
                    b_cart=b_cart_new)
def exercise_05_k_sol_b_sol_only(d_min = 2.0):
  """Refine bulk-solvent parameters with anisotropic scaling disabled and
  verify exact recovery of k_sol, b_sol and b_cart.

  NOTE(review): accesses result.fmodels / result.k_sol(0) / result.b_sol(),
  while a sibling version of this test uses result.fmodel_kbu /
  result.k_sols()[0] / result.b_sols()[0] -- confirm which API the current
  bss.bulk_solvent_and_scales result object provides.
  """
  xray_structure = get_xray_structure_from_file()
  # Ground-truth parameters used to simulate the observations.
  k_sol = 0.33
  b_sol = 34.0
  b_cart = [1,2,3,0,4,0]
  f_obs, r_free_flags = get_f_obs_freer(
    d_min = d_min,
    k_sol = k_sol,
    b_sol = b_sol,
    b_cart = b_cart,
    xray_structure = xray_structure)
  fmodel = mmtbx.f_model.manager(
    r_free_flags = r_free_flags,
    f_obs = f_obs,
    xray_structure = xray_structure)
  params = bss.master_params.extract()
  params.anisotropic_scaling = False
  u_star = adptbx.u_cart_as_u_star(
    fmodel.f_obs().unit_cell(),adptbx.b_as_u(b_cart))
  fmodel_kbu = mmtbx.f_model.manager_kbu(
    f_obs = fmodel.f_obs(),
    f_calc = fmodel.f_calc(),
    f_masks = fmodel.arrays.core.f_masks,
    f_part1 = fmodel.arrays.core.f_part1,
    f_part2 = fmodel.arrays.core.f_part2,
    ss = fmodel.ss,
    u_star = u_star)
  r_work_start = fmodel_kbu.r_factor()
  result = bss.bulk_solvent_and_scales(
    fmodel_kbu = fmodel_kbu,
    params = params)
  r_work = result.fmodels.r_factor()*100.
  # The start model must be measurably wrong for the test to be meaningful.
  assert r_work_start > 0.05
  assert approx_equal(r_work, 0.0, eps = 1.e-6)
  assert approx_equal(result.k_sol(0), k_sol, eps = 1.e-6)
  assert approx_equal(result.b_sol(), b_sol, eps = 1.e-6)
  assert approx_equal(result.b_cart(), b_cart, eps = 1.e-6)
def apply_back_trace_of_overall_exp_scale_matrix(self, xray_structure=None):
  """Move the isotropic component (trace/3) of the overall anisotropic
  scale b_cart onto the atomic ADPs and b_sol, leaving b_cart trace-free.

  Returns a group_args with the updated xray_structure, the applied shift
  b_adj and the new b_sol/b_cart; returns None when xray_structure is None.
  Also updates self.fmodel_kbu with the new b_sol/b_cart.
  """
  if(xray_structure is None): return None
  k_sol, b_sol, b_cart = self.k_sols(), self.b_sols(), self.b_cart()
  assert len(k_sol)==1 # XXX Only one mask!
  k_sol = k_sol[0]
  b_sol = b_sol[0]
  #
  xrs = xray_structure
  # NOTE(review): redundant -- xray_structure was already checked above.
  if(xrs is None): return
  b_min = min(b_sol, xrs.min_u_cart_eigenvalue()*adptbx.u_as_b(1.))
  if(b_min < 0): xrs.tidy_us()
  b_iso = (b_cart[0]+b_cart[1]+b_cart[2])/3.0
  b_test = b_min+b_iso
  # Enlarge the shift just enough to keep all B values non-negative.
  if(b_test < 0.0): b_adj = b_iso + abs(b_test) + 0.001
  else: b_adj = b_iso
  b_cart_new = [b_cart[0]-b_adj,b_cart[1]-b_adj,b_cart[2]-b_adj,
                b_cart[3], b_cart[4], b_cart[5]]
  b_sol_new = b_sol + b_adj
  xrs.shift_us(b_shift = b_adj)
  b_min = min(b_sol_new, xrs.min_u_cart_eigenvalue()*adptbx.u_as_b(1.))
  assert b_min >= 0.0
  xrs.tidy_us()
  #
  assert self.fmodel_kbu
  # NOTE(review): k_masks and k_anisotropic are computed but never used or
  # stored here -- possibly dead code; confirm before removing.
  k_masks = [ext.k_mask(self.fmodel_kbu.ss, k_sol, b_sol_new)]
  u_star=adptbx.u_cart_as_u_star(
    self.fmodel_kbu.f_obs.unit_cell(), adptbx.b_as_u(b_cart_new))
  k_anisotropic = ext.k_anisotropic(self.fmodel_kbu.f_obs.indices(), u_star)
  self.fmodel_kbu = self.fmodel_kbu.update(
    b_sols = [b_sol_new],
    b_cart = b_cart_new)
  return group_args(
    xray_structure = xrs,
    b_adj = b_adj,
    b_sol = b_sol_new,
    b_cart = b_cart_new)
def run_02():
  """Exercise the unit-cell based bulk_solvent.aniso_u_scaler (polynomial
  'a' parameterization) over all Bravais-type space groups, including an
  explicit F(000) term; checks that the fitted scale reproduces the data
  and that the scale at the appended (0,0,0) index is exactly 1.
  """
  time_aniso_u_scaler = 0
  for symbol in sgtbx.bravais_types.acentric + sgtbx.bravais_types.centric:
    #print symbol, "-"*50
    space_group_info = sgtbx.space_group_info(symbol=symbol)
    xrs = random_structure.xray_structure(
      space_group_info=space_group_info,
      elements=["N"] * 100,
      volume_per_atom=50.0,
      random_u_iso=True)
    xrs.scattering_type_registry(table="wk1995")
    # XXX ad a method to adptbx to do this
    point_group = sgtbx.space_group_info(
      symbol=symbol).group().build_derived_point_group()
    adp_constraints = sgtbx.tensor_rank_2_constraints(
      space_group=point_group, reciprocal_space=True)
    # Random u_cart, symmetrized via the independent-parameter round trip.
    u_star = adptbx.u_cart_as_u_star(
      xrs.unit_cell(), adptbx.random_u_cart(u_scale=1, u_min=0.1))
    u_indep = adp_constraints.independent_params(all_params=u_star)
    u_star = adp_constraints.all_params(independent_params=u_indep)
    b_cart_start = adptbx.u_as_b(
      adptbx.u_star_as_u_cart(xrs.unit_cell(), u_star))
    #
    # Subtract the trace so the scale is purely anisotropic.
    tr = (b_cart_start[0] + b_cart_start[1] + b_cart_start[2]) / 3
    b_cart_start = [
      b_cart_start[0] - tr, b_cart_start[1] - tr, b_cart_start[2] - tr,
      b_cart_start[3], b_cart_start[4], b_cart_start[5]
    ]
    # Recomputed trace should now be ~0 (used only by the debug print below).
    tr = (b_cart_start[0] + b_cart_start[1] + b_cart_start[2]) / 3
    #
    #print "Input b_cart :", " ".join(["%8.4f"%i for i in b_cart_start]), "tr:", tr
    # Append the (0,0,0) reflection with its F(000) value to the data set.
    reg = xrs.scattering_type_registry(table="wk1995", d_min=1 / 12)
    f_000 = reg.sum_of_scattering_factors_at_diffraction_angle_0()
    F = xrs.structure_factors(d_min=2.0).f_calc()
    i = F.indices()
    i.append([0, 0, 0])
    d = F.data()
    d.append(f_000)
    F = F.customized_copy(indices=i, data=d)
    u_star = adptbx.u_cart_as_u_star(F.unit_cell(), adptbx.b_as_u(b_cart_start))
    fbc = mmtbx.f_model.ext.k_anisotropic(F.indices(), u_star)
    fc = F.structure_factors_from_scatterers(xray_structure=xrs).f_calc()
    # Synthetic "observations": |Fcalc| times the known anisotropic scale.
    f_obs = F.customized_copy(data=flex.abs(fc.data() * fbc))
    #print bulk_solvent.r_factor(f_obs.data(), fmodel.f_model().data())
    obj = bulk_solvent.aniso_u_scaler(f_model_abs=flex.abs(fc.data()),
      f_obs=f_obs.data(),
      miller_indices=f_obs.indices(),
      unit_cell=f_obs.unit_cell())
    a = obj.a
    ####
    #print "Input a :", " ".join(["%7.3f"%i for i in a])
    # Regenerate observations from the fitted polynomial scale, then refit.
    overall_anisotropic_scale = mmtbx.f_model.ext.k_anisotropic(
      f_obs.indices(), a, f_obs.unit_cell())
    #print bulk_solvent.r_factor(f_obs.data(), fmodel.f_model().data()*overall_anisotropic_scale)
    f_obs = abs(fc)
    f_obs = f_obs.customized_copy(data=f_obs.data() * overall_anisotropic_scale)
    #print bulk_solvent.r_factor(f_obs.data(), fmodel.f_model().data())
    #print bulk_solvent.r_factor(f_obs.data(), fmodel.f_model().data())
    t0 = time.time()
    obj = bulk_solvent.aniso_u_scaler(f_model_abs=flex.abs(fc.data()),
      f_obs=f_obs.data(),
      miller_indices=f_obs.indices(),
      unit_cell=f_obs.unit_cell())
    time_aniso_u_scaler += (time.time() - t0)
    overall_anisotropic_scale = mmtbx.f_model.ext.k_anisotropic(
      f_obs.indices(), obj.a, f_obs.unit_cell())
    assert approx_equal(
      bulk_solvent.r_factor(f_obs.data(),
        fc.data() * overall_anisotropic_scale),
      0.0, 1.e-2)  # XXX seems to be low
    #print "Output a:", " ".join(["%7.3f"%i for i in obj.a])
    assert approx_equal(a, obj.a, 1.e-4)  # XXX can it be smaller?
    # The scale at the appended (0,0,0) index (last element) must be 1.
    assert overall_anisotropic_scale[len(overall_anisotropic_scale) - 1] == 1
  print("Time (aniso_u_scaler only): %6.4f" % time_aniso_u_scaler)
def __init__(self, miller_array, parameters, out=None, n_residues=100, n_bases=0):
  """Massage a Miller array: optional anisotropy removal, outlier rejection,
  and optional artificial twinning or detwinning of the data.

  Fix: the original used Python-2 ``print >> self.out`` statements, which are
  syntax errors under Python 3; they are converted to ``print(..., file=...)``
  calls consistent with the rest of this file. All messages are unchanged.

  :param miller_array: input data; deep-copied, observation type re-applied,
      and merged before any processing
  :param parameters: PHIL-style parameter scope with aniso / outlier /
      symmetry sub-scopes
  :param out: output stream; None -> sys.stdout, "silent" -> null_out()
  :param n_residues: protein residue count estimate for ML aniso scaling
  :param n_bases: nucleic-acid base count estimate for ML aniso scaling

  Sets self.final_array to the processed array (asserted non-None).
  """
  self.params = parameters
  self.miller_array = miller_array.deep_copy().set_observation_type(
    miller_array).merge_equivalents().array()
  self.out = out
  if self.out is None:
    self.out = sys.stdout
  if self.out == "silent":
    self.out = null_out()
  self.no_aniso_array = self.miller_array
  if self.params.aniso.action == "remove_aniso":
    # first perfom aniso scaling
    aniso_scale_and_b = absolute_scaling.ml_aniso_absolute_scaling(
      miller_array=self.miller_array,
      n_residues=n_residues,
      n_bases=n_bases)
    aniso_scale_and_b.p_scale = 0  # set the p_scale back to 0!
    aniso_scale_and_b.show(out=out)
    # now do aniso correction please
    self.aniso_p_scale = aniso_scale_and_b.p_scale
    self.aniso_u_star = aniso_scale_and_b.u_star
    self.aniso_b_cart = aniso_scale_and_b.b_cart
    if self.params.aniso.final_b == "eigen_min":
      b_use = aniso_scale_and_b.eigen_values[2]
    elif self.params.aniso.final_b == "eigen_mean":
      b_use = flex.mean(aniso_scale_and_b.eigen_values)
    elif self.params.aniso.final_b == "user_b_iso":
      assert self.params.aniso.b_iso is not None
      b_use = self.params.aniso.b_iso
    else:
      b_use = 30
    # negative isotropic B puts the chosen overall B back after flattening
    b_cart_aniso_removed = [-b_use, -b_use, -b_use, 0, 0, 0]
    u_star_aniso_removed = adptbx.u_cart_as_u_star(
      miller_array.unit_cell(),
      adptbx.b_as_u(b_cart_aniso_removed))
    ## I do things in two steps, but can easely be done in 1 step
    ## just for clarity, thats all.
    self.no_aniso_array = absolute_scaling.anisotropic_correction(
      self.miller_array, 0.0, aniso_scale_and_b.u_star)
    self.no_aniso_array = absolute_scaling.anisotropic_correction(
      self.no_aniso_array, 0.0, u_star_aniso_removed)
    self.no_aniso_array = self.no_aniso_array.set_observation_type(
      miller_array)
  # that is done now, now we can do outlier detection if desired
  outlier_manager = outlier_rejection.outlier_manager(
    self.no_aniso_array, None, out=self.out)
  self.new_miller_array = self.no_aniso_array
  if self.params.outlier.action == "basic":
    print("Non-outliers found by the basic wilson statistics", file=self.out)
    print("protocol will be written out.", file=self.out)
    basic_array = outlier_manager.basic_wilson_outliers(
      p_basic_wilson=self.params.outlier.parameters.basic_wilson.level,
      return_data=True)
    self.new_miller_array = basic_array
  if self.params.outlier.action == "extreme":
    print("Non-outliers found by the extreme value wilson statistics",
          file=self.out)
    print("protocol will be written out.", file=self.out)
    extreme_array = outlier_manager.extreme_wilson_outliers(
      p_extreme_wilson=self.params.outlier.parameters.extreme_wilson.level,
      return_data=True)
    self.new_miller_array = extreme_array
  if self.params.outlier.action == "beamstop":
    print("Outliers found for the beamstop shadow", file=self.out)
    print("problems detection protocol will be written out.", file=self.out)
    beamstop_array = outlier_manager.beamstop_shadow_outliers(
      level=self.params.outlier.parameters.beamstop.level,
      d_min=self.params.outlier.parameters.beamstop.d_min,
      return_data=True)
    self.new_miller_array = beamstop_array
  if self.params.outlier.action == "None":
    self.new_miller_array = self.no_aniso_array
  # now we can twin or detwin the data if needed
  self.final_array = self.new_miller_array
  if self.params.symmetry.action == "twin":
    alpha = self.params.symmetry.twinning_parameters.fraction
    if (alpha is None):
      raise Sorry("Twin fraction not specified, not twinning data")
    elif not (0 <= alpha <= 0.5):
      raise Sorry("Twin fraction must be between 0 and 0.5.")
    print(file=self.out)
    print("Twinning given data", file=self.out)
    print("-------------------", file=self.out)
    print(file=self.out)
    print("Artifically twinning the data with fraction %3.2f" % alpha,
          file=self.out)
    self.final_array = self.new_miller_array.twin_data(
      twin_law=self.params.symmetry.twinning_parameters.twin_law,
      alpha=alpha).as_intensity_array()
  elif (self.params.symmetry.action == "detwin"):
    twin_law = self.params.symmetry.twinning_parameters.twin_law
    alpha = self.params.symmetry.twinning_parameters.fraction
    if (alpha is None):
      raise Sorry("Twin fraction not specified, not detwinning data")
    elif not (0 <= alpha <= 0.5):
      raise Sorry("Twin fraction must be between 0 and 0.5.")
    print("""
Attempting to detwin data
-------------------------
Detwinning data with:
  - twin law:      %s
  - twin fraciton: %.2f

BE WARNED! DETWINNING OF DATA DOES NOT SOLVE YOUR TWINNING PROBLEM!
PREFERABLY, REFINEMENT SHOULD BE CARRIED OUT AGAINST ORIGINAL DATA ONLY
USING A TWIN SPECIFIC TARGET FUNCTION!
""" % (twin_law, alpha), file=self.out)
    self.final_array = self.new_miller_array.detwin_data(
      twin_law=twin_law,
      alpha=alpha).as_intensity_array()
  assert self.final_array is not None
def run(args, command_name="phenix.tls"):
  """Command-line driver for phenix.tls.

  Parses options, reads the model, collects TLS group selections (from the
  PDB header REMARK 3 records or from parameters), and then either fits TLS
  matrices to B-factors (extract_tls) or folds TLS contributions back into
  individual ADPs (combine_tls), writing the resulting PDB file.

  :param args: command-line argument strings
  :param command_name: program name used in the usage message
  :raises Sorry: on missing/inconsistent inputs
  """
  if (len(args) == 0):
    args = ["--help"]
  usage_fmt = "%s pdb_file [parameters: file or command line string]"
  # NOTE(review): des_fmt appears unused below — kept for compatibility
  des_fmt = "Example: %s model.pdb fit_tls_to.selection='%s' fit_tls_to.selection='%s'"
  command_line = (iotbx_option_parser(
    usage=usage_fmt % command_name,
    description=banner).option(
      "--show_defaults",
      action="store_true",
      help="Do not output to the screen (except errors).").option(
        "--silent",
        action="store_true",
        help="Suppress output to the screen.")).process(args=args)
  #
  log = sys.stdout
  if (not command_line.options.silent):
    utils.print_header("TLS tools", out=log)
  if (command_line.options.show_defaults):
    master_params.show(out=log)
    print(file=log)
    return
  if (not command_line.options.silent):
    print(banner, file=log)
  #
  processed_args = utils.process_command_line_args(
    args=command_line.args, master_params=master_params, log=log)
  reflection_files = processed_args.reflection_files
  if (processed_args.crystal_symmetry is None):
    raise Sorry("No crystal symmetry found.")
  if (len(processed_args.pdb_file_names) == 0):
    raise Sorry("No PDB file found.")
  params = processed_args.params
  if (not command_line.options.silent):
    utils.print_header("Input parameters", out=log)
    params.show(out=log)
  params = params.extract()
  #
  if (processed_args.crystal_symmetry.unit_cell() is None or
      processed_args.crystal_symmetry.space_group() is None):
    raise Sorry("No CRYST1 record found.")
  pdb_combined = iotbx.pdb.combine_unique_pdb_files(
    file_names=processed_args.pdb_file_names)
  pdb_combined.report_non_unique(out=log)
  if (len(pdb_combined.unique_file_names) == 0):
    raise Sorry("No coordinate file given.")
  raw_records = pdb_combined.raw_records
  try:
    pdb_inp = iotbx.pdb.input(source_info=None,
                              lines=flex.std_string(raw_records))
  except ValueError as e:
    raise Sorry("Model format (PDB or mmCIF) error:\n%s" % str(e))
  model = mmtbx.model.manager(
    model_input=pdb_inp,
    restraint_objects=processed_args.cif_objects,
    crystal_symmetry=processed_args.crystal_symmetry,
    log=log)
  if (not command_line.options.silent):
    utils.print_header("TLS groups from PDB file header", out=log)
  pdb_inp_tls = mmtbx.tls.tools.tls_from_pdb_inp(
    remark_3_records=model._model_input.extract_remark_iii_records(3),
    pdb_hierarchy=model.get_hierarchy())
  #
  # collect TLS groups from the header, if any
  tls_groups = []
  if (pdb_inp_tls.tls_present):
    if (pdb_inp_tls.error_string is not None):
      raise Sorry(pdb_inp_tls.error_string)
    pdb_tls = mmtbx.tls.tools.extract_tls_from_pdb(
      pdb_inp_tls=pdb_inp_tls, model=model)
    tls_groups = pdb_tls.pdb_inp_tls.tls_params
  #
  tls_selections_strings = []
  #
  if (len(tls_groups) == 0 and not command_line.options.silent):
    print("No TLS groups found in PDB file header.", file=log)
  else:
    for i_seq, tls_group in enumerate(tls_groups):
      tls_selections_strings.append(tls_group.selection_string)
      if (not command_line.options.silent):
        print("TLS group %d: %s" % (i_seq + 1, tls_group.selection_string),
              file=log)
        mmtbx.tls.tools.show_tls_one_group(tlso=tls_group, out=log)
        print(file=log)
  #
  # selections may come from the header or from parameters, but not both
  if (len(tls_selections_strings) > 0 and len(params.selection) > 0):
    raise Sorry(
      "Two TLS selection sources found: PDB file header and parameters.")
  if (len(params.selection) > 0):
    tls_selections_strings = params.selection
  if ([params.combine_tls, params.extract_tls].count(True) > 1):
    raise Sorry(
      "Cannot simultaneously pereform: combine_tls and extract_tls")
  if ([params.combine_tls, params.extract_tls].count(True) > 0):
    if (len(tls_selections_strings) == 0):
      raise Sorry("No TLS selections found.")
  #
  if (len(tls_selections_strings)):
    if (not command_line.options.silent):
      utils.print_header("TLS groups selections", out=log)
    selections = utils.get_atom_selections(
      model=model,
      selection_strings=tls_selections_strings)
    if (not command_line.options.silent):
      print("Number of TLS groups: ", len(selections), file=log)
      print("Number of atoms: %d" % model.get_number_of_atoms(), file=log)
    n_atoms_in_tls = 0
    for sel_a in selections:
      n_atoms_in_tls += sel_a.size()
    if (not command_line.options.silent):
      print("Number of atoms in TLS groups: %d" % n_atoms_in_tls, file=log)
      print(file=log)
    assert len(tls_selections_strings) == len(selections)
    if (not command_line.options.silent):
      for sel_a, sel_s in zip(selections, tls_selections_strings):
        print("Selection string:\n%s" % sel_s, file=log)
        print("selects %d atoms." % sel_a.size(), file=log)
        print(file=log)
      print("Ready-to-use in phenix.refine:\n", file=log)
      for sel_a, sel_s in zip(selections, tls_selections_strings):
        print(sel_s, file=log)
  #
  # derive the output file name from the input model unless given explicitly
  ofn = params.output_file_name
  if (ofn is None):
    ofn = os.path.splitext(
      os.path.basename(processed_args.pdb_file_names[0]))[0]
    if (len(processed_args.pdb_file_names) > 1):
      ofn = ofn + "_el_al"
    if (params.combine_tls):
      ofn = ofn + "_combine_tls.pdb"
    elif (params.extract_tls):
      ofn = ofn + "_extract_tls.pdb"
    else:
      ofn = None
  if (ofn is not None):
    ofo = open(ofn, "w")
  #
  if (params.extract_tls):
    utils.print_header(
      "Fit TLS matrices to B-factors of selected sets of atoms", out=log)
    tlsos = mmtbx.tls.tools.generate_tlsos(
      selections=selections,
      xray_structure=model.get_xray_structure(),
      value=0.0)
    # cycle through refinement of T, L, S in varying combinations
    for rt, rl, rs in [[1, 0, 1], [1, 1, 1], [0, 1, 1], [1, 0, 0],
                       [0, 1, 0], [0, 0, 1], [1, 1, 1], [0, 0, 1]] * 10:
      tlsos = mmtbx.tls.tools.tls_from_uanisos(
        xray_structure=model.get_xray_structure(),
        selections=selections,
        tlsos_initial=tlsos,
        number_of_macro_cycles=10,
        max_iterations=100,
        refine_T=rt,
        refine_L=rl,
        refine_S=rs,
        enforce_positive_definite_TL=params.enforce_positive_definite_TL,
        verbose=-1,
        out=log)
    mmtbx.tls.tools.show_tls(tlsos=tlsos, out=log)
    # subtract the TLS contribution from each scatterer's aniso ADP
    u_cart_from_tls = mmtbx.tls.tools.u_cart_from_tls(
      sites_cart=model.get_sites_cart(),
      selections=selections,
      tlsos=tlsos)
    unit_cell = model.get_xray_structure().unit_cell()
    for i_seq, sc in enumerate(model.get_xray_structure().scatterers()):
      if (u_cart_from_tls[i_seq] != (0, 0, 0, 0, 0, 0)):
        u_star_tls = adptbx.u_cart_as_u_star(
          unit_cell, tuple(u_cart_from_tls[i_seq]))
        sc.u_star = tuple(
          flex.double(sc.u_star) - flex.double(u_star_tls))
    for sel in selections:
      model.get_xray_structure().convert_to_isotropic(selection=sel)
    mmtbx.tls.tools.remark_3_tls(
      tlsos=tlsos,
      selection_strings=tls_selections_strings,
      out=ofo)
  #
  if (params.combine_tls):
    utils.print_header("Combine B_tls with B_residual", out=log)
    mmtbx.tls.tools.combine_tls_and_u_local(
      xray_structure=model.get_xray_structure(),
      tls_selections=selections,
      tls_groups=tls_groups)
    print("All done.", file=log)
  #
  if (ofn is not None):
    utils.print_header("Write output PDB file %s" % ofn, out=log)
    model.set_sites_cart_from_xrs()
    pdb_str = model.model_as_pdb()
    ofo.write(pdb_str)
    ofo.close()
    print("All done.", file=log)
def exercise_rigid_bond():
  """Exercise rigid-bond ADP restraints.

  Checks: proxy construction; delta_z/residual/gradients against hand-computed
  values; construction from adp_restraint_params + proxy; agreement with the
  low-level rigid_bond_pair; shared-proxy residual/delta/gradient sums;
  finite-difference gradient check; and rotational frame invariance of the
  residual.
  """
  i_seqs = (1,2)
  weight = 1
  p = adp_restraints.rigid_bond_proxy(i_seqs=i_seqs,weight=weight)
  assert p.i_seqs == i_seqs
  assert p.weight == weight
  sites = ((1,2,3),(2,3,4))
  u_cart = ((1,2,3,4,5,6),
            (3,4,5,6,7,8))
  expected_gradients = ((-4, -4, -4, -8, -8, -8),
                        (4, 4, 4, 8, 8, 8))
  r = adp_restraints.rigid_bond(sites=sites, u_cart=u_cart, weight=weight)
  assert r.weight == weight
  assert approx_equal(r.delta_z(), -6)
  assert approx_equal(r.residual(), 36)
  assert approx_equal(r.gradients(), expected_gradients)
  sites_cart = flex.vec3_double(((1,2,3),(2,5,4),(3,4,5)))
  u_cart = flex.sym_mat3_double(((1,2,3,4,5,6),
                                 (2,3,3,5,7,7),
                                 (3,4,5,3,7,8)))
  r = adp_restraints.rigid_bond(
    adp_restraint_params(sites_cart=sites_cart, u_cart=u_cart),
    proxy=p)
  assert approx_equal(r.weight, weight)
  # cross-check against the rigid_bond_pair interface (fractional sites)
  unit_cell = uctbx.unit_cell([15,25,30,90,90,90])
  sites_frac = unit_cell.fractionalize(sites_cart=sites_cart)
  u_star = flex.sym_mat3_double([
    adptbx.u_cart_as_u_star(unit_cell, u_cart_i)
    for u_cart_i in u_cart])
  pair = adp_restraints.rigid_bond_pair(sites_frac[1],
                                        sites_frac[2],
                                        u_star[1],
                                        u_star[2],
                                        unit_cell)
  assert approx_equal(pair.delta_z(), abs(r.delta_z()))
  assert approx_equal(pair.z_12(), r.z_12())
  assert approx_equal(pair.z_21(), r.z_21())
  #
  # shared-proxy sums: two identical proxies double residuals and gradients
  gradients_aniso_cart = flex.sym_mat3_double(sites_cart.size(), (0,0,0,0,0,0))
  gradients_iso = flex.double(sites_cart.size(), 0)
  proxies = adp_restraints.shared_rigid_bond_proxy([p,p])
  params = adp_restraint_params(sites_cart=sites_cart, u_cart=u_cart)
  residuals = adp_restraints.rigid_bond_residuals(params, proxies=proxies)
  assert approx_equal(residuals, (r.residual(),r.residual()))
  deltas = adp_restraints.rigid_bond_deltas(params, proxies=proxies)
  assert approx_equal(deltas, (r.delta_z(),r.delta_z()))
  residual_sum = adp_restraints.rigid_bond_residual_sum(
    params=params,
    proxies=proxies,
    gradients_aniso_cart=gradients_aniso_cart)
  assert approx_equal(residual_sum, 2 * r.residual())
  for g,e in zip(gradients_aniso_cart[1:3], r.gradients()):
    assert approx_equal(g, matrix.col(e)*2)
  fd_grads_aniso, fd_grads_iso = finite_difference_gradients(
    restraint_type=adp_restraints.rigid_bond,
    proxy=p,
    sites_cart=sites_cart,
    u_cart=u_cart)
  for g,e in zip(gradients_aniso_cart, fd_grads_aniso):
    assert approx_equal(g, matrix.col(e)*2)
  #
  # check frame invariance of residual
  #
  u_cart_1 = matrix.sym(sym_mat3=(0.1,0.2,0.05,0.03,0.02,0.01))
  u_cart_2 = matrix.sym(sym_mat3=(0.21,0.32,0.11,0.02,0.02,0.07))
  u_cart = (u_cart_1.as_sym_mat3(),u_cart_2.as_sym_mat3())
  site_cart_1 = matrix.col((1,2,3))
  site_cart_2 = matrix.col((3,1,4.2))
  sites = (tuple(site_cart_1),tuple(site_cart_2))
  a = adp_restraints.rigid_bond(sites=sites, u_cart=u_cart, weight=1)
  expected_residual = a.residual()
  gen = flex.mersenne_twister()
  for i in range(20):
    # rotate sites and ADPs together; residual must be unchanged
    R = matrix.rec(gen.random_double_r3_rotation_matrix(),(3,3))
    u_cart_1_rot = R * u_cart_1 * R.transpose()
    u_cart_2_rot = R * u_cart_2 * R.transpose()
    u_cart = (u_cart_1_rot.as_sym_mat3(),u_cart_2_rot.as_sym_mat3())
    site_cart_1_rot = R * site_cart_1
    site_cart_2_rot = R * site_cart_2
    sites = (tuple(site_cart_1_rot),tuple(site_cart_2_rot))
    a = adp_restraints.rigid_bond(
      sites=sites,
      u_cart=u_cart,
      weight=1)
    assert approx_equal(a.residual(), expected_residual)
def run_0(symbol="C 2"):
  """Exercise the full anisotropic/isotropic/mask scaling pipeline (scaler.run)
  on synthetic error-free data for one space group.

  Builds a random structure, fabricates f_obs = |f_model| with known
  per-bin k_isotropic, k_mask and a symmetry-constrained k_anisotropic,
  then asserts the scaler recovers the data to near-zero R factors.

  :param symbol: space group symbol for the test structure
  """
  space_group_info = sgtbx.space_group_info(symbol=symbol)
  xrs = random_structure.xray_structure(
    space_group_info=space_group_info,
    elements=["N"] * 50,
    volume_per_atom=100.0,
    random_u_iso=True)
  #
  b_cart = adptbx.random_traceless_symmetry_constrained_b_cart(
    crystal_symmetry=xrs.crystal_symmetry())
  u_star = adptbx.u_cart_as_u_star(xrs.unit_cell(), adptbx.b_as_u(b_cart))
  #
  F = xrs.structure_factors(d_min=1.5).f_calc()
  k_anisotropic = mmtbx.f_model.ext.k_anisotropic(F.indices(), u_star)
  #
  # resolution-bin selections used both to fabricate and to refit the scales
  bin_selections = []
  F.setup_binner(reflections_per_bin=50)
  for i_bin in F.binner().range_used():
    sel = F.binner().selection(i_bin)
    bin_selections.append(sel)
  #
  d_spacings = F.d_spacings().data()
  ss = 1. / flex.pow2(d_spacings) / 4.  # (sin(theta)/lambda)^2
  k_mask_tmp = mmtbx.f_model.ext.k_mask(ss, 0.35, 80.)
  k_mask = flex.double(F.data().size(), 0)
  k_isotropic = flex.double(F.data().size(), 0)
  for s in bin_selections:
    d = d_spacings.select(s)
    # constant-per-bin mask scale and a random integer isotropic scale
    k_mask.set_selected(s, flex.mean(k_mask_tmp.select(s)))
    k_isotropic.set_selected(s, random.randint(1, 10))
  #
  fmodel = mmtbx.f_model.manager(
    xray_structure=xrs,
    f_obs=abs(F),
    k_isotropic=k_isotropic,
    k_anisotropic=k_anisotropic,
    k_mask=k_mask)
  f_calc = fmodel.f_calc()
  f_masks = fmodel.f_masks()
  f_model = fmodel.f_model()
  f_obs = abs(f_model)  # error-free "observations"
  r_free_flags = f_obs.generate_r_free_flags(use_lattice_symmetry=False)
  #
  assert approx_equal(bulk_solvent.r_factor(f_obs.data(), f_model.data()), 0)
  aso = scaler.run(
    f_obs=f_obs,
    f_calc=f_calc,
    f_mask=f_masks,
    r_free_flags=r_free_flags,
    bin_selections=bin_selections,
    number_of_cycles=500,
    auto_convergence_tolerance=1.e-9,
    ss=ss,
    try_poly=True,
    try_expanal=True,
    try_expmin=True,
    verbose=True)
  print("r_f:", aso.r_final)
  print("r_l:", aso.r_low)
  print("r_h:", aso.r_high)
  assert aso.r_final < 0.0009, [aso.r_final, 0.0009]
  assert aso.r_low < 0.0017, [aso.r_low, 0.0017]
  assert aso.r_high < 0.0006, [aso.r_high, 0.0006]
  # r_factor with and without the scale argument must agree here
  assert approx_equal(
    bulk_solvent.r_factor(f_obs.data(), abs(aso.core.f_model).data(), 1),
    bulk_solvent.r_factor(f_obs.data(), abs(aso.core.f_model).data()))
def build_scatterers(self,
                     elements,
                     sites_frac=None,
                     grid=None,
                     t_centre_of_inversion=None):
  """Add one random scatterer per element to this structure.

  Fix: the original used ``sf_dict.has_key(element)``, which was removed in
  Python 3; replaced by the ``in`` operator (same semantics on Python 2/3).

  :param elements: scattering type (element symbol) for each new scatterer
  :param sites_frac: optional explicit fractional sites (one per element);
      if None, random special-position-aware sites are generated
  :param grid: passed through to random_sites when generating sites
  :param t_centre_of_inversion: passed through to random_sites
  """
  existing_sites = [scatterer.site for scatterer in self.scatterers()]
  if (sites_frac is None):
    all_sites = random_sites(
      special_position_settings=self,
      existing_sites=existing_sites,
      n_new=len(elements),
      min_hetero_distance=self.min_distance,
      general_positions_only=self.general_positions_only,
      grid=grid,
      t_centre_of_inversion=t_centre_of_inversion)
  else:
    assert len(sites_frac) == len(elements)
    all_sites = existing_sites + list(sites_frac)
  assert len(all_sites) <= self.n_scatterers
  # cache one scattering-factor approximation per distinct element
  sf_dict = {}
  for element in elements:
    if (element not in sf_dict):  # was dict.has_key(), removed in Python 3
      sf_dict[element] = eltbx.xray_scattering.best_approximation(element)
  fp = 0
  fdp = 0
  n_existing = self.scatterers().size()
  i_label = n_existing
  for element, site in zip(elements, all_sites[n_existing:]):
    i_label += 1
    scatterer = xray.scatterer(
      label=element + str(i_label),
      scattering_type=element,
      site=site)
    site_symmetry = scatterer.apply_symmetry(
      self.unit_cell(),
      self.space_group(),
      self.min_distance_sym_equiv())
    if (self.random_f_prime_d_min):
      # negative f' bounded by the scattering factor at the resolution limit
      f0 = sf_dict[element].at_d_star_sq(1./self.random_f_prime_d_min**2)
      assert f0 > 0
      fp = -f0 * random.random() * self.random_f_prime_scale
    if (self.random_f_double_prime):
      f0 = sf_dict[element].at_d_star_sq(0)
      fdp = f0 * random.random() * self.random_f_double_prime_scale
    scatterer.fp = fp
    scatterer.fdp = fdp
    if (self.use_u_iso_):
      scatterer.flags.set_use_u_iso_only()
      u_iso = self.u_iso
      if (not u_iso and self.random_u_iso):
        u_iso = random.random() * self.random_u_iso_scale \
              + self.random_u_iso_min
      scatterer.u_iso = u_iso
    if (self.use_u_aniso):
      scatterer.flags.set_use_u_aniso_only()
      # retry until the symmetry-averaged U is safely positive definite
      run_away_counter = 0
      while 1:
        run_away_counter += 1
        assert run_away_counter < 100
        u_cart = adptbx.random_u_cart(u_scale=self.random_u_cart_scale)
        scatterer.u_star = site_symmetry.average_u_star(
          adptbx.u_cart_as_u_star(self.unit_cell(), u_cart))
        u_cart = adptbx.u_star_as_u_cart(self.unit_cell(), scatterer.u_star)
        eigenvalues = adptbx.eigenvalues(u_cart)
        if (min(eigenvalues) > 0.001):
          break
    if (self.random_occupancy):
      scatterer.occupancy = self.random_occupancy_min \
        + (1-self.random_occupancy_min)*random.random()
    self.add_scatterer(scatterer)
def exercise_interface():
  """Exercise the adptbx conversion/utility interface.

  Fixes: ``xrange`` -> ``range`` and ``except RuntimeError, e`` ->
  ``except RuntimeError as e`` (both Python-2-only syntax); one exact
  duplicate recomputation of ``uf`` removed (same call, same arguments,
  no intervening state change). All assertions are unchanged.
  """
  episq = 8*(math.pi**2)  # 8*pi^2 converts U (A^2) to B
  assert approx_equal(adptbx.u_as_b(2.3), 2.3*episq)
  assert approx_equal(adptbx.b_as_u(adptbx.u_as_b(2.3)), 2.3)
  u = (3,4,9, 2,1,7)
  assert approx_equal(adptbx.u_as_b(u), [x*episq for x in u])
  assert approx_equal(adptbx.b_as_u(adptbx.u_as_b(u)), u)
  uc = uctbx.unit_cell((5,4,7,80,110,100))
  # round-trip every forward/backward tensor conversion pair
  for fw,bw in ((adptbx.u_cif_as_u_star, adptbx.u_star_as_u_cif),
                (adptbx.u_cart_as_u_star, adptbx.u_star_as_u_cart),
                (adptbx.u_cart_as_u_cif, adptbx.u_cif_as_u_cart),
                (adptbx.u_cart_as_beta, adptbx.beta_as_u_cart),
                (adptbx.u_cif_as_beta, adptbx.beta_as_u_cif)):
    assert approx_equal(bw(uc, fw(uc, u)), u)
  assert approx_equal(adptbx.beta_as_u_star(adptbx.u_star_as_beta(u)), u)
  assert approx_equal(adptbx.u_cart_as_u_iso(adptbx.u_iso_as_u_cart(2.3)), 2.3)
  for fw,bw in ((adptbx.u_iso_as_u_star, adptbx.u_star_as_u_iso),
                (adptbx.u_iso_as_u_cif, adptbx.u_cif_as_u_iso),
                (adptbx.u_iso_as_beta, adptbx.beta_as_u_iso)):
    assert approx_equal(bw(uc, fw(uc, 2.3)), 2.3)
  # factor_* helpers: split a tensor into isotropic + residual parts
  fc = adptbx.factor_u_cart_u_iso(u_cart=u)
  assert approx_equal(fc.u_iso, adptbx.u_cart_as_u_iso(u))
  assert approx_equal(
    fc.u_cart_minus_u_iso,
    [uii-fc.u_iso for uii in u[:3]]+list(u[3:]))
  f = adptbx.factor_u_star_u_iso(
    unit_cell=uc, u_star=adptbx.u_cart_as_u_star(uc, u))
  assert approx_equal(f.u_iso, fc.u_iso)
  assert approx_equal(
    f.u_star_minus_u_iso,
    adptbx.u_cart_as_u_star(uc, fc.u_cart_minus_u_iso))
  f = adptbx.factor_u_cif_u_iso(
    unit_cell=uc, u_cif=adptbx.u_cart_as_u_cif(uc, u))
  assert approx_equal(f.u_iso, fc.u_iso)
  assert approx_equal(
    f.u_cif_minus_u_iso,
    adptbx.u_cart_as_u_cif(uc, fc.u_cart_minus_u_iso))
  f = adptbx.factor_beta_u_iso(
    unit_cell=uc, beta=adptbx.u_cart_as_beta(uc, u))
  assert approx_equal(f.u_iso, fc.u_iso)
  assert approx_equal(
    f.beta_minus_u_iso,
    adptbx.u_cart_as_beta(uc, fc.u_cart_minus_u_iso))
  # Debye-Waller factors in all parameterizations must agree
  assert approx_equal(adptbx.debye_waller_factor_b_iso(0.25,2.3),
                      math.exp(-2.3*0.25))
  assert approx_equal(adptbx.debye_waller_factor_u_iso(0.25,2.3),
                      math.exp(-2.3*episq*0.25))
  assert approx_equal(adptbx.debye_waller_factor_b_iso(uc, (1,2,3), 2.3),
                      adptbx.debye_waller_factor_u_iso(uc, (1,2,3), 2.3/episq))
  u_star = adptbx.u_cart_as_u_star(uc, u)
  dw = adptbx.debye_waller_factor_u_star((1,2,3), u_star)
  assert approx_equal(dw, adptbx.debye_waller_factor_beta((1,2,3),
    adptbx.u_star_as_beta(u_star)))
  assert approx_equal(dw, adptbx.debye_waller_factor_u_cif(uc, (1,2,3),
    adptbx.u_star_as_u_cif(uc, u_star)))
  assert approx_equal(dw, adptbx.debye_waller_factor_u_cart(uc, (1,2,3),
    adptbx.u_star_as_u_cart(uc, u_star)))
  for e in adptbx.eigenvalues(u):
    check_eigenvalue(u, e)
  # positive-definiteness tests with tolerance overloads
  assert not adptbx.is_positive_definite(adptbx.eigenvalues(u))
  assert not adptbx.is_positive_definite(adptbx.eigenvalues(u), 0)
  assert adptbx.is_positive_definite(adptbx.eigenvalues(u), 1.22)
  assert not adptbx.is_positive_definite(u)
  assert not adptbx.is_positive_definite(u, 0)
  assert adptbx.is_positive_definite(u, 1.22)
  up = (0.534, 0.812, 0.613, 0.0166, 0.134, -0.0124)
  s = adptbx.eigensystem(up)
  assert approx_equal(s.values(), (0.813132, 0.713201, 0.432668))
  for i in range(3):
    check_eigenvector(up, s.values()[i], s.vectors(i))
  c = (1,2,3, 3,-4,5, 4,5,6)
  v = (198,18,1020,116,447,269)
  assert approx_equal(adptbx.c_u_c_transpose(c, u), v)
  assert approx_equal(adptbx.eigensystem(u).values(),
    (14.279201519086316, 2.9369143826320214, -1.2161159017183376))
  s = adptbx.eigensystem(up)
  # out-of-range eigenvector index must raise
  try:
    s.vectors(4)
  except RuntimeError as e:
    assert str(e).endswith("Index out of range.")
  else:
    raise Exception_expected
  uf = adptbx.eigenvalue_filtering(u_cart=u, u_min=0)
  assert approx_equal(uf, (3.0810418, 4.7950710, 9.3400030,
                           1.7461615, 1.1659954, 6.4800706))
  uf = adptbx.eigenvalue_filtering(u_cart=u, u_min=0, u_max=3)
  assert approx_equal(uf, (2.7430890, 1.0378360, 2.1559895,
                           0.6193215, -0.3921632, 1.2846854))
  assert approx_equal(scitbx.linalg.eigensystem.real_symmetric(u).values(),
                      (14.2792015, 2.9369144, -1.2161159))
  assert approx_equal(scitbx.linalg.eigensystem.real_symmetric(uf).values(),
                      (3, 2.9369144, 0))
  uf = adptbx.eigenvalue_filtering(up)
  assert approx_equal(uf, up)
def exercise_negative_parameters(verbose=0):
  """Check direct vs. FFT structure factors (and gradients) with negative
  scattering parameters (occupancy, gaussian, u_iso, u_aniso, f').

  Fixes: ``xrange(7)`` -> ``range(7)`` and the Python-2
  ``print "correlation..."`` statement -> a ``print(...)`` call; both are
  syntax errors under Python 3. No other changes.

  :param verbose: if truthy, print the direct/FFT correlation per trial
  """
  structure_default = xray.structure(
    crystal_symmetry=crystal.symmetry(
      unit_cell=((10,13,17,75,80,85)),
      space_group_symbol="P 1"),
    scatterers=flex.xray_scatterer([
      xray.scatterer(label="C", site=(0,0,0), u=0.25)]))
  negative_gaussian = eltbx.xray_scattering.gaussian((1,2), (2,3), -4)
  for i_trial in range(7):
    # each trial perturbs one parameter into a negative/unusual regime
    structure = structure_default.deep_copy_scatterers()
    scatterer = structure.scatterers()[0]
    if (i_trial == 1):
      scatterer.occupancy *= -1
    elif (i_trial == 2):
      structure.scattering_type_registry(custom_dict={"C": negative_gaussian})
    elif (i_trial == 3):
      scatterer.u_iso *= -1
    elif (i_trial == 4):
      u_cart = adptbx.random_u_cart(u_scale=1, u_min=-1.1)
      assert max(adptbx.eigenvalues(u_cart)) < 0
      u_star = adptbx.u_cart_as_u_star(structure.unit_cell(), u_cart)
      scatterer.u_star = u_star
      scatterer.flags.set_use_u_aniso_only()
    elif (i_trial == 5):
      scatterer.fp = -10
    elif (i_trial == 6):
      scatterer.fp = -3
    f_direct = structure.structure_factors(
      d_min=1, algorithm="direct", cos_sin_table=False).f_calc()
    f_fft = structure.structure_factors(
      d_min=1, algorithm="fft",
      quality_factor=1.e8, wing_cutoff=1.e-10).f_calc()
    if (i_trial == 2):
      assert negative_gaussian.at_d_star_sq(f_fft.d_star_sq().data()).all_lt(0)
    if (i_trial in [5,6]):
      # f' shifts the effective scattering: all-negative for fp=-10,
      # sign-changing across resolution for fp=-3
      f = structure.scattering_type_registry().gaussian_not_optional(
        scattering_type="C").at_d_star_sq(f_fft.d_star_sq().data())
      if (i_trial == 5):
        assert flex.max(f) + scatterer.fp < 0
      else:
        assert flex.max(f) + scatterer.fp > 0
        assert flex.min(f) + scatterer.fp < 0
    cc = flex.linear_correlation(
      abs(f_direct).data(),
      abs(f_fft).data()).coefficient()
    if (cc < 0.999):
      raise AssertionError("i_trial=%d, correlation=%.6g" % (i_trial, cc))
    elif (0 or verbose):
      print("correlation=%.6g" % cc)
    #
    # very simple test of gradient calculations with negative parameters
    structure_factor_gradients = \
      cctbx.xray.structure_factors.gradients(
        miller_set=f_direct,
        cos_sin_table=False)
    target_functor = xray.target_functors.intensity_correlation(
      f_obs=abs(f_direct))
    target_result = target_functor(f_fft, True)
    xray.set_scatterer_grad_flags(
      scatterers=structure.scatterers(),
      site=True,
      u_iso=True,
      u_aniso=True,
      occupancy=True,
      fp=True,
      fdp=True)
    for algorithm in ["direct", "fft"]:
      grads = structure_factor_gradients(
        xray_structure=structure,
        u_iso_refinable_params=None,
        miller_set=f_direct,
        d_target_d_f_calc=target_result.derivatives(),
        n_parameters=structure.n_parameters(),
        algorithm=algorithm).packed()
def exercise():
  """Test prepare_map_for_docking using data with known errors."""
  # Generate two half-maps with same anisotropic signal, independent anisotropic
  # noise. Test to see how well optimal map coefficients are estimated.

  # Start by working out how large the padding will have to be so that
  # starting automatically-generated map will be large enough to contain
  # sphere with room to spare around model.
  n_residues = 25
  d_min = 2.5
  from cctbx.development.create_models_or_maps import generate_model
  test_model = generate_model(n_residues=n_residues)
  sites_cart = test_model.get_sites_cart()
  cart_min = flex.double(sites_cart.min())
  cart_max = flex.double(sites_cart.max())
  box_centre = (cart_min + cart_max) / 2
  dsqrmax = flex.max((sites_cart - tuple(box_centre)).norms())**2
  model_radius = math.sqrt(dsqrmax)
  min_model_extent = flex.min(cart_max - cart_min)
  pad_to_allow_cube = model_radius - min_model_extent / 2
  # Extra space needed for eventual masking
  boundary_to_smoothing_ratio = 2
  soft_mask_radius = d_min
  padding = soft_mask_radius * boundary_to_smoothing_ratio
  box_cushion = padding + pad_to_allow_cube + d_min  # A bit extra
  # Make map in box big enough to cut out cube containing sphere
  mmm = map_model_manager()
  mmm.generate_map(
    n_residues=n_residues,
    d_min=d_min,
    k_sol=0.1,
    b_sol=50.,
    box_cushion=box_cushion)
  # Keep copy of perfect map for tests of success
  mm_start = mmm.map_manager().deep_copy()
  mmm.add_map_manager_by_id(mm_start, 'perfect_map')
  model = mmm.model()
  sites_cart = model.get_sites_cart()
  cart_min = flex.double(sites_cart.min())
  cart_max = flex.double(sites_cart.max())
  # Turn starting map into map coeffs for the signal
  ucpars = mmm.map_manager().unit_cell().parameters()
  d_max = max(ucpars[0], ucpars[1], ucpars[2])
  start_map_coeffs = mmm.map_as_fourier_coefficients(
    d_min=d_min, d_max=d_max)
  # Apply anisotropic scaling to map coeffs
  b_target = (100., 200., 300., -50., 50., 100.)
  u_star_s = adptbx.u_cart_as_u_star(
    start_map_coeffs.unit_cell(), adptbx.b_as_u(b_target))
  # b_model = (30.,30.,30.,0.,0.,0.) # All atoms in model have B=30
  # b_expected = list((flex.double(b_target) + flex.double(b_model)))
  scaled_map_coeffs = start_map_coeffs.apply_debye_waller_factors(
    u_star=u_star_s)
  # Generate map coefficient errors for first half-map from complex normal
  # distribution
  b_target_e = (0., 0., 0., -50., -50., 100.)  # Anisotropy for error terms
  u_star_e = adptbx.u_cart_as_u_star(
    start_map_coeffs.unit_cell(), adptbx.b_as_u(b_target_e))
  se_target = 10.  # Target for SigmaE variance term
  rsigma = math.sqrt(se_target / 2.)
  jj = 0. + 1.j  # Define I for generating complex numbers
  random_complexes1 = flex.complex_double()
  ncoeffs = start_map_coeffs.size()
  random.seed(123457)  # Make runs reproducible
  for i in range(ncoeffs):
    random_complexes1.append(
      random.gauss(0., rsigma) + random.gauss(0., rsigma) * jj)
  rc1_miller = start_map_coeffs.customized_copy(data=random_complexes1)
  mc1_delta = rc1_miller.apply_debye_waller_factors(u_star=u_star_e)
  map1_coeffs = scaled_map_coeffs.customized_copy(
    data=scaled_map_coeffs.data() + mc1_delta.data())
  # Repeat for second half map with independent errors from same distribution
  random_complexes2 = flex.complex_double()
  for i in range(ncoeffs):
    random_complexes2.append(
      random.gauss(0., rsigma) + random.gauss(0., rsigma) * jj)
  rc2_miller = start_map_coeffs.customized_copy(data=random_complexes2)
  mc2_delta = rc2_miller.apply_debye_waller_factors(u_star=u_star_e)
  map2_coeffs = scaled_map_coeffs.customized_copy(
    data=scaled_map_coeffs.data() + mc2_delta.data())
  # mmm.write_model("fake_map.pdb")
  mmm.add_map_from_fourier_coefficients(map1_coeffs, map_id='map_manager_1')
  mmm.add_map_from_fourier_coefficients(map2_coeffs, map_id='map_manager_2')
  # Replace original map_manager with mean of half-maps
  mm_mean_data = (mmm.map_manager_1().map_data() +
                  mmm.map_manager_2().map_data()) / 2
  mmm.map_manager().set_map_data(map_data=mm_mean_data)
  # Add mask map for ordered component of map
  protein_mw = n_residues * 110.  # MW from model would be better...
  nucleic_mw = None
  mask_id = 'ordered_volume_mask'
  add_ordered_volume_mask(
    mmm, d_min,
    protein_mw=protein_mw,
    nucleic_mw=nucleic_mw,
    map_id_out=mask_id)
  box_centre = tuple(flex.double((ucpars[0], ucpars[1], ucpars[2])) / 2)
  # Now refine to assess parameters describing map errors
  results = assess_cryoem_errors(
    mmm, d_min,
    sphere_cent=tuple(box_centre),
    radius=model_radius + d_min,
    verbosity=0)
  # resultsdict = results.resultsdict
  # b_refined_a = resultsdict["a_baniso"]
  # print("\nIdeal A tensor as Baniso: ", b_expected)
  # print("Refined A tensor as Baniso", b_refined_a)
  # Note that all maps have been cut out with a spherical mask, so compare using these
  new_mmm = results.new_mmm
  perfect_mapCC = new_mmm.map_model_cc(map_id='perfect_map')
  mapCC = new_mmm.map_model_cc(map_id='map_manager_wtd')  # Achieved map
  start_mapCC = new_mmm.map_model_cc()  # Starting map with noise and anisotropy
  mc_perfect = new_mmm.map_as_fourier_coefficients(
    d_min=d_min, d_max=d_max, map_id='perfect_map')
  mc_achieved = new_mmm.map_as_fourier_coefficients(
    d_min=d_min, d_max=d_max, map_id='map_manager_wtd')
  # Compare with results using theoretically perfect error parameters to compute
  # ideal map coefficients.
  sigmaS_terms = flex.pow2(get_power_spectrum(
    mc_perfect))  # Actual signal power before anisotropy
  mc_start = new_mmm.map_as_fourier_coefficients(d_min=d_min, d_max=d_max)
  eE_ideal = mc_start.deep_copy()
  ones_array = flex.double(eE_ideal.size(), 1)
  all_ones = eE_ideal.customized_copy(data=ones_array)
  u_star_s2 = tuple(flex.double(u_star_s) * 2.)
  # Square anisotropy for signal power calc
  sigmaS_terms = sigmaS_terms * all_ones.apply_debye_waller_factors(
    u_star=u_star_s2).data()  # Corrected for anisotropy
  u_star_e2 = tuple(flex.double(u_star_e) * 2.)
  sigmaE_terms = all_ones.apply_debye_waller_factors(
    u_star=u_star_e2).data() * se_target
  scale_terms = 1. / flex.sqrt(sigmaS_terms + sigmaE_terms / 2.)
  dobs_terms = 1. / flex.sqrt(1. + sigmaE_terms / (2 * sigmaS_terms))
  mc_ideal = eE_ideal.customized_copy(
    data=eE_ideal.data() * scale_terms * dobs_terms)
  # write_mtz(mc_achieved,"achieved_map.mtz","achieved")
  # write_mtz(mc_ideal,"ideal_map.mtz","ideal")
  mapCC_ideal_achieved = mc_ideal.map_correlation(other=mc_achieved)
  # print("CC between ideal and achieved maps:",mapCC_ideal_achieved)
  assert (mapCC_ideal_achieved > 0.92)
  new_mmm.add_map_from_fourier_coefficients(mc_ideal, map_id='ideal_map')
  ideal_mapCC = new_mmm.map_model_cc(map_id='ideal_map')
  # print("Perfect, starting, ideal and achieved mapCC: ", perfect_mapCC, start_mapCC, ideal_mapCC, mapCC)
  assert (mapCC > 0.98 * ideal_mapCC)
def __init__(self, miller_array, parameters, out=None, n_residues=100, n_bases=0):
  """Massage a merged Miller array according to *parameters*.

  Optionally removes anisotropy (replacing it with a flat B), rejects
  outliers by one of several Wilson-statistics protocols, and finally
  twins or detwins the data.  The result is left in self.final_array;
  intermediate stages are kept in self.no_aniso_array and
  self.new_miller_array.

  :param miller_array: input data; deep-copied and merged internally
  :param parameters: PHIL extract with .aniso, .outlier and .symmetry scopes
  :param out: output stream; None -> sys.stdout, "silent" -> null_out()
  :param n_residues: protein residue count for ML aniso scaling
  :param n_bases: nucleic-acid base count for ML aniso scaling
  """
  self.params=parameters
  self.miller_array=miller_array.deep_copy().set_observation_type(
    miller_array).merge_equivalents().array()
  # Resolve the output stream before anything writes to it.
  self.out = out
  if self.out is None:
    self.out = sys.stdout
  if self.out == "silent":
    self.out = null_out()

  self.no_aniso_array = self.miller_array
  if self.params.aniso.action == "remove_aniso":
    # first perfom aniso scaling
    aniso_scale_and_b = absolute_scaling.ml_aniso_absolute_scaling(
      miller_array = self.miller_array,
      n_residues = n_residues,
      n_bases = n_bases)
    aniso_scale_and_b.p_scale = 0 # set the p_scale back to 0!
    # BUG FIX: show() previously received the raw constructor argument
    # `out`; with out == "silent" that passed the *string* "silent" as a
    # stream and crashed.  Use the resolved stream instead.
    aniso_scale_and_b.show(out=self.out)
    # now do aniso correction please
    self.aniso_p_scale = aniso_scale_and_b.p_scale
    self.aniso_u_star  = aniso_scale_and_b.u_star
    self.aniso_b_cart  = aniso_scale_and_b.b_cart
    # Choose the isotropic B that replaces the removed anisotropy.
    if self.params.aniso.final_b == "eigen_min":
      b_use=aniso_scale_and_b.eigen_values[2]
    elif self.params.aniso.final_b == "eigen_mean" :
      b_use=flex.mean(aniso_scale_and_b.eigen_values)
    elif self.params.aniso.final_b == "user_b_iso":
      assert self.params.aniso.b_iso is not None
      b_use=self.params.aniso.b_iso
    else:
      b_use = 30

    b_cart_aniso_removed = [ -b_use, -b_use, -b_use, 0, 0, 0]
    u_star_aniso_removed = adptbx.u_cart_as_u_star(
      miller_array.unit_cell(),
      adptbx.b_as_u( b_cart_aniso_removed ) )
    ## I do things in two steps, but can easely be done in 1 step
    ## just for clarity, thats all.
    self.no_aniso_array = absolute_scaling.anisotropic_correction(
      self.miller_array,0.0,aniso_scale_and_b.u_star )
    self.no_aniso_array = absolute_scaling.anisotropic_correction(
      self.no_aniso_array,0.0,u_star_aniso_removed)
    self.no_aniso_array = self.no_aniso_array.set_observation_type(
      miller_array )

  # that is done now, now we can do outlier detection if desired
  outlier_manager = outlier_rejection.outlier_manager(
    self.no_aniso_array,
    None,
    out=self.out)

  self.new_miller_array = self.no_aniso_array
  if self.params.outlier.action == "basic":
    print >> self.out, "Non-outliers found by the basic wilson statistics"
    print >> self.out, "protocol will be written out."
    basic_array = outlier_manager.basic_wilson_outliers(
      p_basic_wilson = self.params.outlier.parameters.basic_wilson.level,
      return_data = True)
    self.new_miller_array = basic_array

  if self.params.outlier.action == "extreme":
    print >> self.out, "Non-outliers found by the extreme value wilson statistics"
    print >> self.out, "protocol will be written out."
    extreme_array = outlier_manager.extreme_wilson_outliers(
      p_extreme_wilson = self.params.outlier.parameters.extreme_wilson.level,
      return_data = True)
    self.new_miller_array = extreme_array

  if self.params.outlier.action == "beamstop":
    print >> self.out, "Outliers found for the beamstop shadow"
    print >> self.out, "problems detection protocol will be written out."
    beamstop_array = outlier_manager.beamstop_shadow_outliers(
      level = self.params.outlier.parameters.beamstop.level,
      d_min = self.params.outlier.parameters.beamstop.d_min,
      return_data=True)
    self.new_miller_array = beamstop_array

  if self.params.outlier.action == "None":
    self.new_miller_array = self.no_aniso_array

  # now we can twin or detwin the data if needed
  self.final_array = self.new_miller_array

  if self.params.symmetry.action == "twin":
    alpha = self.params.symmetry.twinning_parameters.fraction
    if (alpha is None) :
      raise Sorry("Twin fraction not specified, not twinning data")
    elif not (0 <= alpha <= 0.5):
      raise Sorry("Twin fraction must be between 0 and 0.5.")
    print >> self.out
    print >> self.out, "Twinning given data"
    print >> self.out, "-------------------"
    print >> self.out
    print >> self.out, "Artifically twinning the data with fraction %3.2f" %\
      alpha
    self.final_array = self.new_miller_array.twin_data(
      twin_law = self.params.symmetry.twinning_parameters.twin_law,
      alpha=alpha).as_intensity_array()
  elif (self.params.symmetry.action == "detwin") :
    twin_law = self.params.symmetry.twinning_parameters.twin_law
    alpha = self.params.symmetry.twinning_parameters.fraction
    if (alpha is None) :
      raise Sorry("Twin fraction not specified, not detwinning data")
    elif not (0 <= alpha <= 0.5):
      raise Sorry("Twin fraction must be between 0 and 0.5.")
    print >> self.out, """
Attempting to detwin data
-------------------------
Detwinning data with:
- twin law:      %s
- twin fraciton: %.2f

BE WARNED! DETWINNING OF DATA DOES NOT SOLVE YOUR TWINNING PROBLEM!
PREFERABLY, REFINEMENT SHOULD BE CARRIED OUT AGAINST ORIGINAL DATA ONLY
USING A TWIN SPECIFIC TARGET FUNCTION!
""" % (twin_law, alpha)
    self.final_array = self.new_miller_array.detwin_data(
      twin_law=twin_law,
      alpha=alpha).as_intensity_array()
  assert self.final_array is not None
def exercise_radial_shells(k_sol=0.33, d_min=1.5, grid_search=False, shell_width=0.6):
  """Round-trip test of radial-shell bulk-solvent scaling.

  Simulates f_obs with known k_sol/b_sol/b_cart via get_f_obs_freer, then
  checks that bss.bulk_solvent_and_scales recovers them to near-zero R.

  NOTE(review): when k_sol is a list this function mutates the caller's
  list in place (entries zeroed for empty shells) -- beware shared defaults.
  """
  xray_structure = get_xray_structure_from_file()
  b_sol = 34.0
  # One b_sol per shell when multiple k_sol values are given.
  if (type(k_sol) is list):
    b_sol = [ b_sol, ] * len(k_sol)
  b_cart = [1, 2, 3, 0, 4, 0]
  f_obs, r_free_flags = get_f_obs_freer(d_min=d_min,
                                        k_sol=k_sol,
                                        b_sol=b_sol,
                                        b_cart=b_cart,
                                        xray_structure=xray_structure,
                                        radial_shell_width=shell_width)
  mask_params = mmtbx.masks.mask_master_params.extract()
  mask_params.radial_shell_width = shell_width
  if (type(k_sol) is list):
    mask_params.n_radial_shells = len(k_sol)
  else:
    mask_params.n_radial_shells = 2
  fmodel = mmtbx.f_model.manager(r_free_flags=r_free_flags,
                                 f_obs=f_obs,
                                 xray_structure=xray_structure,
                                 mask_params=mask_params)
  u_star = adptbx.u_cart_as_u_star(fmodel.f_obs().unit_cell(),
                                   adptbx.b_as_u(b_cart))
  fmodel_kbu = fmodel.fmodel_kbu()
  fmodel_kbu.update(u_star=u_star)
  # R before refining k_sol/b_sol must be non-zero (see assert below).
  r_work_start = fmodel_kbu.r_factor() * 100.
  msk = fmodel.mask_manager
  print 'Solvent content: ', msk.solvent_content_via_mask
  print 'Layer volume fractions: ', msk.layer_volume_fractions
  # Shells with zero volume contribute nothing; expected k_sol is 0 there.
  if (type(k_sol) is list):
    for i in range(len(k_sol)):
      if (msk.layer_volume_fractions[i] == 0.):
        k_sol[i] = 0.
  params = bss.master_params.extract()
  params.anisotropic_scaling = False
  params.k_sol_b_sol_grid_search = grid_search
  params.number_of_macro_cycles = 10
  params.k_sol_max = 1.2
  result = bss.bulk_solvent_and_scales(fmodel_kbu=fmodel_kbu, params=params)
  r_work = result.fmodel_kbu.r_factor()
  print 'R-work: ', r_work
  print 'Solvent radius: ', fmodel.mask_params.solvent_radius
  assert r_work_start > 0.0
  assert approx_equal(r_work, 0.0, eps=1.e-3)
  if (type(k_sol) is list):
    ksols = list(result.fmodel_kbu.k_sols())
    # XXX if layer_volume_fractions=0, then ksol is more or less undefined ?
    # XXX should it be done in bulk_solvent_and_scaling.py ?
    for i in range(len(ksols)):
      if (msk.layer_volume_fractions[i] == 0.):
        ksols[i] = 0.
    assert len(k_sol) == len(ksols)
    for ik in range(len(k_sol)):
      assert approx_equal(ksols[ik], k_sol[ik], eps=0.005), [ksols[ik], k_sol[ik]]
  else:
    for ksol in result.fmodel_kbu.k_sols():
      assert approx_equal(ksol, k_sol, eps=1.e-3)
  # b_sols() may return one value per shell; broadcast the scalar input.
  n = len(result.b_sols())
  if (n > 1 and type(b_sol) is float):
    b_sol = [ b_sol, ] * n
  assert approx_equal(result.b_sols(), b_sol, eps=1.)
  assert approx_equal(result.b_cart(), b_cart, eps=1.e-6)
def u_star_minus_u_iso_ralf(unit_cell, u_star):
  """Return u_star with its isotropic component subtracted.

  Converts to Cartesian, removes the equivalent isotropic U from the
  three diagonal elements (off-diagonals untouched), and converts back.
  """
  cart = adptbx.u_star_as_u_cart(unit_cell, u_star)
  iso = adptbx.u_cart_as_u_iso(cart)
  traceless = [cart[0] - iso, cart[1] - iso, cart[2] - iso,
               cart[3], cart[4], cart[5]]
  return adptbx.u_cart_as_u_star(unit_cell, traceless)
def exercise_5_bulk_sol_and_scaling(d_min, symbol="C 2", k_sol=0.37, b_sol=64.0):
  """Round-trip test of mmtbx.f_model.manager scaling.

  Simulates f_obs = |scale * k_aniso * (f_calc + k_mask * f_mask)| from a
  random structure, then checks (1) a manager fed the exact scale arrays
  reproduces the data with zero R, and (2) update_all_scales recovers
  k_sol/b_sol/b_cart from scratch.
  """
  x = random_structure.xray_structure(
    space_group_info=sgtbx.space_group_info(symbol=symbol),
    elements=(("O", "N", "C") * 150),
    volume_per_atom=200,
    min_distance=1.5,
    general_positions_only=True,
    random_u_iso=True,
    random_occupancy=False)
  x.scattering_type_registry(table="wk1995")
  f_calc = x.structure_factors(d_min=d_min, algorithm="direct").f_calc()
  mask_manager = mmtbx.masks.manager(miller_array=f_calc)
  f_mask = mask_manager.shell_f_masks(xray_structure=x)[0]
  assert flex.mean(abs(f_mask).data()) > 0
  # Known-answer scale components used to synthesize f_obs.
  b_cart = [-15, -5, 20, 0, 6, 0]
  u_star = adptbx.u_cart_as_u_star(x.unit_cell(), adptbx.b_as_u(b_cart))
  k_anisotropic = mmtbx.f_model.ext.k_anisotropic(f_calc.indices(), u_star)
  ss = 1. / flex.pow2(f_calc.d_spacings().data()) / 4.
  k_mask = mmtbx.f_model.ext.k_mask(ss, k_sol, b_sol)
  scale = 17.
  k_isotropic = flex.double(f_calc.data().size(), scale)
  f_model_data = scale * k_anisotropic * (f_calc.data() + k_mask * f_mask.data())
  f_model = f_calc.customized_copy(data=f_model_data)
  f_obs = abs(f_model)
  r_free_flags = f_obs.generate_r_free_flags(use_lattice_symmetry=False)
  sfg_params = mmtbx.f_model.sf_and_grads_accuracy_master_params.extract()
  sfg_params.algorithm = "direct"
  bin_selections = []
  f_calc.setup_binner(reflections_per_bin=100)
  for i_bin in f_calc.binner().range_used():
    sel = f_calc.binner().selection(i_bin)
    bin_selections.append(sel)
  # test 1
  # NOTE(review): loop variable k_isotropic_ is never used below -- the
  # manager always receives k_isotropic, so the None case is not actually
  # exercised.  Presumably k_isotropic=k_isotropic_ was intended; confirm
  # before changing, as the asserts rely on the scale being applied.
  for k_isotropic_ in [None, k_isotropic]:
    fmodel = mmtbx.f_model.manager(xray_structure=x,
                                   f_obs=f_obs,
                                   k_mask=k_mask,
                                   k_anisotropic=k_anisotropic,
                                   k_isotropic=k_isotropic,
                                   r_free_flags=r_free_flags,
                                   bin_selections=bin_selections,
                                   sf_and_grads_accuracy_params=sfg_params)
    assert approx_equal(fmodel.r_work(), 0)
    assert approx_equal(fmodel.r_free(), 0)
    assert approx_equal(fmodel.target_w(), 0)
    assert approx_equal(fmodel.k_masks()[0], k_mask)
    assert approx_equal(fmodel.k_anisotropic(), k_anisotropic)
    assert approx_equal(fmodel.f_model_scaled_with_k1().data(), f_model_data)
    assert approx_equal(fmodel.f_model().data(), f_model_data)
  # test 2
  fmodel = mmtbx.f_model.manager(f_calc=f_calc,
                                 f_mask=f_mask,
                                 f_obs=f_obs,
                                 k_mask=k_mask,
                                 k_anisotropic=k_anisotropic,
                                 k_isotropic=k_isotropic,
                                 r_free_flags=r_free_flags,
                                 bin_selections=bin_selections,
                                 sf_and_grads_accuracy_params=sfg_params)
  assert approx_equal(fmodel.r_work(), 0)
  assert approx_equal(fmodel.r_free(), 0)
  assert approx_equal(fmodel.target_w(), 0)
  assert approx_equal(fmodel.k_masks()[0], k_mask)
  assert approx_equal(fmodel.k_anisotropic(), k_anisotropic)
  assert approx_equal(fmodel.f_model_scaled_with_k1().data(), f_model_data)
  assert approx_equal(fmodel.f_model().data(), f_model_data)
  assert fmodel.f_calc().data().all_eq(f_calc.data())
  assert fmodel.f_masks()[0].data().all_eq(f_mask.data())
  # test 3
  params = bss.master_params.extract()
  params.number_of_macro_cycles = 5
  fmodel = mmtbx.f_model.manager(f_calc=f_calc,
                                 f_mask=f_mask,
                                 f_obs=f_obs,
                                 r_free_flags=r_free_flags,
                                 bin_selections=bin_selections,
                                 sf_and_grads_accuracy_params=sfg_params)
  assert fmodel.r_work() > 0.3
  o = fmodel.update_all_scales(fast=False, params=params, remove_outliers=False)
  assert approx_equal(o.k_sol[0], k_sol, 0.01)
  assert approx_equal(o.b_sol[0], b_sol, 0.1)
  assert approx_equal(o.b_cart, b_cart, 1.e-1)
  assert approx_equal(fmodel.r_work(), 0, 1.e-3)
  assert approx_equal(fmodel.r_free(), 0, 1.e-3)
  # test 4 - part 1
  fmodel = mmtbx.f_model.manager(f_calc=f_calc,
                                 f_mask=f_mask,
                                 f_obs=f_obs,
                                 r_free_flags=r_free_flags,
                                 bin_selections=bin_selections,
                                 sf_and_grads_accuracy_params=sfg_params)
  assert fmodel.r_work() > 0.3
  fmodel.update_all_scales(fast=True, remove_outliers=False)
  assert fmodel.r_work() < 0.045, [fmodel.r_work(), d_min]
  # part 2 of test 4
  # Rebuild f_obs from the refined scales and refit from scratch.
  d = fmodel.k_isotropic() * fmodel.k_anisotropic() * (
    f_calc.data() + fmodel.k_masks()[0] * f_mask.data())
  fmodel = mmtbx.f_model.manager(f_calc=f_calc,
                                 f_mask=f_mask,
                                 f_obs=abs(f_calc.customized_copy(data=d)),
                                 r_free_flags=r_free_flags,
                                 bin_selections=bin_selections,
                                 sf_and_grads_accuracy_params=sfg_params)
  assert fmodel.r_work() > 0.3
  fmodel.update_all_scales(fast=True)
  assert fmodel.r_work() < 0.052, [fmodel.r_work(), d_min]
def random_aniso_adp(space_group, unit_cell, u_scale=2, u_min=0):
  """Generate a random anisotropic ADP (u_cart) averaged over the
  space-group symmetry so it satisfies the site-symmetry constraints."""
  u_cart_random = adptbx.random_u_cart(u_scale=u_scale, u_min=u_min)
  u_star_random = adptbx.u_cart_as_u_star(unit_cell, u_cart_random)
  u_star_symmetrized = space_group.average_u_star(u_star=u_star_random)
  return adptbx.u_star_as_u_cart(unit_cell, u_star_symmetrized)
def exercise_6_instantiate_consistency(symbol="C 2"):
  """Consistency check: an f_model manager rebuilt from another manager's
  components (f_model_no_scales as f_calc, zero f_mask, copied scale
  arrays and f_part1/f_part2) must reproduce the same r_work.

  Sweeps overall scale, k_sol/b_sol, H occupancy zeroing, f_part1
  updating and where the scale is applied (f_obs vs f_model).
  """
  # Fixed seeds: the assertions depend on the exact random stream.
  random.seed(0)
  flex.set_random_seed(0)
  for scale in [1.e-4, 1.0, 1.e+4]:
    for k_sol in [0, 0.3]:
      for b_sol in [0, 50]:
        for set_h_occ_to_zero in [True, False]:
          for update_f_part1 in [True, False]:
            for apply_scale_to in ["f_obs", "f_model"]:
              # Simulate Fobs START
              x = random_structure.xray_structure(
                space_group_info=sgtbx.space_group_info(symbol=symbol),
                elements=(("O", "N", "C") * 3 + ("H", ) * 10),
                volume_per_atom=50,
                min_distance=3,
                general_positions_only=True,
                random_u_iso=True,
                random_occupancy=False)
              x.scattering_type_registry(table="wk1995")
              x.set_occupancies(value=0.8, selection=x.hd_selection())
              f_calc = x.structure_factors(d_min=2.0).f_calc()
              mask_manager = mmtbx.masks.manager(miller_array=f_calc)
              f_mask = mask_manager.shell_f_masks(xray_structure=x)[0]
              assert flex.mean(abs(f_mask).data()) > 0
              b_cart = adptbx.random_traceless_symmetry_constrained_b_cart(
                crystal_symmetry=x.crystal_symmetry())
              u_star = adptbx.u_cart_as_u_star(
                x.unit_cell(), adptbx.b_as_u(b_cart))
              k_anisotropic = mmtbx.f_model.ext.k_anisotropic(
                f_calc.indices(), u_star)
              ss = 1. / flex.pow2(f_calc.d_spacings().data()) / 4.
              k_mask = mmtbx.f_model.ext.k_mask(ss, k_sol, b_sol)
              # Overall scale goes either into k_isotropic (f_model) or
              # directly onto the simulated f_obs.
              if (apply_scale_to == "f_model"):
                k_isotropic = flex.double(f_calc.data().size(), scale)
              else:
                k_isotropic = flex.double(f_calc.data().size(), 1)
              f_model_data = scale * k_anisotropic * (
                f_calc.data() + k_mask * f_mask.data())
              f_model = f_calc.customized_copy(data=f_model_data)
              f_obs = abs(f_model)
              if (apply_scale_to == "f_obs"):
                f_obs = f_obs.customized_copy(data=f_obs.data() * scale)
              r_free_flags = f_obs.generate_r_free_flags()
              # Simulate Fobs END
              # Degrade the model so refined scales are non-trivial.
              if (set_h_occ_to_zero):
                x.set_occupancies(value=0.0, selection=x.hd_selection())
              x.shake_sites_in_place(mean_distance=5)
              sel = x.random_remove_sites_selection(fraction=0.3)
              x = x.select(sel)
              fmodel = mmtbx.f_model.manager(
                xray_structure=x, f_obs=f_obs, r_free_flags=r_free_flags)
              fmodel.update_all_scales(
                fast=True, show=False, update_f_part1=update_f_part1)
              # Inject an arbitrary f_part1 contribution.
              f_part1_data = fmodel.f_calc().data(
                ) * flex.random_double(fmodel.f_calc().data().size())
              f_part1 = fmodel.f_calc().customized_copy(data=f_part1_data)
              fmodel.update(f_part1=f_part1)
              r1 = fmodel.r_work()
              #
              # Rebuild an equivalent manager from the first one's parts.
              zero = fmodel.f_calc().customized_copy(
                data=fmodel.f_calc().data() * 0)
              fmodel_dc = mmtbx.f_model.manager(
                f_obs=fmodel.f_obs(),
                r_free_flags=fmodel.r_free_flags(),
                k_isotropic=fmodel.k_isotropic(),
                k_anisotropic=fmodel.k_anisotropic(),
                f_calc=fmodel.f_model_no_scales(),
                f_part1=fmodel.f_part1(),
                f_part2=fmodel.f_part2(),
                f_mask=zero)
              r2 = fmodel_dc.r_work()
              if (0):
                print("r1=%8.6f r2=%8.6f fp1=%6.3f fp2=%6.3f fc=%6.3f"%(r1, r2,
                  flex.mean(abs(fmodel.f_part1()).data()), \
                  flex.mean(abs(fmodel.f_part2()).data()), \
                  flex.mean(abs(fmodel.f_calc()).data())), \
                  "set_h_occ_to_zero=", set_h_occ_to_zero,\
                  "update_f_part1=", update_f_part1)
              assert approx_equal(r1, r2), [r1, r2]
def exercise(space_group_info,
             n_elements = 10,
             table = "wk1995",
             d_min = 2.0,
             k_sol = 0.35,
             b_sol = 45.0,
             b_cart = None,
             quick=False,
             verbose=0):
  """Check analytical site gradients of every refinement target against
  finite differences, for centric/acentric and anomalous/non-anomalous
  cases.

  NOTE(review): the b_cart parameter is unconditionally overwritten with
  a random symmetry-averaged tensor below, so the argument has no effect.
  NOTE(review): sfg_params is not defined here -- presumably a
  module-level parameter object; verify against the rest of the file.
  """
  xray_structure = random_structure.xray_structure(
    space_group_info = space_group_info,
    elements =(("O","N","C")*(n_elements//3+1))[:n_elements],
    volume_per_atom = 100,
    min_distance = 1.5,
    general_positions_only = True,
    random_u_iso = False,
    random_occupancy = False)
  xray_structure.scattering_type_registry(table = table)
  sg = xray_structure.space_group()
  uc = xray_structure.unit_cell()
  # Random symmetry-consistent anisotropic overall scale tensor.
  u_cart_1 = adptbx.random_u_cart(u_scale=5, u_min=5)
  u_star_1 = adptbx.u_cart_as_u_star(uc, u_cart_1)
  b_cart = adptbx.u_star_as_u_cart(uc, sg.average_u_star(u_star = u_star_1))
  for anomalous_flag in [False, True]:
    scatterers = xray_structure.scatterers()
    # Give two scatterers anomalous contributions (or reset them).
    if (anomalous_flag):
      assert scatterers.size() >= 7
      for i in [1,7]:
        scatterers[i].fp = -0.2
        scatterers[i].fdp = 5
      have_non_zero_fdp = True
    else:
      for i in [1,7]:
        scatterers[i].fp = 0
        scatterers[i].fdp = 0
      have_non_zero_fdp = False
    f_obs = abs(xray_structure.structure_factors(
      d_min          = d_min,
      anomalous_flag = anomalous_flag,
      cos_sin_table  = sfg_params.cos_sin_table,
      algorithm      = sfg_params.algorithm).f_calc())
    f_obs_comp = f_obs.structure_factors_from_scatterers(
      xray_structure = xray_structure,
      algorithm      = sfg_params.algorithm,
      cos_sin_table  = sfg_params.cos_sin_table).f_calc()
    f_obs = abs(f_obs_comp)
    flags = f_obs.generate_r_free_flags(fraction = 0.1,
                                        max_free = 99999999)
    #flags = flags.array(data = flex.bool(f_obs.data().size(), False))
    # Perturbed model: gradients should drive it back toward f_obs.
    xrs = xray_structure.deep_copy_scatterers()
    xrs.shake_sites_in_place(rms_difference=0.3)
    for target in mmtbx.refinement.targets.target_names:
      if (quick):
        if (target not in ["ls_wunit_k1", "ml", "mlhl", "ml_sad"]): continue
      if (target == "mlhl"):
        if (have_non_zero_fdp): continue # XXX gradients not correct!
        experimental_phases = generate_random_hl(miller_set=f_obs)
      else:
        experimental_phases = None
      if (target == "ml_sad" and
          (not anomalous_flag or mmtbx.refinement.targets.phaser is None)):
        continue
      print " ",target
      xray.set_scatterer_grad_flags(
        scatterers = xrs.scatterers(),
        site       = True)
      ss = 1./flex.pow2(f_obs.d_spacings().data()) / 4.
      u_star = adptbx.u_cart_as_u_star(
        f_obs.unit_cell(), adptbx.b_as_u(b_cart))
      k_anisotropic = mmtbx.f_model.ext.k_anisotropic(
        f_obs.indices(), u_star)
      k_mask = mmtbx.f_model.ext.k_mask(ss, k_sol, b_sol)
      fmodel = mmtbx.f_model.manager(
        xray_structure               = xrs,
        f_obs                        = f_obs,
        r_free_flags                 = flags,
        target_name                  = target,
        abcd                         = experimental_phases,
        sf_and_grads_accuracy_params = sfg_params,
        k_mask                       = k_mask,
        k_anisotropic                = k_anisotropic,
        mask_params                  = masks.mask_master_params.extract())
      fmodel.update_xray_structure(
        xray_structure=xrs,
        update_f_calc=True,
        update_f_mask=True)
      xray.set_scatterer_grad_flags(
        scatterers=fmodel.xray_structure.scatterers(),
        site=True)
      fmodel.update_xray_structure(update_f_calc=True)
      t_f = fmodel.target_functor()
      t_f.prepare_for_minimization()
      # Analytical vs finite-difference site gradients.
      gs = t_f(compute_gradients=True).d_target_d_site_cart().as_double()
      gfd = finite_differences_site(target_functor=t_f)
      cc = flex.linear_correlation(gs, gfd).coefficient()
      if (0 or verbose):
        print "ana:", list(gs)
        print "fin:", list(gfd)
        print "rat:", [f/a for a,f in zip(gs,gfd)]
        print target, "corr:", cc, space_group_info
        print
      # Normalized difference must vanish and correlation must be ~1.
      diff = gs - gfd
      diff /= max(1, flex.max(flex.abs(gfd)))
      tolerance = 1.2e-5
      assert approx_equal(abs(flex.min(diff) ), 0.0, tolerance)
      assert approx_equal(abs(flex.mean(diff)), 0.0, tolerance)
      assert approx_equal(abs(flex.max(diff) ), 0.0, tolerance)
      assert approx_equal(cc, 1.0, tolerance)
      fmodel.model_error_ml()
def exercise_negative_parameters(verbose=0):
  """Check that direct- and FFT-based structure factors agree (and that
  gradient calculations run) when individual scatterer parameters are
  negative: occupancy, scattering gaussian, u_iso, anisotropic U, fp.
  """
  # One-carbon P1 structure used as the template for every trial.
  structure_default = xray.structure(
    crystal_symmetry=crystal.symmetry(unit_cell=((10, 13, 17, 75, 80, 85)),
                                      space_group_symbol="P 1"),
    scatterers=flex.xray_scatterer(
      [xray.scatterer(label="C", site=(0, 0, 0), u=0.25)]))
  negative_gaussian = eltbx.xray_scattering.gaussian((1, 2), (2, 3), -4)
  for i_trial in range(7):
    # i_trial 0 is the unmodified control; each other trial negates one
    # parameter.
    structure = structure_default.deep_copy_scatterers()
    scatterer = structure.scatterers()[0]
    if (i_trial == 1):
      scatterer.occupancy *= -1
    elif (i_trial == 2):
      structure.scattering_type_registry(
        custom_dict={"C": negative_gaussian})
    elif (i_trial == 3):
      scatterer.u_iso *= -1
    elif (i_trial == 4):
      # Fully negative-definite anisotropic ADP.
      u_cart = adptbx.random_u_cart(u_scale=1, u_min=-1.1)
      assert max(adptbx.eigenvalues(u_cart)) < 0
      u_star = adptbx.u_cart_as_u_star(structure.unit_cell(), u_cart)
      scatterer.u_star = u_star
      scatterer.flags.set_use_u_aniso_only()
    elif (i_trial == 5):
      scatterer.fp = -10
    elif (i_trial == 6):
      scatterer.fp = -3
    f_direct = structure.structure_factors(d_min=1,
                                           algorithm="direct",
                                           cos_sin_table=False).f_calc()
    f_fft = structure.structure_factors(d_min=1,
                                        algorithm="fft",
                                        quality_factor=1.e8,
                                        wing_cutoff=1.e-10).f_calc()
    if (i_trial == 2):
      assert negative_gaussian.at_d_star_sq(
        f_fft.d_star_sq().data()).all_lt(0)
    if (i_trial in [5, 6]):
      # fp = -10 drives f0 + fp fully negative; fp = -3 only partially.
      f = structure.scattering_type_registry().gaussian_not_optional(
        scattering_type="C").at_d_star_sq(f_fft.d_star_sq().data())
      if (i_trial == 5):
        assert flex.max(f) + scatterer.fp < 0
      else:
        assert flex.max(f) + scatterer.fp > 0
        assert flex.min(f) + scatterer.fp < 0
    cc = flex.linear_correlation(abs(f_direct).data(),
                                 abs(f_fft).data()).coefficient()
    if (cc < 0.999):
      raise AssertionError("i_trial=%d, correlation=%.6g" % (i_trial, cc))
    elif (0 or verbose):
      print("correlation=%.6g" % cc)
    #
    # very simple test of gradient calculations with negative parameters
    structure_factor_gradients = \
      cctbx.xray.structure_factors.gradients(
        miller_set=f_direct, cos_sin_table=False)
    target_functor = xray.target_functors.intensity_correlation(
      f_obs=abs(f_direct))
    target_result = target_functor(f_fft, True)
    xray.set_scatterer_grad_flags(scatterers=structure.scatterers(),
                                  site=True,
                                  u_iso=True,
                                  u_aniso=True,
                                  occupancy=True,
                                  fp=True,
                                  fdp=True)
    # Only checks that packing the gradients succeeds for both algorithms.
    for algorithm in ["direct", "fft"]:
      grads = structure_factor_gradients(
        xray_structure=structure,
        u_iso_refinable_params=None,
        miller_set=f_direct,
        d_target_d_f_calc=target_result.derivatives(),
        n_parameters=structure.n_parameters(),
        algorithm=algorithm).packed()
def run_02():
  """Exercise bulk_solvent.aniso_u_scaler (unit-cell/a-parameter variant)
  over all Bravais types, including the F(0,0,0) term, and check the
  recovered anisotropic scale reproduces the simulated one.
  """
  time_aniso_u_scaler = 0
  for symbol in sgtbx.bravais_types.acentric + sgtbx.bravais_types.centric:
    #print symbol, "-"*50
    space_group_info = sgtbx.space_group_info(symbol = symbol)
    xrs = random_structure.xray_structure(
      space_group_info = space_group_info,
      elements         = ["N"]*100,
      volume_per_atom  = 50.0,
      random_u_iso     = True)
    xrs.scattering_type_registry(table = "wk1995")
    # XXX ad a method to adptbx to do this
    # Project a random u_cart onto the point-group-allowed subspace.
    point_group = sgtbx.space_group_info(
      symbol=symbol).group().build_derived_point_group()
    adp_constraints = sgtbx.tensor_rank_2_constraints(
      space_group=point_group,
      reciprocal_space=True)
    u_star = adptbx.u_cart_as_u_star(xrs.unit_cell(),
      adptbx.random_u_cart(u_scale=1,u_min=0.1))
    u_indep = adp_constraints.independent_params(all_params=u_star)
    u_star = adp_constraints.all_params(independent_params=u_indep)
    b_cart_start=adptbx.u_as_b(
      adptbx.u_star_as_u_cart(xrs.unit_cell(), u_star))
    #
    # Remove the trace so the scale tensor is purely anisotropic.
    tr = (b_cart_start[0]+b_cart_start[1]+b_cart_start[2])/3
    b_cart_start = [b_cart_start[0]-tr,b_cart_start[1]-tr,b_cart_start[2]-tr,
                    b_cart_start[3],b_cart_start[4],b_cart_start[5]]
    tr = (b_cart_start[0]+b_cart_start[1]+b_cart_start[2])/3
    #
    #print "Input b_cart :", " ".join(["%8.4f"%i for i in b_cart_start]), "tr:", tr
    # NOTE(review): under Python 2, 1/12 is integer division and yields 0;
    # if a d_min of 1/12 Angstrom was intended, this should be 1./12 --
    # confirm before changing.
    reg = xrs.scattering_type_registry(table="wk1995", d_min=1/12)
    f_000 = reg.sum_of_scattering_factors_at_diffraction_angle_0()
    F = xrs.structure_factors(d_min = 2.0).f_calc()
    # Append the F(0,0,0) term: the scale must equal 1 exactly there
    # (checked by the last assert).
    i = F.indices()
    i.append([0,0,0])
    d = F.data()
    d.append(f_000)
    F = F.customized_copy(indices = i, data = d)
    u_star = adptbx.u_cart_as_u_star(
      F.unit_cell(), adptbx.b_as_u(b_cart_start))
    fbc = mmtbx.f_model.ext.k_anisotropic(F.indices(), u_star)
    fc = F.structure_factors_from_scatterers(xray_structure=xrs).f_calc()
    f_obs = F.customized_copy(data = flex.abs(fc.data()*fbc))
    #print bulk_solvent.r_factor(f_obs.data(), fmodel.f_model().data())
    # First fit: establishes the reference a coefficients.
    obj = bulk_solvent.aniso_u_scaler(
      f_model        = fc.data(),
      f_obs          = f_obs.data(),
      miller_indices = f_obs.indices(),
      unit_cell      = f_obs.unit_cell())
    a = obj.a
    ####
    #print "Input a :", " ".join(["%7.3f"%i for i in a])
    overall_anisotropic_scale = mmtbx.f_model.ext.k_anisotropic(
      f_obs.indices(), a, f_obs.unit_cell())
    #print bulk_solvent.r_factor(f_obs.data(), fmodel.f_model().data()*overall_anisotropic_scale)
    # Re-simulate f_obs from the fitted scale and fit again (timed).
    f_obs = abs(fc)
    f_obs = f_obs.customized_copy(
      data = f_obs.data() * overall_anisotropic_scale)
    #print bulk_solvent.r_factor(f_obs.data(), fmodel.f_model().data())
    #print bulk_solvent.r_factor(f_obs.data(), fmodel.f_model().data())
    t0 = time.time()
    obj = bulk_solvent.aniso_u_scaler(
      f_model        = fc.data(),
      f_obs          = f_obs.data(),
      miller_indices = f_obs.indices(),
      unit_cell      = f_obs.unit_cell())
    time_aniso_u_scaler += (time.time()-t0)
    overall_anisotropic_scale = mmtbx.f_model.ext.k_anisotropic(
      f_obs.indices(), obj.a, f_obs.unit_cell())
    assert approx_equal(bulk_solvent.r_factor(f_obs.data(),
      fc.data()*overall_anisotropic_scale), 0.0, 1.e-2) # XXX seems to be low
    #print "Output a:", " ".join(["%7.3f"%i for i in obj.a])
    assert approx_equal(a, obj.a, 1.e-4) # XXX can it be smaller?
    # The scale at the appended (0,0,0) index must be exactly 1.
    assert overall_anisotropic_scale[len(overall_anisotropic_scale)-1]==1
  print "Time (aniso_u_scaler only): %6.4f"%time_aniso_u_scaler
def run_group(symbol):
  """For one space-group symbol, compare analytical gradients and
  curvatures of the bulk-solvent target (k_sol, b_sol, u_star) against
  finite differences, for both the full and the u_star-only target
  objects."""
  sg_info = space_group_info(symbol)
  print("\n==")
  composition = ('C', 'N', 'O', 'H') * 11
  xrs = random_structure.xray_structure(
    space_group_info=sg_info,
    volume_per_atom=25.,
    general_positions_only=False,
    elements=composition,
    min_distance=1.0)
  f_obs = abs(xrs.structure_factors(d_min=2).f_calc())
  fmodel = mmtbx.f_model.manager(f_obs=f_obs, xray_structure=xrs)
  # Two-shell bulk-solvent parameters plus an anisotropic scale tensor.
  k_sol = flex.double([10.35, 5.34])
  b_sol = flex.double([30.0, 24.0])
  b_cart = [10, 20, 30, 40, 50, 60]
  u_star = flex.double(
    adptbx.b_as_u(adptbx.u_cart_as_u_star(xrs.unit_cell(), b_cart)))
  # Full target/gradients object.
  tgo_full = cpp_tg(fmodel=fmodel)
  tg_full = tgo_full.get_tg(k_sol=k_sol, b_sol=b_sol, u_star=u_star)
  # k_sol: analytical vs finite-difference gradients/curvatures.
  grad_k = list(tg_full.grad_k_sols())
  curv_k = list(tg_full.curv_k_sols())
  grad_k_fd, curv_k_fd = fd(
    TGO=tgo_full, k_sol=k_sol, b_sol=b_sol, u_star=u_star, param="k_sol")
  # b_sol
  grad_b = list(tg_full.grad_b_sols())
  curv_b = list(tg_full.curv_b_sols())
  grad_b_fd, curv_b_fd = fd(
    TGO=tgo_full, k_sol=k_sol, b_sol=b_sol, u_star=u_star, param="b_sol")
  # u_star
  grad_u = list(tg_full.grad_u_star())
  grad_u_fd, _unused = fd(
    TGO=tgo_full, k_sol=k_sol, b_sol=b_sol, u_star=u_star, param="u_star")
  print("u_star:", grad_u)
  print("u_star:", grad_u_fd)
  # Repeat the u_star check with the u_star-only target object.
  tgo_ustar = cpp_tg_u_star_only(fmodel=fmodel)
  tg_ustar = tgo_ustar.get_tg(k_sol=k_sol, b_sol=b_sol, u_star=u_star)
  grad_u2 = list(tg_ustar.grad_u_star())
  grad_u2_fd, _unused = fd(
    TGO=tgo_ustar, k_sol=k_sol, b_sol=b_sol, u_star=u_star, param="u_star")
  print("u_star:", grad_u2)
  print("u_star:", grad_u2_fd)
  #
  print("k_sol:", grad_k, curv_k)
  print("k_sol:", grad_k_fd, curv_k_fd)
  print("b_sol:", grad_b, curv_b)
  print("b_sol:", grad_b_fd, curv_b_fd)
  #
  assert approx_equal(grad_k, grad_k_fd, eps=1.e-4)
  assert approx_equal(grad_b, grad_b_fd, eps=1.e-4)
  assert approx_equal(curv_k, curv_k_fd, eps=1.e-4)
  assert approx_equal(curv_b, curv_b_fd, eps=1.e-4)
  assert approx_equal(grad_u, grad_u_fd, eps=1.e-4)
  assert approx_equal(grad_u2, grad_u2_fd, eps=1.e-6)
def exercise_radial_shells(k_sol=0.33,d_min=2,grid_search=False,shell_width=0.3):
  """Round-trip test of radial-shell bulk-solvent scaling (older API:
  result.fmodels / result.b_sol()).

  Simulates f_obs with known k_sol/b_sol/b_cart and checks they are
  recovered to near-zero R.

  NOTE(review): when k_sol is a list this function mutates the caller's
  list in place (entries zeroed for empty shells).
  """
  xray_structure = get_xray_structure_from_file()
  b_sol = 34.0
  b_cart = [1,2,3,0,4,0]
  f_obs, r_free_flags = get_f_obs_freer(
    d_min              = d_min,
    k_sol              = k_sol,
    b_sol              = b_sol,
    b_cart             = b_cart,
    xray_structure     = xray_structure,
    radial_shell_width = shell_width)
  mask_params = mmtbx.masks.mask_master_params.extract()
  mask_params.radial_shell_width = shell_width
  if( type(k_sol) is list ):
    mask_params.n_radial_shells = len(k_sol)
  else:
    mask_params.n_radial_shells = 2
  fmodel = mmtbx.f_model.manager(
    r_free_flags   = r_free_flags,
    f_obs          = f_obs,
    xray_structure = xray_structure,
    mask_params    = mask_params)
  u_star = adptbx.u_cart_as_u_star(
    fmodel.f_obs().unit_cell(),adptbx.b_as_u(b_cart))
  fmodel_kbu = fmodel.fmodel_kbu()
  fmodel_kbu.update(u_star = u_star)
  # R before refinement must be non-zero (asserted below).
  r_work_start = fmodel_kbu.r_factor()*100.
  msk = fmodel.mask_manager
  print 'Solvent content: ', msk.solvent_content_via_mask
  print 'Layer volume fractions: ', msk.layer_volume_fractions
  # Empty shells contribute nothing; expected k_sol is 0 there.
  if( type(k_sol) is list):
    for i in range(len(k_sol)):
      if( msk.layer_volume_fractions[i] == 0. ):
        k_sol[i] = 0.
  params = bss.master_params.extract()
  params.anisotropic_scaling = False
  params.k_sol_b_sol_grid_search = grid_search
  if( not params.k_sol_b_sol_grid_search ):
    params.number_of_macro_cycles = 3
  params.k_sol_max = 1.2
  result = bss.bulk_solvent_and_scales(
    fmodel_kbu = fmodel_kbu,
    params     = params)
  r_work = result.fmodels.r_factor()
  print 'R-work: ', r_work
  print 'Solvent radius: ', fmodel.mask_params.solvent_radius
  assert r_work_start > 0.0
  assert approx_equal(r_work, 0.0, eps = 1.e-4)
  if( type(k_sol) is list ):
    ksols = list(result.fmodels.fmodel.k_sols())
    # XXX if layer_volume_fractions=0, then ksol is more or less undefined ?
    # XXX should it be done in bulk_solvent_and_scaling.py ?
    for i in range(len(ksols)):
      if(msk.layer_volume_fractions[i] == 0.):
        ksols[i] = 0.
    assert len(k_sol) == len(ksols)
    for ik in range(len(k_sol)):
      assert approx_equal(ksols[ik], k_sol[ik], eps=1.e-6)
  else:
    for ksol in result.fmodels.fmodel.k_sols():
      assert approx_equal(ksol, k_sol, eps = 1.e-6)
  assert approx_equal(result.b_sol(), b_sol, eps = 1.e-6)
  assert approx_equal(result.b_cart(), b_cart, eps = 1.e-6)
def exercise_5_bulk_sol_and_scaling(d_min, symbol = "C 2", k_sol = 0.37,
                                    b_sol = 64.0):
  """Round-trip test of mmtbx.f_model.manager scaling (duplicate, older
  formatting; tighter 0.04 r_work thresholds in test 4).

  Simulates f_obs = |scale * k_aniso * (f_calc + k_mask * f_mask)| and
  checks exact reproduction plus recovery of k_sol/b_sol/b_cart.
  """
  x = random_structure.xray_structure(
    space_group_info       = sgtbx.space_group_info(symbol=symbol),
    elements               =(("O","N","C")*150),
    volume_per_atom        = 200,
    min_distance           = 1.5,
    general_positions_only = True,
    random_u_iso           = True,
    random_occupancy       = False)
  x.scattering_type_registry(table="wk1995")
  f_calc = x.structure_factors(d_min = d_min, algorithm="direct").f_calc()
  mask_manager = mmtbx.masks.manager(miller_array = f_calc)
  f_mask = mask_manager.shell_f_masks(xray_structure = x)[0]
  assert flex.mean(abs(f_mask).data()) > 0
  # Known-answer scale components used to synthesize f_obs.
  b_cart=[-15,-5,20, 0,6,0]
  u_star = adptbx.u_cart_as_u_star(x.unit_cell(), adptbx.b_as_u(b_cart))
  k_anisotropic = mmtbx.f_model.ext.k_anisotropic(f_calc.indices(), u_star)
  ss = 1./flex.pow2(f_calc.d_spacings().data()) / 4.
  k_mask = mmtbx.f_model.ext.k_mask(ss, k_sol, b_sol)
  scale = 17.
  k_isotropic = flex.double(f_calc.data().size(), scale)
  f_model_data = scale*k_anisotropic*(f_calc.data()+k_mask*f_mask.data())
  f_model = f_calc.customized_copy(data = f_model_data)
  f_obs = abs(f_model)
  r_free_flags = f_obs.generate_r_free_flags(use_lattice_symmetry=False)
  sfg_params = mmtbx.f_model.sf_and_grads_accuracy_master_params.extract()
  sfg_params.algorithm = "direct"
  bin_selections = []
  f_calc.setup_binner(reflections_per_bin=100)
  for i_bin in f_calc.binner().range_used():
    sel = f_calc.binner().selection(i_bin)
    bin_selections.append(sel)
  # test 1
  # NOTE(review): loop variable k_isotropic_ is never used below -- the
  # manager always receives k_isotropic, so the None case is not actually
  # exercised.  Confirm intent before changing.
  for k_isotropic_ in [None, k_isotropic]:
    fmodel = mmtbx.f_model.manager(
      xray_structure               = x,
      f_obs                        = f_obs,
      k_mask                       = k_mask,
      k_anisotropic                = k_anisotropic,
      k_isotropic                  = k_isotropic,
      r_free_flags                 = r_free_flags,
      bin_selections               = bin_selections,
      sf_and_grads_accuracy_params = sfg_params)
    assert approx_equal(fmodel.r_work(), 0)
    assert approx_equal(fmodel.r_free(), 0)
    assert approx_equal(fmodel.target_w(), 0)
    assert approx_equal(fmodel.k_masks()[0], k_mask)
    assert approx_equal(fmodel.k_anisotropic(), k_anisotropic)
    assert approx_equal(fmodel.f_model_scaled_with_k1().data(), f_model_data)
    assert approx_equal(fmodel.f_model().data(), f_model_data)
  # test 2
  fmodel = mmtbx.f_model.manager(
    f_calc                       = f_calc,
    f_mask                       = f_mask,
    f_obs                        = f_obs,
    k_mask                       = k_mask,
    k_anisotropic                = k_anisotropic,
    k_isotropic                  = k_isotropic,
    r_free_flags                 = r_free_flags,
    bin_selections               = bin_selections,
    sf_and_grads_accuracy_params = sfg_params)
  assert approx_equal(fmodel.r_work(), 0)
  assert approx_equal(fmodel.r_free(), 0)
  assert approx_equal(fmodel.target_w(), 0)
  assert approx_equal(fmodel.k_masks()[0], k_mask)
  assert approx_equal(fmodel.k_anisotropic(), k_anisotropic)
  assert approx_equal(fmodel.f_model_scaled_with_k1().data(), f_model_data)
  assert approx_equal(fmodel.f_model().data(), f_model_data)
  assert fmodel.f_calc().data().all_eq(f_calc.data())
  assert fmodel.f_masks()[0].data().all_eq(f_mask.data())
  # test 3
  params = bss.master_params.extract()
  params.number_of_macro_cycles=5
  fmodel = mmtbx.f_model.manager(
    f_calc                       = f_calc,
    f_mask                       = f_mask,
    f_obs                        = f_obs,
    r_free_flags                 = r_free_flags,
    bin_selections               = bin_selections,
    sf_and_grads_accuracy_params = sfg_params)
  assert fmodel.r_work() > 0.3
  o = fmodel.update_all_scales(fast=False, params=params, remove_outliers=False)
  assert approx_equal(o.k_sol[0], k_sol, 0.01 )
  assert approx_equal(o.b_sol[0], b_sol, 0.1)
  assert approx_equal(o.b_cart, b_cart, 1.e-1)
  assert approx_equal(fmodel.r_work(), 0, 1.e-3)
  assert approx_equal(fmodel.r_free(), 0, 1.e-3)
  # test 4 - part 1
  fmodel = mmtbx.f_model.manager(
    f_calc                       = f_calc,
    f_mask                       = f_mask,
    f_obs                        = f_obs,
    r_free_flags                 = r_free_flags,
    bin_selections               = bin_selections,
    sf_and_grads_accuracy_params = sfg_params)
  assert fmodel.r_work() > 0.3
  fmodel.update_all_scales(fast=True, remove_outliers=False)
  assert fmodel.r_work() < 0.04, [fmodel.r_work(), d_min]
  # part 2 of test 4
  # Rebuild f_obs from the refined scales and refit from scratch.
  d=fmodel.k_isotropic()*fmodel.k_anisotropic()*(
    f_calc.data()+fmodel.k_masks()[0]*f_mask.data())
  fmodel = mmtbx.f_model.manager(
    f_calc                       = f_calc,
    f_mask                       = f_mask,
    f_obs                        = abs(f_calc.customized_copy(data = d)),
    r_free_flags                 = r_free_flags,
    bin_selections               = bin_selections,
    sf_and_grads_accuracy_params = sfg_params)
  assert fmodel.r_work() > 0.3
  fmodel.update_all_scales(fast=True)
  assert fmodel.r_work() < 0.04, [fmodel.r_work(), d_min]
def run_0(symbol = "C 2"):
  """Regression test: exhaustive bulk-solvent/anisotropic scaling (scaler.run)
  must reproduce a synthetic f_obs that was built from known k_isotropic,
  k_anisotropic and k_mask, driving R factors to ~0.

  Assumes module-level imports: sgtbx, random_structure, adptbx, flex,
  mmtbx.f_model, bulk_solvent, scaler, random, approx_equal.
  """
  space_group_info = sgtbx.space_group_info(symbol = symbol)
  # Random N-atom structure in the requested space group.
  xrs = random_structure.xray_structure(
    space_group_info = space_group_info,
    elements         = ["N"]*50,
    volume_per_atom  = 100.0,
    random_u_iso     = True)
  #
  # Symmetry-constrained, traceless overall anisotropic B tensor.
  b_cart = adptbx.random_traceless_symmetry_constrained_b_cart(
    crystal_symmetry=xrs.crystal_symmetry())
  u_star = adptbx.u_cart_as_u_star(xrs.unit_cell(), adptbx.b_as_u(b_cart))
  #
  F = xrs.structure_factors(d_min = 1.5).f_calc()
  k_anisotropic = mmtbx.f_model.ext.k_anisotropic(F.indices(), u_star)
  #
  # Resolution bins; the per-bin selections are reused by fmodel and scaler.
  bin_selections = []
  F.setup_binner(reflections_per_bin=50)
  for i_bin in F.binner().range_used():
    sel = F.binner().selection(i_bin)
    bin_selections.append(sel)
  #
  d_spacings = F.d_spacings().data()
  ss = 1./flex.pow2(d_spacings) / 4.  # (sin(theta)/lambda)^2
  k_mask_tmp = mmtbx.f_model.ext.k_mask(ss, 0.35, 80.)
  k_mask = flex.double(F.data().size(), 0)
  k_isotropic = flex.double(F.data().size(), 0)
  # Piecewise-constant k_mask (bin mean) and random integer k_isotropic per bin.
  for s in bin_selections:
    d = d_spacings.select(s)
    k_mask.set_selected(s, flex.mean(k_mask_tmp.select(s)))
    k_isotropic.set_selected(s, random.randint(1,10))
  #
  # Build f_model with the known scales; its amplitude becomes the "observed" data.
  fmodel = mmtbx.f_model.manager(
    xray_structure = xrs,
    f_obs          = abs(F),
    k_isotropic    = k_isotropic,
    k_anisotropic  = k_anisotropic,
    k_mask         = k_mask)
  f_calc  = fmodel.f_calc()
  f_masks = fmodel.f_masks()
  f_model = fmodel.f_model()
  f_obs   = abs(f_model)
  r_free_flags = f_obs.generate_r_free_flags(use_lattice_symmetry=False)
  #
  # Sanity check: f_obs was constructed to match f_model exactly.
  assert approx_equal(bulk_solvent.r_factor(f_obs.data(), f_model.data()), 0)
  aso = scaler.run(
    f_obs          = f_obs,
    f_calc         = f_calc,
    f_mask         = f_masks,
    r_free_flags   = r_free_flags,
    bin_selections = bin_selections,
    number_of_cycles = 500,
    auto_convergence_tolerance = 1.e-9,
    ss             = ss,
    try_poly       = True,
    try_expanal    = True,
    try_expmin     = True,
    verbose        = False)
  # Hard-coded historical reference values for this seeded setup.
  assert approx_equal(aso.r_final, 0.00037, 0.00001)
  assert approx_equal(aso.r_low, 0.00002, 0.00001)
  assert approx_equal(aso.r_high, 0.00006, 0.00001)
  # r_factor with and without the extra flag argument must agree here.
  assert approx_equal(
    bulk_solvent.r_factor(f_obs.data(), abs(aso.core.f_model).data(), 1),
    bulk_solvent.r_factor(f_obs.data(), abs(aso.core.f_model).data()))
def exercise_6_instantiate_consistency(symbol = "C 2"):
  """Consistency test: an fmodel re-instantiated from the pieces of another
  fmodel (f_model_no_scales as f_calc, zero f_mask, copied scales and
  f_part1/f_part2) must reproduce the same R-work.

  Sweeps overall scale, bulk-solvent (k_sol, b_sol), H occupancy handling,
  update_f_part1, and where the scale is applied.
  """
  random.seed(0)
  flex.set_random_seed(0)
  for scale in [1.e-4, 1.0, 1.e+4]:
    for k_sol in [0, 0.3]:
      for b_sol in [0, 50]:
        for set_h_occ_to_zero in [True, False]:
          for update_f_part1 in [True, False]:
            for apply_scale_to in ["f_obs", "f_model"]:
              # Simulate Fobs START
              x = random_structure.xray_structure(
                space_group_info       = sgtbx.space_group_info(symbol=symbol),
                elements               =(("O","N","C")*3+("H",)*10),
                volume_per_atom        = 50,
                min_distance           = 3,
                general_positions_only = True,
                random_u_iso           = True,
                random_occupancy       = False)
              x.scattering_type_registry(table="wk1995")
              x.set_occupancies(value=0.8, selection = x.hd_selection())
              f_calc = x.structure_factors(d_min = 2.0).f_calc()
              mask_manager = mmtbx.masks.manager(miller_array = f_calc)
              f_mask = mask_manager.shell_f_masks(xray_structure = x)[0]
              assert flex.mean(abs(f_mask).data()) > 0
              b_cart=adptbx.random_traceless_symmetry_constrained_b_cart(
                crystal_symmetry=x.crystal_symmetry())
              u_star = adptbx.u_cart_as_u_star(x.unit_cell(),
                adptbx.b_as_u(b_cart))
              k_anisotropic = mmtbx.f_model.ext.k_anisotropic(f_calc.indices(),
                u_star)
              ss = 1./flex.pow2(f_calc.d_spacings().data()) / 4.
              k_mask = mmtbx.f_model.ext.k_mask(ss, k_sol, b_sol)
              if(apply_scale_to=="f_model"):
                k_isotropic = flex.double(f_calc.data().size(), scale)
              else:
                k_isotropic = flex.double(f_calc.data().size(), 1)
              # NOTE(review): k_isotropic set above is not referenced below;
              # f_model_data multiplies by `scale` directly — confirm intended.
              f_model_data = scale*k_anisotropic*(f_calc.data()+k_mask*f_mask.data())
              f_model = f_calc.customized_copy(data = f_model_data)
              f_obs = abs(f_model)
              if(apply_scale_to=="f_obs"):
                f_obs = f_obs.customized_copy(data = f_obs.data()*scale)
              r_free_flags = f_obs.generate_r_free_flags()
              # Simulate Fobs END
              # Perturb the model so scaling has real work to do.
              if(set_h_occ_to_zero):
                x.set_occupancies(value=0.0, selection = x.hd_selection())
              x.shake_sites_in_place(mean_distance=5)
              sel = x.random_remove_sites_selection(fraction=0.3)
              x = x.select(sel)
              fmodel = mmtbx.f_model.manager(
                xray_structure = x,
                f_obs          = f_obs,
                r_free_flags   = r_free_flags)
              fmodel.update_all_scales(fast=True, show=False,
                update_f_part1=update_f_part1)
              # Inject a random f_part1 contribution.
              f_part1_data = fmodel.f_calc().data()*flex.random_double(
                fmodel.f_calc().data().size())
              f_part1 = fmodel.f_calc().customized_copy(data = f_part1_data)
              fmodel.update(f_part1 = f_part1)
              r1 = fmodel.r_work()
              #
              # Re-instantiate from the decomposed parts (zero mask, scales
              # copied verbatim); R-work must be unchanged.
              zero=fmodel.f_calc().customized_copy(data=fmodel.f_calc().data()*0)
              fmodel_dc = mmtbx.f_model.manager(
                f_obs         = fmodel.f_obs(),
                r_free_flags  = fmodel.r_free_flags(),
                k_isotropic   = fmodel.k_isotropic(),
                k_anisotropic = fmodel.k_anisotropic(),
                f_calc        = fmodel.f_model_no_scales(),
                f_part1       = fmodel.f_part1(),
                f_part2       = fmodel.f_part2(),
                f_mask        = zero)
              r2 = fmodel_dc.r_work()
              if(0):  # debug output, disabled
                print "r1=%8.6f r2=%8.6f fp1=%6.3f fp2=%6.3f fc=%6.3f"%(r1, r2,
                  flex.mean(abs(fmodel.f_part1()).data()), \
                  flex.mean(abs(fmodel.f_part2()).data()), \
                  flex.mean(abs(fmodel.f_calc()).data())), \
                  "set_h_occ_to_zero=", set_h_occ_to_zero,\
                  "update_f_part1=", update_f_part1
              assert approx_equal(r1, r2), [r1, r2]
def random_aniso_adp(space_group, unit_cell, u_scale=2, u_min=0):
  """Return a random anisotropic ADP as u_cart, symmetrized for space_group.

  A random u_cart is drawn, converted to u_star, averaged over the space
  group (so it obeys the site-symmetry constraints), and converted back.
  """
  raw_u_cart = adptbx.random_u_cart(u_scale=u_scale, u_min=u_min)
  raw_u_star = adptbx.u_cart_as_u_star(unit_cell, raw_u_cart)
  symmetrized = space_group.average_u_star(u_star=raw_u_star)
  return adptbx.u_star_as_u_cart(unit_cell, symmetrized)
class basic_analyses(object): # XXX is this ever used?
  """Basic data-quality analysis of a Miller array: Matthews/solvent content,
  I/sigI strength, isotropic and anisotropic ML Wilson scaling, anisotropy
  correction, kernel normalisation and basic intensity statistics.

  Results are stored as attributes (matthews_results, data_strength,
  iso_scale_and_b, aniso_scale_and_b, no_aniso_array, normalised_miller,
  basic_data_stats, ...); several fields of phil_object are updated in place.
  """
  def __init__(self, miller_array, phil_object, out=None, out_plot=None,
               miller_calc=None, original_intensities=None,
               completeness_as_non_anomalous=None, verbose=0):
    if out is None:
      out = sys.stdout
    if verbose > 0:
      print >> out
      print >> out
      print >> out, "Matthews coefficient and Solvent content statistics"
    n_copies_solc = 1.0
    self.nres_known = False
    # Composition: explicit n_residues/n_bases take precedence over a
    # sequence file; otherwise the sequence file (if any) is parsed.
    if (phil_object.scaling.input.asu_contents.n_residues is not None or
        phil_object.scaling.input.asu_contents.n_bases is not None):
      self.nres_known = True
      if (phil_object.scaling.input.asu_contents.sequence_file is not None):
        print >> out, " warning: ignoring sequence file"
    elif (phil_object.scaling.input.asu_contents.sequence_file is not None):
      print >> out, " determining composition from sequence file %s" % \
        phil_object.scaling.input.asu_contents.sequence_file
      seq_comp = iotbx.bioinformatics.composition_from_sequence_file(
        file_name=phil_object.scaling.input.asu_contents.sequence_file,
        log=out)
      if (seq_comp is not None):
        phil_object.scaling.input.asu_contents.n_residues = seq_comp.n_residues
        phil_object.scaling.input.asu_contents.n_bases = seq_comp.n_bases
        self.nres_known = True
    # Matthews coefficient / solvent content; writes estimates back into
    # phil_object.
    matthews_results = matthews.matthews_rupp(
      crystal_symmetry=miller_array,
      n_residues=phil_object.scaling.input.asu_contents.n_residues,
      n_bases=phil_object.scaling.input.asu_contents.n_bases,
      out=out, verbose=1)
    phil_object.scaling.input.asu_contents.n_residues = matthews_results[0]
    phil_object.scaling.input.asu_contents.n_bases = matthews_results[1]
    n_copies_solc = matthews_results[2]
    self.matthews_results = matthews_results
    # User-specified copy count overrides the Matthews estimate.
    if phil_object.scaling.input.asu_contents.n_copies_per_asu is not None:
      n_copies_solc = phil_object.scaling.input.asu_contents.n_copies_per_asu
      self.defined_copies = n_copies_solc
      if verbose > 0:
        print >> out, "Number of copies per asymmetric unit provided"
        print >> out, " Will use user specified value of ", n_copies_solc
    else:
      phil_object.scaling.input.asu_contents.n_copies_per_asu = n_copies_solc
      self.guessed_copies = n_copies_solc
    # first report on I over sigma
    miller_array_new = miller_array
    self.data_strength = None
    miller_array_intensities = miller_array
    if (original_intensities is not None):
      assert original_intensities.is_xray_intensity_array()
      miller_array_intensities = original_intensities
    if miller_array_intensities.sigmas() is not None:
      data_strength = data_statistics.i_sigi_completeness_stats(
        miller_array_intensities,
        isigi_cut=phil_object.scaling.input.parameters.
          misc_twin_parameters.twin_test_cuts.isigi_cut,
        completeness_cut=phil_object.scaling.input.parameters.
          misc_twin_parameters.twin_test_cuts.completeness_cut,
        completeness_as_non_anomalous=completeness_as_non_anomalous)
      data_strength.show(out)
      self.data_strength = data_strength
      # Fill in the high-resolution twin-test cutoff if the user left it unset.
      if phil_object.scaling.input.parameters.misc_twin_parameters.twin_test_cuts.high_resolution is None:
        if data_strength.resolution_cut > data_strength.resolution_at_least:
          phil_object.scaling.input.parameters.misc_twin_parameters.twin_test_cuts.high_resolution = data_strength.resolution_at_least
        else:
          phil_object.scaling.input.parameters.misc_twin_parameters.twin_test_cuts.high_resolution = data_strength.resolution_cut

    ## Isotropic wilson scaling
    if verbose > 0:
      print >> out
      print >> out
      print >> out, "Maximum likelihood isotropic Wilson scaling "

    n_residues = phil_object.scaling.input.asu_contents.n_residues
    n_bases = phil_object.scaling.input.asu_contents.n_bases
    if n_residues is None:
      n_residues = 0
    if n_bases is None:
      n_bases = 0
    if n_bases + n_residues == 0:
      raise Sorry("No scatterers available")
    # Scale composition to the full unit cell (order_z * copies per ASU).
    iso_scale_and_b = absolute_scaling.ml_iso_absolute_scaling(
      miller_array=miller_array_new,
      n_residues=n_residues * miller_array.space_group().order_z() * n_copies_solc,
      n_bases=n_bases * miller_array.space_group().order_z() * n_copies_solc)
    iso_scale_and_b.show(out=out, verbose=verbose)
    self.iso_scale_and_b = iso_scale_and_b
    ## Store the b and scale values from isotropic ML scaling
    self.iso_p_scale = iso_scale_and_b.p_scale
    self.iso_b_wilson = iso_scale_and_b.b_wilson

    ## Anisotropic ml wilson scaling
    if verbose > 0:
      print >> out
      print >> out
      print >> out, "Maximum likelihood anisotropic Wilson scaling "
    aniso_scale_and_b = absolute_scaling.ml_aniso_absolute_scaling(
      miller_array=miller_array_new,
      n_residues=n_residues * miller_array.space_group().order_z() * n_copies_solc,
      n_bases=n_bases * miller_array.space_group().order_z() * n_copies_solc)
    aniso_scale_and_b.show(out=out, verbose=1)
    self.aniso_scale_and_b = aniso_scale_and_b
    # Anisotropic scaling can fail to produce b_cart; bail out gracefully.
    try:
      b_cart = aniso_scale_and_b.b_cart
    except AttributeError, e:
      print >> out, "*** ERROR ***"
      print >> out, str(e)
      show_exception_info_if_full_testing()
      return
    self.aniso_p_scale = aniso_scale_and_b.p_scale
    self.aniso_u_star = aniso_scale_and_b.u_star
    self.aniso_b_cart = aniso_scale_and_b.b_cart
    # XXX: for GUI
    self.overall_b_cart = getattr(aniso_scale_and_b, "overall_b_cart", None)

    ## Correcting for anisotropy
    if verbose > 0:
      print >> out, "Correcting for anisotropy in the data"
      print >> out
    b_cart_observed = aniso_scale_and_b.b_cart
    b_trace_average = (b_cart_observed[0] +
                       b_cart_observed[1] +
                       b_cart_observed[2]) / 3.0
    b_trace_min = b_cart_observed[0]
    if b_cart_observed[1] < b_trace_min: b_trace_min = b_cart_observed[1]
    if b_cart_observed[2] < b_trace_min: b_trace_min = b_cart_observed[2]
    # Choose the overall B to leave in the corrected data.
    if phil_object.scaling.input.optional.aniso.final_b == "eigen_min":
      b_use = aniso_scale_and_b.eigen_values[2]
    elif phil_object.scaling.input.optional.aniso.final_b == "eigen_mean":
      b_use = flex.mean(aniso_scale_and_b.eigen_values)
    elif phil_object.scaling.input.optional.aniso.final_b == "user_b_iso":
      assert phil_object.scaling.input.optional.aniso.b_iso is not None
      b_use = phil_object.scaling.input.optional.aniso.b_iso
    else:
      b_use = 30

    # Negative isotropic B: put back b_use after removing the full anisotropy.
    b_cart_aniso_removed = [-b_use, -b_use, -b_use, 0, 0, 0]
    u_star_aniso_removed = adptbx.u_cart_as_u_star(
      miller_array.unit_cell(), adptbx.b_as_u(b_cart_aniso_removed))
    ## I do things in two steps, but can easely be done in 1 step
    ## just for clarity, thats all.
    self.no_aniso_array = absolute_scaling.anisotropic_correction(
      miller_array_new, 0.0, aniso_scale_and_b.u_star)
    self.no_aniso_array = absolute_scaling.anisotropic_correction(
      self.no_aniso_array, 0.0, u_star_aniso_removed)
    self.no_aniso_array = self.no_aniso_array.set_observation_type(
      miller_array)

    ## Make normalised structure factors please
    # Guard against overflow artefacts from the correction.
    sel_big = self.no_aniso_array.data() > 1.e+50
    self.no_aniso_array = self.no_aniso_array.array(
      data=self.no_aniso_array.data().set_selected(sel_big, 0))
    self.no_aniso_array = self.no_aniso_array.set_observation_type(
      miller_array)
    normalistion = absolute_scaling.kernel_normalisation(
      self.no_aniso_array, auto_kernel=True)
    self.normalised_miller = normalistion.normalised_miller.deep_copy()

    self.phil_object = phil_object

    ## Some basic statistics and sanity checks follow
    if verbose > 0:
      print >> out, "Some basic intensity statistics follow."
      print >> out
    basic_data_stats = data_statistics.basic_intensity_statistics(
      miller_array,
      aniso_scale_and_b.p_scale,
      aniso_scale_and_b.u_star,
      iso_scale_and_b.scat_info,
      out=out,
      out_plot=out_plot)
    self.basic_data_stats = basic_data_stats
    self.miller_array = basic_data_stats.new_miller

    #relative wilson plot
    self.rel_wilson = None
    if (miller_calc is not None) and (miller_calc.d_min() < 4.0):
      try:
        self.rel_wilson = relative_wilson.relative_wilson(
          miller_obs=miller_array,
          miller_calc=miller_calc)
      except RuntimeError, e:
        print >> out, "*** Error calculating relative Wilson plot - skipping."
        print >> out, ""
def run(args, command_name = "phenix.tls"):
  """Command-line driver for phenix.tls: read a PDB file, collect TLS group
  selections (from the PDB header REMARK 3 or from parameters), then either
  fit TLS matrices to B-factors (extract_tls) or fold TLS into individual
  ADPs (combine_tls), writing a new PDB file.

  :param args: command-line argument strings
  :param command_name: program name used in the usage message
  :raises Sorry: on missing symmetry/PDB file, conflicting selections, etc.
  """
  if(len(args) == 0): args = ["--help"]
  usage_fmt = "%s pdb_file [parameters: file or command line string]"
  des_fmt = "Example: %s model.pdb fit_tls_to.selection='%s' fit_tls_to.selection='%s'"
  command_line = (iotbx_option_parser(
    usage = usage_fmt % command_name,
    description = banner)
    .option("--show_defaults",
      action="store_true",
      help="Do not output to the screen (except errors).")
    .option("--silent",
      action="store_true",
      help="Suppress output to the screen.")
    ).process(args=args)
  #
  log = sys.stdout
  if(not command_line.options.silent):
    utils.print_header("TLS tools", out = log)
  if(command_line.options.show_defaults):
    master_params.show(out = log)
    print >> log
    return
  if(not command_line.options.silent):
    print >> log, banner
  #
  # Parse remaining args into phil parameters and input files.
  processed_args = utils.process_command_line_args(args = command_line.args,
    master_params = master_params, log = log)
  reflection_files = processed_args.reflection_files
  if(processed_args.crystal_symmetry is None):
    raise Sorry("No crystal symmetry found.")
  if(len(processed_args.pdb_file_names) == 0):
    raise Sorry("No PDB file found.")
  params = processed_args.params
  if(not command_line.options.silent):
    utils.print_header("Input parameters", out = log)
    params.show(out = log)
  params = params.extract()
  #
  if(processed_args.crystal_symmetry.unit_cell() is None or
     processed_args.crystal_symmetry.space_group() is None):
    raise Sorry("No CRYST1 record found.")
  mmtbx_pdb_file = utils.pdb_file(
    pdb_file_names   = processed_args.pdb_file_names,
    cif_objects      = processed_args.cif_objects,
    crystal_symmetry = processed_args.crystal_symmetry,
    log              = log)
  #
  # TLS groups recorded in the PDB header (REMARK 3).
  if(not command_line.options.silent):
    utils.print_header("TLS groups from PDB file header", out = log)
  pdb_inp_tls = mmtbx.tls.tools.tls_from_pdb_inp(
    remark_3_records = mmtbx_pdb_file.pdb_inp.extract_remark_iii_records(3),
    pdb_hierarchy = mmtbx_pdb_file.pdb_inp.construct_hierarchy())
  #
  tls_groups = []
  if(pdb_inp_tls.tls_present):
    if(pdb_inp_tls.error_string is not None):
      raise Sorry(pdb_inp_tls.error_string)
    mmtbx_pdb_file.set_ppf()
    xray_structure = get_xrs_helper(mmtbx_pdb_file = mmtbx_pdb_file, log = log,
      silent = command_line.options.silent)
    pdb_tls = mmtbx.tls.tools.extract_tls_from_pdb(
      pdb_inp_tls = pdb_inp_tls,
      all_chain_proxies = mmtbx_pdb_file.processed_pdb_file.all_chain_proxies,
      xray_structure = xray_structure)
    tls_groups = pdb_tls.pdb_inp_tls.tls_params
  #
  tls_selections_strings = []
  #
  if(len(tls_groups) == 0 and not command_line.options.silent):
    print >> log, "No TLS groups found in PDB file header."
  else:
    for i_seq, tls_group in enumerate(tls_groups):
      tls_selections_strings.append(tls_group.selection_string)
      if(not command_line.options.silent):
        print >> log, "TLS group %d: %s" % (i_seq+1, tls_group.selection_string)
        mmtbx.tls.tools.show_tls_one_group(tlso = tls_group, out = log)
        print >> log
  #
  # Header selections and parameter selections are mutually exclusive.
  if(len(tls_selections_strings) > 0 and len(params.selection) > 0):
    raise Sorry("Two TLS selection sources found: PDB file header and parameters.")
  if(len(params.selection) > 0):
    tls_selections_strings = params.selection
    xray_structure = get_xrs_helper(mmtbx_pdb_file = mmtbx_pdb_file, log = log,
      silent = command_line.options.silent)
  if([params.combine_tls, params.extract_tls].count(True) > 1):
    raise Sorry("Cannot simultaneously pereform: combine_tls and extract_tls")
  if([params.combine_tls, params.extract_tls].count(True) > 0):
    if(len(tls_selections_strings)==0):
      raise Sorry("No TLS selections found.")
  #
  # Convert selection strings to atom selections and report them.
  if(len(tls_selections_strings)):
    if(not command_line.options.silent):
      utils.print_header("TLS groups selections", out = log)
    selections = utils.get_atom_selections(
      all_chain_proxies = mmtbx_pdb_file.processed_pdb_file.all_chain_proxies,
      selection_strings = tls_selections_strings,
      xray_structure = xray_structure)
    if(not command_line.options.silent):
      print >> log, "Number of TLS groups: ", len(selections)
      print >> log, "Number of atoms: %d" % xray_structure.scatterers().size()
    n_atoms_in_tls = 0
    for sel_a in selections:
      n_atoms_in_tls += sel_a.size()
    if(not command_line.options.silent):
      print >> log, "Number of atoms in TLS groups: %d" % n_atoms_in_tls
      print >> log
    assert len(tls_selections_strings) == len(selections)
    if(not command_line.options.silent):
      for sel_a, sel_s in zip(selections,tls_selections_strings):
        print >> log, "Selection string:\n%s" % sel_s
        print >> log, "selects %d atoms." % sel_a.size()
        print >> log
      print >> log, "Ready-to-use in phenix.refine:\n"
      for sel_a, sel_s in zip(selections,tls_selections_strings):
        print >> log, sel_s
  #
  # Derive the output file name from the input PDB name unless given.
  ofn = params.output_file_name
  if(ofn is None):
    ofn = os.path.splitext(os.path.basename(processed_args.pdb_file_names[0]))[0]
    if(len(processed_args.pdb_file_names) > 1):
      ofn = ofn+"_el_al"
    if(params.combine_tls):
      ofn = ofn+"_combine_tls.pdb"
    elif(params.extract_tls):
      ofn = ofn+"_extract_tls.pdb"
    else:
      ofn = None
  if(ofn is not None):
    ofo = open(ofn, "w")
  #
  if(params.extract_tls):
    utils.print_header(
      "Fit TLS matrices to B-factors of selected sets of atoms", out = log)
    tlsos = mmtbx.tls.tools.generate_tlsos(
      selections     = selections,
      xray_structure = xray_structure,
      value          = 0.0)
    # Alternate which of T, L, S is refined over repeated macro-cycles.
    for rt,rl,rs in [[1,0,1],[1,1,1],[0,1,1],
                     [1,0,0],[0,1,0],[0,0,1],[1,1,1],
                     [0,0,1]]*10:
      tlsos = mmtbx.tls.tools.tls_from_uanisos(
        xray_structure               = xray_structure,
        selections                   = selections,
        tlsos_initial                = tlsos,
        number_of_macro_cycles       = 10,
        max_iterations               = 100,
        refine_T                     = rt,
        refine_L                     = rl,
        refine_S                     = rs,
        enforce_positive_definite_TL = params.enforce_positive_definite_TL,
        verbose                      = -1,
        out                          = log)
    mmtbx.tls.tools.show_tls(tlsos = tlsos, out = log)
    u_cart_from_tls = mmtbx.tls.tools.u_cart_from_tls(
      sites_cart = xray_structure.sites_cart(),
      selections = selections,
      tlsos      = tlsos)
    # Subtract the TLS contribution from each scatterer's ADP.
    unit_cell = xray_structure.unit_cell()
    for i_seq, sc in enumerate(xray_structure.scatterers()):
      if(u_cart_from_tls[i_seq] != (0,0,0,0,0,0)):
        u_star_tls = adptbx.u_cart_as_u_star(unit_cell,
          tuple(u_cart_from_tls[i_seq]))
        sc.u_star = tuple(flex.double(sc.u_star) - flex.double(u_star_tls))
    for sel in selections:
      xray_structure.convert_to_isotropic(selection = sel)
    mmtbx.tls.tools.remark_3_tls(tlsos = tlsos,
      selection_strings = tls_selections_strings, out = ofo)
  #
  if(params.combine_tls):
    utils.print_header("Combine B_tls with B_residual", out = log)
    mmtbx.tls.tools.combine_tls_and_u_local(xray_structure = xray_structure,
      tls_selections = selections, tls_groups = tls_groups)
    print >> log, "All done."
  #
  if(ofn is not None):
    utils.print_header("Write output PDB file %s"%ofn, out = log)
    utils.write_pdb_file(
      xray_structure = xray_structure,
      pdb_hierarchy  = mmtbx_pdb_file.processed_pdb_file.all_chain_proxies.pdb_hierarchy,
      out            = ofo)
    ofo.close()
  print >> log, "All done."
def exercise(space_group_info,
             n_elements=10,
             table="wk1995",
             d_min=2.0,
             k_sol=0.35,
             b_sol=45.0,
             b_cart=None,
             quick=False,
             verbose=0):
  """Compare analytical site gradients of each refinement target against
  finite differences for a randomly shaken structure, with and without
  anomalous scattering.

  NOTE(review): the b_cart argument is unconditionally overwritten below —
  confirm the parameter is intentionally unused.
  """
  xray_structure = random_structure.xray_structure(
    space_group_info=space_group_info,
    elements=(("O", "N", "C") * (n_elements // 3 + 1))[:n_elements],
    volume_per_atom=100,
    min_distance=1.5,
    general_positions_only=True,
    random_u_iso=False,
    random_occupancy=False)
  xray_structure.scattering_type_registry(table=table)
  sg = xray_structure.space_group()
  uc = xray_structure.unit_cell()
  # Random symmetry-averaged overall anisotropic B.
  u_cart_1 = adptbx.random_u_cart(u_scale=5, u_min=5)
  u_star_1 = adptbx.u_cart_as_u_star(uc, u_cart_1)
  b_cart = adptbx.u_star_as_u_cart(uc, sg.average_u_star(u_star=u_star_1))
  for anomalous_flag in [False, True]:
    scatterers = xray_structure.scatterers()
    if (anomalous_flag):
      assert scatterers.size() >= 7
      for i in [1, 7]:
        scatterers[i].fp = -0.2
        scatterers[i].fdp = 5
      have_non_zero_fdp = True
    else:
      for i in [1, 7]:
        scatterers[i].fp = 0
        scatterers[i].fdp = 0
      have_non_zero_fdp = False
    f_obs = abs(xray_structure.structure_factors(
      d_min=d_min,
      anomalous_flag=anomalous_flag,
      cos_sin_table=sfg_params.cos_sin_table,
      algorithm=sfg_params.algorithm).f_calc())
    f_obs_comp = f_obs.structure_factors_from_scatterers(
      xray_structure=xray_structure,
      algorithm=sfg_params.algorithm,
      cos_sin_table=sfg_params.cos_sin_table).f_calc()
    f_obs = abs(f_obs_comp)
    flags = f_obs.generate_r_free_flags(fraction=0.1, max_free=99999999)
    #flags = flags.array(data = flex.bool(f_obs.data().size(), False))
    # Shake the sites so gradients are non-trivial.
    xrs = xray_structure.deep_copy_scatterers()
    xrs.shake_sites_in_place(rms_difference=0.3)
    for target in mmtbx.refinement.targets.target_names:
      if target == "mli": continue
      if (quick):
        if (target not in ["ls_wunit_k1", "ml", "mlhl", "ml_sad"]):
          continue
      if (target == "mlhl"):
        if (have_non_zero_fdp): continue # XXX gradients not correct!
        experimental_phases = generate_random_hl(miller_set=f_obs)
      else:
        experimental_phases = None
      # ml_sad needs anomalous data and the optional phaser extension.
      if (target == "ml_sad" and
          (not anomalous_flag or mmtbx.refinement.targets.phaser is None)):
        continue
      print(" ", target)
      xray.set_scatterer_grad_flags(scatterers=xrs.scatterers(), site=True)
      ss = 1. / flex.pow2(f_obs.d_spacings().data()) / 4.
      u_star = adptbx.u_cart_as_u_star(f_obs.unit_cell(),
        adptbx.b_as_u(b_cart))
      k_anisotropic = mmtbx.f_model.ext.k_anisotropic(
        f_obs.indices(), u_star)
      k_mask = mmtbx.f_model.ext.k_mask(ss, k_sol, b_sol)
      fmodel = mmtbx.f_model.manager(
        xray_structure=xrs,
        f_obs=f_obs,
        r_free_flags=flags,
        target_name=target,
        abcd=experimental_phases,
        sf_and_grads_accuracy_params=sfg_params,
        k_mask=k_mask,
        k_anisotropic=k_anisotropic,
        mask_params=masks.mask_master_params.extract())
      fmodel.update_xray_structure(xray_structure=xrs,
        update_f_calc=True, update_f_mask=True)
      xray.set_scatterer_grad_flags(
        scatterers=fmodel.xray_structure.scatterers(),
        site=True)
      fmodel.update_xray_structure(update_f_calc=True)
      t_f = fmodel.target_functor()
      t_f.prepare_for_minimization()
      # Analytical vs finite-difference site gradients.
      gs = t_f(compute_gradients=True).d_target_d_site_cart().as_double()
      gfd = finite_differences_site(target_functor=t_f)
      cc = flex.linear_correlation(gs, gfd).coefficient()
      if (0 or verbose):
        print("ana:", list(gs))
        print("fin:", list(gfd))
        print("rat:", [f / a for a, f in zip(gs, gfd)])
        print(target, "corr:", cc, space_group_info)
        print()
      # Normalized difference must vanish and correlation must be ~1.
      diff = gs - gfd
      diff /= max(1, flex.max(flex.abs(gfd)))
      tolerance = 1.2e-5
      assert approx_equal(abs(flex.min(diff)), 0.0, tolerance)
      assert approx_equal(abs(flex.mean(diff)), 0.0, tolerance)
      assert approx_equal(abs(flex.max(diff)), 0.0, tolerance)
      assert approx_equal(cc, 1.0, tolerance)
      fmodel.model_error_ml()
def exercise_interface():
  """Exercise the adptbx conversion/analysis API: B<->U conversion, round
  trips between u_cart/u_star/u_cif/beta, isotropic factorization,
  Debye-Waller factors, eigenvalue analysis and error handling."""
  episq = 8 * (math.pi ** 2)  # B = 8*pi^2 * U
  assert approx_equal(adptbx.u_as_b(2.3), 2.3 * episq)
  assert approx_equal(adptbx.b_as_u(adptbx.u_as_b(2.3)), 2.3)
  u = (3, 4, 9, 2, 1, 7)
  assert approx_equal(adptbx.u_as_b(u), [x * episq for x in u])
  assert approx_equal(adptbx.b_as_u(adptbx.u_as_b(u)), u)
  uc = uctbx.unit_cell((5, 4, 7, 80, 110, 100))
  # Forward/backward conversions must round-trip.
  for fw, bw in (
    (adptbx.u_cif_as_u_star, adptbx.u_star_as_u_cif),
    (adptbx.u_cart_as_u_star, adptbx.u_star_as_u_cart),
    (adptbx.u_cart_as_u_cif, adptbx.u_cif_as_u_cart),
    (adptbx.u_cart_as_beta, adptbx.beta_as_u_cart),
    (adptbx.u_cif_as_beta, adptbx.beta_as_u_cif),
  ):
    assert approx_equal(bw(uc, fw(uc, u)), u)
  assert approx_equal(adptbx.beta_as_u_star(adptbx.u_star_as_beta(u)), u)
  assert approx_equal(adptbx.u_cart_as_u_iso(adptbx.u_iso_as_u_cart(2.3)), 2.3)
  # Isotropic <-> anisotropic round trips.
  for fw, bw in (
    (adptbx.u_iso_as_u_star, adptbx.u_star_as_u_iso),
    (adptbx.u_iso_as_u_cif, adptbx.u_cif_as_u_iso),
    (adptbx.u_iso_as_beta, adptbx.beta_as_u_iso),
  ):
    assert approx_equal(bw(uc, fw(uc, 2.3)), 2.3)
  # factor_* splits a tensor into isotropic part + remainder.
  fc = adptbx.factor_u_cart_u_iso(u_cart=u)
  assert approx_equal(fc.u_iso, adptbx.u_cart_as_u_iso(u))
  assert approx_equal(fc.u_cart_minus_u_iso,
    [uii - fc.u_iso for uii in u[:3]] + list(u[3:]))
  f = adptbx.factor_u_star_u_iso(
    unit_cell=uc, u_star=adptbx.u_cart_as_u_star(uc, u))
  assert approx_equal(f.u_iso, fc.u_iso)
  assert approx_equal(f.u_star_minus_u_iso,
    adptbx.u_cart_as_u_star(uc, fc.u_cart_minus_u_iso))
  f = adptbx.factor_u_cif_u_iso(
    unit_cell=uc, u_cif=adptbx.u_cart_as_u_cif(uc, u))
  assert approx_equal(f.u_iso, fc.u_iso)
  assert approx_equal(f.u_cif_minus_u_iso,
    adptbx.u_cart_as_u_cif(uc, fc.u_cart_minus_u_iso))
  f = adptbx.factor_beta_u_iso(
    unit_cell=uc, beta=adptbx.u_cart_as_beta(uc, u))
  assert approx_equal(f.u_iso, fc.u_iso)
  assert approx_equal(f.beta_minus_u_iso,
    adptbx.u_cart_as_beta(uc, fc.u_cart_minus_u_iso))
  # Debye-Waller factors: all parameterizations must agree.
  assert approx_equal(adptbx.debye_waller_factor_b_iso(0.25, 2.3),
    math.exp(-2.3 * 0.25))
  assert approx_equal(adptbx.debye_waller_factor_u_iso(0.25, 2.3),
    math.exp(-2.3 * episq * 0.25))
  assert approx_equal(
    adptbx.debye_waller_factor_b_iso(uc, (1, 2, 3), 2.3),
    adptbx.debye_waller_factor_u_iso(uc, (1, 2, 3), 2.3 / episq),
  )
  u_star = adptbx.u_cart_as_u_star(uc, u)
  dw = adptbx.debye_waller_factor_u_star((1, 2, 3), u_star)
  assert approx_equal(dw,
    adptbx.debye_waller_factor_beta((1, 2, 3),
      adptbx.u_star_as_beta(u_star)))
  assert approx_equal(dw,
    adptbx.debye_waller_factor_u_cif(uc, (1, 2, 3),
      adptbx.u_star_as_u_cif(uc, u_star)))
  assert approx_equal(dw,
    adptbx.debye_waller_factor_u_cart(uc, (1, 2, 3),
      adptbx.u_star_as_u_cart(uc, u_star)))
  # Eigenvalues and positive-definiteness with tolerances.
  for e in adptbx.eigenvalues(u):
    check_eigenvalue(u, e)
  assert not adptbx.is_positive_definite(adptbx.eigenvalues(u))
  assert not adptbx.is_positive_definite(adptbx.eigenvalues(u), 0)
  assert adptbx.is_positive_definite(adptbx.eigenvalues(u), 1.22)
  assert not adptbx.is_positive_definite(u)
  assert not adptbx.is_positive_definite(u, 0)
  assert adptbx.is_positive_definite(u, 1.22)
  up = (0.534, 0.812, 0.613, 0.0166, 0.134, -0.0124)
  s = adptbx.eigensystem(up)
  assert approx_equal(s.values(), (0.813132, 0.713201, 0.432668))
  for i in xrange(3):
    check_eigenvector(up, s.values()[i], s.vectors(i))
  c = (1, 2, 3, 3, -4, 5, 4, 5, 6)
  v = (198, 18, 1020, 116, 447, 269)
  assert approx_equal(adptbx.c_u_c_transpose(c, u), v)
  assert approx_equal(adptbx.eigensystem(u).values(),
    (14.279201519086316, 2.9369143826320214, -1.2161159017183376))
  s = adptbx.eigensystem(up)
  # Out-of-range eigenvector index must raise.
  try:
    s.vectors(4)
  except RuntimeError, e:
    assert str(e).endswith("Index out of range.")
def exercise_5_bulk_sol_and_scaling_and_H(symbol = "C 2"):
  """Regression test for hydrogen-contribution scaling (k_h, b_h): synthetic
  f_obs built with known scales must be recovered by the grid search and by
  update_all_scales (slow and fast paths), for several H occupancy settings.
  """
  random.seed(0)
  flex.set_random_seed(0)
  x = random_structure.xray_structure(
    space_group_info       = sgtbx.space_group_info(symbol=symbol),
    elements               =(("O","N","C")*5+("H",)*10),
    volume_per_atom        = 200,
    min_distance           = 1.5,
    general_positions_only = True,
    random_u_iso           = True,
    random_occupancy       = False)
  x.scattering_type_registry(table="wk1995")
  x.set_occupancies(value=0.6, selection = x.hd_selection())
  f_calc = x.structure_factors(d_min = 1.5, algorithm="direct").f_calc()
  mask_manager = mmtbx.masks.manager(miller_array = f_calc)
  f_mask = mask_manager.shell_f_masks(xray_structure = x)[0]
  assert flex.mean(abs(f_mask).data()) > 0
  # Known scales used to synthesize the "observed" amplitudes.
  b_cart=adptbx.random_traceless_symmetry_constrained_b_cart(
    crystal_symmetry=x.crystal_symmetry())
  u_star = adptbx.u_cart_as_u_star(x.unit_cell(), adptbx.b_as_u(b_cart))
  k_anisotropic = mmtbx.f_model.ext.k_anisotropic(f_calc.indices(), u_star)
  ss = 1./flex.pow2(f_calc.d_spacings().data()) / 4.
  k_mask = mmtbx.f_model.ext.k_mask(ss, 0.37, 64.0)
  scale = 17.
  k_isotropic = flex.double(f_calc.data().size(), scale)
  f_model_data = scale*k_anisotropic*(f_calc.data()+k_mask*f_mask.data())
  f_model = f_calc.customized_copy(data = f_model_data)
  f_obs = abs(f_model)
  r_free_flags = f_obs.generate_r_free_flags()
  sfg_params = mmtbx.f_model.sf_and_grads_accuracy_master_params.extract()
  sfg_params.algorithm = "direct"
  # Each (H occupancy set in model, expected recovered k_h) pair: the model H
  # occupancy differs from the 0.6 used for f_obs, so k_h must absorb it.
  for it in [(0,0.6), (1,-0.4), (0.2,0.4), (-0.2,0.8)]:
    value, k_h_ = it
    x.set_occupancies(value=value, selection = x.hd_selection())
    fmodel = mmtbx.f_model.manager(
      xray_structure = x,
      f_obs          = f_obs,
      k_mask         = k_mask,
      k_anisotropic  = k_anisotropic,
      k_isotropic    = k_isotropic,
      r_free_flags   = r_free_flags,
      sf_and_grads_accuracy_params = sfg_params)
    fmodel_dc = fmodel.deep_copy()
    assert fmodel.r_work() > 0.02, fmodel.r_work()
    # Grid search must find the exact hydrogen scale and drive R to 0.
    fmodel.update_f_hydrogens_grid_search()
    assert approx_equal(fmodel.k_h, k_h_), [it, fmodel.k_h, fmodel.r_work()]
    assert approx_equal(fmodel.b_h, 0)
    assert approx_equal(fmodel.r_work(), 0)
    fmodel_dc.update_f_hydrogens()
    assert fmodel_dc.r_work() < 0.01
    # test 2: full scale refinement (slow path) must also recover k_h.
    fmodel = mmtbx.f_model.manager(
      xray_structure = x,
      f_obs          = f_obs,
      r_free_flags   = r_free_flags,
      sf_and_grads_accuracy_params = sfg_params)
    fmodel_dc = fmodel.deep_copy()
    assert fmodel.r_work() > 0.25
    fmodel.update_all_scales(cycles=6, fast=False, update_f_part1=False,
      refine_hd_scattering_method="slow")
    assert approx_equal(fmodel.k_h, k_h_)
    assert approx_equal(fmodel.b_h, 0)
    assert approx_equal(fmodel.r_work(), 0)
    fmodel_dc.update_all_scales(update_f_part1=False,
      refine_hd_scattering_method="fast")
    assert fmodel_dc.r_work() < 0.05
    # test 3: fast overall scaling with slow H refinement.
    fmodel = mmtbx.f_model.manager(
      xray_structure = x,
      f_obs          = f_obs,
      r_free_flags   = r_free_flags,
      sf_and_grads_accuracy_params = sfg_params)
    assert fmodel.r_work() > 0.25
    fmodel.update_all_scales(cycles=6, fast=True, show=False,
      update_f_part1=False, refine_hd_scattering_method="slow")
    assert approx_equal(fmodel.k_h, k_h_)
    assert approx_equal(fmodel.b_h, 0)
    assert fmodel.r_work() < 0.025
    # Smoke-test map coefficients and MTZ export side product.
    map_coeffs = fmodel.map_coefficients(map_type="2mFo-DFc")
    fmodel.export_f_obs_flags_as_mtz(file_name="tmp_tst_fmodel.mtz")
    assert os.path.isfile("tmp_tst_fmodel.mtz")
def u_star_minus_u_iso_ralf(unit_cell, u_star):
  """Remove the isotropic component from u_star and return the remainder.

  The tensor is taken to Cartesian space, its isotropic equivalent is
  subtracted from the three diagonal elements, and the result is converted
  back to u_star.
  """
  cart = adptbx.u_star_as_u_cart(unit_cell, u_star)
  iso = adptbx.u_cart_as_u_iso(cart)
  deviatoric = list(cart)
  for axis in (0, 1, 2):
    deviatoric[axis] = deviatoric[axis] - iso
  return adptbx.u_cart_as_u_star(unit_cell, deviatoric)