def linear_regression_test(d_analytical, d_numerical, test_hard=True,
      slope_tolerance=1.e-3, correlation_min=0.999, verbose=0):
  """Compare analytical vs finite-difference gradients by linear regression.

  A good match has regression slope ~1 and correlation ~1.  On mismatch the
  diagnostics are printed; the final assert fires only when test_hard.
  """
  # Coerce non-flex inputs (tuples/lists) into flex.double arrays.
  if (type(d_analytical) != type(flex.double())):
    d_analytical = flex_tuple_as_flex_double(d_analytical)
  if (type(d_numerical) != type(flex.double())):
    d_numerical = flex_tuple_as_flex_double(d_numerical)
  if (0 or verbose):  # change 0 to 1 to force debug output
    print "analytical:", tuple(d_analytical)
    print "numerical: ", tuple(d_numerical)
  # Both gradient sets identically zero: trivially consistent, nothing to fit.
  if (    flex.max(flex.abs(d_analytical)) == 0
      and flex.max(flex.abs(d_numerical)) == 0):
    return
  regr = flex.linear_regression(d_analytical, d_numerical)
  corr = flex.linear_correlation(d_analytical, d_numerical).coefficient()
  assert regr.is_well_defined()
  if (   abs(regr.slope() - 1) > slope_tolerance
      or corr < correlation_min):
    print "Error: finite difference mismatch:"
    print "slope:", regr.slope()
    print "correlation:", corr
    if (0 or verbose):
      for a, n in zip(d_analytical, d_numerical):
        print a, n
    # Soft mode (test_hard=False) only reports; hard mode aborts.
    assert not test_hard
def exercise_01(grid_step = 0.03, d_min = 1.0, wing_cutoff = 1.e-9): xrs = random_structure.xray_structure( space_group_info = sgtbx.space_group_info("P 1"), elements = ["O","N","C","P","S","U","AU"]*1, random_u_iso = True, general_positions_only = False) # avoid excessive_range_error_limit crash bs = xrs.extract_u_iso_or_u_equiv()*adptbx.u_as_b(1) sel = bs < 1 bs = bs.set_selected(sel, 1) xrs.set_b_iso(values = bs) # p = xrs.unit_cell().parameters() timer = user_plus_sys_time() res = manager(nx = int(p[0]/grid_step), ny = int(p[1]/grid_step), nz = int(p[2]/grid_step), scattering_type_registry = xrs.scattering_type_registry(), unit_cell = xrs.unit_cell(), scatterers = xrs.scatterers(), wing_cutoff = wing_cutoff) print "time: %10.4f" % (timer.elapsed()) f_calc_dir = xrs.structure_factors( d_min = d_min, algorithm = "direct").f_calc() # f_calc_den = f_calc_dir.structure_factors_from_map(map = res.density_array, use_scale = True) f1 = flex.abs(f_calc_dir.data()) f2 = flex.abs(f_calc_den.data()) r = flex.sum(flex.abs(f1-f2))/flex.sum(f2) print "r-factor:", r assert r < 1.e-4, r
def angle_deviations_z(self):
  '''
  Calculate rmsz of angle deviations

  Compute rmsz, the Root-Mean-Square of the z-scores for a set of data
  using z_i = {x_i - mu / sigma} and rmsz = sqrt(mean(z*z))

  x_i: actual bond angle
  mu: geometry restraints mean
  sigma: geometry restraints standard deviation
  z_i: z-score for bond i
  z: array of z_i

  The sigma and the (x_i - mu) are model constraints, geometry restraints.
  The function extracts them from self; they are not calculated from data.

  :returns:
    a_rmsz: rmsz, root mean square of the z-scores of all angles
    a_z_min/max: min/max values of z-scores
  '''
  if(self.n_angle_proxies is not None):
    angle_deltas = self.angle_proxies.proxy_select(origin_id=0).deltas(
      sites_cart=self.sites_cart)
    if len(angle_deltas) > 0:
      # NOTE(review): deltas come from the origin_id=0 subset while sigmas
      # iterate over all angle_proxies -- confirm the two stay in step when
      # proxies with other origin ids are present.
      sigmas = [geometry_restraints.weight_as_sigma(x.weight)
                for x in self.angle_proxies]
      z_scores = flex.double([(angle_delta/sigma)
        for angle_delta,sigma in zip(angle_deltas,sigmas)])
      a_rmsz = math.sqrt(flex.mean_default(z_scores*z_scores,0))
      a_z_max = flex.max_default(flex.abs(z_scores), 0)
      a_z_min = flex.min_default(flex.abs(z_scores), 0)
      return a_z_min, a_z_max, a_rmsz
    else:
      return 0,0,0
def exercise_recycle(space_group_info, anomalous_flag, n_scatterers=8,
                     d_min=2.5, verbose=0):
  # Round-trip several miller array flavors through recycle(): complex
  # F_calc, amplitudes (plain/E/R), amplitudes with sigmas (plain/EQ),
  # centric flags and Hendrickson-Lattman coefficients.
  f_calc = random_f_calc(
    space_group_info=space_group_info,
    n_scatterers=n_scatterers,
    d_min=d_min,
    anomalous_flag=anomalous_flag,
    verbose=verbose)
  if f_calc is None:
    return
  recycle(f_calc, "f_calc", verbose=verbose)
  # Amplitudes, with and without special column types.
  for label, ctypes in (("f_obs", None), ("Ework", "E")):
    if anomalous_flag and ctypes == "E":
      continue  # E columns are not exercised for anomalous data
    recycle(
      miller_array=abs(f_calc),
      column_root_label=label,
      column_types=ctypes,
      verbose=verbose)
  if not anomalous_flag:
    recycle(abs(f_calc), "f_obs", column_types="R", verbose=verbose)
  # Amplitudes carrying sigmas (10% of the data values).
  for label, ctypes in (("f_obs", None), ("Ework", "EQ")):
    if anomalous_flag and ctypes == "EQ":
      continue
    recycle(
      miller_array=miller.array(
        miller_set=f_calc,
        data=flex.abs(f_calc.data()),
        sigmas=flex.abs(f_calc.data()) / 10),
      column_root_label=label,
      column_types=ctypes,
      verbose=verbose)
  recycle(f_calc.centric_flags(), "cent", verbose=verbose)
  recycle(generate_random_hl(miller_set=f_calc), "prob", verbose=verbose)
def bond_deviations_z(self):
  '''
  Calculate rmsz of bond deviations

  Compute rmsz, the Root-Mean-Square of the z-scores for a set of data
  using z_i = {x_i - mu / sigma} and rmsz = sqrt(mean(z*z))

  x_i: actual bond length
  mu: geometry restraints mean
  sigma: geometry restraints standard deviation
  z_i: z-score for bond i
  z: array of z_i

  The sigma and the (x_i - mu) are model constraints, geometry restraints.
  The function extracts them from self; they are not calculated from data.

  :returns:
    b_rmsz: rmsz, root mean square of the z-scores of all bonds
    b_z_min/max: min/max absolute values of z-scores
  '''
  if(self.n_bond_proxies is not None):
    bond_deltas = self.bond_proxies.deltas(
      sites_cart=self.sites_cart, origin_id=0)
    if len(bond_deltas) >0:
      sigmas = [geometry_restraints.weight_as_sigma(x.weight)
                for x in self.bond_proxies.simple]
      z_scores = flex.double([(bond_delta/sigma)
        for bond_delta,sigma in zip(bond_deltas,sigmas)])
      b_rmsz = math.sqrt(flex.mean_default(z_scores*z_scores,0))
      b_z_max = flex.max_default(flex.abs(z_scores), 0)
      b_z_min = flex.min_default(flex.abs(z_scores), 0)
      return b_z_min, b_z_max, b_rmsz
    else:
      return 0,0,0
def plot_positions(values, positions, file_name, cmap=pyplot.cm.Reds,
                   vmin=None, vmax=None, invalid='white'):
  """Rasterize scattered (x, y) values onto a grid and render via plot_grid.

  The grid pitch is inferred from the smallest nonzero x-spacing; cells
  that receive no value keep the sentinel -2 (presumably drawn with the
  'invalid' color by plot_grid -- confirm against plot_grid).

  Fix: the original also computed and filtered dy, but never used it;
  that dead work is removed.
  """
  values = values.as_double()
  assert positions.size() >= values.size()
  positions = positions[:values.size()]
  if vmin is None:
    vmin = flex.min(values)
  if vmax is None:
    vmax = flex.max(values)
  x, y = positions.parts()
  # Infer the grid pitch from the smallest nonzero consecutive x-spacing.
  dx = flex.abs(x[1:] - x[:-1])
  dx = dx.select(dx > 0)
  scale = 1/flex.min(dx)
  #print scale
  x = (x * scale).iround()
  y = (y * scale).iround()
  from libtbx.math_utils import iceil
  # -2 marks cells with no data.
  z = flex.double(flex.grid(iceil(flex.max(y))+1, iceil(flex.max(x))+1), -2)
  #print z.all()
  for x_, y_, z_ in zip(x, y, values):
    z[y_, x_] = z_
  plot_grid(z.as_1d(), z.all(), file_name, cmap=cmap, vmin=vmin, vmax=vmax,
            invalid=invalid)
def test_grid_step(n_sites = 50, volume_per_atom = 50, d_min = 2.0):
  """Ordered-solvent distribution sanity check over several map grid steps.

  For each step: centric phases must satisfy the symmetry restrictions and
  the maximum water-contribution amplitude must stay below 1.
  """
  grid_step = (0.2,0.4,0.6,0.7,0.9,1.0)
  for step in grid_step:
    symmetry = crystal.symmetry(space_group_symbol="P1")
    structure = random_structure.xray_structure(
      space_group_info = symmetry.space_group_info(),
      elements=["C"]*n_sites,
      volume_per_atom=volume_per_atom,
      random_u_iso=False)
    fc = structure.structure_factors(
      d_min = d_min,
      anomalous_flag=False,
      algorithm="fft").f_calc()
    manager = max_like_non_uniform.ordered_solvent_distribution(
      structure = structure,
      fo = fc,
      grid_step = step)
    f_water_dist = manager.fcalc_from_distribution()
    ### check phase compatibility with the symmetry:
    centrics = f_water_dist.select_centric()
    if(centrics.indices().size() > 0):
      # Transferring phases onto themselves must be a no-op for a
      # symmetry-consistent array.
      ideal = centrics.phase_transfer(centrics)
      assert flex.max(flex.abs(ideal.data() - centrics.data())) < 1.e-6
    ###
    #print "max = ", flex.max( flex.abs( f_water_dist.data() ) )
    #print "min = ", flex.min( flex.abs( f_water_dist.data() ) )
    #print "ave = ", flex.mean( flex.abs( f_water_dist.data() ) )
    assert flex.max( flex.abs( f_water_dist.data() ) ) < 1.0
def exercise_SFweight_spline_core(structure, d_min, verbose=0):
  """Exercise clipper's SFweight_spline on synthetic data.

  f_obs comes from the full structure (sigmas = sqrt(F)); f_calc from the
  same structure with its last two scatterers removed.  Returns the
  SFweight_spline_interface object for further inspection.
  """
  structure.scattering_type_registry(d_min=d_min)
  f_obs = abs(structure.structure_factors(
    d_min=d_min, anomalous_flag=False).f_calc())
  if (0 or verbose):
    f_obs.show_summary()
  # Attach sigmas = sqrt(F) to the amplitudes.
  f_obs = miller.array(
    miller_set=f_obs,
    data=f_obs.data(),
    sigmas=flex.sqrt(f_obs.data()))
  partial_structure = xray.structure(
    crystal_symmetry=structure,
    scatterers=structure.scatterers()[:-2])
  f_calc = f_obs.structure_factors_from_scatterers(
    xray_structure=partial_structure).f_calc()
  # ~10% of reflections are flagged as the test set.
  test_set_flags = (flex.random_double(size=f_obs.indices().size()) < 0.1)
  sfweight = clipper.SFweight_spline_interface(
    unit_cell=f_obs.unit_cell(),
    space_group=f_obs.space_group(),
    miller_indices=f_obs.indices(),
    anomalous_flag=f_obs.anomalous_flag(),
    f_obs_data=f_obs.data(),
    f_obs_sigmas=f_obs.sigmas(),
    f_calc=f_calc.data(),
    test_set_flags=test_set_flags,
    n_refln=f_obs.indices().size()//10,
    n_param=20)
  if (0 or verbose):
    print "number_of_spline_parameters:",sfweight.number_of_spline_parameters()
    print "mean fb: %.8g" % flex.mean(flex.abs(sfweight.fb()))
    print "mean fd: %.8g" % flex.mean(flex.abs(sfweight.fd()))
    print "mean phi: %.8g" % flex.mean(sfweight.centroid_phases())
    print "mean fom: %.8g" % flex.mean(sfweight.figures_of_merit())
  return sfweight
def r_value(self, out):
  # R = sum|D - N| / sum(|D + N|/2) over the common primitive sets,
  # written to the supplied stream.
  der_data = self.der_primset.data()
  nat_data = self.nat_primset.data()
  numerator = flex.sum(flex.abs(der_data - nat_data))
  denominator = flex.sum(flex.abs(der_data + nat_data)/2.0)
  print >> out, "Current R value: %4.3f"%(numerator/denominator)
def get_pseudo_curvs():
  # NOTE(review): closure fragment -- relies on inp_info, limits, O and
  # inp_i_info from an enclosing compute-step scope (cf. the copy embedded
  # in compute_step_just_grads); it is not callable standalone.
  ag_max = flex.max(flex.abs(inp_info.grads))
  assert ag_max != 0
  # Steepest-descent direction, scaled so the largest component moves
  # half of its dynamic shift limit.
  dests = (-inp_info.grads/ag_max) * (limits/2)
  assert flex.abs(dests).all_le(limits/2*(1+1e-6))
  # Every step component must oppose the sign of its gradient.
  assert (dests > 0).all_eq(inp_info.grads < 0)
  O.pseudo_curvs_i_info = inp_i_info
  return dests
def compute_step_just_grads(O):
  """Gradient-only minimization step.

  Builds per-parameter dynamic shift limits, then takes either a scaled
  steepest-descent step (pseudo curvatures) or a two-loop-recursion BFGS
  step from accumulated memory, with a line search adjustment and fallback.
  """
  inp_i_info = len(O.xfgc_infos) - 1
  inp_info = O.xfgc_infos[-1]
  # Per-parameter step limits derived from the dynamic shift limit pairs.
  limits = flex.double()
  for ix,dsl,g in zip(count(), O.dynamic_shift_limits, inp_info.grads):
    limits.append(dsl.pair(x=O.x[ix]).get(grad=g))
  assert limits.all_gt(0)
  def get_pseudo_curvs():
    # Steepest descent scaled so the largest component is limits/2.
    ag_max = flex.max(flex.abs(inp_info.grads))
    assert ag_max != 0
    dests = (-inp_info.grads/ag_max) * (limits/2)
    assert flex.abs(dests).all_le(limits/2*(1+1e-6))
    assert (dests > 0).all_eq(inp_info.grads < 0)
    O.pseudo_curvs_i_info = inp_i_info
    return dests
  if (O.pseudo_curvs is None):
    dests = get_pseudo_curvs()
  else:
    # Try a BFGS step built from the history since the last pseudo-curv
    # reset; fall back to pseudo curvatures if no usable memory.
    active_infos = O.get_active_infos(O.pseudo_curvs_i_info)
    assert len(active_infos) > 1
    memory = O.build_bfgs_memory(active_infos=active_infos)
    if (memory is None):
      O.pseudo_curvs = None
      dests = get_pseudo_curvs()
    else:
      hk0 = 1 / O.pseudo_curvs
      dests = -bfgs.hg_two_loop_recursion(
        memory=memory, hk0=hk0, gk=inp_info.grads)
      # Rescale uniformly if any component exceeds its limit.
      madl = flex.max(flex.abs(dests / limits))
      if (madl > 1):
        print "madl:", madl
        dests *= (1/madl)
      assert flex.abs(dests).all_le(limits*(1+1e-6))
  dest_adj = O.line_search(dests, stpmax=2.0)
  print "dest_adj:", dest_adj
  if (dest_adj is not None):
    dests *= dest_adj
  elif (O.pseudo_curvs is not None):
    # BFGS step failed the line search: reset and retry with pseudo curvs.
    O.pseudo_curvs = None
    dests = get_pseudo_curvs()
    dest_adj = O.line_search(dests, stpmax=2.0)
    if (dest_adj is not None):
      dests *= dest_adj
  if (O.pseudo_curvs is None):
    assert (dests > 0).all_eq(inp_info.grads < 0)
    assert flex.abs(dests).all_le(limits*(1+1e-6))
    # Diagonal pseudo curvatures implied by the accepted step.
    O.pseudo_curvs = -inp_info.grads / dests
    assert O.pseudo_curvs.all_gt(0)
  O.x = inp_info.x + dests
  O.update_fgc(is_iterate=True)
  O.aq_sel_size = None
  O.aq_n_used = None
def __init__(self, nat, der, nsr_bias=1.0):
  """Build native/derivative difference arrays on F and I scales.

  nat, der: real miller arrays with sigmas; der data/sigmas are divided
  by nsr_bias.  Produces delta_f/abs_delta_f (amplitude differences) and
  delta_i/abs_delta_i (intensity differences), sigmas in quadrature.

  Fix: the second assert previously re-checked self.nat; it must verify
  the derivative array.
  """
  self.nat=nat.deep_copy()
  self.der=der.deep_copy()
  self.nsr_bias=1.0/nsr_bias
  assert self.nat.is_real_array()
  assert self.der.is_real_array()  # was: duplicate check of self.nat
  # NOTE(review): f_sq_as_f()/f_as_f_sq() calls below discard their return
  # values -- confirm they convert in place as intended.
  if self.nat.is_xray_intensity_array():
    self.nat.f_sq_as_f()
  if self.der.is_xray_intensity_array():
    self.der.f_sq_as_f()
  self.nat,self.der = self.nat.common_sets(self.der)
  self.der = self.der.customized_copy(
    data = self.der.data()*self.nsr_bias,
    sigmas = self.der.sigmas()*self.nsr_bias).set_observation_type(
      self.der)
  # F(der) - F(nat); sigmas propagated in quadrature.
  self.delta_f=self.nat.customized_copy(
    data = ( self.der.data() - self.nat.data() ),
    sigmas = flex.sqrt( self.der.sigmas()*self.der.sigmas()+
                        self.nat.sigmas()*self.nat.sigmas() )
    ).set_observation_type( self.nat )
  self.abs_delta_f=self.nat.customized_copy(
    data = flex.abs( self.der.data() - self.nat.data() ),
    sigmas = flex.sqrt( self.der.sigmas()*self.der.sigmas()+
                        self.nat.sigmas()*self.nat.sigmas() )
    ).set_observation_type( self.der )
  if not self.nat.is_xray_intensity_array():
    self.nat.f_as_f_sq()
  if not self.der.is_xray_intensity_array():
    self.der.f_as_f_sq()
  # Same differences on the intensity scale.
  self.delta_i=self.nat.customized_copy(
    data = ( self.der.data() - self.nat.data() ),
    sigmas = flex.sqrt( self.der.sigmas()*self.der.sigmas()+
                        self.nat.sigmas()*self.nat.sigmas() )
    ).set_observation_type( self.nat )
  self.abs_delta_i=self.nat.customized_copy(
    data = flex.abs( self.der.data() - self.nat.data() ),
    sigmas = flex.sqrt( self.der.sigmas()*self.der.sigmas()+
                        self.nat.sigmas()*self.nat.sigmas() )
    ).set_observation_type( self.der )
def exercise(prefix="tst_helix_sheet_recs_as_pdb_files"):
  """Run phenix.helix_sheet_recs_as_pdb_files and check the extracted helix.

  The extracted helix PDB must reproduce the original structure factors
  (relative amplitude difference < 1e-3).
  """
  of = open(prefix+".pdb", "w")
  print >> of, pdb_str
  of.close()
  xrs1 = iotbx.pdb.input(file_name=prefix+".pdb").xray_structure_simple()
  easy_run.call("phenix.helix_sheet_recs_as_pdb_files %s"%(prefix+".pdb"))
  # File name produced by the command for the single helix in pdb_str.
  xrs2 = iotbx.pdb.input(
    file_name="HELIX_1_1_ALA_E_1_ALA_E_16_1_16.pdb").xray_structure_simple(
      crystal_symmetry=xrs1.crystal_symmetry())
  fc1 = xrs1.structure_factors(d_min=3).f_calc()
  fc2 = fc1.structure_factors_from_scatterers(
    xray_structure=xrs2).f_calc()
  fc1=flex.abs(abs(fc1).data())
  fc2=flex.abs(abs(fc2).data())
  assert flex.sum(flex.abs(fc1-fc2))/flex.sum(flex.abs(fc1+fc2)) < 1.e-3
def angle_deviations_weighted(self):
  # Sigma-weighted angle deviation statistics: each delta is divided by its
  # restraint sigma and rescaled by the mean sigma.  Returns
  # (min, max, rms) of the scores, or (0, 0, 0) with no deltas.
  if(self.n_angle_proxies is not None):
    deltas = self.angle_proxies.proxy_select(origin_id=0).deltas(
      sites_cart=self.sites_cart)
    if len(deltas) == 0:
      return 0,0,0
    restraint_sigmas = flex.double(
      [geometry_restraints.weight_as_sigma(p.weight)
       for p in self.angle_proxies])
    mean_sigma = flex.mean_default(restraint_sigmas, 0)
    scores = flex.double(
      [d/s*mean_sigma for d, s in zip(deltas, restraint_sigmas)])
    rms = math.sqrt(flex.mean_default(scores*scores, 0))
    abs_scores = flex.abs(scores)
    return (flex.min_default(abs_scores, 0),
            flex.max_default(abs_scores, 0),
            rms)
def f_obs(self):
  """Return observed amplitudes, optionally completed with scaled |F_model|.

  When use_set_completion is enabled, missing reflections of complete_set
  are filled with scale_factor * |F_model| and zero sigmas.

  Fix: the original computed data_substitute and then recomputed
  flex.abs(f_model.data()) inline; the precomputed value is now reused.
  """
  fo2 = self.fo2.as_intensity_array()
  f_obs = fo2.as_amplitude_array()
  if self.use_set_completion:
    if self._f_mask is not None:
      f_model = self.f_model()
    else:
      f_model = self.f_calc
    # |F_model|, computed once and reused below.
    data_substitute = flex.abs(f_model.data())
    # Scale matching the observed amplitudes to the model amplitudes.
    scale_factor = flex.sum(f_obs.data()) / flex.sum(
      f_model.common_set(f_obs).as_amplitude_array().data())
    f_obs = f_obs.matching_set(
      other=self.complete_set,
      data_substitute=scale_factor * data_substitute,
      sigmas_substitute=0
    )
  return f_obs
def bond_deviations_weighted(self):
  # Sigma-weighted bond deviation statistics: each delta is divided by its
  # restraint sigma and rescaled by the mean sigma.  Returns
  # (min, max, rms) of the scores, or (0, 0, 0) with no deltas.
  if(self.n_bond_proxies is not None):
    deltas = self.bond_proxies.deltas(
      sites_cart=self.sites_cart, origin_id=0)
    if len(deltas) == 0:
      return 0,0,0
    restraint_sigmas = flex.double(
      [geometry_restraints.weight_as_sigma(p.weight)
       for p in self.bond_proxies.simple])
    mean_sigma = flex.mean_default(restraint_sigmas, 0)
    scores = flex.double(
      [d/s*mean_sigma for d, s in zip(deltas, restraint_sigmas)])
    rms = math.sqrt(flex.mean_default(scores*scores, 0))
    abs_scores = flex.abs(scores)
    return (flex.min_default(abs_scores, 0),
            flex.max_default(abs_scores, 0),
            rms)
def verify_miller_arrays(a1, a2, eps=1.0e-5):
  # Check that a2, re-mapped onto a1's index set, matches a1 to within eps
  # (exact equality for bool-vs-int data); sigmas compared too if present.
  v = a2.adopt_set(a1)
  if a1.is_bool_array():
    if a2.is_integer_array():
      assert flex.max(flex.abs(a1.data().as_int() - v.data())) == 0
    else:
      assert flex.max(flex.abs(a1.data().as_double() - v.data())) < eps
  elif a1.is_hendrickson_lattman_array():
    # Compare the four HL coefficient slices individually.
    for i in xrange(4):
      assert flex.max(flex.abs(a1.data().slice(i) - v.data().slice(i))) < eps
  else:
    assert flex.max(flex.abs(a1.data() - v.data())) < eps
  if v.sigmas() is not None:
    assert flex.max(flex.abs(a1.sigmas() - v.sigmas())) < eps
def verify_miller_arrays(a1, a2, eps=1.e-5):
  # Check that a2, re-mapped onto a1's index set, matches a1 to within eps
  # (exact equality for bool-vs-int data); sigmas compared too if present.
  v = a2.adopt_set(a1)
  if (a1.is_bool_array()):
    if (a2.is_integer_array()):
      assert flex.max(flex.abs(a1.data().as_int() - v.data())) == 0
    else:
      assert flex.max(flex.abs(a1.data().as_double() - v.data())) < eps
  elif (a1.is_hendrickson_lattman_array()):
    # Compare the four HL coefficient slices individually.
    for i in range(4):
      assert flex.max(
        flex.abs(a1.data().slice(i) - v.data().slice(i))) < eps
  else:
    assert flex.max(flex.abs(a1.data() - v.data())) < eps
  if (v.sigmas() is not None):
    assert flex.max(flex.abs(a1.sigmas() - v.sigmas())) < eps
def plot_positions(
    values,
    positions,
    file_name,
    cmap=pyplot.cm.Reds,
    vmin=None,
    vmax=None,
    invalid="white",
):
  # Rasterize scattered (x, y) values onto a grid (pitch inferred from the
  # smallest nonzero x-spacing) and hand off to plot_grid; empty cells keep
  # the sentinel -2.
  values = values.as_double()
  assert positions.size() >= values.size()
  positions = positions[:values.size()]
  if vmin is None:
    vmin = flex.min(values)
  if vmax is None:
    vmax = flex.max(values)
  x, y = positions.parts()
  dx = flex.abs(x[1:] - x[:-1])
  dy = flex.abs(y[1:] - y[:-1])
  dx = dx.select(dx > 0)
  # NOTE(review): dy is computed and filtered but never used below.
  dy = dy.select(dy > 0)
  scale = 1 / flex.min(dx)
  # print scale
  x = (x * scale).iround()
  y = (y * scale).iround()
  from libtbx.math_utils import iceil
  z = flex.double(
    flex.grid(iceil(flex.max(y)) + 1, iceil(flex.max(x)) + 1), -2)
  # print z.all()
  for x_, y_, z_ in zip(x, y, values):
    z[y_, x_] = z_
  plot_grid(
    z.as_1d(),
    z.all(),
    file_name,
    cmap=cmap,
    vmin=vmin,
    vmax=vmax,
    invalid=invalid,
  )
  return
def __init__(self, nat, der, nsr_bias=1.0):
  """Build native/derivative difference arrays on F and I scales.

  nat, der: real miller arrays with sigmas; der data/sigmas are divided
  by nsr_bias.  Produces delta_f/abs_delta_f (amplitude differences) and
  delta_i/abs_delta_i (intensity differences), sigmas in quadrature.

  Fix: the second assert previously re-checked self.nat; it must verify
  the derivative array.
  """
  self.nat = nat.deep_copy()
  self.der = der.deep_copy()
  self.nsr_bias = 1.0 / nsr_bias
  assert self.nat.is_real_array()
  assert self.der.is_real_array()  # was: duplicate check of self.nat
  # NOTE(review): f_sq_as_f()/f_as_f_sq() calls below discard their return
  # values -- confirm they convert in place as intended.
  if self.nat.is_xray_intensity_array():
    self.nat.f_sq_as_f()
  if self.der.is_xray_intensity_array():
    self.der.f_sq_as_f()
  self.nat, self.der = self.nat.common_sets(self.der)
  self.der = self.der.customized_copy(
    data=self.der.data() * self.nsr_bias,
    sigmas=self.der.sigmas() * self.nsr_bias).set_observation_type(
      self.der)
  # F(der) - F(nat); sigmas propagated in quadrature.
  self.delta_f = self.nat.customized_copy(
    data=(self.der.data() - self.nat.data()),
    sigmas=flex.sqrt(self.der.sigmas() * self.der.sigmas() +
                     self.nat.sigmas() * self.nat.sigmas())).set_observation_type(self.nat)
  self.abs_delta_f = self.nat.customized_copy(
    data=flex.abs(self.der.data() - self.nat.data()),
    sigmas=flex.sqrt(self.der.sigmas() * self.der.sigmas() +
                     self.nat.sigmas() * self.nat.sigmas())).set_observation_type(self.der)
  if not self.nat.is_xray_intensity_array():
    self.nat.f_as_f_sq()
  if not self.der.is_xray_intensity_array():
    self.der.f_as_f_sq()
  # Same differences on the intensity scale.
  self.delta_i = self.nat.customized_copy(
    data=(self.der.data() - self.nat.data()),
    sigmas=flex.sqrt(self.der.sigmas() * self.der.sigmas() +
                     self.nat.sigmas() * self.nat.sigmas())).set_observation_type(self.nat)
  self.abs_delta_i = self.nat.customized_copy(
    data=flex.abs(self.der.data() - self.nat.data()),
    sigmas=flex.sqrt(self.der.sigmas() * self.der.sigmas() +
                     self.nat.sigmas() * self.nat.sigmas())).set_observation_type(self.der)
def run(files, params):
  """Print estimated resolution cutoffs (one column per I/sigma cutoff)
  for each XDS_ASCII or INTEGRATE.HKL file."""
  # Header row: trailing commas keep everything on one line (Python 2).
  print "filename",
  for cut in params.cut_ios:
    print "cut_ios_%.2f" % cut,
  print
  for f in files:
    is_xac = xds_ascii.is_xds_ascii(f)
    i_obs = None
    if is_xac:
      xac = xds_ascii.XDS_ASCII(f, read_data=True, i_only=True)
      xac.remove_rejected()
      i_obs = xac.i_obs().resolution_filter(d_min=params.d_min,
                                            d_max=params.d_max)
      if params.fix_variance_model:
        # Re-apply the variance model: undo the file's (ao, bo), apply the
        # requested (an, bn).
        ao, bo = xac.variance_model
        an, bn = params.variance_model
        i_obs = i_obs.customized_copy(
          sigmas = flex.sqrt(flex.abs(an * (i_obs.sigmas()**2/ao
                                            + (bn-bo)*flex.pow2(i_obs.data())))))
    else:
      # INTEGRATE.HKL path: read raw intensities and sigmas.
      ihkl = integrate_hkl_as_flex.reader(f, read_columns=("IOBS","SIGMA"))
      i_obs = ihkl.i_obs().resolution_filter(d_min=params.d_min,
                                             d_max=params.d_max)
      if params.fix_variance_model:
        a, b = params.variance_model
        i_obs = i_obs.customized_copy(
          sigmas = flex.sqrt(a * (i_obs.sigmas()**2
                                  + b*flex.pow2(i_obs.data()))))
    cutoffs = eval_resolution(i_obs, params.n_bins, params.cut_ios)
    print "%s %s" % (f, " ".join(map(lambda x: "%.2f"%x, cutoffs)))
def anomalous_probability_plot(intensities, expected_delta=None):
  """Normal probability analysis of anomalous differences.

  Sorted dI/sigma values are regressed against normal-distribution
  quantiles; optionally only the central region |x| < expected_delta is
  fitted.  Returns (slope, y_intercept, number_of_points_fitted).
  """
  from scitbx.math import distributions
  from scitbx.array_family import flex
  assert intensities.is_unique_set_under_symmetry()
  assert intensities.anomalous_flag()
  dI = intensities.anomalous_differences()
  y = dI.data()/dI.sigmas()
  perm = flex.sort_permutation(y)
  y = y.select(perm)
  distribution = distributions.normal_distribution()
  x = distribution.quantiles(y.size())
  if expected_delta is not None:
    # Restrict the fit to the central region of the plot.
    sel = flex.abs(x) < expected_delta
    x = x.select(sel)
    y = y.select(sel)
  fit = flex.linear_regression(x, y)
  # NOTE(review): correlation is computed but never used or returned.
  correlation = flex.linear_correlation(x, y)
  assert fit.is_well_defined()
  if 0:  # debug scatter plot, disabled
    from matplotlib import pyplot
    pyplot.scatter(x, y)
    m = fit.slope()
    c = fit.y_intercept()
    pyplot.plot(pyplot.xlim(), [m * x_ + c for x_ in pyplot.xlim()])
    pyplot.show()
  return fit.slope(), fit.y_intercept(), x.size()
def exercise_centrics(space_group_info, n_sites=10):
  """Bulk-solvent mask structure factors: centric reflections must carry
  symmetry-allowed phases for all radius combinations."""
  structure = random_structure.xray_structure(
    space_group_info=space_group_info,
    elements=(("O", "N", "C") * (n_sites // 3 + 1))[:n_sites],
    volume_per_atom=30,
    min_distance=1)
  for anomalous_flag in [False, True]:
    miller_set = miller.build_set(
      crystal_symmetry=structure,
      d_min=1,
      anomalous_flag=anomalous_flag)
    # Radii chosen as 0 and irrational-ish values to vary grid coverage.
    for shrink_truncation_radius in [0, .5 * 6**.5]:
      for solvent_radius in [0, .5 * 5**.5]:
        bulk_solvent_mask = mmtbx.masks.bulk_solvent(
          xray_structure=structure,
          grid_step=0.5,
          ignore_zero_occupancy_atoms=False,
          solvent_radius=solvent_radius,
          shrink_truncation_radius=shrink_truncation_radius)
        f_mask = bulk_solvent_mask.structure_factors(
          miller_set=miller_set)
        centrics = f_mask.select_centric()
        if (centrics.indices().size() > 0):
          # Transferring phases onto themselves must be a no-op for a
          # symmetry-consistent array.
          ideal = centrics.phase_transfer(centrics)
          assert flex.max(
            flex.abs(ideal.data() - centrics.data())) < 1.e-6
def __init__(self, millarr):
  # Collect display metadata for a miller array: min/max of data and
  # sigmas, labels/description, HKL span and a summary info string.
  from iotbx.gui_tools.reflections import get_array_description
  data = millarr.data()
  if (isinstance(data, flex.int)):
    # Drop the integer "not a value" sentinel before min/max.
    data = [e for e in data if e!= display.inanval]
  if millarr.is_complex_array():
    # Complex data is summarized by its amplitudes.
    data = flex.abs(millarr.data())
  self.maxdata =max( data )
  self.mindata =min( data )
  # nanval marks "no sigmas present"; overwritten below if sigmas exist.
  self.maxsigmas = self.minsigmas = display.nanval
  if millarr.sigmas() is not None:
    data = millarr.sigmas()
    self.maxsigmas =max( data )
    self.minsigmas =min( data )
    self.minmaxstr = "MinMaxValues:[%s; %s], MinMaxSigmaValues:[%s; %s]" \
      %(roundoff(self.mindata), roundoff(self.maxdata), \
        roundoff(self.minsigmas), roundoff(self.maxsigmas))
  else:
    self.minmaxstr = "MinMaxValues:[%s; %s]" %(roundoff(self.mindata),
                                               roundoff(self.maxdata))
  self.labels = self.desc = ""
  if millarr.info():
    self.labels = millarr.info().label_string()
    self.desc = get_array_description(millarr)
  self.span = "HKLs: %s to %s" % \
    ( millarr.index_span().min(), millarr.index_span().max())
  self.infostr = "%s (%s), %s %s, %s, d_min: %s" % \
    (self.labels, self.desc, millarr.size(), self.span, self.minmaxstr,
     roundoff(millarr.d_min()))
def get_phase_scores(miller_arrays):
  # Score each array's suitability as a phase source (0..4, higher is
  # better): complex/HL data scores 4; real data that is not an
  # amplitude/intensity/reconstructed-amplitude array (and is non-empty)
  # is scored by its mean absolute value.
  scores = []
  for ma in miller_arrays:
    score = 0
    if ma.is_complex_array() or ma.is_hendrickson_lattman_array():
      score = 4
    elif ma.is_real_array():
      # Short-circuit keeps the original check order and lazy data access.
      excluded = (ma.is_xray_reconstructed_amplitude_array()
                  or ma.is_xray_amplitude_array()
                  or ma.is_xray_intensity_array()
                  or ma.data().size() == 0)
      if not excluded:
        m = flex.mean(flex.abs(ma.data()))
        if m < 5:
          score = 2
        elif m < 500:
          score = 3
        else:
          score = 1
    scores.append(score)
  return scores
def main():
  """Compare tabulated scattering factors against WK1995 Gaussian fits.

  For each input table file, prints per-element maximum absolute and
  significant relative errors up to the sin(theta)/lambda cutoff, then the
  full (x, tab, fit, abs_err, rel_err) listing.
  """
  parser = OptionParser(usage="usage: python %prog [options] file_name ...")
  parser.add_option("-c", "--cutoff",
                    type="float", default=6.05, metavar="FLOAT",
                    help="maximum sin(theta)/lambda")
  (options, args) = parser.parse_args()
  if (len(args) < 1):
    parser.print_help()
    return
  cutoff = options.cutoff
  for file_name in args:
    tab = read_table(file_name)
    if (tab.element == "Es"):
      continue  # Es skipped -- presumably not covered by WK1995; confirm
    wk = xray_scattering.wk1995(tab.element, True).fetch()
    sel = tab.x <= cutoff
    tab_x = tab.x.select(sel)
    tab_y = tab.y.select(sel)
    sigmas = flex.double(tab_x.size(), 0.0005)
    wky = wk.at_x(tab_x)
    errors_abs = flex.abs(wky - tab_y)
    fit = scitbx.math.gaussian.fit(tab_x, tab_y, sigmas, wk)
    errors_rel = fit.significant_relative_errors(1.e-6)
    print(tab.element, tab.atomic_number, end=' ')
    print("max error < %.1fA-1 abs, rel: %7.4f %7.4f" % (
      cutoff, flex.max(errors_abs), flex.max(errors_rel)))
    for x, y, f, ea, er in zip(tab_x, tab_y, wky, errors_abs, errors_rel):
      print("%7.4f %7.4f %7.4f %7.4f %7.4f" % (x, y, f, ea, er))
    print()
def tst_hes_ls_f_wt(self,h=0.0000001):
  # Verify the analytical hessian of ls_f_wt against forward finite
  # differences of the gradient (step h) in p_scale and the six u params.
  hes_anal = self.ls_f_wt.hessian_as_packed_u()
  hes_anal=hes_anal.matrix_packed_u_as_symmetric()
  grads = self.ls_f_wt.get_gradient()
  # Row 0: d(gradient)/d(p_scale).
  self.ls_f_wt.set_p_scale(self.p_scale+h)
  tmp = self.ls_f_wt.get_gradient()
  tmp = list( (grads-tmp)/-h )
  tmp_hess=[]
  tmp_hess.append( tmp )
  self.ls_f_wt.set_p_scale(self.p_scale)  # restore original p_scale
  # Rows 1-6: d(gradient)/d(u[ii]), one component perturbed at a time.
  for ii in range(6):
    u_tmp=list(flex.double(self.u).deep_copy())
    u_tmp[ii]+=h
    self.ls_f_wt.set_u_rwgk(u_tmp)
    tmp = self.ls_f_wt.get_gradient()
    tmp = (grads - tmp)/-h
    tmp_hess.append( list(tmp) )
    self.ls_f_wt.set_u_rwgk(self.u)  # restore before next perturbation
  # Compare element-wise, normalized by the largest analytical entry
  # (floored at 1 to avoid dividing by a tiny scale).
  f = max(1, flex.max(flex.abs(hes_anal)))
  count=0
  for ii in range(7):
    for jj in range(7):
      assert approx_equal(tmp_hess[ii][jj]/f, hes_anal[count]/f)
      count+=1
def exercise_2():
  """f_model's f_calc must agree exactly with structure factors computed
  directly from the same structure."""
  xray_structure = random_structure.xray_structure(
    space_group_info=sgtbx.space_group_info("C 1 2/c 1"),
    elements=("O", "N", "C") * 50,
    volume_per_atom=100,
    min_distance=1.5,
    general_positions_only=True,
    random_u_iso=True,
    random_occupancy=True)
  xray_structure.scattering_type_registry(table="wk1995")
  f_obs = abs(xray_structure.structure_factors(d_min=2.0).f_calc())
  sfg_params = mmtbx.f_model.sf_and_grads_accuracy_master_params.extract()
  for algorithm in ["fft", "direct"]:
    # NOTE(review): the loop variable is never applied (expected
    # sfg_params.algorithm = algorithm?) -- both passes use the default.
    flags = f_obs.generate_r_free_flags(fraction=0.1, max_free=99999999)
    fmodel = mmtbx.f_model.manager(
      xray_structure=xray_structure,
      f_obs=f_obs,
      r_free_flags=flags,
      sf_and_grads_accuracy_params=sfg_params)
    f_calc_1 = abs(fmodel.f_calc()).data()
    f_calc_2 = abs(
      f_obs.structure_factors_from_scatterers(
        xray_structure=xray_structure,
        algorithm=sfg_params.algorithm).f_calc()).data()
    delta = flex.abs(f_calc_1 - f_calc_2)
    assert approx_equal(flex.sum(delta), 0.0)
def main():
  """Compare tabulated scattering factors against WK1995 Gaussian fits.

  For each input table file, prints per-element maximum absolute and
  significant relative errors up to the sin(theta)/lambda cutoff, then the
  full (x, tab, fit, abs_err, rel_err) listing.
  """
  parser = OptionParser(usage="usage: python %prog [options] file_name ...")
  parser.add_option("-c", "--cutoff",
                    type="float", default=6.05, metavar="FLOAT",
                    help="maximum sin(theta)/lambda")
  (options, args) = parser.parse_args()
  if len(args) < 1:
    parser.print_help()
    return
  cutoff = options.cutoff
  for file_name in args:
    tab = read_table(file_name)
    if tab.element == "Es":
      continue  # Es skipped -- presumably not covered by WK1995; confirm
    wk = xray_scattering.wk1995(tab.element, True).fetch()
    sel = tab.x <= cutoff
    tab_x = tab.x.select(sel)
    tab_y = tab.y.select(sel)
    sigmas = flex.double(tab_x.size(), 0.0005)
    wky = wk.at_x(tab_x)
    errors_abs = flex.abs(wky - tab_y)
    fit = scitbx.math.gaussian.fit(tab_x, tab_y, sigmas, wk)
    errors_rel = fit.significant_relative_errors(1.0e-6)
    # Trailing comma keeps the element line and the error summary together.
    print tab.element, tab.atomic_number,
    print "max error < %.1fA-1 abs, rel: %7.4f %7.4f" % (
      cutoff, flex.max(errors_abs), flex.max(errors_rel))
    for x, y, f, ea, er in zip(tab_x, tab_y, wky, errors_abs, errors_rel):
      print "%7.4f %7.4f %7.4f %7.4f %7.4f" % (x, y, f, ea, er)
    print
def dump_R_in_bins(obs, calc, scale_B=True, log_out=sys.stdout, n_bins=20):
  """Write per-resolution-bin R factors between two data sets.

  If scale_B, a global k*exp(-B*d_star_sq) scale from kBdecider is applied
  to calc up front and each bin uses scale 1; otherwise each bin gets its
  own least-squares scale.  Ends with the overall R from calc_R.

  Fix: removed the unused local `count = 0`.
  """
  #obs, calc = obs.common_sets(calc, assert_is_similar_symmetry=False)
  if scale_B:
    scale, B = kBdecider(obs, calc).run()
    d_star_sq = calc.d_star_sq().data()
    calc = calc.customized_copy(
      data = scale * flex.exp(-B*d_star_sq) * calc.data())
  binner = obs.setup_binner(n_bins=n_bins)
  log_out.write("dmax - dmin: R (nref) <I1> <I2> scale\n")
  for i_bin in binner.range_used():
    tmp_obs = obs.select(binner.bin_indices() == i_bin)
    tmp_calc = calc.select(binner.bin_indices() == i_bin)
    low = binner.bin_d_range(i_bin)[0]
    high = binner.bin_d_range(i_bin)[1]
    if scale_B:
      scale = 1.
    else:
      # Per-bin least-squares scale minimizing |obs - scale*calc|^2.
      scale = flex.sum(tmp_obs.data()*tmp_calc.data()) \
          / flex.sum(flex.pow2(tmp_calc.data()))
    # R against the average of the two data sets in the bin.
    R = flex.sum(flex.abs(tmp_obs.data() - scale*tmp_calc.data())) \
        / flex.sum(0.5 * tmp_obs.data() + 0.5 * scale*tmp_calc.data())
    log_out.write("%5.2f - %5.2f: %.5f (%d) %.1f %.1f %.3e\n" % (
      low, high, R, len(tmp_obs.data()),
      flex.mean(tmp_obs.data()), flex.mean(tmp_calc.data()), scale))
  log_out.write("Overall R = %.5f (scale=%.3e, %%comp=%.3f)\n\n" % (
    calc_R(obs, calc, do_scale=not scale_B) + (obs.completeness()*100.,)))
def exercise(prefix="tst_helix_sheet_recs_as_pdb_files"):
  """Run phenix.helix_sheet_recs_as_pdb_files and check the extracted helix.

  The extracted helix PDB must reproduce the original structure factors
  (relative amplitude difference < 1e-3).
  """
  of = open(prefix + ".pdb", "w")
  print(pdb_str, file=of)
  of.close()
  xrs1 = iotbx.pdb.input(file_name=prefix + ".pdb").xray_structure_simple()
  easy_run.call("phenix.helix_sheet_recs_as_pdb_files %s" % (prefix + ".pdb"))
  # File name produced by the command for the single helix in pdb_str.
  xrs2 = iotbx.pdb.input(
    file_name="HELIX_1_1_ALA_E_1_ALA_E_16_1_16.pdb").xray_structure_simple(
      crystal_symmetry=xrs1.crystal_symmetry())
  fc1 = xrs1.structure_factors(d_min=3).f_calc()
  fc2 = fc1.structure_factors_from_scatterers(xray_structure=xrs2).f_calc()
  fc1 = flex.abs(abs(fc1).data())
  fc2 = flex.abs(abs(fc2).data())
  assert flex.sum(flex.abs(fc1 - fc2)) / flex.sum(
    flex.abs(fc1 + fc2)) < 1.e-3
def __init__(self, millarr, mprint=sys.stdout.write):
  # Collect display metadata for a miller array: NaN-filtered min/max of
  # data and sigmas, labels/description, HKL span and resolution limits.
  # Errors while probing the span/resolution are reported via mprint.
  from iotbx.gui_tools.reflections import get_array_description
  data = millarr.data()
  if (isinstance(data, flex.int)):
    # Drop the integer "not a value" sentinel before min/max.
    data = [e for e in data if e != display.inanval]
  if millarr.is_complex_array():
    # Complex data is summarized by its amplitudes.
    data = flex.abs(millarr.data())
  data = [e for e in data if not math.isnan(e)]
  self.maxdata = max(data)
  self.mindata = min(data)
  self.maxsigmas = self.minsigmas = None
  if millarr.sigmas() is not None:
    data = millarr.sigmas()
    data = [e for e in data if not math.isnan(e)]
    self.maxsigmas = max(data)
    self.minsigmas = min(data)
  self.minmaxdata = (roundoff(self.mindata), roundoff(self.maxdata))
  self.minmaxsigs = (roundoff(self.minsigmas), roundoff(self.maxsigmas))
  self.labels = self.desc = ""
  #import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
  if millarr.info():
    self.labels = millarr.info().label_string()
    self.desc = get_array_description(millarr)
  self.span = ("?", "?")
  dmin = 0.0
  dmax = 0.0
  try:
    self.span = (millarr.index_span().min(), millarr.index_span().max())
    dmin = millarr.d_max_min()[1]
    dmax = millarr.d_max_min()[0]
  except Exception, e:
    # Best-effort: keep placeholder span/limits and report the problem.
    mprint(to_str(e))
def f_obs(self):
  """Return observed amplitudes, optionally completed with scaled |F_model|.

  When use_set_completion is enabled, missing reflections of complete_set
  are filled with scale_factor * |F_model| and zero sigmas.

  Fix: the original computed data_substitute and then recomputed
  flex.abs(f_model.data()) inline; the precomputed value is now reused.
  """
  fo2 = self.fo2.as_intensity_array()
  f_obs = fo2.as_amplitude_array()
  if self.use_set_completion:
    if self._f_mask is not None:
      f_model = self.f_model()
    else:
      f_model = self.f_calc
    # |F_model|, computed once and reused below.
    data_substitute = flex.abs(f_model.data())
    # Scale matching the observed amplitudes to the model amplitudes.
    scale_factor = flex.sum(f_obs.data())/flex.sum(
      f_model.common_set(f_obs).as_amplitude_array().data())
    f_obs = f_obs.matching_set(
      other=self.complete_set,
      data_substitute=scale_factor*data_substitute,
      sigmas_substitute=0)
  return f_obs
def run(hklin, n_bins):
  """Report mean |anomalous difference| per resolution bin for every
  anomalous intensity array in the reflection file."""
  for array in iotbx.file_reader.any_file(hklin).file_server.miller_arrays:
    # skip if not anomalous intensity data
    if not (array.is_xray_intensity_array() and array.anomalous_flag()):
      print "skipping", array.info()
      continue
    # We assume that data is already merged
    assert array.is_unique_set_under_symmetry()
    # take anomalous differences
    dano = array.anomalous_differences()
    # process with binning
    dano.setup_binner(n_bins=n_bins)
    binner = dano.binner()
    print "Array:", array.info()
    print " dmax dmin nrefs dano"
    for i_bin in binner.range_used():
      # selection for this bin. sel is flex.bool object (list of True of False)
      sel = binner.selection(i_bin)
      # take mean of absolute value of anomalous differences in a bin
      bin_mean = flex.mean(flex.abs(dano.select(sel).data()))
      d_max, d_min = binner.bin_d_range(i_bin)
      print "%7.2f %7.2f %6d %.2f" % (d_max, d_min, binner.count(i_bin),
                                      bin_mean)
def random_f_calc(space_group_info, n_scatterers, d_min, anomalous_flag,
                  verbose=0):
  """Random-structure F_calc normalized to mean amplitude 1.

  Returns None for the anomalous/centric combination (no array generated
  in that case).  For anomalous data ~10% of reflections are randomly
  deleted.
  """
  if (anomalous_flag and space_group_info.group().is_centric()):
    return None
  structure = random_structure.xray_structure(
    space_group_info,
    elements=["const"] * n_scatterers,
    volume_per_atom=500,
    min_distance=2.,
    general_positions_only=True)
  if (0 or verbose):
    structure.show_summary().show_scatterers()
  f_calc = structure.structure_factors(
    d_min=d_min, anomalous_flag=anomalous_flag).f_calc()
  # Normalize so that the mean amplitude is 1.
  f_calc = miller.array(
    miller_set=f_calc,
    data=f_calc.data() / flex.mean(flex.abs(f_calc.data())))
  if (f_calc.anomalous_flag()):
    # Randomly delete ~10% of the reflections (duplicates of j possible,
    # so the removed count may be slightly less than 10%).
    selection = flex.bool(f_calc.indices().size(), True)
    for i in xrange(f_calc.indices().size() // 10):
      j = random.randrange(f_calc.indices().size())
      selection[j] = False
    f_calc = f_calc.select(selection)
  return f_calc
def exercise_expand():
  """Exercise the miller.expand_to_p1_* family (indices, phases, complex
  data, Hendrickson-Lattman coefficients) in space group P 41 (1,-1,0),
  with and without Friedel mates, against fixed regression values."""
  sg = sgtbx.space_group("P 41 (1,-1,0)")
  h = flex.miller_index(((3,1,-2), (1,-2,0)))
  # Second index is centric in this group, first is not.
  assert tuple(sg.is_centric(h)) == (0, 1)
  # Expansion without Friedel mates and without selection bookkeeping.
  p1 = miller.expand_to_p1_iselection(
    space_group=sg, anomalous_flag=False, indices=h, build_iselection=False)
  p1_i0 = ((-3,-1,2), (-1, 3,2),(3,1,2),(1,-3,2),(1,-2, 0),(2,1,0))
  assert tuple(p1.indices) == p1_i0
  assert p1.iselection.size() == 0
  # Same expansion, keeping both hemispheres.
  p1 = miller.expand_to_p1_iselection(
    space_group=sg, anomalous_flag=True, indices=h, build_iselection=False)
  assert tuple(p1.indices) \
      == ((3,1,-2), (1,-3,-2), (-3,-1,-2), (-1,3,-2),
          (1,-2,0), (-2,-1,0), (-1,2,0), (2,1,0))
  # With build_iselection=True each expanded index maps back to its parent.
  p1 = miller.expand_to_p1_iselection(
    space_group=sg, anomalous_flag=False, indices=h, build_iselection=True)
  assert tuple(p1.indices) == p1_i0
  assert tuple(p1.iselection) == (0,0,0,0,1,1)
  a = flex.double((1,2))   # amplitudes
  p = flex.double((10,90)) # phases in degrees
  p1 = miller.expand_to_p1_phases(
    space_group=sg, anomalous_flag=False, indices=h, data=p, deg=True)
  assert approx_equal(tuple(p1.data), (-10,110,110,-10, 90,30))
  p1 = miller.expand_to_p1_phases(
    space_group=sg, anomalous_flag=True, indices=h, data=p, deg=True)
  assert approx_equal(tuple(p1.data), (10,-110,-110,10, 90,-30,-90,30))
  # Repeat in radians; v holds the expected expanded phases.
  p = flex.double([x * math.pi/180 for x in p])
  v = [x * math.pi/180 for x in p1.data]
  p1 = miller.expand_to_p1_phases(
    space_group=sg, anomalous_flag=True, indices=h, data=p, deg=False)
  assert approx_equal(tuple(p1.data), v)
  # Complex structure factors: amplitudes are symmetry-invariant, phases
  # must match the phase expansion above.
  f = flex.polar(a, p)
  p1 = miller.expand_to_p1_complex(
    space_group=sg, anomalous_flag=True, indices=h, data=f)
  assert approx_equal(tuple(flex.abs(p1.data)), (1,1,1,1,2,2,2,2))
  assert approx_equal(tuple(flex.arg(p1.data)), v)
  # Hendrickson-Lattman coefficients transform under symmetry; regression
  # values below.
  hl = flex.hendrickson_lattman([(1,2,3,4), (5,6,7,8)])
  p1 = miller.expand_to_p1_hendrickson_lattman(
    space_group=sg, anomalous_flag=True, indices=h, data=hl)
  assert approx_equal(p1.data, [
    [1,2,3,4],
    [1.232051,-1.866025,-4.964102,0.5980762],
    [1.232051,-1.866025,-4.964102,0.5980762],
    [1,2,3,4],
    [5,6,7,8],
    [2.696152,-7.330127,-10.4282,2.062178],
    [-5,-6,7,8],
    [7.696152,-1.330127,3.428203,-10.06218]])
  # The iselection can be used to expand any per-reflection flex array.
  b = flex.bool([True,False])
  p1 = miller.expand_to_p1_iselection(
    space_group=sg, anomalous_flag=True, indices=h, build_iselection=True)
  assert b.select(p1.iselection).all_eq(
    flex.bool([True, True, True, True, False, False, False, False]))
  i = flex.int([13,17])
  p1 = miller.expand_to_p1_iselection(
    space_group=sg, anomalous_flag=True, indices=h, build_iselection=True)
  assert i.select(p1.iselection).all_eq(flex.int([13,13,13,13,17,17,17,17]))
  #
  assert approx_equal(miller.statistical_mean(sg, False, h, a), 4/3.)
  assert approx_equal(miller.statistical_mean(sg, True, h, a), 3/2.)
def anomalous_probability_plot(intensities, expected_delta=None):
  """Fit a normal probability plot to dI/sig(dI) anomalous differences.

  Sorted observed ratios are regressed against the quantiles of a
  standard normal distribution; optionally only quantiles with
  |x| < expected_delta are used.  Returns (slope, intercept, n_points).
  """
  from scitbx.math import distributions
  from scitbx.array_family import flex
  assert intensities.is_unique_set_under_symmetry()
  assert intensities.anomalous_flag()
  delta = intensities.anomalous_differences()
  observed = delta.data() / delta.sigmas()
  observed = observed.select(flex.sort_permutation(observed))
  normal = distributions.normal_distribution()
  expected = normal.quantiles(observed.size())
  if expected_delta is not None:
    # Restrict the fit to the central region of the plot.
    central = flex.abs(expected) < expected_delta
    expected = expected.select(central)
    observed = observed.select(central)
  fit = flex.linear_regression(expected, observed)
  correlation = flex.linear_correlation(expected, observed)
  assert fit.is_well_defined()
  if 0:  # debug plot, intentionally disabled
    from matplotlib import pyplot
    pyplot.scatter(expected, observed)
    m = fit.slope()
    c = fit.y_intercept()
    pyplot.plot(pyplot.xlim(), [m * x_ + c for x_ in pyplot.xlim()])
    pyplot.show()
  return fit.slope(), fit.y_intercept(), expected.size()
def __init__(self, lambda1, lambda2, k1=1.0):
  """Combine acentric anomalous differences from two wavelengths.

  The data are assumed to be scaled; lambda1 is the reference
  wavelength.  Builds self.dad, an amplitude array holding the averaged
  |anomalous difference| with propagated sigmas.
  """
  self.w1 = lambda1.deep_copy()
  self.w2 = lambda2.deep_copy()
  # Work on amplitudes throughout.
  if not self.w1.is_xray_amplitude_array():
    self.w1 = self.w1.f_sq_as_f()
  if not self.w2.is_xray_amplitude_array():
    self.w2 = self.w2.f_sq_as_f()
  self.w1, self.w2 = self.w1.common_sets(self.w2)
  pos1, neg1 = self.w1.hemispheres_acentrics()
  self.mean1 = pos1.data() + neg1.data()
  self.diff1 = pos1.data() - neg1.data()
  self.v1 = (pos1.sigmas()*pos1.sigmas() + neg1.sigmas()*neg1.sigmas())
  pos2, neg2 = self.w2.hemispheres_acentrics()
  self.mean2 = pos2.data() + neg2.data()
  self.diff2 = pos2.data() - neg2.data()
  self.v2 = (pos2.sigmas()*pos2.sigmas() + neg2.sigmas()*neg2.sigmas())
  # Weighted average of the two difference sets and its sigma.
  self.new_diff = flex.abs((self.diff1 + k1*self.diff2)/2.0)
  self.new_sigma_mean = flex.sqrt((self.v1 + k1*k1*self.v2)/2.0)
  self.dad = pos1.customized_copy(
    data=self.new_diff,
    sigmas=self.new_sigma_mean).set_observation_type(self.w1)
def tst_hes_ls_f_wt(self, h=0.0000001):
  """Check the analytical Hessian of the F-weighted LS target against
  finite differences of its gradient.

  The 7x7 Hessian rows are rebuilt one parameter at a time (p_scale,
  then the six U components), each via a forward difference of step h.
  """
  hes_anal = self.ls_f_wt.hessian_as_packed_u()
  hes_anal = hes_anal.matrix_packed_u_as_symmetric()
  grads = self.ls_f_wt.get_gradient()
  # Row 0: d(gradient)/d(p_scale).
  self.ls_f_wt.set_p_scale(self.p_scale + h)
  tmp = self.ls_f_wt.get_gradient()
  tmp = list((grads - tmp) / -h)
  tmp_hess = []
  tmp_hess.append(tmp)
  self.ls_f_wt.set_p_scale(self.p_scale)  # restore unperturbed state
  # Rows 1..6: d(gradient)/d(u[ii]).
  for ii in range(6):
    u_tmp = list(flex.double(self.u).deep_copy())
    u_tmp[ii] += h
    self.ls_f_wt.set_u_rwgk(u_tmp)
    tmp = self.ls_f_wt.get_gradient()
    tmp = (grads - tmp) / -h
    tmp_hess.append(list(tmp))
    self.ls_f_wt.set_u_rwgk(self.u)  # restore unperturbed state
  # Compare element-wise, scaled by the largest Hessian magnitude so the
  # tolerance of approx_equal is meaningful for any overall scale.
  f = max(1, flex.max(flex.abs(hes_anal)))
  count = 0
  for ii in range(7):
    for jj in range(7):
      assert approx_equal(tmp_hess[ii][jj] / f, hes_anal[count] / f)
      count += 1
def exercise_core_LS(target_class, verbose): n_refl = 10 f_calc = flex.polar( flex.random_double(n_refl) * 10 - 5, flex.random_double(n_refl) * 10 - 5) f_obs = flex.abs(f_calc) + (flex.random_double(n_refl) * 2 - 1) weights = flex.random_double(n_refl) r = xray.targets_least_squares_residual(f_obs, weights, f_calc, True, 0) scale_factor = r.scale_factor() gr_ana = r.derivatives() gr_fin = flex.complex_double() eps = 1.e-6 for i_refl in xrange(n_refl): gc = [] for i_part in [0, 1]: fc0 = f_calc[i_refl] ts = [] for signed_eps in [eps, -eps]: if (i_part == 0): f_calc[i_refl] = complex(fc0.real + signed_eps, fc0.imag) else: f_calc[i_refl] = complex(fc0.real, fc0.imag + signed_eps) r = xray.targets_least_squares_residual( f_obs, weights, f_calc, False, scale_factor) ts.append(r.target()) f_calc[i_refl] = fc0 gc.append((ts[0] - ts[1]) / (2 * eps)) gr_fin.append(complex(*gc)) if (verbose): print "ana:", list(gr_ana) print "fin:", list(gr_fin) assert approx_equal(gr_fin, gr_ana)
def exercise(xray_structure, anomalous_flag, max_n_indices, out):
  """Compare this module's structure_factors against the direct-summation
  reference, then run analytical-vs-finite-difference gradient checks.

  A Miller set is built from the structure's symmetry (d_min derived from
  the shortest cell edge) and randomly subsampled if it exceeds
  max_n_indices.  Diagnostics are written to the stream `out`.
  """
  xray_structure.show_summary(f=out).show_scatterers(f=out)
  miller_set = miller.build_set(
    crystal_symmetry=xray_structure,
    anomalous_flag=anomalous_flag,
    d_min=max(1, min(xray_structure.unit_cell().parameters()[:3]) / 2.5))
  n_indices = miller_set.indices().size()
  if (n_indices > max_n_indices):
    # Random subset to keep runtime bounded.
    miller_set = miller_set.select(
      flex.random_size_t(size=max_n_indices) % n_indices)
  sf = structure_factors(xray_structure=xray_structure,
                         miller_set=miller_set)
  f_calc = miller_set.structure_factors_from_scatterers(
    xray_structure=xray_structure,
    algorithm="direct",
    cos_sin_table=False).f_calc()
  f_calc.show_summary(f=out)
  # The two structure-factor implementations must agree exactly.
  assert approx_equal(sf.fs(), f_calc.data())
  f_obs = miller_set.array(data=flex.abs(sf.fs()))
  # With f_obs == |f_calc| the gradients must vanish; noise_fin is the
  # residual finite-difference noise level observed in that check.
  noise_fin = compare_analytical_and_finite(
    f_obs=f_obs,
    xray_structure=xray_structure,
    gradients_should_be_zero=True,
    eps=1.e-5,
    out=out)
  # Perturbed f_obs: non-zero gradients, tolerance widened by noise_fin.
  compare_analytical_and_finite(
    f_obs=f_obs.customized_copy(
      data=f_obs.data() * (flex.random_double(size=f_obs.size()) + 0.5)),
    xray_structure=xray_structure,
    gradients_should_be_zero=False,
    eps=max(1.e-5, noise_fin),
    out=out)
def __init__(self, lambda1, lambda2, k1=1.0):
  """Build the combined anomalous-difference array self.dad from two
  wavelength data sets (assumed scaled; lambda1 is the reference)."""
  def to_amplitudes(arr):
    # Ensure we operate on F, not F^2.
    if arr.is_xray_amplitude_array():
      return arr
    return arr.f_sq_as_f()
  self.w1 = to_amplitudes(lambda1.deep_copy())
  self.w2 = to_amplitudes(lambda2.deep_copy())
  self.w1, self.w2 = self.w1.common_sets(self.w2)
  def hemisphere_stats(arr):
    # Returns (F+, F-, sum, difference, sigma^2 sum) for acentrics.
    plus, minus = arr.hemispheres_acentrics()
    return (plus, minus,
            plus.data() + minus.data(),
            plus.data() - minus.data(),
            plus.sigmas()*plus.sigmas() + minus.sigmas()*minus.sigmas())
  l1p, l1n, self.mean1, self.diff1, self.v1 = hemisphere_stats(self.w1)
  l2p, l2n, self.mean2, self.diff2, self.v2 = hemisphere_stats(self.w2)
  self.new_diff = flex.abs((self.diff1 + k1 * self.diff2) / 2.0)
  self.new_sigma_mean = flex.sqrt((self.v1 + k1 * k1 * self.v2) / 2.0)
  self.dad = l1p.customized_copy(
    data=self.new_diff,
    sigmas=self.new_sigma_mean).set_observation_type(self.w1)
def minimize_kbu(self, n_cycles=10):
  """Alternately refine solvent (k_sols/b_sols) and overall B_cart.

  Runs up to n_cycles rounds, alternating use_curvatures.  A refinement
  step is reverted when it worsens the R-factor and drives parameters
  outside physically sensible bounds; refinement stops once the B_cart
  step is reverted.
  """
  for use_curvatures in [False, True] * n_cycles:
    start_r = self.kbu.r_factor()
    # Snapshots so a bad step can be undone.
    save_k_sols = self.kbu.k_sols()
    save_b_sols = self.kbu.b_sols()
    save_b_cart = self.kbu.b_cart()
    self.set_use_scale(value=True)
    m = self.minimize_kb_once(use_curvatures=use_curvatures)
    r = self.kbu.r_factor()
    if (r > start_r and r > 1.e-2 and
        (flex.min(self.kbu.k_sols()) < 0 or
         flex.max(self.kbu.k_sols()) > 1 or
         flex.min(self.kbu.b_sols()) < 0 or
         # BUG FIX: was flex.max(self.kbu.k_sols()) > 100., duplicating
         # the k_sols check; the upper bound must apply to b_sols to
         # mirror the flex.min(b_sols) test above.
         flex.max(self.kbu.b_sols()) > 100.)):
      self.kbu.update(k_sols=save_k_sols, b_sols=save_b_sols)
    m = self.minimize_u_once()
    r = self.kbu.r_factor()
    bc = list(flex.abs(flex.double(self.kbu.b_cart())))
    if (r > start_r and r > 1.e-2 and max(bc) > 100):
      self.kbu.update(b_cart=save_b_cart)
      break
def exercise_core_LS(target_class, verbose): n_refl = 10 f_calc = flex.polar( flex.random_double(n_refl)*10-5, flex.random_double(n_refl)*10-5) f_obs = flex.abs(f_calc) + (flex.random_double(n_refl)*2-1) weights = flex.random_double(n_refl) r = xray.targets_least_squares_residual( f_obs, weights, f_calc, True, 0) scale_factor = r.scale_factor() gr_ana = r.derivatives() gr_fin = flex.complex_double() eps = 1.e-6 for i_refl in xrange(n_refl): gc = [] for i_part in [0,1]: fc0 = f_calc[i_refl] ts = [] for signed_eps in [eps,-eps]: if (i_part == 0): f_calc[i_refl] = complex(fc0.real + signed_eps, fc0.imag) else: f_calc[i_refl] = complex(fc0.real, fc0.imag + signed_eps) r = xray.targets_least_squares_residual( f_obs, weights, f_calc, False, scale_factor) ts.append(r.target()) f_calc[i_refl] = fc0 gc.append((ts[0]-ts[1])/(2*eps)) gr_fin.append(complex(*gc)) if (verbose): print "ana:", list(gr_ana) print "fin:", list(gr_fin) assert approx_equal(gr_fin, gr_ana)
def run(hklin): arrays = iotbx.file_reader.any_file(hklin).file_server.miller_arrays for arr in arrays: if not arr.anomalous_flag(): continue print arr.info() if arr.is_complex_array(): arr = arr.as_amplitude_array() # must be F ano = arr.anomalous_differences() ave = arr.average_bijvoet_mates() ano, ave = ano.common_sets(ave) print " <d''/mean>=", flex.mean(flex.abs(ano.data()) / ave.data()) print " <d''>/<mean>=", flex.mean(flex.abs(ano.data())) / flex.mean(ave.data()) print
def exercise_2():
  """Exercise bulk-solvent mask generation on a synthetic C2 structure:
  the low-level around_atoms mask must be binary and must agree with the
  mmtbx.masks.bulk_solvent results built via gridding_n_real and via
  grid_step; structure-factor sums are checked against a regression
  value."""
  symmetry = crystal.symmetry(
    unit_cell=(5.67, 10.37, 10.37, 90, 135.49, 90),
    space_group_symbol="C2")
  structure = xray.structure(crystal_symmetry=symmetry)
  atmrad = flex.double()
  xyzf = flex.vec3_double()
  # 100 carbon scatterers on a deterministic pseudo-random spiral of
  # fractional coordinates.
  for k in xrange(100):
    scatterer = xray.scatterer(
      site = ((1.+k*abs(math.sin(k)))/1000.0,
              (1.+k*abs(math.cos(k)))/1000.0,
              (1.+ k)/1000.0),
      scattering_type = "C")
    structure.add_scatterer(scatterer)
    atmrad.append(van_der_waals_radii.vdw.table[scatterer.element_symbol()])
    xyzf.append(scatterer.site)
  miller_set = miller.build_set(
    crystal_symmetry=structure,
    d_min=1.0,
    anomalous_flag=False)
  step = 0.5
  crystal_gridding = maptbx.crystal_gridding(
    unit_cell=structure.unit_cell(),
    step=step)
  nxyz = crystal_gridding.n_real()
  shrink_truncation_radius = 1.0
  solvent_radius = 1.0
  # Reference mask computed with the low-level interface.
  m1 = around_atoms(
    structure.unit_cell(),
    structure.space_group().order_z(),
    structure.sites_frac(),
    atmrad,
    nxyz,
    solvent_radius,
    shrink_truncation_radius)
  assert m1.solvent_radius == 1
  assert m1.shrink_truncation_radius == 1
  # The mask must contain only 0s and 1s.
  assert flex.max(m1.data) == 1
  assert flex.min(m1.data) == 0
  assert m1.data.size() == m1.data.count(1) + m1.data.count(0)
  # Same mask via the high-level interface, gridding given explicitly...
  m2 = mmtbx.masks.bulk_solvent(
    xray_structure=structure,
    gridding_n_real=nxyz,
    ignore_zero_occupancy_atoms = False,
    solvent_radius=solvent_radius,
    shrink_truncation_radius=shrink_truncation_radius)
  assert m2.data.all_eq(m1.data)
  # ...and derived from grid_step.
  m3 = mmtbx.masks.bulk_solvent(
    xray_structure=structure,
    grid_step=step,
    ignore_zero_occupancy_atoms = False,
    solvent_radius=solvent_radius,
    shrink_truncation_radius=shrink_truncation_radius)
  assert m3.data.all_eq(m1.data)
  f_mask2 = m2.structure_factors(miller_set=miller_set)
  f_mask3 = m3.structure_factors(miller_set=miller_set)
  assert approx_equal(f_mask2.data(), f_mask3.data())
  # Regression value for the total |F_mask|.
  assert approx_equal(flex.sum(flex.abs(f_mask3.data())), 1095.17999134)
def exercise_sampled_model_density_1():
  """Round-trip check: sample model density on a grid, Fourier-transform
  it, and compare against directly computed structure factors for one
  anisotropic and one isotropic single-atom model.

  NOTE(review): the PDB literals below are column-sensitive; their
  spacing was reconstructed to standard PDB record layout from a
  whitespace-mangled source — confirm against the original file.
  """
  import iotbx.pdb
  pdb_str1 = """
CRYST1   10.000   10.000   10.000  90.00  90.00  90.00 P 1
ATOM      1  CB  PHE A   1       5.000   5.000   5.000  1.00 15.00           C
ANISOU    1  CB  PHE A   1      900   2900    100      0      0      0       C
TER
END
"""
  pdb_str2 = """
CRYST1   10.000   10.000   10.000  90.00  90.00  90.00 P 1
ATOM      1  CB  PHE A   1       5.000   5.000   5.000  1.00 15.00           C
TER
END
"""
  #
  for pdb_str in [pdb_str1, pdb_str2]:
    print
    pdb_inp = iotbx.pdb.input(source_info=None, lines=pdb_str)
    xrs = pdb_inp.xray_structure_simple()
    # Fine grid so the sampled density is accurate.
    crystal_gridding = maptbx.crystal_gridding(
      unit_cell=xrs.unit_cell(),
      space_group_info=xrs.space_group_info(),
      symmetry_flags=maptbx.use_space_group_symmetry,
      step=0.1)
    m = mmtbx.real_space.sampled_model_density(
      xray_structure=xrs,
      n_real=crystal_gridding.n_real()).data()
    # Complete set up to the grid's Nyquist limit, plus F(000).
    max_index = [(i - 1) // 2 for i in crystal_gridding.n_real()]
    complete_set = miller.build_set(
      crystal_symmetry=xrs.crystal_symmetry(),
      anomalous_flag=False,
      max_index=max_index)
    indices = complete_set.indices()
    indices.append((0, 0, 0))
    #
    complete_set = complete_set.customized_copy(indices=indices)
    f_obs_cmpl = complete_set.structure_factors_from_map(
      map=m,
      use_scale=True,
      anomalous_flag=False,
      use_sg=False)
    fc = complete_set.structure_factors_from_scatterers(
      xray_structure=xrs).f_calc()
    # R-factor (in %) between direct and map-derived amplitudes.
    f1 = abs(fc).data()
    f2 = abs(f_obs_cmpl).data()
    r = 200 * flex.sum(flex.abs(f1 - f2)) / flex.sum(f1 + f2)
    assert r < 0.5
    print r
    # Back-transform and compare map statistics.
    fft_map = miller.fft_map(crystal_gridding=crystal_gridding,
                             fourier_coefficients=f_obs_cmpl)
    fft_map.apply_volume_scaling()
    m_ = fft_map.real_map_unpadded()
    print m.as_1d().min_max_mean().as_tuple()
    print m_.as_1d().min_max_mean().as_tuple()
    assert approx_equal(m.as_1d().min_max_mean().as_tuple(),
                        m_.as_1d().min_max_mean().as_tuple(),
                        1.e-3)  # Must be smaller!?
def run(hklin): arrays = iotbx.file_reader.any_file(hklin).file_server.miller_arrays for arr in arrays: if not arr.anomalous_flag(): continue print arr.info() if arr.is_complex_array(): arr = arr.as_amplitude_array() # must be F ano = arr.anomalous_differences() ave = arr.average_bijvoet_mates() ano, ave = ano.common_sets(ave) print " <d''/mean>=", flex.mean(flex.abs(ano.data()) / ave.data()) print " <d''>/<mean>=", flex.mean(flex.abs(ano.data())) / flex.mean( ave.data()) print
def exercise_with_fixed_structure():
  """Run SFweight_spline on a fixed five-scatterer P31 structure and
  check the resulting statistics against regression values."""
  structure = xray.structure(
    crystal_symmetry=crystal.symmetry(
      unit_cell=(46.7058, 46.7058, 79.3998, 90, 90, 120),
      space_group_symbol="P 31"),
    scatterers=flex.xray_scatterer(
      [xray.scatterer(scattering_type="const", site=site)
       for site in [
         (0.0169, 0.8953, 0.1115),
         (0.9395, 0.1282, 0.1780),
         (0.2998, 0.3497, 0.0593),
         (0.8220, 0.8814, 0.1601),
         (0.6478, 0.4879, 0.3141)]]))
  sfweight = exercise_SFweight_spline_core(
    structure=structure,
    d_min=5,
    verbose="--Verbose" in sys.argv[1:])
  # Regression values: mean amplitudes of the best/difference map
  # coefficients and mean phase/figure-of-merit statistics.
  assert approx_equal(flex.mean(flex.abs(sfweight.fb())), 1.7545459)
  assert approx_equal(flex.mean(flex.abs(sfweight.fd())), 1.8437204)
  assert approx_equal(flex.mean(sfweight.centroid_phases()), -0.033979132)
  assert approx_equal(flex.mean(sfweight.figures_of_merit()), 0.018943642)
def get_r_split(self): try: r_split_bin = (1 / math.sqrt(2)) * ( flex.sum(flex.abs(self.I_even - self.I_odd)) / (flex.sum(self.I_even + self.I_odd) * 0.5)) except Exception, e: print "Warning: R_split calculation failed." print e r_split_bin = 0
def compare_derivs(out, ana, fin, expect_failure, eps=1.e-6):
  """Print analytical and finite-difference derivatives (normalized by
  the largest analytical magnitude) and compare them; the comparison is
  only advisory when expect_failure is set."""
  scale = 1 / max(1, flex.max(flex.abs(ana.as_double())))
  print >> out, " ana:", list(ana*scale)
  print >> out, " fin:", list(fin*scale)
  if expect_failure:
    # Still run the comparison for its diagnostic output, but do not
    # assert on it.
    approx_equal(ana*scale, fin*scale, eps=eps)
  else:
    assert approx_equal(ana*scale, fin*scale, eps=eps)
  print >> out
def check_occ_randomize(
      cmd, xrsp_init, output, selection, selection_str,
      verbose, tolerance=1.e-3):
  """Run cmd and verify that only occupancies changed in the output model.

  Sites and ADPs must match the initial structure; occupancies must have
  been perturbed.  When selection_str is given, atoms outside
  `selection` must keep their original occupancies.
  """
  remove_files(output)
  run_command(command=cmd, verbose=verbose)
  xrsp = xray_structure_plus(file_name=output)
  assert approx_equal(xrsp.sites_cart, xrsp_init.sites_cart, tolerance)
  assert approx_equal(xrsp.u_iso, xrsp_init.u_iso, tolerance)
  assert approx_equal(xrsp.u_cart, xrsp_init.u_cart, tolerance)
  # Both branches previously duplicated this computation and the two
  # asserts; hoisted so the shift is computed once.
  diff = flex.abs(xrsp.occ - xrsp_init.occ)
  assert flex.mean(diff) > 0.0
  assert flex.max(diff) > 0.0
  if (selection_str is not None):
    # Atoms outside the selection must be untouched.
    assert approx_equal(flex.mean(diff.select(~selection)), 0., tolerance)