import sys

import mmtbx.f_model
import mmtbx.masks
from cctbx import maptbx, miller
from cctbx.array_family import flex
from libtbx import adopt_init_args
from libtbx.str_utils import format_value

def get_map_values_and_grid_sites_frac(
      fmodel,
      map_type,
      grid_step,
      d_min,
      apply_sigma_scaling,
      apply_volume_scaling,
      include_f000,
      sel_bb,
      use_exact_phases):
  """
  Compute a difference (Fobs-Fmodel type) map on a regular grid and return
  the map value and fractional coordinates of every grid node.
  """
  resolution_factor = grid_step / d_min
  mp = mmtbx.masks.mask_master_params.extract()
  mp.grid_step_factor = 1. / resolution_factor
  mmtbx_masks_asu_mask_obj = mmtbx.masks.asu_mask(
    xray_structure = fmodel.xray_structure,
    d_min          = d_min,
    mask_params    = mp)
  bulk_solvent_mask = mmtbx_masks_asu_mask_obj.mask_data_whole_uc()
  sel = bulk_solvent_mask > 0
  bulk_solvent_mask = bulk_solvent_mask.set_selected(sel, 1)
  cr_gr = maptbx.crystal_gridding(
    unit_cell             = fmodel.xray_structure.unit_cell(),
    space_group_info      = fmodel.f_obs().space_group_info(),
    pre_determined_n_real = bulk_solvent_mask.focus())
  from mmtbx import map_tools
  #
  #mc = map_tools.electron_density_map(fmodel = fmodel).map_coefficients(
  #  map_type           = map_type,
  #  acentrics_scale    = 1.0,
  #  centrics_pre_scale = 1.0)
  if (not use_exact_phases):
    k = fmodel.k_isotropic() * fmodel.k_anisotropic()
    print("flex.mean(k):", flex.mean(k))
    f_model = fmodel.f_model()
    # difference amplitudes with overall scales removed
    mc_data = abs(fmodel.f_obs()).data() / k - abs(f_model).data() / k
    # transfer model phases onto the difference amplitudes
    tmp = miller.array(
      miller_set = f_model,
      data       = flex.double(f_model.indices().size(), 1)
      ).phase_transfer(phase_source = f_model)
    mc = miller.array(miller_set = tmp, data = mc_data * tmp.data())
  else:
    fmodel.update_all_scales(fast=True, remove_outliers=False)
    k = fmodel.k_isotropic() * fmodel.k_anisotropic()
    fo = fmodel.f_obs().customized_copy(data = fmodel.f_obs().data() / k)
    fo = fo.phase_transfer(phase_source = fmodel.f_model())
    fc = fmodel.f_calc().customized_copy(data = fmodel.f_calc().data())
    mc = miller.array(miller_set = fo, data = fo.data() - fc.data())
  ######## XXX
  fft_map = miller.fft_map(
    crystal_gridding     = cr_gr,
    fourier_coefficients = mc)
  fft_map.apply_volume_scaling()
  map_data = fft_map.real_map_unpadded()
  xrs = fmodel.xray_structure
  # mean map value within 0.5 A of the selected (backbone) atoms
  sites_cart = xrs.sites_cart().select(sel_bb)
  sel = maptbx.grid_indices_around_sites(
    unit_cell  = xrs.unit_cell(),
    fft_n_real = map_data.focus(),
    fft_m_real = map_data.all(),
    sites_cart = sites_cart,
    site_radii = flex.double(sites_cart.size(), 0.5))
  map_in = map_data.select(sel)
  mm = flex.mean(map_in)
  print("mean in (1):", mm)
  #
  #sites_frac = xrs.sites_frac().select(sel_bb)
  #mm = 0
  #for sf in sites_frac:
  #  mm += map_data.eight_point_interpolation(sf)
  #mm = mm/sites_frac.size()
  #print("mean in (2):", mm)
  ########
  #
  # Add F000
  #reg = fmodel.xray_structure.scattering_type_registry(table = "wk1995")
  #f_000 = reg.sum_of_scattering_factors_at_diffraction_angle_0() +\
  #  0.4*fmodel.xray_structure.unit_cell().volume()
  if (include_f000):
    #f_000 = include_f000*fmodel.xray_structure.unit_cell().volume()*0.3
    #f_000 = None # XXX
    f_000 = abs(mm * xrs.unit_cell().volume())
    #f_000 = 0.626*fmodel.xray_structure.unit_cell().volume()*0.35
  else:
    f_000 = None
  print("f_000:", f_000)
  #print("XXX", include_f000*fmodel.xray_structure.unit_cell().volume()*0.3)
  #
  fft_map = miller.fft_map(
    crystal_gridding     = cr_gr,
    fourier_coefficients = mc,
    f_000                = f_000)
  #
  assert [apply_sigma_scaling, apply_volume_scaling].count(True) == 1
  if (apply_sigma_scaling):
    fft_map.apply_sigma_scaling()
  elif (apply_volume_scaling):
    fft_map.apply_volume_scaling()
  else:
    raise RuntimeError(
      "Exactly one of apply_sigma_scaling/apply_volume_scaling must be True.")
  nx, ny, nz = fft_map.n_real()
  map_data = fft_map.real_map_unpadded()
  #map_data = map_data * bulk_solvent_mask
  print("n_real:", nx, ny, nz, map_data.size())
  # collect the map value and fractional coordinates of every grid node
  grid_sites_frac = flex.vec3_double()
  map_values = flex.double()
  for ix in range(nx):
    for iy in range(ny):
      for iz in range(nz):
        mv = map_data[(ix, iy, iz)]
        if 1: #if(mv != 0):
          xf, yf, zf = ix / float(nx), iy / float(ny), iz / float(nz)
          grid_sites_frac.append([xf, yf, zf])
          map_at_ixiyiz = map_data[(ix, iy, iz)]
          map_values.append(map_at_ixiyiz)
  return map_values, grid_sites_frac
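# --- Usage sketch (not part of the original module) --------------------------
# Minimal, hypothetical driver for get_map_values_and_grid_sites_frac().
# Assumptions: `fmodel` is an mmtbx.f_model.manager built elsewhere from
# observed data and an xray structure; `sel_bb` here simply selects all
# scatterers, whereas in practice it would be a backbone-atom selection.
def _example_get_map_values(fmodel):
  sel_bb = flex.bool(fmodel.xray_structure.scatterers().size(), True)
  map_values, grid_sites_frac = get_map_values_and_grid_sites_frac(
    fmodel               = fmodel,
    map_type             = "mFo-DFc",  # currently unused by the function body
    grid_step            = 0.3,
    d_min                = fmodel.f_obs().d_min(),
    apply_sigma_scaling  = False,
    apply_volume_scaling = True,
    include_f000         = True,
    sel_bb               = sel_bb,
    use_exact_phases     = False)
  print("grid points:", map_values.size())
  return map_values, grid_sites_frac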
def __init__(self, data_arrays, xray_structure, log=None, silent=False,
             output_file=None, peak_search=False, map_cutoff=None,
             peak_search_params=None, r_free_arrays=None, write_map=True,
             multiscale=False, anomalous=False):
  """
  Build scaled Fobs(1)-Fobs(2) difference map coefficients, phased with the
  model, from two observed data sets sharing one xray_structure.
  """
  if (log is None): log = sys.stdout
  adopt_init_args(self, locals())
  fmodels = []
  for i_seq, d in enumerate(data_arrays):
    if (not silent):
      print("Data set: %d" % i_seq, file=log)
    if (d.anomalous_flag()) and (not anomalous):
      d = d.average_bijvoet_mates()
    elif (anomalous):
      assert d.anomalous_flag()
    if (r_free_arrays is not None) and (i_seq < len(r_free_arrays)):
      r_free_flags = r_free_arrays[i_seq]
    else:
      r_free_flags = d.array(data=flex.bool(d.data().size(), False))
    fmodel = mmtbx.f_model.manager(
      xray_structure = xray_structure,
      r_free_flags   = r_free_flags,
      target_name    = "ls_wunit_k1",
      f_obs          = d)
    fmodel.update_all_scales(log=None)
    if (not silent):
      fmodel.info().show_rfactors_targets_scales_overall(out=log)
      print(file=log)
    fmodels.append(fmodel)
  self.fmodel = fmodels[0]
  # prepare Fobs for map calculation (apply scaling):
  f_obss = []
  for fmodel in fmodels:
    obs = fmodel.f_obs()
    f_obs_scale = 1.0 / fmodel.k_anisotropic() / fmodel.k_isotropic()
    obs = miller.array(miller_set = fmodel.f_model(),
                       data       = obs.data() * f_obs_scale)
    f_obss.append(obs)
  # given two Fobs sets, make them one-to-one matching, get phases and
  # map coefficients
  # Note: f_calc below is just f_calc from atoms (no bulk solvent etc applied)
  fobs_1, f_model = f_obss[0].common_sets(other = fmodels[1].f_model())
  fobs_1, fobs_2  = fobs_1.common_sets(other = f_obss[1])
  fobs_1, f_model = fobs_1.common_sets(other = f_model)
  self.f_model = f_model
  assert fobs_2.indices().all_eq(fobs_1.indices())
  assert f_model.indices().all_eq(fobs_1.indices())
  # scale again
  scale_k1 = 1
  den = flex.sum(flex.abs(fobs_2.data()) * flex.abs(fobs_2.data()))
  if (den != 0):
    scale_k1 = flex.sum(flex.abs(fobs_1.data()) * flex.abs(fobs_2.data())) / den
  #
  fobs_2 = fobs_2.array(data = fobs_2.data() * scale_k1)
  if multiscale:
    fobs_1 = fobs_2.multiscale(other = fobs_1, reflections_per_bin = 250)
  if (not silent):
    print("", file=log)
    print("Fobs1_vs_Fobs2 statistics:", file=log)
    print("Bin#  Resolution range  Compl. No.of refl.  CC  R-factor", file=log)
    fobs_1.setup_binner(reflections_per_bin = min(500, fobs_1.data().size()))
    fobs_2.use_binning_of(fobs_1)
    for i_bin in fobs_1.binner().range_used():
      sel = fobs_1.binner().selection(i_bin)
      f1 = fobs_1.select(sel)
      f2 = fobs_2.select(sel)
      d_max, d_min = f1.d_max_min()
      compl = f1.completeness(d_max = d_max)
      n_ref = sel.count(True)
      num = flex.sum(flex.abs(f1.data() - f2.data()))
      den = flex.sum(flex.abs(f1.data() + f2.data()) / 2)
      r = None
      if (den != 0):
        r = num / den
      cc = flex.linear_correlation(x = f1.data(), y = f2.data()).coefficient()
      d_range = fobs_1.binner().bin_legend(
        i_bin = i_bin, show_bin_number = False, show_counts = False)
      fmt = "%3d: %-17s %4.2f %6d %5.3f %6s"
      print(fmt % (i_bin, d_range, compl, n_ref, cc,
        format_value("%6.4f", r)), file=log)
  # overall statistics
  self.cc = flex.linear_correlation(
    x = fobs_1.data(), y = fobs_2.data()).coefficient()
  num = flex.sum(flex.abs(fobs_1.data() - fobs_2.data()))
  den = flex.sum(flex.abs(fobs_1.data() + fobs_2.data()) / 2)
  self.r_factor = None
  if (den != 0):
    self.r_factor = num / den
  # map coefficients
  def phase_transfer(miller_array, phase_source):
    # transfer the phases of phase_source onto the (real) data of miller_array
    tmp = miller.array(
      miller_set = miller_array,
      data       = flex.double(miller_array.indices().size(), 1)
      ).phase_transfer(phase_source = phase_source)
    return miller.array(
      miller_set = miller_array,
      data       = miller_array.data() * tmp.data())
  if (not anomalous):
    diff = miller.array(
      miller_set = f_model,
      data       = fobs_1.data() - fobs_2.data())
    self.map_coeff = phase_transfer(
      miller_array = diff,
      phase_source = f_model)
  else:
    dano_1 = fobs_1.anomalous_differences()
    dano_2 = fobs_2.anomalous_differences()
    assert dano_1.indices().all_eq(dano_2.indices())
    diff = miller.array(
      miller_set = dano_1,
      data       = dano_1.data() - dano_2.data())
    f_model_phases = f_model.average_bijvoet_mates().common_set(diff)
    map_coeffs = phase_transfer(
      miller_array = diff,
      phase_source = f_model_phases)
    # divide by 2i to turn anomalous differences into map coefficients
    self.map_coeff = map_coeffs.customized_copy(data = map_coeffs.data()/(2j))
  if (self.map_coeff.anomalous_flag()):
    self.map_coeff = self.map_coeff.average_bijvoet_mates()
  self.file_names = []
  if (write_map):
    self.file_names = self.write_map_file()