def apply_target_scale_factors(f_array=None, phases=None, resolution=None,
    target_scale_factors=None, n_real=None, return_map_coeffs=None,
    out=sys.stdout):
  """Apply per-bin target scale factors to the amplitudes in f_array.

  Reports the overall b_iso before and after scaling, recombines the
  scaled amplitudes with the supplied phases, and returns either the new
  map coefficients (if return_map_coeffs is set) or a map_and_b_object
  holding the calculated real-space map plus the starting/final b_iso.
  """
  from cctbx.maptbx.segment_and_split_map import get_b_iso

  # Overall b_iso of the data before any scaling is applied
  starting_b = get_b_iso(f_array, d_min=resolution)

  # Expand the per-bin target scale factors into one multiplier per
  # reflection, then scale the amplitudes
  per_reflection_scales = get_scale_factors(f_array,
    target_scale_factors=target_scale_factors)
  scaled_array = f_array.customized_copy(
    data=f_array.data() * per_reflection_scales)
  final_b = get_b_iso(scaled_array, d_min=resolution)

  print("\nInitial b_iso for "+\
    "map: %5.1f A**2 After applying scaling: %5.1f A**2" %(
    starting_b, final_b), file=out)

  # Recombine the scaled amplitudes with the original phases
  scaled_map_coeffs = scaled_array.phase_transfer(phase_source=phases,
    deg=True)
  if return_map_coeffs:
    return scaled_map_coeffs

  # Otherwise compute the real-space map and package it with the b values
  map_data = calculate_map(map_coeffs=scaled_map_coeffs, n_real=n_real)
  return map_and_b_object(map_data=map_data,
    starting_b_iso=starting_b, final_b_iso=final_b)
def scale_amplitudes(model_map_coeffs=None, map_coeffs=None,
    first_half_map_coeffs=None, second_half_map_coeffs=None,
    si=None, resolution=None, overall_b=None,
    fraction_complete=None, min_fraction_complete=0.05,
    map_calculation=True, verbose=False, out=sys.stdout):
  """Determine and apply resolution-dependent scale factors to map_coeffs.

  If si.target_scale_factors is already set, those factors are used
  directly; otherwise they are computed with calculate_fsc, either
  model-based (model_map_coeffs supplied) or half-map-based
  (first_half_map_coeffs + second_half_map_coeffs supplied).

  Returns a map_and_b_object: with the scaled map if scaling was applied,
  with the unscaled map if no scale factors were obtained, or empty if
  map_calculation is False.

  Raises Sorry if no resolution is available (neither the resolution
  argument nor si.resolution is set).
  """
  # Figure out resolution_dependent sharpening to optimally
  # match map and model. Then apply it as usual.
  # if second_half_map_coeffs instead of model, use second_half_map_coeffs
  # same as normalized model map_coeffs, except that the target fall-off
  # should be skipped (could use fall-off based on a dummy model...)

  # Model-based mode only when a model map is given and half-maps are not
  # both present; otherwise require that either precomputed scale factors
  # or both half-maps are available.
  if model_map_coeffs and (
      not first_half_map_coeffs or not second_half_map_coeffs):
    is_model_based=True
  else:
    assert si.target_scale_factors or (
      first_half_map_coeffs and second_half_map_coeffs)
    is_model_based=False

  # si.verbose overrides a False verbose argument
  if si.verbose and not verbose:
    verbose=True

  # if si.target_scale_factors is set, just use those scale factors

  from cctbx.maptbx.segment_and_split_map import map_coeffs_as_fp_phi,get_b_iso

  # Split map coefficients into amplitudes (f_array) and phases
  f_array,phases=map_coeffs_as_fp_phi(map_coeffs)

  (d_max,d_min)=f_array.d_max_min()
  # Set up a binner if one is not already attached; every bin must be
  # populated or binned scaling is not meaningful.
  if not f_array.binner():
    f_array.setup_binner(n_bins=si.n_bins,d_max=d_max,d_min=d_min)
    f_array.binner().require_all_bins_have_data(min_counts=1,
      error_string="Please use a lower value of n_bins")

  # Fall back to si.resolution; a resolution is mandatory from here on
  if resolution is None:
    resolution=si.resolution
  if resolution is None:
    raise Sorry("Need resolution for model sharpening")

  obs_b_iso=get_b_iso(f_array,d_min=resolution)
  print("\nEffective b_iso of observed data: %6.1f A**2" %(obs_b_iso), file=out)

  if not si.target_scale_factors: # get scale factors if don't already have them
    # calculate_fsc fills in si.target_scale_factors (model- or
    # half-map-based) and returns the updated si
    si=calculate_fsc(si=si,
      f_array=f_array,  # just used for binner
      map_coeffs=map_coeffs,
      model_map_coeffs=model_map_coeffs,
      first_half_map_coeffs=first_half_map_coeffs,
      second_half_map_coeffs=second_half_map_coeffs,
      resolution=resolution,
      fraction_complete=fraction_complete,
      min_fraction_complete=min_fraction_complete,
      is_model_based=is_model_based,
      cc_cut=si.cc_cut,
      scale_using_last=si.scale_using_last,
      max_cc_for_rescale=si.max_cc_for_rescale,
      pseudo_likelihood=si.pseudo_likelihood,
      verbose=verbose,
      out=out)
    # now si.target_scale_factors array are the scale factors

  # Now create resolution-dependent coefficients from the scale factors

  if not si.target_scale_factors: # nothing to do
    # calculate_fsc produced no scale factors: return the unscaled map
    print("\nNo scaling applied", file=out)
    map_data=calculate_map(map_coeffs=map_coeffs,n_real=si.n_real)
    return map_and_b_object(map_data=map_data)
  elif not map_calculation:
    # Caller only wanted the scale factors (now stored on si)
    return map_and_b_object()
  else: # apply scaling
    if si.pseudo_likelihood:
      # Normalize amplitudes first and rebuild the binner on the
      # normalized array before applying the target scale factors
      print("Normalizing structure factors", file=out)
      f_array=quasi_normalize_structure_factors(f_array,set_to_minimum=0.01,
        pseudo_likelihood=si.pseudo_likelihood)
      f_array.setup_binner(n_bins=si.n_bins,d_max=d_max,d_min=d_min)
    map_and_b=apply_target_scale_factors(
      f_array=f_array,phases=phases,resolution=resolution,
      target_scale_factors=si.target_scale_factors,
      n_real=si.n_real,
      out=out)
    return map_and_b