import logging
import math
import sys
from io import StringIO

# Standard cctbx/mmtbx import locations assumed by the snippets below.
from cctbx import adptbx
from mmtbx import scaling
from mmtbx.scaling import absolute_scaling, matthews
from scitbx.array_family import flex

logger = logging.getLogger(__name__)


def ml_normalisation(self, aniso=False):
    # estimate number of residues per unit cell
    mr = matthews.matthews_rupp(self.intensities.crystal_symmetry())
    n_residues = mr.n_residues

    # estimate B-factor and scale factors for normalisation
    if aniso:
        normalisation = absolute_scaling.ml_aniso_absolute_scaling(
            self.intensities, n_residues=n_residues)
        u_star = normalisation.u_star
    else:
        normalisation = absolute_scaling.ml_iso_absolute_scaling(
            self.intensities, n_residues=n_residues)
        u_star = adptbx.b_as_u(
            adptbx.u_iso_as_u_star(
                self.intensities.unit_cell(), normalisation.b_wilson))

    # apply scales to data and sigmas alike
    self.intensities = self.intensities.customized_copy(
        data=scaling.ml_normalise_aniso(
            self.intensities.indices(), self.intensities.data(),
            normalisation.p_scale, self.intensities.unit_cell(), u_star),
        sigmas=scaling.ml_normalise_aniso(
            self.intensities.indices(), self.intensities.sigmas(),
            normalisation.p_scale, self.intensities.unit_cell(), u_star),
    ).set_info(self.intensities.info())

    # record output in log file
    s = StringIO()
    mr.show(out=s)
    normalisation.show(out=s)
    logger.info(s.getvalue())
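
# --- Illustrative sketch (not part of the routine above) ---------------------
# The isotropic branch converts the Wilson B estimate into a u_star tensor by
# feeding b_wilson through u_iso_as_u_star and then rescaling with b_as_u
# (B = 8*pi**2*U). Because u_iso_as_u_star is linear in its argument, this is
# equivalent to converting B to U first. The unit cell and B value below are
# assumptions, for demonstration only.
def _demo_b_wilson_to_u_star():
    from cctbx import uctbx

    uc = uctbx.unit_cell((78.0, 78.0, 37.0, 90.0, 90.0, 90.0))  # assumed cell
    b_wilson = 25.0  # assumed Wilson B estimate (A**2)
    u_star = adptbx.b_as_u(adptbx.u_iso_as_u_star(uc, b_wilson))
    # same result, converting B -> U before mapping onto the cell
    u_star_alt = adptbx.u_iso_as_u_star(uc, adptbx.b_as_u(b_wilson))
    assert all(abs(a - b) < 1e-12 for a, b in zip(u_star, u_star_alt))
    return u_star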
def anisotropic_correction(cache_0, p_scale, u_star, b_add=None,
                           must_be_greater_than=0.0):
    ## Make sure that u_star is not rwgk scaled, i.e. it should be as returned
    ## by the ml_aniso_absolute_scaling routine (which is NOT rwgk scaled).
    work_array = None
    try:
        work_array = cache_0.input.select(
            cache_0.input.data() > must_be_greater_than)
    except KeyboardInterrupt:
        raise
    except Exception:
        pass
    if work_array is None:
        work_array = cache_0.select(cache_0.data() > must_be_greater_than)
    # the normalisation acts on amplitudes: convert intensities if needed
    change_back_to_intensity = False
    if work_array.is_xray_intensity_array():
        work_array = work_array.f_sq_as_f()
        change_back_to_intensity = True
    assert not work_array.is_xray_intensity_array()
    if b_add is not None:
        # add an isotropic component to the anisotropy tensor (element-wise)
        u_star_add = adptbx.b_iso_as_u_star(work_array.unit_cell(), b_add)
        u_star = tuple(u + ua for u, ua in zip(u_star, u_star_add))
    corrected_amplitudes = scaling.ml_normalise_aniso(
        work_array.indices(), work_array.data(), p_scale,
        work_array.unit_cell(), u_star)
    if work_array.sigmas() is not None:
        corrected_sigmas = scaling.ml_normalise_aniso(
            work_array.indices(), work_array.sigmas(), p_scale,
            work_array.unit_cell(), u_star)
    else:
        corrected_sigmas = None
    work_array = work_array.customized_copy(
        data=corrected_amplitudes,
        sigmas=corrected_sigmas).set_observation_type(work_array)
    if change_back_to_intensity:
        # XXX check for floating-point overflows (which trigger the Boost trap
        # and crash the interpreter). The only known case is 2q8o:IOBS2,SIGIOBS2,
        # which is missing nearly all acentric hkls, but it clearly points to a
        # bug in this routine when dealing with pathological data.
        f_max = flex.max(work_array.data())
        if not f_max < math.sqrt(sys.float_info.max):
            raise OverflowError(
                "Amplitudes will exceed floating point limit if "
                "converted to intensities (max F = %e)." % f_max)
        work_array = work_array.f_as_f_sq()
    return work_array
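
# --- Minimal smoke-test sketch (assumed setup, illustrative values only) -----
# Passing a plain miller array as cache_0 exercises the fallback branch above,
# since miller.array has no .input attribute. p_scale=0 and a zero u_star give
# an identity-like correction under the convention used by ml_normalise_aniso.
def _demo_anisotropic_correction():
    from cctbx import crystal, miller

    symm = crystal.symmetry(
        unit_cell=(10, 10, 10, 90, 90, 90), space_group_symbol="P1")
    ms = miller.build_set(symm, anomalous_flag=False, d_min=2.0)
    ma = miller.array(
        ms,
        data=flex.double(ms.size(), 100.0),
        sigmas=flex.double(ms.size(), 10.0),
    ).set_observation_type_xray_intensity()
    corrected = anisotropic_correction(ma, p_scale=0.0, u_star=(0.0,) * 6)
    # intensities in, intensities out (round-trip through amplitudes)
    assert corrected.is_xray_intensity_array()
    return corrected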
def _ml_normalisation(intensities, aniso):
    # estimate number of residues per unit cell
    mr = matthews.matthews_rupp(intensities.crystal_symmetry())
    n_residues = mr.n_residues

    # estimate B-factor and scale factors for normalisation
    if aniso:
        normalisation = absolute_scaling.ml_aniso_absolute_scaling(
            intensities, n_residues=n_residues)
        u_star = normalisation.u_star
    else:
        normalisation = absolute_scaling.ml_iso_absolute_scaling(
            intensities, n_residues=n_residues)
        u_star = adptbx.b_as_u(
            adptbx.u_iso_as_u_star(intensities.unit_cell(), normalisation.b_wilson))

    # record output in log file
    if aniso:
        # upper triangle of the symmetric B_cart tensor
        b_cart = normalisation.b_cart
        logger.info("ML estimate of overall B_cart value:")
        logger.info(
            """\
  %5.2f, %5.2f, %5.2f
  %12.2f, %5.2f
  %19.2f"""
            % (b_cart[0], b_cart[3], b_cart[4], b_cart[1], b_cart[5], b_cart[2]))
    else:
        logger.info("ML estimate of overall B value:")
        logger.info("   %5.2f A**2" % normalisation.b_wilson)
    logger.info("ML estimate of -log of scale factor:")
    logger.info("  %5.2f" % normalisation.p_scale)

    s = StringIO()
    mr.show(out=s)
    normalisation.show(out=s)
    logger.debug(s.getvalue())

    # apply scales and return a normalised copy
    return intensities.customized_copy(
        data=scaling.ml_normalise_aniso(
            intensities.indices(),
            intensities.data(),
            normalisation.p_scale,
            intensities.unit_cell(),
            u_star,
        ),
        sigmas=scaling.ml_normalise_aniso(
            intensities.indices(),
            intensities.sigmas(),
            normalisation.p_scale,
            intensities.unit_cell(),
            u_star,
        ),
    )
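
# --- Usage sketch -------------------------------------------------------------
# Unlike the ml_normalisation method above, which rebinds self.intensities in
# place, this helper is pure: it returns the normalised copy and leaves its
# input untouched. The input would normally be a miller array of intensities
# with sigmas, e.g. as read via iotbx; the call below is illustrative only.
#
#   normalised = _ml_normalisation(intensities, aniso=True)
#   assert normalised.size() == intensities.size()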