def exercise_2 () :
  """Regression exercise for xtriage on the p9 SeMet dataset.

  Runs completeness/data-strength statistics, Wilson scaling, outlier
  detection (expected to fail on raw input), and the full xtriage
  analysis, checking numerical results against reference values.
  Skips silently (with a warning) if phenix_regression is unavailable.
  """
  hkl_file = libtbx.env.find_in_repositories(
    relative_path="phenix_regression/wizards/p9_se_w2.sca",
    test=os.path.isfile)
  if (hkl_file is None) :
    warnings.warn("phenix_regression not available, skipping test")
    return
  hkl_in = file_reader.any_file(hkl_file).assert_file_type("hkl")
  # Read unmerged intensities, imposing the known p9 symmetry.
  i_obs_raw = hkl_in.file_object.as_miller_arrays(
    merge_equivalents=False,
    crystal_symmetry=crystal.symmetry(
      space_group_symbol="I4",
      unit_cell=(113.949,113.949,32.474,90,90,90)))[0]
  i_obs = i_obs_raw.merge_equivalents().array()
  # completeness and data strength
  cstats = ds.i_sigi_completeness_stats(i_obs)
  d_min_cut = cstats.resolution_cut
  assert approx_equal(d_min_cut, 2.150815)
  ds.wilson_scaling(
    miller_array=i_obs,
    n_residues=120)
  # outliers - this shouldn't actually work, since it requires additional
  # processing steps on the input data
  try :
    ds.possible_outliers(i_obs)
  except AssertionError :
    pass
  else :
    raise Exception_expected
  ######################################################################
  # OVERALL ANALYSIS
  pdb_file = libtbx.env.find_in_repositories(
    relative_path="phenix_examples/p9-build/p9.pdb",
    test=os.path.isfile)
  f_calc = None
  if (pdb_file is not None) :
    pdb_in = file_reader.any_file(pdb_file).assert_file_type("pdb")
    hierarchy = pdb_in.file_object.hierarchy
    xrs = pdb_in.file_object.xray_structure_simple(
      crystal_symmetry=i_obs)
    f_calc = xrs.structure_factors(d_min=i_obs.d_min()).f_calc()
    f_calc = abs(f_calc).generate_bijvoet_mates()
    f_calc = f_calc.set_observation_type_xray_amplitude()
    i_obs, f_calc = i_obs.common_sets(other=f_calc)
    # FIX: close (and therefore flush) the temporary PDB before it is
    # re-read by xtriage.run() below; the original open(...).write(...)
    # relied on garbage collection to flush the handle.
    with open("tmp_xtriage.pdb", "w") as pdb_out :
      pdb_out.write(hierarchy.as_pdb_string(
        crystal_symmetry=i_obs))
    pdb_file = "tmp_xtriage.pdb"
  params = xtriage.master_params.extract()
  params.scaling.input.asu_contents.n_residues = 141
  # NOTE(review): the log handle is deliberately left open, since the
  # analyses object may keep writing to text_out; swap in sys.stdout
  # when debugging.
  log_out = open("logfile3.log", "w")
  result = xtriage.xtriage_analyses(
    miller_obs=i_obs,
    miller_calc=f_calc,
    params=params,
    unmerged_obs=i_obs_raw,
    text_out=log_out)
  # XXX there appears to be some system-dependence here, hence sloppy limits
  assert (15.5 < result.aniso_b_min < 15.9)
  assert (10 < result.aniso_range_of_b < 11)
  # check relative Wilson
  if (pdb_file is not None) :
    assert (result.relative_wilson is not None)
    # FIXME
    #assert (result.relative_wilson.n_outliers() == 34)
  #show_pickled_object_sizes(result)
  test_pickle_consistency_and_size(result)
  # XXX PDB validation server
  assert approx_equal(result.iso_b_wilson, 18.33, eps=0.1)
  assert approx_equal(result.aniso_b_ratio, 0.546, eps=0.1)
  assert (result.number_of_wilson_outliers == 0)
  assert approx_equal(result.l_test_mean_l, 0.493, eps=0.1)
  assert approx_equal(result.l_test_mean_l_squared, 0.326, eps=0.1)
  assert approx_equal(result.i_over_sigma_outer_shell, 3.25, eps=0.1)
  assert ("No significant pseudotranslation is detected" in
          result.patterson_verdict)
  # test consistency of output after pickling and unpickling
  try :
    from phenix_dev.phenix_cloud import xtriage_json
  except ImportError :
    pass
  else :
    json_out = xtriage_json.json_output("p9.sca")
    result.show(out=json_out)
    # FIX: deterministically close the JSON file (was open(...).write(...)).
    with open("xtriage.json", "w") as json_file :
      json_file.write(json_out.export())
  # unmerged data
  assert result.merging_stats is not None
  out = StringIO()
  result.merging_stats.show(out=out)
  assert ("R-merge: 0.073" in out.getvalue())
  assert approx_equal(result.estimate_d_min(min_i_over_sigma=10), 1.9645,
    eps=0.001)
  # FIXME PDB doesn't actually have unit cell!
  # test detection of symmetry in reference file
  if (pdb_file is not None) :
    args = [hkl_file, pdb_file]
    result = xtriage.run(args=args, out=null_out())
def __init__(self,
             miller_array,
             phil_object,
             out=None,
             out_plot=None,
             miller_calc=None,
             original_intensities=None,
             completeness_as_non_anomalous=None,
             verbose=0):
  """Run the core xtriage-style scaling analyses on an observed array.

  Stages (each stores its result on self):
    1. Matthews coefficient / solvent-content estimation
       (self.matthews_results, self.nres_known, and either
       self.defined_copies or self.guessed_copies);
    2. I/sigI data-strength statistics (self.data_strength, only when
       sigmas are available);
    3. maximum-likelihood isotropic Wilson scaling (self.iso_scale_and_b,
       self.iso_p_scale, self.iso_b_wilson);
    4. maximum-likelihood anisotropic Wilson scaling
       (self.aniso_scale_and_b).

  WARNING: phil_object is mutated in place -- asu_contents.n_residues,
  asu_contents.n_bases, asu_contents.n_copies_per_asu and
  misc_twin_parameters.twin_test_cuts.high_resolution may be reassigned.

  Parameters:
    miller_array: observed data; also supplies the crystal symmetry and
      the space-group order used to scale the residue/base counts
    phil_object: scaling PHIL parameter scope (mutated, see above)
    out: log stream; defaults to sys.stdout
    out_plot: plot output stream (unused in the code visible here)
    miller_calc: calculated data (unused in the code visible here)
    original_intensities: optional unmerged X-ray intensities; when given
      they replace miller_array for the data-strength statistics
    completeness_as_non_anomalous: forwarded to i_sigi_completeness_stats
    verbose: > 0 enables the progress banners

  On AttributeError from the anisotropic scaling step the error is
  logged and __init__ returns early, leaving the object partially
  initialized.
  """
  if out is None:
    out = sys.stdout
  if verbose > 0:
    print >> out
    print >> out
    print >> out, "Matthews coefficient and Solvent content statistics"
  n_copies_solc = 1.0
  self.nres_known = False
  # Composition: explicit residue/base counts take precedence over a
  # sequence file.
  if (phil_object.scaling.input.asu_contents.n_residues is not None or
      phil_object.scaling.input.asu_contents.n_bases is not None):
    self.nres_known = True
    if (phil_object.scaling.input.asu_contents.sequence_file is not None):
      print >> out, " warning: ignoring sequence file"
  elif (phil_object.scaling.input.asu_contents.sequence_file is not None):
    print >> out, " determining composition from sequence file %s" % \
      phil_object.scaling.input.asu_contents.sequence_file
    seq_comp = iotbx.bioinformatics.composition_from_sequence_file(
      file_name=phil_object.scaling.input.asu_contents.sequence_file,
      log=out)
    if (seq_comp is not None):
      phil_object.scaling.input.asu_contents.n_residues = seq_comp.n_residues
      phil_object.scaling.input.asu_contents.n_bases = seq_comp.n_bases
      self.nres_known = True
  # Matthews/solvent-content analysis; also estimates copies per ASU.
  # Result index order (n_residues, n_bases, n_copies) is fixed by the
  # assignments below.
  matthews_results = matthews.matthews_rupp(
    crystal_symmetry=miller_array,
    n_residues=phil_object.scaling.input.asu_contents.n_residues,
    n_bases=phil_object.scaling.input.asu_contents.n_bases,
    out=out, verbose=1)
  phil_object.scaling.input.asu_contents.n_residues = matthews_results[0]
  phil_object.scaling.input.asu_contents.n_bases = matthews_results[1]
  n_copies_solc = matthews_results[2]
  self.matthews_results = matthews_results
  # A user-specified copy count overrides the Matthews estimate.
  if phil_object.scaling.input.asu_contents.n_copies_per_asu is not None:
    n_copies_solc = phil_object.scaling.input.asu_contents.n_copies_per_asu
    self.defined_copies = n_copies_solc
    if verbose > 0:
      print >> out, "Number of copies per asymmetric unit provided"
      print >> out, " Will use user specified value of ", n_copies_solc
  else:
    phil_object.scaling.input.asu_contents.n_copies_per_asu = n_copies_solc
    self.guessed_copies = n_copies_solc
  # first report on I over sigma
  miller_array_new = miller_array
  self.data_strength = None
  miller_array_intensities = miller_array
  if (original_intensities is not None):
    assert original_intensities.is_xray_intensity_array()
    miller_array_intensities = original_intensities
  if miller_array_intensities.sigmas() is not None:
    data_strength = data_statistics.i_sigi_completeness_stats(
      miller_array_intensities,
      isigi_cut=phil_object.scaling.input.parameters.
        misc_twin_parameters.twin_test_cuts.isigi_cut,
      completeness_cut=phil_object.scaling.input.parameters.
        misc_twin_parameters.twin_test_cuts.completeness_cut,
      completeness_as_non_anomalous=completeness_as_non_anomalous)
    data_strength.show(out)
    self.data_strength = data_strength
    # If no high-resolution limit was supplied for the twin tests, take
    # the weaker (lower-resolution) of the two data-strength estimates.
    if phil_object.scaling.input.parameters.misc_twin_parameters.twin_test_cuts.high_resolution is None:
      if data_strength.resolution_cut > data_strength.resolution_at_least:
        phil_object.scaling.input.parameters.misc_twin_parameters.twin_test_cuts.high_resolution = data_strength.resolution_at_least
      else:
        phil_object.scaling.input.parameters.misc_twin_parameters.twin_test_cuts.high_resolution = data_strength.resolution_cut
  ## Isotropic wilson scaling
  if verbose > 0:
    print >> out
    print >> out
    print >> out, "Maximum likelihood isotropic Wilson scaling "
  n_residues = phil_object.scaling.input.asu_contents.n_residues
  n_bases = phil_object.scaling.input.asu_contents.n_bases
  if n_residues is None:
    n_residues = 0
  if n_bases is None:
    n_bases = 0
  if n_bases + n_residues == 0:
    raise Sorry("No scatterers available")
  # Counts are per ASU; scale by space-group order and copy number to
  # get the full unit-cell contents expected by the ML scaling routines.
  iso_scale_and_b = absolute_scaling.ml_iso_absolute_scaling(
    miller_array=miller_array_new,
    n_residues=n_residues * miller_array.space_group().order_z() * n_copies_solc,
    n_bases=n_bases * miller_array.space_group().order_z() * n_copies_solc)
  iso_scale_and_b.show(out=out, verbose=verbose)
  self.iso_scale_and_b = iso_scale_and_b
  ## Store the b and scale values from isotropic ML scaling
  self.iso_p_scale = iso_scale_and_b.p_scale
  self.iso_b_wilson = iso_scale_and_b.b_wilson
  ## Anisotropic ml wilson scaling
  if verbose > 0:
    print >> out
    print >> out
    print >> out, "Maximum likelihood anisotropic Wilson scaling "
  aniso_scale_and_b = absolute_scaling.ml_aniso_absolute_scaling(
    miller_array=miller_array_new,
    n_residues=n_residues * miller_array.space_group().order_z() * n_copies_solc,
    n_bases=n_bases * miller_array.space_group().order_z() * n_copies_solc)
  aniso_scale_and_b.show(out=out, verbose=1)
  self.aniso_scale_and_b = aniso_scale_and_b
  try:
    # Probe for b_cart; anisotropic scaling may fail to produce it.
    b_cart = aniso_scale_and_b.b_cart
  except AttributeError, e:
    # Log the failure and bail out of __init__ early.
    print >> out, "*** ERROR ***"
    print >> out, str(e)
    show_exception_info_if_full_testing()
    return
def __init__(self,
             miller_array,
             phil_object,
             out=None,
             out_plot=None,
             miller_calc=None,
             original_intensities=None,
             completeness_as_non_anomalous=None,
             verbose=0):
  """Run the full xtriage-style scaling pipeline on an observed array.

  Stages (each stores results on self):
    1. Matthews coefficient / solvent-content estimation
       (self.matthews_results, self.nres_known, self.defined_copies or
       self.guessed_copies);
    2. I/sigI data-strength statistics (self.data_strength);
    3. ML isotropic Wilson scaling (self.iso_scale_and_b,
       self.iso_p_scale, self.iso_b_wilson);
    4. ML anisotropic Wilson scaling (self.aniso_scale_and_b,
       self.aniso_p_scale, self.aniso_u_star, self.aniso_b_cart,
       self.overall_b_cart);
    5. anisotropy correction and kernel normalisation
       (self.no_aniso_array, self.normalised_miller);
    6. basic intensity statistics (self.basic_data_stats,
       self.miller_array) and, when miller_calc extends beyond 4 A,
       a relative Wilson plot (self.rel_wilson).

  WARNING: phil_object is mutated in place -- asu_contents.n_residues,
  asu_contents.n_bases, asu_contents.n_copies_per_asu and
  misc_twin_parameters.twin_test_cuts.high_resolution may be reassigned.
  It is also stored as self.phil_object.

  Parameters:
    miller_array: observed data; supplies the crystal symmetry, space
      group and unit cell used throughout
    phil_object: scaling PHIL parameter scope (mutated, see above)
    out: log stream; defaults to sys.stdout
    out_plot: plot output forwarded to basic_intensity_statistics
    miller_calc: calculated data used for the relative Wilson plot
    original_intensities: optional unmerged X-ray intensities; when given
      they replace miller_array for the data-strength statistics
    completeness_as_non_anomalous: forwarded to i_sigi_completeness_stats
    verbose: > 0 enables the progress banners

  On AttributeError from the anisotropic scaling step the error is
  logged and __init__ returns early, leaving the object partially
  initialized (attributes of stages 5-6 unset).
  """
  if out is None:
    out = sys.stdout
  if verbose > 0:
    print(file=out)
    print(file=out)
    print("Matthews coefficient and Solvent content statistics", file=out)
  n_copies_solc = 1.0
  self.nres_known = False
  # Composition: explicit residue/base counts take precedence over a
  # sequence file.
  if (phil_object.scaling.input.asu_contents.n_residues is not None or
      phil_object.scaling.input.asu_contents.n_bases is not None):
    self.nres_known = True
    if (phil_object.scaling.input.asu_contents.sequence_file is not None):
      print(" warning: ignoring sequence file", file=out)
  elif (phil_object.scaling.input.asu_contents.sequence_file is not None):
    print(" determining composition from sequence file %s" % \
      phil_object.scaling.input.asu_contents.sequence_file, file=out)
    seq_comp = iotbx.bioinformatics.composition_from_sequence_file(
      file_name=phil_object.scaling.input.asu_contents.sequence_file,
      log=out)
    if (seq_comp is not None):
      phil_object.scaling.input.asu_contents.n_residues = seq_comp.n_residues
      phil_object.scaling.input.asu_contents.n_bases = seq_comp.n_bases
      self.nres_known = True
  # Matthews/solvent-content analysis; also estimates copies per ASU.
  # Result index order (n_residues, n_bases, n_copies) is fixed by the
  # assignments below.
  matthews_results = matthews.matthews_rupp(
    crystal_symmetry=miller_array,
    n_residues=phil_object.scaling.input.asu_contents.n_residues,
    n_bases=phil_object.scaling.input.asu_contents.n_bases,
    out=out, verbose=1)
  phil_object.scaling.input.asu_contents.n_residues = matthews_results[0]
  phil_object.scaling.input.asu_contents.n_bases = matthews_results[1]
  n_copies_solc = matthews_results[2]
  self.matthews_results = matthews_results
  # A user-specified copy count overrides the Matthews estimate.
  if phil_object.scaling.input.asu_contents.n_copies_per_asu is not None:
    n_copies_solc = phil_object.scaling.input.asu_contents.n_copies_per_asu
    self.defined_copies = n_copies_solc
    if verbose > 0:
      print("Number of copies per asymmetric unit provided", file=out)
      print(" Will use user specified value of ", n_copies_solc, file=out)
  else:
    phil_object.scaling.input.asu_contents.n_copies_per_asu = n_copies_solc
    self.guessed_copies = n_copies_solc
  # first report on I over sigma
  miller_array_new = miller_array
  self.data_strength = None
  miller_array_intensities = miller_array
  if (original_intensities is not None):
    assert original_intensities.is_xray_intensity_array()
    miller_array_intensities = original_intensities
  if miller_array_intensities.sigmas() is not None:
    data_strength = data_statistics.i_sigi_completeness_stats(
      miller_array_intensities,
      isigi_cut=phil_object.scaling.input.parameters.
        misc_twin_parameters.twin_test_cuts.isigi_cut,
      completeness_cut=phil_object.scaling.input.parameters.
        misc_twin_parameters.twin_test_cuts.completeness_cut,
      completeness_as_non_anomalous=completeness_as_non_anomalous)
    data_strength.show(out)
    self.data_strength = data_strength
    # If no high-resolution limit was supplied for the twin tests, take
    # the weaker (lower-resolution) of the two data-strength estimates.
    if phil_object.scaling.input.parameters.misc_twin_parameters.twin_test_cuts.high_resolution is None:
      if data_strength.resolution_cut > data_strength.resolution_at_least:
        phil_object.scaling.input.parameters.misc_twin_parameters.twin_test_cuts.high_resolution = data_strength.resolution_at_least
      else:
        phil_object.scaling.input.parameters.misc_twin_parameters.twin_test_cuts.high_resolution = data_strength.resolution_cut
  ## Isotropic wilson scaling
  if verbose > 0:
    print(file=out)
    print(file=out)
    print("Maximum likelihood isotropic Wilson scaling ", file=out)
  n_residues = phil_object.scaling.input.asu_contents.n_residues
  n_bases = phil_object.scaling.input.asu_contents.n_bases
  if n_residues is None:
    n_residues = 0
  if n_bases is None:
    n_bases = 0
  if n_bases + n_residues == 0:
    raise Sorry("No scatterers available")
  # Counts are per ASU; scale by space-group order and copy number to
  # get the full unit-cell contents expected by the ML scaling routines.
  iso_scale_and_b = absolute_scaling.ml_iso_absolute_scaling(
    miller_array=miller_array_new,
    n_residues=n_residues * miller_array.space_group().order_z() * n_copies_solc,
    n_bases=n_bases * miller_array.space_group().order_z() * n_copies_solc)
  iso_scale_and_b.show(out=out, verbose=verbose)
  self.iso_scale_and_b = iso_scale_and_b
  ## Store the b and scale values from isotropic ML scaling
  self.iso_p_scale = iso_scale_and_b.p_scale
  self.iso_b_wilson = iso_scale_and_b.b_wilson
  ## Anisotropic ml wilson scaling
  if verbose > 0:
    print(file=out)
    print(file=out)
    print("Maximum likelihood anisotropic Wilson scaling ", file=out)
  aniso_scale_and_b = absolute_scaling.ml_aniso_absolute_scaling(
    miller_array=miller_array_new,
    n_residues=n_residues * miller_array.space_group().order_z() * n_copies_solc,
    n_bases=n_bases * miller_array.space_group().order_z() * n_copies_solc)
  aniso_scale_and_b.show(out=out, verbose=1)
  self.aniso_scale_and_b = aniso_scale_and_b
  try:
    # Probe for b_cart; anisotropic scaling may fail to produce it.
    b_cart = aniso_scale_and_b.b_cart
  except AttributeError as e:
    # Log the failure and bail out of __init__ early (object is left
    # partially initialized).
    print("*** ERROR ***", file=out)
    print(str(e), file=out)
    show_exception_info_if_full_testing()
    return
  self.aniso_p_scale = aniso_scale_and_b.p_scale
  self.aniso_u_star = aniso_scale_and_b.u_star
  self.aniso_b_cart = aniso_scale_and_b.b_cart
  # XXX: for GUI
  self.overall_b_cart = getattr(aniso_scale_and_b, "overall_b_cart", None)
  ## Correcting for anisotropy
  if verbose > 0:
    print("Correcting for anisotropy in the data", file=out)
    print(file=out)
  b_cart_observed = aniso_scale_and_b.b_cart
  # NOTE(review): b_trace_average and b_trace_min are computed but not
  # used in the code visible here.
  b_trace_average = (b_cart_observed[0] +
                     b_cart_observed[1] +
                     b_cart_observed[2]) / 3.0
  b_trace_min = b_cart_observed[0]
  if b_cart_observed[1] < b_trace_min:
    b_trace_min = b_cart_observed[1]
  if b_cart_observed[2] < b_trace_min:
    b_trace_min = b_cart_observed[2]
  # Choose the isotropic B restored after anisotropy removal, per the
  # user's final_b option (default: 30).
  if phil_object.scaling.input.optional.aniso.final_b == "eigen_min":
    b_use = aniso_scale_and_b.eigen_values[2]
  elif phil_object.scaling.input.optional.aniso.final_b == "eigen_mean":
    b_use = flex.mean(aniso_scale_and_b.eigen_values)
  elif phil_object.scaling.input.optional.aniso.final_b == "user_b_iso":
    assert phil_object.scaling.input.optional.aniso.b_iso is not None
    b_use = phil_object.scaling.input.optional.aniso.b_iso
  else:
    b_use = 30
  b_cart_aniso_removed = [-b_use, -b_use, -b_use, 0, 0, 0]
  u_star_aniso_removed = adptbx.u_cart_as_u_star(
    miller_array.unit_cell(), adptbx.b_as_u(b_cart_aniso_removed))
  ## Two-step correction: remove the fitted anisotropic B, then put back
  ## an isotropic B of b_use; could be done in one step, split here for
  ## clarity only.
  self.no_aniso_array = absolute_scaling.anisotropic_correction(
    miller_array_new, 0.0, aniso_scale_and_b.u_star)
  self.no_aniso_array = absolute_scaling.anisotropic_correction(
    self.no_aniso_array, 0.0, u_star_aniso_removed)
  self.no_aniso_array = self.no_aniso_array.set_observation_type(
    miller_array)
  ## Make normalised structure factors please
  # Guard against overflow: zero out absurdly large corrected values.
  sel_big = self.no_aniso_array.data() > 1.e+50
  self.no_aniso_array = self.no_aniso_array.array(
    data=self.no_aniso_array.data().set_selected(sel_big, 0))
  self.no_aniso_array = self.no_aniso_array.set_observation_type(
    miller_array)
  normalistion = absolute_scaling.kernel_normalisation(
    self.no_aniso_array, auto_kernel=True)
  self.normalised_miller = normalistion.normalised_miller.deep_copy()
  self.phil_object = phil_object
  ## Some basic statistics and sanity checks follow
  if verbose > 0:
    print("Some basic intensity statistics follow.", file=out)
    print(file=out)
  basic_data_stats = data_statistics.basic_intensity_statistics(
    miller_array,
    aniso_scale_and_b.p_scale,
    aniso_scale_and_b.u_star,
    iso_scale_and_b.scat_info,
    out=out,
    out_plot=out_plot)
  self.basic_data_stats = basic_data_stats
  self.miller_array = basic_data_stats.new_miller
  # relative wilson plot (only when calculated data reach beyond 4 A)
  self.rel_wilson = None
  if (miller_calc is not None) and (miller_calc.d_min() < 4.0):
    try:
      self.rel_wilson = relative_wilson.relative_wilson(
        miller_obs=miller_array,
        miller_calc=miller_calc)
    except RuntimeError as e:
      # Best-effort: log and continue with rel_wilson left as None.
      print("*** Error calculating relative Wilson plot - skipping.", file=out)
      print("", file=out)
  if verbose > 0:
    print("Basic analyses completed", file=out)
def exercise_2():
  """Regression exercise for xtriage on the p9 SeMet dataset.

  Runs completeness/data-strength statistics, Wilson scaling, outlier
  detection (expected to fail on raw input), the full xtriage analysis
  (including anomalous-signal planning statistics), and symmetry
  detection from a reference PDB file, checking numerical results
  against reference values.  Skips with a warning if phenix_regression
  is unavailable.
  """
  hkl_file = libtbx.env.find_in_repositories(
    relative_path="phenix_regression/wizards/data/p9_se_w2.sca",
    test=os.path.isfile)
  if (hkl_file is None):
    warnings.warn("phenix_regression not available, skipping test")
    return
  hkl_in = file_reader.any_file(hkl_file).assert_file_type("hkl")
  # Read unmerged intensities, imposing the known p9 symmetry.
  i_obs_raw = hkl_in.file_object.as_miller_arrays(
    merge_equivalents=False,
    crystal_symmetry=crystal.symmetry(space_group_symbol="I4",
      unit_cell=(113.949, 113.949, 32.474, 90, 90, 90)))[0]
  i_obs = i_obs_raw.merge_equivalents().array()
  # completeness and data strength
  cstats = ds.i_sigi_completeness_stats(i_obs)
  d_min_cut = cstats.resolution_cut
  assert approx_equal(d_min_cut, 2.150815)
  ds.wilson_scaling(miller_array=i_obs, n_residues=120)
  # outliers - this shouldn't actually work, since it requires additional
  # processing steps on the input data
  try:
    ds.possible_outliers(i_obs)
  except AssertionError:
    pass
  else:
    raise Exception_expected
  ######################################################################
  # OVERALL ANALYSIS
  pdb_file = libtbx.env.find_in_repositories(
    relative_path="phenix_examples/p9-build/p9.pdb",
    test=os.path.isfile)
  f_calc = None
  if (pdb_file is not None):
    pdb_in = file_reader.any_file(pdb_file).assert_file_type("pdb")
    hierarchy = pdb_in.file_object.hierarchy
    xrs = pdb_in.file_object.xray_structure_simple(crystal_symmetry=i_obs)
    f_calc = xrs.structure_factors(d_min=i_obs.d_min()).f_calc()
    f_calc = abs(f_calc).generate_bijvoet_mates()
    f_calc = f_calc.set_observation_type_xray_amplitude()
    i_obs, f_calc = i_obs.common_sets(other=f_calc)
    # FIX: close (and therefore flush) the temporary PDB before it is
    # re-read by xtriage.run() below; the original open(...).write(...)
    # relied on garbage collection to flush the handle.
    with open("tmp_xtriage.pdb", "w") as pdb_out:
      pdb_out.write(hierarchy.as_pdb_string(crystal_symmetry=i_obs))
    pdb_file = "tmp_xtriage.pdb"
  params = xtriage.master_params.extract()
  params.scaling.input.asu_contents.n_residues = 141
  # NOTE(review): the log handle is deliberately left open, since the
  # analyses object may keep writing to text_out; swap in sys.stdout
  # when debugging.
  log_out = open("logfile3.log", "w")
  result = xtriage.xtriage_analyses(miller_obs=i_obs, miller_calc=f_calc,
    params=params, unmerged_obs=i_obs_raw, text_out=log_out)
  # XXX there appears to be some system-dependence here, hence sloppy limits
  assert (15.5 < result.aniso_b_min < 15.9)
  assert (10 < result.aniso_range_of_b < 11)
  # check relative Wilson
  if (pdb_file is not None):
    assert (result.relative_wilson is not None)
    # FIXME
    #assert (result.relative_wilson.n_outliers() == 34)
  #show_pickled_object_sizes(result)
  test_pickle_consistency_and_size(result)
  # XXX PDB validation server
  assert approx_equal(result.iso_b_wilson, 18.33, eps=0.1)
  assert approx_equal(result.aniso_b_ratio, 0.546, eps=0.1)
  assert (result.number_of_wilson_outliers == 0)
  assert approx_equal(result.l_test_mean_l, 0.493, eps=0.1)
  assert approx_equal(result.l_test_mean_l_squared, 0.326, eps=0.1)
  assert approx_equal(result.i_over_sigma_outer_shell, 3.25, eps=0.1)
  assert approx_equal(result.overall_i_sig_i, 10.34, eps=0.1)
  # anomalous-signal planning statistics
  assert approx_equal(
    result.anomalous_info.plan_sad_experiment_stats.get_overall(
      item="i_over_sigma_dict"), 10.61, eps=0.1)
  assert approx_equal(
    result.anomalous_info.plan_sad_experiment_stats.get_overall(
      item="anom_signal_dict"), 15.35, eps=0.1)
  assert ("No significant pseudotranslation is detected" in
          result.patterson_verdict)
  # test consistency of output after pickling and unpickling
  try:
    from phenix_dev.phenix_cloud import xtriage_json
  except ImportError:
    pass
  else:
    json_out = xtriage_json.json_output("p9.sca")
    result.show(out=json_out)
    # FIX: deterministically close the JSON file (was open(...).write(...)).
    with open("xtriage.json", "w") as json_file:
      json_file.write(json_out.export())
  # unmerged data
  assert result.merging_stats is not None
  out = StringIO()
  result.merging_stats.show(out=out)
  assert ("R-merge: 0.073" in out.getvalue())
  assert approx_equal(result.estimate_d_min(min_i_over_sigma=10), 1.9645,
    eps=0.001)
  # FIXME PDB doesn't actually have unit cell!
  # test detection of symmetry in reference file
  if (pdb_file is not None):
    args = [hkl_file, pdb_file]
    result = xtriage.run(args=args, out=null_out())
def __init__(self,
             miller_array,
             phil_object,
             out=None,
             out_plot=None,
             miller_calc=None,
             original_intensities=None,
             completeness_as_non_anomalous=None,
             verbose=0):
  """Run the core xtriage-style scaling analyses on an observed array.

  Pipeline (each stage stores its result on self):
    1. Matthews coefficient / solvent-content estimation
       (self.matthews_results, self.nres_known, and either
       self.defined_copies or self.guessed_copies);
    2. I/sigI data-strength statistics (self.data_strength, only when
       sigmas are available);
    3. maximum-likelihood isotropic Wilson scaling (self.iso_scale_and_b,
       self.iso_p_scale, self.iso_b_wilson);
    4. maximum-likelihood anisotropic Wilson scaling
       (self.aniso_scale_and_b).

  WARNING: phil_object is mutated in place -- asu_contents.n_residues,
  asu_contents.n_bases, asu_contents.n_copies_per_asu and
  misc_twin_parameters.twin_test_cuts.high_resolution may be reassigned.

  Parameters:
    miller_array: observed data; also supplies the crystal symmetry and
      the space-group order used to scale the residue/base counts
    phil_object: scaling PHIL parameter scope (mutated, see above)
    out: log stream; defaults to sys.stdout
    out_plot: plot output stream (unused in the code visible here)
    miller_calc: calculated data (unused in the code visible here)
    original_intensities: optional unmerged X-ray intensities; when given
      they replace miller_array for the data-strength statistics
    completeness_as_non_anomalous: forwarded to i_sigi_completeness_stats
    verbose: > 0 enables the progress banners

  On AttributeError from the anisotropic scaling step the error is
  logged and __init__ returns early, leaving the object partially
  initialized.
  """
  if out is None:
    out = sys.stdout
  if verbose > 0:
    print >> out
    print >> out
    print >> out, "Matthews coefficient and Solvent content statistics"
  n_copies_solc = 1.0
  self.nres_known = False
  # Composition: explicit residue/base counts take precedence over a
  # sequence file.
  if (phil_object.scaling.input.asu_contents.n_residues is not None or
      phil_object.scaling.input.asu_contents.n_bases is not None):
    self.nres_known = True
    if (phil_object.scaling.input.asu_contents.sequence_file is not None):
      print >> out, " warning: ignoring sequence file"
  elif (phil_object.scaling.input.asu_contents.sequence_file is not None):
    print >> out, " determining composition from sequence file %s" % \
      phil_object.scaling.input.asu_contents.sequence_file
    seq_comp = iotbx.bioinformatics.composition_from_sequence_file(
      file_name=phil_object.scaling.input.asu_contents.sequence_file,
      log=out)
    if (seq_comp is not None):
      phil_object.scaling.input.asu_contents.n_residues = seq_comp.n_residues
      phil_object.scaling.input.asu_contents.n_bases = seq_comp.n_bases
      self.nres_known = True
  # Matthews/solvent-content analysis; also estimates copies per ASU.
  # Result index order (n_residues, n_bases, n_copies) is fixed by the
  # assignments below.
  matthews_results = matthews.matthews_rupp(
    crystal_symmetry=miller_array,
    n_residues=phil_object.scaling.input.asu_contents.n_residues,
    n_bases=phil_object.scaling.input.asu_contents.n_bases,
    out=out, verbose=1)
  phil_object.scaling.input.asu_contents.n_residues = matthews_results[0]
  phil_object.scaling.input.asu_contents.n_bases = matthews_results[1]
  n_copies_solc = matthews_results[2]
  self.matthews_results = matthews_results
  # A user-specified copy count overrides the Matthews estimate.
  if phil_object.scaling.input.asu_contents.n_copies_per_asu is not None:
    n_copies_solc = phil_object.scaling.input.asu_contents.n_copies_per_asu
    self.defined_copies = n_copies_solc
    if verbose > 0:
      print >> out, "Number of copies per asymmetric unit provided"
      print >> out, " Will use user specified value of ", n_copies_solc
  else:
    phil_object.scaling.input.asu_contents.n_copies_per_asu = n_copies_solc
    self.guessed_copies = n_copies_solc
  # first report on I over sigma
  miller_array_new = miller_array
  self.data_strength = None
  miller_array_intensities = miller_array
  if (original_intensities is not None):
    assert original_intensities.is_xray_intensity_array()
    miller_array_intensities = original_intensities
  if miller_array_intensities.sigmas() is not None:
    data_strength = data_statistics.i_sigi_completeness_stats(
      miller_array_intensities,
      isigi_cut=phil_object.scaling.input.parameters.
        misc_twin_parameters.twin_test_cuts.isigi_cut,
      completeness_cut=phil_object.scaling.input.parameters.
        misc_twin_parameters.twin_test_cuts.completeness_cut,
      completeness_as_non_anomalous=completeness_as_non_anomalous)
    data_strength.show(out)
    self.data_strength = data_strength
    # If no high-resolution limit was supplied for the twin tests, take
    # the weaker (lower-resolution) of the two data-strength estimates.
    if phil_object.scaling.input.parameters.misc_twin_parameters.twin_test_cuts.high_resolution is None:
      if data_strength.resolution_cut > data_strength.resolution_at_least:
        phil_object.scaling.input.parameters.misc_twin_parameters.twin_test_cuts.high_resolution = data_strength.resolution_at_least
      else:
        phil_object.scaling.input.parameters.misc_twin_parameters.twin_test_cuts.high_resolution = data_strength.resolution_cut
  ## Isotropic wilson scaling
  if verbose > 0:
    print >> out
    print >> out
    print >> out, "Maximum likelihood isotropic Wilson scaling "
  n_residues = phil_object.scaling.input.asu_contents.n_residues
  n_bases = phil_object.scaling.input.asu_contents.n_bases
  if n_residues is None:
    n_residues = 0
  if n_bases is None:
    n_bases = 0
  if n_bases + n_residues == 0:
    raise Sorry("No scatterers available")
  # Counts are per ASU; scale by space-group order and copy number to
  # get the full unit-cell contents expected by the ML scaling routines.
  iso_scale_and_b = absolute_scaling.ml_iso_absolute_scaling(
    miller_array=miller_array_new,
    n_residues=n_residues * miller_array.space_group().order_z() * n_copies_solc,
    n_bases=n_bases * miller_array.space_group().order_z() * n_copies_solc)
  iso_scale_and_b.show(out=out, verbose=verbose)
  self.iso_scale_and_b = iso_scale_and_b
  ## Store the b and scale values from isotropic ML scaling
  self.iso_p_scale = iso_scale_and_b.p_scale
  self.iso_b_wilson = iso_scale_and_b.b_wilson
  ## Anisotropic ml wilson scaling
  if verbose > 0:
    print >> out
    print >> out
    print >> out, "Maximum likelihood anisotropic Wilson scaling "
  aniso_scale_and_b = absolute_scaling.ml_aniso_absolute_scaling(
    miller_array=miller_array_new,
    n_residues=n_residues * miller_array.space_group().order_z() * n_copies_solc,
    n_bases=n_bases * miller_array.space_group().order_z() * n_copies_solc)
  aniso_scale_and_b.show(out=out, verbose=1)
  self.aniso_scale_and_b = aniso_scale_and_b
  try:
    # Probe for b_cart; anisotropic scaling may fail to produce it.
    b_cart = aniso_scale_and_b.b_cart
  except AttributeError, e:
    # Log the failure and bail out of __init__ early.
    print >> out, "*** ERROR ***"
    print >> out, str(e)
    show_exception_info_if_full_testing()
    return