コード例 #1
0
    def apply_aniso_correction(self, f_array=None):
        """Replace the observed anisotropy of f_array with an isotropic B.

        Two successive corrections are applied: the first removes the
        refined anisotropic tensor (self.b_cart), the second puts back the
        target tensor (self.b_cart_aniso_removed).  Returns f_array
        untouched when either tensor is missing.
        """
        # Nothing to correct without both tensors; hand the input back.
        if self.b_cart is None or self.b_cart_aniso_removed is None:
            return f_array

        from mmtbx.scaling import absolute_scaling
        from cctbx import adptbx

        cell = f_array.unit_cell()
        u_star = adptbx.u_cart_as_u_star(cell, adptbx.b_as_u(self.b_cart))
        u_star_target = adptbx.u_cart_as_u_star(
            cell, adptbx.b_as_u(self.b_cart_aniso_removed))

        # Step 1: strip the observed anisotropy.
        corrected = absolute_scaling.anisotropic_correction(
            f_array, 0.0, u_star, must_be_greater_than=-0.0001)
        # Step 2: impose the desired (isotropic) overall B.
        corrected = absolute_scaling.anisotropic_correction(
            corrected, 0.0, u_star_target, must_be_greater_than=-0.0001)

        return corrected.set_observation_type(f_array)
コード例 #2
0
  def __init__(self,
               miller_native,
               miller_derivative,
               use_intensities=True,
               scale_weight=True,
               use_weights=True):
    """Anisotropically scale a derivative data set onto a native one.

    A least-squares refinery yields p_scale / b_cart / u_star; the
    resulting anisotropic correction is applied to the derivative, and
    R values between the two data sets are recorded before
    (self.r_val_before) and after (self.r_val_after) scaling.
    """
    self.native = miller_native.deep_copy().map_to_asu()
    self.derivative = miller_derivative.deep_copy().map_to_asu()

    lsq_object = refinery(self.native,
                          self.derivative,
                          use_intensities=use_intensities,
                          scale_weight=scale_weight,
                          use_weights=use_weights)

    self.p_scale = lsq_object.p_scale
    self.b_cart = lsq_object.b_cart
    self.u_star = lsq_object.u_star

    ## Apply the scaling on the data and compute some R values.
    tmp_nat, tmp_der = self.native.common_sets(self.derivative)

    self.r_val_before = flex.sum( flex.abs(tmp_nat.data()-tmp_der.data()) )
    # Guard against an all-zero denominator before normalising.
    denominator = flex.sum( flex.abs(tmp_nat.data()+tmp_der.data()) )
    if denominator > 0:
      self.r_val_before /= denominator/2.0

    self.derivative = absolute_scaling.anisotropic_correction(
      self.derivative,self.p_scale,self.u_star )

    # Keep an unmodified copy of the scaled derivative for later use.
    self.scaled_original_derivative = self.derivative.deep_copy().set_observation_type(
      self.derivative ).map_to_asu()

    # NOTE: dead assignments of tmp_nat/tmp_der (immediately overwritten
    # below) were removed from the original code.
    tmp_nat, tmp_der = self.native.map_to_asu().common_sets(self.derivative.map_to_asu())
    self.r_val_after = flex.sum( flex.abs( tmp_nat.data()-
                                           tmp_der.data()   )
                               )
    denominator = (flex.sum( flex.abs(tmp_nat.data()) ) +
                   flex.sum( flex.abs(tmp_der.data()) ))
    if denominator > 0:
      self.r_val_after /= denominator/2.0

    self.native=tmp_nat
    self.derivative=tmp_der
コード例 #3
0
ファイル: relative_scaling.py プロジェクト: dials/cctbx
  def __init__(self,
               miller_native,
               miller_derivative,
               use_intensities=True,
               scale_weight=True,
               use_weights=True):
    """Anisotropically scale a derivative data set onto a native one.

    A least-squares refinery yields p_scale / b_cart / u_star; the
    resulting anisotropic correction is applied to the derivative, and
    R values between the two data sets are recorded before
    (self.r_val_before) and after (self.r_val_after) scaling.
    """
    self.native = miller_native.deep_copy().map_to_asu()
    self.derivative = miller_derivative.deep_copy().map_to_asu()

    lsq_object = refinery(self.native,
                          self.derivative,
                          use_intensities=use_intensities,
                          scale_weight=scale_weight,
                          use_weights=use_weights)

    self.p_scale = lsq_object.p_scale
    self.b_cart = lsq_object.b_cart
    self.u_star = lsq_object.u_star

    ## Apply the scaling on the data and compute some R values.
    tmp_nat, tmp_der = self.native.common_sets(self.derivative)

    self.r_val_before = flex.sum( flex.abs(tmp_nat.data()-tmp_der.data()) )
    # Guard against an all-zero denominator before normalising.
    denominator = flex.sum( flex.abs(tmp_nat.data()+tmp_der.data()) )
    if denominator > 0:
      self.r_val_before /= denominator/2.0

    self.derivative = absolute_scaling.anisotropic_correction(
      self.derivative,self.p_scale,self.u_star )

    # Keep an unmodified copy of the scaled derivative for later use.
    self.scaled_original_derivative = self.derivative.deep_copy().set_observation_type(
      self.derivative ).map_to_asu()

    # NOTE: dead assignments of tmp_nat/tmp_der (immediately overwritten
    # below) were removed from the original code.
    tmp_nat, tmp_der = self.native.map_to_asu().common_sets(self.derivative.map_to_asu())
    self.r_val_after = flex.sum( flex.abs( tmp_nat.data()-
                                           tmp_der.data()   )
                               )
    denominator = (flex.sum( flex.abs(tmp_nat.data()) ) +
                   flex.sum( flex.abs(tmp_der.data()) ))
    if denominator > 0:
      self.r_val_after /= denominator/2.0

    self.native=tmp_nat
    self.derivative=tmp_der
コード例 #4
0
ファイル: __init__.py プロジェクト: zhuligs/cctbx_project
 def anisotropic_correction(self):
   """Estimate and remove anisotropy from self.f_obs, trace-normalised.

   Runs ML anisotropic absolute scaling, subtracts the mean of the
   diagonal B elements so only the anisotropic part is corrected, and
   replaces self.f_obs with the corrected array.  No-op when the
   anisotropic_correction parameter is off.
   """
   if not self.params.anisotropic_correction:
     return
   self.aniso_scale_and_b = None
   contents = self.params.asu_contents
   # Scatterer counts are per asymmetric unit; scale up to the full cell.
   multiplier = self.f_obs.space_group().order_z() * contents.n_copies_per_asu
   self.aniso_scale_and_b = absolute_scaling.ml_aniso_absolute_scaling(
     miller_array=self.f_obs,
     n_residues=contents.n_residues*multiplier,
     n_bases=contents.n_bases*multiplier)
   self.aniso_scale_and_b.show(out=self.log)
   b_cart = self.aniso_scale_and_b.b_cart
   # Remove the isotropic component (mean of the diagonal) so that only
   # the anisotropic deviation is corrected for.
   mean_diag = (b_cart[0] + b_cart[1] + b_cart[2])/3
   b_cart = [b_cart[0]-mean_diag, b_cart[1]-mean_diag, b_cart[2]-mean_diag,
             b_cart[3], b_cart[4], b_cart[5]]
   u_star = adptbx.u_cart_as_u_star(
     self.f_obs.unit_cell(), adptbx.b_as_u(b_cart))
   self.f_obs = absolute_scaling.anisotropic_correction(
     self.f_obs, 0.0, u_star).set_observation_type(self.f_obs)
コード例 #5
0
ファイル: __init__.py プロジェクト: cctbx/cctbx-playground
 def anisotropic_correction(self):
   """Estimate and remove anisotropy from self.f_obs, trace-normalised.

   Runs ML anisotropic absolute scaling, subtracts the mean of the
   diagonal B elements so only the anisotropic part is corrected, and
   replaces self.f_obs with the corrected array.  No-op when the
   anisotropic_correction parameter is off.
   """
   if not self.params.anisotropic_correction:
     return
   self.aniso_scale_and_b = None
   contents = self.params.asu_contents
   # Scatterer counts are per asymmetric unit; scale up to the full cell.
   multiplier = self.f_obs.space_group().order_z() * contents.n_copies_per_asu
   self.aniso_scale_and_b = absolute_scaling.ml_aniso_absolute_scaling(
     miller_array=self.f_obs,
     n_residues=contents.n_residues*multiplier,
     n_bases=contents.n_bases*multiplier)
   self.aniso_scale_and_b.show(out=self.log)
   b_cart = self.aniso_scale_and_b.b_cart
   # Remove the isotropic component (mean of the diagonal) so that only
   # the anisotropic deviation is corrected for.
   mean_diag = (b_cart[0] + b_cart[1] + b_cart[2])/3
   b_cart = [b_cart[0]-mean_diag, b_cart[1]-mean_diag, b_cart[2]-mean_diag,
             b_cart[3], b_cart[4], b_cart[5]]
   u_star = adptbx.u_cart_as_u_star(
     self.f_obs.unit_cell(), adptbx.b_as_u(b_cart))
   self.f_obs = absolute_scaling.anisotropic_correction(
     self.f_obs, 0.0, u_star).set_observation_type(self.f_obs)
コード例 #6
0
  def __init__(self,
               miller_array,
               parameters,
               out=None,
               n_residues=100,
               n_bases=0):
    """Pre-process a Miller array according to the given parameter object.

    Three optional stages are applied in order; the result of each is kept:
      self.no_aniso_array   -- after anisotropy removal (or the merged input)
      self.new_miller_array -- after outlier rejection
      self.final_array      -- after twinning / detwinning

    Raises Sorry when a twin fraction is missing or outside [0, 0.5].
    """

    self.params=parameters
    # Work on a merged deep copy so the caller's array is never modified.
    self.miller_array=miller_array.deep_copy().set_observation_type(
      miller_array).merge_equivalents().array()
    self.out = out
    if self.out is None:
      self.out = sys.stdout
    if self.out == "silent":
      self.out = null_out()


    self.no_aniso_array = self.miller_array
    if self.params.aniso.action == "remove_aniso":
      # First perform anisotropic ML scaling to estimate the aniso tensor.
      aniso_scale_and_b = absolute_scaling.ml_aniso_absolute_scaling(
        miller_array = self.miller_array,
        n_residues = n_residues,
        n_bases = n_bases)
      aniso_scale_and_b.p_scale = 0 # set the p_scale back to 0!
      aniso_scale_and_b.show(out=out)
      # now do aniso correction please
      self.aniso_p_scale = aniso_scale_and_b.p_scale
      self.aniso_u_star  = aniso_scale_and_b.u_star
      self.aniso_b_cart  = aniso_scale_and_b.b_cart
      # Choose the overall isotropic B to re-impose after removal.
      if self.params.aniso.final_b == "eigen_min":
        b_use=aniso_scale_and_b.eigen_values[2]
      elif self.params.aniso.final_b == "eigen_mean" :
        b_use=flex.mean(aniso_scale_and_b.eigen_values)
      elif self.params.aniso.final_b == "user_b_iso":
        assert self.params.aniso.b_iso is not None
        b_use=self.params.aniso.b_iso
      else:
        b_use = 30

      b_cart_aniso_removed = [ -b_use,
                               -b_use,
                               -b_use,
                               0,
                               0,
                               0]
      u_star_aniso_removed = adptbx.u_cart_as_u_star(
        miller_array.unit_cell(),
        adptbx.b_as_u( b_cart_aniso_removed  ) )
      ## Done in two steps (remove aniso, then restore isotropic B); could
      ## easily be a single step -- kept separate for clarity only.
      self.no_aniso_array = absolute_scaling.anisotropic_correction(
        self.miller_array,0.0,aniso_scale_and_b.u_star )
      self.no_aniso_array = absolute_scaling.anisotropic_correction(
        self.no_aniso_array,0.0,u_star_aniso_removed)
      self.no_aniso_array = self.no_aniso_array.set_observation_type(
        miller_array )

    # that is done now, now we can do outlier detection if desired
    outlier_manager = outlier_rejection.outlier_manager(
      self.no_aniso_array,
      None,
      out=self.out)


    self.new_miller_array = self.no_aniso_array
    if self.params.outlier.action == "basic":
      print >> self.out, "Non-outliers found by the basic wilson statistics"
      print >> self.out, "protocol will be written out."
      basic_array = outlier_manager.basic_wilson_outliers(
        p_basic_wilson = self.params.outlier.parameters.basic_wilson.level,
        return_data = True)
      self.new_miller_array = basic_array

    if self.params.outlier.action == "extreme":
      print >> self.out, "Non-outliers found by the extreme value wilson statistics"
      print >> self.out, "protocol will be written out."
      extreme_array = outlier_manager.extreme_wilson_outliers(
      p_extreme_wilson = self.params.outlier.parameters.extreme_wilson.level,
      return_data = True)
      self.new_miller_array = extreme_array

    if self.params.outlier.action == "beamstop":
      print >> self.out, "Outliers found for the beamstop shadow"
      print >> self.out, "problems detection protocol will be written out."
      beamstop_array = outlier_manager.beamstop_shadow_outliers(
        level = self.params.outlier.parameters.beamstop.level,
        d_min = self.params.outlier.parameters.beamstop.d_min,
        return_data=True)
      self.new_miller_array = beamstop_array

    if self.params.outlier.action == "None":
      self.new_miller_array =  self.no_aniso_array



    # now we can twin or detwin the data if needed
    self.final_array = self.new_miller_array
    if self.params.symmetry.action == "twin":
      alpha = self.params.symmetry.twinning_parameters.fraction
      if (alpha is None):
        raise Sorry("Twin fraction not specified, not twinning data")
      elif not (0 <= alpha <= 0.5):
        raise Sorry("Twin fraction must be between 0 and 0.5.")
      print >> self.out
      print >> self.out, "Twinning given data"
      print >> self.out, "-------------------"
      print >> self.out
      print >> self.out, "Artifically twinning the data with fraction %3.2f" %\
        alpha

      self.final_array = self.new_miller_array.twin_data(
        twin_law = self.params.symmetry.twinning_parameters.twin_law,
        alpha=alpha).as_intensity_array()

    elif (self.params.symmetry.action == "detwin"):
      twin_law = self.params.symmetry.twinning_parameters.twin_law
      alpha = self.params.symmetry.twinning_parameters.fraction
      if (alpha is None):
        raise Sorry("Twin fraction not specified, not detwinning data")
      elif not (0 <= alpha <= 0.5):
        raise Sorry("Twin fraction must be between 0 and 0.5.")
      print >> self.out, """

Attempting to detwin data
-------------------------
Detwinning data with:
  - twin law:      %s
  - twin fraciton: %.2f

BE WARNED! DETWINNING OF DATA DOES NOT SOLVE YOUR TWINNING PROBLEM!
PREFERABLY, REFINEMENT SHOULD BE CARRIED OUT AGAINST ORIGINAL DATA
ONLY USING A TWIN SPECIFIC TARGET FUNCTION!

""" % (twin_law, alpha)
      self.final_array = self.new_miller_array.detwin_data(
        twin_law=twin_law,
        alpha=alpha).as_intensity_array()

    assert self.final_array is not None
コード例 #7
0
class basic_analyses(object):  # XXX is this ever used?
    """One-shot basic statistics for a Miller array.

    Runs Matthews/solvent-content analysis, data-strength (I/sigI)
    statistics, isotropic and anisotropic ML Wilson scaling, anisotropy
    correction, kernel normalisation, basic intensity statistics and an
    optional relative Wilson plot.  Results are stored as attributes;
    the phil_object is updated in place with derived values.
    """
    def __init__(self,
                 miller_array,
                 phil_object,
                 out=None,
                 out_plot=None,
                 miller_calc=None,
                 original_intensities=None,
                 completeness_as_non_anomalous=None,
                 verbose=0):
        # NOTE(review): phil_object.scaling.input.* is mutated throughout
        # (asu contents, twin-test resolution cuts) -- callers rely on this.
        if out is None:
            out = sys.stdout
        if verbose > 0:
            print >> out
            print >> out
            print >> out, "Matthews coefficient and Solvent content statistics"
        n_copies_solc = 1.0
        self.nres_known = False
        # Determine asymmetric-unit composition: explicit counts win over a
        # sequence file; the sequence file is parsed only as a fallback.
        if (phil_object.scaling.input.asu_contents.n_residues is not None
                or phil_object.scaling.input.asu_contents.n_bases is not None):
            self.nres_known = True
            if (phil_object.scaling.input.asu_contents.sequence_file
                    is not None):
                print >> out, "  warning: ignoring sequence file"
        elif (phil_object.scaling.input.asu_contents.sequence_file
              is not None):
            print >> out, "  determining composition from sequence file %s" % \
              phil_object.scaling.input.asu_contents.sequence_file
            seq_comp = iotbx.bioinformatics.composition_from_sequence_file(
                file_name=phil_object.scaling.input.asu_contents.sequence_file,
                log=out)
            if (seq_comp is not None):
                phil_object.scaling.input.asu_contents.n_residues = seq_comp.n_residues
                phil_object.scaling.input.asu_contents.n_bases = seq_comp.n_bases
                self.nres_known = True
        matthews_results = matthews.matthews_rupp(
            crystal_symmetry=miller_array,
            n_residues=phil_object.scaling.input.asu_contents.n_residues,
            n_bases=phil_object.scaling.input.asu_contents.n_bases,
            out=out,
            verbose=1)
        phil_object.scaling.input.asu_contents.n_residues = matthews_results[0]
        phil_object.scaling.input.asu_contents.n_bases = matthews_results[1]
        n_copies_solc = matthews_results[2]
        self.matthews_results = matthews_results

        # A user-supplied copy count overrides the Matthews estimate.
        if phil_object.scaling.input.asu_contents.n_copies_per_asu is not None:
            n_copies_solc = phil_object.scaling.input.asu_contents.n_copies_per_asu
            self.defined_copies = n_copies_solc
            if verbose > 0:
                print >> out, "Number of copies per asymmetric unit provided"
                print >> out, " Will use user specified value of ", n_copies_solc
        else:
            phil_object.scaling.input.asu_contents.n_copies_per_asu = n_copies_solc
            self.guessed_copies = n_copies_solc

        # first report on I over sigma
        miller_array_new = miller_array
        self.data_strength = None
        miller_array_intensities = miller_array
        if (original_intensities is not None):
            assert original_intensities.is_xray_intensity_array()
            miller_array_intensities = original_intensities
        if miller_array_intensities.sigmas() is not None:
            data_strength = data_statistics.i_sigi_completeness_stats(
                miller_array_intensities,
                isigi_cut=phil_object.scaling.input.parameters.
                misc_twin_parameters.twin_test_cuts.isigi_cut,
                completeness_cut=phil_object.scaling.input.parameters.
                misc_twin_parameters.twin_test_cuts.completeness_cut,
                completeness_as_non_anomalous=completeness_as_non_anomalous)
            data_strength.show(out)
            self.data_strength = data_strength
            # Default the twin-test high-resolution cut to the weaker of the
            # two strength-based estimates, if the user gave none.
            if phil_object.scaling.input.parameters.misc_twin_parameters.twin_test_cuts.high_resolution is None:
                if data_strength.resolution_cut > data_strength.resolution_at_least:
                    phil_object.scaling.input.parameters.misc_twin_parameters.twin_test_cuts.high_resolution = data_strength.resolution_at_least
                else:
                    phil_object.scaling.input.parameters.misc_twin_parameters.twin_test_cuts.high_resolution = data_strength.resolution_cut

        ## Isotropic wilson scaling
        if verbose > 0:
            print >> out
            print >> out
            print >> out, "Maximum likelihood isotropic Wilson scaling "

        n_residues = phil_object.scaling.input.asu_contents.n_residues
        n_bases = phil_object.scaling.input.asu_contents.n_bases
        if n_residues is None:
            n_residues = 0
        if n_bases is None:
            n_bases = 0
        if n_bases + n_residues == 0:
            raise Sorry("No scatterers available")
        iso_scale_and_b = absolute_scaling.ml_iso_absolute_scaling(
            miller_array=miller_array_new,
            n_residues=n_residues * miller_array.space_group().order_z() *
            n_copies_solc,
            n_bases=n_bases * miller_array.space_group().order_z() *
            n_copies_solc)
        iso_scale_and_b.show(out=out, verbose=verbose)
        self.iso_scale_and_b = iso_scale_and_b
        ## Store the b and scale values from isotropic ML scaling
        self.iso_p_scale = iso_scale_and_b.p_scale
        self.iso_b_wilson = iso_scale_and_b.b_wilson

        ## Anisotropic ml wilson scaling
        if verbose > 0:
            print >> out
            print >> out
            print >> out, "Maximum likelihood anisotropic Wilson scaling "
        aniso_scale_and_b = absolute_scaling.ml_aniso_absolute_scaling(
            miller_array=miller_array_new,
            n_residues=n_residues * miller_array.space_group().order_z() *
            n_copies_solc,
            n_bases=n_bases * miller_array.space_group().order_z() *
            n_copies_solc)
        aniso_scale_and_b.show(out=out, verbose=1)

        self.aniso_scale_and_b = aniso_scale_and_b

        # Anisotropic scaling can fail to produce b_cart; report and bail
        # out early rather than crashing downstream.
        try:
            b_cart = aniso_scale_and_b.b_cart
        except AttributeError, e:
            print >> out, "*** ERROR ***"
            print >> out, str(e)
            show_exception_info_if_full_testing()
            return

        self.aniso_p_scale = aniso_scale_and_b.p_scale
        self.aniso_u_star = aniso_scale_and_b.u_star
        self.aniso_b_cart = aniso_scale_and_b.b_cart
        # XXX: for GUI
        self.overall_b_cart = getattr(aniso_scale_and_b, "overall_b_cart",
                                      None)

        ## Correcting for anisotropy
        if verbose > 0:
            print >> out, "Correcting for anisotropy in the data"
            print >> out

        b_cart_observed = aniso_scale_and_b.b_cart

        b_trace_average = (b_cart_observed[0] + b_cart_observed[1] +
                           b_cart_observed[2]) / 3.0
        b_trace_min = b_cart_observed[0]
        if b_cart_observed[1] < b_trace_min: b_trace_min = b_cart_observed[1]
        if b_cart_observed[2] < b_trace_min: b_trace_min = b_cart_observed[2]

        # Choose the overall isotropic B to re-impose after removing
        # anisotropy (eigenvalue-based, user-specified, or a default of 30).
        if phil_object.scaling.input.optional.aniso.final_b == "eigen_min":
            b_use = aniso_scale_and_b.eigen_values[2]
        elif phil_object.scaling.input.optional.aniso.final_b == "eigen_mean":
            b_use = flex.mean(aniso_scale_and_b.eigen_values)
        elif phil_object.scaling.input.optional.aniso.final_b == "user_b_iso":
            assert phil_object.scaling.input.optional.aniso.b_iso is not None
            b_use = phil_object.scaling.input.optional.aniso.b_iso
        else:
            b_use = 30

        b_cart_aniso_removed = [-b_use, -b_use, -b_use, 0, 0, 0]
        u_star_aniso_removed = adptbx.u_cart_as_u_star(
            miller_array.unit_cell(), adptbx.b_as_u(b_cart_aniso_removed))
        ## Done in two steps (remove aniso, then restore isotropic B);
        ## could easily be a single step -- kept separate for clarity only.
        self.no_aniso_array = absolute_scaling.anisotropic_correction(
            miller_array_new, 0.0, aniso_scale_and_b.u_star)
        self.no_aniso_array = absolute_scaling.anisotropic_correction(
            self.no_aniso_array, 0.0, u_star_aniso_removed)
        self.no_aniso_array = self.no_aniso_array.set_observation_type(
            miller_array)

        ## Make normalised structure factors please

        # Zero out absurdly large values before normalisation.
        sel_big = self.no_aniso_array.data() > 1.e+50
        self.no_aniso_array = self.no_aniso_array.array(
            data=self.no_aniso_array.data().set_selected(sel_big, 0))
        self.no_aniso_array = self.no_aniso_array.set_observation_type(
            miller_array)

        normalistion = absolute_scaling.kernel_normalisation(
            self.no_aniso_array, auto_kernel=True)
        self.normalised_miller = normalistion.normalised_miller.deep_copy()

        self.phil_object = phil_object

        ## Some basic statistics and sanity checks follow
        if verbose > 0:
            print >> out, "Some basic intensity statistics follow."
            print >> out

        basic_data_stats = data_statistics.basic_intensity_statistics(
            miller_array,
            aniso_scale_and_b.p_scale,
            aniso_scale_and_b.u_star,
            iso_scale_and_b.scat_info,
            out=out,
            out_plot=out_plot)
        self.basic_data_stats = basic_data_stats
        self.miller_array = basic_data_stats.new_miller

        #relative wilson plot
        self.rel_wilson = None
        if (miller_calc is not None) and (miller_calc.d_min() < 4.0):
            try:
                self.rel_wilson = relative_wilson.relative_wilson(
                    miller_obs=miller_array, miller_calc=miller_calc)
            except RuntimeError, e:
                print >> out, "*** Error calculating relative Wilson plot - skipping."
                print >> out, ""
コード例 #8
0
ファイル: pre_scale.py プロジェクト: dials/cctbx
  def __init__(self,
               miller_array,
               pre_scaling_protocol,
               basic_info,
               out=None):
    """Pre-scale a Miller array before further analysis.

    Performs a Matthews (unit-cell contents) analysis, reports any
    requested resolution limits, removes Wilson-statistics outliers and,
    if requested, applies anisotropic absolute scaling.  The processed
    array is kept in self.x1.
    """
    ## Make deep copy of the miller array of interest
    self.x1 = miller_array.deep_copy()
    self.options=pre_scaling_protocol
    self.basic_info= basic_info

    ## Determine unit_cell contents
    print(file=out)
    print("Matthews analyses", file=out)
    print("-----------------", file=out)
    print(file=out)
    print("Inspired by: Kantardjieff and Rupp. Prot. Sci. 12(9): 1865-1871 (2003).", file=out)
    matthews_analyses = matthews.matthews_rupp(
      crystal_symmetry = self.x1,
      n_residues = self.basic_info.n_residues,
      n_bases = self.basic_info.n_bases,
      out=out, verbose=1)
    n_residues=matthews_analyses[0]
    n_bases=matthews_analyses[1]
    n_copies_solc=matthews_analyses[2]

    # Fill in missing contents with the Matthews estimates.
    # (Fixed: compare to None with "is", not "==" -- identity test per
    # PEP 8; "== None" can misbehave for objects overriding __eq__.)
    if (self.basic_info.n_residues is None):
      self.basic_info.n_residues = n_residues
    if (self.basic_info.n_bases is None):
      self.basic_info.n_bases = n_bases


    ## apply resolution cut
    print(file=out)
    print("Applying resolution cut", file=out)
    print("-----------------------", file=out)

    if self.options.low_resolution is None:
      if self.options.high_resolution is None:
        print("No resolution cut is made", file=out)

    low_cut=float(1e6)
    if self.options.low_resolution is not None:
      low_cut = self.options.low_resolution
      print("Specified low resolution limit: %3.2f"%(
       float(self.options.low_resolution) ), file=out)

    high_cut = 0
    if self.options.high_resolution is not None:
      high_cut = self.options.high_resolution
      print("Specified high resolution limit: %3.2f"%(
       float(self.options.high_resolution) ), file=out)

    ## perform outlier analyses
    ##
    ## Do a simple outlier analyses please
    print(file=out)
    print("Wilson statistics based outlier analyses", file=out)
    print("----------------------------------------", file=out)
    print(file=out)
    native_outlier = data_statistics.possible_outliers(
      miller_array = self.x1,
      prob_cut_ex = self.options.outlier_level_extreme,
      prob_cut_wil = self.options.outlier_level_wilson )
    native_outlier.show(out=out)

    self.x1 = native_outlier.remove_outliers(
      self.x1 )

    ## apply anisotropic scaling  (final B-value will be set to b_add)!
    if self.options.aniso_correction:

      b_final = self.options.b_add
      if b_final is None:
        b_final = 0.0

      print(file=out)
      print("Anisotropic absolute scaling of data", file=out)
      print("--------------------------------------", file=out)
      print(file=out)

      aniso_correct = absolute_scaling.ml_aniso_absolute_scaling(
        miller_array = self.x1,
        n_residues = n_residues*\
        self.x1.space_group().order_z()*n_copies_solc,
        n_bases = n_bases*\
        self.x1.space_group().order_z()*n_copies_solc)
      aniso_correct.show(out=out,verbose=1)
      print(file=out)
      print("  removing anisotropy for native  ", file=out)
      print(file=out)
      u_star_correct_nat = aniso_correct.u_star
      self.x1 = absolute_scaling.anisotropic_correction(
        self.x1,
        aniso_correct.p_scale,
        u_star_correct_nat  )
コード例 #9
0
  def __init__(self,
               miller_array,
               parameters,
               out=None,
               n_residues=100,
               n_bases=0):
    """Pre-process a Miller array according to the given parameter object.

    Three optional stages are applied in order; the result of each is kept:
      self.no_aniso_array   -- after anisotropy removal (or the merged input)
      self.new_miller_array -- after outlier rejection
      self.final_array      -- after twinning / detwinning

    Raises Sorry when a twin fraction is missing or outside [0, 0.5].
    """

    self.params=parameters
    # Work on a merged deep copy so the caller's array is never modified.
    self.miller_array=miller_array.deep_copy().set_observation_type(
      miller_array).merge_equivalents().array()
    self.out = out
    if self.out is None:
      self.out = sys.stdout
    if self.out == "silent":
      self.out = null_out()


    self.no_aniso_array = self.miller_array
    if self.params.aniso.action == "remove_aniso":
      # First perform anisotropic ML scaling to estimate the aniso tensor.
      aniso_scale_and_b = absolute_scaling.ml_aniso_absolute_scaling(
        miller_array = self.miller_array,
        n_residues = n_residues,
        n_bases = n_bases)
      aniso_scale_and_b.p_scale = 0 # set the p_scale back to 0!
      aniso_scale_and_b.show(out=out)
      # now do aniso correction please
      self.aniso_p_scale = aniso_scale_and_b.p_scale
      self.aniso_u_star  = aniso_scale_and_b.u_star
      self.aniso_b_cart  = aniso_scale_and_b.b_cart
      # Choose the overall isotropic B to re-impose after removal.
      if self.params.aniso.final_b == "eigen_min":
        b_use=aniso_scale_and_b.eigen_values[2]
      elif self.params.aniso.final_b == "eigen_mean" :
        b_use=flex.mean(aniso_scale_and_b.eigen_values)
      elif self.params.aniso.final_b == "user_b_iso":
        assert self.params.aniso.b_iso is not None
        b_use=self.params.aniso.b_iso
      else:
        b_use = 30

      b_cart_aniso_removed = [ -b_use,
                               -b_use,
                               -b_use,
                               0,
                               0,
                               0]
      u_star_aniso_removed = adptbx.u_cart_as_u_star(
        miller_array.unit_cell(),
        adptbx.b_as_u( b_cart_aniso_removed  ) )
      ## Done in two steps (remove aniso, then restore isotropic B); could
      ## easily be a single step -- kept separate for clarity only.
      self.no_aniso_array = absolute_scaling.anisotropic_correction(
        self.miller_array,0.0,aniso_scale_and_b.u_star )
      self.no_aniso_array = absolute_scaling.anisotropic_correction(
        self.no_aniso_array,0.0,u_star_aniso_removed)
      self.no_aniso_array = self.no_aniso_array.set_observation_type(
        miller_array )

    # that is done now, now we can do outlier detection if desired
    outlier_manager = outlier_rejection.outlier_manager(
      self.no_aniso_array,
      None,
      out=self.out)


    self.new_miller_array = self.no_aniso_array
    if self.params.outlier.action == "basic":
      print >> self.out, "Non-outliers found by the basic wilson statistics"
      print >> self.out, "protocol will be written out."
      basic_array = outlier_manager.basic_wilson_outliers(
        p_basic_wilson = self.params.outlier.parameters.basic_wilson.level,
        return_data = True)
      self.new_miller_array = basic_array

    if self.params.outlier.action == "extreme":
      print >> self.out, "Non-outliers found by the extreme value wilson statistics"
      print >> self.out, "protocol will be written out."
      extreme_array = outlier_manager.extreme_wilson_outliers(
      p_extreme_wilson = self.params.outlier.parameters.extreme_wilson.level,
      return_data = True)
      self.new_miller_array = extreme_array

    if self.params.outlier.action == "beamstop":
      print >> self.out, "Outliers found for the beamstop shadow"
      print >> self.out, "problems detection protocol will be written out."
      beamstop_array = outlier_manager.beamstop_shadow_outliers(
        level = self.params.outlier.parameters.beamstop.level,
        d_min = self.params.outlier.parameters.beamstop.d_min,
        return_data=True)
      self.new_miller_array = beamstop_array

    if self.params.outlier.action == "None":
      self.new_miller_array =  self.no_aniso_array



    # now we can twin or detwin the data if needed
    self.final_array = self.new_miller_array
    if self.params.symmetry.action == "twin":
      alpha = self.params.symmetry.twinning_parameters.fraction
      if (alpha is None) :
        raise Sorry("Twin fraction not specified, not twinning data")
      elif not (0 <= alpha <= 0.5):
        raise Sorry("Twin fraction must be between 0 and 0.5.")
      print >> self.out
      print >> self.out, "Twinning given data"
      print >> self.out, "-------------------"
      print >> self.out
      print >> self.out, "Artifically twinning the data with fraction %3.2f" %\
        alpha

      self.final_array = self.new_miller_array.twin_data(
        twin_law = self.params.symmetry.twinning_parameters.twin_law,
        alpha=alpha).as_intensity_array()

    elif (self.params.symmetry.action == "detwin") :
      twin_law = self.params.symmetry.twinning_parameters.twin_law
      alpha = self.params.symmetry.twinning_parameters.fraction
      if (alpha is None) :
        raise Sorry("Twin fraction not specified, not detwinning data")
      elif not (0 <= alpha <= 0.5):
        raise Sorry("Twin fraction must be between 0 and 0.5.")
      print >> self.out, """

Attempting to detwin data
-------------------------
Detwinning data with:
  - twin law:      %s
  - twin fraciton: %.2f

BE WARNED! DETWINNING OF DATA DOES NOT SOLVE YOUR TWINNING PROBLEM!
PREFERABLY, REFINEMENT SHOULD BE CARRIED OUT AGAINST ORIGINAL DATA
ONLY USING A TWIN SPECIFIC TARGET FUNCTION!

""" % (twin_law, alpha)
      self.final_array = self.new_miller_array.detwin_data(
        twin_law=twin_law,
        alpha=alpha).as_intensity_array()

    assert self.final_array is not None
コード例 #10
0
ファイル: pre_scale.py プロジェクト: cctbx/cctbx-playground
  def __init__(self,
               miller_array,
               pre_scaling_protocol,
               basic_info,
               out=None):
    """Pre-scale a Miller array before further analysis.

    Performs a Matthews (unit-cell contents) analysis, reports any
    requested resolution limits, removes Wilson-statistics outliers and,
    if requested, applies anisotropic absolute scaling.  The processed
    array is kept in self.x1.
    """
    ## Make deep copy of the miller array of interest
    self.x1 = miller_array.deep_copy()
    self.options=pre_scaling_protocol
    self.basic_info= basic_info

    ## Determine unit_cell contents
    print >> out
    print >> out, "Matthews analyses"
    print >> out, "-----------------"
    print >> out
    print >> out, "Inspired by: Kantardjieff and Rupp. Prot. Sci. 12(9): 1865-1871 (2003)."
    matthews_analyses = matthews.matthews_rupp(
      crystal_symmetry = self.x1,
      n_residues = self.basic_info.n_residues,
      n_bases = self.basic_info.n_bases,
      out=out, verbose=1)
    n_residues=matthews_analyses[0]
    n_bases=matthews_analyses[1]
    n_copies_solc=matthews_analyses[2]

    # Fill in missing contents with the Matthews estimates.
    # (Fixed: compare to None with "is", not "==" -- identity test per
    # PEP 8; "== None" can misbehave for objects overriding __eq__.)
    if (self.basic_info.n_residues is None):
      self.basic_info.n_residues = n_residues
    if (self.basic_info.n_bases is None):
      self.basic_info.n_bases = n_bases


    ## apply resolution cut
    print >> out
    print >> out, "Applying resolution cut"
    print >> out, "-----------------------"

    if self.options.low_resolution is None:
      if self.options.high_resolution is None:
        print >> out, "No resolution cut is made"

    low_cut=float(1e6)
    if self.options.low_resolution is not None:
      low_cut = self.options.low_resolution
      print >> out, "Specified low resolution limit: %3.2f"%(
       float(self.options.low_resolution) )

    high_cut = 0
    if self.options.high_resolution is not None:
      high_cut = self.options.high_resolution
      print >> out, "Specified high resolution limit: %3.2f"%(
       float(self.options.high_resolution) )

    ## perform outlier analyses
    ##
    ## Do a simple outlier analyses please
    print >> out
    print >> out, "Wilson statistics based outlier analyses"
    print >> out, "----------------------------------------"
    print >> out
    native_outlier = data_statistics.possible_outliers(
      miller_array = self.x1,
      prob_cut_ex = self.options.outlier_level_extreme,
      prob_cut_wil = self.options.outlier_level_wilson )
    native_outlier.show(out=out)

    self.x1 = native_outlier.remove_outliers(
      self.x1 )

    ## apply anisotropic scaling  (final B-value will be set to b_add)!
    if self.options.aniso_correction:

      b_final = self.options.b_add
      if b_final is None:
        b_final = 0.0

      print >> out
      print >> out, "Anisotropic absolute scaling of data"
      print >> out, "--------------------------------------"
      print >> out

      aniso_correct = absolute_scaling.ml_aniso_absolute_scaling(
        miller_array = self.x1,
        n_residues = n_residues*\
        self.x1.space_group().order_z()*n_copies_solc,
        n_bases = n_bases*\
        self.x1.space_group().order_z()*n_copies_solc)
      aniso_correct.show(out=out,verbose=1)
      print >> out
      print >> out, "  removing anisotropy for native  "
      print >> out
      u_star_correct_nat = aniso_correct.u_star
      self.x1 = absolute_scaling.anisotropic_correction(
        self.x1,
        aniso_correct.p_scale,
        u_star_correct_nat  )