Example #1
    def get_percentile_cutoffs(self, map, vol_cutoff_plus_percent,
                               vol_cutoff_minus_percent):
        """
    For the double-step filtration in cctbx.miller (used as part of the
    procedure for replacing missing F-obs in maps), we need to calculate upper
    and lower cutoffs for the data based on percentile values.  This can be
    done in just a few lines of code by using flex.sort_permutation over the
    entire map, but this has a huge memory overhead (and possibly computational
    overhead as well).  Since we are only interested in subsets of values at
    the extreme ends of the distribution, we can perform the sort for these
    subsets instead, which should cut down on memory use.

    Returns the upper and lower map value cutoffs (as Python floats).
    """
        map_values = map.as_1d()
        size = map_values.size()
        # upper limit
        i_bin_plus = -1
        for i_bin, value in enumerate(self.v_values()):
            if ((value * 100) <= vol_cutoff_plus_percent):
                i_bin_plus = i_bin - 1
                break
        assert (i_bin_plus >= 0)
        cutoffp_lower_limit = self.arguments()[i_bin_plus]
        top_values = map_values.select(map_values >= cutoffp_lower_limit)
        i_upper = min(int(size * (vol_cutoff_plus_percent / 100.)),
                      top_values.size())
        s = flex.sort_permutation(top_values)
        top_values_sorted = top_values.select(s)
        del s
        assert (top_values_sorted.size() >= i_upper)
        cutoffp = top_values_sorted[-i_upper]
        del top_values
        del top_values_sorted
        # lower limit
        i_bin_minus = -1
        for i_bin, value in enumerate(self.c_values()):
            if ((value * 100) > vol_cutoff_minus_percent):
                i_bin_minus = i_bin
                break
        assert (i_bin_minus >= 0)
        cutoffm_upper_limit = self.arguments()[i_bin_minus]
        bottom_values = map_values.select(map_values <= cutoffm_upper_limit)
        i_lower = min(int(size * (vol_cutoff_minus_percent / 100.)),
                      bottom_values.size() - 1)
        s = flex.sort_permutation(bottom_values)
        bottom_values_sorted = bottom_values.select(s)
        del s
        assert (bottom_values_sorted.size() > i_lower)
        cutoffm = bottom_values_sorted[i_lower]
        del bottom_values
        del bottom_values_sorted
        return cutoffp, cutoffm
Example #2
  def get_percentile_cutoffs (self, map, vol_cutoff_plus_percent,
      vol_cutoff_minus_percent) :
    """
    For the double-step filtration in cctbx.miller (used as part of the
    procedure for replacing missing F-obs in maps), we need to calculate upper
    and lower cutoffs for the data based on percentile values.  This can be
    done in just a few lines of code by using flex.sort_permutation over the
    entire map, but this has a huge memory overhead (and possibly computational
    overhead as well).  Since we are only interested in subsets of values at
    the extreme ends of the distribution, we can perform the sort for these
    subsets instead, which should cut down on memory use.

    Returns the upper and lower map value cutoffs (as Python floats).
    """
    map_values = map.as_1d()
    size = map_values.size()
    # upper limit
    i_bin_plus = -1
    for i_bin, value in enumerate(self.v_values()) :
      if ((value*100) <= vol_cutoff_plus_percent) :
        i_bin_plus = i_bin - 1
        break
    assert (i_bin_plus >= 0)
    cutoffp_lower_limit = self.arguments()[i_bin_plus]
    top_values = map_values.select(map_values >= cutoffp_lower_limit)
    i_upper = min(int(size * (vol_cutoff_plus_percent / 100.)),
                  top_values.size())
    s = flex.sort_permutation(top_values)
    top_values_sorted = top_values.select(s)
    del s
    assert (top_values_sorted.size() >= i_upper)
    cutoffp = top_values_sorted[-i_upper]
    del top_values
    del top_values_sorted
    # lower limit
    i_bin_minus = -1
    for i_bin, value in enumerate(self.c_values()) :
      if ((value*100) > vol_cutoff_minus_percent) :
        i_bin_minus = i_bin
        break
    assert (i_bin_minus >= 0)
    cutoffm_upper_limit = self.arguments()[i_bin_minus]
    bottom_values = map_values.select(map_values <= cutoffm_upper_limit)
    i_lower = min(int(size * (vol_cutoff_minus_percent / 100.)),
                  bottom_values.size() - 1)
    s = flex.sort_permutation(bottom_values)
    bottom_values_sorted = bottom_values.select(s)
    del s
    assert (bottom_values_sorted.size() > i_lower)
    cutoffm = bottom_values_sorted[i_lower]
    del bottom_values
    del bottom_values_sorted
    return cutoffp, cutoffm
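The docstring above explains why only the extreme tails are sorted: a full-map flex.sort_permutation would be memory-hungry. A rough, self-contained sketch of the same idea with made-up numbers (only scitbx.array_family.flex is assumed; the pre-selection by the mean is a simplification for illustration, not the cctbx implementation):

from scitbx.array_family import flex

values = flex.double([0.1, 2.5, -1.0, 3.7, 0.9, 5.2, -0.3, 4.4])
top_percent = 25.0  # we want the cutoff above which the top 25% of values lie

# crude pre-selection: for these numbers the top tail lies above the mean,
# so only that subset needs to be sorted
candidates = values.select(values > flex.mean(values))
candidates_sorted = candidates.select(flex.sort_permutation(candidates))
n_keep = max(1, int(values.size() * top_percent / 100.0))
cutoff = candidates_sorted[-n_keep]  # smallest value still inside the top tail
print(cutoff)  # 4.4 for the numbers above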
Example #3
  def __init__(self,
               hooft_analysis,
               use_students_t_distribution=False,
               students_t_nu=None,
               probability_plot_slope=None):
    self.delta_fo2, minus_fo2 =\
        hooft_analysis.delta_fo2.generate_bijvoet_mates().hemispheres_acentrics()
    self.delta_fc2, minus_fc2 =\
        hooft_analysis.delta_fc2.generate_bijvoet_mates().hemispheres_acentrics()
    # we want to plot both hemispheres
    self.delta_fo2.indices().extend(minus_fo2.indices())
    self.delta_fo2.data().extend(minus_fo2.data() * -1)
    self.delta_fo2.sigmas().extend(minus_fo2.sigmas())
    self.delta_fc2.indices().extend(minus_fc2.indices())
    self.delta_fc2.data().extend(minus_fc2.data() * -1)
    self.indices = self.delta_fo2.indices()
    observed_deviations = (hooft_analysis.G * self.delta_fc2.data()
                           - self.delta_fo2.data())/self.delta_fo2.sigmas()

    if probability_plot_slope is not None:
      observed_deviations /= probability_plot_slope
    selection = flex.sort_permutation(observed_deviations)
    observed_deviations = observed_deviations.select(selection)
    if use_students_t_distribution:
      if students_t_nu is None:
        students_t_nu = maximise_students_t_correlation_coefficient(
          observed_deviations, 1, 200)
      self.distribution = distributions.students_t_distribution(students_t_nu)
    else:
      self.distribution = distributions.normal_distribution()
    self.x = self.distribution.quantiles(observed_deviations.size())
    self.y = observed_deviations
    self.fit = flex.linear_regression(self.x[5:-5], self.y[5:-5])
    self.correlation = flex.linear_correlation(self.x[5:-5], self.y[5:-5])
    assert self.fit.is_well_defined()
Example #4
File: flex.py Project: kek-pf-mx/dials
    def sort(self, name, reverse=False, order=None):
        '''
    Sort the reflection table by a key.

    :param name: The name of the column
    :param reverse: Reverse the sort order
    :param order: For multi-element items, specify the element sort order

    '''
        import __builtin__
        if type(self[name]) in [
                vec2_double, vec3_double, mat3_double, int6, miller_index
        ]:
            data = self[name]
            if order is None:
                perm = flex.size_t(
                    __builtin__.sorted(range(len(self)),
                                       key=lambda x: data[x],
                                       reverse=reverse))
            else:
                assert len(order) == len(data[0])

                def compare(x, y):
                    a = tuple(x[i] for i in order)
                    b = tuple(y[i] for i in order)
                    return cmp(a, b)

                perm = flex.size_t(
                    __builtin__.sorted(range(len(self)),
                                       key=lambda x: data[x],
                                       cmp=compare,
                                       reverse=reverse))
        else:
            perm = flex.sort_permutation(self[name], reverse=reverse)
        self.reorder(perm)
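The order= branch above relies on Python 2 only features (the __builtin__ module and the cmp= keyword of sorted). A hedged sketch of the same element-order comparison for Python 3, using only the standard library; sort_indices_by_order is a made-up helper name and plain tuples stand in for vec3_double items:

import functools

def sort_indices_by_order(data, order, reverse=False):
    # compare two multi-element items on the components listed in `order`
    def compare(i, j):
        a = tuple(data[i][k] for k in order)
        b = tuple(data[j][k] for k in order)
        return (a > b) - (a < b)  # Python 3 replacement for cmp(a, b)
    return sorted(range(len(data)),
                  key=functools.cmp_to_key(compare),
                  reverse=reverse)

print(sort_indices_by_order([(2, 1, 0), (1, 5, 9), (2, 0, 3)], order=(0, 2)))  # [1, 0, 2]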
Example #5
def i_sig_i_vs_batch(intensities, batches):
  assert intensities.size() == batches.size()
  assert intensities.sigmas() is not None
  sel = intensities.sigmas() > 0

  i_sig_i = intensities.data().select(sel) / intensities.sigmas().select(sel)
  batches = batches.select(sel)

  bins = []
  data = []

  perm = flex.sort_permutation(batches.data())
  batches = batches.data().select(perm)
  i_sig_i = i_sig_i.select(perm)

  i_batch_start = 0
  current_batch = flex.min(batches)
  n_ref = batches.size()
  for i_ref in range(n_ref + 1):
    if i_ref == n_ref or batches[i_ref] != current_batch:
      assert batches[i_batch_start:i_ref].all_eq(current_batch)
      data.append(flex.mean(i_sig_i[i_batch_start:i_ref]))
      bins.append(current_batch)
      i_batch_start = i_ref
      if i_ref < n_ref:
        current_batch = batches[i_batch_start]

  return batch_binned_data(bins, data)
Example #6
def exercise_truncate(q_large):
    tprs_full = dmtbx.triplet_generator(miller_set=q_large,
                                        discard_weights=True)
    tprs = dmtbx.triplet_generator(miller_set=q_large,
                                   amplitudes=q_large.data(),
                                   max_relations_per_reflection=0,
                                   discard_weights=True)
    assert tprs.n_relations().all_eq(tprs_full.n_relations())
    for n in (1, 10, 100, 1000):
        tprs = dmtbx.triplet_generator(miller_set=q_large,
                                       amplitudes=q_large.data(),
                                       max_relations_per_reflection=n,
                                       discard_weights=True)
        assert (tprs.n_relations() >= n).all_eq(tprs.n_relations() == n)
    n = 3
    tprs = dmtbx.triplet_generator(miller_set=q_large,
                                   amplitudes=q_large.data(),
                                   max_relations_per_reflection=n,
                                   discard_weights=True)
    n_rel_full = tprs_full.n_relations()
    n_rel = tprs.n_relations()
    amp = q_large.data()
    for ih in range(q_large.indices().size()):
        if (n_rel[ih] == n_rel_full[ih]): continue
        aa_full = flex.double()
        for relation in tprs_full.relations_for(ih):
            aa_full.append(amp[relation.ik()] * amp[relation.ihmk()])
        aa = flex.double()
        for relation in tprs.relations_for(ih):
            aa.append(amp[relation.ik()] * amp[relation.ihmk()])
        aa_full = aa_full.select(
            flex.sort_permutation(data=aa_full, reverse=True))
        assert approx_equal(aa_full[:n], aa)
Example #7
  def __init__(self,
               hooft_analysis,
               use_students_t_distribution=False,
               students_t_nu=None,
               probability_plot_slope=None):
    self.delta_fo2, minus_fo2 =\
        hooft_analysis.delta_fo2.generate_bijvoet_mates().hemispheres_acentrics()
    self.delta_fc2, minus_fc2 =\
        hooft_analysis.delta_fc2.generate_bijvoet_mates().hemispheres_acentrics()
    # we want to plot both hemispheres
    self.delta_fo2.indices().extend(minus_fo2.indices())
    self.delta_fo2.data().extend(minus_fo2.data() * -1)
    self.delta_fo2.sigmas().extend(minus_fo2.sigmas())
    self.delta_fc2.indices().extend(minus_fc2.indices())
    self.delta_fc2.data().extend(minus_fc2.data() * -1)
    self.indices = self.delta_fo2.indices()
    observed_deviations = (hooft_analysis.G * self.delta_fc2.data()
                           - self.delta_fo2.data())/self.delta_fo2.sigmas()

    if probability_plot_slope is not None:
      observed_deviations /= probability_plot_slope
    selection = flex.sort_permutation(observed_deviations)
    observed_deviations = observed_deviations.select(selection)
    if use_students_t_distribution:
      if students_t_nu is None:
        students_t_nu = maximise_students_t_correlation_coefficient(
          observed_deviations, 1, 200)
      self.distribution = distributions.students_t_distribution(students_t_nu)
    else:
      self.distribution = distributions.normal_distribution()
    self.x = self.distribution.quantiles(observed_deviations.size())
    self.y = observed_deviations
    self.fit = flex.linear_regression(self.x[5:-5], self.y[5:-5])
    self.correlation = flex.linear_correlation(self.x[5:-5], self.y[5:-5])
    assert self.fit.is_well_defined()
Example #8
File: absences.py Project: dials/cctbx
    def suggest_likely_candidates(self, acceptable_violations=1e+90):
        used = flex.bool(len(self.sg_choices), False)
        order = []

        all_done = False
        count = -1
        if (len(self.tuple_score) == 0):
            return []
        tmp_scores = []
        for tt in self.tuple_score:
            tmp_scores.append(tt[0])
        order = flex.sort_permutation(flex.double(tmp_scores), False)

        sorted_rows = []
        max_score = flex.min(flex.double(tmp_scores))
        for ii in order:
            sg = self.sg_choices[ii]
            tmp_n = self.n[ii]
            tmp_violations = self.violations[ii]
            tmp_mean_i = self.mean_i[ii]
            tmp_mean_isigi = self.mean_isigi[ii]
            tuple_score = self.tuple_score[ii]

            sorted_rows.append([
                str(sg),
                '%i' % (tmp_n),
                '%8.2f  ' % (tmp_mean_i),
                '%8.2f  ' % (tmp_mean_isigi),
                ' %i ' % (tuple_score[1]),
                ' %i ' % (tuple_score[2]),
                ' %8.3e ' % ((tuple_score[0] - max_score))
            ])

        return sorted_rows
Example #9
def exercise_truncate(q_large):
    tprs_full = dmtbx.triplet_generator(miller_set=q_large, discard_weights=True)
    tprs = dmtbx.triplet_generator(
        miller_set=q_large, amplitudes=q_large.data(), max_relations_per_reflection=0, discard_weights=True
    )
    assert tprs.n_relations().all_eq(tprs_full.n_relations())
    for n in (1, 10, 100, 1000):
        tprs = dmtbx.triplet_generator(
            miller_set=q_large, amplitudes=q_large.data(), max_relations_per_reflection=n, discard_weights=True
        )
        assert (tprs.n_relations() >= n).all_eq(tprs.n_relations() == n)
    n = 3
    tprs = dmtbx.triplet_generator(
        miller_set=q_large, amplitudes=q_large.data(), max_relations_per_reflection=n, discard_weights=True
    )
    n_rel_full = tprs_full.n_relations()
    n_rel = tprs.n_relations()
    amp = q_large.data()
    for ih in xrange(q_large.indices().size()):
        if n_rel[ih] == n_rel_full[ih]:
            continue
        aa_full = flex.double()
        for relation in tprs_full.relations_for(ih):
            aa_full.append(amp[relation.ik()] * amp[relation.ihmk()])
        aa = flex.double()
        for relation in tprs.relations_for(ih):
            aa.append(amp[relation.ik()] * amp[relation.ihmk()])
        aa_full = aa_full.select(flex.sort_permutation(data=aa_full, reverse=True))
        assert approx_equal(aa_full[:n], aa)
Example #10
  def suggest_likely_candidates( self, acceptable_violations = 1e+90 ):
    used = flex.bool( len(self.sg_choices), False )
    order = []

    all_done = False
    count = -1
    if (len(self.tuple_score) == 0) :
      return []
    tmp_scores = []
    for tt in self.tuple_score:
      tmp_scores.append( tt[0] )
    order = flex.sort_permutation( flex.double( tmp_scores ), False  )


    sorted_rows = []
    max_score = flex.min( flex.double( tmp_scores ) )
    for ii in order:
      sg             = self.sg_choices[ii]
      tmp_n          = self.n[ii]
      tmp_violations = self.violations[ii]
      tmp_mean_i     = self.mean_i[ii]
      tmp_mean_isigi = self.mean_isigi[ii]
      tuple_score    = self.tuple_score[ii]

      sorted_rows.append( [str(sg), '%i'%(tmp_n),
                           '%8.2f  '%(tmp_mean_i),
                           '%8.2f  '%(tmp_mean_isigi),
                           ' %i '%(tuple_score[1]),
                           ' %i '%(tuple_score[2]),
                           ' %8.3e '%((tuple_score[0]-max_score))
                          ])

    return sorted_rows
Example #11
File: CommonScaler.py Project: hainm/xia2
def anomalous_probability_plot(intensities, expected_delta=None):
  from scitbx.math import distributions
  from scitbx.array_family import flex

  assert intensities.is_unique_set_under_symmetry()
  assert intensities.anomalous_flag()

  dI = intensities.anomalous_differences()
  y = dI.data()/dI.sigmas()
  perm = flex.sort_permutation(y)
  y = y.select(perm)
  distribution = distributions.normal_distribution()

  x = distribution.quantiles(y.size())

  if expected_delta is not None:
    sel = flex.abs(x) < expected_delta
    x = x.select(sel)
    y = y.select(sel)

  fit = flex.linear_regression(x, y)
  correlation = flex.linear_correlation(x, y)
  assert fit.is_well_defined()

  if 0:
    from matplotlib import pyplot
    pyplot.scatter(x, y)
    m = fit.slope()
    c = fit.y_intercept()
    pyplot.plot(pyplot.xlim(), [m * x_ + c for x_ in pyplot.xlim()])
    pyplot.show()

  return fit.slope(), fit.y_intercept(), x.size()
Example #12
def anomalous_probability_plot(intensities, expected_delta=None):
    from scitbx.math import distributions
    from scitbx.array_family import flex

    assert intensities.is_unique_set_under_symmetry()
    assert intensities.anomalous_flag()

    dI = intensities.anomalous_differences()
    y = dI.data() / dI.sigmas()
    perm = flex.sort_permutation(y)
    y = y.select(perm)
    distribution = distributions.normal_distribution()

    x = distribution.quantiles(y.size())

    if expected_delta is not None:
        sel = flex.abs(x) < expected_delta
        x = x.select(sel)
        y = y.select(sel)

    fit = flex.linear_regression(x, y)
    correlation = flex.linear_correlation(x, y)
    assert fit.is_well_defined()

    if 0:
        from matplotlib import pyplot
        pyplot.scatter(x, y)
        m = fit.slope()
        c = fit.y_intercept()
        pyplot.plot(pyplot.xlim(), [m * x_ + c for x_ in pyplot.xlim()])
        pyplot.show()

    return fit.slope(), fit.y_intercept(), x.size()
Example #13
    def __init__(self, data, lattice_id, resort=False, verbose=True):
        self.verbose = verbose
        ######  INPUTS #######
        #       data = miller array: ASU miller index & intensity
        #       lattice_id = flex double: assignment of each miller index to a lattice number
        ######################

        if resort:
            order = flex.sort_permutation(lattice_id.data())
            sorted_lattice_id = flex.select(lattice_id.data(), order)
            sorted_data = data.data().select(order)
            sorted_indices = data.indices().select(order)
            self.lattice_id = sorted_lattice_id
            self.data = data.customized_copy(indices=sorted_indices,
                                             data=sorted_data)
        else:
            self.lattice_id = lattice_id.data()  # type flex int
            self.data = data  # type miller array with flex double data
        assert type(self.data.indices()) == type(flex.miller_index())
        assert type(self.data.data()) == type(flex.double())

        # construct a lookup for the separate lattices
        last_id = -1
        self.lattices = flex.int()
        for n in xrange(len(self.lattice_id)):
            if self.lattice_id[n] != last_id:
                last_id = self.lattice_id[n]
                self.lattices.append(n)
Example #14
  def __init__(self, data, lattice_id, resort=False, verbose=True):
    self.verbose = verbose
    ######  INPUTS #######
    #       data = miller array: ASU miller index & intensity
    #       lattice_id = flex double: assignment of each miller index to a lattice number
    ######################

    if resort:
      order = flex.sort_permutation(lattice_id.data())
      sorted_lattice_id = flex.select(lattice_id.data(), order)
      sorted_data = data.data().select( order)
      sorted_indices = data.indices().select( order)
      self.lattice_id = sorted_lattice_id
      self.data = data.customized_copy(indices = sorted_indices, data = sorted_data)
    else:
      self.lattice_id = lattice_id.data() # type flex int
      self.data = data # type miller array with flex double data
    assert type(self.data.indices()) == type(flex.miller_index())
    assert type(self.data.data()) == type(flex.double())

    # construct a lookup for the separate lattices
    last_id = -1; self.lattices = flex.int()
    for n in xrange(len(self.lattice_id)):
      if self.lattice_id[n] != last_id:
        last_id = self.lattice_id[n]
        self.lattices.append(n)
Example #15
def sort_triple( triple ):
  order = flex.sort_permutation( flex.int(triple),False )
  sorted_triple = ( triple[order[0]],
                    triple[order[1]],
                    triple[order[2]] )

  return sorted_triple
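A quick self-contained check of the helper above with made-up values (scitbx is assumed to be installed):

from scitbx.array_family import flex

def sort_triple(triple):
    # ascending sort of a 3-tuple via a flex permutation
    order = flex.sort_permutation(flex.int(triple), False)
    return (triple[order[0]], triple[order[1]], triple[order[2]])

print(sort_triple((7, 2, 5)))  # (2, 5, 7)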
Example #16
 def set_scene_bin_thresholds(self,
                              binvals=[],
                              bin_scene_label="Resolution",
                              nbins=6):
     self.viewer.binscenelabel = bin_scene_label
     if binvals:
         binvals = list(1.0 / flex.double(binvals))
     else:
         if self.viewer.binscenelabel == "Resolution":
             uc = self.viewer.miller_array.unit_cell()
             indices = self.viewer.miller_array.indices()
             dmaxmin = self.viewer.miller_array.d_max_min()
             binning = miller.binning(uc, nbins, indices, dmaxmin[0],
                                      dmaxmin[1])
             binvals = [
                 binning.bin_d_range(n)[0] for n in binning.range_all()
             ]
             binvals = [e for e in binvals
                        if e != -1.0]  # delete dummy limit
             binvals = list(1.0 / flex.double(binvals))
         else:
             bindata = self.viewer.HKLscenes[int(
                 self.viewer.binscenelabel)].data.deep_copy()
             selection = flex.sort_permutation(bindata)
             bindata_sorted = bindata.select(selection)
             # get binvals by dividing bindata_sorted with nbins
             binvals = [bindata_sorted[0]] * nbins  #
             for i, e in enumerate(bindata_sorted):
                 idiv = int(nbins * float(i) / len(bindata_sorted))
                 binvals[idiv] = e
     binvals.sort()
     self.viewer.UpdateBinValues(binvals)
Example #17
 def print_table(self):
  from libtbx import table_utils
  from libtbx.str_utils import format_value

  table_header = ["Tile","Dist","Nobs","aRmsd","Rmsd","delx","dely","disp","rotdeg","Rsigma","Tsigma"]
  table_data = []
  table_data.append(table_header)
  sort_radii = flex.sort_permutation(flex.double(self.radii))
  tile_rmsds = flex.double()
  radial_sigmas = flex.double(len(self.tiles) // 4)
  tangen_sigmas = flex.double(len(self.tiles) // 4)
  for idx in range(len(self.tiles) // 4):
    x = sort_radii[idx]
    if self.tilecounts[x] < 3:
      wtaveg = 0.0
      radial = (0,0)
      tangential = (0,0)
      rmean,tmean,rsigma,tsigma=(0,0,1,1)
    else:
      wtaveg = self.weighted_average_angle_deg_from_tile(x)
      radial,tangential,rmean,tmean,rsigma,tsigma = get_radial_tangential_vectors(self,x)

    radial_sigmas[x]=rsigma
    tangen_sigmas[x]=tsigma
    table_data.append(  [
      format_value("%3d",   x),
      format_value("%7.2f", self.radii[x]),
      format_value("%6d",  self.tilecounts[x]),
      format_value("%5.2f", self.asymmetric_tile_rmsd[x]),
      format_value("%5.2f", self.tile_rmsd[x]),
      format_value("%5.2f", self.mean_cv[x][0]),
      format_value("%5.2f", self.mean_cv[x][1]),
      format_value("%5.2f", matrix.col(self.mean_cv[x]).length()),
      format_value("%6.2f", wtaveg),
      format_value("%6.2f", rsigma),
      format_value("%6.2f", tsigma),
    ])
  table_data.append([""]*len(table_header))
  rstats = flex.mean_and_variance(radial_sigmas,self.tilecounts.as_double())
  tstats = flex.mean_and_variance(tangen_sigmas,self.tilecounts.as_double())
  table_data.append(  [
      format_value("%3s",   "ALL"),
      format_value("%s", ""),
      format_value("%6d",  self.overall_N),
      format_value("%5.2f", math.sqrt(flex.mean(self.delrsq))),
      format_value("%5.2f", self.overall_rmsd),
      format_value("%5.2f", self.overall_cv[0]),
      format_value("%5.2f", self.overall_cv[1]),
      format_value("%5.2f", flex.mean(flex.double([matrix.col(cv).length() for cv in self.mean_cv]))),
      format_value("%s", ""),
      format_value("%6.2f", rstats.mean()),
      format_value("%6.2f", tstats.mean()),
    ])

  print
  print table_utils.format(table_data,has_header=1,justify='center',delim=" ")
Example #18
 def add(self,
         next_to_i_seqs,
         sites_individual=False,
         sites_torsion_angles=False,
         sites_rigid_body=False,
         adp_individual_iso=False,
         adp_individual_aniso=False,
         adp_group=False,
         group_h=False,
         adp_tls=False,
         s_occupancies=False):
     # XXX group_anomalous selection should be added
     next_to_i_seqs = flex.size_t(next_to_i_seqs)
     perm = flex.sort_permutation(next_to_i_seqs, reverse=True)
     next_to_i_seqs = next_to_i_seqs.select(perm)
     for next_to_i_seq in next_to_i_seqs:
         if (self.sites_individual is not None):
             self.sites_individual = self._add(x=self.sites_individual,
                                               next_to_i_seq=next_to_i_seq,
                                               squeeze_in=sites_individual)
         if (self.sites_torsion_angles is not None):
             self.sites_torsion_angles = self._add(
                 x=self.sites_torsion_angles,
                 next_to_i_seq=next_to_i_seq,
                 squeeze_in=sites_torsion_angles)
         if (self.sites_rigid_body is not None):
             self.sites_rigid_body = self._add(x=self.sites_rigid_body,
                                               next_to_i_seq=next_to_i_seq,
                                               squeeze_in=sites_rigid_body)
         if (self.adp_individual_iso is not None):
             self.adp_individual_iso = self._add(
                 x=self.adp_individual_iso,
                 next_to_i_seq=next_to_i_seq,
                 squeeze_in=adp_individual_iso)
         if (self.adp_individual_aniso is not None):
             self.adp_individual_aniso = self._add(
                 x=self.adp_individual_aniso,
                 next_to_i_seq=next_to_i_seq,
                 squeeze_in=adp_individual_aniso)
         if (self.adp_group is not None):
             self.adp_group = self._add(x=self.adp_group,
                                        next_to_i_seq=next_to_i_seq,
                                        squeeze_in=adp_group)
         if (self.group_h is not None):
             self.group_h = self._add(x=self.group_h,
                                      next_to_i_seq=next_to_i_seq,
                                      squeeze_in=group_h)
         if (self.adp_tls is not None):
             self.adp_tls = self._add(x=self.adp_tls,
                                      next_to_i_seq=next_to_i_seq,
                                      squeeze_in=adp_tls)
         if (self.s_occupancies is not None):
             self.s_occupancies = self._add(x=self.s_occupancies,
                                            next_to_i_seq=next_to_i_seq,
                                            squeeze_in=s_occupancies)
     return self
Example #19
 def print_table(self):
  from libtbx import table_utils
  from libtbx.str_utils import format_value

  table_header = ["Tile","Dist","Nobs","aRmsd","Rmsd","delx","dely","disp","rotdeg","Rsigma","Tsigma"]
  table_data = []
  table_data.append(table_header)
  sort_radii = flex.sort_permutation(flex.double(self.radii))
  tile_rmsds = flex.double()
  radial_sigmas = flex.double(len(self.tiles) // 4)
  tangen_sigmas = flex.double(len(self.tiles) // 4)
  for idx in range(len(self.tiles) // 4):
    x = sort_radii[idx]
    if self.tilecounts[x] < 3:
      wtaveg = 0.0
      radial = (0,0)
      tangential = (0,0)
      rmean,tmean,rsigma,tsigma=(0,0,1,1)
    else:
      wtaveg = self.weighted_average_angle_deg_from_tile(x)
      radial,tangential,rmean,tmean,rsigma,tsigma = get_radial_tangential_vectors(self,x)

    radial_sigmas[x]=rsigma
    tangen_sigmas[x]=tsigma
    table_data.append(  [
      format_value("%3d",   x),
      format_value("%7.2f", self.radii[x]),
      format_value("%6d",  self.tilecounts[x]),
      format_value("%5.2f", self.asymmetric_tile_rmsd[x]),
      format_value("%5.2f", self.tile_rmsd[x]),
      format_value("%5.2f", self.mean_cv[x][0]),
      format_value("%5.2f", self.mean_cv[x][1]),
      format_value("%5.2f", matrix.col(self.mean_cv[x]).length()),
      format_value("%6.2f", wtaveg),
      format_value("%6.2f", rsigma),
      format_value("%6.2f", tsigma),
    ])
  table_data.append([""]*len(table_header))
  rstats = flex.mean_and_variance(radial_sigmas,self.tilecounts.as_double())
  tstats = flex.mean_and_variance(tangen_sigmas,self.tilecounts.as_double())
  table_data.append(  [
      format_value("%3s",   "ALL"),
      format_value("%s", ""),
      format_value("%6d",  self.overall_N),
      format_value("%5.2f", math.sqrt(flex.mean(self.delrsq))),
      format_value("%5.2f", self.overall_rmsd),
      format_value("%5.2f", self.overall_cv[0]),
      format_value("%5.2f", self.overall_cv[1]),
      format_value("%5.2f", flex.mean(flex.double([matrix.col(cv).length() for cv in self.mean_cv]))),
      format_value("%s", ""),
      format_value("%6.2f", rstats.mean()),
      format_value("%6.2f", tstats.mean()),
    ])

  print
  print table_utils.format(table_data,has_header=1,justify='center',delim=" ")
Example #20
def percentile_cutoffs_inefficient(map_data, vol_cutoff_plus_percent,
                                   vol_cutoff_minus_percent):
    s = flex.sort_permutation(map_data.as_1d())
    map_data_sorted = map_data.select(s)
    i = map_data.size() - 1 - int(map_data.size() *
                                  (vol_cutoff_plus_percent / 100.))
    cutoffp = map_data_sorted[i]
    j = int(map_data.size() * (vol_cutoff_minus_percent / 100.))
    cutoffm = map_data_sorted[j]
    return cutoffp, cutoffm
Example #21
File: flex.py Project: biochem-fan/dials
  def sort(self, name, reverse=False):
    '''
    Sort the reflection table by a key.

    :param name: The name of the column
    :param reverse: Reverse the sort order

    '''
    perm = flex.sort_permutation(self[name], reverse=reverse)
    self.reorder(perm)
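A small usage sketch with hypothetical column names, assuming the DIALS flex.reflection_table API shown above: build a table, then sort it in place by one column (reorder() is applied to every column):

from dials.array_family import flex

table = flex.reflection_table()
table["intensity"] = flex.double([5.0, 1.0, 3.0])
table["id"] = flex.int([0, 1, 2])
table.sort("intensity")   # ascending by default
print(list(table["id"]))  # [1, 2, 0]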
Example #22
def exercise_optimise_shelxl_weights():
    def calc_goof(fo2, fc, w, k, n_params):
        fc2 = fc.as_intensity_array()
        w = w(fo2.data(), fo2.sigmas(), fc2.data(), k)
        return math.sqrt(
            flex.sum(w * flex.pow2(fo2.data() - k * fc2.data())) /
            (fo2.size() - n_params))

    xs = smtbx.development.sucrose()
    k = 0.05 + 10 * flex.random_double()
    fc = xs.structure_factors(anomalous_flag=False, d_min=0.7).f_calc()
    fo = fc.as_amplitude_array()
    fo = fo.customized_copy(data=fo.data() * math.sqrt(k))
    fo = fo.customized_copy(sigmas=0.03 * fo.data())
    sigmas = fo.sigmas()
    for i in range(fo.size()):
        fo.data()[i] += 2 * scitbx.random.variate(
          scitbx.random.normal_distribution(sigma=sigmas[i]))() \
          + 0.5*random.random()
    fo2 = fo.as_intensity_array()
    fc2 = fc.as_intensity_array()
    w = least_squares.mainstream_shelx_weighting(a=0.1)
    s = calc_goof(fo2, fc, w, k, xs.n_parameters())
    w2 = w.optimise_parameters(fo2, fc2, k, xs.n_parameters())
    s2 = calc_goof(fo2, fc, w2, k, xs.n_parameters())
    # sort data and setup binning by fc/fc_max
    fc_sq = fc.as_intensity_array()
    fc_sq_over_fc_sq_max = fc_sq.data() / flex.max(fc_sq.data())
    permutation = flex.sort_permutation(fc_sq_over_fc_sq_max)
    fc_sq_over_fc_sq_max = fc_sq.customized_copy(
        data=fc_sq_over_fc_sq_max).select(permutation)
    fc_sq = fc_sq.select(permutation)
    fo_sq = fo2.select(permutation)
    n_bins = 10
    bin_max = 0
    bin_limits = flex.size_t(1, 0)
    bin_count = flex.size_t()
    for i in range(n_bins):
        bin_limits.append(int(math.ceil((i + 1) * fc_sq.size() / n_bins)))
        bin_count.append(bin_limits[i + 1] - bin_limits[i])
    goofs_w = flex.double()
    goofs_w2 = flex.double()
    for i_bin in range(n_bins):
        sel = flex.size_t_range(bin_limits[i_bin], bin_limits[i_bin + 1])
        goofs_w2.append(
            calc_goof(fo_sq.select(sel), fc_sq.select(sel), w2, k,
                      xs.n_parameters()))
        goofs_w.append(
            calc_goof(fo_sq.select(sel), fc_sq.select(sel), w, k,
                      xs.n_parameters()))
    a = flex.mean_and_variance(goofs_w).unweighted_sample_variance()
    b = flex.mean_and_variance(goofs_w2).unweighted_sample_variance()
    assert a > b or abs(1 - s) > abs(1 - s2)
    assert a > b  # flat analysis of variance
    assert abs(1 - s) > abs(1 - s2)  # GooF close to 1
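The "sort data and setup binning by fc/fc_max" step above is equal-count binning: sort once, then slice the sorted permutation into n_bins contiguous ranges. A compact sketch with made-up data, assuming only scitbx flex:

import math
from scitbx.array_family import flex

values = flex.double([0.9, 0.1, 0.5, 0.7, 0.3, 0.2])
perm = flex.sort_permutation(values)
n_bins = 3
limits = [int(math.ceil(i * values.size() / n_bins)) for i in range(n_bins + 1)]
for i_bin in range(n_bins):
    members = perm[limits[i_bin]:limits[i_bin + 1]]  # indices of one bin
    print(list(values.select(members)))
# prints [0.1, 0.2], then [0.3, 0.5], then [0.7, 0.9]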
Example #23
 def sort(self, reverse=False):
     from scitbx.array_family import flex
     selection = flex.sort_permutation(self.heights, reverse=reverse)
     heights_sorted = self.heights.select(selection)
     sites_sorted = self.sites.select(selection)
     iseqs_sorted = None
     if (self.iseqs_of_closest_atoms is not None):
         iseqs_sorted = self.iseqs_of_closest_atoms.select(selection)
     self.heights = heights_sorted
     self.sites = sites_sorted
     self.iseqs_of_closest_atoms = iseqs_sorted
Example #24
 def sort (self, reverse=False) :
   from scitbx.array_family import flex
   selection = flex.sort_permutation(self.heights, reverse=reverse)
   heights_sorted = self.heights.select(selection)
   sites_sorted = self.sites.select(selection)
   iseqs_sorted = None
   if (self.iseqs_of_closest_atoms is not None) :
     iseqs_sorted = self.iseqs_of_closest_atoms.select(selection)
   self.heights = heights_sorted
   self.sites = sites_sorted
   self.iseqs_of_closest_atoms = iseqs_sorted
Example #25
def percentile_cutoffs_inefficient(
      map_data,
      vol_cutoff_plus_percent,
      vol_cutoff_minus_percent):
  s = flex.sort_permutation(map_data.as_1d())
  map_data_sorted = map_data.select(s)
  i = map_data.size()-1-int(map_data.size()*(vol_cutoff_plus_percent/100.))
  cutoffp = map_data_sorted[i]
  j = int(map_data.size()*(vol_cutoff_minus_percent/100.))
  cutoffm = map_data_sorted[j]
  return cutoffp, cutoffm
Example #26
File: PyChef.py Project: xia2/xia2
def remove_batch_gaps(batches):
  perm = flex.sort_permutation(batches)
  new_batches = flex.int(batches.size(), -1)
  sorted_batches = batches.select(perm)
  curr_batch = -1
  new_batch = -1
  for i, b in enumerate(sorted_batches):
    if b != curr_batch:
      curr_batch = b
      new_batch += 1
    new_batches[perm[i]] = new_batch
  return new_batches
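A short worked example with made-up batch numbers, calling the remove_batch_gaps defined just above (scitbx flex assumed): the gaps in the numbering are closed while the original ordering of the array is preserved:

from scitbx.array_family import flex

batches = flex.int([7, 1, 5, 5])
print(list(remove_batch_gaps(batches)))  # [2, 0, 1, 1]: 1 -> 0, 5 -> 1, 7 -> 2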
Example #27
 def score(self, rw, rf, rfrw, deltab, w, score_target, score_target_value,
           secondary_target=None):
   sel  = score_target < score_target_value
   sel &= score_target > 0
   if(sel.count(True)>0):
     rw,rf,rfrw,deltab,w = self.select(
       rw=rw,rf=rf,rfrw=rfrw,deltab=deltab,w=w, sel=sel)
   else:
     if(secondary_target is None):
       sel = flex.sort_permutation(score_target)
     else:
       sel = flex.sort_permutation(secondary_target)
     rw,rf,rfrw,deltab,w = self.select(
       rw=rw,rf=rf,rfrw=rfrw,deltab=deltab,w=w, sel=sel)
     #
     rw     = flex.double([rw    [0]])
     rf     = flex.double([rf    [0]])
     rfrw   = flex.double([rfrw  [0]])
     deltab = flex.double([deltab[0]])
     w      = flex.double([w     [0]])
   return rw, rf, rfrw, deltab, w
Example #28
def remove_batch_gaps(batches):
    perm = flex.sort_permutation(batches)
    new_batches = flex.int(batches.size(), -1)
    sorted_batches = batches.select(perm)
    curr_batch = -1
    new_batch = -1
    for i, b in enumerate(sorted_batches):
        if b != curr_batch:
            curr_batch = b
            new_batch += 1
        new_batches[perm[i]] = new_batch
    return new_batches
Example #29
def exercise_optimise_shelxl_weights():
  def calc_goof(fo2, fc, w, k, n_params):
    fc2 = fc.as_intensity_array()
    w = w(fo2.data(), fo2.sigmas(), fc2.data(), k)
    return math.sqrt(flex.sum(
      w * flex.pow2(fo2.data() - k*fc2.data()))/(fo2.size() - n_params))
  xs = smtbx.development.sucrose()
  k = 0.05 + 10 * flex.random_double()
  fc = xs.structure_factors(anomalous_flag=False, d_min=0.7).f_calc()
  fo = fc.as_amplitude_array()
  fo = fo.customized_copy(data=fo.data()*math.sqrt(k))
  fo = fo.customized_copy(sigmas=0.03*fo.data())
  sigmas = fo.sigmas()
  for i in range(fo.size()):
    fo.data()[i] += 2 * scitbx.random.variate(
      scitbx.random.normal_distribution(sigma=sigmas[i]))() \
      + 0.5*random.random()
  fo2 = fo.as_intensity_array()
  fc2 = fc.as_intensity_array()
  w = least_squares.mainstream_shelx_weighting(a=0.1)
  s = calc_goof(fo2, fc, w, k, xs.n_parameters())
  w2 = w.optimise_parameters(fo2, fc2, k, xs.n_parameters())
  s2 = calc_goof(fo2, fc, w2, k, xs.n_parameters())
  # sort data and setup binning by fc/fc_max
  fc_sq = fc.as_intensity_array()
  fc_sq_over_fc_sq_max = fc_sq.data()/flex.max(fc_sq.data())
  permutation = flex.sort_permutation(fc_sq_over_fc_sq_max)
  fc_sq_over_fc_sq_max = fc_sq.customized_copy(
    data=fc_sq_over_fc_sq_max).select(permutation)
  fc_sq = fc_sq.select(permutation)
  fo_sq = fo2.select(permutation)
  n_bins = 10
  bin_max = 0
  bin_limits = flex.size_t(1, 0)
  bin_count = flex.size_t()
  for i in range(n_bins):
    bin_limits.append(int(math.ceil((i+1) * fc_sq.size()/n_bins)))
    bin_count.append(bin_limits[i+1] - bin_limits[i])
  goofs_w = flex.double()
  goofs_w2 = flex.double()
  for i_bin in range(n_bins):
    sel = flex.size_t_range(bin_limits[i_bin], bin_limits[i_bin+1])
    goofs_w2.append(calc_goof(fo_sq.select(sel),
                              fc_sq.select(sel),
                              w2, k, xs.n_parameters()))
    goofs_w.append(calc_goof(fo_sq.select(sel),
                              fc_sq.select(sel),
                              w, k, xs.n_parameters()))
  a = flex.mean_and_variance(goofs_w).unweighted_sample_variance()
  b = flex.mean_and_variance(goofs_w2).unweighted_sample_variance()
  assert a > b or abs(1-s) > abs(1-s2)
  assert a > b # flat analysis of variance
  assert abs(1-s) > abs(1-s2) # GooF close to 1
Example #30
 def score(self, rw, rf, rfrw, deltab, w, score_target, score_target_value,
           secondary_target=None):
   sel  = score_target < score_target_value
   sel &= score_target > 0
   if(sel.count(True)>0):
     rw,rf,rfrw,deltab,w = self.select(
       rw=rw,rf=rf,rfrw=rfrw,deltab=deltab,w=w, sel=sel)
   else:
     if(secondary_target is None):
       sel = flex.sort_permutation(score_target)
     else:
       sel = flex.sort_permutation(secondary_target)
     rw,rf,rfrw,deltab,w = self.select(
       rw=rw,rf=rf,rfrw=rfrw,deltab=deltab,w=w, sel=sel)
     #
     rw     = flex.double([rw    [0]])
     rf     = flex.double([rf    [0]])
     rfrw   = flex.double([rfrw  [0]])
     deltab = flex.double([deltab[0]])
     w      = flex.double([w     [0]])
   return rw, rf, rfrw, deltab, w
Example #31
 def plot_rij_cumulative_frequency(self, plot_name=None):
   rij = self.rij_matrix.as_1d()
   perm = flex.sort_permutation(rij)
   from matplotlib import pyplot as plt
   fig = plt.figure(figsize=(10,8))
   plt.clf()
   plt.plot(rij.select(perm), flex.int_range(perm.size()))
   plt.xlabel(r'$r_{ij}$')
    plt.ylabel('Cumulative frequency')
   if plot_name is not None:
     plt.savefig(plot_name)
   else:
     plt.show()
Example #32
 def __init__(self, unit_cell, map_data, radius, shell, site_frac):
     from cctbx import maptbx
     obj = maptbx.grid_points_in_sphere_around_atom_and_distances(
         unit_cell=unit_cell,
         data=map_data,
         radius=radius,
         shell=shell,
         site_frac=site_frac)
     data = obj.data_at_grid_points()
     dist = obj.distances()
     p = flex.sort_permutation(dist)
     self.data_ = data.select(p)
     self.dist_ = dist.select(p)
Example #33
 def sorted_type_index_pairs(self, heaviest_first=True):
   ugs = self.unique_gaussians_as_list()
   pairs = []
   sf0s = flex.double()
   for t,i in self.type_index_pairs_as_dict().items():
     pairs.append((t,i))
     gaussian = ugs[i]
     if (gaussian is None):
       sf0s.append(0)
     else:
       sf0s.append(gaussian.at_stol(0))
   perm = flex.sort_permutation(sf0s, reverse=heaviest_first)
   return flex.select(pairs, permutation=perm)
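The last two lines above sort a plain Python list of (type, index) pairs by a parallel flex array. A minimal sketch of that pattern with made-up values, assuming scitbx flex:

from scitbx.array_family import flex

pairs = [("O", 0), ("H", 1), ("Fe", 2)]
sf0s = flex.double([8.0, 1.0, 26.0])              # stand-in f(0) values
perm = flex.sort_permutation(sf0s, reverse=True)  # heaviest first
print(flex.select(pairs, permutation=perm))       # Fe first, then O, then H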
Example #34
 def get_table(self):
   from libtbx import table_utils
   rows = [["dataset", "batches", "delta_cc_i", "sigma"]]
   labels = self._labels()
   normalised_score = self._normalised_delta_cc_i()
   perm = flex.sort_permutation(self.delta_cc)
   for i in perm:
     bmin = flex.min(self.batches[i].data())
     bmax = flex.max(self.batches[i].data())
     rows.append(
       [str(labels[i]), '%i to %i' %(bmin, bmax),
        '% .3f' %self.delta_cc[i], '% .2f' %normalised_score[i]])
   return table_utils.format(rows, has_header=True, prefix="|", postfix="|")
Example #35
    def find_peaks(self):
        grid_real_binary = self.grid_real.deep_copy()
        rmsd = math.sqrt(
            flex.mean(
                flex.pow2(grid_real_binary.as_1d() -
                          flex.mean(grid_real_binary.as_1d()))))
        grid_real_binary.set_selected(
            grid_real_binary < (self.params.rmsd_cutoff) * rmsd, 0)
        grid_real_binary.as_1d().set_selected(grid_real_binary.as_1d() > 0, 1)
        grid_real_binary = grid_real_binary.iround()
        from cctbx import masks
        flood_fill = masks.flood_fill(grid_real_binary, self.fft_cell)
        if flood_fill.n_voids() < 4:
            # Require at least a peak at the origin and one peak for each basis vector
            raise Sorry(
                "Indexing failed: fft3d peak search failed to find sufficient number of peaks."
            )
        # the peak at the origin might have a significantly larger volume than the
        # rest so exclude this peak from determining maximum volume
        isel = (flood_fill.grid_points_per_void() > int(
            self.params.fft3d.peak_volume_cutoff *
            flex.max(flood_fill.grid_points_per_void()[1:]))).iselection()

        if self.params.optimise_initial_basis_vectors:
            self.volumes = flood_fill.grid_points_per_void().select(isel)
            sites_cart = flood_fill.centres_of_mass_cart().select(isel)
            sites_cart_optimised = optimise_basis_vectors(
                self.reflections['rlp'].select(
                    self.reflections_used_for_indexing), sites_cart)

            self.sites = self.fft_cell.fractionalize(sites_cart_optimised)

            diffs = (sites_cart_optimised - sites_cart)
            norms = diffs.norms()
            flex.min_max_mean_double(norms).show()
            perm = flex.sort_permutation(norms, reverse=True)
            for p in perm[:10]:
                logger.debug(sites_cart[p], sites_cart_optimised[p], norms[p])

            # only use those vectors which haven't shifted too far from starting point
            sel = norms < (5 * self.fft_cell.parameters()[0] /
                           self.gridding[0])
            self.sites = self.sites.select(sel)
            self.volumes = self.volumes.select(sel)
            #diff = (self.sites - flood_fill.centres_of_mass_frac().select(isel))
            #flex.min_max_mean_double(diff.norms()).show()

        else:
            self.sites = flood_fill.centres_of_mass_frac().select(isel)
            self.volumes = flood_fill.grid_points_per_void().select(isel)
Example #36
def make_peak_dict(peaks, selection, obs_map, cutoff):
  result = {}
  for i in flex.sort_permutation(data=peaks.iseqs_of_closest_atoms):
    s = peaks.sites[i]
    h = peaks.heights[i]
    obsh = obs_map.eight_point_interpolation(s)
    if obsh<cutoff:
      continue
    i_seq = peaks.iseqs_of_closest_atoms[i]
    if(selection[i_seq]):
      if result.has_key(i_seq):
        result[i_seq].extend( [(h, s)] )
      else:
        result[i_seq] = [(h, s)]
  return result
Example #37
def make_peak_dict(peaks, selection, obs_map, cutoff):
    result = {}
    for i in flex.sort_permutation(data=peaks.iseqs_of_closest_atoms):
        s = peaks.sites[i]
        h = peaks.heights[i]
        obsh = obs_map.eight_point_interpolation(s)
        if obsh < cutoff:
            continue
        i_seq = peaks.iseqs_of_closest_atoms[i]
        if (selection[i_seq]):
            if i_seq in result:
                result[i_seq].extend([(h, s)])
            else:
                result[i_seq] = [(h, s)]
    return result
Example #38
def rmerge_vs_batch(intensities, batches):
  assert intensities.size() == batches.size()

  intensities = intensities.map_to_asu()

  bins = []
  data = []

  merging = intensities.merge_equivalents()
  merged_intensities = merging.array()

  perm = flex.sort_permutation(batches.data())
  batches = batches.data().select(perm)
  intensities = intensities.select(perm)

  from cctbx import miller

  matches = miller.match_multi_indices(
    merged_intensities.indices(), intensities.indices())
  pairs = matches.pairs()

  i_batch_start = 0
  current_batch = flex.min(batches)
  n_ref = batches.size()
  for i_ref in range(n_ref + 1):
    if i_ref == n_ref or batches[i_ref] != current_batch:
      assert batches[i_batch_start:i_ref].all_eq(current_batch)

      numerator = 0
      denominator = 0

      for p in pairs[i_batch_start:i_ref]:
        unmerged_Ij = intensities.data()[p[1]]
        merged_Ij = merged_intensities.data()[p[0]]
        numerator += abs(unmerged_Ij - merged_Ij)
        denominator += unmerged_Ij

      bins.append(current_batch)
      if denominator > 0:
        data.append(numerator / denominator)
      else:
        data.append(0)

      i_batch_start = i_ref
      if i_ref < n_ref:
        current_batch = batches[i_batch_start]

  return batch_binned_data(bins, data)
Example #39
def exercise_average_densities(space_group_info, d_min=1.5):
    structure = random_structure.xray_structure(space_group_info,
                                                elements=("C", "H", "O", "Cl"),
                                                volume_per_atom=500,
                                                min_distance=5)
    f_calc = structure.structure_factors(anomalous_flag=False,
                                         d_min=d_min,
                                         algorithm="direct").f_calc()
    map = f_calc.fft_map().real_map_unpadded()
    for radius in [1, 2]:
        densities = maptbx.average_densities(unit_cell=structure.unit_cell(),
                                             data=map,
                                             sites_frac=structure.sites_frac(),
                                             radius=radius)
        perm = flex.sort_permutation(data=densities, reverse=True)
        assert list(perm) == [3, 2, 0, 1]
Example #40
File: tst.py Project: stufisher/Proton8
def exercise_01():
  file = libtbx.env.find_in_repositories(relative_path=
    "chem_data/polygon_data/all_mvd.pickle", test=os.path.isfile)
  database_dict = easy_pickle.load(file)
  #
  twinned = database_dict["twinned"]
  sel  = twinned != "none"
  sel &= twinned != "false"
  n_twinned = sel.count(True)
  print "TWINNED: %d (percent: %6.2f), TOTAL: %d" % (n_twinned,
    n_twinned*100./sel.size(), sel.size())
  r_work_pdb = database_dict["pdb_header_r_work"]
  r_work_cutoff = database_dict["r_work_cutoffs"]
  r_work_re_computed = database_dict["r_work"]
  name = database_dict["pdb_code"]
  #
  sel &= r_work_cutoff != "none"
  sel &= r_work_pdb != "none"
  #
  r_work_pdb         = r_work_pdb.select(sel)
  r_work_cutoff      = r_work_cutoff.select(sel)
  r_work_re_computed = r_work_re_computed.select(sel)
  name               = name.select(sel)
  twinned            = twinned.select(sel)
  #
  def str_to_float(x):
    tmp = flex.double()
    for x_ in x:
      tmp.append(float(x_))
    return tmp
  #
  r_work_cutoff = str_to_float(r_work_cutoff)
  r_work_re_computed = str_to_float(r_work_re_computed)
  r_work_pdb = str_to_float(r_work_pdb)
  #
  delta = (r_work_cutoff - r_work_pdb)*100.
  #
  sp = flex.sort_permutation(delta)
  name               = name         .select(sp)
  delta              = delta        .select(sp)
  r_work_cutoff      = r_work_cutoff.select(sp)
  r_work_pdb         = r_work_pdb   .select(sp)
  r_work_re_computed = r_work_re_computed.select(sp)
  twinned            = twinned.select(sp)
  #
  for n,d,rwc,rwp,rw,t in zip(name,delta,r_work_cutoff,r_work_pdb, r_work_re_computed, twinned):
    print "%s diff=%6.2f rw_c=%6.4f rw_p=%6.4f rw_ad=%6.4f %s" % (n,d,rwc,rwp,rw, t)
Example #41
 def __init__(self, unit_cell,
                    map_data,
                    radius,
                    shell,
                    site_frac):
   from cctbx import maptbx
   obj = maptbx.grid_points_in_sphere_around_atom_and_distances(
                                                        unit_cell = unit_cell,
                                                        data      = map_data,
                                                        radius    = radius,
                                                        shell     = shell,
                                                        site_frac = site_frac)
   data = obj.data_at_grid_points()
   dist = obj.distances()
   p = flex.sort_permutation(dist)
   self.data_ = data.select(p)
   self.dist_ = dist.select(p)
Example #42
File: DeltaCcHalf.py Project: isikhar/xia2
 def get_table(self, html=False):
     if html:
         delta_cc_half_header = u"Delta CC<sub>½</sub>"
     else:
         delta_cc_half_header = u"Delta CC½"
     rows = [["Dataset", "Batches", delta_cc_half_header, u"σ"]]
     normalised_score = self._normalised_delta_cc_i()
     perm = flex.sort_permutation(self.delta_cc)
     for i in perm:
         bmin, bmax = self._group_to_batches[i]
         rows.append([
             str(self._group_to_dataset_id[i]),
             "%i to %i" % (bmin, bmax),
             "% .3f" % self.delta_cc[i],
             "% .2f" % normalised_score[i],
         ])
     return rows
Example #43
def run():
    two_folds = enumerate_reduced_cell_two_folds()
    assert len(two_folds) == 81
    deltas = flex.double()
    perturbations = flex.double()
    for fudge_factor in [0.002, 0.01, 0.02, 0.05, 0.1]:
        sample(two_folds=two_folds,
               fudge_factor=fudge_factor,
               deltas=deltas,
               perturbations=perturbations)
    perm = flex.sort_permutation(data=deltas)
    deltas = deltas.select(perm)
    perturbations = perturbations.select(perm)
    f = open("le_page_1982_vs_lebedev_2005_plot", "w")
    for x, y in zip(deltas, perturbations):
        print >> f, x, y
    print "OK"
Example #44
 def initialize_increments(self,image_number=0):
   #initialize a data structure that contains possible vectors
   # background_pixel - spot_center
   # consider a large box 4x as large as the presumptive mask.
   from scitbx.array_family import flex
   Incr = []
   Distsq = flex.double()
   self.sorted = [] # a generic list of points close in distance to a central point
   if self.mask_focus[image_number] == None: return
   for i in range(-self.mask_focus[image_number][0],1+self.mask_focus[image_number][0]):
     for j in range(-self.mask_focus[image_number][1],1+self.mask_focus[image_number][1]):
       Incr.append(matrix.col((i,j)))
       Distsq.append(i*i+j*j)
   order = flex.sort_permutation(Distsq)
   for i in range(len(order)):
     #print i,order[i],Distsq[order[i]],Incr[order[i]]
     self.sorted.append(Incr[order[i]])
Example #45
 def initialize_increments(self,image_number=0):
   #initialize a data structure that contains possible vectors
   # background_pixel - spot_center
   # consider a large box 4x as large as the presumptive mask.
   from scitbx.array_family import flex
   Incr = []
   Distsq = flex.double()
   self.sorted = [] # a generic list of points close in distance to a central point
   if self.mask_focus[image_number] == None: return
   for i in xrange(-self.mask_focus[image_number][0],1+self.mask_focus[image_number][0]):
     for j in xrange(-self.mask_focus[image_number][1],1+self.mask_focus[image_number][1]):
       Incr.append(matrix.col((i,j)))
       Distsq.append(i*i+j*j)
   order = flex.sort_permutation(Distsq)
   for i in xrange(len(order)):
     #print i,order[i],Distsq[order[i]],Incr[order[i]]
     self.sorted.append(Incr[order[i]])
Example #46
def run():
  two_folds = enumerate_reduced_cell_two_folds()
  assert len(two_folds) == 81
  deltas = flex.double()
  perturbations = flex.double()
  for fudge_factor in [0.002, 0.01, 0.02, 0.05, 0.1]:
    sample(
      two_folds=two_folds,
      fudge_factor=fudge_factor,
      deltas=deltas,
      perturbations=perturbations)
  perm = flex.sort_permutation(data=deltas)
  deltas = deltas.select(perm)
  perturbations = perturbations.select(perm)
  f = open("le_page_1982_vs_lebedev_2005_plot", "w")
  for x,y in zip(deltas, perturbations):
    print >> f, x, y
  print "OK"
Example #47
 def __init__(self, x, i_obs, F, use_curvatures):
     self.x = x
     self.i_obs = i_obs
     self.F = F
     self.t = None
     self.g = None
     self.d = None
      # Needed to sum from small to large to prevent loss of precision
     s = flex.sort_permutation(self.i_obs.data())
     self.i_obs = self.i_obs.select(s)
     self.F = [f.select(s) for f in self.F]
     #
     self.sum_i_obs = flex.sum(
         self.i_obs.data())  # needed for Python version
     self.use_curvatures = use_curvatures
     self.tgo = mosaic_ext.alg2_tg(F=[f.data() for f in self.F],
                                   i_obs=self.i_obs.data())
     self.update_target_and_grads(x=x)
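The comment about summing from small to large refers to floating-point accumulation: small terms added after a large one can be rounded away entirely. A tiny plain-Python illustration with made-up numbers:

def naive_sum(xs):
    total = 0.0
    for x in xs:
        total += x
    return total

values = [1.0e16] + [1.0] * 1000
print(naive_sum(values))          # 1e+16: every 1.0 is rounded away
print(naive_sum(sorted(values)))  # ~1.0000000000001e+16: the small terms survive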
Example #48
File: fft3d.py Project: dials/dials
  def find_peaks(self):
    grid_real_binary = self.grid_real.deep_copy()
    rmsd = math.sqrt(
      flex.mean(flex.pow2(grid_real_binary.as_1d()-flex.mean(grid_real_binary.as_1d()))))
    grid_real_binary.set_selected(grid_real_binary < (self.params.rmsd_cutoff)*rmsd, 0)
    grid_real_binary.as_1d().set_selected(grid_real_binary.as_1d() > 0, 1)
    grid_real_binary = grid_real_binary.iround()
    from cctbx import masks
    flood_fill = masks.flood_fill(grid_real_binary, self.fft_cell)
    if flood_fill.n_voids() < 4:
      # Require at least a peak at the origin and one peak for each basis vector
      raise Sorry("Indexing failed: fft3d peak search failed to find sufficient number of peaks.")
    # the peak at the origin might have a significantly larger volume than the
    # rest so exclude this peak from determining maximum volume
    isel = (flood_fill.grid_points_per_void() > int(
        self.params.fft3d.peak_volume_cutoff * flex.max(
          flood_fill.grid_points_per_void()[1:]))).iselection()

    if self.params.optimise_initial_basis_vectors:
      self.volumes = flood_fill.grid_points_per_void().select(isel)
      sites_cart = flood_fill.centres_of_mass_cart().select(isel)
      sites_cart_optimised = optimise_basis_vectors(
        self.reflections['rlp'].select(self.reflections_used_for_indexing),
        sites_cart)

      self.sites = self.fft_cell.fractionalize(sites_cart_optimised)

      diffs = (sites_cart_optimised - sites_cart)
      norms = diffs.norms()
      flex.min_max_mean_double(norms).show()
      perm = flex.sort_permutation(norms, reverse=True)
      for p in perm[:10]:
        logger.debug(sites_cart[p], sites_cart_optimised[p], norms[p])

      # only use those vectors which haven't shifted too far from starting point
      sel = norms < (5 * self.fft_cell.parameters()[0]/self.gridding[0])
      self.sites = self.sites.select(sel)
      self.volumes = self.volumes.select(sel)
      #diff = (self.sites - flood_fill.centres_of_mass_frac().select(isel))
      #flex.min_max_mean_double(diff.norms()).show()

    else:
      self.sites = flood_fill.centres_of_mass_frac().select(isel)
      self.volumes = flood_fill.grid_points_per_void().select(isel)
Example #49
def exercise_average_densities(space_group_info, d_min=1.5):
  structure = random_structure.xray_structure(
    space_group_info,
    elements=("C", "H", "O", "Cl"),
    volume_per_atom=500,
    min_distance=5)
  f_calc = structure.structure_factors(
    anomalous_flag=False,
    d_min=d_min,
    algorithm="direct").f_calc()
  map = f_calc.fft_map().real_map_unpadded()
  for radius in [1,2]:
    densities = maptbx.average_densities(
      unit_cell=structure.unit_cell(),
      data=map,
      sites_frac=structure.sites_frac(),
      radius=radius)
    perm = flex.sort_permutation(data=densities, reverse=True)
    assert list(perm) == [3,2,0,1]
Example #50
 def plot_wij_cumulative_frequency(self, plot_name=None):
   if self._weights is None:
     return
   wij = self.wij_matrix.as_1d()
   perm = flex.sort_permutation(wij)
   import scitbx.math
   non_zero_sel = wij > 0
   logger.info('%i (%.1f%%) non-zero elements of Wij matrix' %(
     non_zero_sel.count(True), 100*non_zero_sel.count(True)/non_zero_sel.size()))
   scitbx.math.basic_statistics(wij.select(non_zero_sel)).show(f=debug_handle)
   from matplotlib import pyplot as plt
   fig = plt.figure(figsize=(10,8))
   plt.clf()
   plt.plot(wij.select(perm), flex.int_range(perm.size()))
   plt.xlabel(r'$w_{ij}$')
   plt.ylabel('Cumulative frequency')
   if plot_name is not None:
     plt.savefig(plot_name)
   else:
     plt.show()
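The cumulative-frequency curve above is simply the sorted weights plotted against a running index. A hedged sketch of the same data preparation on a toy array, without matplotlib:

# Prepare cumulative-frequency data for a toy Wij array.
from scitbx.array_family import flex

wij = flex.double([0.0, 0.4, 0.0, 1.2, 0.7])
non_zero_sel = wij > 0
print('%i (%.1f%%) non-zero elements' % (
  non_zero_sel.count(True), 100 * non_zero_sel.count(True) / non_zero_sel.size()))

perm = flex.sort_permutation(wij)
x = wij.select(perm)             # weights in ascending order
y = flex.int_range(perm.size())  # cumulative count of elements <= x
print(list(x), list(y))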
Example #51
0
 def show_mapped(self, pdb_atoms):
   if(self.peaks_ is None): return None
   peaks = self.peaks()
   if(peaks.iseqs_of_closest_atoms is None):
     raise RuntimeError("iseqs_of_closest_atoms is None")
   scatterers = self.fmodel.xray_structure.scatterers()
   assert scatterers.size() == pdb_atoms.size()
   assert peaks.sites.size() == peaks.heights.size()
   assert peaks.heights.size() == peaks.iseqs_of_closest_atoms.size()
   print(file=self.log)
   dist = self.fmodel.xray_structure.unit_cell().distance
   for i in flex.sort_permutation(data=peaks.iseqs_of_closest_atoms):
     s = peaks.sites[i]
     h = peaks.heights[i]
     i_seq = peaks.iseqs_of_closest_atoms[i]
     sc = scatterers[i_seq]
     d = dist(s, sc.site)
     element = sc.element_symbol()
     print("peak= %8.3f closest distance to %s = %8.3f" % (
       h, pdb_atoms[i_seq].id_str(), d), file=self.log)
     assert d <= self.params.map_next_to_model.max_model_peak_dist
     assert d >= self.params.map_next_to_model.min_model_peak_dist
Example #52
0
def cross_check(args):
  quick_summaries = []
  for file_name in args:
    quick_summaries.append(easy_pickle.load(file_name))
  assert len(quick_summaries) == 2
  lines = []
  max_of_errors = flex.double()
  atomic_numbers = flex.double()
  n_less = 0
  n_greater = 0
  n_equal = 0
  for label_1,error_1 in quick_summaries[0].items():
    error_2 = quick_summaries[1].get(label_1, None)
    if (error_2 is not None):
      line = "%-10s %7.4f %7.4f" % (label_1, error_1, error_2)
      if   (error_1 < error_2):
        line += " less    %7.4f" % (error_2/error_1)
        n_less += 1
      elif (error_1 > error_2):
        line += " greater %7.4f" % (error_1/error_2)
        n_greater += 1
      else:
        line += " equal"
        n_equal += 1
      lines.append(line)
      max_of_errors.append(max(error_1, error_2))
      atomic_numbers.append(
        tiny_pse.table(label_1.split("_")[0]).atomic_number())
  for sort_key,reverse in [(max_of_errors,True), (atomic_numbers,False)]:
    perm = flex.sort_permutation(data=sort_key, reverse=reverse)
    perm_lines = flex.select(lines, perm)
    for line in perm_lines:
      print(line)
    print()
  print("n_less:", n_less)
  print("n_greater:", n_greater)
  print("n_equal:", n_equal)
  print("total:", n_less + n_greater + n_equal)
Example #53
0
File: flex.py Project: dials/dials
  def sort(self, name, reverse=False, order=None):
    '''
    Sort the reflection table by a key.

    :param name: The name of the column
    :param reverse: Reverse the sort order
    :param order: For multi-element items, specify the element order

    '''
    if type(self[name]) in [
        vec2_double,
        vec3_double,
        mat3_double,
        int6,
        miller_index ]:
      data = self[name]
      if order is None:
        perm = flex.size_t(
          sorted(
            range(len(self)),
            key=lambda x: data[x],
            reverse=reverse))
      else:
        assert len(order) == len(data[0])
        # Python 3: sorted() no longer accepts cmp, so reorder the tuple
        # elements according to 'order' inside the key function instead.
        perm = flex.size_t(
          sorted(
            range(len(self)),
            key=lambda x: tuple(data[x][i] for i in order),
            reverse=reverse))
    else:
      perm = flex.sort_permutation(self[name], reverse=reverse)
    self.reorder(perm)
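A hypothetical usage sketch for the sort() method above, assuming a dials reflection table; the column names and values are only illustrative:

# Sort a small reflection table by a scalar column and by a multi-element column.
from dials.array_family import flex

table = flex.reflection_table()
table["miller_index"] = flex.miller_index([(1, 0, 2), (0, 3, 1), (1, 0, 1)])
table["intensity"] = flex.double([10.0, 5.0, 7.0])

table.sort("intensity", reverse=True)        # scalar column: uses flex.sort_permutation
table.sort("miller_index", order=(2, 0, 1))  # multi-element: compare l, then h, then k
print(list(table["miller_index"]))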
Example #54
0
 def flipped_fraction_as_delta(self, fraction):
   rho = self.real_map_unpadded(in_place=False).as_1d()
   p = flex.sort_permutation(rho)
   sorted_rho = rho.select(p)
   return sorted_rho[int(fraction * sorted_rho.size())]
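flipped_fraction_as_delta is a percentile lookup by sorting; a minimal sketch of the same calculation on a toy density array:

# Pick the value below which a given fraction of the grid points fall.
from scitbx.array_family import flex

rho = flex.double([0.5, -1.2, 0.1, 2.0, -0.3, 0.9])
fraction = 0.5

perm = flex.sort_permutation(rho)
sorted_rho = rho.select(perm)
delta = sorted_rho[int(fraction * sorted_rho.size())]
print(delta)  # 0.5 for this toy array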
Example #55
0
def sort_triple(triple):
    order = flex.sort_permutation(flex.int(triple), False)
    sorted_triple = (triple[order[0]], triple[order[1]], triple[order[2]])

    return sorted_triple
Example #56
0
  def analyze_aniso_correction(self, n_check=2000, p_check=0.25, level=3,
      z_level=9):
    self.min_d = None
    self.max_d = None
    self.level = None
    self.z_level = None
    self.z_low = None
    self.z_high = None
    self.z_tot = None
    self.mean_isigi = None
    self.mean_count = None
    self.mean_isigi_low_correction_factor = None
    self.frac_below_low_correction = None
    self.mean_isigi_high_correction_factor = None
    self.frac_below_high_correction = None
    if self.work_array.sigmas() is None:
       return "No further analysis of anisotropy carried out because of absence of sigmas"

    correction_factors = self.work_array.customized_copy(
                 data=self.work_array.data()*0.0+1.0, sigmas=None )
    correction_factors = anisotropic_correction(
      correction_factors,0.0,self.u_star ).data()
    self.work_array = self.work_array.f_as_f_sq()
    isigi = self.work_array.data() / (
        self.work_array.sigmas()+max(1e-8,flex.min(self.work_array.sigmas())))
    d_spacings = self.work_array.d_spacings().data().as_double()
    if d_spacings.size() <= n_check:
      n_check = d_spacings.size()-2
    d_sort   = flex.sort_permutation( d_spacings )
    d_select = d_sort[0:n_check]
    min_d = d_spacings[ d_select[0] ]
    max_d = d_spacings[ d_select[ n_check-1] ]
    isigi = isigi.select( d_select )
    mean_isigi = flex.mean( isigi )
    observed_count = flex.bool( isigi > level ).as_double()
    mean_count = flex.mean( observed_count )
    correction_factors = correction_factors.select( d_select )
    isigi_rank      = flex.sort_permutation(isigi)
    correction_rank = flex.sort_permutation(correction_factors, reverse=True)
    n_again = int(correction_rank.size()*p_check )
    sel_hc = correction_rank[0:n_again]
    sel_lc = correction_rank[n_again:]
    mean_isigi_low_correction_factor  = flex.mean(isigi.select(sel_lc) )
    mean_isigi_high_correction_factor = flex.mean(isigi.select(sel_hc) )
    frac_below_low_correction        = flex.mean(observed_count.select(sel_lc))
    frac_below_high_correction       = flex.mean(observed_count.select(sel_hc))
    mu = flex.mean( observed_count )
    var = math.sqrt(mu*(1.0-mu)/n_again)
    z_low  = abs(frac_below_low_correction-mean_count)/max(1e-8,var)
    z_high = abs(frac_below_high_correction-mean_count)/max(1e-8,var)
    z_tot  = math.sqrt( (z_low*z_low + z_high*z_high) )
    # save some of these for later
    self.min_d = min_d
    self.max_d = max_d
    self.level = level
    self.z_level = z_level
    self.z_low = z_low
    self.z_high = z_high
    self.z_tot = z_tot
    self.mean_isigi = mean_isigi
    self.mean_count = mean_count
    self.mean_isigi_low_correction_factor = mean_isigi_low_correction_factor
    self.frac_below_low_correction = frac_below_low_correction
    self.mean_isigi_high_correction_factor = mean_isigi_high_correction_factor
    self.frac_below_high_correction = frac_below_high_correction
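The anisotropy check above ranks reflections by correction factor and compares I/sigI over the top and bottom fractions. A hedged sketch of that rank-and-split pattern with invented numbers:

# Rank by one array, split the permutation, and compare means of a second array.
from scitbx.array_family import flex

correction = flex.double([0.9, 0.2, 0.7, 0.4, 0.8, 0.1])
isigi = flex.double([3.0, 12.0, 5.0, 9.0, 4.0, 15.0])
p_check = 0.5

rank = flex.sort_permutation(correction, reverse=True)
n = int(rank.size() * p_check)
sel_high = rank[0:n]  # most strongly corrected reflections
sel_low = rank[n:]    # least corrected reflections
print(flex.mean(isigi.select(sel_high)), flex.mean(isigi.select(sel_low)))  # 4.0 12.0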
Example #57
0
  def __init__(self,
               miller_array,
               kernel_width=None,
               n_bins=23,
               n_term=13,
               d_star_sq_low=None,
               d_star_sq_high=None,
               auto_kernel=False,
               number_of_sorted_reflections_for_auto_kernel=50):
    ## auto_kernel is either False, True, or a specific integer
    if kernel_width is None:
      assert (auto_kernel is not False)
    if auto_kernel is not False:
      assert (kernel_width is None)
    assert miller_array.size()>0
    ## intensity arrays please
    work_array = None
    if not miller_array.is_real_array():
      raise RuntimeError("Please provide real arrays only")
      ## I might have to change this upper condition
    if miller_array.is_xray_amplitude_array():
      work_array = miller_array.f_as_f_sq()
    if miller_array.is_xray_intensity_array():
      work_array = miller_array.deep_copy()
      work_array = work_array.set_observation_type(miller_array)
    ## If type is not intensity or amplitude
    ## raise an exception please
    if not miller_array.is_xray_intensity_array():
      if not miller_array.is_xray_amplitude_array():
        raise RuntimeError("Observation type unknown")
    ## declare some shorthands
    I_obs = work_array.data()
    epsilons = work_array.epsilons().data().as_double()
    d_star_sq_hkl = work_array.d_spacings().data()
    d_star_sq_hkl = 1.0/(d_star_sq_hkl*d_star_sq_hkl)
    ## Set up some limits
    if d_star_sq_low is None:
      d_star_sq_low = flex.min(d_star_sq_hkl)
    if d_star_sq_high is None:
      d_star_sq_high = flex.max(d_star_sq_hkl)
    ## A feeble attempt to determine an appropriate kernel width
    ## that seems to work reasonably in practice
    self.kernel_width=kernel_width
    if auto_kernel is not False:
      ## get the d_star_sq_array and sort it
      sort_permut = flex.sort_permutation(d_star_sq_hkl)
      ##
      if auto_kernel==True:
        number=number_of_sorted_reflections_for_auto_kernel
      else:
        number=int(auto_kernel)
      if number > d_star_sq_hkl.size():
        number = d_star_sq_hkl.size()-1
      self.kernel_width = d_star_sq_hkl[sort_permut[number]]-d_star_sq_low
      assert self.kernel_width > 0
    ## Making the d_star_sq_array
    assert (n_bins>1) ## ensure that there is more than one bin for interpolation
    self.d_star_sq_array = chebyshev_lsq_fit.chebyshev_nodes(
      n=n_bins,
      low=d_star_sq_low,
      high=d_star_sq_high,
      include_limits=True)

    ## Now get the average intensity please
    ##
    ## This step can be reasonably time consuming
    self.mean_I_array = scaling.kernel_normalisation(
      d_star_sq_hkl = d_star_sq_hkl,
      I_hkl = I_obs,
      epsilon = epsilons,
      d_star_sq_array = self.d_star_sq_array,
      kernel_width = self.kernel_width
      )
    self.var_I_array = scaling.kernel_normalisation(
      d_star_sq_hkl = d_star_sq_hkl,
      I_hkl = I_obs*I_obs,
      epsilon = epsilons*epsilons,
      d_star_sq_array = self.d_star_sq_array,
      kernel_width = self.kernel_width
      )
    self.var_I_array = self.var_I_array - self.mean_I_array*self.mean_I_array
    self.weight_sum = self.var_I_array = scaling.kernel_normalisation(
      d_star_sq_hkl = d_star_sq_hkl,
      I_hkl = I_obs*0.0+1.0,
      epsilon = epsilons*0.0+1.0,
      d_star_sq_array = self.d_star_sq_array,
      kernel_width = self.kernel_width
      )
    eps = 1e-16 # XXX Maybe this should be larger?
    self.bin_selection = (self.mean_I_array > eps)
    sel_pos = self.bin_selection.iselection()
    # FIXME rare bug: this crashes when the majority of the data are zero,
    # e.g. because resolution limit was set too high and F/I filled in with 0.
    # it would be good to catch such cases in advance by inspecting the binned
    # values, and raise a different error message.
    assert sel_pos.size() > 0
    if (sel_pos.size() < self.mean_I_array.size() / 2) :
      raise Sorry("Analysis could not be continued because more than half "+
        "of the data have values below 1e-16.  This usually indicates either "+
        "an inappropriately high resolution cutoff, or an error in the data "+
        "file which artificially creates a higher resolution limit.")
    self.mean_I_array = self.mean_I_array.select(sel_pos)
    self.d_star_sq_array = self.d_star_sq_array.select(sel_pos)
    self.var_I_array = flex.log( self.var_I_array.select( sel_pos ) )
    self.weight_sum = self.weight_sum.select(sel_pos)
    self.mean_I_array = flex.log( self.mean_I_array )
    ## Fit a Chebyshev polynomial please
    normalizer_fit_lsq = chebyshev_lsq_fit.chebyshev_lsq_fit(
      n_term,
      self.d_star_sq_array,
      self.mean_I_array )
    self.normalizer = chebyshev_polynome(
      n_term,
      d_star_sq_low,
      d_star_sq_high,
      normalizer_fit_lsq.coefs)
    var_lsq_fit = chebyshev_lsq_fit.chebyshev_lsq_fit(
      n_term,
      self.d_star_sq_array,
      self.var_I_array )
    self.var_norm = chebyshev_polynome(
      n_term,
      d_star_sq_low,
      d_star_sq_high,
      var_lsq_fit.coefs)
    ws_fit = chebyshev_lsq_fit.chebyshev_lsq_fit(
      n_term,
      self.d_star_sq_array,
      self.weight_sum )
    self.weight_sum = chebyshev_polynome(
      n_term,
      d_star_sq_low,
      d_star_sq_high,
      ws_fit.coefs)

    ## The data will now be normalised using the
    ## Chebyshev polynomial we have just obtained
    self.mean_I_array = flex.exp( self.mean_I_array)
    self.normalizer_for_miller_array =  flex.exp( self.normalizer.f(d_star_sq_hkl) )
    self.var_I_array = flex.exp( self.var_I_array )
    self.var_norm = flex.exp( self.var_norm.f(d_star_sq_hkl) )
    self.weight_sum = flex.exp( self.weight_sum.f(d_star_sq_hkl))
    self.normalised_miller = None
    self.normalised_miller_dev_eps = None
    if work_array.sigmas() is not None:
      self.normalised_miller = work_array.customized_copy(
        data = work_array.data()/self.normalizer_for_miller_array,
        sigmas = work_array.sigmas()/self.normalizer_for_miller_array
        ).set_observation_type(work_array)
      self.normalised_miller_dev_eps = self.normalised_miller.customized_copy(
        data = self.normalised_miller.data()/epsilons,
        sigmas = self.normalised_miller.sigmas()/epsilons)\
        .set_observation_type(work_array)
    else:
      self.normalised_miller = work_array.customized_copy(
        data = work_array.data()/self.normalizer_for_miller_array
        ).set_observation_type(work_array)
      self.normalised_miller_dev_eps = self.normalised_miller.customized_copy(
        data = self.normalised_miller.data()/epsilons)\
        .set_observation_type(work_array)
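The auto_kernel branch above chooses the kernel width as the spread in d*^2 covered by the first 'number' reflections after sorting. A small sketch of that calculation with made-up values:

# Kernel width = d*^2 of the (number)-th sorted reflection minus the minimum d*^2.
from scitbx.array_family import flex

d_star_sq = flex.double([0.30, 0.05, 0.22, 0.11, 0.08, 0.40])
number = 3

perm = flex.sort_permutation(d_star_sq)
d_star_sq_low = flex.min(d_star_sq)
kernel_width = d_star_sq[perm[number]] - d_star_sq_low
print(kernel_width)  # 0.17 for this toy array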
Example #58
0
 if len(unique_reindexing_operators) == 0:
     print("Incompatible unit cells:")
     print("  %2d:" % (i_1 + 1), cache_1.input.info())
     print("  %2d:" % (i_0 + 1), cache_0.input.info())
     print("No comparison.")
     print()
 else:
     ccs = flex.double()
     for cb_op in unique_reindexing_operators:
         similar_array_0 = cache_0.observations.change_basis(cb_op).map_to_asu()
         ccs.append(
             cache_1.observations.correlation(
                 other=similar_array_0, assert_is_similar_symmetry=False
             ).coefficient()
         )
     permutation = flex.sort_permutation(ccs, reverse=True)
     ccs = ccs.select(permutation)
     unique_reindexing_operators = flex.select(unique_reindexing_operators, permutation=permutation)
     for i_cb_op, cb_op, cc in zip(count(), unique_reindexing_operators, ccs):
         combined_cb_op = cache_1.combined_cb_op(other=cache_0, cb_op=cb_op)
         if not combined_cb_op.c().is_unit_mx():
             reindexing_note = "  after reindexing %d using %s" % (i_0 + 1, combined_cb_op.as_hkl())
         else:
             reindexing_note = ""
          print("CC Obs %d %d %6.3f  %s" % (i_1 + 1, i_0 + 1, cc, combined_cb_op.as_hkl()))
          print("Correlation of:")
          print("  %2d:" % (i_1 + 1), cache_1.input.info())
          print("  %2d:" % (i_0 + 1), cache_0.input.info())
          print("Overall correlation: %6.3f%s" % (cc, reindexing_note))
         show_in_bins = False
         if i_cb_op == 0 or (cc >= 0.3 and cc >= ccs[0] - 0.2):
Example #59
0
      frame_no = result_row[0]
      d_min_obs = result_row[1]
      sum_refl_obs = result_row[2]
      cc = result_row[3]

      frame_no_all.append(frame_no)
      d_min_obs_all.append(d_min_obs)
      sum_refl_obs_all.append(sum_refl_obs)
      cc_all.append(cc)
      sort_weight_all.append(cc*sum_refl_obs)

      #print frame_no, d_min_iso, d_min_obs, sum_refl_iso, sum_refl_obs, cc
      #print txt_out


  perm = flex.sort_permutation(sort_weight_all, reverse = True)
  frame_no_all_sort = frame_no_all.select(perm)
  d_min_obs_all_sort = d_min_obs_all.select(perm)
  sum_refl_obs_all_sort = sum_refl_obs_all.select(perm)
  cc_all_sort = cc_all.select(perm)

  n_show_top = 10
  cn_n_show = 0
  for frame_no, d_min_obs, sum_obs, cc in zip(frame_no_all_sort,
        d_min_obs_all_sort, sum_refl_obs_all_sort, cc_all_sort):
    print(frame_no, d_min_obs, sum_obs, cc)
    cn_n_show += 1
    if cn_n_show == n_show_top:
      break
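The example above applies one permutation (by cc times the number of reflections) to several parallel arrays. A compact sketch of the same idea with invented values:

# One sort permutation applied to several parallel flex arrays.
from scitbx.array_family import flex

frame_no = flex.int([101, 102, 103])
cc = flex.double([0.6, 0.9, 0.3])
n_refl = flex.double([500, 400, 800])

perm = flex.sort_permutation(cc * n_refl, reverse=True)  # weight = cc * n_refl
for f, c, n in zip(frame_no.select(perm), cc.select(perm), n_refl.select(perm)):
  print(f, c, n)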