コード例 #1
0
def show_terms(structure, term_table, coseq_dict=None):
  """Print the coordination-sequence terms for each scatterer and a
  weighted-mean summary line.

  For every scatterer the raw term list is printed; if coseq_dict is
  given, its tags whose reference sequences match the scatterer's terms
  (ignoring the leading term) are printed too, or "Unknown" when none
  match.  Finally "TD<k>: <mean>" is printed, where the mean of the
  term sums is weighted by scatterer multiplicity.

  Fix: the original used Python 2 print statements and
  `coseq_dict.keys()` followed by in-place `.sort()`, both of which
  fail on Python 3; ported to the print() function and sorted(), in
  line with the Python 3 variant of this function elsewhere in the file.

  :param structure: object with scatterers() (cctbx-style) -- each
    scatterer must expose .label and .multiplicity()
  :param term_table: one term sequence per scatterer
  :param coseq_dict: optional mapping tag -> list of reference
    coordination sequences
  """
  assert len(term_table) == structure.scatterers().size()
  for scatterer, terms in zip(structure.scatterers(), term_table):
    print(scatterer.label, list(terms), end=' ')
    if coseq_dict is not None:
      # Reference sequences omit the leading term, so skip ours as well.
      terms_to_match = list(terms[1:])
      have_match = False
      for tag in sorted(coseq_dict.keys()):
        for coseq_terms in coseq_dict[tag]:
          # Compare only the overlap of the two sequences.
          n = min(len(coseq_terms), len(terms_to_match))
          if coseq_terms[:n] == terms_to_match[:n]:
            print(tag, end=' ')
            have_match = True
      if not have_match:
        print("Unknown", end=' ')
    print()
  sums_terms = flex.double()
  multiplicities = flex.double()
  for scatterer, terms in zip(structure.scatterers(), term_table):
    sums_terms.append(flex.sum(flex.size_t(list(terms))))
    multiplicities.append(scatterer.multiplicity())
  # NOTE: relies on `terms` from the last loop iteration; term_table
  # must therefore be non-empty (guaranteed by the assert when the
  # structure has scatterers).
  print("TD%d: %.2f" % (
    len(terms)-1, flex.mean_weighted(sums_terms, multiplicities)))
コード例 #2
0
def show_terms(structure, term_table, coseq_dict=None):
    """Report coordination-sequence terms per scatterer, optionally
    labelling each with matching tags from coseq_dict, then print a
    multiplicity-weighted mean of the term sums as "TD<k>: <mean>".
    """
    scatterers = structure.scatterers()
    assert len(term_table) == scatterers.size()
    for scatterer, terms in zip(scatterers, term_table):
        print(scatterer.label, list(terms), end=' ')
        if coseq_dict is not None:
            # Reference sequences drop the leading term.
            target = list(terms[1:])
            matched = False
            for tag in sorted(coseq_dict):
                for reference in coseq_dict[tag]:
                    overlap = min(len(reference), len(target))
                    if reference[:overlap] == target[:overlap]:
                        print(tag, end=' ')
                        matched = True
            if not matched:
                print("Unknown", end=' ')
        print()
    sums_terms = flex.double()
    multiplicities = flex.double()
    for scatterer, terms in zip(scatterers, term_table):
        sums_terms.append(flex.sum(flex.size_t(list(terms))))
        multiplicities.append(scatterer.multiplicity())
    print("TD%d: %.2f" %
          (len(terms) - 1, flex.mean_weighted(sums_terms, multiplicities)))
コード例 #3
0
ファイル: DeltaCcHalf.py プロジェクト: isikhar/xia2
 def _compute_delta_cc_for_dataset(self, intensities):
     """Return cc_overall minus the weighted-mean binned CC1/2 of
     *intensities* (weights are the per-bin reflection counts).

     The CC1/2 estimator (sigma-tau or classic) follows
     self._cc_one_half_method.  The first and last bins are excluded
     from the mean.
     """
     intensities.use_binning(self.binner)
     if self._cc_one_half_method == "sigma_tau":
         binned = intensities.cc_one_half_sigma_tau(
             use_binning=True, return_n_refl=True)
     else:
         binned = intensities.cc_one_half(
             use_binning=True, return_n_refl=True)
     # Drop the outermost bins, then average CC weighted by n_refl.
     inner = binned.data[1:-1]
     cc_values = flex.double(pair[0] for pair in inner)
     weights = flex.double(pair[1] for pair in inner)
     return self.cc_overall - flex.mean_weighted(cc_values, weights)
コード例 #4
0
 def _compute_mean_weighted_cc_half(self, intensities):
     """Return the mean binned CC1/2 of *intensities*, weighted by the
     per-bin reflection count; bins with no data (None) are skipped.

     The CC1/2 estimator (sigma-tau or classic) follows
     self._cc_one_half_method.
     """
     intensities.use_binning(self.binner)
     if self._cc_one_half_method == "sigma_tau":
         binned = intensities.cc_one_half_sigma_tau(
             use_binning=True, return_n_refl=True)
     else:
         binned = intensities.cc_one_half(
             use_binning=True, return_n_refl=True)
     # Each kept entry is a (cc, n_refl) pair; None marks an empty bin.
     kept = [pair for pair in binned.data if pair is not None]
     cc_values = flex.double(pair[0] for pair in kept)
     weights = flex.double(pair[1] for pair in kept)
     return flex.mean_weighted(cc_values, weights)
コード例 #5
0
  def __init__(self, unmerged_intensities, batches_all, n_bins=20, d_min=None,
               cc_one_half_method='sigma_tau', id_to_batches=None):
    """Compute per-dataset delta-CC1/2 statistics.

    For each constituent dataset, CC1/2 is recomputed on the merged
    remainder (all other datasets) and the difference to the overall
    CC1/2 is stored in self.delta_cc, indicating how much each dataset
    contributes to (or degrades) the overall internal consistency.

    Fix: `self.intensities.iteritems()` is Python-2-only and raises
    AttributeError on Python 3; replaced with `.items()`, which behaves
    identically here on both versions.

    :param unmerged_intensities: unmerged miller array of intensities
    :param batches_all: batch array parallel to unmerged_intensities
    :param n_bins: number of resolution bins for the statistics
    :param d_min: currently unused -- kept for interface compatibility
    :param cc_one_half_method: 'sigma_tau' or classic CC1/2 estimator
    :param id_to_batches: optional mapping used to split the batches
      into datasets (passed through to separate_unmerged)
    """
    # Discard reflections with non-positive sigmas before any statistics.
    sel = unmerged_intensities.sigmas() > 0
    unmerged_intensities = unmerged_intensities.select(sel).set_info(
        unmerged_intensities.info())
    batches_all = batches_all.select(sel)

    unmerged_intensities.setup_binner(n_bins=n_bins)
    self.unmerged_intensities = unmerged_intensities
    self.merged_intensities = unmerged_intensities.merge_equivalents().array()

    # Split the combined data back into its constituent datasets.
    separate = separate_unmerged(unmerged_intensities, batches_all,
                                 id_to_batches=id_to_batches)
    self.intensities = separate.intensities
    self.batches = separate.batches
    self.run_id_to_batch_id = separate.run_id_to_batch_id

    from iotbx.merging_statistics import dataset_statistics
    self.merging_statistics = dataset_statistics(
      unmerged_intensities, n_bins=n_bins,
      cc_one_half_significance_level=0.01,
      binning_method='counting_sorted',
      anomalous=True,
      use_internal_variance=False,
      eliminate_sys_absent=False,
      cc_one_half_method=cc_one_half_method)
    if cc_one_half_method == 'sigma_tau':
      cc_overall = self.merging_statistics.cc_one_half_sigma_tau_overall
    else:
      cc_overall = self.merging_statistics.cc_one_half_overall
    self.merging_statistics.show()

    self.delta_cc = flex.double()
    for test_k in self.intensities.keys():
      # Pool every dataset EXCEPT test_k into one array.
      indices_i = flex.miller_index()
      data_i = flex.double()
      sigmas_i = flex.double()
      for k, unmerged in self.intensities.items():
        if k == test_k: continue
        indices_i.extend(unmerged.indices())
        data_i.extend(unmerged.data())
        sigmas_i.extend(unmerged.sigmas())

      unmerged_i = unmerged_intensities.customized_copy(
        indices=indices_i, data=data_i, sigmas=sigmas_i).set_info(
          unmerged_intensities.info())

      unmerged_i.setup_binner_counting_sorted(n_bins=n_bins)
      if cc_one_half_method == 'sigma_tau':
        cc_bins = unmerged_i.cc_one_half_sigma_tau(
          use_binning=True, return_n_refl=True)
      else:
        cc_bins = unmerged_i.cc_one_half(use_binning=True, return_n_refl=True)
      # Weighted mean CC1/2 over the inner bins (weights = n_refl).
      cc_i = flex.mean_weighted(
          flex.double(b[0] for b in cc_bins.data[1:-1]),
          flex.double(b[1] for b in cc_bins.data[1:-1]))

      delta_cc_i = cc_i - cc_overall
      self.delta_cc.append(delta_cc_i)