Example No. 1
def check_image_cluster(
      work_params,
      i_calc,
      reindexing_assistant,
      image_mdls,
      scales_input,
      cluster):
  from scitbx.array_family import flex
  for i_perm in xrange(len(cluster.miis_perms)):
    expected = i_calc.select(cluster.miis_perms[i_perm])
    reconstr = expected.customized_copy(data=cluster.esti_perms[i_perm])
    print "i_perm:", i_perm
    flex.linear_correlation(x=expected.data(), y=reconstr.data()).show_summary()
    r1 = expected.f_sq_as_f().r1_factor(
      other=reconstr.f_sq_as_f(), scale_factor=libtbx.Auto)
    print "r1: %.5f" % r1
    print
  for i_img,i_perm_and_scale in cluster.i_perm_and_scale_by_i_img.items():
    im = image_mdls.array[i_img]
    im.i_perm = i_perm_and_scale.i_perm
    im.scale = i_perm_and_scale.scale
  if (    not work_params.index_and_integrate
      and not work_params.force_unit_spot_intensities):
    image_mdls.check_i_perm_vs_backup(reindexing_assistant)
  cluster_scales = image_mdls.extract_scales()
  print "input vs. cluster scales:"
  flex.linear_correlation(x=scales_input, y=cluster_scales).show_summary()
  print
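Most examples on this page follow the same basic pattern: build two flex.double arrays of equal length, construct flex.linear_correlation, then read coefficient() or print show_summary(). A minimal, self-contained sketch of that pattern (the numeric values are illustrative only and do not come from any example below):

from scitbx.array_family import flex

x = flex.double([1.0, 2.0, 3.0, 4.0, 5.0])
y = flex.double([1.1, 1.9, 3.2, 3.9, 5.1])
corr = flex.linear_correlation(x=x, y=y)
print("CC = %.4f" % corr.coefficient())  # Pearson correlation coefficient
corr.show_summary(prefix="  ")           # prints a short summary, as in the examples here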
Example No. 3
 def cc_work_and_free (self, other) :
   """
   Given a unique array of arbitrary resolution range, extract the equivalent
   reflections from the observed and calculated intensities, and calculate
   CC and R-factor for work and free sets.  Currently, these statistics will
   be None if there are no matching reflections.
   """
   assert (self.i_obs.is_similar_symmetry(other))
   i_obs_sel = self.i_obs.common_set(other=other)
   f_obs_sel = self.f_obs.common_set(other=other)
   i_calc_sel = self.i_calc.common_set(other=other)
   work_sel = self.work_sel.common_set(other=other)
   free_sel = self.free_sel.common_set(other=other)
   if (len(i_obs_sel.data()) == 0) : # XXX should this raise an error?
     return [None] * 4
   i_obs_work = i_obs_sel.select(work_sel.data())
   i_calc_work = i_calc_sel.select(work_sel.data())
   i_obs_free = i_obs_sel.select(free_sel.data())
   i_calc_free = i_calc_sel.select(free_sel.data())
   f_obs_work = f_obs_sel.select(work_sel.data())
   f_obs_free = f_obs_sel.select(free_sel.data())
   if (len(f_obs_work.data()) > 0) and (len(f_obs_free.data()) > 0) :
     from scitbx.array_family import flex
     cc_work = flex.linear_correlation(i_obs_work.data(),
       i_calc_work.data()).coefficient()
     cc_free = flex.linear_correlation(i_obs_free.data(),
       i_calc_free.data()).coefficient()
     r_work = f_obs_work.r1_factor(i_calc_work.f_sq_as_f())
     r_free = f_obs_free.r1_factor(i_calc_free.f_sq_as_f())
     return cc_work, cc_free, r_work, r_free
   return [None] * 4
Example No. 4
def run(prefix="tst_model_map"):
  """
  Exercise phenix.model_map
  """
  # original (zero-origin) map and model
  pdb_inp = iotbx.pdb.input(source_info=None, lines=pdb_str)
  pdb_inp.write_pdb_file(file_name="%s.pdb"%prefix)
  ph = pdb_inp.construct_hierarchy()
  xrs = pdb_inp.xray_structure_simple()
  fc0 = xrs.structure_factors(d_min=3.0).f_calc()
  #
  easy_run.call("phenix.model_map %s.pdb output_file_name_prefix=%s"%(
    prefix, prefix))
  m1 = iotbx.ccp4_map.map_reader(file_name="%s.ccp4"%prefix).data.as_double()
  m2 = iotbx.xplor.map.reader(file_name="%s.xplor"%prefix).data
  #
  fc1 = fc0.structure_factors_from_map(
    map            = m1,
    use_scale      = True,
    anomalous_flag = False,
    use_sg         = False)
  fc2 = fc0.structure_factors_from_map(
    map            = m2,
    use_scale      = True,
    anomalous_flag = False,
    use_sg         = False)
  cc1 = flex.linear_correlation(
    x=abs(fc0).data(), y=abs(fc1).data()).coefficient()
  cc2 = flex.linear_correlation(
    x=abs(fc0).data(), y=abs(fc2).data()).coefficient()
  print cc1, cc2
  assert cc1>0.9999
  assert cc2>0.97 # WHY?
Example No. 5
 def cc_work_and_free(self, other):
     """
 Given a unique array of arbitrary resolution range, extract the equivalent
 reflections from the observed and calculated intensities, and calculate
 CC and R-factor for work and free sets.  Currently, these statistics will
 be None if there are no matching reflections.
 """
     assert (self.i_obs.is_similar_symmetry(other))
     i_obs_sel = self.i_obs.common_set(other=other)
     f_obs_sel = self.f_obs.common_set(other=other)
     i_calc_sel = self.i_calc.common_set(other=other)
     work_sel = self.work_sel.common_set(other=other)
     free_sel = self.free_sel.common_set(other=other)
     if (len(i_obs_sel.data()) == 0):  # XXX should this raise an error?
         return [None] * 4
     i_obs_work = i_obs_sel.select(work_sel.data())
     i_calc_work = i_calc_sel.select(work_sel.data())
     i_obs_free = i_obs_sel.select(free_sel.data())
     i_calc_free = i_calc_sel.select(free_sel.data())
     f_obs_work = f_obs_sel.select(work_sel.data())
     f_obs_free = f_obs_sel.select(free_sel.data())
     if (len(f_obs_work.data()) > 0) and (len(f_obs_free.data()) > 0):
         from scitbx.array_family import flex
         cc_work = flex.linear_correlation(
             i_obs_work.data(), i_calc_work.data()).coefficient()
         cc_free = flex.linear_correlation(
             i_obs_free.data(), i_calc_free.data()).coefficient()
         r_work = f_obs_work.r1_factor(i_calc_work.f_sq_as_f())
         r_free = f_obs_free.r1_factor(i_calc_free.f_sq_as_f())
         return cc_work, cc_free, r_work, r_free
     return [None] * 4
Example No. 6
def test_the_intensity_structure():
  Fe_special_model = Fe_reduced_model
  print(Fe_special_model.fp_fdp_at_wavelength(1.74135))
  Fe_special_model = Fe_oxidized_model
  print(Fe_special_model.fp_fdp_at_wavelength(1.74135))
  Fe_special_model = special_proxy(True)
  print(Fe_special_model.fp_fdp_at_wavelength(1.74135))
  Fe_special_model = special_proxy(False)
  print(Fe_special_model.fp_fdp_at_wavelength(1.74135))

  with (open("model_independent.pickle","rb")) as inp:
    print("reading pickle")
    base = model_independent = pickle.load(inp)
  modelRD = get_intensity_structure(base,FE1_model=Fe_oxidized_model,FE2_model=Fe_reduced_model)
  modelSP = get_intensity_structure(base,FE1_model=Fe_oxidized_model,FE2_model=Fe_special_model)

  incr = 50
  energy = 7120.
  Hrange = range(100)
  #change in FE2 model
  original_fp = Fe_reduced_model.fp_fdp_at_wavelength(angstroms = 12398.425/energy)[0]
  modified_fp = Fe_special_model.fp_fdp_at_wavelength(angstroms = 12398.425/energy)[0]
  delta_fp = modified_fp - original_fp
  for ix in Hrange:
    print(
      "%3d %9.3f %9.3f %9.3f"%(
      ix,modelSP[(ix,50)],modelRD[(ix,50)],modelSP[(ix,50)]-modelRD[(ix,50)]),
      "%9.3f %9.3f"%((modelSP[(ix,50)]-modelRD[(ix,50)])/delta_fp,
                     modelRD[(ix,350)] )
    )
  ccx = (modelSP.matrix_copy_block(i_row=0,i_column=50,n_rows=modelRD.focus()[0],n_columns=1)-
  modelRD.matrix_copy_block(i_row=0,i_column=50,n_rows=modelRD.focus()[0],n_columns=1))/delta_fp
  ccy = modelRD.matrix_copy_block(i_row=0,i_column=350,n_rows=modelRD.focus()[0],n_columns=1)
  CC=flex.linear_correlation(ccx.as_1d(),ccy.as_1d())
  print("Finite vs analytical correlation for FE2 fp:",CC.coefficient())
  assert CC.coefficient() > 0.999

  Fe_special_model = special_proxy(True) # now consider the case of varying fdp, in the FE2 model
  modelSP = get_intensity_structure(base,FE1_model=Fe_oxidized_model,FE2_model=Fe_special_model)

  original_fdp = Fe_reduced_model.fp_fdp_at_wavelength(angstroms = 12398.425/energy)[1]
  modified_fdp = Fe_special_model.fp_fdp_at_wavelength(angstroms = 12398.425/energy)[1]
  delta_fdp = modified_fdp - original_fdp
  for ix in Hrange:
    print(
      "%3d %9.3f %9.3f %9.3f"%(
      ix,modelSP[(ix,50)],modelRD[(ix,50)],modelSP[(ix,50)]-modelRD[(ix,50)]),
      "%9.3f %9.3f"%((modelSP[(ix,50)]-modelRD[(ix,50)])/delta_fdp,
                     modelRD[(ix,450)] )
    )
  ccx = (modelSP.matrix_copy_block(i_row=0,i_column=50,n_rows=modelRD.focus()[0],n_columns=1)-
  modelRD.matrix_copy_block(i_row=0,i_column=50,n_rows=modelRD.focus()[0],n_columns=1))/delta_fdp
  ccy = modelRD.matrix_copy_block(i_row=0,i_column=450,n_rows=modelRD.focus()[0],n_columns=1)
  CC=flex.linear_correlation(ccx.as_1d(),ccy.as_1d())
  print("Finite vs analytical correlation for FE2 fdp:",CC.coefficient())
  assert CC.coefficient() > 0.999
Example No. 7
 def show_correlation_of_scales(assert_perfect=False):
   expected = scales_input / scales_input[0]
   estimated = image_mdls.extract_scales()
   print "Correlation of expected and estimated scales:"
   flex.linear_correlation(expected, estimated).show_summary(prefix="  ")
   print
   sys.stdout.flush()
   if (assert_perfect):
     from libtbx.test_utils import approx_equal
     assert approx_equal(estimated, expected)
Example No. 9
def exercise_1():
    from scitbx.array_family import flex
    a = flex.double()
    b = flex.double()
    for i in xrange(1, 100):
        for xx in xrange(1, 1000):
            x = xx / 10
            n = 10**i
            if p_of_none_greater(x, n) > 0.001 and p_of_none_greater(
                    x, n) < 0.999:
                a.append(p_of_none_greater(x, n))
                b.append(get_prob_more_than_z_n_tries(x, n))
    print a.size()
    print flex.linear_correlation(a, b).coefficient()
    assert flex.linear_correlation(a, b).coefficient() > 0.99
Example No. 10
def run(args, out=sys.stdout):
  # this wrapper loads the data and flags (or raises an error if additional
  # input is needed), reads the PDB file, optionally processes the geometry,
  # and creates an fmodel object using the data, flags, and xray.structure
  # object from the PDB file.
  cmdline = mmtbx.command_line.load_model_and_data(
    args=args,
    master_phil=master_phil(),
    out=out,
    process_pdb_file=PROCESS_PDB_FILE,
    create_fmodel=True,
    prefer_anomalous=False)
  fmodel = cmdline.fmodel
  pdb_hierarchy = cmdline.pdb_hierarchy
  xray_structure = cmdline.xray_structure
  params = cmdline.params
  f_obs = fmodel.f_obs()
  # the fmodel object will already have the bulk solvent correction and
  # scaling performed when created using the above code, so we can immediately
  # use the f_model array.
  f_calc = abs(fmodel.f_model()) # just amplitudes, please
  assert (len(f_calc.indices()) == len(f_obs.indices()))
  from scitbx.array_family import flex
  cc = flex.linear_correlation(f_obs.data(), f_calc.data()).coefficient()
  print("CC(obs-calc): %.3f" % cc, file=out)
Example No. 11
def cc(a, b):

    assert(len(a) == len(b))

    from scitbx.array_family import flex
    corr = flex.linear_correlation(flex.double(a), flex.double(b))
    return corr.coefficient()
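A possible call site for the cc() helper above (hypothetical usage, not part of the original snippet; it assumes cc is in scope). Because the helper converts its arguments with flex.double, plain Python lists of equal length work directly:

a = [0.5, 1.0, 1.5, 2.0, 2.5]
b = [0.4, 1.1, 1.4, 2.2, 2.4]
print("cc(a, b) = %.3f" % cc(a, b))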
Example No. 12
 def cc_model_map (self, selection=None, radius=1.5) :
   """
   Calculate the correlation coefficient for the current model (in terms of
   F(calc) from the xray structure) and the target map, calculated at atomic
   positions rather than grid points.  This will be much
   less accurate than the CC calculated in the original crystal environment,
   with full F(model) including bulk solvent correction.
   """
   from scitbx.array_family import flex
   if (selection is None) :
     selection = self.selection_in_box
   fcalc = self.box.xray_structure_box.structure_factors(d_min=self.d_min).f_calc()
   fc_fft_map = fcalc.fft_map(resolution_factor=self.resolution_factor)
   fc_map = fc_fft_map.apply_sigma_scaling().real_map_unpadded()
   sites_selected = self.get_selected_sites(selection, hydrogens=False)
   assert (len(sites_selected) > 0)
   fc_values = flex.double()
   map_values = flex.double()
   unit_cell = self.box.xray_structure_box.unit_cell()
   for site in sites_selected :
     site_frac = unit_cell.fractionalize(site)
     fc_values.append(fc_map.tricubic_interpolation(site_frac))
     map_values.append(self.target_map_box.tricubic_interpolation(site_frac))
   return flex.linear_correlation(
     x=map_values,
     y=fc_values).coefficient()
Example No. 13
def exercise_translational_phase_shift(n_sites=100,
                                       d_min=1.5,
                                       resolution_factor=0.3):
    sgi = space_group_info("P1")
    xrs = random_structure.xray_structure(
        space_group_info=sgi,
        elements=(("O", "N", "C") * (n_sites // 3 + 1))[:n_sites],
        volume_per_atom=50,
        min_distance=1.5)
    f_calc = xrs.structure_factors(d_min=d_min).f_calc()
    print f_calc.unit_cell()
    from scitbx.matrix import col
    shift_frac = col((.23984120, .902341127, .51219021))

    # Shift phases directly
    phase_shifted = f_calc.translational_shift(shift_frac=shift_frac)

    # Check that map from phase_shifted FC matches map calculated from
    #   translated xrs

    # Map from phase-shifted FC
    shifted_fft_map = phase_shifted.fft_map(
        resolution_factor=resolution_factor)
    shifted_fft_map.apply_sigma_scaling()
    shifted_map_data = shifted_fft_map.real_map_unpadded()
    cs = xrs.crystal_symmetry()
    from cctbx.maptbx import crystal_gridding
    cg = crystal_gridding(unit_cell=cs.unit_cell(),
                          space_group_info=cs.space_group_info(),
                          pre_determined_n_real=shifted_map_data.all())

    # Map from translated xrs
    sites_shifted = xrs.sites_frac() + shift_frac
    xrs.set_sites_frac(sites_shifted)
    f_calc_from_shifted_xrs = xrs.structure_factors(d_min=d_min).f_calc()
    fft_map_from_shifted_xrs = f_calc_from_shifted_xrs.fft_map(
        resolution_factor=resolution_factor, crystal_gridding=cg)
    map_data_from_shifted_xrs = fft_map_from_shifted_xrs.real_map_unpadded()

    # shifted_map_data (map from phase shifted f_calc),
    # map_data_from_shifted_xrs (recalculated with shifted xrs)

    assert shifted_map_data.all() == map_data_from_shifted_xrs.all()
    from cctbx import maptbx
    sel = maptbx.grid_indices_around_sites(unit_cell=xrs.unit_cell(),
                                           fft_n_real=shifted_map_data.focus(),
                                           fft_m_real=shifted_map_data.all(),
                                           sites_cart=xrs.sites_cart(),
                                           site_radii=flex.double(
                                               xrs.scatterers().size(), 1.5))
    shifted_map_data = shifted_map_data.select(sel)
    map_data_from_shifted_xrs = map_data_from_shifted_xrs.select(sel)

    cc_map_data_from_shifted_xrs_shifted_map_data = flex.linear_correlation(
        x=map_data_from_shifted_xrs.as_1d(),
        y=shifted_map_data.as_1d()).coefficient()
    print "cc_map_data_from_shifted_xrs_shifted_map_data",\
       cc_map_data_from_shifted_xrs_shifted_map_data
    assert cc_map_data_from_shifted_xrs_shifted_map_data > 0.99
    print "*" * 25
Example No. 14
def get_map_histograms(data, n_slots=20, data_1=None, data_2=None):
    h0, h1, h2 = None, None, None
    data_min = None
    hmhcc = None
    if (data_1 is None):
        h0 = flex.histogram(data=data.as_1d(), n_slots=n_slots)
    else:
        data_min = min(flex.min(data_1), flex.min(data_2))
        data_max = max(flex.max(data_1), flex.max(data_2))
        h0 = flex.histogram(data=data.as_1d(), n_slots=n_slots)
        h1 = flex.histogram(data=data_1.as_1d(),
                            data_min=data_min,
                            data_max=data_max,
                            n_slots=n_slots)
        h2 = flex.histogram(data=data_2.as_1d(),
                            data_min=data_min,
                            data_max=data_max,
                            n_slots=n_slots)
        hmhcc = flex.linear_correlation(
            x=h1.slots().as_double(), y=h2.slots().as_double()).coefficient()
    return group_args(h_map=h0,
                      h_half_map_1=h1,
                      h_half_map_2=h2,
                      _data_min=data_min,
                      half_map_histogram_cc=hmhcc)
Example No. 15
def run (args, out=sys.stdout) :
  # this wrapper loads the data and flags (or raises an error if additional
  # input is needed), reads the PDB file, optionally processes the geometry,
  # and creates an fmodel object using the data, flags, and xray.structure
  # object from the PDB file.
  cmdline = mmtbx.command_line.load_model_and_data(
    args=args,
    master_phil=master_phil(),
    out=out,
    process_pdb_file=PROCESS_PDB_FILE,
    create_fmodel=True,
    prefer_anomalous=False)
  fmodel = cmdline.fmodel
  pdb_hierarchy = cmdline.pdb_hierarchy
  xray_structure = cmdline.xray_structure
  params = cmdline.params
  f_obs = fmodel.f_obs()
  # the fmodel object will already have the bulk solvent correction and
  # scaling performed when created using the above code, so we can immediately
  # use the f_model array.
  f_calc = abs(fmodel.f_model()) # just amplitudes, please
  assert (len(f_calc.indices()) == len(f_obs.indices()))
  from scitbx.array_family import flex
  cc = flex.linear_correlation(f_obs.data(), f_calc.data()).coefficient()
  print >> out, "CC(obs-calc): %.3f" % cc
Example No. 16
def good_atoms_selection(crystal_gridding, map_coeffs, xray_structure):
    #XXX copy from model_missing_reflections map_tools.py, consolidate later
    #XXX Also look for similar crap in f_model.py
    fft_map = miller.fft_map(crystal_gridding=crystal_gridding,
                             fourier_coefficients=map_coeffs)
    fft_map.apply_sigma_scaling()
    map_data = fft_map.real_map_unpadded()
    rho_atoms = flex.double()
    for site_frac in xray_structure.sites_frac():
        rho_atoms.append(map_data.eight_point_interpolation(site_frac))
    #rho_mean = flex.mean_default(rho_atoms.select(rho_atoms>1.0), 1.0)
    sel_exclude = rho_atoms < 1.0  # XXX ??? TRY 0.5!
    sites_cart = xray_structure.sites_cart()
    #
    f_calc = map_coeffs.structure_factors_from_scatterers(
        xray_structure=xray_structure).f_calc()
    fft_map = miller.fft_map(crystal_gridding=crystal_gridding,
                             fourier_coefficients=f_calc)
    fft_map.apply_sigma_scaling()
    map_data2 = fft_map.real_map_unpadded()
    #
    hd_sel = xray_structure.hd_selection()
    for i_seq, site_cart in enumerate(sites_cart):
        selection = maptbx.grid_indices_around_sites(
            unit_cell=map_coeffs.unit_cell(),
            fft_n_real=map_data.focus(),
            fft_m_real=map_data.all(),
            sites_cart=flex.vec3_double([site_cart]),
            site_radii=flex.double([1.5]))
        cc = flex.linear_correlation(
            x=map_data.select(selection),
            y=map_data2.select(selection)).coefficient()
        if (cc < 0.7 or hd_sel[i_seq]): sel_exclude[i_seq] = True
    return ~sel_exclude
Example No. 17
 def __init__(self, fmodel, ligands, params, log):
     from cctbx import maptbx
     from scitbx.array_family import flex
     map_helper = fmodel.electron_density_map()
     self.two_fofc_map_coeffs = map_helper.map_coefficients("2mFo-DFc")
     self.fofc_map_coeffs = map_helper.map_coefficients("mFo-DFc")
     fft_map = self.two_fofc_map_coeffs.fft_map(resolution_factor=0.25)
     fft_map.apply_sigma_scaling()
     fcalc = map_helper.map_coefficients("Fc")
     fcalc_map = fcalc.fft_map(resolution_factor=0.25)
     fcalc_map.apply_sigma_scaling()
     real_map = fft_map.real_map()
     fcalc_real_map = fcalc_map.real_map()
     final_cc = []
     for k, ligand in enumerate(ligands):
         atoms = ligand.atoms()
         sites = flex.vec3_double()
         radii = flex.double()
         for atom in atoms:
             if (not atom.element.strip() in ["H", "D"]):
                 sites.append(atom.xyz)
                 radii.append(1.5)
         sel = maptbx.grid_indices_around_sites(
             unit_cell=self.two_fofc_map_coeffs.unit_cell(),
             fft_n_real=real_map.focus(),
             fft_m_real=real_map.all(),
             sites_cart=sites,
             site_radii=radii)
         m1 = real_map.select(sel)
         m2 = fcalc_real_map.select(sel)
         cc = flex.linear_correlation(x=m1, y=m2).coefficient()
         final_cc.append(cc)
         print >> log, "  Ligand %d: CC = %5.3f" % (k + 1, cc)
     print >> log, ""
     self.final_cc = final_cc
Example No. 18
def run(file_name="tst_tls_as_xyz.pdb"):
    of = open(file_name, "w")
    print(pdb_str, file=of)
    of.close()
    uc = iotbx.pdb.input(file_name=file_name).crystal_symmetry().unit_cell()
    #for n in range(10,100,10)+range(100,1000,100)+range(1000,10001,1000)+[15000,20000]:
    for n in [
            1000,
    ]:
        easy_run.call("phenix.tls_as_xyz %s n_models=%s > tst_tls_as_xyz.log" %
                      (file_name, str(n)))
        for i in [0, 1]:
            u1 = iotbx.pdb.input(
                file_name="tst_tls_as_xyz_u_from_ensemble_%s.pdb" %
                str(i)).xray_structure_simple().scatterers().extract_u_cart(uc)
            u2 = iotbx.pdb.input(
                file_name="tst_tls_as_xyz_u_from_tls_%s.pdb" %
                str(i)).xray_structure_simple().scatterers().extract_u_cart(uc)

        u1, u2 = u1.as_double(), u2.as_double()
        cc = flex.linear_correlation(x=u1, y=u2).coefficient()
        r = flex.sum(flex.abs(flex.abs(u1)-flex.abs(u2)))/\
            flex.sum(flex.abs(flex.abs(u1)+flex.abs(u2)))*2
        print("%5d %6.4f %6.4f" % (n, cc, r))
    assert cc > 0.99, cc
    assert r < 0.06, r
Example No. 19
 def cc_model_map(self, selection=None, radius=1.5):
     """
 Calculate the correlation coefficient for the current model (in terms of
 F(calc) from the xray structure) and the target map, calculated at atomic
 positions rather than grid points.  This will be much
 less accurate than the CC calculated in the original crystal environment,
 with full F(model) including bulk solvent correction.
 """
     from scitbx.array_family import flex
     if (selection is None):
         selection = self.selection_in_box
     fcalc = self.box.xray_structure_box.structure_factors(
         d_min=self.d_min).f_calc()
     fc_fft_map = fcalc.fft_map(resolution_factor=self.resolution_factor)
     fc_map = fc_fft_map.apply_sigma_scaling().real_map_unpadded()
     sites_selected = self.get_selected_sites(selection, hydrogens=False)
     assert (len(sites_selected) > 0)
     fc_values = flex.double()
     map_values = flex.double()
     unit_cell = self.box.xray_structure_box.unit_cell()
     for site in sites_selected:
         site_frac = unit_cell.fractionalize(site)
         fc_values.append(fc_map.tricubic_interpolation(site_frac))
         map_values.append(
             self.target_map_box.tricubic_interpolation(site_frac))
     return flex.linear_correlation(x=map_values, y=fc_values).coefficient()
Example No. 20
 def get_sites_cc (self, atoms, sites=None) :
   from cctbx import maptbx
   from scitbx.array_family import flex
   radii = flex.double()
   for atom in atoms :
     if (atom.element.strip() in ["H", "D"]) :
       radii.append(1.)
     else :
       radii.append(1.5)
   fcalc_map = self.fcalc_real_map
   if (sites is None) :
     sites = atoms.extract_xyz()
   else :
     fcalc_map = self.get_new_fcalc_map(
       sites_new=sites,
       i_seqs=atoms.extract_i_seq())
   sel = maptbx.grid_indices_around_sites(
     unit_cell  = self.unit_cell,
     fft_n_real = self.n_real,
     fft_m_real = self.m_real,
     sites_cart = sites,
     site_radii = radii)
   m1 = self.real_map.select(sel)
   m2 = fcalc_map.select(sel)
   cc = flex.linear_correlation(x=m1, y=m2).coefficient()
   return group_args(
     cc=cc,
     map_mean=flex.mean(m1.as_1d()))
Example No. 21
def show_complete_true_false_cc(nu, nv, nw, recycled, verbose):
    nuvw = nu * nv * nw
    corr = flex.linear_correlation(x=recycled[0][:nuvw].as_double(),
                                   y=recycled[1][:nuvw].as_double())
    if (verbose):
        print "dims:", (nu, nv,
                        nw), "complete=true,false cc:", corr.coefficient()
Example No. 22
 def get_sites_cc(self, atoms, sites=None):
   from cctbx import maptbx
   from scitbx.array_family import flex
   radii = flex.double()
   for atom in atoms :
     if (atom.element.strip() in ["H", "D"]):
       radii.append(1.)
     else :
       radii.append(1.5)
   fcalc_map = self.fcalc_real_map
   if (sites is None):
     sites = atoms.extract_xyz()
   else :
     fcalc_map = self.get_new_fcalc_map(
       sites_new=sites,
       i_seqs=atoms.extract_i_seq())
   sel = maptbx.grid_indices_around_sites(
     unit_cell  = self.unit_cell,
     fft_n_real = self.n_real,
     fft_m_real = self.m_real,
     sites_cart = sites,
     site_radii = radii)
   m1 = self.real_map.select(sel)
   m2 = fcalc_map.select(sel)
   cc = flex.linear_correlation(x=m1, y=m2).coefficient()
   return group_args(
     cc=cc,
     map_mean=flex.mean(m1.as_1d()))
Example No. 23
    def _packed_corr_mat(m):
        """Return a 1D flex array containing the upper diagonal values of the
    correlation matrix calculated between columns of 2D matrix m"""

        nr, nc = m.all()

        try:  # convert a flex.double matrix to sparse
            nr, nc = m.all()
            from scitbx import sparse
            m2 = sparse.matrix(nr, nc)
            m2.assign_block(m, 0, 0)
            m = m2
        except AttributeError:
            pass  # assume m is already scitbx_sparse_ext.matrix

        packed_len = (m.n_cols * (m.n_cols + 1)) // 2
        i = 0
        tmp = flex.double(packed_len)
        for col1 in range(m.n_cols):
            for col2 in range(col1, m.n_cols):
                tmp[i] = flex.linear_correlation(
                    m.col(col1).as_dense_vector(),
                    m.col(col2).as_dense_vector()).coefficient()
                i += 1

        return tmp
Example No. 24
def get_cc(mc1, mc2, xrs):
  crystal_gridding = mc1.crystal_gridding(
    d_min = mc1.d_min(), resolution_factor = 0.25)
  fft_map = miller.fft_map(
    crystal_gridding     = crystal_gridding,
    fourier_coefficients = mc1)
  fft_map.apply_sigma_scaling()
  m1 = fft_map.real_map_unpadded()
  fft_map = miller.fft_map(
    crystal_gridding     = crystal_gridding,
    fourier_coefficients = mc2)
  fft_map.apply_sigma_scaling()
  m2 = fft_map.real_map_unpadded()
  assert m1.focus()==m2.focus()
  assert m1.all()==m2.all()
  ccs = flex.double()
  for site_cart in xrs.sites_cart():
    sel = maptbx.grid_indices_around_sites(
      unit_cell  = mc1.unit_cell(),
      fft_n_real = m1.focus(),
      fft_m_real = m1.all(),
      sites_cart = flex.vec3_double([site_cart]),
      site_radii = flex.double([1.5]))
    cc = flex.linear_correlation(x=m1.select(sel), y=m2.select(sel)).coefficient()
    ccs.append(cc)
  return ccs
Example No. 25
def show_complete_true_false_cc(nu, nv, nw, recycled, verbose):
  nuvw = nu*nv*nw
  corr = flex.linear_correlation(
    x=recycled[0][:nuvw].as_double(),
    y=recycled[1][:nuvw].as_double())
  if (verbose):
    print "dims:", (nu,nv,nw), "complete=true,false cc:", corr.coefficient()
Example No. 26
def get_cc(mc1, mc2, xrs):
    crystal_gridding = mc1.crystal_gridding(d_min=mc1.d_min(),
                                            resolution_factor=0.25)
    fft_map = miller.fft_map(crystal_gridding=crystal_gridding,
                             fourier_coefficients=mc1)
    fft_map.apply_sigma_scaling()
    m1 = fft_map.real_map_unpadded()
    fft_map = miller.fft_map(crystal_gridding=crystal_gridding,
                             fourier_coefficients=mc2)
    fft_map.apply_sigma_scaling()
    m2 = fft_map.real_map_unpadded()
    assert m1.focus() == m2.focus()
    assert m1.all() == m2.all()
    ccs = flex.double()
    for site_cart in xrs.sites_cart():
        sel = maptbx.grid_indices_around_sites(unit_cell=mc1.unit_cell(),
                                               fft_n_real=m1.focus(),
                                               fft_m_real=m1.all(),
                                               sites_cart=flex.vec3_double(
                                                   [site_cart]),
                                               site_radii=flex.double([1.5]))
        cc = flex.linear_correlation(x=m1.select(sel),
                                     y=m2.select(sel)).coefficient()
        ccs.append(cc)
    return ccs
Example No. 27
    def CC(self, beam_center, rotmat=None):
        detector = self.image.get_detector()
        angle = [0, 3, 2, 1][self.i_quad]  #

        asic = self.image.get_raw_data()[list(detector.get_names()).index(
            self.panel.get_name())].matrix_rot90(angle)

        p_w, p_h = self.panel.get_image_size()
        b = [
            self.panel.get_pixel_lab_coord((0, 0)),
            self.panel.get_pixel_lab_coord((p_w - 1, 0)),
            self.panel.get_pixel_lab_coord((p_w - 1, p_h - 1)),
            self.panel.get_pixel_lab_coord((0, p_h - 1))
        ]
        asic_origin = col(
            self.panel.millimeter_to_pixel(
                (min([p[0] for p in b]), min([p[1] for p in b]))))

        if rotmat is None:
            rot45 = sqr(
                (sin(pi / 3.), -cos(pi / 3.), cos(pi / 3.), sin(pi / 3.)))
        else:
            rot45 = rotmat

        from xfel.metrology.legacy_scale import quadrant_self_correlation
        min_value = self.image.get_detector()[0].get_trusted_range()[0]
        REF, ROT = quadrant_self_correlation(asic.as_double(), asic_origin,
                                             beam_center, rot45, min_value)
        CCRR = flex.linear_correlation(REF, ROT)

        return CCRR.coefficient()
Example No. 28
    def _packed_corr_mat(m):
        """Return a 1D flex array containing the upper diagonal values of the
    correlation matrix calculated between columns of 2D matrix m"""

        try:  # convert a flex.double matrix to sparse
            nr, nc = m.all()
            from scitbx import sparse

            m2 = sparse.matrix(nr, nc)
            m2.assign_block(m, 0, 0)
            m = m2
        except AttributeError:
            pass  # assume m is already scitbx_sparse_ext.matrix

        packed_len = (m.n_cols * (m.n_cols + 1)) // 2
        i = 0
        tmp = flex.double(packed_len)
        for col1 in range(m.n_cols):
            for col2 in range(col1, m.n_cols):
                tmp[i] = flex.linear_correlation(
                    m.col(col1).as_dense_vector(), m.col(col2).as_dense_vector()
                ).coefficient()
                i += 1

        return tmp
Example No. 29
 def get_map_stats_for_atoms(self, atoms):
     from cctbx import maptbx
     from scitbx.array_family import flex
     sites_cart = flex.vec3_double()
     sites_cart_nonH = flex.vec3_double()
     values_2fofc = flex.double()
     values_fofc = flex.double()
     for atom in atoms:
         sites_cart.append(atom.xyz)
         if (not atom.element.strip() in ["H", "D"]):  #XXX trap: neutrons?
             sites_cart_nonH.append(atom.xyz)
             site_frac = self.unit_cell.fractionalize(atom.xyz)
             values_2fofc.append(
                 self.f_map.eight_point_interpolation(site_frac))
             values_fofc.append(
                 self.diff_map.eight_point_interpolation(site_frac))
     if (len(sites_cart_nonH) == 0):
         return None
     sel = maptbx.grid_indices_around_sites(unit_cell=self.unit_cell,
                                            fft_n_real=self.f_map.focus(),
                                            fft_m_real=self.f_map.all(),
                                            sites_cart=sites_cart,
                                            site_radii=get_atom_radii(
                                                atoms, self.atom_radius))
     f_map_sel = self.f_map.select(sel)
     model_map_sel = self.model_map.select(sel)
     diff_map_sel = self.diff_map.select(sel)
     cc = flex.linear_correlation(x=f_map_sel,
                                  y=model_map_sel).coefficient()
     return group_args(cc=cc,
                       mean_2fofc=flex.mean(values_2fofc),
                       mean_fofc=flex.mean(values_fofc))
Example No. 30
    def _packed_corr_mat(m):
        """Return a 1D flex array containing the upper diagonal values of the
        correlation matrix calculated between columns of 2D matrix m"""

        nr, nc = m.all()

        try:  # convert a flex.double matrix to sparse
            from scitbx import sparse

            m2 = sparse.matrix(nr, nc)
            m2.assign_block(m, 0, 0)
            m = m2
        except AttributeError:
            pass  # assume m is already scitbx_sparse_ext.matrix

        packed_len = (m.n_cols * (m.n_cols + 1)) // 2
        i = 0
        tmp = flex.double(packed_len)
        for col1 in range(m.n_cols):
            for col2 in range(col1, m.n_cols):
                if col1 == col2:
                    tmp[i] = 1.0
                else:
                    # Avoid spuriously high correlation between a column that should be
                    # zero (such as the gradient of X residuals wrt the Shift2 parameter)
                    # and another column (such as the gradient of X residuals wrt the
                    # Dist parameter) by rounding values to 15 places. It seems that such
                    # spurious correlations may occur in cases where gradients are
                    # calculated to be zero by matrix operations, rather than set to zero.
                    v1 = m.col(col1).as_dense_vector().round(15)
                    v2 = m.col(col2).as_dense_vector().round(15)
                    tmp[i] = flex.linear_correlation(v1, v2).coefficient()
                i += 1

        return tmp
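The _packed_corr_mat variants above store the upper triangle of the column-by-column correlation matrix row by row, so the coefficient for a given column pair sits at a computable offset in the packed vector. A small sketch of that index arithmetic (packed_index is a hypothetical helper, not part of the original code; it assumes col1 <= col2 < n_cols and the fill order used above):

def packed_index(col1, col2, n_cols):
    # rows 0 .. col1-1 contribute n_cols, n_cols-1, ... entries before row col1 starts
    offset = col1 * n_cols - (col1 * (col1 - 1)) // 2
    return offset + (col2 - col1)

# e.g. the correlation between columns 2 and 5 of a 10-column matrix:
# cc_2_5 = packed[packed_index(2, 5, 10)]   # where packed = _packed_corr_mat(m)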
Example No. 31
 def get_map_stats_for_atoms (self, atoms) :
   from cctbx import maptbx
   from scitbx.array_family import flex
   sites_cart = flex.vec3_double()
   sites_cart_nonH = flex.vec3_double()
   values_2fofc = flex.double()
   values_fofc = flex.double()
   for atom in atoms :
     sites_cart.append(atom.xyz)
     if (not atom.element.strip() in ["H","D"]) : #XXX trap: neutrons?
       sites_cart_nonH.append(atom.xyz)
       site_frac = self.unit_cell.fractionalize(atom.xyz)
       values_2fofc.append(self.f_map.eight_point_interpolation(site_frac))
       values_fofc.append(self.diff_map.eight_point_interpolation(site_frac))
   if (len(sites_cart_nonH) == 0) :
     return None
   sel = maptbx.grid_indices_around_sites(
     unit_cell=self.unit_cell,
     fft_n_real=self.f_map.focus(),
     fft_m_real=self.f_map.all(),
     sites_cart=sites_cart,
     site_radii=get_atom_radii(atoms, self.atom_radius))
   f_map_sel = self.f_map.select(sel)
   model_map_sel = self.model_map.select(sel)
   diff_map_sel = self.diff_map.select(sel)
   cc = flex.linear_correlation(x=f_map_sel, y=model_map_sel).coefficient()
   return group_args(cc=cc,
     mean_2fofc=flex.mean(values_2fofc),
     mean_fofc=flex.mean(values_fofc))
Example No. 32
def run(args, out=sys.stdout):
    cmdline = iotbx.phil.process_command_line_with_files(
        args=args,
        master_phil_string=master_phil_str,
        pdb_file_def="model",
        map_file_def="map",
        usage_string="""\
em_rscc.py model.pdb map.ccp4

%s""" % __doc__)
    params = cmdline.work.extract()
    assert (not None in [params.model, params.map])
    pdb_in = cmdline.get_file(params.model).file_object
    m = cmdline.get_file(params.map).file_object
    print >> out, "Input electron density map:"
    print >> out, "m.all()   :", m.data.all()
    print >> out, "m.focus() :", m.data.focus()
    print >> out, "m.origin():", m.data.origin()
    print >> out, "m.nd()    :", m.data.nd()
    print >> out, "m.size()  :", m.data.size()
    print >> out, "m.focus_size_1d():", m.data.focus_size_1d()
    print >> out, "m.is_0_based()   :", m.data.is_0_based()
    print >> out, "map: min/max/mean:", flex.min(m.data), flex.max(
        m.data), flex.mean(m.data)
    print >> out, "unit cell:", m.unit_cell_parameters
    symm = crystal.symmetry(space_group_symbol="P1",
                            unit_cell=m.unit_cell_parameters)
    xrs = pdb_in.input.xray_structure_simple(crystal_symmetry=symm)
    print >> out, "Setting up electron scattering table (d_min=%g)" % params.d_min
    xrs.scattering_type_registry(d_min=params.d_min, table="electron")
    fc = xrs.structure_factors(d_min=params.d_min).f_calc()
    cg = maptbx.crystal_gridding(unit_cell=symm.unit_cell(),
                                 space_group_info=symm.space_group_info(),
                                 pre_determined_n_real=m.data.all())
    fc_map = fc.fft_map(
        crystal_gridding=cg).apply_sigma_scaling().real_map_unpadded()
    assert (fc_map.all() == fc_map.focus() == m.data.all())
    em_data = m.data.as_double()
    unit_cell_for_interpolation = m.grid_unit_cell()
    frac_matrix = unit_cell_for_interpolation.fractionalization_matrix()
    sites_cart = xrs.sites_cart()
    sites_frac = xrs.sites_frac()
    print >> out, "PER-RESIDUE CORRELATION:"
    for chain in pdb_in.hierarchy.only_model().chains():
        for residue_group in chain.residue_groups():
            i_seqs = residue_group.atoms().extract_i_seq()
            values_em = flex.double()
            values_fc = flex.double()
            for i_seq in i_seqs:
                rho_em = maptbx.non_crystallographic_eight_point_interpolation(
                    map=em_data,
                    gridding_matrix=frac_matrix,
                    site_cart=sites_cart[i_seq])
                rho_fc = fc_map.eight_point_interpolation(sites_frac[i_seq])
                values_em.append(rho_em)
                values_fc.append(rho_fc)
            cc = flex.linear_correlation(x=values_em,
                                         y=values_fc).coefficient()
            print >> out, residue_group.id_str(), cc
Example No. 33
def plot_energy_scale(IDX, ax, abs_PA, origin, position0, B, intensity_lookup,
                      intensity_lookup_1, key):
    d_Ang = A.unit_cell().d(one_index)
    print(d_Ang)
    unit_pos0 = position0.normalize()
    spectrumx = []
    spectrumy = []
    spectrumy_1 = flex.double()
    for eV in range(7090, 7151):
        spectrumx.append(eV)
        specy = 0.
        specy_1 = 0.
        lambda_Ang = 12398.425 / eV
        two_theta = 2. * math.asin(lambda_Ang / (2. * d_Ang))
        radius_mm = distance_mm * math.tan(two_theta)
        radius_px = radius_mm / pixel_sz_mm
        contour_x = []
        contour_y = []
        for rot in range(-2, 3):
            PA = abs_PA + rot * mos_rotation_deg * math.pi / 180.
            clock = unit_pos0.rotate_2d(-PA, deg=False)
            position1 = origin + radius_px * clock
            contour_x.append(position1[1] - 0.5 - B[0])
            contour_y.append(position1[0] - 0.5 - B[2])

        for rot in range(-8, 9):
            PA = abs_PA + 0.25 * rot * mos_rotation_deg * math.pi / 180.
            clock = unit_pos0.rotate_2d(-PA, deg=False)
            position1 = origin + radius_px * clock
            int_coords = (int(position1[0]), int(position1[1]))

            specy += intensity_lookup.get(int_coords, 0)
            specy_1 += intensity_lookup_1.get(int_coords, 0)
        spectrumy.append(specy)
        spectrumy_1.append(specy_1)
    # rescale the partiality spectrum
    pscale = 0.5 * max(spectrumy) / flex.max(spectrumy_1)
    # plot partiality model
    ax[IDX // 3][IDX % 3].plot(spectrumx, pscale * spectrumy_1, "g-")

    #ax[IDX//3][IDX%3].plot(spectrumx, spectrumy,"r-")
    iterator = SS.generate_recast_renormalized_image(image=key,
                                                     energy=7120.,
                                                     total_flux=1e12)
    wavlen, flux, wavelength_A = next(
        iterator)  # list of lambdas, list of fluxes, average wavelength
    ratio = flex.max(flux) / max(spectrumy)
    # plot incident spectrum
    ax[IDX // 3][IDX % 3].plot(12398.425 / wavlen, (flux / ratio), "b-")
    combined_model = flex.double()
    incident_xaxis = 12398.425 / wavlen
    int_ix = [int(ix) for ix in incident_xaxis]
    for ic in range(len(spectrumx)):
        ic_idx = int_ix.index(spectrumx[ic])
        combined_model.append(flux[ic_idx] * spectrumy_1[ic])
    cscale = max(spectrumy) / max(combined_model)
    cc = flex.linear_correlation(combined_model,
                                 flex.double(spectrumy)).coefficient()
    print("The correlation coefficient is", cc)
Example No. 34
def get_model_map_stats (
    selection,
    target_map,
    model_map,
    unit_cell,
    sites_cart,
    pdb_atoms,
    local_sampling=False) :
  """
  Collect basic statistics for a model map and some target map (usually an
  mFo-DFc map), including CC, mean, and minimum density at the atomic
  positions.
  """
  assert (len(target_map) == len(model_map))
  iselection = selection
  if (type(selection).__name__ == 'bool') :
    iselection = selection.iselection()
  from scitbx.array_family import flex
  sites_cart_refined = sites_cart.select(selection)
  sites_selected = flex.vec3_double()
  map1 = flex.double()
  map2 = flex.double()
  min_density = sys.maxint
  sum_density = n_sites = 0
  worst_atom = None
  # XXX I'm not sure the strict density cutoff is a good idea here
  for i_seq, xyz in zip(iselection, sites_cart_refined) :
    if (pdb_atoms[i_seq].element.strip() != "H") :
      sites_selected.append(xyz)
      site_frac = unit_cell.fractionalize(site_cart=xyz)
      target_value = target_map.tricubic_interpolation(site_frac)
      if (target_value < min_density) :
        min_density = target_value
        worst_atom = pdb_atoms[i_seq]
      sum_density += target_value
      n_sites += 1
      if (not local_sampling) :
        map1.append(target_value)
        map2.append(model_map.tricubic_interpolation(site_frac))
  assert (n_sites > 0)
  if (local_sampling) :
    from cctbx import maptbx
    map_sel = maptbx.grid_indices_around_sites(
      unit_cell=unit_cell,
      fft_n_real=target_map.focus(),
      fft_m_real=target_map.all(),
      sites_cart=sites_selected,
      site_radii=flex.double(sites_selected.size(), 1.0))
    map1 = target_map.select(map_sel)
    map2 = model_map.select(map_sel)
  assert (len(map1) > 0) and (len(map1) == len(map2))
  cc = flex.linear_correlation(x=map1, y=map2).coefficient()
  return group_args(
    cc=cc,
    min=min_density,
    mean=sum_density/n_sites)
Example No. 36
def from_map_map_atoms(map_1, map_2, sites_cart, unit_cell, radius):
    assert_same_gridding(map_1, map_2)
    sel = maptbx.grid_indices_around_sites(unit_cell=unit_cell,
                                           fft_n_real=map_1.focus(),
                                           fft_m_real=map_1.all(),
                                           sites_cart=sites_cart,
                                           site_radii=flex.double(
                                               sites_cart.size(), radius))
    return flex.linear_correlation(x=map_1.select(sel).as_1d(),
                                   y=map_2.select(sel).as_1d()).coefficient()
Example No. 37
 def _cc_volume(self, map_calc):
   s = get_selection_above_cutoff(m=map_calc, n=self.n_nodes_inside).iselection()
   #G = flex.double(flex.grid(self.map.all()), 0)
   #G = G.set_selected(s, 1)
   #ccp4_map(cg=self.crystal_gridding, file_name="m1.ccp4", map_data=self.map)
   #ccp4_map(cg=self.crystal_gridding, file_name="m2.ccp4", map_data=map_calc)
   #ccp4_map(cg=self.crystal_gridding, file_name="m3.ccp4", map_data=G)
   return flex.linear_correlation(
     x=self.map.select(s).as_1d(),
     y=map_calc.select(s).as_1d()).coefficient()
Example No. 38
def map_cc(map_coeffs_1, map_coeffs_2):
    fft_map_1 = map_coeffs_1.fft_map(resolution_factor=0.25)
    map_1 = fft_map_1.real_map_unpadded()
    fft_map_2 = miller.fft_map(crystal_gridding=fft_map_1,
                               fourier_coefficients=map_coeffs_2)
    map_2 = fft_map_2.real_map_unpadded()
    assert map_1.size() == map_2.size()
    m1 = map_1.as_1d()
    m2 = map_2.as_1d()
    return flex.linear_correlation(x=m1, y=m2).coefficient()
Example No. 39
def from_map_map_atoms(map_1, map_2, sites_cart, unit_cell, radius):
  assert_same_gridding(map_1, map_2)
  sel = maptbx.grid_indices_around_sites(
    unit_cell  = unit_cell,
    fft_n_real = map_1.focus(),
    fft_m_real = map_1.all(),
    sites_cart = sites_cart,
    site_radii = flex.double(sites_cart.size(), radius))
  return flex.linear_correlation(
    x=map_1.select(sel).as_1d(),
    y=map_2.select(sel).as_1d()).coefficient()
Example No. 40
def map_cc(map_coeffs_1, map_coeffs_2):
  fft_map_1 = map_coeffs_1.fft_map(resolution_factor=0.25)
  map_1 = fft_map_1.real_map_unpadded()
  fft_map_2 = miller.fft_map(
    crystal_gridding = fft_map_1,
    fourier_coefficients = map_coeffs_2)
  map_2 = fft_map_2.real_map_unpadded()
  assert map_1.size() == map_2.size()
  m1 = map_1.as_1d()
  m2 = map_2.as_1d()
  return flex.linear_correlation(x = m1, y = m2).coefficient()
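A hedged usage sketch for the map_cc() helper above (it assumes map_cc and its module-level imports, miller and flex, are in scope, and that a cctbx installation provides the usual import paths for random_structure and space_group_info). The structure is randomly generated and both sets of coefficients are identical, so the correlation should be essentially 1:

from cctbx.development import random_structure
from cctbx.sgtbx import space_group_info

xrs = random_structure.xray_structure(
  space_group_info=space_group_info("P1"),
  elements=["O"] * 10,
  volume_per_atom=50,
  min_distance=1.5)
mc1 = xrs.structure_factors(d_min=2.0).f_calc()
mc2 = xrs.structure_factors(d_min=2.0).f_calc()  # same coefficients a second time
assert map_cc(mc1, mc2) > 0.999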
Example No. 41
 def set_vals(result, d, verbose=False):
   for key1, value1 in d.items():
     if value1 != [] and value != []:
       cc = flex.linear_correlation(
         x=flex.double(value), y=flex.double(value1)).coefficient()
     else:
       cc = None
     result.setdefault(key1, OrderedDict())
     result[key1].setdefault(key, cc)
     if verbose:
       print('comparing\n%s\n%s' % (pretty_aev(value), pretty_aev(value1)))
       print('  CC = %0.3f' % cc)
Example No. 42
def exercise():
    pdb_file = "tmp_ringer.pdb"
    f = open(pdb_file, "w")
    f.write(pdb_str)
    f.close()
    mtz_file = "tmp_ringer.mtz"
    cmd = " ".join([
        "phenix.fmodel",
        pdb_file,
        "high_resolution=2.0",
        "type=real",
        "r_free_flags_fraction=0.1",
        "random_seed=12345",
        "label=F",
        "output.file_name=%s" % mtz_file,
    ])
    print(cmd)
    easy_run.call(cmd)
    result = easy_run.fully_buffered(
        "phenix.maps \"%s\" \"%s\" output.prefix=tmp_ringer" %
        (pdb_file, mtz_file)).raise_if_errors()
    assert (result.return_code == 0)
    result = easy_run.fully_buffered(
        "mmtbx.ringer \"%s\" tmp_ringer_map_coeffs.mtz" %
        pdb_file).raise_if_errors()
    with open("tmp_ringer_ringer.csv") as f:
        _lines1 = f.read().splitlines()
    lines1 = []
    for line in _lines1:
        if ("2mFo-DFc" in line):
            lines1.append(line)
    os.remove("tmp_ringer_ringer.csv")
    assert (result.return_code == 0)
    # Now with ccp4 map as input
    result2 = easy_run.fully_buffered(
        "phenix.mtz2map tmp_ringer_map_coeffs.mtz")
    assert (result2.return_code == 0)
    result3 = easy_run.fully_buffered(
        "mmtbx.ringer \"%s\" tmp_ringer_map_coeffs_2mFo-DFc.ccp4" % pdb_file)
    assert (
        result3.return_code == 0
    ), "DL: crash is expected due to new sanity check. Will replace input map."
    with open("tmp_ringer_ringer.csv") as f:
        lines2 = f.read().splitlines()
    assert len(lines1) == len(lines2)
    for line1, line2 in zip(lines1, lines2):
        fields1 = line1.split(",")
        fields2 = line2.split(",")
        rho1 = flex.double([float(x) for x in fields1[4:]])
        rho2 = flex.double([float(x) for x in fields2[4:]])
        cc = flex.linear_correlation(x=rho1, y=rho2).coefficient()
        assert (cc >= 0.99), cc
Example No. 43
    def is_refinement_needed(self, peptide_isel, sites_cart, atoms, cc_limit):
        from scitbx.array_family import flex

        for i_seq in peptide_isel:
            if not atoms[i_seq].element.strip().lower() in ["h", "d"]:
                xyz = sites_cart[i_seq]
                sel_map = self.select(sites_cart=flex.vec3_double([xyz]))
                m1 = self.target_map_data.select(sel_map)
                m2 = self.model_map_data.select(sel_map)
                cc = flex.linear_correlation(x=m1, y=m2).coefficient()
                if cc < cc_limit:
                    return True
        return False
Example No. 44
def test_correlation_coefficient_accumulator():
    x = flex.double((1, 2, 3))
    cc_accumulator = CorrelationCoefficientAccumulator(x, x)
    corr = flex.linear_correlation(x, x)
    assert cc_accumulator.coefficient() == pytest.approx(corr.coefficient())
    assert cc_accumulator.n() == x.size()
    assert cc_accumulator.coefficient() == pytest.approx(1.0)

    # compare with flex.linear_correlation()
    n = 100
    x = flex.random_double(n)
    y = flex.random_double(n)
    cc_accumulator = CorrelationCoefficientAccumulator(x, y)
    corr = flex.linear_correlation(x, y)
    assert cc_accumulator.coefficient() == pytest.approx(corr.coefficient())
    assert cc_accumulator.n() == x.size()

    # test CorrelationCoefficientAccumulator.accumulate()
    x_all = x
    y_all = y
    x_ = flex.random_double(n)
    y_ = flex.random_double(n)
    cc_accumulator.accumulate(x_, y_)
    x_all.extend(x_)
    y_all.extend(y_)
    corr = flex.linear_correlation(x_all, y_all)
    assert cc_accumulator.coefficient() == pytest.approx(corr.coefficient())
    assert cc_accumulator.n() == x_all.size()

    # test CorrelationCoefficientAccumulator += other
    x_ = flex.random_double(n)
    y_ = flex.random_double(n)
    cc_accumulator += CorrelationCoefficientAccumulator(x_, y_)
    x_all.extend(x_)
    y_all.extend(y_)
    corr = flex.linear_correlation(x_all, y_all)
    assert cc_accumulator.coefficient() == pytest.approx(corr.coefficient())
    assert cc_accumulator.n() == x_all.size()
Example No. 45
    def set_plot(self, x, y, x_label, y_label):
        from scitbx.array_family import flex

        self.figure.clear()
        ax = self.figure.add_subplot(111)
        ax.plot(x, y, ".")
        ax.set_xlabel(x_label)
        ax.set_ylabel(y_label)
        ax.grid(True, color="0.5")
        c = flex.linear_correlation(flex.double(x), flex.double(y))
        cc = c.coefficient()
        ax.set_title("%s vs. %s (%d structures, CC = %.3f)" % (x_label, y_label, len(x), cc))
        self.canvas.draw()
        # self.parent.statusbar.SetStatusText("Correlation coefficient (CC): %.3f" %
        #  cc)
        self.parent.Refresh()
Example No. 46
def exercise():
  mt = flex.mersenne_twister(seed=0)
  sz = 12
  for i_trial in xrange(10):
    x = mt.random_double(size=sz)*5-1
    y = mt.random_double(size=sz)*3-1
    for i_w,w in enumerate([flex.double(sz, 1), mt.random_double(size=sz)*7]):
      cc, d_cc, d2_cc = weighted_correlation(
        w, x, y, derivatives_wrt_y_depth=2)
      if (i_w == 0):
        cc_w1 = flex.linear_correlation(x, y).coefficient()
        assert approx_equal(cc, cc_w1)
      d_cc_fd = finite_difference_derivatives(w, x, y, depth=1)
      assert approx_equal(d_cc, d_cc_fd)
      d2_cc_fd = finite_difference_derivatives(w, x, y, depth=2)
      assert approx_equal(d2_cc, d2_cc_fd)
Example No. 47
 def set_plot(self, x, y, x_label, y_label):
     from scitbx.array_family import flex
     self.figure.clear()
     ax = self.figure.add_subplot(111)
     ax.plot(x, y, '.')
     ax.set_xlabel(x_label)
     ax.set_ylabel(y_label)
     ax.grid(True, color="0.5")
     c = flex.linear_correlation(flex.double(x), flex.double(y))
     cc = c.coefficient()
     ax.set_title("%s vs. %s (%d structures, CC = %.3f)" %
                  (x_label, y_label, len(x), cc))
     self.canvas.draw()
     #self.parent.statusbar.SetStatusText("Correlation coefficient (CC): %.3f" %
     #  cc)
     self.parent.Refresh()
Example No. 48
  def CC(self, beam_center):
    quad = self.tiling[4*self.sensor[0]:4+4*self.sensor[0]]

    asic = self.image.linearintdata.matrix_copy_block(quad[0],quad[1],
                                    quad[2]-quad[0],quad[3]-quad[1])

    #npy = asic.as_numpy_array()
    #from matplotlib import pyplot as plt
    #plt.imshow(npy, cmap="hot")
    #plt.show()

    asci_origin = col((float(quad[0]),float(quad[1])))

    rot45 = sqr((sin(pi/4.),-cos(pi/4.),cos(pi/4.),sin(pi/4.)))

    from xfel.metrology.legacy_scale import quadrant_self_correlation
    min_value = self.image.get_detector()[0].get_trusted_range()[0]
    REF,ROT = quadrant_self_correlation(asic,asci_origin,beam_center,rot45,min_value)
    CCRR = flex.linear_correlation(REF,ROT)

    """initial python implementation
    #rot_asic = flex.double(asic.accessor())
    F0,F1 = asic.focus()

    ref_data = flex.double()
    rot_data = flex.double()
    constant = rot45*(asci_origin - beam_center) +beam_center - asci_origin
    for xcoord in xrange(quad[2]-quad[0]):
      for ycoord in xrange(quad[3]-quad[1]):

        acoord = col((float(xcoord),float(ycoord)))
        #prime = rot45*(acoord + asci_origin - beam_center) + beam_center - asci_origin
        prime = rot45*acoord + constant
        prime = (int(round(prime[0],0)),int(round(prime[1],0)))
        if 0<=prime[0]<F0 and 0<=prime[1]<F1:
          #rot_asic[(xcoord,ycoord)]=asic[prime]

          ref_data.append(asic[(xcoord,ycoord)])
          rot_data.append(asic[prime])
    CC = flex.linear_correlation(ref_data,rot_data)

    print "Correlation_coefficient %7.4f %7.4f"%(CC.coefficient(),CCRR.coefficient())
    """

    return CCRR.coefficient()
Example No. 49
def exercise () :
  pdb_file = "tmp_ringer.pdb"
  mtz_file = "tmp_ringer.mtz"
  open(pdb_file, "w").write(tst_build_alt_confs.pdb_raw)
  cmd = " ".join([
    "phenix.fmodel",
    pdb_file,
    "high_resolution=2.0",
    "type=real",
    "r_free_flags_fraction=0.1",
    "random_seed=12345",
    "label=F",
    "output.file_name=%s" % mtz_file,
  ])
  print cmd
  easy_run.call(cmd)
  result = easy_run.fully_buffered(
    "phenix.maps \"%s\" \"%s\" output.prefix=tmp_ringer" %
    (pdb_file, mtz_file)).raise_if_errors()
  assert (result.return_code == 0)
  result = easy_run.fully_buffered(
    "mmtbx.ringer \"%s\" tmp_ringer_map_coeffs.mtz" % pdb_file).raise_if_errors()
  _lines1 = open("tmp_ringer_ringer.csv").read().splitlines()
  lines1 = []
  for line in _lines1 :
    if ("2mFo-DFc" in line) :
      lines1.append(line)
  os.remove("tmp_ringer_ringer.csv")
  assert (result.return_code == 0)
  # Now with ccp4 map as input
  result2 = easy_run.fully_buffered(
    "phenix.mtz2map \"%s\" tmp_ringer_map_coeffs.mtz" % pdb_file)
  assert (result2.return_code == 0)
  result3 = easy_run.fully_buffered(
    "mmtbx.ringer \"%s\" tmp_ringer_map_coeffs_2mFo-DFc.ccp4" % pdb_file)
  assert (result3.return_code == 0)
  lines2 = open("tmp_ringer_ringer.csv").read().splitlines()
  assert len(lines1) == len(lines2)
  for line1, line2 in zip(lines1, lines2) :
    fields1 = line1.split(",")
    fields2 = line2.split(",")
    rho1 = flex.double([ float(x) for x in fields1[4:] ])
    rho2 = flex.double([ float(x) for x in fields2[4:] ])
    cc = flex.linear_correlation(x=rho1, y=rho2).coefficient()
    assert (cc >= 0.99), cc
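
The per-line comparison at the end boils down to the small pattern below: take the numeric tail of each comma-separated row, wrap it in flex.double, and correlate. The two rows here are invented just to show the field layout the test assumes.

from scitbx.array_family import flex

line1 = "A,1,ALA,chi1,0.10,0.20,0.35,0.50,0.40"
line2 = "A,1,ALA,chi1,0.11,0.19,0.36,0.49,0.41"
rho1 = flex.double([float(x) for x in line1.split(",")[4:]])
rho2 = flex.double([float(x) for x in line2.split(",")[4:]])
cc = flex.linear_correlation(x=rho1, y=rho2).coefficient()
assert cc >= 0.99, cc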
Exemplo n.º 50
0
def show_i_calc_reindexing_correlations(i_calc, reindexing_assistant):
  assert i_calc.indices().all_eq(reindexing_assistant.miller_indices)
  assert i_calc.space_group_info().type().number() == 1
  assert i_calc.anomalous_flag()
  from scitbx.array_family import flex
  print "I-calc reindexing correlations:"
  for cb_op,inv_perm in zip(
        reindexing_assistant.cb_ops,
        reindexing_assistant.inv_perms):
    i_calc_cb = i_calc.change_basis(cb_op)
    i_calc_perm = i_calc_cb.select(inv_perm)
    assert i_calc_perm.indices().all_eq(i_calc.indices())
    cc = flex.linear_correlation(
      i_calc.data(),
      i_calc_perm.data()).coefficient()
    r1 = i_calc.f_sq_as_f().r1_factor(
      other=i_calc_perm.f_sq_as_f(), scale_factor=libtbx.Auto)
    print "  %-12s  %8.5f (r1: %.5f)" % (cb_op.c().r().as_hkl(), cc, r1)
  print
Exemplo n.º 51
0
 def __init__ (self,
     fmodel,
     ligands,
     params,
     log) :
   from cctbx import maptbx
   from scitbx.array_family import flex
   map_helper = fmodel.electron_density_map()
   self.two_fofc_map_coeffs = map_helper.map_coefficients("2mFo-DFc")
   self.fofc_map_coeffs = map_helper.map_coefficients("mFo-DFc")
   fft_map = self.two_fofc_map_coeffs.fft_map(resolution_factor=0.25)
   fft_map.apply_sigma_scaling()
   fcalc = map_helper.map_coefficients("Fc")
   fcalc_map = fcalc.fft_map(resolution_factor=0.25)
   fcalc_map.apply_sigma_scaling()
   real_map = fft_map.real_map()
   fcalc_real_map = fcalc_map.real_map()
   final_cc = []
   for k, ligand in enumerate(ligands) :
     atoms = ligand.atoms()
     sites = flex.vec3_double()
     radii = flex.double()
     for atom in atoms :
       if (not atom.element.strip() in ["H","D"]) :
         sites.append(atom.xyz)
         radii.append(1.5)
     sel = maptbx.grid_indices_around_sites(
       unit_cell  = self.two_fofc_map_coeffs.unit_cell(),
       fft_n_real = real_map.focus(),
       fft_m_real = real_map.all(),
       sites_cart = sites,
       site_radii = radii)
     m1 = real_map.select(sel)
     m2 = fcalc_real_map.select(sel)
     cc = flex.linear_correlation(x=m1, y=m2).coefficient()
     final_cc.append(cc)
     print >> log, "  Ligand %d: CC = %5.3f" % (k+1, cc)
   print >> log, ""
   self.final_cc = final_cc
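
The selection-plus-correlation pattern above (grid points near chosen sites, then CC between two maps over that selection) also appears in a later example; here is a reduced, self-contained sketch of it. The unit cell, grid size, site, radius and noise level are arbitrary stand-ins rather than real map data.

from cctbx import maptbx, uctbx
from scitbx.array_family import flex

uc = uctbx.unit_cell((10, 10, 10, 90, 90, 90))
n_real = (20, 20, 20)
n_points = n_real[0] * n_real[1] * n_real[2]
mt = flex.mersenne_twister(seed=0)
map1 = mt.random_double(size=n_points)
map2 = map1 + (mt.random_double(size=n_points) - 0.5) * 0.05
map1.reshape(flex.grid(n_real))
map2.reshape(flex.grid(n_real))
sel = maptbx.grid_indices_around_sites(
  unit_cell  = uc,
  fft_n_real = map1.focus(),
  fft_m_real = map1.all(),
  sites_cart = flex.vec3_double([(5.0, 5.0, 5.0)]),
  site_radii = flex.double([1.5]))
cc = flex.linear_correlation(x=map1.select(sel), y=map2.select(sel)).coefficient()
assert cc > 0.9, cc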
Exemplo n.º 52
0
def find_delta(rho_map, tol):
  """ Find delta as hinted at in fig. 1 of ref. [1] in module charge_flipping """
  rho = rho_map.real_map_unpadded().as_1d()
  max_rho = flex.max(rho)
  rho /= max_rho
  sorting = flex.sort_permutation(rho)
  sorted_rho = rho.select(sorting)
  n = len(sorted_rho)
  p,q = n//4, 3*n//4
  indexes = flex.double_range(p,q)
  values = sorted_rho[p:q]
  c = flex.linear_correlation(indexes, values)
  assert c.is_well_defined() and c.coefficient() > 0.99
  r = flex.linear_regression(indexes, values)
  a,b = r.y_intercept(), r.slope()
  deviation = flex.abs(a + b*flex.double_range(n) - sorted_rho)
  non_linear_sel = deviation > tol
  low = flex.first_index(non_linear_sel, False)
  high = flex.last_index(non_linear_sel, False)
  assert non_linear_sel[low:high].count(False)/(high-low+1) > 0.99
  assert sorted_rho[low] < 0 and sorted_rho[high] > 0
  return min(sorted_rho[high], -sorted_rho[low]), max_rho
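
A minimal sketch of the middle-quartile line fit that find_delta relies on, using synthetic data: sort the values, regress the central half against its index, and confirm the fit is essentially linear there. The uniform random values below are stand-ins for normalized map densities.

from scitbx.array_family import flex

mt = flex.mersenne_twister(seed=0)
rho = mt.random_double(size=1000) * 2 - 1   # stand-in for normalized map values
sorted_rho = rho.select(flex.sort_permutation(rho))
n = sorted_rho.size()
p, q = n//4, 3*n//4
indexes = flex.double_range(p, q)
values = sorted_rho[p:q]
c = flex.linear_correlation(indexes, values)
assert c.is_well_defined()
assert c.coefficient() > 0.99   # central half of sorted uniform values is ~linear
r = flex.linear_regression(indexes, values)
a, b = r.y_intercept(), r.slope()
assert b > 0   # sorted values increase, so the fitted slope is positive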
Exemplo n.º 53
0
def good_atoms_selection(
      crystal_gridding,
      map_coeffs,
      xray_structure):
  #XXX copy from model_missing_reflections map_tools.py, consolidate later
  #XXX Also look for similar crap in f_model.py
  fft_map = miller.fft_map(
    crystal_gridding     = crystal_gridding,
    fourier_coefficients = map_coeffs)
  fft_map.apply_sigma_scaling()
  map_data = fft_map.real_map_unpadded()
  rho_atoms = flex.double()
  for site_frac in xray_structure.sites_frac():
    rho_atoms.append(map_data.eight_point_interpolation(site_frac))
  #rho_mean = flex.mean_default(rho_atoms.select(rho_atoms>1.0), 1.0)
  sel_exclude = rho_atoms < 1.0 # XXX ??? TRY 0.5!
  sites_cart = xray_structure.sites_cart()
  #
  f_calc = map_coeffs.structure_factors_from_scatterers(
    xray_structure = xray_structure).f_calc()
  fft_map = miller.fft_map(
    crystal_gridding     = crystal_gridding,
    fourier_coefficients = f_calc)
  fft_map.apply_sigma_scaling()
  map_data2 = fft_map.real_map_unpadded()
  #
  hd_sel = xray_structure.hd_selection()
  for i_seq, site_cart in enumerate(sites_cart):
    selection = maptbx.grid_indices_around_sites(
      unit_cell  = map_coeffs.unit_cell(),
      fft_n_real = map_data.focus(),
      fft_m_real = map_data.all(),
      sites_cart = flex.vec3_double([site_cart]),
      site_radii = flex.double([1.5]))
    cc = flex.linear_correlation(x=map_data.select(selection),
      y=map_data2.select(selection)).coefficient()
    if(cc<0.7 or hd_sel[i_seq]): sel_exclude[i_seq] = True
  return ~sel_exclude
Exemplo n.º 54
0
  def CC(self, beam_center):
    detector = self.image.get_detector()
    angle = [0,3,2,1][self.i_quad] #

    asic = self.image.get_raw_data()[list(detector.get_names()).index(self.panel.get_name())].matrix_rot90(angle)

    p_w, p_h = self.panel.get_image_size()
    b = [self.panel.get_pixel_lab_coord((0    ,0    )),
         self.panel.get_pixel_lab_coord((p_w-1,0    )),
         self.panel.get_pixel_lab_coord((p_w-1,p_h-1)),
         self.panel.get_pixel_lab_coord((0    ,p_h-1))]
    asic_origin = col(self.panel.millimeter_to_pixel((min([p[0] for p in b]),
                                                      min([p[1] for p in b]))))


    rot45 = sqr((sin(pi/4.),-cos(pi/4.),cos(pi/4.),sin(pi/4.)))

    from xfel.metrology.legacy_scale import quadrant_self_correlation
    min_value = self.image.get_detector()[0].get_trusted_range()[0]
    REF,ROT = quadrant_self_correlation(asic.as_double(),asic_origin,beam_center,rot45,min_value)
    CCRR = flex.linear_correlation(REF,ROT)

    return CCRR.coefficient()
Exemplo n.º 55
0
def run(file_name = "tst_tls_as_xyz.pdb"):
  of = open(file_name,"w")
  print >> of, pdb_str
  of.close()
  uc = iotbx.pdb.input(file_name=file_name).crystal_symmetry().unit_cell()
  #for n in range(10,100,10)+range(100,1000,100)+range(1000,10001,1000)+[15000,20000]:
  for n in [1000,]:
    easy_run.call("phenix.tls_as_xyz %s n_models=%s > tst_tls_as_xyz.log"%(
      file_name,str(n)))
    for i in [0,1]:
      u1 = iotbx.pdb.input(file_name=
        "tst_tls_as_xyz_u_from_ensemble_%s.pdb"%str(i)
        ).xray_structure_simple().scatterers().extract_u_cart(uc)
      u2 = iotbx.pdb.input(file_name=
        "tst_tls_as_xyz_u_from_tls_%s.pdb"%str(i)
        ).xray_structure_simple().scatterers().extract_u_cart(uc)

    u1, u2 = u1.as_double(), u2.as_double()
    cc = flex.linear_correlation(x=u1, y=u2).coefficient()
    r = flex.sum(flex.abs(flex.abs(u1)-flex.abs(u2)))/\
        flex.sum(flex.abs(flex.abs(u1)+flex.abs(u2)))*2
    print "%5d %6.4f %6.4f"%(n, cc, r)
  assert cc>0.99, cc
  assert r<0.06, r
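
The two agreement measures used above, CC and the symmetric R-like residual, can be exercised on made-up numbers; the sketch below only illustrates the formulas, not the TLS workflow.

from scitbx.array_family import flex

mt = flex.mersenne_twister(seed=0)
u1 = mt.random_double(size=100)
u2 = u1 + (mt.random_double(size=100) - 0.5) * 0.02   # small perturbation
cc = flex.linear_correlation(x=u1, y=u2).coefficient()
r = flex.sum(flex.abs(flex.abs(u1)-flex.abs(u2)))/\
    flex.sum(flex.abs(flex.abs(u1)+flex.abs(u2)))*2
assert cc > 0.99, cc
assert r < 0.06, r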
Exemplo n.º 56
0
def run_call_back(flags, space_group_info):
  structure = set_up_random_structure(space_group_info)
  if (flags.Verbose):
    structure.scattering_type_registry().show()
  rho_at_sites_from_phenix_fft_map = map_value_at_sites(structure)
  rho_at_sites_calculated = map_value_at_sites_calculated(structure)
  for scatterer,rf,rc in zip(
        structure.scatterers(),
        rho_at_sites_from_phenix_fft_map,
        rho_at_sites_calculated):
     if (flags.Verbose):
       print numstr(scatterer.site), "%.3f" % rf, "%.3f" % rc
  from scitbx.array_family import flex
  corr = flex.linear_correlation(
    flex.double(rho_at_sites_from_phenix_fft_map),
    flex.double(rho_at_sites_calculated))
  assert corr.is_well_defined()
  cc = corr.coefficient()
  if (flags.Verbose):
    print "Correlation coefficient:", cc
  from libtbx.test_utils import is_above_limit
  assert is_above_limit(value=cc, limit=0.99)
  if (flags.Verbose):
    print
Exemplo n.º 57
0
def exercise(space_group_info, redundancy_counter=0):
  n_real = (12,12,12)
  miller_max = (2,2,2)
  gt = maptbx.grid_tags(n_real)
  uc = space_group_info.any_compatible_unit_cell(volume=1000)
  fl = sgtbx.search_symmetry_flags(use_space_group_symmetry=True)
  gt.build(space_group_info.type(), fl)
  fft = fftpack.real_to_complex_3d(n_real)
  map0 = flex.double(flex.grid(fft.m_real()).set_focus(fft.n_real()), 0)
  weight_map = map0.deep_copy()
  map = map0.deep_copy()
  ta = gt.tag_array()
  order_z = space_group_info.group().order_z()
  problems_expected = (redundancy_counter != 0)
  for ijk in flex.nested_loop(n_real):
    t = ta[ijk]
    if (t < 0):
      xyz = [i/n for i,n in zip(ijk, n_real)]
      ss = sgtbx.site_symmetry(
        unit_cell=uc,
        space_group=space_group_info.group(),
        original_site=xyz,
        min_distance_sym_equiv=1e-5)
      m = space_group_info.group().multiplicity(
        site=boost.rational.vector(ijk, n_real))
      assert m == ss.multiplicity()
      w = m / order_z
      weight_map[ijk] = w
      map[ijk] = w
    elif (redundancy_counter != 0):
      redundancy_counter -= 1
      ijk_asu = n_dim_index_from_one_dim(i1d=t, sizes=n_real)
      assert ta.accessor()(ijk_asu) == t
      map[ijk] = map[ijk_asu]
  sf_map = fft.forward(map)
  del map
  mi = miller.index_generator(
    space_group_info.type(), False, miller_max).to_array()
  assert mi.size() != 0
  from_map = maptbx.structure_factors.from_map(
    space_group=space_group_info.group(),
    anomalous_flag=False,
    miller_indices=mi,
    complex_map=sf_map,
    conjugate_flag=True)
  sf = [iround(abs(f)) for f in from_map.data()]
  if (sf != [0]*len(sf)):
    assert problems_expected
    return
  else:
    assert not problems_expected
  #
  map_p1 = map0.deep_copy()
  map_sw = map0.deep_copy()
  for ijk in flex.nested_loop(n_real):
    t = ta[ijk]
    if (t < 0):
      v = random.random()*2-1
      map_p1[ijk] = v
      map_sw[ijk] = v * weight_map[ijk]
    else:
      ijk_asu = n_dim_index_from_one_dim(i1d=t, sizes=n_real)
      assert ta.accessor()(ijk_asu) == t
      assert map_p1[ijk_asu] != 0
      map_p1[ijk] = map_p1[ijk_asu]
  #
  # fft followed by symmetry summation in reciprocal space
  sf_map_sw = fft.forward(map_sw)
  del map_sw
  sf_sw = maptbx.structure_factors.from_map(
    space_group=space_group_info.group(),
    anomalous_flag=False,
    miller_indices=mi,
    complex_map=sf_map_sw,
    conjugate_flag=True).data()
  del sf_map_sw
  #
  # symmetry expansion in real space (done above already) followed fft
  sf_map_p1 = fft.forward(map_p1)
  del map_p1
  sf_p1 = maptbx.structure_factors.from_map(
    space_group=sgtbx.space_group(),
    anomalous_flag=False,
    miller_indices=mi,
    complex_map=sf_map_p1,
    conjugate_flag=True).data()
  del sf_map_p1
  #
  corr = flex.linear_correlation(x=flex.abs(sf_sw), y=flex.abs(sf_p1))
  assert corr.is_well_defined()
  assert approx_equal(corr.coefficient(), 1)
Exemplo n.º 58
0
  def scaling_metrics(self, other):
    # Read reflections.
    # Some requirements:
    #   1) Fobs scaled to Fcalc, not the other way around.
    #   2) Ability to make a plot of the two scaled sets.
    #   3) Set the number of bins.
    #   4) Understand and print out the per-bin scaling factor.
    #   5) Print an overall stats line at the end of the table.
    #   6) Choose one or the other binning.
    """1) scaling and analysis are separate functions"""

    #f_obs, r_free_flags = f_obs.common_sets(r_free_flags)
    f_obs = other
    #r_free_flags = r_free_flags.array(data=r_free_flags.data()==1)
    # Read model

    # Get Fmodel
    fmodel = mmtbx.f_model.manager(
      f_obs          = f_obs,
      #r_free_flags   = r_free_flags,
      xray_structure = self.xray_structure)
    # Do anisotropic overall scaling, bulk-solvent modeling, outlier rejection
    #fmodel.update_all_scales()
    print "r_work, r_free: %6.4f, %6.4f"%(fmodel.r_work(), fmodel.r_free())
    # Print statistics in resolution bins
    f_model = fmodel.f_model_scaled_with_k1()
    bin_selections = fmodel.f_obs().log_binning()
    dsd = fmodel.f_obs().d_spacings().data()
    print "Bin# Resolution    Nref Cmpl  Rw     CC"
    fmt="%2d: %6.3f-%-6.3f %5d %5.3f %6.4f %6.4f"
    for i_bin, sel in enumerate(bin_selections):
      d           = dsd.select(sel)
      d_min       = flex.min(d)
      d_max       = flex.max(d)
      fmodel_sel  = fmodel.select(sel)
      n           = d.size()
      f_obs_sel   = fmodel.f_obs().select(sel)
      f_model_sel = abs(f_model.select(sel)).data()
      cmpl        = f_obs_sel.completeness(d_max=d_max)
      r_work      = fmodel_sel.r_work()
      cc          = flex.linear_correlation(x=f_obs_sel.data(),
                    y=f_model_sel).coefficient()
      print fmt%(i_bin, d_max, d_min, n, cmpl, r_work, cc)
    # Alternative binning
    print
    print "Bin# Resolution    Nref Cmpl  Rw     CC"
    fmodel.f_obs().setup_binner(reflections_per_bin = 2500)
    f_model.use_binning_of(fmodel.f_obs())
    for i_bin in fmodel.f_obs().binner().range_used():
      sel = fmodel.f_obs().binner().selection(i_bin)
      d           = dsd.select(sel)
      d_min       = flex.min(d)
      d_max       = flex.max(d)
      fmodel_sel  = fmodel.select(sel)
      n           = d.size()
      f_obs_sel   = fmodel.f_obs().select(sel)
      f_model_sel = abs(f_model.select(sel)).data()
      cmpl        = f_obs_sel.completeness(d_max=d_max)
      r_work      = fmodel_sel.r_work()
      cc          = flex.linear_correlation(x=f_obs_sel.data(),
                    y=f_model_sel).coefficient()
      print fmt%(i_bin, d_max, d_min, n, cmpl, r_work, cc)
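
Stripped of the fmodel machinery, the per-bin loop amounts to the sketch below: sort synthetic (d-spacing, F-obs, F-model) triples by resolution, cut them into equal-count bins, and report the CC per bin. All numbers are invented; the print formatting follows the Python 2 style of the surrounding snippets.

from scitbx.array_family import flex

mt = flex.mersenne_twister(seed=0)
n_ref, n_bins = 10000, 8
d = mt.random_double(size=n_ref) * 3 + 1.5            # fake d-spacings, 1.5-4.5 A
f_obs = mt.random_double(size=n_ref) * 100
f_model = f_obs * 0.95 + mt.random_double(size=n_ref) * 5
perm = flex.sort_permutation(d, reverse=True)         # high d (low resolution) first
d, f_obs, f_model = d.select(perm), f_obs.select(perm), f_model.select(perm)
per_bin = n_ref // n_bins
print "Bin# Resolution    Nref   CC"
for i_bin in range(n_bins):
  lo, hi = i_bin * per_bin, (i_bin + 1) * per_bin
  cc = flex.linear_correlation(x=f_obs[lo:hi], y=f_model[lo:hi]).coefficient()
  print "%2d: %6.3f-%-6.3f %5d %6.4f" % (i_bin, d[lo], d[hi-1], hi - lo, cc)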
Exemplo n.º 59
0
def run (args, out=sys.stdout) :
  cmdline = iotbx.phil.process_command_line_with_files(
    args=args,
    master_phil_string=master_phil_str,
    pdb_file_def="model",
    map_file_def="map",
    usage_string="""\
em_rscc.py model.pdb map.ccp4

%s""" % __doc__)
  params = cmdline.work.extract()
  assert (not None in [params.model, params.map])
  pdb_in = cmdline.get_file(params.model).file_object
  m = cmdline.get_file(params.map).file_object
  print >> out, "Input electron density map:"
  print >> out, "m.all()   :", m.data.all()
  print >> out, "m.focus() :", m.data.focus()
  print >> out, "m.origin():", m.data.origin()
  print >> out, "m.nd()    :", m.data.nd()
  print >> out, "m.size()  :", m.data.size()
  print >> out, "m.focus_size_1d():", m.data.focus_size_1d()
  print >> out, "m.is_0_based()   :", m.data.is_0_based()
  print >> out, "map: min/max/mean:", flex.min(m.data), flex.max(m.data), flex.mean(m.data)
  print >> out, "unit cell:", m.unit_cell_parameters
  symm = crystal.symmetry(
    space_group_symbol="P1",
    unit_cell=m.unit_cell_parameters)
  xrs = pdb_in.input.xray_structure_simple(crystal_symmetry=symm)
  print >> out, "Setting up electron scattering table (d_min=%g)" % params.d_min
  xrs.scattering_type_registry(
    d_min=params.d_min,
    table="electron")
  fc = xrs.structure_factors(d_min=params.d_min).f_calc()
  cg = maptbx.crystal_gridding(
    unit_cell=symm.unit_cell(),
    space_group_info=symm.space_group_info(),
    pre_determined_n_real=m.data.all())
  fc_map = fc.fft_map(
    crystal_gridding=cg).apply_sigma_scaling().real_map_unpadded()
  assert (fc_map.all() == fc_map.focus() == m.data.all())
  em_data = m.data.as_double()
  unit_cell_for_interpolation = m.grid_unit_cell()
  frac_matrix = unit_cell_for_interpolation.fractionalization_matrix()
  sites_cart = xrs.sites_cart()
  sites_frac = xrs.sites_frac()
  print >> out, "PER-RESIDUE CORRELATION:"
  for chain in pdb_in.hierarchy.only_model().chains() :
    for residue_group in chain.residue_groups() :
      i_seqs = residue_group.atoms().extract_i_seq()
      values_em = flex.double()
      values_fc = flex.double()
      for i_seq in i_seqs :
        rho_em = maptbx.non_crystallographic_eight_point_interpolation(
          map=em_data,
          gridding_matrix=frac_matrix,
          site_cart=sites_cart[i_seq])
        rho_fc = fc_map.eight_point_interpolation(sites_frac[i_seq])
        values_em.append(rho_em)
        values_fc.append(rho_fc)
      cc = flex.linear_correlation(x=values_em, y=values_fc).coefficient()
      print >> out, residue_group.id_str(), cc
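
A toy version of the interpolate-and-correlate step above, using two synthetic 3-D maps on a small grid instead of an EM map and an F-calc map; the grid size, sampling sites and noise level are arbitrary.

from scitbx.array_family import flex

n = (8, 8, 8)
n_points = n[0] * n[1] * n[2]
mt = flex.mersenne_twister(seed=0)
map1 = mt.random_double(size=n_points)
map2 = map1 + (mt.random_double(size=n_points) - 0.5) * 0.01
map1.reshape(flex.grid(n))
map2.reshape(flex.grid(n))
values1 = flex.double()
values2 = flex.double()
for i_site in range(20):
  site_frac = tuple(mt.random_double(size=3))
  values1.append(map1.eight_point_interpolation(site_frac))
  values2.append(map2.eight_point_interpolation(site_frac))
cc = flex.linear_correlation(x=values1, y=values2).coefficient()
assert cc > 0.9, cc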
Exemplo n.º 60
0
def process_core(work_params, i_calc, reindexing_assistant, image_mdls):
  show_i_calc_reindexing_correlations(i_calc, reindexing_assistant)
  if (work_params.index_and_integrate):
    input_im0_i_perm = None
  else:
    input_im0_i_perm = image_mdls.array[0].backup.i_perm
  if (work_params.check_refine_uc_cr):
    check_refine_uc_cr(work_params, image_mdls)
  scales_input = image_mdls.extract_scales()
  image_mdls.erase_scales()
  if (work_params.index_and_integrate):
    image_mdls.erase_spot_models()
    index_and_integrate(work_params, image_mdls)
    show_vm_info("After index_and_integrate():")
    isel = image_mdls.iselection_entries_with_spot_model()
    print "Removing %d image models for which" \
      " indexing or integration failed." % (image_mdls.size() - isel.size())
    scales_input = scales_input.select(isel)
    image_mdls = image_mdls.remove_all_entries_without_spot_model()
    print
  image_mdls.normalize_spot_intensities(target_mean=100)
  image_mdls.check_i_obs_vs_backup(work_params)
  image_mdls.reset_miller_image_map()
  image_mdls.miller_image_map.show_images_per_miller_index()
  image_mdls.reset_partialities(work_params)
  if (work_params.pickle_image_models and work_params.index_and_integrate):
    from libtbx import easy_pickle
    file_name = "%s_image_mdls_index_and_integrate.pickle" % str(
      work_params.base36_timestamp)
    easy_pickle.dump(
      file_name=file_name,
      obj=(work_params, image_mdls, reindexing_assistant))
    show_vm_info("After %s:" % file_name)
  if (work_params.write_image_models_to_mtz_files):
    image_mdls.write_to_mtz_files(common_unit_cell=work_params.unit_cell)
    show_vm_info("After write_image_models_to_mtz_files:")
  usables = build_usables(work_params, reindexing_assistant, image_mdls)
  image_cluster = build_image_cluster(
    work_params, reindexing_assistant, image_mdls, usables)
  show_vm_info("After build_image_cluster():")
  check_image_cluster(
    work_params, i_calc, reindexing_assistant, image_mdls,
    scales_input, image_cluster)
  cluster_scales = image_mdls.extract_scales()
  for im in image_mdls.array:
    im.reindex_in_place(reindexing_assistant)
  image_mdls.reset_miller_image_map()
  image_mdls.miller_image_map.show_images_per_miller_index()
  image_mdls.reset_partialities(work_params)
  from scitbx.array_family import flex
  def show_correlation_of_scales(assert_perfect=False):
    expected = scales_input / scales_input[0]
    estimated = image_mdls.extract_scales()
    print "Correlation of expected and estimated scales:"
    flex.linear_correlation(expected, estimated).show_summary(prefix="  ")
    print
    sys.stdout.flush()
    if (assert_perfect):
      from libtbx.test_utils import approx_equal
      assert approx_equal(estimated, expected)
  show_correlation_of_scales(
    assert_perfect=not work_params.index_and_integrate)
  indices, data = image_mdls.extract_estimated_i_obs(
    work_params.usable_partiality_threshold)
  i_obs_cluster = i_calc.customized_copy(indices=indices, data=data)
  refined_scales = None
  if (work_params.refine_scales.max_iterations in [None, 0]):
    print "refinement target: %.6g" % image_mdls.refinement_target(
      work_params.usable_partiality_threshold)
    print
  else:
    refined = refinery(work_params, image_mdls)
    refined.show_summary()
    show_correlation_of_scales()
    refined_scales = image_mdls.extract_scales()
  indices, data = image_mdls.extract_estimated_i_obs(
    work_params.usable_partiality_threshold)
  i_obs_est = i_calc.customized_copy(indices=indices, data=data)
  from libtbx import easy_pickle
  from libtbx import group_args
  easy_pickle.dump(
    file_name="%s_solver_results.pickle" % work_params.base36_timestamp,
    obj=group_args(
      work_params=work_params,
      i_calc=i_calc,
      reindexing_assistant=reindexing_assistant,
      scales_input=scales_input,
      cluster_scales=cluster_scales,
      refined_scales=refined_scales,
      i_obs_cluster=i_obs_cluster,
      i_obs_est=i_obs_est))
  print "Input I-calc:"
  i_calc.show_comprehensive_summary(prefix="  ")
  print
  print "Estimated I-obs:"
  i_obs_est.show_comprehensive_summary(prefix="  ")
  print
  if (i_obs_est.indices().size() > 2):
    if (input_im0_i_perm is not None):
      print "input_im0_i_perm:", input_im0_i_perm
      print
    print "Correlation of input and estimated I-obs:"
    cc_im0_i_perm = None
    best_cc = -2
    for i_perm,cb_op in enumerate(reindexing_assistant.cb_ops):
      c, e = i_calc.change_basis(cb_op).common_sets(i_obs_est)
      assert c.indices().size() == i_obs_est.indices().size()
      corr = flex.linear_correlation(c.data(), e.data())
      assert corr.is_well_defined()
      cc = corr.coefficient()
      if (best_cc < cc): best_cc = cc
      if (input_im0_i_perm is not None and i_perm == input_im0_i_perm):
        cc_im0_i_perm = cc
      r1 = c.f_sq_as_f().r1_factor(
        other=e.f_sq_as_f(), scale_factor=libtbx.Auto)
      print "  i_perm=%d: %8.5f (r1: %.5f)" % (i_perm, cc, r1)
    if (input_im0_i_perm is not None):
      assert cc_im0_i_perm is not None
      from libtbx.test_utils import approx_equal
      assert approx_equal(cc_im0_i_perm, 1)
    print "  Best correlation: %8.5f" % best_cc
    print
  return True