Code Example #1
# Assumed module-level context (not shown in this excerpt): pdb_str holds the
# test model as a PDB string, and percentile_cutoffs_inefficient is the
# reference implementation being compared against.
import iotbx.pdb
from cctbx import maptbx
from libtbx.test_utils import approx_equal

def exercise():
  xrs = iotbx.pdb.input(source_info=None, lines=pdb_str).xray_structure_simple()
  fc = xrs.structure_factors(d_min=1.5).f_calc()
  fft_map = fc.fft_map(resolution_factor=0.1)
  fft_map.apply_sigma_scaling()
  map_data = fft_map.real_map_unpadded()
  for vcp in [1,50,99]:
    for vcm in [1,50,99]:
      # The old, memory-inefficient way
      po, mo = percentile_cutoffs_inefficient(
        map_data                 = map_data,
        vol_cutoff_plus_percent  = vcp,
        vol_cutoff_minus_percent = vcm)
      # The new, memory-efficient way using maptbx.histogram
      hist = maptbx.histogram(map_data, n_bins=min(10000, map_data.size()))
      pn, mn = hist.get_percentile_cutoffs(
        map                      = map_data,
        vol_cutoff_plus_percent  = vcp,
        vol_cutoff_minus_percent = vcm)
      assert approx_equal(po,pn, 0.01)
      assert approx_equal(mo,mn, 0.01)
      rp = (map_data >= po).count(True)*100./map_data.size()
      rm = (map_data <= mo).count(True)*100./map_data.size()
      assert approx_equal(rp, vcp, 1.e-3)
      assert approx_equal(rm, vcm, 1.e-3)
      assert approx_equal(
        rp,
        (map_data >= pn).count(True)*100./map_data.size(), 0.1)
      assert approx_equal(
        rm,
        (map_data <= mn).count(True)*100./map_data.size(), 0.1)
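
For orientation, here is a hypothetical pure-Python sketch of the quantity the assertions above check (percentile_cutoffs_inefficient itself is not shown in this excerpt): the plus cutoff is the map value at or above which vol_cutoff_plus_percent of the grid points lie, and the minus cutoff is the value at or below which vol_cutoff_minus_percent lie. The sketch is exact only for distinct values; the binned histogram route is approximate, hence the looser 0.1 tolerance used for it in the test.

# Hypothetical reference implementation, assuming distinct map values.
def percentile_cutoffs_reference(values, vol_cutoff_plus_percent,
                                 vol_cutoff_minus_percent):
  v = sorted(values)
  n = len(v)
  # plus cutoff: vol_cutoff_plus_percent % of the points are >= it
  k_plus = int(round(n * vol_cutoff_plus_percent / 100.))
  cutoff_plus = v[n - k_plus]
  # minus cutoff: vol_cutoff_minus_percent % of the points are <= it
  k_minus = int(round(n * vol_cutoff_minus_percent / 100.))
  cutoff_minus = v[k_minus - 1]
  return cutoff_plus, cutoff_minus

# Self-check with 100 distinct values and 50/50 cutoffs.
vals = [float(i) for i in range(100)]
cp, cm = percentile_cutoffs_reference(vals, 50, 50)
assert sum(1 for x in vals if x >= cp) == 50
assert sum(1 for x in vals if x <= cm) == 50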
Code Example #2
# Assumed module-level context for this excerpt: master_phil, show_citation,
# validate_params, show_overall_statistics, create_statistics_dict,
# get_crystal_symmetry and get_d_min are defined elsewhere in the module.
import os
import sys
from cctbx import maptbx
from cctbx import miller
from cctbx.sgtbx import space_group_info
from iotbx import file_reader
from iotbx import phil
from libtbx.utils import Sorry
from scitbx.array_family import flex

def run(args, out=sys.stdout, validated=False):
    show_citation(out=out)
    if (len(args) == 0):
        master_phil.show(out=out)
        print('\nUsage: phenix.map_comparison <CCP4> <CCP4>\n',\
          '       phenix.map_comparison <CCP4> <MTZ> mtz_label_1=<label>\n',\
          '       phenix.map_comparison <MTZ 1> mtz_label_1=<label 1> <MTZ 2> mtz_label_2=<label 2>\n', file=out)
        sys.exit()

    # process arguments
    params = None
    input_attributes = ['map_1', 'mtz_1', 'map_2', 'mtz_2']
    try:  # automatic parsing
        params = phil.process_command_line_with_files(
            args=args, master_phil=master_phil).work.extract()
    except Exception:  # map_file_def only handles one map phil
        from libtbx.phil.command_line import argument_interpreter
        arg_int = argument_interpreter(master_phil=master_phil)
        command_line_args = list()
        map_files = list()
        for arg in args:
            if (os.path.isfile(arg)):
                map_files.append(arg)
            else:
                command_line_args.append(arg_int.process(arg))
        params = master_phil.fetch(sources=command_line_args).extract()

        # check if more files are necessary
        n_defined = 0
        for attribute in input_attributes:
            if (getattr(params.input, attribute) is not None):
                n_defined += 1

        # match files to the phil scope; stop once two inputs are defined
        for map_file in map_files:
            if (n_defined < 2):
                current_map = file_reader.any_file(map_file)
                if (current_map.file_type == 'ccp4_map'):
                    n_defined += 1
                    if (params.input.map_1 is None):
                        params.input.map_1 = map_file
                    elif (params.input.map_2 is None):
                        params.input.map_2 = map_file
                elif (current_map.file_type == 'hkl'):
                    n_defined += 1
                    if (params.input.mtz_1 is None):
                        params.input.mtz_1 = map_file
                    elif (params.input.mtz_2 is None):
                        params.input.mtz_2 = map_file
            else:
                print('WARNING: only the first two files are used', file=out)
                break

    # validate arguments (GUI sets validated to true, no need to run again)
    assert (params is not None)
    if (not validated):
        validate_params(params)

    # ---------------------------------------------------------------------------
    # check if maps need to be generated from mtz
    n_maps = 0
    maps = list()
    map_names = list()
    for attribute in input_attributes:
        filename = getattr(params.input, attribute)
        if (filename is not None):
            map_names.append(filename)
            current_map = file_reader.any_file(filename)
            maps.append(current_map)
            if (current_map.file_type == 'ccp4_map'):
                n_maps += 1

    # construct maps, if necessary
    crystal_gridding = None
    m1 = None
    m2 = None

    # 1 map, 1 mtz file
    if (n_maps == 1):
        for current_map in maps:
            if (current_map.file_type == 'ccp4_map'):
                uc = current_map.file_object.unit_cell()
                sg_info = space_group_info(
                    current_map.file_object.space_group_number)
                n_real = current_map.file_object.unit_cell_grid
                crystal_gridding = maptbx.crystal_gridding(
                    uc, space_group_info=sg_info, pre_determined_n_real=n_real)
                m1 = current_map.file_object.map_data()
        if (crystal_gridding is not None):
            label = None
            for attribute in [('mtz_1', 'mtz_label_1'),
                              ('mtz_2', 'mtz_label_2')]:
                filename = getattr(params.input, attribute[0])
                label = getattr(params.input, attribute[1])
                if ((filename is not None) and (label is not None)):
                    break
            # labels will match currently open mtz file
            for current_map in maps:
                if (current_map.file_type == 'hkl'):
                    m2 = miller.fft_map(
                        crystal_gridding=crystal_gridding,
                        fourier_coefficients=current_map.file_server.
                        get_miller_array(
                            label)).apply_sigma_scaling().real_map_unpadded()
        else:
            raise Sorry('Gridding is not defined.')

    # 2 mtz files
    elif (n_maps == 0):
        crystal_symmetry = get_crystal_symmetry(maps[0])
        d_min = min(get_d_min(maps[0]), get_d_min(maps[1]))
        crystal_gridding = maptbx.crystal_gridding(
            crystal_symmetry.unit_cell(),
            d_min=d_min,
            resolution_factor=params.options.resolution_factor,
            space_group_info=crystal_symmetry.space_group_info())
        m1 = miller.fft_map(
            crystal_gridding=crystal_gridding,
            fourier_coefficients=maps[0].file_server.get_miller_array(
                params.input.mtz_label_1)).apply_sigma_scaling(
                ).real_map_unpadded()
        m2 = miller.fft_map(
            crystal_gridding=crystal_gridding,
            fourier_coefficients=maps[1].file_server.get_miller_array(
                params.input.mtz_label_2)).apply_sigma_scaling(
                ).real_map_unpadded()

    # 2 maps
    else:
        m1 = maps[0].file_object.map_data()
        m2 = maps[1].file_object.map_data()

    # ---------------------------------------------------------------------------
    # analyze maps
    assert ((m1 is not None) and (m2 is not None))

    # show general statistics
    s1 = maptbx.more_statistics(m1)
    s2 = maptbx.more_statistics(m2)
    show_overall_statistics(out=out, s=s1, header="Map 1 (%s):" % map_names[0])
    show_overall_statistics(out=out, s=s2, header="Map 2 (%s):" % map_names[1])
    cc_input_maps = flex.linear_correlation(x=m1.as_1d(),
                                            y=m2.as_1d()).coefficient()
    print("CC, input maps: %6.4f" % cc_input_maps, file=out)

    # compute CCpeak
    cc_peaks = list()
    m1_he = maptbx.volume_scale(map=m1, n_bins=10000).map_data()
    m2_he = maptbx.volume_scale(map=m2, n_bins=10000).map_data()
    cc_quantile = flex.linear_correlation(x=m1_he.as_1d(),
                                          y=m2_he.as_1d()).coefficient()
    print("CC, quantile rank-scaled (histogram equalized) maps: %6.4f" % \
      cc_quantile, file=out)
    print("Peak correlation:", file=out)
    print("  cutoff  CCpeak", file=out)
    cutoffs = [i / 100.
               for i in range(1, 90)] + [i / 1000. for i in range(900, 1000)]
    for cutoff in cutoffs:
        cc_peak = maptbx.cc_peak(map_1=m1_he, map_2=m2_he, cutoff=cutoff)
        print("  %3.2f   %7.4f" % (cutoff, cc_peak), file=out)
        cc_peaks.append((cutoff, cc_peak))

    # compute discrepancy function (D-function)
    discrepancies = list()
    cutoffs = flex.double(cutoffs)
    df = maptbx.discrepancy_function(map_1=m1_he, map_2=m2_he, cutoffs=cutoffs)
    print("Discrepancy function:", file=out)
    print("  cutoff  D", file=out)
    for c, d in zip(cutoffs, df):
        print("  %3.2f   %7.4f" % (c, d), file=out)
        discrepancies.append((c, d))

    # compute and output histograms
    h1 = maptbx.histogram(map=m1, n_bins=10000)
    h2 = maptbx.histogram(map=m2, n_bins=10000)
    print("Map histograms:", file=out)
    print("Map 1 (%s)     Map 2 (%s)"%\
      (params.input.map_1,params.input.map_2), file=out)
    print("(map_value,cdf,frequency) <> (map_value,cdf,frequency)", file=out)
    for a1, c1, v1, a2, c2, v2 in zip(h1.arguments(), h1.c_values(),
                                      h1.values(), h2.arguments(),
                                      h2.c_values(), h2.values()):
        print("(%9.5f %9.5f %9.5f) <> (%9.5f %9.5f %9.5f)"%\
          (a1,c1,v1, a2,c2,v2), file=out)

    # store results
    s1_dict = create_statistics_dict(s=s1)
    s2_dict = create_statistics_dict(s=s2)
    results = dict()
    inputs = list()
    for attribute in input_attributes:
        filename = getattr(params.input, attribute)
        if (filename is not None):
            inputs.append(filename)
    assert (len(inputs) == 2)
    results['map_files'] = inputs
    results['map_statistics'] = (s1_dict, s2_dict)
    results['cc_input_maps'] = cc_input_maps
    results['cc_quantile'] = cc_quantile
    results['cc_peaks'] = cc_peaks
    results['discrepancies'] = discrepancies
    # h1 and h2 are maptbx.histogram objects (not dicts), so .values() below
    # returns the histogram frequencies rather than dict values.
    results['map_histograms'] = ((h1.arguments(), h1.c_values(), h1.values()),
                                 (h2.arguments(), h2.c_values(), h2.values()))

    return results
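
A minimal sketch of how such an entry point is typically dispatched when the module is run as a script; the __main__ guard is not part of the excerpt above, so the exact wiring shown here is an assumption.

if (__name__ == "__main__"):
    results = run(sys.argv[1:])
    # results is a plain dict; for example, results['cc_input_maps'] holds the
    # overall correlation coefficient between the two input maps.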
Code Example #3
# Python 2 variant of the previous example (print-statement syntax); the same
# module-level imports and helper functions are assumed.
def run(args, out=sys.stdout, validated=False):
  show_citation(out=out)
  if (len(args) == 0):
    master_phil.show(out=out)
    print >> out,\
      '\nUsage: phenix.map_comparison <CCP4> <CCP4>\n',\
      '       phenix.map_comparison <CCP4> <MTZ> mtz_label_1=<label>\n',\
      '       phenix.map_comparison <MTZ 1> mtz_label_1=<label 1> <MTZ 2> mtz_label_2=<label 2>\n'
    sys.exit()

  # process arguments
  params = None
  input_attributes = ['map_1', 'mtz_1', 'map_2', 'mtz_2']
  try: # automatic parsing
    params = phil.process_command_line_with_files(
      args=args, master_phil=master_phil).work.extract()
  except Exception: # map_file_def only handles one map phil
    from libtbx.phil.command_line import argument_interpreter
    arg_int = argument_interpreter(master_phil=master_phil)
    command_line_args = list()
    map_files = list()
    for arg in args:
      if (os.path.isfile(arg)):
        map_files.append(arg)
      else:
        command_line_args.append(arg_int.process(arg))
    params = master_phil.fetch(sources=command_line_args).extract()

    # check if more files are necessary
    n_defined = 0
    for attribute in input_attributes:
      if (getattr(params.input, attribute) is not None):
        n_defined += 1

    # match files to the phil scope; stop once two inputs are defined
    for map_file in map_files:
      if (n_defined < 2):
        current_map = file_reader.any_file(map_file)
        if (current_map.file_type == 'ccp4_map'):
          n_defined += 1
          if (params.input.map_1 is None):
            params.input.map_1 = map_file
          elif (params.input.map_2 is None):
            params.input.map_2 = map_file
        elif (current_map.file_type == 'hkl'):
          n_defined += 1
          if (params.input.mtz_1 is None):
            params.input.mtz_1 = map_file
          elif (params.input.mtz_2 is None):
            params.input.mtz_2 = map_file
      else:
        print >> out, 'WARNING: only the first two files are used'
        break

  # validate arguments (GUI sets validated to true, no need to run again)
  assert (params is not None)
  if (not validated):
    validate_params(params)

  # ---------------------------------------------------------------------------
  # check if maps need to be generated from mtz
  n_maps = 0
  maps = list()
  map_names = list()
  for attribute in input_attributes:
    filename = getattr(params.input, attribute)
    if (filename is not None):
      map_names.append(filename)
      current_map = file_reader.any_file(filename)
      maps.append(current_map)
      if (current_map.file_type == 'ccp4_map'):
        n_maps += 1

  # construct maps, if necessary
  crystal_gridding = None
  m1 = None
  m2 = None

  # 1 map, 1 mtz file
  if (n_maps == 1):
    for current_map in maps:
      if (current_map.file_type == 'ccp4_map'):
        uc = current_map.file_object.unit_cell()
        sg_info = space_group_info(current_map.file_object.space_group_number)
        n_real = current_map.file_object.unit_cell_grid
        crystal_gridding = maptbx.crystal_gridding(
          uc, space_group_info=sg_info, pre_determined_n_real=n_real)
        m1 = current_map.file_object.map_data()
    if (crystal_gridding is not None):
      label = None
      for attribute in [('mtz_1', 'mtz_label_1'),
                        ('mtz_2', 'mtz_label_2')]:
        filename = getattr(params.input, attribute[0])
        label = getattr(params.input, attribute[1])
        if ( (filename is not None) and (label is not None) ):
          break
      # labels will match currently open mtz file
      for current_map in maps:
        if (current_map.file_type == 'hkl'):
          m2 = miller.fft_map(
            crystal_gridding=crystal_gridding,
            fourier_coefficients=current_map.file_server.get_miller_array(
              label)).apply_sigma_scaling().real_map_unpadded()
    else:
      raise Sorry('Gridding is not defined.')

  # 2 mtz files
  elif (n_maps == 0):
    crystal_symmetry = get_crystal_symmetry(maps[0])
    d_min = min(get_d_min(maps[0]), get_d_min(maps[1]))
    crystal_gridding = maptbx.crystal_gridding(
      crystal_symmetry.unit_cell(), d_min=d_min,
      resolution_factor=params.options.resolution_factor,
      space_group_info=crystal_symmetry.space_group_info())
    m1 = miller.fft_map(
      crystal_gridding=crystal_gridding,
      fourier_coefficients=maps[0].file_server.get_miller_array(
        params.input.mtz_label_1)).apply_sigma_scaling().real_map_unpadded()
    m2 = miller.fft_map(
      crystal_gridding=crystal_gridding,
      fourier_coefficients=maps[1].file_server.get_miller_array(
        params.input.mtz_label_2)).apply_sigma_scaling().real_map_unpadded()

  # 2 maps
  else:
    m1 = maps[0].file_object.map_data()
    m2 = maps[1].file_object.map_data()

  # ---------------------------------------------------------------------------
  # analyze maps
  assert ( (m1 is not None) and (m2 is not None) )

  # show general statistics
  s1 = maptbx.more_statistics(m1)
  s2 = maptbx.more_statistics(m2)
  show_overall_statistics(out=out, s=s1, header="Map 1 (%s):"%map_names[0])
  show_overall_statistics(out=out, s=s2, header="Map 2 (%s):"%map_names[1])
  cc_input_maps = flex.linear_correlation(x = m1.as_1d(),
                                          y = m2.as_1d()).coefficient()
  print >> out, "CC, input maps: %6.4f" % cc_input_maps

  # compute CCpeak
  cc_peaks = list()
  m1_he = maptbx.volume_scale(map = m1,  n_bins = 10000).map_data()
  m2_he = maptbx.volume_scale(map = m2,  n_bins = 10000).map_data()
  cc_quantile = flex.linear_correlation(x = m1_he.as_1d(),
                                        y = m2_he.as_1d()).coefficient()
  print >> out, "CC, quantile rank-scaled (histogram equalized) maps: %6.4f" % \
    cc_quantile
  print >> out, "Peak correlation:"
  print >> out, "  cutoff  CCpeak"
  cutoffs = [i/100. for i in range(1,90)] + [i/1000. for i in range(900,1000)]
  for cutoff in cutoffs:
    cc_peak = maptbx.cc_peak(map_1=m1_he, map_2=m2_he, cutoff=cutoff)
    print >> out, "  %3.2f   %7.4f" % (cutoff, cc_peak)
    cc_peaks.append((cutoff, cc_peak))

  # compute discrepancy function (D-function)
  discrepancies = list()
  cutoffs = flex.double(cutoffs)
  df = maptbx.discrepancy_function(map_1=m1_he, map_2=m2_he, cutoffs=cutoffs)
  print >> out, "Discrepancy function:"
  print >> out, "  cutoff  D"
  for c, d in zip(cutoffs, df):
    print >> out, "  %3.2f   %7.4f" % (c,d)
    discrepancies.append((c, d))

  # compute and output histograms
  h1 = maptbx.histogram(map=m1, n_bins=10000)
  h2 = maptbx.histogram(map=m2, n_bins=10000)
  print >> out, "Map histograms:"
  print >> out, "Map 1 (%s)     Map 2 (%s)"%\
    (params.input.map_1,params.input.map_2)
  print >> out, "(map_value,cdf,frequency) <> (map_value,cdf,frequency)"
  for a1,c1,v1, a2,c2,v2 in zip(h1.arguments(), h1.c_values(), h1.values(),
                                h2.arguments(), h2.c_values(), h2.values()):
    print >> out, "(%9.5f %9.5f %9.5f) <> (%9.5f %9.5f %9.5f)"%\
      (a1,c1,v1, a2,c2,v2)

  # store results
  s1_dict = create_statistics_dict(s=s1)
  s2_dict = create_statistics_dict(s=s2)
  results = dict()
  inputs = list()
  for attribute in input_attributes:
    filename = getattr(params.input,attribute)
    if (filename is not None):
      inputs.append(filename)
  assert (len(inputs) == 2)
  results['map_files'] = inputs
  results['map_statistics'] = (s1_dict, s2_dict)
  results['cc_input_maps'] = cc_input_maps
  results['cc_quantile'] = cc_quantile
  results['cc_peaks'] = cc_peaks
  results['discrepancies'] = discrepancies
  results['map_histograms'] = ( (h1.arguments(), h1.c_values(), h1.values()),
                                (h2.arguments(), h2.c_values(), h2.values()) )

  return results
Code Example #4
# Map-only variant (Python 2, prints to stdout); the same module-level helpers
# are assumed, plus iotbx.ccp4_map and cctbx.crystal for reading the CCP4 maps.
def run(args, validated=False):
  show_citation()
  if ( (len(args) == 0) or (len(args) > 2) ):
    print '\nUsage: phenix.map_comparison map_1=<first map> map_2=<second map>\n'
    sys.exit()

  # process arguments
  try: # automatic parsing
    params = phil.process_command_line_with_files(
      args=args, master_phil=master_phil).work.extract()
  except Exception: # map_file_def only handles one map phil
    from libtbx.phil.command_line import argument_interpreter
    arg_int = argument_interpreter(master_phil=master_phil)
    command_line_args = list()
    map_files = list()
    for arg in args:
      if (os.path.isfile(arg)):
        map_files.append(arg)
      else:
        command_line_args.append(arg_int.process(arg))
    params = master_phil.fetch(sources=command_line_args).extract()
    for map_file in map_files:
      if (params.input.map_1 is None):
        params.input.map_1 = map_file
      else:
        params.input.map_2 = map_file

  # validate arguments (GUI sets validated to true, no need to run again)
  if (not validated):
    validate_params(params)

  # ---------------------------------------------------------------------------
  # map 1
  ccp4_map_1 = iotbx.ccp4_map.map_reader(file_name=params.input.map_1)
  cs_1 = crystal.symmetry(ccp4_map_1.unit_cell().parameters(),
    ccp4_map_1.space_group_number)
  m1 = ccp4_map_1.map_data()

  # map 2
  ccp4_map_2 = iotbx.ccp4_map.map_reader(file_name=params.input.map_2)
  cs_2 = crystal.symmetry(ccp4_map_2.unit_cell().parameters(),
    ccp4_map_2.space_group_number)
  m2 = ccp4_map_2.map_data()

  # show general statistics
  s1 = maptbx.more_statistics(m1)
  s2 = maptbx.more_statistics(m2)
  show_overall_statistics(s=s1, header="Map 1 (%s):"%params.input.map_1)
  show_overall_statistics(s=s2, header="Map 2 (%s):"%params.input.map_2)
  cc_input_maps = flex.linear_correlation(x = m1.as_1d(),
                                          y = m2.as_1d()).coefficient()
  print "CC, input maps: %6.4f" % cc_input_maps

  # compute CCpeak
  cc_peaks = list()
  m1_he = maptbx.volume_scale(map = m1,  n_bins = 10000).map_data()
  m2_he = maptbx.volume_scale(map = m2,  n_bins = 10000).map_data()
  cc_quantile = flex.linear_correlation(x = m1_he.as_1d(),
                                        y = m2_he.as_1d()).coefficient()
  print "CC, quantile rank-scaled (histogram equalized) maps: %6.4f" % \
    cc_quantile
  print "Peak correlation:"
  print "  cutoff  CCpeak"
  for cutoff in [i/100. for i in range(0,100,5)]+[0.99, 1.0]:
    cc_peak = maptbx.cc_peak(map_1=m1_he, map_2=m2_he, cutoff=cutoff)
    print "  %3.2f   %7.4f" % (cutoff, cc_peak)
    cc_peaks.append((cutoff, cc_peak))

  # compute discrepancy function (D-function)
  discrepancies = list()
  cutoffs = flex.double([i/20. for i in range(1,20)])
  df = maptbx.discrepancy_function(map_1=m1_he, map_2=m2_he, cutoffs=cutoffs)
  print "Discrepancy function:"
  print "  cutoff  D"
  for c, d in zip(cutoffs, df):
    print "  %3.2f   %7.4f" % (c,d)
    discrepancies.append((c, d))

  # compute and output histograms
  h1 = maptbx.histogram(map=m1, n_bins=10000)
  h2 = maptbx.histogram(map=m2, n_bins=10000)
  print "Map histograms:"
  print "Map 1 (%s)     Map 2 (%s)"%(params.input.map_1,params.input.map_2)
  print "(map_value,cdf,frequency) <> (map_value,cdf,frequency)"
  for a1,c1,v1, a2,c2,v2 in zip(h1.arguments(), h1.c_values(), h1.values(),
                                h2.arguments(), h2.c_values(), h2.values()):
    print "(%9.5f %9.5f %9.5f) <> (%9.5f %9.5f %9.5f)"%(a1,c1,v1, a2,c2,v2)

  # store results
  s1_dict = create_statistics_dict(s1)
  s2_dict = create_statistics_dict(s2)
  results = dict()
  results['map_files'] = (params.input.map_1, params.input.map_2)
  results['map_statistics'] = (s1_dict, s2_dict)
  results['cc_input_maps'] = cc_input_maps
  results['cc_quantile'] = cc_quantile
  results['cc_peaks'] = cc_peaks
  results['discrepancies'] = discrepancies
  results['map_histograms'] = ( (h1.arguments(), h1.c_values(), h1.values()),
                                (h2.arguments(), h2.c_values(), h2.values()) )

  return results
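
The histograms stored in results are triples of flex arrays (map_value, cdf, frequency, matching the print header above). A hypothetical post-processing helper, not part of the original module, that converts one of them into plain Python lists, for example for JSON output:

def histogram_to_lists(histogram_triple):
  map_values, cdf, frequencies = histogram_triple
  return {'map_value': list(map_values),
          'cdf': list(cdf),
          'frequency': list(frequencies)}

# usage: histogram_to_lists(run(sys.argv[1:])['map_histograms'][0])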