def run(args):
  assert len(args) == 1
  timer = time_log("pdb.input").start()
  pdb_inp = iotbx.pdb.input(file_name=args[0])
  print "number of pdb atoms:", pdb_inp.atoms().size()
  print timer.log()
  crystal_symmetry = pdb_inp.crystal_symmetry()
  assert crystal_symmetry is not None
  crystal_symmetry.show_summary()
  assert crystal_symmetry.unit_cell() is not None
  assert crystal_symmetry.space_group_info() is not None
  sites_cart = pdb_inp.atoms().extract_xyz()
  site_radii = flex.double(sites_cart.size(), 2.5)
  crystal_gridding = maptbx.crystal_gridding(
    unit_cell=crystal_symmetry.unit_cell(),
    d_min=2,
    resolution_factor=1/3)
  fft = fftpack.real_to_complex_3d(crystal_gridding.n_real())
  print "n_real:", fft.n_real()
  print "m_real:", fft.m_real()
  timer = time_log("grid_indices_around_sites").start()
  grid_indices = maptbx.grid_indices_around_sites(
    unit_cell=crystal_symmetry.unit_cell(),
    fft_n_real=fft.n_real(),
    fft_m_real=fft.m_real(),
    sites_cart=sites_cart,
    site_radii=site_radii)
  print "grid_indices.size():", grid_indices.size()
  print timer.log()
  print "grid fraction:", \
    grid_indices.size() / matrix.col(fft.n_real()).product()
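All of these examples lean on the same small libtbx.utils.time_log API: construct a timer with a label, call start() before the work, then either log() for an immediate one-line summary or stop() now and report() later. A minimal sketch of that pattern, assuming a cctbx/libtbx installation (the label and workload are stand-ins):

from libtbx.utils import time_log

timer = time_log("squares").start()
total = sum(i * i for i in range(10**6))  # stand-in workload
# As the usage above suggests, log() ends the current interval and
# returns a one-line timing summary.
print(timer.log())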
Example #2
def run(args):
    assert len(args) == 1
    timer = time_log("pdb.input").start()
    pdb_inp = iotbx.pdb.input(file_name=args[0])
    print("number of pdb atoms:", pdb_inp.atoms().size())
    print(timer.log())
    crystal_symmetry = pdb_inp.crystal_symmetry()
    assert crystal_symmetry is not None
    crystal_symmetry.show_summary()
    assert crystal_symmetry.unit_cell() is not None
    assert crystal_symmetry.space_group_info() is not None
    sites_cart = pdb_inp.atoms().extract_xyz()
    site_radii = flex.double(sites_cart.size(), 2.5)
    crystal_gridding = maptbx.crystal_gridding(
        unit_cell=crystal_symmetry.unit_cell(),
        d_min=2,
        resolution_factor=1 / 3)
    fft = fftpack.real_to_complex_3d(crystal_gridding.n_real())
    print("n_real:", fft.n_real())
    print("m_real:", fft.m_real())
    timer = time_log("grid_indices_around_sites").start()
    grid_indices = maptbx.grid_indices_around_sites(
        unit_cell=crystal_symmetry.unit_cell(),
        fft_n_real=fft.n_real(),
        fft_m_real=fft.m_real(),
        sites_cart=sites_cart,
        site_radii=site_radii)
    print("grid_indices.size():", grid_indices.size())
    print(timer.log())
    print("grid fraction:", \
      grid_indices.size() / matrix.col(fft.n_real()).product())
Example #3
def one_series(num_threads, n_iterations, quick=False):
  if (num_threads > 0):
    omptbx.env.num_threads = num_threads
  print "working omptbx.env.num_threads:", omptbx.env.num_threads
  use_wall_clock = (omptbx.env.num_threads > 1)
  print "use_wall_clock:", use_wall_clock
  #
  if (quick):
    dims = (2*3, 3*4, 4*5)
  else:
    dims = (2*3*5*7, 3*4*5*7, 3*4*5*5)
  rfft = fftpack.real_to_complex_3d(dims)
  print("rfft.m_real():", rfft.m_real())
  #
  t_map = time_log(label="map", use_wall_clock=use_wall_clock)
  t_fill = time_log(label="fill", use_wall_clock=use_wall_clock)
  t_fft = time_log(label="fft", use_wall_clock=use_wall_clock)
  print(t_map.legend)
  sys.stdout.flush()
  for i_iteration in range(n_iterations):
    t_map.start()
    map = fftpack.zeros_parallel_double(flex_grid=flex.grid(rfft.m_real()))
    print(t_map.log())
    sys.stdout.flush()
    t_fill.start()
    for i in range(0, map.size(), 97):
      map[i] = random.random()
    print(t_fill.log())
    sys.stdout.flush()
    t_fft.start()
    map = rfft.forward(map)
    print(t_fft.log())
    sys.stdout.flush()
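Example #3 reuses each timer across iterations: every start()/log() pair records another interval under the same label. It also switches to wall-clock timing whenever OpenMP uses more than one thread, since summed CPU time would overstate multi-threaded runs. A small sketch of that reuse, with an illustrative label and a stand-in workload:

from libtbx.utils import time_log

t_fill = time_log(label="fill", use_wall_clock=True)
for _ in range(3):
  t_fill.start()
  data = [0.0] * 10**6  # stand-in for filling the map
  print(t_fill.log())   # one line per iteration, same label throughout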
Example #4
  def __init__(self, directory, file_ext,
               build_miller_arrays=False,
               build_xray_structure=False):
    timer = time_log("parsing")
    error_count = 0
    self.parsing_error_count = 0
    for root, dirs, files in os.walk(directory):
      cif_g = glob.glob(os.path.join(root, "*.%s" %file_ext))
      files_to_read = cif_g
      for path in files_to_read:
        timer.start()
        try:
          reader = self.run_once(path, build_miller_arrays=build_miller_arrays,
                                 build_xray_structure=build_xray_structure)

        except Exception as e:
          print("error reading %s" %path)
          print(e)
          error_count += 1
        timer.stop()
    print()
    print("%i files read (%i with building errors and %i with parsing errors)" %(
      timer.n, error_count, self.parsing_error_count))
    print(timer.legend)
    print(timer.report())
    sys.stdout.flush()
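Example #4 shows the aggregate side of the API: after many start()/stop() cycles, timer.n holds the number of recorded intervals and report() prints the accumulated statistics under the column headers given by legend. A short sketch of that accounting, with a stand-in parse step:

from libtbx.utils import time_log

timer = time_log("parsing")
for text in ["a", "bb", "ccc"]:
  timer.start()
  parsed = text.upper()  # stand-in for reading one file
  timer.stop()
print("%i files read" % timer.n)
print(timer.legend)
print(timer.report())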
Example #5
def run(args, out=sys.stdout):
    if len(args) == 0: args = ["--help"]
    command_line = (option_parser(
        usage="iotbx.cif.validate filepath|directory [options]")
        .option(None, "--file_ext", action="store", default="cif")
        .option(None, "--dic", action="append", dest="dictionaries")
        .option(None, "--show_warnings", action="store_true")
        .option(None, "--show_timings", action="store_true")
        .option(None, "--strict", action="store", type="bool", default="true")
    ).process(args=args)
    if len(command_line.args) != 1:
        command_line.parser.show_help()
        return
    total_timer = time_log("total").start()
    filepath = command_line.args[0]
    if not os.path.isabs(filepath):
        abs_path = libtbx.env.find_in_repositories(relative_path=filepath)
        if abs_path is None:
            abs_path = libtbx.env.find_in_repositories(relative_path=filepath,
                                                       test=os.path.isfile)
        if abs_path is not None: filepath = abs_path
    cif_dics = command_line.options.dictionaries
    if cif_dics is None:
        cif_dics = ["cif_core.dic"]
    cif_dic = validation.smart_load_dictionary(name=cif_dics[0])
    if len(cif_dics) > 1:
        for d in cif_dics[1:]:
            cif_dic.update(validation.smart_load_dictionary(name=d))
    show_warnings = command_line.options.show_warnings == True
    show_timings = command_line.options.show_timings == True
    strict = command_line.options.strict
    if os.path.isdir(filepath):
        file_ext = command_line.options.file_ext
        crawl(filepath,
              file_ext=file_ext,
              cif_dic=cif_dic,
              show_warnings=show_warnings,
              show_timings=show_timings,
              strict=strict)
    elif os.path.isfile(filepath):
        cm = cif.reader(file_path=filepath, strict=strict).model()
        cm.validate(cif_dic, show_warnings=show_warnings)
    else:
        try:
            file_object = urllib.request.urlopen(filepath)
        except urllib.error.URLError as e:
            pass
        else:
            cm = cif.reader(file_object=file_object, strict=strict).model()
            cm.validate(cif_dic, show_warnings=show_warnings)
    if show_timings:
        total_timer.stop()
        print(total_timer.report())
Example #6
def run(args, out=sys.stdout):
  if len(args) == 0: args = ["--help"]
  command_line = (option_parser(
                  usage="iotbx.cif.validate filepath|directory [options]")
                  .option(None, "--file_ext",
                          action="store",
                          default="cif")
                  .option(None, "--dic",
                          action="append",
                          dest="dictionaries")
                  .option(None, "--show_warnings",
                          action="store_true")
                  .option(None, "--show_timings",
                          action="store_true")
                  .option(None, "--strict",
                          action="store",
                          type="bool",
                          default="true")).process(args=args)
  if len(command_line.args) != 1:
    command_line.parser.show_help()
    return
  total_timer = time_log("total").start()
  filepath = command_line.args[0]
  if not os.path.isabs(filepath):
    abs_path = libtbx.env.find_in_repositories(relative_path=filepath)
    if abs_path is None:
      abs_path = libtbx.env.find_in_repositories(
        relative_path=filepath, test=os.path.isfile)
    if abs_path is not None: filepath = abs_path
  cif_dics = command_line.options.dictionaries
  if cif_dics is None:
    cif_dics = ["cif_core.dic"]
  cif_dic = validation.smart_load_dictionary(name=cif_dics[0])
  if len(cif_dics) > 1:
    for d in cif_dics[1:]:
      cif_dic.update(validation.smart_load_dictionary(name=d))
  show_warnings = command_line.options.show_warnings == True
  show_timings = command_line.options.show_timings == True
  strict = command_line.options.strict
  if os.path.isdir(filepath):
    file_ext = command_line.options.file_ext
    crawl(filepath, file_ext=file_ext,
          cif_dic=cif_dic, show_warnings=show_warnings,
          show_timings=show_timings, strict=strict)
  elif os.path.isfile(filepath):
    cm = cif.reader(file_path=filepath, strict=strict).model()
    cm.validate(cif_dic, show_warnings=show_warnings)
  else:
    try:
      file_object = urllib.request.urlopen(filepath)
    except urllib.error.URLError:
      pass
    else:
      cm = cif.reader(file_object=file_object, strict=strict).model()
      cm.validate(cif_dic, show_warnings=show_warnings)
  if show_timings:
    total_timer.stop()
    print(total_timer.report())
Example #7
def exercise_smart_load(show_timings=False, exercise_url=False):
    from libtbx.test_utils import open_tmp_directory
    from libtbx.utils import time_log
    import libtbx
    import os, shutil
    name = ["cif_core.dic", "cif_mm.dic"][0]
    url = [cif_core_dic_url, cif_mm_dic_url][0]
    # from gz
    gz_timer = time_log("from gz").start()
    cd = validation.smart_load_dictionary(name=name)
    gz_timer.stop()
    if exercise_url:
        tempdir = open_tmp_directory()
        store_dir = libtbx.env.under_dist(module_name='iotbx',
                                          path='cif/dictionaries')
        file_path = os.path.join(store_dir, name) + '.gz'
        shutil.copy(file_path, tempdir)
        # from url
        url_timer = time_log("from url").start()
        cd = validation.smart_load_dictionary(url=url, store_dir=tempdir)
        url_timer.stop()
        # from url to file
        url_to_file_timer = time_log("url to file").start()
        cd = validation.smart_load_dictionary(url=url,
                                              save_local=True,
                                              store_dir=tempdir)
        url_to_file_timer.stop()
        # read local file
        file_timer = time_log("from file").start()
        cd = validation.smart_load_dictionary(
            file_path=os.path.join(tempdir, name))
        file_timer.stop()
    if show_timings:
        print(time_log.legend)
        print(gz_timer.report())
        if exercise_url:
            print(url_timer.report())
            print(url_to_file_timer.report())
            print(file_timer.report())
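Note that example #7 prints time_log.legend from the class rather than from an instance, which is convenient when several independent timers share one header line; the examples suggest legend reads the same either way. A sketch:

from libtbx.utils import time_log

a = time_log("step a").start()
a.stop()
b = time_log("step b").start()
b.stop()
print(time_log.legend)  # one header line serves both reports below
print(a.report())
print(b.report())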
Example #8
def exercise_smart_load(show_timings=False, exercise_url=False):
  from libtbx.test_utils import open_tmp_directory
  from libtbx.utils import time_log
  import libtbx
  import os, shutil
  name = ["cif_core.dic", "cif_mm.dic"][0]
  url = [cif_core_dic_url, cif_mm_dic_url][0]
  # from gz
  gz_timer = time_log("from gz").start()
  cd = validation.smart_load_dictionary(name=name)
  gz_timer.stop()
  if exercise_url:
    tempdir = open_tmp_directory()
    store_dir = libtbx.env.under_dist(
      module_name='iotbx', path='cif/dictionaries')
    file_path = os.path.join(store_dir, name) + '.gz'
    shutil.copy(file_path, tempdir)
    # from url
    url_timer = time_log("from url").start()
    cd = validation.smart_load_dictionary(url=url, store_dir=tempdir)
    url_timer.stop()
    # from url to file
    url_to_file_timer = time_log("url to file").start()
    cd = validation.smart_load_dictionary(
      url=url, save_local=True, store_dir=tempdir)
    url_to_file_timer.stop()
    # read local file
    file_timer = time_log("from file").start()
    cd = validation.smart_load_dictionary(file_path=os.path.join(tempdir, name))
    file_timer.stop()
  if show_timings:
    print(time_log.legend)
    print(gz_timer.report())
    if exercise_url:
      print(url_timer.report())
      print(url_to_file_timer.report())
      print(file_timer.report())
Example #9
def crawl(directory, file_ext, cif_dic, show_warnings, show_timings, strict):
  timer = time_log("parsing")
  validate_timer = time_log("validate")
  for root, dirs, files in os.walk(directory):
    cif_g = glob.glob(os.path.join(root, "*.%s" %file_ext))
    files_to_read = cif_g
    for path in files_to_read:
      timer.start()
      try:
        cm = cif.reader(file_path=path, strict=strict).model()
      except AssertionError:
        continue
      timer.stop()
      s = StringIO()
      validate_timer.start()
      cm.validate(cif_dic, show_warnings=show_warnings, out=s)
      validate_timer.stop()
      if s.getvalue():
        print(path)
        print(s.getvalue())
  if show_timings:
    print(timer.legend)
    print(timer.report())
    print(validate_timer.report())
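Example #9 splits the work into two labelled timers, one wrapped around parsing and one around validation, so the final report attributes time to each phase separately. A sketch of the same split on stand-in phases:

from libtbx.utils import time_log

parse_timer = time_log("parsing")
check_timer = time_log("validate")
for text in ["x=1", "y=2"]:
  parse_timer.start()
  pairs = dict([text.split("=")])  # stand-in parse
  parse_timer.stop()
  check_timer.start()
  ok = all(v.isdigit() for v in pairs.values())  # stand-in validation
  check_timer.stop()
print(parse_timer.legend)
print(parse_timer.report())
print(check_timer.report())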
Example #10
  def __init__(self, directory, file_ext,
               build_miller_arrays=False,
               build_xray_structure=False):
    timer = time_log("parsing")
    error_count = 0
    self.parsing_error_count = 0
    for root, dirs, files in os.walk(directory):
      cif_g = glob.glob(os.path.join(root, "*.%s" %file_ext))
      files_to_read = cif_g
      for path in files_to_read:
        timer.start()
        try:
          reader = self.run_once(path, build_miller_arrays=build_miller_arrays,
                            build_xray_structure=build_xray_structure)

        except Exception as e:
          print("error reading %s" % path)
          print(e)
          error_count += 1
        timer.stop()
Example #11
    def __init__(self,
                 directory,
                 file_ext,
                 build_miller_arrays=False,
                 build_xray_structure=False):
        timer = time_log("parsing")
        error_count = 0
        self.parsing_error_count = 0
        for root, dirs, files in os.walk(directory):
            cif_g = glob.glob(os.path.join(root, "*.%s" % file_ext))
            files_to_read = cif_g
            for path in files_to_read:
                timer.start()
                try:
                    reader = self.run_once(
                        path,
                        build_miller_arrays=build_miller_arrays,
                        build_xray_structure=build_xray_structure)

                except Exception as e:
                    print("error reading %s" % path)
                    print(e)
                    error_count += 1
                timer.stop()
Example #12
    def __init__(self,
                 pickle_path,
                 sweep_path,
                 extra_args,
                 expected_unit_cell,
                 expected_rmsds,
                 expected_hall_symbol,
                 n_expected_lattices=1,
                 relative_length_tolerance=0.005,
                 absolute_angle_tolerance=0.5):

        args = ["dials.index", pickle_path, sweep_path] + extra_args

        cwd = os.path.abspath(os.curdir)
        tmp_dir = open_tmp_directory(suffix="test_dials_index")
        os.chdir(tmp_dir)
        command = " ".join(args)
        print(command)
        result = easy_run.fully_buffered(command=command).raise_if_errors()
        os.chdir(cwd)
        assert os.path.exists(os.path.join(tmp_dir, "experiments.json"))
        experiments_list = dxtbx_load.experiment_list(
            os.path.join(tmp_dir, "experiments.json"), check_format=False)
        assert len(experiments_list.crystals()) == n_expected_lattices, (
            len(experiments_list.crystals()), n_expected_lattices)
        assert os.path.exists(os.path.join(tmp_dir, "indexed.pickle"))
        from libtbx.utils import time_log
        unpickling_timer = time_log("unpickling")
        self.calc_rmsds_timer = time_log("calc_rmsds")
        unpickling_timer.start()
        self.indexed_reflections = load.reflections(
            os.path.join(tmp_dir, "indexed.pickle"))
        unpickling_timer.stop()
        for i in range(len(experiments_list)):
            experiment = experiments_list[i]
            self.crystal_model = experiment.crystal
            #assert self.crystal_model.get_unit_cell().is_similar_to(
            #expected_unit_cell,
            #relative_length_tolerance=relative_length_tolerance,
            #absolute_angle_tolerance=absolute_angle_tolerance), (
            #self.crystal_model.get_unit_cell().parameters(),
            #expected_unit_cell.parameters())
            assert unit_cells_are_similar(
                self.crystal_model.get_unit_cell(),
                expected_unit_cell,
                relative_length_tolerance=relative_length_tolerance,
                absolute_angle_tolerance=absolute_angle_tolerance), (
                    self.crystal_model.get_unit_cell().parameters(),
                    expected_unit_cell.parameters())
            sg = self.crystal_model.get_space_group()
            assert sg.type().hall_symbol() == expected_hall_symbol, (
                sg.type().hall_symbol(), expected_hall_symbol)
            reflections = self.indexed_reflections.select(
                self.indexed_reflections['id'] == i)
            mi = reflections['miller_index']
            assert (mi != (0, 0, 0)).count(False) == 0
            reflections = reflections.select(mi != (0, 0, 0))
            self.rmsds = self.get_rmsds_obs_pred(reflections, experiment)
            for actual, expected in zip(self.rmsds, expected_rmsds):
                assert actual <= expected, "%s %s" % (self.rmsds,
                                                      expected_rmsds)
        if 0:
            print(self.calc_rmsds_timer.legend)
            print(unpickling_timer.report())
            print(self.calc_rmsds_timer.report())
Example #13
        self._r_inv *= matrix.sqr(m_elems)
        self._n_iterations += 1


class reduction_with_tracking_and_eq_always_false(reduction_with_tracking):
    def __init__(self, unit_cell):
        reduction_with_tracking.__init__(self, unit_cell)

    def eps_eq(self, x, y):
        return False


relative_epsilon = None
track_infinite = False
eq_always_false = False
time_krivy_gruber_1976 = time_log("krivy_gruber_1976.reduction")
time_gruber_1973 = time_log("gruber_1973.reduction")
time_krivy_gruber_1976_minimum = time_log(
    "krivy_gruber_1976.minimum_reduction")
time_gruber_1973_minimum = time_log("gruber_1973.minimum_reduction")
time_gruber_1973_fast_minimum = time_log("gruber_1973.fast_minimum_reduction")
time_uctbx_fast_minimum = time_log("uctbx.fast_minimum_reduction")
fast_minimum_reduction_max_n_iterations = 0


def do_reduce(inp):
    assert not inp.is_degenerate()
    time_krivy_gruber_1976.start()
    red = krivy_gruber_1976.reduction(inp, relative_epsilon=relative_epsilon)
    time_krivy_gruber_1976.stop()
    assert red.is_niggli_cell()
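Example #13 (and its near-duplicate #14 below) defines its timers at module scope and starts/stops them inside do_reduce(), so a single report at the end covers every reduction performed during the run. A sketch of that accumulation with a stand-in reduction:

from libtbx.utils import time_log

reduction_timer = time_log("reduction")

def reduce_once(value):
  reduction_timer.start()
  result = value * value  # stand-in for the real reduction
  reduction_timer.stop()
  return result

for v in range(5):
  reduce_once(v)
print(time_log.legend)
print(reduction_timer.report())  # one line summarizing all five calls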
Example #14
      raise StopIteration
    self._r_inv *= matrix.sqr(m_elems)
    self._n_iterations += 1

class reduction_with_tracking_and_eq_always_false(reduction_with_tracking):

  def __init__(self, unit_cell):
    reduction_with_tracking.__init__(self, unit_cell)

  def eps_eq(self, x, y):
    return False

relative_epsilon = None
track_infinite = False
eq_always_false = False
time_krivy_gruber_1976 = time_log("krivy_gruber_1976.reduction")
time_gruber_1973 = time_log("gruber_1973.reduction")
time_krivy_gruber_1976_minimum = time_log(
  "krivy_gruber_1976.minimum_reduction")
time_gruber_1973_minimum = time_log("gruber_1973.minimum_reduction")
time_gruber_1973_fast_minimum = time_log("gruber_1973.fast_minimum_reduction")
time_uctbx_fast_minimum = time_log("uctbx.fast_minimum_reduction")
fast_minimum_reduction_max_n_iterations = 0

def do_reduce(inp):
  assert not inp.is_degenerate()
  time_krivy_gruber_1976.start()
  red = krivy_gruber_1976.reduction(
    inp, relative_epsilon=relative_epsilon)
  time_krivy_gruber_1976.stop()
  assert red.is_niggli_cell()
  red_cell = red.as_unit_cell()
Example #15
def run(experiments, reflections, random_seed=42):
    scitbx.random.set_random_seed(random_seed)
    random.seed(random_seed)

    reflections["id"] = flex.int(len(reflections), 0)
    reflections = reflections.select(
        reflections.get_flags(reflections.flags.indexed))

    beam = experiments[0].beam
    detector = experiments[0].detector
    p_id, (x, y) = detector.get_ray_intersection(beam.get_s0())

    g = scitbx.random.variate(
        scitbx.random.normal_distribution(mean=0, sigma=2))

    n = 100
    shift_x = g(n)
    shift_y = g(n)

    expected_miller_indices = reflections["miller_index"]
    non_zero_sel = expected_miller_indices != (0, 0, 0)

    misindexed_global = flex.size_t()
    correct_global = flex.size_t()
    misindexed_local = flex.size_t()
    correct_local = flex.size_t()

    global_timer = time_log("global")
    local_timer = time_log("local")

    for d_x, d_y in zip(shift_x, shift_y):
        set_slow_fast_beam_centre_mm(detector, beam, (y + d_y, x + d_x), p_id)

        refl = Indexer.map_centroids_to_reciprocal_space(
            experiments, reflections)

        refl_global = copy.deepcopy(refl)
        refl_global["id"] = flex.int(len(refl), -1)
        global_timer.start()
        assign_indices.AssignIndicesGlobal()(refl_global, experiments)
        global_timer.stop()

        misindexed_global.append(
            (expected_miller_indices == refl_global["miller_index"]
             ).select(non_zero_sel).count(False))
        correct_global.append(
            (expected_miller_indices == refl_global["miller_index"]
             ).select(non_zero_sel).count(True))

        refl_local = copy.deepcopy(refl)
        refl_local["id"] = flex.int(len(refl), -1)
        local_timer.start()
        assign_indices.AssignIndicesLocal()(refl_local, experiments)
        local_timer.stop()

        misindexed_local.append(
            (expected_miller_indices == refl_local["miller_index"]
             ).select(non_zero_sel).count(False))
        correct_local.append(
            (expected_miller_indices == refl_local["miller_index"]
             ).select(non_zero_sel).count(True))

        print("Beam centre shift: (%.2f, %.2f)" % (d_x, d_y))
        print("Misindexed global: %i" % misindexed_global[-1])
        print("Correct global: %i" % correct_global[-1])
        print("Misindexed local: %i" % misindexed_local[-1])
        print("Correct local: %i" % correct_local[-1])
        print()

    print(global_timer.legend)
    print(global_timer.report())
    print(local_timer.report())

    vmax = max(flex.max(correct_global), flex.max(correct_local))

    import matplotlib

    matplotlib.use("Agg")
    from matplotlib import pyplot as plt

    fig, axes = plt.subplots(ncols=2, sharey=True, figsize=(15, 10))
    sc = axes[0].scatter(
        shift_x,
        shift_y,
        vmin=0,
        vmax=1,
        c=correct_global.as_double() / vmax,
        cmap="viridis",
    )
    sc = axes[1].scatter(
        shift_x,
        shift_y,
        vmin=0,
        vmax=1,
        c=correct_local.as_double() / vmax,
        cmap="viridis",
    )
    axes[0].set_title("global")
    axes[1].set_title("local")
    for ax in axes:
        ax.set_aspect("equal")
        ax.set_xlabel("beam centre shift (mm)")
    axes[0].set_ylabel("beam centre shift (mm)")

    cbar = plt.colorbar(sc, ax=axes, shrink=0.5)
    cbar.set_label("Fraction correctly indexed")
    plt.savefig("correctly_indexed.png")
Example #16
def exercise_masks(xs,
                   fo_sq,
                   solvent_radius,
                   shrink_truncation_radius,
                   resolution_factor=None,
                   grid_step=None,
                   resolution_cutoff=None,
                   atom_radii_table=None,
                   use_space_group_symmetry=False,
                   debug=False,
                   verbose=False):
    assert resolution_factor is None or grid_step is None
    xs_ref = xs.deep_copy_scatterers()
    time_total = time_log("masks total").start()
    fo_sq = fo_sq.customized_copy(anomalous_flag=True)
    fo_sq = fo_sq.eliminate_sys_absent()
    merging = fo_sq.merge_equivalents()
    fo_sq_merged = merging.array()
    if resolution_cutoff is not None:
        fo_sq_merged = fo_sq_merged.resolution_filter(d_min=resolution_cutoff)
    if verbose:
        print("Merging summary:")
        print("R-int, R-sigma: %.4f, %.4f" % (merging.r_int(),
                                              merging.r_sigma()))
        merging.show_summary()
        print()
        fo_sq_merged.show_comprehensive_summary()
        print()
    mask = masks.mask(xs, fo_sq_merged)
    time_compute_mask = time_log("compute mask").start()
    mask.compute(solvent_radius=solvent_radius,
                 shrink_truncation_radius=shrink_truncation_radius,
                 resolution_factor=resolution_factor,
                 grid_step=grid_step,
                 atom_radii_table=atom_radii_table,
                 use_space_group_symmetry=use_space_group_symmetry)
    time_compute_mask.stop()
    time_structure_factors = time_log("structure factors").start()
    f_mask = mask.structure_factors()
    time_structure_factors.stop()
    mask.show_summary()
    f_model = mask.f_model()
    # write modified intensities as shelxl hkl
    out = StringIO()
    modified_fo_sq = mask.modified_intensities()
    modified_fo_sq.export_as_shelx_hklf(out)
    with open('modified.hkl', 'w') as out_file:
        out_file.write(out.getvalue())

    if verbose:
        print()
        print(time_log.legend)
        print(time_compute_mask.report())
        print(time_structure_factors.report())
        print(time_total.log())

    if debug:
        f_obs = fo_sq_merged.as_amplitude_array()
        sf = xray.structure_factors.from_scatterers(miller_set=f_obs,
                                                    cos_sin_table=True)
        f_calc = sf(xs, f_obs).f_calc()
        f_model = mask.f_model()
        scale_factor = f_obs.scale_factor(f_model)
        # f_obs - f_calc
        k = f_obs.scale_factor(f_calc)
        f_obs_minus_f_calc = f_obs.f_obs_minus_f_calc(1 / k, f_calc)
        diff_map_calc = miller.fft_map(mask.crystal_gridding,
                                       f_obs_minus_f_calc)
        diff_map_calc.apply_volume_scaling()
        # f_mask
        mask_map = miller.fft_map(mask.crystal_gridding, f_mask)
        mask_map.apply_volume_scaling()
        # f_model
        model_map = miller.fft_map(mask.crystal_gridding, f_model)
        model_map.apply_volume_scaling()
        # f_obs - f_model
        f_obs_minus_f_model = f_obs.f_obs_minus_f_calc(1 / scale_factor,
                                                       f_model)
        diff_map_model = miller.fft_map(mask.crystal_gridding,
                                        f_obs_minus_f_model)
        diff_map_model.apply_volume_scaling()
        # modified f_obs
        modified_fo_sq_map = miller.fft_map(
            mask.crystal_gridding,
            modified_fo_sq.as_amplitude_array().phase_transfer(f_calc))
        modified_fo_sq_map.apply_volume_scaling()
        # view the maps
        from crys3d import wx_map_viewer
        wx_map_viewer.display(title="Mask",
                              raw_map=mask.mask.data.as_double(),
                              unit_cell=f_obs.unit_cell())
        wx_map_viewer.display(title="f_obs - f_calc",
                              raw_map=diff_map_calc.real_map(),
                              unit_cell=f_obs.unit_cell())
        wx_map_viewer.display(title="f_mask",
                              raw_map=mask_map.real_map(),
                              unit_cell=f_obs.unit_cell())
        wx_map_viewer.display(title="f_model",
                              raw_map=model_map.real_map(),
                              unit_cell=f_obs.unit_cell())
        wx_map_viewer.display(title="f_obs - f_model",
                              raw_map=diff_map_model.real_map(),
                              unit_cell=f_obs.unit_cell())
        wx_map_viewer.display(title="modified_fo_sq",
                              raw_map=modified_fo_sq_map.real_map(),
                              unit_cell=f_obs.unit_cell())
    return mask
Example #17
def exercise_masks(xs, fo_sq,
                   solvent_radius,
                   shrink_truncation_radius,
                   resolution_factor=None,
                   grid_step=None,
                   resolution_cutoff=None,
                   atom_radii_table=None,
                   use_space_group_symmetry=False,
                   debug=False,
                   verbose=False):
  assert resolution_factor is None or grid_step is None
  xs_ref = xs.deep_copy_scatterers()
  time_total = time_log("masks total").start()
  fo_sq = fo_sq.customized_copy(anomalous_flag=True)
  fo_sq = fo_sq.eliminate_sys_absent()
  merging = fo_sq.merge_equivalents()
  fo_sq_merged = merging.array()
  if resolution_cutoff is not None:
    fo_sq_merged = fo_sq_merged.resolution_filter(d_min=resolution_cutoff)
  if verbose:
    print("Merging summary:")
    print("R-int, R-sigma: %.4f, %.4f" % (merging.r_int(), merging.r_sigma()))
    merging.show_summary()
    print()
    fo_sq_merged.show_comprehensive_summary()
    print()
  mask = masks.mask(xs, fo_sq_merged)
  time_compute_mask = time_log("compute mask").start()
  mask.compute(solvent_radius=solvent_radius,
               shrink_truncation_radius=shrink_truncation_radius,
               resolution_factor=resolution_factor,
               grid_step=grid_step,
               atom_radii_table=atom_radii_table,
               use_space_group_symmetry=use_space_group_symmetry)
  time_compute_mask.stop()
  time_structure_factors = time_log("structure factors").start()
  f_mask = mask.structure_factors()
  time_structure_factors.stop()
  mask.show_summary()
  f_model = mask.f_model()
  # write modified intensities as shelxl hkl
  out = StringIO()
  modified_fo_sq = mask.modified_intensities()
  modified_fo_sq.export_as_shelx_hklf(out)
  with open('modified.hkl', 'w') as out_file:
    out_file.write(out.getvalue())

  if verbose:
    print()
    print(time_log.legend)
    print(time_compute_mask.report())
    print(time_structure_factors.report())
    print(time_total.log())

  if debug:
    f_obs = fo_sq_merged.as_amplitude_array()
    sf = xray.structure_factors.from_scatterers(
      miller_set=f_obs,
      cos_sin_table=True)
    f_calc = sf(xs, f_obs).f_calc()
    f_model = mask.f_model()
    scale_factor = f_obs.scale_factor(f_model)
    # f_obs - f_calc
    k = f_obs.scale_factor(f_calc)
    f_obs_minus_f_calc = f_obs.f_obs_minus_f_calc(1/k, f_calc)
    diff_map_calc = miller.fft_map(mask.crystal_gridding, f_obs_minus_f_calc)
    diff_map_calc.apply_volume_scaling()
    # f_mask
    mask_map = miller.fft_map(mask.crystal_gridding, f_mask)
    mask_map.apply_volume_scaling()
    # f_model
    model_map = miller.fft_map(mask.crystal_gridding, f_model)
    model_map.apply_volume_scaling()
    # f_obs - f_model
    f_obs_minus_f_model = f_obs.f_obs_minus_f_calc(1/scale_factor, f_model)
    diff_map_model = miller.fft_map(mask.crystal_gridding, f_obs_minus_f_model)
    diff_map_model.apply_volume_scaling()
    # modified f_obs
    modified_fo_sq_map = miller.fft_map(
      mask.crystal_gridding, modified_fo_sq.as_amplitude_array().phase_transfer(f_calc))
    modified_fo_sq_map.apply_volume_scaling()
    # view the maps
    from crys3d import wx_map_viewer
    wx_map_viewer.display(
      title="Mask",
      raw_map=mask.mask.data.as_double(),
      unit_cell=f_obs.unit_cell())
    wx_map_viewer.display(
      title="f_obs - f_calc",
      raw_map=diff_map_calc.real_map(),
      unit_cell=f_obs.unit_cell())
    wx_map_viewer.display(
      title="f_mask",
      raw_map=mask_map.real_map(),
      unit_cell=f_obs.unit_cell())
    wx_map_viewer.display(
      title="f_model",
      raw_map=model_map.real_map(),
      unit_cell=f_obs.unit_cell())
    wx_map_viewer.display(
      title="f_obs - f_model",
      raw_map=diff_map_model.real_map(),
      unit_cell=f_obs.unit_cell())
    wx_map_viewer.display(
      title="modified_fo_sq",
      raw_map=modified_fo_sq_map.real_map(),
      unit_cell=f_obs.unit_cell())
  return mask
Example #18
  def __init__(self,
               pickle_path,
               sweep_path,
               extra_args,
               expected_unit_cell,
               expected_rmsds,
               expected_hall_symbol,
               n_expected_lattices=1,
               relative_length_tolerance=0.005,
               absolute_angle_tolerance=0.5):

    args = ["dials.index", pickle_path, sweep_path] + extra_args

    cwd = os.path.abspath(os.curdir)
    tmp_dir = open_tmp_directory(suffix="test_dials_index")
    os.chdir(tmp_dir)
    command = " ".join(args)
    print(command)
    result = easy_run.fully_buffered(command=command).raise_if_errors()
    os.chdir(cwd)
    assert os.path.exists(os.path.join(tmp_dir, "experiments.json"))
    experiments_list = dxtbx_load.experiment_list(
      os.path.join(tmp_dir, "experiments.json"), check_format=False)
    assert len(experiments_list.crystals()) == n_expected_lattices, (
      len(experiments_list.crystals()), n_expected_lattices)
    assert os.path.exists(os.path.join(tmp_dir, "indexed.pickle"))
    from libtbx.utils import time_log
    unpickling_timer = time_log("unpickling")
    self.calc_rmsds_timer = time_log("calc_rmsds")
    unpickling_timer.start()
    self.indexed_reflections = load.reflections(os.path.join(tmp_dir, "indexed.pickle"))
    unpickling_timer.stop()
    for i in range(len(experiments_list)):
      experiment = experiments_list[i]
      self.crystal_model = experiment.crystal
      #assert self.crystal_model.get_unit_cell().is_similar_to(
        #expected_unit_cell,
        #relative_length_tolerance=relative_length_tolerance,
        #absolute_angle_tolerance=absolute_angle_tolerance), (
          #self.crystal_model.get_unit_cell().parameters(),
          #expected_unit_cell.parameters())
      assert unit_cells_are_similar(
        self.crystal_model.get_unit_cell(), expected_unit_cell,
        relative_length_tolerance=relative_length_tolerance,
        absolute_angle_tolerance=absolute_angle_tolerance), (
          self.crystal_model.get_unit_cell().parameters(),
          expected_unit_cell.parameters())
      sg = self.crystal_model.get_space_group()
      assert sg.type().hall_symbol() == expected_hall_symbol, (
        sg.type().hall_symbol(), expected_hall_symbol)
      reflections = self.indexed_reflections.select(
        self.indexed_reflections['id'] == i)
      mi = reflections['miller_index']
      assert (mi != (0,0,0)).count(False) == 0
      reflections = reflections.select(mi != (0,0,0))
      self.rmsds = self.get_rmsds_obs_pred(reflections, experiment)
      for actual, expected in zip(self.rmsds, expected_rmsds):
        assert actual <= expected, "%s %s" %(self.rmsds, expected_rmsds)
    if 0:
      print(self.calc_rmsds_timer.legend)
      print(unpickling_timer.report())
      print(self.calc_rmsds_timer.report())
Example #19
  def compute_rij_wij(self, use_cache=True):

    group = flex.bool(self._lattices.size(), True)

    n_lattices = group.count(True)
    n_sym_ops = len(self._sym_ops)

    NN = n_lattices * n_sym_ops

    index_selected = group.iselection()
    self.rij_matrix = flex.double(flex.grid(NN, NN), 0.)
    if self._weights is None:
      self.wij_matrix = None
    else:
      self.wij_matrix = flex.double(flex.grid(NN, NN), 0.)

    indices = {}
    space_group_type = self._data.space_group().type()
    for cb_op in self._sym_ops:
      cb_op = sgtbx.change_of_basis_op(cb_op)
      indices_reindexed = cb_op.apply(self._data.indices())
      miller.map_to_asu(space_group_type, False, indices_reindexed)
      indices[cb_op.as_xyz()] = indices_reindexed

    def _compute_rij_matrix_one_row_block(i):
      rij_cache = {}

      n_sym_ops = len(self._sym_ops)
      NN = n_lattices * n_sym_ops

      from scipy import sparse
      rij_row = []
      rij_col = []
      rij_data = []
      if self._weights is not None:
        wij_row = []
        wij_col = []
        wij_data = []
      else:
        wij = None

      i_lower, i_upper = self.lattice_lower_upper_index(i)
      intensities_i = self._data.data()[i_lower:i_upper]

      for j in range(i, n_lattices):

        j_lower, j_upper = self.lattice_lower_upper_index(j)
        intensities_j = self._data.data()[j_lower:j_upper]

        for k, cb_op_k in enumerate(self._sym_ops):
          cb_op_k = sgtbx.change_of_basis_op(cb_op_k)

          indices_i = indices[cb_op_k.as_xyz()][i_lower:i_upper]

          for kk, cb_op_kk in enumerate(self._sym_ops):
            if i == j and k == kk:
              # don't include correlation of dataset with itself
              continue
            cb_op_kk = sgtbx.change_of_basis_op(cb_op_kk)

            ik = i + (n_lattices * k)
            jk = j + (n_lattices * kk)

            key = (i, j, str(cb_op_k.inverse() * cb_op_kk))
            if use_cache and key in rij_cache:
              cc, n = rij_cache[key]
            else:
              indices_j = indices[cb_op_kk.as_xyz()][j_lower:j_upper]

              matches = miller.match_indices(indices_i, indices_j)
              pairs = matches.pairs()
              isel_i = pairs.column(0)
              isel_j = pairs.column(1)
              isel_i = isel_i.select(
                self._patterson_group.epsilon(indices_i.select(isel_i)) == 1)
              isel_j = isel_j.select(
                self._patterson_group.epsilon(indices_j.select(isel_j)) == 1)
              corr = flex.linear_correlation(
                intensities_i.select(isel_i),
                intensities_j.select(isel_j))

              if corr.is_well_defined():
                cc = corr.coefficient()
                n = corr.n()
                rij_cache[key] = (cc, n)
              else:
                cc = None
                n = None

            if n is None or n < self._min_pairs:
              continue

            if cc is not None and n is not None:
              if self._weights == 'count':
                wij_row.extend([ik, jk])
                wij_col.extend([jk, ik])
                wij_data.extend([n, n])
              elif self._weights == 'standard_error':
                assert n > 2
                # http://www.sjsu.edu/faculty/gerstman/StatPrimer/correlation.pdf
                se = math.sqrt((1-cc**2)/(n-2))
                wij = 1/se
                wij_row.extend([ik, jk])
                wij_col.extend([jk, ik])
                wij_data.extend([wij, wij])

              rij_row.extend([ik, jk])
              rij_col.extend([jk, ik])
              rij_data.extend([cc, cc])

      rij = sparse.coo_matrix((rij_data, (rij_row, rij_col)), shape=(NN, NN))
      if self._weights is not None:
        wij = sparse.coo_matrix((wij_data, (wij_row, wij_col)), shape=(NN, NN))

      return rij, wij

    timer_mp = time_log('parallel_map', use_wall_clock=True)
    timer_mp.start()
    from libtbx import easy_mp
    args = [(i,) for i in range(n_lattices)]
    results = easy_mp.parallel_map(
      _compute_rij_matrix_one_row_block,
      args,
      processes=self._nproc,
      iterable_type=easy_mp.posiargs,
      method='multiprocessing')
    timer_mp.stop()

    timer_collate = time_log('collate', use_wall_clock=True)
    timer_collate.start()
    rij_matrix = None
    wij_matrix = None
    for i, (rij, wij) in enumerate(results):
      if rij_matrix is None:
        rij_matrix = rij
      else:
        rij_matrix += rij
      if wij is not None:
        if wij_matrix is None:
          wij_matrix = wij
        else:
          wij_matrix += wij

    self.rij_matrix = flex.double(rij_matrix.todense())
    if wij_matrix is not None:
      import numpy as np
      self.wij_matrix = flex.double(wij_matrix.todense().astype(np.float64))
    timer_collate.stop()

    logger.debug(time_log.legend)
    logger.debug(timer_mp.report())
    logger.debug(timer_collate.report())

    return self.rij_matrix, self.wij_matrix
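Example #19 is the only snippet that routes its timing output through a logger instead of print, which keeps the reports out of normal output unless debug logging is enabled. A sketch of that wiring, assuming the standard logging module (the label and workload are stand-ins):

import logging
from libtbx.utils import time_log

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

timer = time_log("collate", use_wall_clock=True).start()
merged = sum(range(10**5))  # stand-in for collating results
timer.stop()
logger.debug(time_log.legend)
logger.debug(timer.report())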