Exemple #1
0
def run(args):
    """Reindex each MTZ file named on the command line.

    PHIL arguments in *args* are parsed against the module-level
    ``phil_scope``; remaining arguments are treated as MTZ file names.
    Each file is reindexed with ``params.change_of_basis_op`` and written
    to a new file with ``params.output.suffix`` appended to its basename.
    """
    # Removed unused function-scope imports (libtbx, easy_pickle) and the
    # dead "datasets" list from the original.
    cmd_line = command_line.argument_interpreter(master_params=phil_scope)
    working_phil, files = cmd_line.process_and_fetch(
        args=args, custom_processor="collect_remaining")
    working_phil.show()
    params = working_phil.extract()

    # The change-of-basis operator is the same for every file, so build
    # it once outside the loop.
    cb_op = sgtbx.change_of_basis_op(params.change_of_basis_op)

    for file_name in files:
        reader = any_reflection_file(file_name)
        assert reader.file_type() == "ccp4_mtz"
        mtz_object = reader.file_content()

        basename = os.path.basename(file_name)
        out_name = os.path.splitext(
            basename)[0] + params.output.suffix + ".mtz"
        print("reindexing %s (%s)" % (file_name, cb_op.as_xyz()))
        mtz_object.change_basis_in_place(cb_op)
        mtz_object.write(out_name)
Exemple #2
0
  def __init__(self, master_phil, local_overrides = "",
               cmdline_args = None, verbose=False):
    """Merge PHIL overrides into *master_phil*, then build the
    goniometer, crystal, beam and detector models from the resulting
    geometry parameters."""

    self._verbose = verbose

    # Local text overrides come first, then any command-line strings.
    interp = command_line.argument_interpreter(master_phil=master_phil)
    sources = [parse(local_overrides)]
    if cmdline_args:
      sources.extend(interp.process(arg) for arg in cmdline_args)

    working_phil = master_phil.fetch(sources=sources)
    self._params = working_phil.extract().geometry.parameters

    self.set_seed()
    self.build_goniometer()
    self.build_crystal()
    self.build_beam()
    self.build_detector()

    # Write any parameter changes made during building back to a PHIL
    # object for later inspection.
    temp = working_phil.extract()
    temp.geometry.parameters = self._params
    self.phil = master_phil.format(python_object = temp)
Exemple #3
0
def load_cxi_phil(path, args=[]):
    """Load a CXI PHIL target file and merge command-line overrides.

    Tries *path* with "", ".params" and ".phil" extensions and raises
    Sorry if none exists.  Arguments in *args* that parse as PHIL are
    folded into the file's scope; unparsable ones are silently skipped.

    Note: *args* has a mutable default, but it is only iterated here,
    never mutated.
    """
    import os
    from labelit.phil_preferences import iotbx_defs, libtbx_defs
    from iotbx import phil
    from libtbx.phil.command_line import argument_interpreter
    from libtbx.utils import Sorry

    exts = ["", ".params", ".phil"]
    foundIt = False
    for ext in exts:
        if os.path.exists(path + ext):
            foundIt = True
            path += ext
            break
    if not foundIt:
        raise Sorry("Target not found: " + path)

    master_phil = phil.parse(input_string=iotbx_defs + libtbx_defs,
                             process_includes=True)

    horizons_phil = master_phil.fetch(
        sources=[phil.parse(file_name=path, process_includes=True)])

    # Renamed local so it does not shadow the imported factory.
    interp = argument_interpreter(master_phil=master_phil)
    consume = []
    for arg in args:
        try:
            command_line_params = interp.process(arg=arg)
            horizons_phil = horizons_phil.fetch(sources=[
                command_line_params,
            ])
            consume.append(arg)

        except Sorry:
            # Fixed Python 2 "except Sorry, e" syntax; the exception
            # value was unused, so no binding is needed.
            pass
def create_phil(n, cts, bg):
    """Return simulation parameters for *n* reflections with *cts*
    counts on background *bg*, built via the master PHIL scope."""
    from libtbx.phil import command_line

    from dials.algorithms.simulation.generate_test_reflections import master_phil

    phil_text = create_phil_string(n, cts, bg)
    interp = command_line.argument_interpreter(master_params=master_phil)
    return interp.process_and_fetch(args=[phil_text]).extract()
def create_phil(n, cts, bg):
  """Build and extract simulation parameters for the given reflection
  count, spot counts and background level."""
  from dials.algorithms.simulation.generate_test_reflections import \
    master_phil
  from libtbx.phil import command_line

  text = create_phil_string(n, cts, bg)
  cmd = command_line.argument_interpreter(master_params=master_phil)
  phil = cmd.process_and_fetch(args=[text])
  return phil.extract()
Exemple #6
0
def process_input(args, phil_args, input_file, mode='auto', now=None):
    """ Read and parse parameter file

      input: input_file_list - PHIL-format files w/ parameters

      output: params - PHIL-formatted parameters
              txt_output - plain text-formatted parameters
  """

    from libtbx.phil.command_line import argument_interpreter
    from libtbx.utils import Sorry

    # NOTE(review): for any mode other than 'file'/'auto', "params" is
    # never bound and the format() call below raises NameError — confirm
    # callers only pass these two modes.
    if mode == 'file':
        user_phil = [ip.parse(open(inp).read()) for inp in [input_file]]
        working_phil = master_phil.fetch(sources=user_phil)
        params = working_phil.extract()
    elif mode == 'auto':
        params = master_phil.extract()
        params.description = 'IOTA parameters auto-generated on {}'.format(now)
        params.input = [input_file]

    final_phil = master_phil.format(python_object=params)

    # Parse in-line params into phil; local renamed so it does not
    # shadow the imported argument_interpreter factory.
    interp = argument_interpreter(master_phil=master_phil)
    consume = []
    for arg in phil_args:
        try:
            command_line_params = interp.process(arg=arg)
            final_phil = final_phil.fetch(sources=[
                command_line_params,
            ])
            consume.append(arg)
        except Sorry:
            # Fixed Python 2 "except Sorry, e" syntax; value was unused.
            pass
Exemple #7
0
    def __init__(self,
                 master_phil,
                 local_overrides="",
                 cmdline_args=None,
                 verbose=False):
        """Merge PHIL overrides and construct the goniometer, crystal,
        beam and detector models from the merged geometry parameters."""

        self._verbose = verbose

        interp = command_line.argument_interpreter(master_phil=master_phil)

        # Text overrides first, then command-line arguments, in order.
        sources = [parse(local_overrides)]
        if cmdline_args:
            sources += [interp.process(arg) for arg in cmdline_args]

        working_phil = master_phil.fetch(sources=sources)
        self._params = working_phil.extract().geometry.parameters

        self.set_seed()
        self.build_goniometer()
        self.build_crystal()
        self.build_beam()
        self.build_detector()

        # Persist any changes made during building back into a PHIL object.
        temp = working_phil.extract()
        temp.geometry.parameters = self._params
        self.phil = master_phil.format(python_object=temp)
Exemple #8
0
    def __init__(
        self,
        master_phil,
        target,
        prediction_parameterisation,
        local_overrides="",
        cmdline_args=None,
        verbose=True,
    ):
        """Merge PHIL overrides, extract the minimiser parameters and
        build the refiner."""

        self._target = target
        self._prediction_parameterisation = prediction_parameterisation
        self._verbose = verbose

        interp = command_line.argument_interpreter(master_phil=master_phil)

        # Overrides string first, then each command-line argument.
        sources = [parse(local_overrides)]
        if cmdline_args:
            sources.extend(interp.process(arg) for arg in cmdline_args)

        working_phil = master_phil.fetch(sources=sources)
        self._params = working_phil.extract().minimiser.parameters

        self.refiner = self.build_minimiser()
Exemple #9
0
def run(args):
    """Compute delta-CC-half statistics for an unmerged MTZ file.

    PHIL arguments are parsed from *args*; the first remaining argument
    is the reflection file.  Saves a histogram plot, prints the results
    table and the relevant citations.
    """
    cmd_line = command_line.argument_interpreter(
        master_params=master_phil_scope)
    working_phil, args = cmd_line.process_and_fetch(
        args=args, custom_processor="collect_remaining")
    working_phil.show()
    params = working_phil.extract()

    if params.unit_cell is not None:
        unit_cell = params.unit_cell
        crystal_symmetry = crystal.symmetry(unit_cell=unit_cell)
    else:
        crystal_symmetry = None

    from iotbx.reflection_file_reader import any_reflection_file
    result = any_reflection_file(args[0])
    unmerged_intensities = None
    batches_all = None

    # Pick out the intensity and batch arrays by their column labels.
    for ma in result.as_miller_arrays(merge_equivalents=False,
                                      crystal_symmetry=crystal_symmetry):
        if ma.info().labels == ['I(+)', 'SIGI(+)', 'I(-)', 'SIGI(-)']:
            assert ma.anomalous_flag()
            unmerged_intensities = ma
        elif ma.info().labels == ['I', 'SIGI']:
            assert not ma.anomalous_flag()
            unmerged_intensities = ma
        elif ma.info().labels == ['BATCH']:
            batches_all = ma

    assert batches_all is not None
    assert unmerged_intensities is not None

    id_to_batches = None

    if len(params.batch) > 0:
        id_to_batches = {}
        for b in params.batch:
            assert b.id is not None
            assert b.range is not None
            assert b.id not in id_to_batches, "Duplicate batch id: %s" % b.id
            id_to_batches[b.id] = b.range

    result = delta_cc_half(unmerged_intensities,
                           batches_all,
                           n_bins=params.n_bins,
                           d_min=params.d_min,
                           cc_one_half_method=params.cc_one_half_method,
                           id_to_batches=id_to_batches)
    hist_filename = 'delta_cc_hist.png'
    # Converted Python 2 print statements to Python 3 print() calls.
    print('Saving histogram to %s' % hist_filename)
    result.plot_histogram(hist_filename)
    print(result.get_table())
    from xia2.Handlers.Citations import Citations
    Citations.cite('delta_cc_half')
    for citation in Citations.get_citations_acta():
        print(citation)
Exemple #10
0
def main(args):
    """Run the test-reflection generator with PHIL settings parsed from
    sys.argv[1:].

    NOTE(review): the *args* parameter is ignored; arguments are read
    from sys.argv directly — confirm that is intentional.
    """
    from libtbx.phil import command_line

    from dials.algorithms.simulation.generate_test_reflections import main as _main
    from dials.algorithms.simulation.generate_test_reflections import master_phil

    interp = command_line.argument_interpreter(master_params=master_phil)
    phil = interp.process_and_fetch(args=sys.argv[1:])
    _main(phil.extract())
def main(args):
  """Drive generate_test_reflections.main with PHIL settings taken from
  sys.argv[1:] (the *args* argument is unused — verify intent)."""
  from dials.algorithms.simulation.generate_test_reflections import main \
    as _main
  from dials.algorithms.simulation.generate_test_reflections import \
    master_phil
  from libtbx.phil import command_line

  interp = command_line.argument_interpreter(master_params=master_phil)
  phil = interp.process_and_fetch(args=sys.argv[1:])
  _main(phil.extract())
Exemple #12
0
def run(args):
  """Apply the French & Wilson procedure to the single reflection file
  left over after PHIL argument parsing."""
  interp = command_line.argument_interpreter(master_params=master_phil_scope)
  working_phil, args = interp.process_and_fetch(
    args=args, custom_processor="collect_remaining")
  working_phil.show()
  params = working_phil.extract()
  # Exactly one non-PHIL argument (the data file) is expected.
  assert len(args) == 1
  french_wilson(args[0], params=params)
Exemple #13
0
def run(args):
  """Parse PHIL arguments, then run french_wilson on the one remaining
  command-line argument."""
  cmd = command_line.argument_interpreter(master_params=master_phil_scope)
  phil, remaining = cmd.process_and_fetch(
      args=args, custom_processor="collect_remaining")
  phil.show()
  assert len(remaining) == 1
  french_wilson(remaining[0], params=phil.extract())
Exemple #14
0
def run(args):
    """Parse PHIL arguments, expand image templates into datablocks and
    run `run_once` over every sufficiently long sweep in parallel."""
    cmd_line = command_line.argument_interpreter(
        master_params=master_phil_scope)
    working_phil = cmd_line.process_and_fetch(args=args)
    working_phil.show()
    params = working_phil.extract()
    # Normalise optional PHIL file paths and require that they exist.
    if params.find_spots_phil is not None:
        params.find_spots_phil = os.path.abspath(params.find_spots_phil)
        assert os.path.isfile(params.find_spots_phil)
    if params.index_phil is not None:
        params.index_phil = os.path.abspath(params.index_phil)
        assert os.path.isfile(params.index_phil)

    templates = params.template
    print(templates)

    args = []

    filenames = []

    # Each template is treated as a glob pattern over image files.
    for t in templates:
        print(t)
        filenames.extend(glob.glob(t))
    print(filenames)
    from dxtbx.imageset import ImageSetFactory, ImageSweep
    from dxtbx.datablock import DataBlockFactory

    datablocks = DataBlockFactory.from_args(filenames, verbose=True)

    i = 0
    # NOTE(review): "i" is used both as the enumerate() loop variable and
    # as a manually incremented sweep counter, so it is reset at the top
    # of every datablock iteration — presumably a single running counter
    # was intended; confirm before relying on the numbering in args.
    for i, datablock in enumerate(datablocks):
        sweeps = datablock.extract_sweeps()
        for imageset in sweeps:
            if (isinstance(imageset, ImageSweep)
                    and len(imageset) >= params.min_sweep_length):
                i += 1
                print(imageset)
                print(imageset.get_template())
                args.append((imageset.paths(), i, params))

    # sort based on the first filename of each imageset
    args.sort(key=lambda x: x[0][0])

    nproc = params.nproc
    results = easy_mp.parallel_map(
        func=run_once,
        iterable=args,
        processes=nproc,
        method=params.technology,
        qsub_command=params.qsub_command,
        preserve_order=True,
        asynchronous=False,
        preserve_exception_message=True,
    )
Exemple #15
0
def test_generate_test_reflections(tmpdir):
    """End-to-end check that generate_test_reflections.main writes the
    reflection file named in the PHIL "output.all" parameter."""
    tmpdir.chdir()
    from libtbx.phil import command_line

    from dials.algorithms.simulation.generate_test_reflections import main, master_phil

    cmd = command_line.argument_interpreter(master_phil=master_phil)
    # The full simulation configuration is supplied as one PHIL string;
    # its content must match what the generator's master scope expects.
    working_phil = cmd.process_and_fetch(
        args=[
            """nrefl = 10
shoebox_size {
  x = 20
  y = 20
  z = 20
}
spot_size {
  x = 1
  y = 3
  z = 1
}
spot_offset {
  x = -0.5
  y = -0.5
  z = -0.5
}
mask_nsigma = 3.0
counts = 10000
background = 0
background_a = 10
background_b = 0.1
background_c = -0.1
background_d = 0
pixel_mask = *all static precise
background_method = *xds mosflm
integration_methpd = *xds mosflm
output {
  over = None
  under = None
  all = all_refl.refl
}
rotation {
  axis {
    x = 0
    y = 0
    z = 1
  }
  angle = 45
}
"""
        ]
    )
    main(working_phil.extract())
    # The simulation must have produced the configured output file.
    assert (tmpdir / "all_refl.refl").check()
def main(args):
    """Simulate simple Gaussian spots and pickle them to
    params.output.all when configured.  PHIL settings come from
    sys.argv[1:]; the *args* parameter itself is unused."""
    from dials.algorithms.simulation.generate_test_reflections import \
       simple_gaussian_spots
    from dials.algorithms.simulation.generate_test_reflections import \
      master_phil
    from libtbx.phil import command_line
    cmd = command_line.argument_interpreter(master_params=master_phil)
    working_phil = cmd.process_and_fetch(args=sys.argv[1:])
    params = working_phil.extract()
    rlist = simple_gaussian_spots(params)
    # cPickle exists only on Python 2; fall back to pickle on Python 3.
    try:
        import cPickle as pickle
    except ImportError:
        import pickle
    if params.output.all:
        # Pickle streams must be written in binary mode; the original
        # opened the file in text mode ('w') and never closed it.
        with open(params.output.all, 'wb') as fh:
            pickle.dump(rlist, fh, pickle.HIGHEST_PROTOCOL)
def main(args):
  """Simulate simple Gaussian spots from sys.argv PHIL settings and
  pickle them to params.output.all (the *args* parameter is unused)."""
  from dials.algorithms.simulation.generate_test_reflections import \
     simple_gaussian_spots
  from dials.algorithms.simulation.generate_test_reflections import \
    master_phil
  from libtbx.phil import command_line
  cmd = command_line.argument_interpreter(master_params = master_phil)
  working_phil = cmd.process_and_fetch(args = sys.argv[1:])
  params = working_phil.extract()
  rlist = simple_gaussian_spots(params)
  # cPickle exists only on Python 2; fall back to pickle on Python 3.
  try:
    import cPickle as pickle
  except ImportError:
    import pickle
  if params.output.all:
    # Binary mode and a context manager: the original used text mode
    # ('w') and leaked the file handle.
    with open(params.output.all, 'wb') as fh:
      pickle.dump(rlist, fh, pickle.HIGHEST_PROTOCOL)
Exemple #18
0
def get_input_phil(paramfile=None, phil_args=None, ha14=False, gui=False):
    """Generate PHIL from file, master, and/or command arguments.

    :param phil_args: PHIL settings as command line arguments
    :param paramfile: file with input settings in PHIL format
    :param ha14: if True, include the legacy HA14 parameter scope
    :param gui: if True, ensure a "gui" scope is present
    :return: tuple of (working PHIL object, list of unparseable args)
    """
    from libtbx.phil.command_line import argument_interpreter
    from libtbx.utils import Sorry

    # Depending on mode, either read input from file, or generate defaults
    if paramfile:
        with open(paramfile, "r") as inpf:
            user_phil = ip.parse(inpf.read())
        phil_fixer = PHILFixer()
        working_phil = phil_fixer.run(old_phil=user_phil, write_file=True)

    else:
        if ha14:
            from iota.etc.iota_cctbx_ha14 import ha14_str

            working_phil = ip.parse(master_phil_str + ha14_str,
                                    process_includes=True)
        else:
            working_phil = master_phil

    if gui:
        from libtbx.phil import find_scope

        if not find_scope(working_phil, "gui"):
            from iota.gui.base import gui_phil

            working_phil.adopt_scope(gui_phil)

    # Parse in-line params into phil.  The local is named "interp" so it
    # does not shadow the imported argument_interpreter factory.
    bad_args = []
    if phil_args:
        interp = argument_interpreter(master_phil=working_phil)
        for arg in phil_args:
            try:
                command_line_params = interp.process(arg=arg)
                working_phil = working_phil.fetch(
                    sources=[command_line_params])
            except Sorry:
                bad_args.append(arg)

    # Self-fetch to resolve variables
    working_phil = working_phil.fetch(source=working_phil)

    return working_phil, bad_args
    def generate_profiles(self, num, counts):
        """Simulate *num* reflection profiles with *counts* counts each
        and return the unpickled reflection list read back from
        "all_refl.pickle" in the current directory."""
        from dials.algorithms.simulation.generate_test_reflections import main
        from dials.algorithms.simulation.generate_test_reflections import \
          master_phil
        from libtbx.phil import command_line
        cmd = command_line.argument_interpreter(master_params=master_phil)
        # Simulation settings as one PHIL string, parameterised only by
        # the reflection count and per-spot counts.
        working_phil = cmd.process_and_fetch(args=[
            """
      nrefl = %d
      shoebox_size {
        x = 10
        y = 10
        z = 10
      }
      spot_size {
        x = 1
        y = 1
        z = 1
      }
      spot_offset {
        x = -0.5
        y = -0.5
        z = -0.5
      }
      mask_nsigma = 3.0
      counts = %d
      background = 10
      pixel_mask = all *static precise
      background_method = *xds mosflm
      integration_methpd = *xds mosflm
      output {
        over = None
        under = None
        all = all_refl.pickle
      }
      rotation {
        axis {
          x = 0
          y = 0
          z = 1
        }
        angle = 0
      }

      """ % (num, counts)
        ])
        main(working_phil.extract())
        import six.moves.cPickle as pickle
        with open("all_refl.pickle", "rb") as fh:
            return pickle.load(fh)
def tst_generate_test_reflections():
  """Smoke-test generate_test_reflections.main with a fixed PHIL
  configuration; prints OK on success."""
  from dials.algorithms.simulation.generate_test_reflections import main
  from dials.algorithms.simulation.generate_test_reflections import \
    master_phil
  from libtbx.phil import command_line
  cmd = command_line.argument_interpreter(master_params = master_phil)
  working_phil = cmd.process_and_fetch(args = ["""nrefl = 10
shoebox_size {
  x = 20
  y = 20
  z = 20
}
spot_size {
  x = 1
  y = 3
  z = 1
}
spot_offset {
  x = -0.5
  y = -0.5
  z = -0.5
}
mask_nsigma = 3.0
counts = 10000
background = 0
background_a = 10
background_b = 0.1
background_c = -0.1
background_d = 0
pixel_mask = *all static precise
background_method = *xds mosflm
integration_methpd = *xds mosflm
output {
  over = None
  under = None
  all = all_refl.pickle
}
rotation {
  axis {
    x = 0
    y = 0
    z = 1
  }
  angle = 45
}

"""])
  main(working_phil.extract())
  # Converted the Python 2 print statement to a Python 3 print() call.
  print('OK')
def main(args):
    """Generate simple Gaussian spots from sys.argv PHIL settings and
    pickle them to params.output.all when configured (the *args*
    parameter is unused)."""
    from dials.algorithms.simulation.generate_test_reflections import (
        simple_gaussian_spots,
    )
    from dials.algorithms.simulation.generate_test_reflections import master_phil
    from libtbx.phil import command_line

    interp = command_line.argument_interpreter(master_params=master_phil)
    params = interp.process_and_fetch(args=sys.argv[1:]).extract()
    rlist = simple_gaussian_spots(params)
    import six.moves.cPickle as pickle

    if params.output.all:
        with open(params.output.all, "wb") as fh:
            pickle.dump(rlist, fh, pickle.HIGHEST_PROTOCOL)
Exemple #22
0
def run(args):
  """Run multi-crystal analysis on an unmerged MTZ file: parse PHIL
  arguments, pick out intensity/batch arrays by column label, build the
  optional batch-id mapping and hand off to multi_crystal_analysis."""
  cmd_line = command_line.argument_interpreter(master_params=master_phil_scope)
  working_phil, args = cmd_line.process_and_fetch(args=args, custom_processor="collect_remaining")
  working_phil.show()
  params = working_phil.extract()

  if params.unit_cell is not None:
    unit_cell = params.unit_cell
    crystal_symmetry = crystal.symmetry(unit_cell=unit_cell)
  else:
    crystal_symmetry = None

  from iotbx.reflection_file_reader import any_reflection_file
  result = any_reflection_file(args[0])
  unmerged_intensities = None
  batches_all = None

  # Identify the intensity and batch arrays by their exact column labels;
  # anomalous and non-anomalous intensity sets are both accepted.
  for ma in result.as_miller_arrays(
    merge_equivalents=False, crystal_symmetry=crystal_symmetry):
    #print ma.info().labels
    if ma.info().labels == ['I(+)', 'SIGI(+)', 'I(-)', 'SIGI(-)']:
      assert ma.anomalous_flag()
      unmerged_intensities = ma
    elif ma.info().labels == ['I', 'SIGI']:
      assert not ma.anomalous_flag()
      unmerged_intensities = ma
    elif ma.info().labels == ['BATCH']:
      batches_all = ma

  assert batches_all is not None
  assert unmerged_intensities is not None

  id_to_batches = None

  # Optional explicit mapping of dataset id -> batch range from PHIL.
  if len(params.batch) > 0:
    id_to_batches = {
    }
    for b in params.batch:
      assert b.id is not None
      assert b.range is not None
      assert b.id not in id_to_batches, "Duplicate batch id: %s" %b.id
      id_to_batches[b.id] = b.range

  multi_crystal_analysis(unmerged_intensities, batches_all,
                         n_bins=params.n_bins, d_min=params.d_min,
                         id_to_batches=id_to_batches)
  def generate_profiles(self, num, counts):
    """Simulate *num* reflection profiles with *counts* counts each and
    return the unpickled reflection list from "all_refl.pickle"."""
    from dials.algorithms.simulation.generate_test_reflections import main
    from dials.algorithms.simulation.generate_test_reflections import \
      master_phil
    from libtbx.phil import command_line
    cmd = command_line.argument_interpreter(master_params = master_phil)
    working_phil = cmd.process_and_fetch(args = ["""
      nrefl = %d
      shoebox_size {
        x = 10
        y = 10
        z = 10
      }
      spot_size {
        x = 1
        y = 1
        z = 1
      }
      spot_offset {
        x = -0.5
        y = -0.5
        z = -0.5
      }
      mask_nsigma = 3.0
      counts = %d
      background = 10
      pixel_mask = all *static precise
      background_method = *xds mosflm
      integration_methpd = *xds mosflm
      output {
        over = None
        under = None
        all = all_refl.pickle
      }
      rotation {
        axis {
          x = 0
          y = 0
          z = 1
        }
        angle = 0
      }

      """ % (num, counts)])
    main(working_phil.extract())
    # cPickle exists only on Python 2; fall back to pickle on Python 3.
    try:
      import cPickle as pickle
    except ImportError:
      import pickle
    # Close the file deterministically instead of leaking the handle.
    with open("all_refl.pickle", "rb") as fh:
      return pickle.load(fh)
Exemple #24
0
  def merge_command_line(self,args):
    """Fold command-line PHIL assignments into self.phil_scope,
    silently skipping arguments that do not parse."""
    from libtbx.phil.command_line import argument_interpreter

    # Renamed local so the imported factory is not shadowed.
    interp = argument_interpreter(
      master_phil=phil_preferences.effective_param_generator.master(),
    )
    consume = []
    for arg in args:

      try:
        command_line_params = interp.process(
          arg=arg
        )
        self.phil_scope = self.phil_scope.fetch(sources=[command_line_params,])
        consume.append(arg)

      except Sorry:
        # Fixed Python 2 "except Sorry,e" syntax; the value was unused.
        pass
Exemple #25
0
def process_input(args,
                  phil_args,
                  input_file,
                  mode='auto',
                  now=None):
  """ Read and parse parameter file

      input: input_file_list - PHIL-format files w/ parameters

      output: params - PHIL-formatted parameters
              txt_output - plain text-formatted parameters
  """

  from libtbx.phil.command_line import argument_interpreter
  from libtbx.utils import Sorry

  if mode == 'file':
    user_phil = [ip.parse(open(inp).read()) for inp in [input_file]]
    working_phil = master_phil.fetch(sources=user_phil)
    params = working_phil.extract()
  elif mode == 'auto':
    params = master_phil.extract()
    params.description = 'IOTA parameters auto-generated on {}'.format(now)
    params.input = [input_file]
    # Default target file name depends on the chosen integration backend.
    if params.advanced.integrate_with == 'dials':
      params.dials.target = 'dials.phil'
    elif params.advanced.integrate_with == 'cctbx':
      params.cctbx.target = 'cctbx.phil'

  final_phil = master_phil.format(python_object=params)

  # Parse in-line params into phil; local renamed so it does not shadow
  # the imported argument_interpreter factory.
  interp = argument_interpreter(master_phil=master_phil)
  consume = []
  for arg in phil_args:
    try:
      command_line_params = interp.process(arg=arg)
      final_phil = final_phil.fetch(sources=[command_line_params,])
      consume.append(arg)
    except Sorry:
      # Fixed Python 2 "except Sorry,e" syntax; the value was unused.
      pass
def run(args):
  """Generate lattice points for the single sweep constructed from
  sys.argv[1:] (note: the *args* parameter is overwritten)."""
  from libtbx.phil import command_line
  from dials.util.command_line import Importer

  args = sys.argv[1:]
  importer = Importer(args)
  if len(importer.imagesets) == 0:
    # Converted Python 2 print statement to a Python 3 print() call.
    print("No sweep object could be constructed")
    return
  elif len(importer.imagesets) > 1:
    raise RuntimeError("Only one imageset can be processed at a time")
  sweeps = importer.imagesets
  args = importer.unhandled_arguments

  sweep = sweeps[0]
  cmd_line = command_line.argument_interpreter(master_params=master_phil_scope)
  working_phil, args = cmd_line.process_and_fetch(
      args=args, custom_processor="collect_remaining")
  working_phil.show()

  result = gen_lattice_points(sweep, params=working_phil.extract())
def run(args):
    """Build a single sweep from sys.argv[1:] and generate lattice
    points for it (the incoming *args* value is discarded)."""
    from libtbx.phil import command_line
    from dials.util.command_line import Importer

    args = sys.argv[1:]
    importer = Importer(args)
    if not importer.imagesets:
        print("No sweep object could be constructed")
        return
    if len(importer.imagesets) > 1:
        raise RuntimeError("Only one imageset can be processed at a time")
    sweep = importer.imagesets[0]
    args = importer.unhandled_arguments

    interp = command_line.argument_interpreter(master_params=master_phil_scope)
    working_phil, args = interp.process_and_fetch(
        args=args, custom_processor="collect_remaining"
    )
    working_phil.show()

    result = gen_lattice_points(sweep, params=working_phil.extract())
Exemple #28
0
  def __init__(self, master_phil, target, prediction_parameterisation,
      local_overrides = "", cmdline_args = None, verbose=True):
    """Merge PHIL overrides, pull out the minimiser parameters and
    build the refiner."""

    self._target = target
    self._prediction_parameterisation = prediction_parameterisation
    self._verbose = verbose

    interp = command_line.argument_interpreter(master_phil=master_phil)

    # Text overrides first, then any command-line strings, in order.
    sources = [parse(local_overrides)]
    if cmdline_args:
      sources.extend(interp.process(arg) for arg in cmdline_args)

    working_phil = master_phil.fetch(sources=sources)
    self._params = working_phil.extract().minimiser.parameters

    self.refiner = self.build_minimiser()
Exemple #29
0
    def merge_command_line(self, args):
        """Merge parseable command-line PHIL assignments into
        self.phil_scope, remove them from *args*, and refresh the
        extracted scope."""
        from libtbx.phil.command_line import argument_interpreter

        interp = argument_interpreter(
            master_phil=phil_preferences.effective_param_generator.master(), )

        absorbed = []
        for arg in args:
            try:
                phil_bits = interp.process(arg=arg)
                self.phil_scope = self.phil_scope.fetch(sources=[phil_bits])
                absorbed.append(arg)
            except Sorry:
                # Unparseable arguments are left in *args* for the caller.
                pass

        for item in absorbed:
            args.remove(item)

        self.new_scope_extract()
def load_cxi_phil(path, args=[]):
  """Load a CXI PHIL target (trying "", ".params", ".phil" extensions)
  and merge any parseable command-line PHIL arguments into it.

  Note: *args* has a mutable default, but it is only iterated here,
  never mutated.
  """
  import os
  from labelit.phil_preferences import iotbx_defs, libtbx_defs
  from iotbx import phil
  from libtbx.phil.command_line import argument_interpreter
  from libtbx.utils import Sorry

  exts = ["", ".params", ".phil"]
  foundIt = False
  for ext in exts:
    if os.path.exists(path + ext):
      foundIt = True
      path += ext
      break
  if not foundIt:
    raise Sorry("Target not found: " + path)

  master_phil = phil.parse(input_string=iotbx_defs + libtbx_defs,
                           process_includes=True)

  horizons_phil = master_phil.fetch(
    sources=[phil.parse(file_name=path, process_includes=True)])

  # Renamed local so the imported factory is not shadowed.
  interp = argument_interpreter(
    master_phil=master_phil
  )
  consume = []
  for arg in args:
    try:
      command_line_params = interp.process(
        arg=arg
      )
      horizons_phil = horizons_phil.fetch(sources=[command_line_params,])
      consume.append(arg)

    except Sorry:
      # Fixed Python 2 "except Sorry,e" syntax; the value was unused.
      pass
Exemple #31
0
def process_input(args,
                  phil_args,
                  input_file,
                  mode='auto',
                  now=None):
  """ Read and parse parameter file and/or command-line args; if none found,
  create a default parameter object

  :param args: command-line arguments upon launch
  :param phil_args: command-line arguments pertaining to IOTA parameters
  :param input_file: text file with IOTA parameters
  :param mode: Mode of XtermIOTA run. See the InitAll base class
  :param now: date / time stamp
  :return: PHIL-formatted parameters
  """

  from libtbx.phil.command_line import argument_interpreter
  from libtbx.utils import Sorry

  # Depending on mode, either read input from file, or generate defaults
  if mode == 'file':
    user_phil = [ip.parse(open(inp).read()) for inp in [input_file]]
    working_phil = master_phil.fetch(sources=user_phil)
  else:
    working_phil = master_phil

  # Parse in-line params into phil; local renamed so it does not shadow
  # the imported argument_interpreter factory.
  interp = argument_interpreter(master_phil=master_phil)
  consume = []
  for arg in phil_args:
    try:
      command_line_params = interp.process(arg=arg)
      working_phil = working_phil.fetch(sources=[command_line_params,])
      consume.append(arg)
    except Sorry:
      # Fixed Python 2 "except Sorry,e" syntax; the value was unused.
      pass
def run(args):
  """Monte-Carlo estimate of predicted spot counts for random crystal
  orientations on a single sweep: samples params.n_samples random
  rotations in parallel, prints statistics/histogram/raw counts and
  optionally saves a histogram PDF."""
  from libtbx.phil import command_line

  from dials.util.command_line import Importer
  from dials.array_family import flex
  # Converted Python 2 print statements to Python 3 print() calls
  # throughout this function.
  print(args)
  importer = Importer(args, check_format=False)
  assert len(importer.datablocks) == 1
  sweeps = importer.datablocks[0].extract_imagesets()
  assert len(sweeps) == 1
  sweep = sweeps[0]

  cmd_line = command_line.argument_interpreter(master_params=master_phil_scope)
  working_phil = cmd_line.process_and_fetch(args=importer.unhandled_arguments)
  working_phil.show()

  params = working_phil.extract()
  assert params.unit_cell is not None
  assert params.space_group is not None
  unit_cell = params.unit_cell
  space_group = params.space_group.group()

  import random
  from dxtbx.model.crystal import crystal_model
  from cctbx import crystal, miller
  from scitbx import matrix

  flex.set_random_seed(params.random_seed)
  random.seed(params.random_seed)

  crystal_symmetry = crystal.symmetry(unit_cell=unit_cell,
                                      space_group=space_group)

  # the reciprocal matrix
  B = matrix.sqr(unit_cell.fractionalization_matrix()).transpose()

  n_predicted = flex.double()

  def predict_once(args):
    # Predict reflections for one random orientation U and count those
    # above the optional resolution limit.
    from dxtbx.model.experiment.experiment_list import Experiment
    U = args[0]
    A = U * B
    direct_matrix = A.inverse()
    cryst_model = crystal_model(direct_matrix[0:3],
                                direct_matrix[3:6],
                                direct_matrix[6:9],
                                space_group=space_group)
    experiment = Experiment(imageset=sweep,
                            beam=sweep.get_beam(),
                            detector=sweep.get_detector(),
                            goniometer=sweep.get_goniometer(),
                            scan=sweep.get_scan(),
                            crystal=cryst_model)
    predicted_reflections = flex.reflection_table.from_predictions(
      experiment)
    miller_indices = predicted_reflections['miller_index']
    miller_set = miller.set(
      crystal_symmetry, miller_indices, anomalous_flag=True)
    if params.d_min is not None:
      resolution_sel = miller_set.d_spacings().data() > params.d_min
      predicted_reflections = predicted_reflections.select(resolution_sel)
    return len(predicted_reflections)

  from libtbx import easy_mp
  args = [(random_rotation(),) for i in range(params.n_samples)]
  results = easy_mp.parallel_map(
    func=predict_once,
    iterable=args,
    processes=params.nproc,
    preserve_order=True,
    preserve_exception_message=True)
  n_predicted = flex.double(results)

  print("Basic statistics:")
  from scitbx.math import basic_statistics
  stats = basic_statistics(n_predicted)
  stats.show()

  print("Histogram:")
  hist = flex.histogram(n_predicted, n_slots=20)
  hist.show()

  print("Raw spot counts:")
  print(list(n_predicted))

  if params.plot:
    from matplotlib import pyplot
    from matplotlib.backends.backend_pdf import PdfPages

    pyplot.rc('font', family='serif')
    pyplot.rc('font', serif='Times New Roman')

    red, blue = '#B2182B', '#2166AC'
    fig = pyplot.figure()
    ax = fig.add_subplot(1,1,1)
    ax.bar(hist.slot_centers(), hist.slots(), width=0.75*hist.slot_width(),
           color=blue, edgecolor=blue)
    ax.set_xlabel('Spot count')
    ax.set_ylabel('Frequency')
    pdf = PdfPages("predicted_count_histogram.pdf")
    pdf.savefig(fig)
    pdf.close()
def run(args):
    """Predict reflections for many random crystal orientations and report
    per-orientation spot-count statistics.

    ``args`` mixes image files (consumed by the DIALS Importer) with PHIL
    assignments (unit_cell, space_group, random_seed, n_samples, nproc,
    d_min, plot).  Statistics are printed to stdout; if ``plot`` is set a
    histogram is written to predicted_count_histogram.pdf.
    """

    from libtbx.phil import command_line

    from dials.util.command_line import Importer
    from dials.array_family import flex

    print(args)
    # Exactly one datablock containing exactly one sweep is required.
    importer = Importer(args, check_format=False)
    assert len(importer.datablocks) == 1
    sweeps = importer.datablocks[0].extract_imagesets()
    assert len(sweeps) == 1
    sweep = sweeps[0]

    # Arguments not consumed by the Importer are parsed as PHIL assignments.
    cmd_line = command_line.argument_interpreter(master_params=master_phil_scope)
    working_phil = cmd_line.process_and_fetch(args=importer.unhandled_arguments)
    working_phil.show()

    params = working_phil.extract()
    assert params.unit_cell is not None
    assert params.space_group is not None
    unit_cell = params.unit_cell
    space_group = params.space_group.group()

    import random
    from dxtbx.model.crystal import crystal_model
    from cctbx import crystal, miller
    from scitbx import matrix

    # Seed both the flex and Python RNGs so runs are reproducible.
    flex.set_random_seed(params.random_seed)
    random.seed(params.random_seed)

    crystal_symmetry = crystal.symmetry(unit_cell=unit_cell, space_group=space_group)

    # the reciprocal matrix
    B = matrix.sqr(unit_cell.fractionalization_matrix()).transpose()

    # Placeholder; the real values are collected from parallel_map below.
    n_predicted = flex.double()

    def predict_once(args):
        # Predict reflections for a single random orientation matrix U and
        # return the number of predictions (optionally limited to d > d_min).
        from dxtbx.model.experiment.experiment_list import Experiment

        U = args[0]
        A = U * B
        direct_matrix = A.inverse()
        cryst_model = crystal_model(
            direct_matrix[0:3],
            direct_matrix[3:6],
            direct_matrix[6:9],
            space_group=space_group,
        )
        experiment = Experiment(
            imageset=sweep,
            beam=sweep.get_beam(),
            detector=sweep.get_detector(),
            goniometer=sweep.get_goniometer(),
            scan=sweep.get_scan(),
            crystal=cryst_model,
        )
        predicted_reflections = flex.reflection_table.from_predictions(experiment)
        miller_indices = predicted_reflections["miller_index"]
        miller_set = miller.set(crystal_symmetry, miller_indices, anomalous_flag=True)
        if params.d_min is not None:
            resolution_sel = miller_set.d_spacings().data() > params.d_min
            predicted_reflections = predicted_reflections.select(resolution_sel)
        return len(predicted_reflections)

    from libtbx import easy_mp

    # NOTE(review): random_rotation is assumed to be defined at module level
    # (not visible in this chunk) — confirm before reuse.
    args = [(random_rotation(),) for i in range(params.n_samples)]
    results = easy_mp.parallel_map(
        func=predict_once,
        iterable=args,
        processes=params.nproc,
        preserve_order=True,
        preserve_exception_message=True,
    )
    n_predicted = flex.double(results)

    print("Basic statistics:")
    from scitbx.math import basic_statistics

    stats = basic_statistics(n_predicted)
    stats.show()

    print("Histogram:")
    hist = flex.histogram(n_predicted, n_slots=20)
    hist.show()

    print("Raw spot counts:")
    print(list(n_predicted))

    if params.plot:
        from matplotlib import pyplot
        from matplotlib.backends.backend_pdf import PdfPages

        pyplot.rc("font", family="serif")
        pyplot.rc("font", serif="Times New Roman")

        # red is unused below but kept for parity with the original snippet.
        red, blue = "#B2182B", "#2166AC"
        fig = pyplot.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.bar(
            hist.slot_centers(),
            hist.slots(),
            width=0.75 * hist.slot_width(),
            color=blue,
            edgecolor=blue,
        )
        ax.set_xlabel("Spot count")
        ax.set_ylabel("Frequency")
        pdf = PdfPages("predicted_count_histogram.pdf")
        pdf.savefig(fig)
        pdf.close()
def run(args):
    """Concatenate unmerged intensities and batch columns from one or more
    MTZ files and hand them to multi_crystal_analysis.

    ``args`` mixes PHIL assignments with reflection-file paths; file paths
    are collected via custom_processor="collect_remaining".
    """

    cmd_line = command_line.argument_interpreter(
        master_params=master_phil_scope)
    working_phil, args = cmd_line.process_and_fetch(
        args=args, custom_processor="collect_remaining")
    working_phil.show()
    params = working_phil.extract()

    # An explicit unit cell on the command line overrides file symmetry.
    if params.unit_cell is not None:
        unit_cell = params.unit_cell
        crystal_symmetry = crystal.symmetry(unit_cell=unit_cell)
    else:
        crystal_symmetry = None

    from iotbx.reflection_file_reader import any_reflection_file

    unmerged_intensities = None
    batches_all = None
    batch_add = None  # offset applied to batch numbers of files after the first
    id_to_batches = None

    for i, file_name in enumerate(args):

        result = any_reflection_file(file_name)
        intensities = None
        batches = None

        # Pick out the intensity array (anomalous or mean) and BATCH column.
        for ma in result.as_miller_arrays(merge_equivalents=False,
                                          crystal_symmetry=crystal_symmetry):
            if ma.info().labels == ['I(+)', 'SIGI(+)', 'I(-)', 'SIGI(-)']:
                assert ma.anomalous_flag()
                intensities = ma
            elif ma.info().labels == ['I', 'SIGI']:
                assert not ma.anomalous_flag()
                intensities = ma
            elif ma.info().labels == ['BATCH']:
                batches = ma

        assert batches is not None
        assert intensities is not None

        if batches_all is None:
            batches_all = batches
            id_to_batches = {}
        else:
            if batch_add is None:
                import math
                # Next power of ten above the largest existing batch number,
                # so offset batches from different files cannot collide.
                batch_add = 10**int(
                    math.ceil(math.log10(flex.max(batches_all.data()) + 10)))
            batches = batches.customized_copy(data=batches.data() +
                                              batch_add * i)
            batches_all = batches_all.concatenate(
                batches, assert_is_similar_symmetry=False)
        id_to_batches[i] = (flex.min(batches.data()), flex.max(batches.data()))
        if unmerged_intensities is None:
            unmerged_intensities = intensities
        else:
            unmerged_intensities = unmerged_intensities.concatenate(
                intensities, assert_is_similar_symmetry=False)

    if len(id_to_batches) == 1:
        # single file as input
        id_to_batches = None

    # Explicit batch ranges given via PHIL override the automatic mapping.
    if len(params.batch) > 0:
        id_to_batches = OrderedDict()
        for b in params.batch:
            assert b.id is not None
            assert b.range is not None
            assert b.id not in id_to_batches, "Duplicate batch id: %s" % b.id
            id_to_batches[b.id] = b.range

    multi_crystal_analysis(unmerged_intensities,
                           batches_all,
                           n_bins=params.n_bins,
                           d_min=params.d_min,
                           id_to_batches=id_to_batches)
Exemple #35
0
    def main(self):
        """Simulate shoeboxes for predicted reflections of one sweep/crystal.

        Predicts reflections, keeps those whose six nearest neighbours along
        h/k/l are also predicted, builds a prediction matrix per reflection,
        fills each shoebox with simulated counts (from a reference MTZ or a
        constant), adds background, and optionally pickles the result.
        """
        # FIXME import simulation code
        import six.moves.cPickle as pickle
        import math
        from dials.util.command_line import Importer
        from dials.algorithms.integration import ReflectionPredictor
        from libtbx.utils import Sorry

        # Parse the command line
        params, options, args = self.parser.parse_args()

        importer = Importer(args)
        if len(importer.imagesets) == 0 and len(importer.crystals) == 0:
            self.config().print_help()
            return
        if len(importer.imagesets) != 1:
            raise Sorry('need 1 sweep: %d given' % len(importer.imagesets))
        if len(importer.crystals) != 1:
            raise Sorry('need 1 crystal: %d given' % len(importer.crystals))
        sweep = importer.imagesets[0]
        crystal = importer.crystals[0]

        # generate predictions for possible reflections => generate a
        # reflection list

        predict = ReflectionPredictor()
        predicted = predict(sweep, crystal)

        # sort with James's reflection table: should this not go somewhere central?
        from dials.scratch.jmp.container.reflection_table import ReflectionTable

        # calculate shoebox sizes: take parameters from params & transform
        # from reciprocal space to image space to decide how big a shoe box to use

        table = ReflectionTable()
        table['miller_index'] = predicted.miller_index()
        indexer = table.index_map('miller_index')

        candidates = []

        unique = sorted(indexer)

        # Keep only reflections with all six +/-1 neighbours predicted, so a
        # finite-difference prediction matrix can be built for each.
        for h, k, l in unique:

            try:
                for _h in h - 1, h + 1:
                    if not indexer[(_h, k, l)]:
                        raise ValueError('missing')
                for _k in k - 1, k + 1:
                    if not indexer[(h, _k, l)]:
                        raise ValueError('missing')
                for _l in l - 1, l + 1:
                    if not indexer[(h, k, _l)]:
                        raise ValueError('missing')
                candidates.append((h, k, l))
            except ValueError:
                continue

        from dials.algorithms.simulation.utils import build_prediction_matrix

        from dials.algorithms.simulation.generate_test_reflections import \
         master_phil
        from libtbx.phil import command_line
        # Remaining command-line tokens (after the first two) are PHIL args.
        cmd = command_line.argument_interpreter(master_params=master_phil)
        working_phil = cmd.process_and_fetch(args=args[2:])
        params = working_phil.extract()

        node_size = params.rs_node_size
        window_size = params.rs_window_size
        reference = params.integrated_data_file
        scale = params.integrated_data_file_scale

        if reference:
            # Build a lookup of counts per (symmetry-expanded) Miller index
            # from the reference MTZ, scaled and clamped to >= 0.
            counts_database = {}
            from iotbx import mtz
            m = mtz.object(reference)
            mi = m.extract_miller_indices()
            i = m.extract_reals('IMEAN').data
            s = m.space_group().build_derived_point_group()
            for j in range(len(mi)):
                for op in s.all_ops():
                    hkl = tuple(map(int, op * mi[j]))
                    counts = max(0, int(math.floor(i[j] * scale)))
                    counts_database[hkl] = counts
                    counts_database[(-hkl[0], -hkl[1], -hkl[2])] = counts
        else:

            def constant_factory(value):
                # NOTE(review): .next is Python 2 only; under Python 3 this
                # would need __next__ (or functools.partial(next, it)).
                import itertools
                return itertools.repeat(value).next

            from collections import defaultdict
            counts_database = defaultdict(constant_factory(params.counts))

        from dials.model.data import ReflectionList

        useful = ReflectionList()
        d_matrices = []

        for h, k, l in candidates:
            hkl = predicted[indexer[(h, k, l)][0]]
            _x = hkl.image_coord_px[0]
            _y = hkl.image_coord_px[1]
            _z = hkl.frame_number

            # build prediction matrix from the six neighbouring predictions
            mhkl = predicted[indexer[(h - 1, k, l)][0]]
            phkl = predicted[indexer[(h + 1, k, l)][0]]
            hmkl = predicted[indexer[(h, k - 1, l)][0]]
            hpkl = predicted[indexer[(h, k + 1, l)][0]]
            hkml = predicted[indexer[(h, k, l - 1)][0]]
            hkpl = predicted[indexer[(h, k, l + 1)][0]]
            d = build_prediction_matrix(hkl, mhkl, phkl, hmkl, hpkl, hkml,
                                        hkpl)
            d_matrices.append(d)

            # construct the shoebox parameters: outline the ellipsoid
            x, y, z = [], [], []

            for dh in (1, 0, 0), (0, 1, 0), (0, 0, 1):
                dxyz = -1 * window_size * d * dh
                x.append(dxyz[0] + _x)
                y.append(dxyz[1] + _y)
                z.append(dxyz[2] + _z)
                dxyz = window_size * d * dh
                x.append(dxyz[0] + _x)
                y.append(dxyz[1] + _y)
                z.append(dxyz[2] + _z)

            hkl.bounding_box = (int(math.floor(min(x))),
                                int(math.floor(max(x)) + 1),
                                int(math.floor(min(y))),
                                int(math.floor(max(y)) + 1),
                                int(math.floor(min(z))),
                                int(math.floor(max(z)) + 1))
            # With a reference MTZ, reflections absent from the database are
            # dropped (KeyError); the defaultdict branch never raises here.
            try:
                counts = counts_database[hkl.miller_index]
                useful.append(hkl)
            except KeyError:
                continue

        from dials.algorithms import shoebox
        shoebox.allocate(useful)

        from dials.util.command_line import ProgressBar
        p = ProgressBar(title='Generating shoeboxes')

        # now for each reflection perform the simulation
        for j, refl in enumerate(useful):
            p.update(j * 100.0 / len(useful))
            d = d_matrices[j]

            # Sample normally-distributed offsets in reciprocal space and
            # map them into image space for this reflection.
            from scitbx.random import variate, normal_distribution
            g = variate(normal_distribution(mean=0, sigma=node_size))
            counts = counts_database[refl.miller_index]
            dhs = g(counts)
            dks = g(counts)
            dls = g(counts)
            self.map_to_image_space(refl, d, dhs, dks, dls)

        p.finished('Generated %d shoeboxes' % len(useful))

        # now for each reflection add background
        from dials.algorithms.simulation.generate_test_reflections import \
         random_background_plane
        # Flat background when params.background is set, otherwise a sloping
        # plane defined by background_a..d.
        p = ProgressBar(title='Generating background')
        for j, refl in enumerate(useful):
            p.update(j * 100.0 / len(useful))
            if params.background:
                random_background_plane(refl.shoebox, params.background, 0.0,
                                        0.0, 0.0)
            else:
                random_background_plane(refl.shoebox, params.background_a,
                                        params.background_b,
                                        params.background_c,
                                        params.background_d)

        p.finished('Generated %d backgrounds' % len(useful))
        if params.output.all:
            with open(params.output.all, 'wb') as fh:
                pickle.dump(useful, fh, pickle.HIGHEST_PROTOCOL)
Exemple #36
0
    # Fragment: the enclosing definition is not visible in this chunk; it
    # appears to run under MPI (uses a module-level `rank`) and relies on a
    # module-level load_input helper — confirm against the full file.
    phil = sys.argv[1]

    # Master PHIL: repeated input blocks of experiment/reflection file pairs
    # plus a macrocycle count.
    master_phil = parse("""
    input
      .multiple = True
    {
      experiments = None
        .type = path
      reflections = None
        .type = path
    }
    n_macrocycles = 1
      .type = int(value_min=1)
    """)

    cmd_line = command_line.argument_interpreter(master_params=master_phil)
    working_phil = cmd_line.process_and_fetch(args=(phil, ))
    working_params = working_phil.extract()

    # More than one dataset is required.
    assert len(working_params.input) > 1

    # Only the root rank prints the inputs and loads the first dataset.
    if rank == 0:
        for input in working_params.input:
            print(input.experiments, input.reflections)

        print(len(working_params.input), "datasets specified as input")

        e = enumerate(working_params.input)
        i, line = next(e)
        reflections, exp = load_input(line.experiments, line.reflections)
        assert reflections["id"].all_eq(0)
Exemple #37
0
def run(args):
  """Split one unmerged MTZ into two halves at a batch boundary.

  Batch numbers encode run and in-run batch as
  ``num = run_id * batch_multiplier + batch_id``; reflections with
  ``batch_id < split_at_batch`` go to split_1/split_unmerged_1, the rest to
  split_2/split_unmerged_2.  Writes merged (anomalous + mean) MTZ files and,
  if requested, unmerged split files.
  """
  cmd_line = command_line.argument_interpreter(master_params=master_phil_scope)
  working_phil, args = cmd_line.process_and_fetch(
    args=args, custom_processor="collect_remaining")
  working_phil.show()
  params = working_phil.extract()
  batch_multiplier = params.batch_multiplier
  split_at_batch = params.split_at_batch
  assert split_at_batch is not None

  # Exactly one MTZ input file is expected.
  assert len(args) == 1
  file_name = args[0]
  reader = any_reflection_file(file_name)
  assert reader.file_type() == 'ccp4_mtz'

  as_miller_arrays = reader.as_miller_arrays(merge_equivalents=False)
  mtz_object = reader.file_content()
  # Re-key intensities and batch ids on the original (unmerged) indices.
  intensities = [ma for ma in as_miller_arrays
                 if ma.info().labels == ['I', 'SIGI']][0]
  intensities = intensities.customized_copy(
    indices=mtz_object.extract_original_index_miller_indices()).set_info(
      intensities.info())
  batch_ids = [ma for ma in as_miller_arrays
             if ma.info().labels == ['BATCH']][0]
  batch_ids = batch_ids.customized_copy(
    indices=mtz_object.extract_original_index_miller_indices()).set_info(
      batch_ids.info())
  intensities = intensities.customized_copy(anomalous_flag=True).set_info(
    intensities.info())
  intensities.set_observation_type_xray_intensity()

  # Decompose each batch number into (run, in-run batch).
  run_id = flex.int()
  run_batch_id = flex.int()
  for b in batch_ids.data():
    r_id, b_id = divmod(b, batch_multiplier)
    run_id.append(r_id)
    run_batch_id.append(b_id)

  sel = run_batch_id < split_at_batch

  # Merged output: split_1 holds sel, split_2 its complement.
  for negate in (False, True):
    if not negate:
      outfile = "split_1.mtz"
    else:
      outfile = "split_2.mtz"

    intensities_sel = intensities.select(sel, negate=negate).set_info(
      intensities.info())

    result = iotbx.merging_statistics.dataset_statistics(intensities_sel)
    result.show()

    intensities_merged_anom = intensities_sel.merge_equivalents().array()
    intensities_merged = intensities_sel.customized_copy(
      anomalous_flag=False).merge_equivalents().array()

    dataset = intensities_merged_anom.as_mtz_dataset(
      "I", wavelength=intensities.info().wavelength)
    dataset.add_miller_array(
      miller_array=intensities_merged, column_root_label="IMEAN")
    m = dataset.mtz_object()
    m.write(outfile)

  if params.export_unmerged:

    # Dead code below: an earlier manual batch-copying approach, retained
    # for reference.
    #for negate in (False, True):
      #if not negate:
        #isel = sel.iselection()
        #outfile = "unmerged_1.mtz"
      #else:
        #isel = (~sel).iselection()
        #outfile = "unmerged_2.mtz"

      #m = iotbx.mtz.object()
      #m.set_title(mtz_object.title())
      #m.set_space_group_info(mtz_object.space_group_info())

      #batches = mtz_object.batches()
      #batch_sel = flex.bool(batches.size(), False)
      #for i, b in enumerate(batches):
        #r_id, b_id = divmod(b.num(), batch_multiplier)
        #if (((not negate) and (b_id < split_at_batch)) or
            #(negate and (b_id >= split_at_batch))):
          #o = m.add_batch()
          #o.set_num(b.num())
          #o.set_nbsetid(b.nbsetid())
          #o.set_ncryst(b.ncryst())
          #o.set_time1(b.time1())
          #o.set_time2(b.time2())
          #o.set_title(b.title())
          #o.set_ndet(b.ndet())
          #o.set_theta(b.theta())
          #o.set_lbmflg(b.lbmflg())
          #o.set_alambd(b.alambd())
          #o.set_delamb(b.delamb())
          #o.set_delcor(b.delcor())
          #o.set_divhd(b.divhd())
          #o.set_divvd(b.divvd())
          #o.set_so(b.so())
          #o.set_bbfac(b.bbfac())
          #o.set_bscale(b.bscale())
          #o.set_sdbfac(b.sdbfac())
          #o.set_sdbscale(b.sdbscale())
          #o.set_nbscal(b.nbscal())
          #o.set_cell(b.cell())
          #o.set_lbcell(b.lbcell())
          #o.set_umat(b.umat())
          #o.set_crydat(b.crydat())
          #o.set_lcrflg(b.lcrflg())
          #o.set_datum(b.datum())
          #o.set_detlm(b.detlm())
          #o.set_dx(b.dx())
          #o.set_e1(b.e1())
          #o.set_e2(b.e2())
          #o.set_e3(b.e3())
          #o.set_gonlab(b.gonlab())
          #o.set_jsaxs(b.jsaxs())
          #o.set_ngonax(b.ngonax())
          #o.set_phixyz(b.phixyz())
          #o.set_phistt(b.phistt())
          #o.set_phirange(b.phirange())
          #o.set_phiend(b.phiend())
          #o.set_scanax(b.scanax())
          #o.set_misflg(b.misflg())
          #o.set_jumpax(b.jumpax())
          #o.set_ldtype(b.ldtype())


      #for x in m.crystals():
        #x_ = m.add_crystal(x.name(), x.project_name(), x.unit_cell_parameters())
        #for d in x.datasets():
          #d_ = x_.add_dataset(d.name(), d.wavelength())

          #nref = isel.size()
          #m.adjust_column_array_sizes(nref)
          #m.set_n_reflections(nref)

          #for column in d.columns():
            #d_.add_column(column.label(), column.type()).set_values(
              #column.extract_values())
      #print

      #m.write(outfile)

    # Unmerged output: delete the complement of the wanted half in a fresh
    # copy of the file.  negate=True deletes ~sel, so split_unmerged_1
    # keeps sel — consistent with split_1 above.
    for negate in (False, True):
      if negate:
        outfile = "split_unmerged_1.mtz"
      else:
        outfile = "split_unmerged_2.mtz"
      reader = any_reflection_file(file_name)
      mtz_object = reader.file_content()
      if negate:
        isel = (~sel).iselection()
      else:
        isel = sel.iselection()
      mtz_object.delete_reflections(isel)
      mtz_object.write(outfile)
# Script prologue: read a PHIL file path from the command line and parse the
# experiment/reflection input pairs it declares.  Note the Python 2 print
# statement below — this snippet is Python 2 only as written.
if len(sys.argv) != 2: exit("please pass the path to a phil file")
phil = sys.argv[1]

# Master PHIL: repeated input blocks of experiment/reflection file pairs.
master_phil = parse("""
  input
    .multiple = True
  {
    experiments = None
      .type = path
    reflections = None
      .type = path
  }
  """)

cmd_line = command_line.argument_interpreter(master_params=master_phil)
working_phil = cmd_line.process_and_fetch(args=(phil,))
working_params = working_phil.extract()

# Echo the declared datasets (Python 2 print statement).
for input in working_params.input:
  print input.experiments, input.reflections

from dials.model.serialize import load as load_dials
from dxtbx.serialize import load as load_dxtbx
def load_input(exp_path, ref_path):
  """Deserialize a reflection table and the first experiment.

  :param exp_path: path to a serialized experiment list
  :param ref_path: path to a serialized reflection table
  :return: (reflections, experiment) tuple
  """
  reflections = load_dials.reflections(ref_path)
  experiments = load_dxtbx.experiment_list(exp_path, check_format=False)
  return reflections, experiments[0]

from dxtbx.model.experiment.experiment_list import ExperimentList, Experiment
class ExperimentFromCrystal(object):
def run(args):
  """Scale multiple XDS INTEGRATE.HKL wedges together with pointless/aimless.

  For each input file a wedge/lattice id is derived from its path, optional
  overlapping reflections are found and removed, each wedge is converted to
  MTZ with rebatched batch numbers, the indexing ambiguity is optionally
  resolved (Brehm-Diederichs), and finally all wedges are combined with
  pointless and scaled with aimless.  Python 2 only as written (print
  statements, dict.iteritems, print >> redirection).
  """
  cmd_line = command_line.argument_interpreter(master_params=master_phil_scope)
  working_phil, args = cmd_line.process_and_fetch(
    args=args, custom_processor="collect_remaining")
  working_phil.show()
  params = working_phil.extract()

  files = args

  from cctbx import crystal
  from iotbx.reflection_file_reader import any_reflection_file

  file_name_dict = {}

  wedge_id = -1
  wedge_number = -1

  wedge_number_to_wedge_id = {}

  assert params.space_group is not None
  assert params.unit_cell is not None
  space_group = params.space_group.group()
  unit_cell = params.unit_cell
  crystal_symmetry = crystal.symmetry(
    unit_cell=unit_cell, space_group=space_group)

  # Derive wedge and lattice numbers from 'sweep_NNN' / 'lattice_N' path
  # components; fall back to sequential numbering.
  for file_name in files:
    file_name = os.path.abspath(file_name)
    print file_name
    wedge_number_ = None
    for s in file_name.split(os.path.sep):
      if s.startswith('sweep_'):
        wedge_number_ = int(os.path.splitext(s)[0][-3:])
        print "wedge_number:", wedge_number_
        break
    if wedge_number_ is not None:
      wedge_number = wedge_number_
    else:
      wedge_number += 1
    lattice_id = 1
    for s in file_name.split(os.path.sep):
      if s.startswith('lattice_'):
        lattice_id = int(os.path.splitext(s)[0].split('_')[-1])
        print "lattice_id:", lattice_id
        break
    wedge_id += 1
    print "wedge_id: %i, wedge_number: %i, lattice_id: %i" %(
      wedge_id, wedge_number, lattice_id)
    wedge_number_to_wedge_id.setdefault(wedge_number, [])
    wedge_number_to_wedge_id[wedge_number].append(wedge_id)

    #if not intensities.crystal_symmetry().is_similar_symmetry(
      #crystal_symmetry, relative_length_tolerance=0.1):
      #continue

    file_name_dict[wedge_id] = file_name

  if params.overlaps.find_overlaps:
    # figure out the overlapping reflections and save the miller indices
    # for later on
    reject_hkl = {}

    def run_find_overlaps(args):
      # For one wedge number with multiple lattices: import each lattice
      # via dials.import_xds, then run dials.command_line.find_overlaps and
      # return {wedge_id: overlapping miller indices}.
      wedge_n, wedge_ids = args
      result_dict = {}

      print "Wedge", wedge_n
      if len(wedge_ids) > 1:
        for wedge_id in wedge_ids:
          args = ["dials.import_xds",
                  os.path.split(file_name_dict[wedge_id])[0],
                  "--output='experiments_%i.json'" %wedge_id]
          cmd = " ".join(args)
          print cmd
          result = easy_run.fully_buffered(cmd).raise_if_errors()
          result.show_stdout()
          result.show_stderr()

          args = ["dials.import_xds",
                  file_name_dict[wedge_id],
                  "experiments_%i.json" %wedge_id,
                  "--input=reflections",
                  "--output='integrate_hkl_%i.pickle'" %wedge_id]
          cmd = " ".join(args)
          print cmd
          result = easy_run.fully_buffered(cmd).raise_if_errors()
          result.show_stdout()
          result.show_stderr()

        from dials.command_line import find_overlaps
        args = ['experiments_%i.json' %wedge_id for wedge_id in wedge_ids]
        args.extend(['integrate_hkl_%i.pickle' %wedge_id for wedge_id in wedge_ids])
        #args.append("nproc=%s" %params.nproc)

        args.append("max_overlap_fraction=%f" %params.overlaps.max_overlap_fraction)
        args.append("max_overlap_pixels=%f" %params.overlaps.max_overlap_pixels)
        args.append("n_sigma=%f" %params.overlaps.n_sigma)
        args.append("save_overlaps=False")
        overlaps = find_overlaps.run(args)
        miller_indices = overlaps.overlapping_reflections['miller_index']
        overlapping = [
          miller_indices.select(
            overlaps.overlapping_reflections['id'] == i_lattice)
          for i_lattice in range(len(wedge_ids))]
        for wedge_id, overlaps in zip(wedge_ids, overlapping):
          result_dict[wedge_id] = overlaps
      return result_dict

    # Overlap detection is parallelised over wedge numbers.
    from libtbx import easy_mp
    results = easy_mp.parallel_map(
      func=run_find_overlaps,
      iterable=wedge_number_to_wedge_id.items(),
      processes=params.nproc,
      preserve_order=True,
      asynchronous=False,
      preserve_exception_message=True,
    )
    for result in results:
      reject_hkl.update(result)

  # Convert each wedge to MTZ via pointless, then strip overlapping
  # reflections from the MTZ if requested.
  for wedge_n, wedge_ids in wedge_number_to_wedge_id.iteritems():
    for wedge in wedge_ids:
      cmd = """\
pointless -copy xdsin %s hklout integrate_hkl_%03.f.mtz << EOF
SPACEGROUP %s
EOF
""" %(file_name_dict[wedge], wedge, space_group.type().lookup_symbol())
      log = open('pointless_%03.f.log' %wedge, 'wb')
      print >> log, cmd
      result = easy_run.fully_buffered(command=cmd)
      result.show_stdout(out=log)
      result.show_stderr(out=log)

      if params.overlaps.find_overlaps:
        from cctbx import miller
        from iotbx import mtz
        m = mtz.object(file_name="integrate_hkl_%03.f.mtz" %wedge)
        orig_indices = m.extract_original_index_miller_indices()
        overlaps = reject_hkl.get(wedge)
        if overlaps is not None and len(overlaps) > 0:
          matches = miller.match_multi_indices(overlaps, orig_indices)
          before = m.n_reflections()
          print "before: %i reflections" %m.n_reflections()
          # Delete in reverse so earlier indices stay valid.
          for i_ref in sorted(matches.pair_selection(1).iselection(), reverse=True):
            m.delete_reflection(i_ref)
          after = m.n_reflections()
          print "after: %i reflections" %m.n_reflections()
          m.add_history("Removed %i overlapping reflections" %len(overlaps))
          m.write("integrate_hkl_%03.f.mtz" %wedge)

  g = glob.glob("integrate_hkl_*.mtz")

  if params.resolve_indexing_ambiguity:
    from cctbx.command_line import brehm_diederichs
    args = g
    args.append("asymmetric=1")
    args.append("save_plot=True")
    args.append("show_plot=False")
    brehm_diederichs.run(args)
    g = glob.glob("integrate_hkl_*_reindexed.mtz")

  # Offset batch numbers per wedge (1000 * wedge_number) so the combined
  # file has unique batches.
  for file_name in g:
    wedge_number = int(os.path.splitext(
      os.path.basename(file_name))[0].replace('_reindexed', '')[-3:])
    #print wedge_number, wedge_number
    result = any_reflection_file(file_name)
    mtz_object = result.file_content()
    #if not mtz_object.crystals()[0].crystal_symmetry().is_similar_symmetry(
      #crystal_symmetry, relative_length_tolerance=0.1):
      #continue
    for batch in mtz_object.batches():
      batch.set_num(batch.num() + 1000 * wedge_number)
    batches = mtz_object.get_column('BATCH')
    batches.set_values(batches.extract_values() + 1000*wedge_number)
    mtz_object.write("rebatch-%i.mtz" %(wedge_number))

  g = glob.glob("rebatch-*.mtz")

  # Combine all rebatched wedges with pointless, then scale with aimless.
  cmd = """\
pointless -copy hklin %s hklout pointless.mtz << EOF
ALLOW OUTOFSEQUENCEFILES
TOLERANCE 4
SPACEGROUP %s
EOF
""" %(" ".join(g), space_group.type().lookup_symbol())

  log = open('pointless_all.log', 'wb')
  print >> log, cmd
  result = easy_run.fully_buffered(command=cmd)
  result.show_stdout(out=log)
  result.show_stderr(out=log)

  cmd = """\
aimless pointless.mtz << EOF
OUTPUT UNMERGED TOGETHER
%s
EOF
""" %("\n".join(params.aimless.command))

  log = open('aimless.log', 'wb')
  print >> log, cmd
  result = easy_run.fully_buffered(command=cmd)
  result.show_stdout(out=log)
  result.show_stderr(out=log)
def run(args):
    """Export detector images as bitmaps (dxtbx.export_bitmaps).

    ``args`` mixes image files (turned into datablocks) with PHIL options
    (brightness, binning, colour_scheme, output_dir, format).  One bitmap is
    written per image.  Python 2 only as written (cStringIO, print
    statement).
    """
    import os
    from libtbx.phil import command_line
    from libtbx.utils import Sorry, Usage

    # No arguments: show usage with the full PHIL scope and exit.
    if len(args) == 0:
        from cStringIO import StringIO
        s = StringIO()
        master_phil_scope.show(out=s)
        raise Usage("""\
dxtbx.export_bitmaps image_files [options]

% s
""" % s.getvalue())

    from dxtbx.datablock import DataBlockFactory
    unhandled = []
    datablocks = DataBlockFactory.from_args(args,
                                            verbose=False,
                                            unhandled=unhandled)
    assert len(datablocks) > 0
    imagesets = datablocks[0].extract_imagesets()

    # Arguments not consumed by the DataBlockFactory are PHIL assignments.
    cmd_line = command_line.argument_interpreter(
        master_params=master_phil_scope)
    working_phil = cmd_line.process_and_fetch(args=unhandled)
    working_phil.show()
    params = working_phil.extract()

    brightness = params.brightness / 100
    vendortype = "made up"

    # check that binning is a power of 2
    binning = params.binning
    if not (binning > 0 and ((binning & (binning - 1)) == 0)):
        raise Sorry("binning must be a power of 2")

    output_dir = params.output_dir
    if output_dir is None:
        output_dir = "."
    elif not os.path.exists(output_dir):
        os.makedirs(output_dir)

    from rstbx.slip_viewer.tile_generation \
         import _get_flex_image, _get_flex_image_multipanel

    for imageset in imagesets:
        detector = imageset.get_detector()
        panel = detector[0]
        # XXX is this inclusive or exclusive?
        saturation = panel.get_trusted_range()[1]
        for i_image, image in enumerate(imageset):

            if len(detector) > 1:
                # FIXME This doesn't work properly, as flex_image.size2() is incorrect
                # also binning doesn't work
                assert binning == 1
                flex_image = _get_flex_image_multipanel(
                    brightness=brightness,
                    panels=detector,
                    raw_data=image,
                    beam=imageset.get_beam())
            else:
                flex_image = _get_flex_image(brightness=brightness,
                                             data=image,
                                             binning=binning,
                                             saturation=saturation,
                                             vendortype=vendortype)

            flex_image.setWindow(0, 0, 1)
            flex_image.adjust(
                color_scheme=colour_schemes.get(params.colour_scheme))

            # now export as a bitmap
            flex_image.prep_string()
            try:
                import PIL.Image as Image
            except ImportError:
                import Image
            # XXX is size//binning safe here?
            # Image.fromstring was removed in newer Pillow; fall back to
            # frombytes when fromstring raises NotImplementedError.
            try:
                pil_img = Image.fromstring('RGB',
                                           (flex_image.size2() // binning,
                                            flex_image.size1() // binning),
                                           flex_image.export_string)
            except NotImplementedError:
                pil_img = Image.frombytes('RGB',
                                          (flex_image.size2() // binning,
                                           flex_image.size1() // binning),
                                          flex_image.export_string)

            basename = os.path.basename(
                os.path.splitext(imageset.paths()[i_image])[0])
            path = os.path.join(output_dir, basename + '.' + params.format)

            # Python 2 print statement.
            print "Exporting %s" % path
            tmp_stream = open(path, 'wb')
            pil_img.save(tmp_stream, format=params.format)
            tmp_stream.close()
def run(args, validated=False):
  """Compare two CCP4 maps and report correlation statistics.

  Prints overall statistics for each map, the linear correlation of the
  input maps, CCpeak over a range of cutoffs, the discrepancy
  (D-)function, and the two map histograms; the same numbers are also
  collected into the returned dict.

  :param args: command-line arguments (map_1=<file> map_2=<file>, or two
               bare file paths)
  :param validated: when True (set by the GUI), skip validate_params()
  :returns: dict with keys 'map_files', 'map_statistics',
            'cc_input_maps', 'cc_quantile', 'cc_peaks', 'discrepancies'
            and 'map_histograms'
  """
  show_citation()
  if ( (len(args) == 0) or (len(args) > 2) ):
    print '\nUsage: phenix.map_comparison map_1=<first map> map_2=<second map>\n'
    sys.exit()

  # process arguments
  try: # automatic parsing
    params = phil.process_command_line_with_files(
      args=args, master_phil=master_phil).work.extract()
  except Exception: # map_file_def only handles one map phil
    # fall back to manual parsing: non-file tokens go through the phil
    # interpreter, bare file paths are assigned positionally below
    from libtbx.phil.command_line import argument_interpreter
    arg_int = argument_interpreter(master_phil=master_phil)
    command_line_args = list()
    map_files = list()
    for arg in args:
      if (os.path.isfile(arg)):
        map_files.append(arg)
      else:
        command_line_args.append(arg_int.process(arg))
    params = master_phil.fetch(sources=command_line_args).extract()
    # first unassigned file becomes map_1, the next becomes map_2
    for map_file in map_files:
      if (params.input.map_1 is None):
        params.input.map_1 = map_file
      else:
        params.input.map_2 = map_file

  # validate arguments (GUI sets validated to true, no need to run again)
  if (not validated):
    validate_params(params)

  # ---------------------------------------------------------------------------
  # map 1
  ccp4_map_1 = iotbx.ccp4_map.map_reader(file_name=params.input.map_1)
  cs_1 = crystal.symmetry(ccp4_map_1.unit_cell().parameters(),
    ccp4_map_1.space_group_number)
  m1 = ccp4_map_1.map_data()

  # map 2
  ccp4_map_2 = iotbx.ccp4_map.map_reader(file_name=params.input.map_2)
  cs_2 = crystal.symmetry(ccp4_map_2.unit_cell().parameters(),
    ccp4_map_2.space_group_number)
  m2 = ccp4_map_2.map_data()

  # show general statistics
  s1 = maptbx.more_statistics(m1)
  s2 = maptbx.more_statistics(m2)
  show_overall_statistics(s=s1, header="Map 1 (%s):"%params.input.map_1)
  show_overall_statistics(s=s2, header="Map 2 (%s):"%params.input.map_2)
  cc_input_maps = flex.linear_correlation(x = m1.as_1d(),
                                          y = m2.as_1d()).coefficient()
  print "CC, input maps: %6.4f" % cc_input_maps

  # compute CCpeak
  # maps are first histogram-equalized (quantile rank-scaled) so the
  # correlation is insensitive to the absolute scale of either map
  cc_peaks = list()
  m1_he = maptbx.volume_scale(map = m1,  n_bins = 10000).map_data()
  m2_he = maptbx.volume_scale(map = m2,  n_bins = 10000).map_data()
  cc_quantile = flex.linear_correlation(x = m1_he.as_1d(),
                                        y = m2_he.as_1d()).coefficient()
  print "CC, quantile rank-scaled (histogram equalized) maps: %6.4f" % \
    cc_quantile
  print "Peak correlation:"
  print "  cutoff  CCpeak"
  for cutoff in [i/100. for i in range(0,100,5)]+[0.99, 1.0]:
    cc_peak = maptbx.cc_peak(map_1=m1_he, map_2=m2_he, cutoff=cutoff)
    print "  %3.2f   %7.4f" % (cutoff, cc_peak)
    cc_peaks.append((cutoff, cc_peak))

  # compute discrepancy function (D-function)
  discrepancies = list()
  cutoffs = flex.double([i/20. for i in range(1,20)])
  df = maptbx.discrepancy_function(map_1=m1_he, map_2=m2_he, cutoffs=cutoffs)
  print "Discrepancy function:"
  print "  cutoff  D"
  for c, d in zip(cutoffs, df):
    print "  %3.2f   %7.4f" % (c,d)
    discrepancies.append((c, d))

  # compute and output histograms
  h1 = maptbx.histogram(map=m1, n_bins=10000)
  h2 = maptbx.histogram(map=m2, n_bins=10000)
  print "Map histograms:"
  print "Map 1 (%s)     Map 2 (%s)"%(params.input.map_1,params.input.map_2)
  print "(map_value,cdf,frequency) <> (map_value,cdf,frequency)"
  for a1,c1,v1, a2,c2,v2 in zip(h1.arguments(), h1.c_values(), h1.values(),
                                h2.arguments(), h2.c_values(), h2.values()):
    print "(%9.5f %9.5f %9.5f) <> (%9.5f %9.5f %9.5f)"%(a1,c1,v1, a2,c2,v2)

  # store results
  s1_dict = create_statistics_dict(s1)
  s2_dict = create_statistics_dict(s2)
  results = dict()
  results['map_files'] = (params.input.map_1, params.input.map_2)
  results['map_statistics'] = (s1_dict, s2_dict)
  results['cc_input_maps'] = cc_input_maps
  results['cc_quantile'] = cc_quantile
  results['cc_peaks'] = cc_peaks
  results['discrepancies'] = discrepancies
  results['map_histograms'] = ( (h1.arguments(), h1.c_values(), h1.values()),
                                (h2.arguments(), h2.c_values(), h2.values()) )

  return results
def run(args, out=sys.stdout, validated=False):
  """Compare two density maps supplied as CCP4 maps and/or MTZ coefficients.

  Accepts any combination of two inputs (2 maps, map + MTZ, 2 MTZ files);
  MTZ inputs are converted to real-space maps on a common grid.  Prints
  overall statistics, input-map CC, CCpeak over a range of cutoffs, the
  discrepancy (D-)function and map histograms, and returns the collected
  numbers as a dict.

  :param args: command-line arguments (files plus optional
               mtz_label_1/mtz_label_2 phil assignments)
  :param out: stream for all printed output
  :param validated: when True (set by the GUI), skip validate_params()
  :returns: dict with keys 'map_files', 'map_statistics',
            'cc_input_maps', 'cc_quantile', 'cc_peaks', 'discrepancies'
            and 'map_histograms'
  """
  show_citation(out=out)
  if (len(args) == 0):
    master_phil.show(out=out)
    print >> out,\
      '\nUsage: phenix.map_comparison <CCP4> <CCP4>\n',\
      '       phenix.map_comparison <CCP4> <MTZ> mtz_label_1=<label>\n',\
      '       phenix.map_comparison <MTZ 1> mtz_label_1=<label 1> <MTZ 2> mtz_label_2=<label 2>\n'
    sys.exit()

  # process arguments
  params = None
  input_attributes = ['map_1', 'mtz_1', 'map_2', 'mtz_2']
  try: # automatic parsing
    params = phil.process_command_line_with_files(
      args=args, master_phil=master_phil).work.extract()
  except Exception: # map_file_def only handles one map phil
    # fall back to manual parsing: non-file tokens go through the phil
    # interpreter, bare file paths are matched to phil slots by type below
    from libtbx.phil.command_line import argument_interpreter
    arg_int = argument_interpreter(master_phil=master_phil)
    command_line_args = list()
    map_files = list()
    for arg in args:
      if (os.path.isfile(arg)):
        map_files.append(arg)
      else:
        command_line_args.append(arg_int.process(arg))
    params = master_phil.fetch(sources=command_line_args).extract()

    # check if more files are necessary
    n_defined = 0
    for attribute in input_attributes:
      if (getattr(params.input, attribute) is not None):
        n_defined += 1

    # matches files to phil scope, stops once there is sufficient data
    for map_file in map_files:
      if (n_defined < 2):
        current_map = file_reader.any_file(map_file)
        if (current_map.file_type == 'ccp4_map'):
          n_defined += 1
          if (params.input.map_1 is None):
            params.input.map_1 = map_file
          elif (params.input.map_2 is None):
            params.input.map_2 = map_file
        elif (current_map.file_type == 'hkl'):
          n_defined += 1
          if (params.input.mtz_1 is None):
            params.input.mtz_1 = map_file
          elif (params.input.mtz_2 is None):
            params.input.mtz_2 = map_file
      else:
        print >> out, 'WARNING: only the first two files are used'
        break

  # validate arguments (GUI sets validated to true, no need to run again)
  assert (params is not None)
  if (not validated):
    validate_params(params)

  # ---------------------------------------------------------------------------
  # check if maps need to be generated from mtz
  n_maps = 0
  maps = list()
  map_names = list()
  for attribute in input_attributes:
    filename = getattr(params.input, attribute)
    if (filename is not None):
      map_names.append(filename)
      current_map = file_reader.any_file(filename)
      maps.append(current_map)
      if (current_map.file_type == 'ccp4_map'):
        n_maps += 1

  # construct maps, if necessary
  crystal_gridding = None
  m1 = None
  m2 = None

  # 1 map, 1 mtz file
  if (n_maps == 1):
    for current_map in maps:
      if (current_map.file_type == 'ccp4_map'):
        uc = current_map.file_object.unit_cell()
        sg_info = space_group_info(current_map.file_object.space_group_number)
        n_real = current_map.file_object.unit_cell_grid
        crystal_gridding = maptbx.crystal_gridding(
          uc, space_group_info=sg_info, pre_determined_n_real=n_real)
        m1 = current_map.file_object.map_data()
    if (crystal_gridding is not None):
      label = None
      for attribute in [('mtz_1', 'mtz_label_1'),
                        ('mtz_2', 'mtz_label_2')]:
        filename = getattr(params.input, attribute[0])
        label = getattr(params.input, attribute[1])
        if ( (filename is not None) and (label is not None) ):
          break
      # labels will match currently open mtz file
      for current_map in maps:
        if (current_map.file_type == 'hkl'):
          m2 = miller.fft_map(
            crystal_gridding=crystal_gridding,
            fourier_coefficients=current_map.file_server.get_miller_array(
              label)).apply_sigma_scaling().real_map_unpadded()
    else:
      raise Sorry('Gridding is not defined.')

  # 2 mtz files
  elif (n_maps == 0):
    crystal_symmetry = get_crystal_symmetry(maps[0])
    d_min = min(get_d_min(maps[0]), get_d_min(maps[1]))
    crystal_gridding = maptbx.crystal_gridding(
      crystal_symmetry.unit_cell(), d_min=d_min,
      resolution_factor=params.options.resolution_factor,
      space_group_info=crystal_symmetry.space_group_info())
    m1 = miller.fft_map(
      crystal_gridding=crystal_gridding,
      fourier_coefficients=maps[0].file_server.get_miller_array(
        params.input.mtz_label_1)).apply_sigma_scaling().real_map_unpadded()
    m2 = miller.fft_map(
      crystal_gridding=crystal_gridding,
      fourier_coefficients=maps[1].file_server.get_miller_array(
        params.input.mtz_label_2)).apply_sigma_scaling().real_map_unpadded()

  # 2 maps
  else:
    m1 = maps[0].file_object.map_data()
    m2 = maps[1].file_object.map_data()

  # ---------------------------------------------------------------------------
  # analyze maps
  assert ( (m1 is not None) and (m2 is not None) )

  # show general statistics
  s1 = maptbx.more_statistics(m1)
  s2 = maptbx.more_statistics(m2)
  show_overall_statistics(out=out, s=s1, header="Map 1 (%s):"%map_names[0])
  show_overall_statistics(out=out, s=s2, header="Map 2 (%s):"%map_names[1])
  cc_input_maps = flex.linear_correlation(x = m1.as_1d(),
                                          y = m2.as_1d()).coefficient()
  print >> out, "CC, input maps: %6.4f" % cc_input_maps

  # compute CCpeak
  # maps are first histogram-equalized (quantile rank-scaled) so the
  # correlation is insensitive to the absolute scale of either map
  cc_peaks = list()
  m1_he = maptbx.volume_scale(map = m1,  n_bins = 10000).map_data()
  m2_he = maptbx.volume_scale(map = m2,  n_bins = 10000).map_data()
  cc_quantile = flex.linear_correlation(x = m1_he.as_1d(),
                                        y = m2_he.as_1d()).coefficient()
  print >> out, "CC, quantile rank-scaled (histogram equalized) maps: %6.4f" % \
    cc_quantile
  print >> out, "Peak correlation:"
  print >> out, "  cutoff  CCpeak"
  # FIX: i/1000. (not i/1000) -- under Python 2 integer division made every
  # cutoff in the 0.900-0.999 tail collapse to 0
  cutoffs = [i/100.  for i in range(1,90)]+ [i/1000. for i in range(900,1000)]
  for cutoff in cutoffs:
    cc_peak = maptbx.cc_peak(map_1=m1_he, map_2=m2_he, cutoff=cutoff)
    print >> out, "  %3.2f   %7.4f" % (cutoff, cc_peak)
    cc_peaks.append((cutoff, cc_peak))

  # compute discrepancy function (D-function)
  discrepancies = list()
  cutoffs = flex.double(cutoffs)
  df = maptbx.discrepancy_function(map_1=m1_he, map_2=m2_he, cutoffs=cutoffs)
  print >> out, "Discrepancy function:"
  print >> out, "  cutoff  D"
  for c, d in zip(cutoffs, df):
    print >> out, "  %3.2f   %7.4f" % (c,d)
    discrepancies.append((c, d))

  # compute and output histograms
  h1 = maptbx.histogram(map=m1, n_bins=10000)
  h2 = maptbx.histogram(map=m2, n_bins=10000)
  print >> out, "Map histograms:"
  # FIX: use map_names (the actual input files) rather than
  # params.input.map_1/map_2, which are None when the inputs were MTZ files
  print >> out, "Map 1 (%s)     Map 2 (%s)"%\
    (map_names[0],map_names[1])
  print >> out, "(map_value,cdf,frequency) <> (map_value,cdf,frequency)"
  for a1,c1,v1, a2,c2,v2 in zip(h1.arguments(), h1.c_values(), h1.values(),
                                h2.arguments(), h2.c_values(), h2.values()):
    print >> out, "(%9.5f %9.5f %9.5f) <> (%9.5f %9.5f %9.5f)"%\
      (a1,c1,v1, a2,c2,v2)

  # store results
  s1_dict = create_statistics_dict(s=s1)
  s2_dict = create_statistics_dict(s=s2)
  results = dict()
  inputs = list()
  for attribute in input_attributes:
    filename = getattr(params.input,attribute)
    if (filename is not None):
      inputs.append(filename)
  assert (len(inputs) == 2)
  results['map_files'] = inputs
  results['map_statistics'] = (s1_dict, s2_dict)
  results['cc_input_maps'] = cc_input_maps
  results['cc_quantile'] = cc_quantile
  results['cc_peaks'] = cc_peaks
  results['discrepancies'] = discrepancies
  results['map_histograms'] = ( (h1.arguments(), h1.c_values(), h1.values()),
                                (h2.arguments(), h2.c_values(), h2.values()) )

  return results
Exemple #43
0
    def setup(self):
        """Parse the xia2 command line and initialise the run.

        Interprets ``sys.argv`` as PHIL parameters and deprecated flags,
        merges everything into the global ``PhilIndex``, resolves Auto
        defaults (anomalous, resolution, multiprocessing, indexing),
        registers input datasets/templates, writes xia2-working.phil and
        xia2-diff.phil, and finally raises if any command-line option was
        not understood.
        """

        # check arguments are all ascii

        Debug.write("Start parsing command line: " + str(sys.argv))

        for token in sys.argv:
            try:
                token.encode("ascii")
            except UnicodeDecodeError:
                raise RuntimeError("non-ascii characters in input")

        self._argv = copy.deepcopy(sys.argv)

        # translate deprecated single-dash flags to their PHIL equivalents
        replacements = {
            "-2d": "pipeline=2d",
            "-2di": "pipeline=2di",
            "-3d": "pipeline=3d",
            "-3di": "pipeline=3di",
            "-3dii": "pipeline=3dii",
            "-3dd": "pipeline=3dd",
            "-dials": "pipeline=dials",
            "-quick": "dials.fast_mode=true",
            "-failover": "failover=true",
            "-small_molecule": "small_molecule=true",
        }
        for k, v in replacements.iteritems():
            if k in self._argv:
                print(
                    "***\nCommand line option %s is deprecated.\nPlease use %s instead\n***"
                    % (k, v))
                self._argv[self._argv.index(k)] = v
        # "-atom X" takes a value, so it is rewritten separately
        if "-atom" in self._argv:
            idx = self._argv.index("-atom")
            element = self._argv[idx + 1]
            self._argv[idx:idx + 2] = ["atom=%s" % element]
            print(
                "***\nCommand line option -atom %s is deprecated.\nPlease use atom=%s instead\n***"
                % (element, element))

        # first of all try to interpret arguments as phil parameters/files

        from xia2.Handlers.Phil import master_phil
        from libtbx.phil import command_line

        cmd_line = command_line.argument_interpreter(master_phil=master_phil)
        working_phil, self._argv = cmd_line.process_and_fetch(
            args=self._argv, custom_processor="collect_remaining")

        PhilIndex.merge_phil(working_phil)
        try:
            params = PhilIndex.get_python_object()
        except RuntimeError as e:
            raise Sorry(e)

        # sanity check / interpret Auto in input
        from libtbx import Auto

        # anomalous defaults to false unless an atom type was given,
        # in which case anomalous must be (forced) true
        if params.xia2.settings.input.atom is None:
            if params.xia2.settings.input.anomalous is Auto:
                PhilIndex.update("xia2.settings.input.anomalous=false")
        else:
            if params.xia2.settings.input.anomalous is False:
                raise Sorry(
                    "Setting anomalous=false and atom type inconsistent")
            params.xia2.settings.input.anomalous = True
            PhilIndex.update("xia2.settings.input.anomalous=true")

        if params.xia2.settings.resolution.keep_all_reflections is Auto:
            if (params.xia2.settings.small_molecule is True
                    and params.xia2.settings.resolution.d_min is None
                    and params.xia2.settings.resolution.d_max is None):
                PhilIndex.update(
                    "xia2.settings.resolution.keep_all_reflections=true")
            else:
                PhilIndex.update(
                    "xia2.settings.resolution.keep_all_reflections=false")

        if params.xia2.settings.small_molecule is True:
            Debug.write("Small molecule selected")
            if params.xia2.settings.symmetry.chirality is None:
                PhilIndex.update("xia2.settings.symmetry.chirality=nonchiral")
            params = PhilIndex.get_python_object()

        # pipeline options
        self._read_pipeline()

        for (parameter, value) in (
            ("project", params.xia2.settings.project),
            ("crystal", params.xia2.settings.crystal),
        ):
            validate_project_crystal_name(parameter, value)

        Debug.write("Project: %s" % params.xia2.settings.project)
        Debug.write("Crystal: %s" % params.xia2.settings.crystal)

        # FIXME add some consistency checks in here e.g. that there are
        # images assigned, there is a lattice assigned if cell constants
        # are given and so on

        # resolve Auto values for multiprocessing job/processor counts
        params = PhilIndex.get_python_object()
        mp_params = params.xia2.settings.multiprocessing
        from xia2.Handlers.Environment import get_number_cpus

        if mp_params.mode == "parallel":
            if mp_params.type == "qsub":
                if which("qsub") is None:
                    raise Sorry("qsub not available")
            if mp_params.njob is Auto:
                mp_params.njob = get_number_cpus()
                if mp_params.nproc is Auto:
                    mp_params.nproc = 1
            elif mp_params.nproc is Auto:
                mp_params.nproc = get_number_cpus()
        elif mp_params.mode == "serial":
            if mp_params.type == "qsub":
                if which("qsub") is None:
                    raise Sorry("qsub not available")
            if mp_params.njob is Auto:
                mp_params.njob = 1
            if mp_params.nproc is Auto:
                mp_params.nproc = get_number_cpus()

        PhilIndex.update("xia2.settings.multiprocessing.njob=%d" %
                         mp_params.njob)
        PhilIndex.update("xia2.settings.multiprocessing.nproc=%d" %
                         mp_params.nproc)
        params = PhilIndex.get_python_object()
        mp_params = params.xia2.settings.multiprocessing

        if mp_params.nproc > 1 and os.name == "nt":
            raise Sorry("nproc > 1 is not supported on Windows.")  # #191

        if params.xia2.settings.indexer is not None:
            add_preference("indexer", params.xia2.settings.indexer)
        if params.xia2.settings.refiner is not None:
            add_preference("refiner", params.xia2.settings.refiner)
        if params.xia2.settings.integrater is not None:
            add_preference("integrater", params.xia2.settings.integrater)
        if params.xia2.settings.scaler is not None:
            add_preference("scaler", params.xia2.settings.scaler)

        # multi-sweep indexing defaults on for small molecule + dials,
        # but is incompatible with parallel processing
        if params.xia2.settings.multi_sweep_indexing is Auto:
            if (params.xia2.settings.small_molecule is True
                    and "dials" == params.xia2.settings.indexer):
                PhilIndex.update("xia2.settings.multi_sweep_indexing=True")
            else:
                PhilIndex.update("xia2.settings.multi_sweep_indexing=False")
        if (params.xia2.settings.multi_sweep_indexing is True
                and params.xia2.settings.multiprocessing.mode == "parallel"):
            Chatter.write(
                "Multi sweep indexing disabled:\nMSI is not available for parallel processing."
            )
            PhilIndex.update("xia2.settings.multi_sweep_indexing=False")

        input_json = params.xia2.settings.input.json
        if input_json is not None and len(input_json):
            for json_file in input_json:
                assert os.path.isfile(json_file)
                load_experiments(json_file)

        # a reference geometry implies the beam centre can be trusted
        reference_geometry = params.xia2.settings.input.reference_geometry
        if reference_geometry is not None and len(reference_geometry) > 0:
            reference_geometries = "\n".join([
                "xia2.settings.input.reference_geometry=%s" %
                os.path.abspath(g)
                for g in params.xia2.settings.input.reference_geometry
            ])
            Debug.write(reference_geometries)
            PhilIndex.update(reference_geometries)
            Debug.write("xia2.settings.trust_beam_centre=true")
            PhilIndex.update("xia2.settings.trust_beam_centre=true")
            params = PhilIndex.get_python_object()

        params = PhilIndex.get_python_object()
        if params.xia2.settings.input.xinfo is not None:
            xinfo_file = os.path.abspath(params.xia2.settings.input.xinfo)
            PhilIndex.update("xia2.settings.input.xinfo=%s" % xinfo_file)
            params = PhilIndex.get_python_object()
            self.set_xinfo(xinfo_file)

            # issue #55 if not set ATOM in xinfo but anomalous=true or atom= set
            # on commandline, set here, should be idempotent

            if params.xia2.settings.input.anomalous is True:
                crystals = self._xinfo.get_crystals()
                for xname in crystals:
                    xtal = crystals[xname]
                    Debug.write("Setting anomalous for crystal %s" % xname)
                    xtal.set_anomalous(True)
        else:
            xinfo_file = "%s/automatic.xinfo" % os.path.abspath(os.curdir)
            PhilIndex.update("xia2.settings.input.xinfo=%s" % xinfo_file)
            params = PhilIndex.get_python_object()

        # normalise user-supplied phil/xparm paths to absolute paths
        if params.dials.find_spots.phil_file is not None:
            PhilIndex.update(
                "dials.find_spots.phil_file=%s" %
                os.path.abspath(params.dials.find_spots.phil_file))
        if params.dials.index.phil_file is not None:
            PhilIndex.update("dials.index.phil_file=%s" %
                             os.path.abspath(params.dials.index.phil_file))
        if params.dials.refine.phil_file is not None:
            PhilIndex.update("dials.refine.phil_file=%s" %
                             os.path.abspath(params.dials.refine.phil_file))
        if params.dials.integrate.phil_file is not None:
            PhilIndex.update("dials.integrate.phil_file=%s" %
                             os.path.abspath(params.dials.integrate.phil_file))
        if params.xds.index.xparm is not None:
            Flags.set_xparm(params.xds.index.xparm)
        if params.xds.index.xparm_ub is not None:
            Flags.set_xparm_ub(params.xds.index.xparm_ub)

        if params.xia2.settings.scale.freer_file is not None:
            freer_file = os.path.abspath(params.xia2.settings.scale.freer_file)
            if not os.path.exists(freer_file):
                raise RuntimeError("%s does not exist" % freer_file)
            from xia2.Modules.FindFreeFlag import FindFreeFlag

            column = FindFreeFlag(freer_file)
            Debug.write("FreeR_flag column in %s found: %s" %
                        (freer_file, column))
            PhilIndex.update("xia2.settings.scale.freer_file=%s" % freer_file)

        if params.xia2.settings.scale.reference_reflection_file is not None:
            reference_reflection_file = os.path.abspath(
                params.xia2.settings.scale.reference_reflection_file)
            if not os.path.exists(reference_reflection_file):
                raise RuntimeError("%s does not exist" %
                                   reference_reflection_file)
            PhilIndex.update(
                "xia2.settings.scale.reference_reflection_file=%s" %
                reference_reflection_file)

        params = PhilIndex.get_python_object()

        datasets = unroll_datasets(PhilIndex.params.xia2.settings.input.image)

        # each dataset may carry an optional :start:end image-range suffix
        for dataset in datasets:

            start_end = None

            # here we only care about ':' which are later than C:\
            if ":" in dataset[3:]:
                tokens = dataset.split(":")
                # cope with windows drives i.e. C:\data\blah\thing_0001.cbf:1:100
                if len(tokens[0]) == 1:
                    tokens = ["%s:%s" % (tokens[0], tokens[1])] + tokens[2:]
                if len(tokens) != 3:
                    raise RuntimeError("/path/to/image_0001.cbf:start:end")

                dataset = tokens[0]
                start_end = int(tokens[1]), int(tokens[2])

            from xia2.Applications.xia2setup import is_hd5f_name

            # resolve relative dataset paths against cwd, then against
            # the remaining (unconsumed) command-line tokens as directories
            if os.path.exists(os.path.abspath(dataset)):
                dataset = os.path.abspath(dataset)
            else:
                directories = [os.getcwd()] + self._argv[1:]
                found = False
                for d in directories:
                    if os.path.exists(os.path.join(d, dataset)):
                        dataset = os.path.join(d, dataset)
                        found = True
                        break
                if not found:
                    raise Sorry("Could not find %s in %s" %
                                (dataset, " ".join(directories)))

            if is_hd5f_name(dataset):
                self._hdf5_master_files.append(dataset)
                if start_end:
                    Debug.write("Image range: %d %d" % start_end)
                    if dataset not in self._default_start_end:
                        self._default_start_end[dataset] = []
                    self._default_start_end[dataset].append(start_end)
                else:
                    Debug.write("No image range specified")

            else:
                template, directory = image2template_directory(
                    os.path.abspath(dataset))

                self._default_template.append(os.path.join(
                    directory, template))
                self._default_directory.append(directory)

                Debug.write("Interpreted from image %s:" % dataset)
                Debug.write("Template %s" % template)
                Debug.write("Directory %s" % directory)

                if start_end:
                    Debug.write("Image range: %d %d" % start_end)
                    key = os.path.join(directory, template)
                    if key not in self._default_start_end:
                        self._default_start_end[key] = []
                    self._default_start_end[key].append(start_end)
                else:
                    Debug.write("No image range specified")

        # finally, check that all arguments were read and raise an exception
        # if any of them were nonsense.

        # NOTE: files opened in binary mode; as_str() returns str (bytes)
        # under Python 2
        with open("xia2-working.phil", "wb") as f:
            f.write(PhilIndex.working_phil.as_str())
            f.write(
                os.linesep
            )  # temporarily required for https://github.com/dials/dials/issues/522
        with open("xia2-diff.phil", "wb") as f:
            f.write(PhilIndex.get_diff().as_str())
            f.write(
                os.linesep
            )  # temporarily required for https://github.com/dials/dials/issues/522

        Debug.write("\nDifference PHIL:")
        Debug.write(PhilIndex.get_diff().as_str(), strip=False)

        Debug.write("Working PHIL:")
        Debug.write(PhilIndex.working_phil.as_str(), strip=False)

        nonsense = "Unknown command-line options:"
        was_nonsense = False

        # NOTE(review): self._understood is populated outside this method,
        # presumably as arguments are consumed above -- verify before relying
        # on this check
        for j, argv in enumerate(self._argv):
            if j == 0:
                continue
            if argv[0] != "-" and "=" not in argv:
                continue
            if j not in self._understood:
                nonsense += " %s" % argv
                was_nonsense = True

        if was_nonsense:
            raise RuntimeError(nonsense)
Exemple #44
0
def run(args):
    """Split an unmerged-intensity MTZ file in two at a given batch number.

    Batch numbers are assumed to encode (run, batch) as
    run * batch_multiplier + batch_within_run.  Reflections whose
    within-run batch id is below split_at_batch are written (merged) to
    split_1.mtz, the remainder to split_2.mtz.  With export_unmerged=True
    the unmerged reflections are also written to split_unmerged_1.mtz and
    split_unmerged_2.mtz.
    """
    interpreter = command_line.argument_interpreter(
        master_params=master_phil_scope)
    working_phil, args = interpreter.process_and_fetch(
        args=args, custom_processor="collect_remaining")
    working_phil.show()
    params = working_phil.extract()
    batch_multiplier = params.batch_multiplier
    split_at_batch = params.split_at_batch
    assert split_at_batch is not None

    assert len(args) == 1
    file_name = args[0]
    reader = any_reflection_file(file_name)
    assert reader.file_type() == "ccp4_mtz"

    arrays = reader.as_miller_arrays(merge_equivalents=False)
    mtz_object = reader.file_content()
    original_indices = mtz_object.extract_original_index_miller_indices()

    # pull out I/SIGI and BATCH, remapped onto the original indices
    intensities = [ma for ma in arrays
                   if ma.info().labels == ["I", "SIGI"]][0]
    intensities = intensities.customized_copy(
        indices=original_indices).set_info(intensities.info())
    batch_ids = [ma for ma in arrays
                 if ma.info().labels == ["BATCH"]][0]
    batch_ids = batch_ids.customized_copy(
        indices=original_indices).set_info(batch_ids.info())
    intensities = intensities.customized_copy(
        anomalous_flag=True).set_info(intensities.info())
    intensities.set_observation_type_xray_intensity()

    # decompose each batch number into (run, batch-within-run)
    run_id = flex.int()
    run_batch_id = flex.int()
    for batch in batch_ids.data():
        whole_run, within_run = divmod(batch, batch_multiplier)
        run_id.append(whole_run)
        run_batch_id.append(within_run)

    # sel is True for reflections belonging to the first half
    sel = run_batch_id < split_at_batch

    # write each half merged (anomalous I plus IMEAN)
    for negate in (False, True):
        outfile = "split_2.mtz" if negate else "split_1.mtz"

        subset = intensities.select(sel, negate=negate).set_info(
            intensities.info())

        stats = iotbx.merging_statistics.dataset_statistics(subset)
        stats.show()

        merged_anom = subset.merge_equivalents().array()
        merged_mean = (subset.customized_copy(
            anomalous_flag=False).merge_equivalents().array())

        dataset = merged_anom.as_mtz_dataset(
            "I", wavelength=intensities.info().wavelength)
        dataset.add_miller_array(miller_array=merged_mean,
                                 column_root_label="IMEAN")
        dataset.mtz_object().write(outfile)

    if params.export_unmerged:
        # The naming looks inverted but is correct: delete_reflections
        # REMOVES the given selection, so deleting ~sel (negate=True)
        # keeps the sel half, i.e. the contents of split 1.
        for negate in (False, True):
            if negate:
                outfile = "split_unmerged_1.mtz"
                to_delete = (~sel).iselection()
            else:
                outfile = "split_unmerged_2.mtz"
                to_delete = sel.iselection()
            fresh_reader = any_reflection_file(file_name)
            fresh_mtz = fresh_reader.file_content()
            fresh_mtz.delete_reflections(to_delete)
            fresh_mtz.write(outfile)
def run(args):
  """Resolve indexing ambiguities across several unmerged MTZ files.

  Unmerged I/SIGI intensities are pooled from all input files, each
  reflection tagged with the id of the lattice (file) it came from, and the
  pooled data are passed to the Brehm-Diederichs algorithm.  Each input file
  is then rewritten with the change-of-basis operator found for its lattice
  (suffix taken from params.suffix), and every file/operator pair is logged
  to reindex.txt.

  :param args: command-line arguments; PHIL assignments plus MTZ file names
  """
  cmd_line = command_line.argument_interpreter(master_params=master_phil_scope)
  working_phil, files = cmd_line.process_and_fetch(
    args=args, custom_processor="collect_remaining")
  working_phil.show()
  params = working_phil.extract()

  miller_array_all = None
  lattice_ids = None
  space_group = None
  file_name_dict = {}
  lattice_id = -1

  for file_name in files:
    lattice_id += 1
    reader = any_reflection_file(file_name)

    as_miller_arrays = reader.as_miller_arrays(merge_equivalents=False)
    intensities = [ma for ma in as_miller_arrays
                   if ma.info().labels == ['I', 'SIGI']][0]
    intensities = intensities.customized_copy(anomalous_flag=True).set_info(
      intensities.info())
    intensities.set_observation_type_xray_intensity()
    # every input must share one space group
    if space_group is None:
      space_group = intensities.space_group()
    else:
      assert intensities.space_group() == space_group
    assert reader.file_type() == 'ccp4_mtz'

    file_name_dict[lattice_id] = file_name

    # per-reflection lattice-id array, parallel to the intensity array
    ids = intensities.customized_copy(
      data=flex.double(intensities.size(), lattice_id), sigmas=None)
    assert ids.size() == intensities.size()
    if miller_array_all is None:
      miller_array_all = intensities
      lattice_ids = ids
    else:
      miller_array_all = miller_array_all.customized_copy(
        indices=miller_array_all.indices().concatenate(intensities.indices()),
        data=miller_array_all.data().concatenate(intensities.data()),
        sigmas=miller_array_all.sigmas().concatenate(intensities.sigmas()))
      lattice_ids = lattice_ids.customized_copy(
        indices=lattice_ids.indices().concatenate(ids.indices()),
        data=lattice_ids.data().concatenate(ids.data()))
    assert miller_array_all.size() == lattice_ids.size()

  L = (miller_array_all, lattice_ids)
  L[0].crystal_symmetry().show_summary()
  from cctbx.merging import brehm_diederichs
  if params.nproc == 1:
    result_sets = brehm_diederichs.run(
      L, asymmetric=params.asymmetric, nproc=1, show_plot=params.show_plot,
      save_plot=params.save_plot)
  else:
    result_sets = brehm_diederichs.run_multiprocess(
      L, asymmetric=params.asymmetric, nproc=params.nproc,
      show_plot=params.show_plot, save_plot=params.save_plot)

  # map each wedge (lattice id) back to its file and write a reindexed copy;
  # the log file is text, so open in 'w' (the original opened 'wb' and used
  # the Python 2 print chevron) and close it deterministically
  with open('reindex.txt', 'w') as out_file:
    for reindexing_op, wedges in result_sets.items():
      cb_op = sgtbx.change_of_basis_op(reindexing_op)
      for wedge in wedges:
        file_name = file_name_dict[wedge]
        print(file_name, cb_op.as_hkl(), file=out_file)
        basename = os.path.basename(file_name)
        out_name = os.path.splitext(basename)[0] + params.suffix + ".mtz"
        reader = any_reflection_file(file_name)
        assert reader.file_type() == 'ccp4_mtz'
        mtz_object = reader.file_content()
        if not cb_op.is_identity_op():
          print("reindexing %s" % file_name)
          mtz_object.change_basis_in_place(cb_op)
        mtz_object.write(out_name)
Exemple #46
0
  def __init__(self, master_phil):
    """Build a PHIL argument interpreter for *master_phil* and start with an
    empty list of file handlers."""
    from libtbx.phil import command_line

    self.interpreter = command_line.argument_interpreter(
      master_phil=master_phil)
    self.file_handlers = []
Exemple #47
0
def run(args, out=sys.stdout, validated=False):
    """Compare two density maps (phenix.map_comparison).

    Accepts any two of CCP4 map / MTZ inputs; MTZ inputs are Fourier
    transformed to real-space maps.  Computes overall map statistics, the
    linear correlation of the input maps, the correlation of
    quantile-rank-scaled (histogram equalized) maps, CCpeak over a range of
    cutoffs, the discrepancy function D, and the two map histograms.

    :param args: command-line arguments (file names and PHIL assignments)
    :param out: stream for printed output
    :param validated: set True by the GUI to skip re-running validate_params
    :returns: dict with keys map_files, map_statistics, cc_input_maps,
        cc_quantile, cc_peaks, discrepancies, map_histograms
    """
    show_citation(out=out)
    if (len(args) == 0):
        master_phil.show(out=out)
        print('\nUsage: phenix.map_comparison <CCP4> <CCP4>\n',\
          '       phenix.map_comparison <CCP4> <MTZ> mtz_label_1=<label>\n',\
          '       phenix.map_comparison <MTZ 1> mtz_label_1=<label 1> <MTZ 2> mtz_label_2=<label 2>\n', file=out)
        sys.exit()

    # process arguments
    params = None
    input_attributes = ['map_1', 'mtz_1', 'map_2', 'mtz_2']
    try:  # automatic parsing
        params = phil.process_command_line_with_files(
            args=args, master_phil=master_phil).work.extract()
    except Exception:  # map_file_def only handles one map phil
        # fall back to manual parsing: split args into files and PHIL
        # assignments, then assign files to the input slots below
        from libtbx.phil.command_line import argument_interpreter
        arg_int = argument_interpreter(master_phil=master_phil)
        command_line_args = list()
        map_files = list()
        for arg in args:
            if (os.path.isfile(arg)):
                map_files.append(arg)
            else:
                command_line_args.append(arg_int.process(arg))
        params = master_phil.fetch(sources=command_line_args).extract()

        # check if more files are necessary
        n_defined = 0
        for attribute in input_attributes:
            if (getattr(params.input, attribute) is not None):
                n_defined += 1

        # matches files to phil scope, stops once there is sufficient data
        for map_file in map_files:
            if (n_defined < 2):
                current_map = file_reader.any_file(map_file)
                if (current_map.file_type == 'ccp4_map'):
                    n_defined += 1
                    if (params.input.map_1 is None):
                        params.input.map_1 = map_file
                    elif (params.input.map_2 is None):
                        params.input.map_2 = map_file
                elif (current_map.file_type == 'hkl'):
                    n_defined += 1
                    if (params.input.mtz_1 is None):
                        params.input.mtz_1 = map_file
                    elif (params.input.mtz_2 is None):
                        params.input.mtz_2 = map_file
            else:
                print('WARNING: only the first two files are used', file=out)
                break

    # validate arguments (GUI sets validated to true, no need to run again)
    assert (params is not None)
    if (not validated):
        validate_params(params)

    # ---------------------------------------------------------------------------
    # check if maps need to be generated from mtz
    n_maps = 0
    maps = list()
    map_names = list()
    for attribute in input_attributes:
        filename = getattr(params.input, attribute)
        if (filename is not None):
            map_names.append(filename)
            current_map = file_reader.any_file(filename)
            maps.append(current_map)
            if (current_map.file_type == 'ccp4_map'):
                n_maps += 1

    # construct maps, if necessary
    crystal_gridding = None
    m1 = None
    m2 = None

    # 1 map, 1 mtz file
    if (n_maps == 1):
        # take gridding from the CCP4 map, then FFT the MTZ onto that grid
        for current_map in maps:
            if (current_map.file_type == 'ccp4_map'):
                uc = current_map.file_object.unit_cell()
                sg_info = space_group_info(
                    current_map.file_object.space_group_number)
                n_real = current_map.file_object.unit_cell_grid
                crystal_gridding = maptbx.crystal_gridding(
                    uc, space_group_info=sg_info, pre_determined_n_real=n_real)
                m1 = current_map.file_object.map_data()
        if (crystal_gridding is not None):
            label = None
            # pick whichever (mtz, label) pair was supplied
            for attribute in [('mtz_1', 'mtz_label_1'),
                              ('mtz_2', 'mtz_label_2')]:
                filename = getattr(params.input, attribute[0])
                label = getattr(params.input, attribute[1])
                if ((filename is not None) and (label is not None)):
                    break
            # labels will match currently open mtz file
            for current_map in maps:
                if (current_map.file_type == 'hkl'):
                    m2 = miller.fft_map(
                        crystal_gridding=crystal_gridding,
                        fourier_coefficients=current_map.file_server.
                        get_miller_array(
                            label)).apply_sigma_scaling().real_map_unpadded()
        else:
            raise Sorry('Gridding is not defined.')

    # 2 mtz files
    elif (n_maps == 0):
        # common gridding from the first file's symmetry at the coarser d_min
        crystal_symmetry = get_crystal_symmetry(maps[0])
        d_min = min(get_d_min(maps[0]), get_d_min(maps[1]))
        crystal_gridding = maptbx.crystal_gridding(
            crystal_symmetry.unit_cell(),
            d_min=d_min,
            resolution_factor=params.options.resolution_factor,
            space_group_info=crystal_symmetry.space_group_info())
        m1 = miller.fft_map(
            crystal_gridding=crystal_gridding,
            fourier_coefficients=maps[0].file_server.get_miller_array(
                params.input.mtz_label_1)).apply_sigma_scaling(
                ).real_map_unpadded()
        m2 = miller.fft_map(
            crystal_gridding=crystal_gridding,
            fourier_coefficients=maps[1].file_server.get_miller_array(
                params.input.mtz_label_2)).apply_sigma_scaling(
                ).real_map_unpadded()

    # 2 maps
    else:
        m1 = maps[0].file_object.map_data()
        m2 = maps[1].file_object.map_data()

    # ---------------------------------------------------------------------------
    # analyze maps
    assert ((m1 is not None) and (m2 is not None))

    # show general statistics
    s1 = maptbx.more_statistics(m1)
    s2 = maptbx.more_statistics(m2)
    show_overall_statistics(out=out, s=s1, header="Map 1 (%s):" % map_names[0])
    show_overall_statistics(out=out, s=s2, header="Map 2 (%s):" % map_names[1])
    cc_input_maps = flex.linear_correlation(x=m1.as_1d(),
                                            y=m2.as_1d()).coefficient()
    print("CC, input maps: %6.4f" % cc_input_maps, file=out)

    # compute CCpeak
    cc_peaks = list()
    m1_he = maptbx.volume_scale(map=m1, n_bins=10000).map_data()
    m2_he = maptbx.volume_scale(map=m2, n_bins=10000).map_data()
    cc_quantile = flex.linear_correlation(x=m1_he.as_1d(),
                                          y=m2_he.as_1d()).coefficient()
    print("CC, quantile rank-scaled (histogram equalized) maps: %6.4f" % \
      cc_quantile, file=out)
    print("Peak correlation:", file=out)
    print("  cutoff  CCpeak", file=out)
    # 0.01..0.89 in steps of 0.01, then 0.900..0.999 in steps of 0.001
    cutoffs = [i / 100.
               for i in range(1, 90)] + [i / 1000 for i in range(900, 1000)]
    for cutoff in cutoffs:
        cc_peak = maptbx.cc_peak(map_1=m1_he, map_2=m2_he, cutoff=cutoff)
        print("  %3.2f   %7.4f" % (cutoff, cc_peak), file=out)
        cc_peaks.append((cutoff, cc_peak))

    # compute discrepancy function (D-function)
    discrepancies = list()
    cutoffs = flex.double(cutoffs)
    df = maptbx.discrepancy_function(map_1=m1_he, map_2=m2_he, cutoffs=cutoffs)
    print("Discrepancy function:", file=out)
    print("  cutoff  D", file=out)
    for c, d in zip(cutoffs, df):
        print("  %3.2f   %7.4f" % (c, d), file=out)
        discrepancies.append((c, d))

    # compute and output histograms
    h1 = maptbx.histogram(map=m1, n_bins=10000)
    h2 = maptbx.histogram(map=m2, n_bins=10000)
    print("Map histograms:", file=out)
    print("Map 1 (%s)     Map 2 (%s)"%\
      (params.input.map_1,params.input.map_2), file=out)
    print("(map_value,cdf,frequency) <> (map_value,cdf,frequency)", file=out)
    for a1, c1, v1, a2, c2, v2 in zip(h1.arguments(), h1.c_values(),
                                      h1.values(), h2.arguments(),
                                      h2.c_values(), h2.values()):
        print("(%9.5f %9.5f %9.5f) <> (%9.5f %9.5f %9.5f)"%\
          (a1,c1,v1, a2,c2,v2), file=out)

    # store results
    s1_dict = create_statistics_dict(s=s1)
    s2_dict = create_statistics_dict(s=s2)
    results = dict()
    inputs = list()
    for attribute in input_attributes:
        filename = getattr(params.input, attribute)
        if (filename is not None):
            inputs.append(filename)
    assert (len(inputs) == 2)
    results['map_files'] = inputs
    results['map_statistics'] = (s1_dict, s2_dict)
    results['cc_input_maps'] = cc_input_maps
    results['cc_quantile'] = cc_quantile
    results['cc_peaks'] = cc_peaks
    results['discrepancies'] = discrepancies
    # TODO, verify h1,h2 are not dicts, e.g. .values is py2/3 compat. I assume it is here
    results['map_histograms'] = ((h1.arguments(), h1.c_values(), h1.values()),
                                 (h2.arguments(), h2.c_values(), h2.values()))

    return results
Exemple #48
0
class Script(object):
  """Simulate shoebox counts for predicted reflections of one sweep/crystal.

  Reflections are predicted for the input sweep and crystal; those whose six
  nearest neighbours (h±1, k±1, l±1) are also predicted are kept, a local
  prediction matrix is built from the neighbours, and an image-space
  bounding box is outlined for each candidate reflection.
  """

  def __init__(self):
    """Configure the command-line parser with the local PHIL scope."""
    from dials.util.options import OptionParser
    from libtbx.phil import parse

    usage  = "usage: %prog [options] [param.phil] " \
             "sweep.json crystal.json intensities.mtz"

    phil_scope = parse('''
      output = simulated.pickle
        .type = str
        .help = "The filename for the simulated reflections"
    ''')

    # Create the parser.  NOTE: the original passed phil=self.phil_scope(),
    # but phil_scope is a local variable, not an attribute or method, so
    # that call would always raise AttributeError.
    self.parser = OptionParser(
      usage=usage,
      phil=phil_scope)

  @staticmethod
  def map_to_image_space(refl, d, dhs, dks, dls):
    """Map reciprocal-space offsets (dhs, dks, dls) through matrix *d* into
    image space and increment the reflection's shoebox at every mapped voxel
    that lies inside the reflection's bounding box."""
    from scitbx.array_family import flex
    d_elems = d.elems
    bb = refl.bounding_box
    dxs = d_elems[0] * dhs + d_elems[1] * dks + d_elems[2] * dls
    dys = d_elems[3] * dhs + d_elems[4] * dks + d_elems[5] * dls
    dzs = d_elems[6] * dhs + d_elems[7] * dks + d_elems[8] * dls
    xs = flex.floor(dxs + refl.image_coord_px[0]).iround() - bb[0]
    ys = flex.floor(dys + refl.image_coord_px[1]).iround() - bb[2]
    zs = flex.floor(dzs + refl.frame_number).iround() - bb[4]
    xyz = flex.vec3_int(zs, ys, xs)
    # Elementwise & is required throughout: the original combined the lower
    # and upper bound tests with boolean 'and', which does not perform a
    # per-element conjunction of flex arrays.
    xyz = xyz.select((xs >= 0) & (xs < (bb[1] - bb[0])) &
                     (ys >= 0) & (ys < (bb[3] - bb[2])) &
                     (zs >= 0) & (zs < (bb[5] - bb[4])))
    for _xyz in xyz:
      refl.shoebox[_xyz] += 1

    return

  def main(self):
    """Run the simulation: predict reflections, keep those with a complete
    neighbour shell, and build per-reflection prediction matrices and
    shoebox bounding boxes."""
    # FIXME import simulation code
    import math
    from dials.util.command_line import Importer
    from dials.algorithms.integration import ReflectionPredictor
    from libtbx.utils import Sorry

    # Parse the command line
    params, options, args = self.parser.parse_args()

    importer = Importer(args)
    if len(importer.imagesets) == 0 and len(importer.crystals) == 0:
      # original called self.config().print_help(); no such attribute exists
      self.parser.print_help()
      return
    if len(importer.imagesets) != 1:
      raise Sorry('need 1 sweep: %d given' % len(importer.imagesets))
    if len(importer.crystals) != 1:
      raise Sorry('need 1 crystal: %d given' % len(importer.crystals))
    sweep = importer.imagesets[0]
    crystal = importer.crystals[0]

    # generate predictions for possible reflections => generate a
    # reflection list
    predict = ReflectionPredictor()
    predicted = predict(sweep, crystal)

    # sort with James's reflection table: should this not go somewhere central?
    from dials.scratch.jmp.container.reflection_table import ReflectionTable

    # calculate shoebox sizes: take parameters from params & transform
    # from reciprocal space to image space to decide how big a shoe box to use
    table = ReflectionTable()
    table['miller_index'] = predicted.miller_index()
    indexer = table.index_map('miller_index')

    candidates = []

    unique = sorted(indexer)

    # keep only reflections whose six nearest neighbours are also predicted
    for h, k, l in unique:

      try:
        for _h in h - 1, h + 1:
          if not indexer[(_h, k, l)]:
            raise ValueError('missing')
        for _k in k - 1, k + 1:
          if not indexer[(h, _k, l)]:
            raise ValueError('missing')
        for _l in l - 1, l + 1:
          if not indexer[(h, k, _l)]:
            raise ValueError('missing')
        candidates.append((h, k, l))
      except ValueError:
        continue

    from dials.algorithms.simulation.utils import build_prediction_matrix

    from dials.algorithms.simulation.generate_test_reflections import \
     master_phil
    from libtbx.phil import command_line
    cmd = command_line.argument_interpreter(master_params=master_phil)
    working_phil = cmd.process_and_fetch(args=args[2:])
    params = working_phil.extract()

    node_size = params.rs_node_size
    window_size = params.rs_window_size
    reference = params.integrated_data_file
    scale = params.integrated_data_file_scale

    if reference:
      # build a symmetry-expanded lookup of integer counts from the
      # reference MTZ intensities
      counts_database = { }
      from iotbx import mtz
      m = mtz.object(reference)
      mi = m.extract_miller_indices()
      i = m.extract_reals('IMEAN').data
      s = m.space_group().build_derived_point_group()
      for j in range(len(mi)):
        for op in s.all_ops():
          hkl = tuple(map(int, op * mi[j]))
          counts = max(0, int(math.floor(i[j] * scale)))
          counts_database[hkl] = counts
          counts_database[(-hkl[0], -hkl[1], -hkl[2])] = counts
    else:
      # no reference: every reflection gets the same constant count
      from collections import defaultdict
      counts_database = defaultdict(lambda: params.counts)

    from dials.model.data import ReflectionList

    useful = ReflectionList()
    d_matrices = []

    for h, k, l in candidates:
      hkl = predicted[indexer[(h, k, l)][0]]
      _x = hkl.image_coord_px[0]
      _y = hkl.image_coord_px[1]
      _z = hkl.frame_number

      # build prediction matrix from the six neighbouring predictions
      mhkl = predicted[indexer[(h - 1, k, l)][0]]
      phkl = predicted[indexer[(h + 1, k, l)][0]]
      hmkl = predicted[indexer[(h, k - 1, l)][0]]
      hpkl = predicted[indexer[(h, k + 1, l)][0]]
      hkml = predicted[indexer[(h, k, l - 1)][0]]
      hkpl = predicted[indexer[(h, k, l + 1)][0]]
      d = build_prediction_matrix(hkl, mhkl, phkl, hmkl, hpkl, hkml, hkpl)
      d_matrices.append(d)

      # construct the shoebox parameters: outline the ellipsoid
      x, y, z = [], [], []

      for dh in (1, 0, 0), (0, 1, 0), (0, 0, 1):
        dxyz = -1 * window_size * d * dh
        x.append(dxyz[0] + _x)
        y.append(dxyz[1] + _y)
        z.append(dxyz[2] + _z)
        dxyz = window_size * d * dh
        x.append(dxyz[0] + _x)
        y.append(dxyz[1] + _y)
        z.append(dxyz[2] + _z)

      hkl.bounding_box = (int(math.floor(min(x))), int(math.floor(max(x)) + 1),
                          int(math.floor(min(y))), int(math.floor(max(y)) + 1),
                          int(math.floor(min(z))), int(math.floor(max(z)) + 1))
      try:
        counts = counts_database[hkl.miller_index]
        useful.append(hkl)
      except KeyError:
        continue
Exemple #49
0
def process_input(args,
                  phil_args,
                  input_file,
                  mode='auto',
                  now=None):
  """ Read and parse parameter file.

      :param args: parsed command-line namespace (expects .random, .nproc,
          .convert, .prefix, .select attributes)
      :param phil_args: list of in-line PHIL assignments; consumed entries
          are removed from this list in place
      :param input_file: parameter file path ('file' mode) or input source
          ('auto' mode)
      :param mode: 'file' to read parameters from input_file, 'auto' to
          start from the master defaults
      :param now: timestamp string used in the auto-generated description
      :returns: (params, diff_out) - extracted PHIL parameters and their
          plain-text representation
      :raises Sorry: if any entry of phil_args cannot be interpreted
  """

  from libtbx.phil.command_line import argument_interpreter
  from libtbx.utils import Sorry

  if mode == 'file':
    user_phil = [ip.parse(open(inp).read()) for inp in [input_file]]
    working_phil = master_phil.fetch(sources=user_phil)
    params = working_phil.extract()
  elif mode == 'auto':
    params = master_phil.extract()
    params.description = 'IOTA parameters auto-generated on {}'.format(now)
    params.input = [input_file]

  final_phil = master_phil.format(python_object=params)

  # Parse in-line params into phil.  Use a distinct local name: the original
  # rebound 'argument_interpreter' itself, shadowing the imported factory.
  arg_interp = argument_interpreter(master_phil=master_phil)
  consume = []
  for arg in phil_args:
    try:
      command_line_params = arg_interp.process(arg=arg)
      final_phil = final_phil.fetch(sources=[command_line_params,])
      consume.append(arg)
    except Sorry:
      # not a recognised PHIL assignment; leave it in phil_args for the
      # length check below
      pass
  for item in consume:
    phil_args.remove(item)  # NOTE: intentionally mutates the caller's list
  if len(phil_args) > 0:
    raise Sorry("Not all arguments processed, remaining: {}".format(phil_args))

  # Perform command line check and modify params accordingly
  params = final_phil.extract()

  if mode == 'auto':
    output_dir = os.path.abspath(os.curdir)
    if params.advanced.integrate_with == 'dials':
      params.dials.target = os.path.join(output_dir, 'dials.phil')
    elif params.advanced.integrate_with == 'cctbx':
      params.cctbx.target = os.path.join(output_dir, 'cctbx.phil')

  # Check for -r option and set random subset parameter
  if args.random > 0:
    params.advanced.random_sample.flag_on = True
    params.advanced.random_sample.number = args.random[0]

  # Check for -n option and set number of processors override
  # (for parallel map only, for now)
  if args.nproc > 0:
    params.n_processors = args.nproc[0]

  # Check for -c option and set flags to exit IOTA after raw image conversion
  if args.convert:
    params.image_conversion.convert_only = True

  # Check -p option to see if converted file prefix is supplied; will run
  # conversion automatically if prefix is supplied
  if str(args.prefix).lower() != "auto":
    params.image_conversion.convert_images = True
    params.image_conversion.rename_pickle_prefix = args.prefix

  # Check -s option to bypass grid search and run selection/integration only
  if args.select:
    params.cctbx.selection.select_only.flag_on = True

  # Check if grid search is turned off; if so, set everything to zero
  if str(params.cctbx.grid_search.type).lower() == 'none':
    params.cctbx.grid_search.area_range = 0
    params.cctbx.grid_search.height_range = 0
    params.cctbx.grid_search.sig_height_search = False

  final_phil = master_phil.format(python_object=params)

  temp_phil = [final_phil]
  diff_phil = master_phil.fetch(sources=temp_phil)

  with Capturing() as output:
    diff_phil.show()
  diff_out = ''
  for one_output in output:
    diff_out += one_output + '\n'

  if mode == 'auto':
    with Capturing() as diff_output:
      final_phil.show()
    txt_out = ''
    for one_output in diff_output:
      txt_out += one_output + '\n'
    write_defaults(os.path.abspath(os.curdir), txt_out, params.advanced.integrate_with)

  return params, diff_out
def run(args):
  """Export bitmap renderings of diffraction images (dxtbx.export_bitmaps).

  Loads datablocks from the image files in *args*, renders each image with
  the requested brightness/binning/colour scheme and saves one bitmap per
  image (format and directory taken from the PHIL parameters).

  :param args: image file names plus PHIL assignments
  :raises Usage: if no arguments are given
  :raises Sorry: if binning is not a power of two
  """
  import os
  from libtbx.phil import command_line
  from libtbx.utils import Sorry, Usage

  if len(args) == 0:
    # show the master PHIL scope in the usage message
    from io import StringIO
    s = StringIO()
    master_phil_scope.show(out=s)
    raise Usage("""\
dxtbx.export_bitmaps image_files [options]

%s
""" % s.getvalue())

  from dxtbx.datablock import DataBlockFactory
  unhandled = []
  datablocks = DataBlockFactory.from_args(
    args, verbose=False, unhandled=unhandled)
  assert len(datablocks) > 0
  imagesets = datablocks[0].extract_imagesets()

  # everything DataBlockFactory did not consume is treated as PHIL input
  cmd_line = command_line.argument_interpreter(master_params=master_phil_scope)
  working_phil = cmd_line.process_and_fetch(args=unhandled)
  working_phil.show()
  params = working_phil.extract()

  brightness = params.brightness / 100
  vendortype = "made up"

  # check that binning is a power of 2
  binning = params.binning
  if not (binning > 0 and ((binning & (binning - 1)) == 0)):
    raise Sorry("binning must be a power of 2")

  output_dir = params.output_dir
  if output_dir is None:
    output_dir = "."
  elif not os.path.exists(output_dir):
    os.makedirs(output_dir)

  from rstbx.slip_viewer.tile_generation \
       import _get_flex_image, _get_flex_image_multipanel

  for imageset in imagesets:
    detector = imageset.get_detector()
    panel = detector[0]
    # XXX is this inclusive or exclusive?
    saturation = panel.get_trusted_range()[1]
    for i_image, image in enumerate(imageset):

      if len(detector) > 1:
        # FIXME This doesn't work properly, as flex_image.size2() is incorrect
        # also binning doesn't work
        assert binning == 1
        flex_image = _get_flex_image_multipanel(
          brightness=brightness,
          panels=detector,
          raw_data=image)
      else:
        flex_image = _get_flex_image(
          brightness=brightness,
          data=image,
          binning=binning,
          saturation=saturation,
          vendortype=vendortype)

      flex_image.setWindow(0, 0, 1)
      flex_image.adjust(color_scheme=colour_schemes.get(params.colour_scheme))

      # now export as a bitmap
      flex_image.prep_string()
      import Image
      # XXX is size//binning safe here?
      pil_img = Image.fromstring(
        'RGB', (flex_image.size2()//binning,
                flex_image.size1()//binning),
        flex_image.export_string)

      basename = os.path.basename(os.path.splitext(imageset.paths()[i_image])[0])
      path = os.path.join(
        output_dir, basename + '.' + params.format)

      print("Exporting %s" % path)
      # write the bitmap; context manager guarantees the stream is closed
      with open(path, 'wb') as tmp_stream:
        pil_img.save(tmp_stream, format=params.format)
def load_cxi_phil(path, args=None):
    """Load a CXI target PHIL file and apply command-line overrides.

    The file is located by trying *path* with no extension, then ".params",
    then ".phil".  Each entry of *args* that parses as a PHIL assignment is
    applied and removed from the caller's list.  Missing quad/tile
    translations are filled in from the versioned CXI defaults.

    :param path: target file path, with or without extension
    :param args: optional list of PHIL assignment strings; consumed entries
        are removed from this list in place
    :returns: extracted PHIL parameters
    :raises Sorry: if the target file is not found or any argument is left
        unprocessed
    """
    import os
    from labelit.phil_preferences import iotbx_defs, libtbx_defs
    from iotbx import phil
    from libtbx.phil.command_line import argument_interpreter
    from libtbx.utils import Sorry

    # avoid the mutable-default-argument pitfall; this function removes
    # consumed entries from the list it is given
    if args is None:
        args = []

    exts = ["", ".params", ".phil"]
    found = False
    for ext in exts:
        if os.path.exists(path + ext):
            found = True
            path += ext
            break
    if not found:
        raise Sorry("Target not found: " + path)

    master_phil = phil.parse(input_string=iotbx_defs + libtbx_defs,
                             process_includes=True)

    horizons_phil = master_phil.fetch(
        sources=[phil.parse(file_name=path, process_includes=True)])

    # distinct local name so the imported factory is not shadowed
    interp = argument_interpreter(master_phil=master_phil)
    consume = []
    for arg in args:
        try:
            command_line_params = interp.process(arg=arg)
            horizons_phil = horizons_phil.fetch(sources=[
                command_line_params,
            ])
            consume.append(arg)
        except Sorry:
            # not a recognised PHIL assignment; leave it for the check below
            pass

    for item in consume:
        args.remove(item)  # NOTE: intentionally mutates the caller's list

    if len(args) > 0:
        raise Sorry("Not all arguments processed")

    params = horizons_phil.extract()
    if params.distl.tile_translations is not None and params.distl.quad_translations is not None:
        return params

    # fall back to the versioned defaults for any missing translations
    from spotfinder.applications.xfel.cxi_phil import cxi_versioned_extract
    args = [
        "distl.detector_format_version=%s" %
        params.distl.detector_format_version
    ]

    versioned_extract = cxi_versioned_extract(args).persist.commands

    if params.distl.quad_translations is None:
        params.distl.quad_translations = versioned_extract.distl.quad_translations

    if params.distl.tile_translations is None:
        params.distl.tile_translations = versioned_extract.distl.tile_translations

    return params