Example #1
def main():
  from dials.util.options import OptionParser
  from dials.util.options import flatten_datablocks
  import libtbx.load_env

  usage = "%s [options] image_*.cbf" % (
    libtbx.env.dispatcher_name)

  parser = OptionParser(
    usage=usage,
    phil=phil_scope,
    read_datablocks=True,
    read_datablocks_from_images=True,
    epilog=help_message)

  params, options = parser.parse_args(show_diff_phil=True)
  datablocks = flatten_datablocks(params.input.datablock)

  if len(datablocks) == 0:
    parser.print_help()
    exit()

  datablock = datablocks[0]
  imageset = datablock.extract_imagesets()[0]
  stability_fft(imageset, params)
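These snippets are excerpts from larger DIALS command-line modules: names such as phil_scope, help_message, flex, logger, col and the flatten_* helpers are imported or defined at module level in the original files. A minimal sketch of the kind of module-level definitions the snippet above assumes (the parameter names here are illustrative, not the originals):

from libtbx.phil import parse

# Hypothetical module-level definitions; the real script declares its own
# parameters (n_bins and plot are illustrative names, not the originals).
phil_scope = parse("""
n_bins = 100
  .type = int
  .help = "Number of resolution bins"
plot = False
  .type = bool
""")

help_message = "Analyse a series of images."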
Example #2
def run(args):

    import libtbx.load_env
    from dials.util.options import OptionParser
    from dials.util.options import flatten_reflections
    from libtbx.utils import Sorry
    from libtbx.phil import parse

    phil_scope = parse(
        """
    hklout = hklout.pickle
      .type = str
      .help = "The output pickle file"
  """
    )

    usage = "%s integrated.pickle [hklout=hklout.pickle]" % (libtbx.env.dispatcher_name)

    parser = OptionParser(usage=usage, read_reflections=True, check_format=False, phil=phil_scope)
    params, options = parser.parse_args(show_diff_phil=True)
    reflections = flatten_reflections(params.input.reflections)
    if len(reflections) != 1:
        raise Sorry("exactly 1 reflection table must be specified")

    integrated_data = reflections[0]
    filter_good_reflections(integrated_data).as_pickle(params.hklout)
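filter_good_reflections is defined elsewhere in the original module. A plausible sketch of such a filter, assuming the standard reflection-table flag API (the flag choices here are illustrative, not the original logic):

def filter_good_reflections(reflections):
    # Illustrative body: keep reflections that were successfully integrated
    # and drop any flagged as overloaded.
    good = reflections.get_flags(reflections.flags.integrated)
    bad = reflections.get_flags(reflections.flags.overloaded)
    return reflections.select(good & ~bad)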
Example #3
class Script(object):
    def __init__(self):
        # Create the parser
        self.parser = OptionParser(
            read_experiments=True,
            read_datablocks=True,
            read_reflections=True,
            read_datablocks_from_images=True,
            check_format=False,
        )

    def run(self):
        params, options = self.parser.parse_args(show_diff_phil=True)
        datablocks = flatten_datablocks(params.input.datablock)
        experiments = flatten_experiments(params.input.experiments)
        reflections = flatten_reflections(params.input.reflections)
        if len(reflections) > 0:
            reflections = reflections[0]
        else:
            reflections = None

        all_detectors = []
        for db in datablocks:
            all_detectors.extend(db.unique_detectors())

        all_detectors.extend(experiments.detectors())
        display_detectors(all_detectors[: min(len(all_detectors), 10)], reflections=reflections)
Example #4
def run(args):
  import os
  import libtbx.load_env
  usage = "%s [options]" %libtbx.env.dispatcher_name

  parser = OptionParser(
    usage=usage,
    phil=phil_scope,
    check_format=False,
    epilog=help_message)

  params, options, args = parser.parse_args(
    show_diff_phil=True, return_unhandled=True)

  results = []
  for mtz in args:
    print(mtz)
    assert os.path.isfile(mtz), mtz
    results.append(get_merging_stats(
                     mtz, anomalous=params.anomalous,
                     n_bins=params.n_bins,
                     use_internal_variance=params.use_internal_variance,
                     eliminate_sys_absent=params.eliminate_sys_absent))
  plot_merging_stats(results, labels=params.labels,
                     size_inches=params.size_inches,
                     image_dir=params.image_dir)
Example #5
def run(args):
  from dials.util.options import OptionParser
  from dials.util.options import flatten_experiments
  from dials.util.options import flatten_reflections
  import libtbx.load_env

  usage = "%s [options] integrated.pickle experiments.json" % (
    libtbx.env.dispatcher_name)

  parser = OptionParser(
    usage=usage,
    phil=phil_scope,
    read_experiments=True,
    read_reflections=True,
    check_format=False,
    epilog=help_message)

  params, options = parser.parse_args(show_diff_phil=False)
  experiments = flatten_experiments(params.input.experiments)
  reflections = flatten_reflections(params.input.reflections)

  if len(experiments) != 1 or len(reflections) != 1:
    parser.print_help()
    exit()

  if 'shoebox' not in reflections[0]:
    print('Please add shoeboxes to reflection pickle')
    exit()

  results = main(reflections[0], experiments[0], params)

  if results:
    print('mean result: %.3f' % (sum(results) / len(results)))
Example #6
def run(args):
  import os
  from dials.util.options import OptionParser
  import libtbx.load_env

  usage = "%s [options] find_spots.json" %(
    libtbx.env.dispatcher_name)

  parser = OptionParser(
    usage=usage,
    phil=phil_scope,
    epilog=help_message)

  filenames = [arg for arg in args if os.path.isfile(arg)]
  args = [arg for arg in args if arg not in filenames]

  params, options, args = parser.parse_args(
    args, show_diff_phil=True, return_unhandled=True)
  if params.nproc is libtbx.Auto:
    from libtbx.introspection import number_of_processors
    params.nproc = number_of_processors(return_value_if_unknown=-1)
  print('nproc: %i' % params.nproc)
  results = work_all(filenames, args, nproc=params.nproc)
  print(results)

  if params.json is not None:
    import json
    with open(params.json, 'w') as f:
      json.dump(results, f)
Example #7
class Script(object):
  ''' The debugging visualization program. '''

  def __init__(self):
    '''Initialise the script.'''
    from dials.util.options import OptionParser
    import libtbx.load_env

    # The script usage
    usage = "usage: %s [options] experiment.json" % libtbx.env.dispatcher_name

    # Create the parser
    self.parser = OptionParser(
      usage=usage,
      epilog=help_message,
      read_reflections=True)

  def run(self):
    from dials.util.options import flatten_reflections
    from dials.viewer.viewer_interface import extract_n_show

    # Parse the command line
    params, options = self.parser.parse_args(show_diff_phil=True)
    table = flatten_reflections(params.input.reflections)
    if len(table) == 0:
      self.parser.print_help()
      return

    extract_n_show(table[0])
Example #8
def run(args):

  from dials.util.options import OptionParser
  from dials.util.options import flatten_datablocks
  import libtbx.load_env

  usage = "%s [options] image_*.cbf" % (
    libtbx.env.dispatcher_name)

  parser = OptionParser(
    usage=usage,
    phil=phil_scope,
    read_datablocks=True,
    read_datablocks_from_images=True,
    epilog=help_message)

  params, options = parser.parse_args(show_diff_phil=True)
  datablocks = flatten_datablocks(params.input.datablock)

  if len(datablocks) == 0:
    parser.print_help()
    exit()

  assert len(datablocks) == 1

  datablock = datablocks[0]
  imagesets = datablock.extract_imagesets()

  assert len(imagesets) == 1

  imageset = imagesets[0]

  images = imageset.indices()
  if params.frames:
    images = params.frames

  d_spacings = []
  intensities = []
  sigmas = []

  for indx in images:
    print('For frame %d:' % indx)
    d, I, sig = background(imageset, indx, n_bins=params.n_bins)

    print('%8s %8s %8s' % ('d', 'I', 'sig'))
    for j in range(len(I)):
      print('%8.3f %8.3f %8.3f' % (d[j], I[j], sig[j]))

    d_spacings.append(d)
    intensities.append(I)
    sigmas.append(sig)

  if params.plot:
    from matplotlib import pyplot
    fig = pyplot.figure()
    for d, I, sig in zip(d_spacings, intensities, sigmas):
      ds2 = 1/flex.pow2(d)
      pyplot.plot(ds2, I)

    pyplot.show()
Example #9
def run(args):
    import libtbx.load_env

    usage = "%s [options] experiment.json indexed.pickle" % libtbx.env.dispatcher_name

    parser = OptionParser(
        usage=usage,
        phil=phil_scope,
        read_reflections=True,
        read_experiments=True,
        check_format=False,
        epilog=help_message,
    )

    params, options = parser.parse_args(show_diff_phil=True)

    reflections = flatten_reflections(params.input.reflections)
    experiments = flatten_experiments(params.input.experiments)
    if len(reflections) == 0 or len(experiments) == 0:
        parser.print_help()
        return
    assert len(reflections) == 1
    assert len(experiments) == 1
    experiment = experiments[0]
    reflections = reflections[0]

    test_P1_crystal_indexing(reflections, experiment, params)
    test_crystal_pointgroup_symmetry(reflections, experiment, params)
Example #10
    def read_experiment_file(self, experiment_file):

        ### open DIALS json file
        phil_scope_str = '''
            experiments = 'example_refined_experiments.json'
        '''
        phil_scope = parse(phil_scope_str, process_includes=True)
        parser = OptionParser(
            phil=phil_scope,
            check_format=False,
            read_experiments=True)
        params, options = parser.parse_args(args=[experiment_file], show_diff_phil=True)
        experiments = flatten_experiments(params.input.experiments)
        exp_xtal = experiments.crystals()[0]

        ### define useful attributes
        self.crystal = exp_xtal
        uc = self.crystal.get_unit_cell()
        uc_nospace = str(uc).replace(" ", "")
        uc_nospace_noparen = uc_nospace[1:-1]
        self.unit_cell = uc_nospace_noparen
        self.space_group = self.crystal.get_space_group()
        self.laue_group = self.space_group.laue_group_type()
        # self.a_matrix = crystal.get_A()
        self.experiments = experiments
Example #11
def run(args):

  from dials.util.options import OptionParser
  from dials.util.options import flatten_datablocks

  parser = OptionParser(
    read_datablocks=True,
    read_datablocks_from_images=True,
    phil=phil_scope,
    check_format=True)

  params, options = parser.parse_args(show_diff_phil=True)
  datablocks = flatten_datablocks(params.input.datablock)
  assert len(datablocks) == 1
  imagesets = datablocks[0].extract_imagesets()

  img_count = 0
  import time
  t0 = time.time()
  for imgset in imagesets:
    for i in range(len(imgset)):
      if params.data == 'raw':
        imgset.get_raw_data(i)
      else:
        imgset.get_corrected_data(i)
      img_count += 1
      print("Read %i images" % img_count)
  t1 = time.time()
  t = t1 - t0
  print("Read %i images in %.2fs (%.1f images/s)" % (
    img_count, t, img_count / t))

  return
Example #12
def run(args):

  from dials.util.options import OptionParser
  from dials.util.options import flatten_experiments
  from libtbx.utils import Sorry
  import libtbx.load_env

  usage = "%s [options] experiments.json" %libtbx.env.dispatcher_name

  parser = OptionParser(
    usage=usage,
    phil=phil_scope,
    read_experiments=True,
    check_format=False,
    epilog=help_message)

  params, options = parser.parse_args(show_diff_phil=True)
  experiments = flatten_experiments(params.input.experiments)
  if len(experiments) <= 1:
    parser.print_help()
    return

  hkl = flex.miller_index(params.hkl)

  from dials.algorithms.indexing.compare_orientation_matrices import \
       show_rotation_matrix_differences
  show_rotation_matrix_differences(experiments.crystals(),
                                   miller_indices=hkl)
Example #13
def test_function2():
    from dials.util.options import OptionParser
    from time import time
    from omptbx import omp_get_max_threads

    args = [
        "/media/upc86896/Betelgeuse/Data/xia2_test_data/i19_sucrose/processed/dataset1/experiments.json",
        "/media/upc86896/Betelgeuse/Data/xia2_test_data/i19_sucrose/processed/dataset1/shoeboxes_0.pickle",
    ]

    parser = OptionParser(read_experiments=True, read_reflections=True)
    params, options = parser.parse_args(args=args)

    experiment = params.input.experiments[0].data[0]
    reflections = params.input.reflections[0].data

    print "N Threads: ", omp_get_max_threads()
    print "N Refl: ", len(reflections)

    from dials.algorithms.integration.fitrs import test_function

    st = time()
    test_function(
        experiment.beam, experiment.detector, experiment.goniometer, experiment.scan, 0.071016, 0.390601, 5, reflections
    )
    print "Time: ", time() - st
Example #14
def run(args):
  import libtbx.load_env
  from libtbx.utils import Sorry
  from dials.util import log
  from logging import info
  usage = "%s [options] datablock.json strong.pickle" %libtbx.env.dispatcher_name

  parser = OptionParser(
    usage=usage,
    phil=phil_scope,
    read_datablocks=True,
    read_reflections=True,
    check_format=False,
    epilog=help_message)

  params, options = parser.parse_args(show_diff_phil=False)
  datablocks = flatten_datablocks(params.input.datablock)
  reflections = flatten_reflections(params.input.reflections)

  if len(datablocks) == 0 or len(reflections) == 0:
    parser.print_help()
    exit(0)

  # Configure the logging
  log.config(
    info=params.output.log,
    debug=params.output.debug_log)

  # Log the diff phil
  diff_phil = parser.diff_phil.as_str()
  if diff_phil != '':
    info('The following parameters have been modified:\n')
    info(diff_phil)

  imagesets = []
  for datablock in datablocks:
    imagesets.extend(datablock.extract_imagesets())

  assert len(imagesets) > 0
  assert len(reflections) == len(imagesets)

  if params.scan_range is not None and len(params.scan_range) > 0:
    reflections = [
      filter_reflections_by_scan_range(refl, params.scan_range)
      for refl in reflections]

  dps_params = dps_phil_scope.extract()
  # for development, we want an exhaustive plot of beam probability map:
  dps_params.indexing.plot_search_scope = params.plot_search_scope
  dps_params.indexing.mm_search_scope = params.mm_search_scope

  new_detector, new_beam = discover_better_experimental_model(
    imagesets, reflections, params, dps_params, nproc=params.nproc,
    wide_search_binning=params.wide_search_binning)
  for imageset in imagesets:
    imageset.set_detector(new_detector)
    imageset.set_beam(new_beam)
  from dxtbx.serialize import dump
  dump.datablock(datablock, params.output.datablock)
Example #15
def run(args):
  from dials.util import log
  import libtbx.load_env
  usage = "%s experiments.json indexed.pickle [options]" %libtbx.env.dispatcher_name


  from dials.util.options import OptionParser
  from dials.util.options import flatten_reflections
  from dials.util.options import flatten_experiments
  from dials.array_family import flex

  parser = OptionParser(
    usage=usage,
    phil=phil_scope,
    read_experiments=True,
    read_reflections=True,
    check_format=False,
    epilog=help_message)

  params, options = parser.parse_args(show_diff_phil=False)

  # Configure the logging
  #log.config(info=params.output.log, debug=params.output.debug_log)

  from dials.util.version import dials_version
  logger.info(dials_version())

  # Log the diff phil
  diff_phil = parser.diff_phil.as_str()
  if diff_phil != '':
    logger.info('The following parameters have been modified:\n')
    logger.info(diff_phil)

  experiments = flatten_experiments(params.input.experiments)
  reflections = flatten_reflections(params.input.reflections)
  assert len(reflections) == 1
  reflections = reflections[0]

  if len(experiments) == 0:
    parser.print_help()
    return

  #from dials.command_line import refine
  #params = refine.phil_scope.extract()
  indexed_reflections = reflections.select(reflections['id'] > -1)
  from dials.algorithms.refinement import RefinerFactory
  refiner = RefinerFactory.from_parameters_data_experiments(
    params, indexed_reflections, experiments)
  #refiner.run()
  rmsds = refiner.rmsds()
  import math
  xy_rmsds = math.sqrt(rmsds[0]**2 + rmsds[1]**2)

  print(rmsds)

  return
Example #16
def run(args):

  from dials.util.options import OptionParser
  from dials.util.options import flatten_datablocks
  from dials.util.options import flatten_experiments
  import libtbx.load_env

  usage = "%s [options] datablock.json" %(
    libtbx.env.dispatcher_name)

  parser = OptionParser(
    usage=usage,
    phil=phil_scope,
    read_datablocks=True,
    read_experiments=True,
    check_format=True,
    epilog=help_message)

  params, options = parser.parse_args(show_diff_phil=True)
  datablocks = flatten_datablocks(params.input.datablock)
  experiments = flatten_experiments(params.input.experiments)

  if len(datablocks) == 0 and len(experiments) == 0:
    parser.print_help()
    exit(0)

  if len(datablocks) == 0 and len(experiments) > 0:
    imagesets = experiments.imagesets()
  else:
    imagesets = []
    for datablock in datablocks:
      imagesets.extend(datablock.extract_imagesets())

  assert len(imagesets) == 1
  imageset = imagesets[0]
  gonio = imageset.get_goniometer()
  if not params.detector_distance:
    detector = imageset.get_detector()
    if len(detector) > 1:
      params.detector_distance = detector.hierarchy().get_directed_distance()
    else:
      params.detector_distance = detector[0].get_directed_distance()
  if params.angle:
    assert len(params.angle) == len(gonio.get_angles())
  else:
    for angle in gonio.get_angles():
      params.angle.append(angle)

  import wxtbx.app
  a = wxtbx.app.CCTBXApp(0)
  a.settings = params
  f = ExperimentViewer(
    None, -1, "Experiment viewer", size=(1024,768))
  f.load_imageset(imageset)
  f.Show()
  a.SetTopWindow(f)
  #a.Bind(wx.EVT_WINDOW_DESTROY, lambda evt: tb_icon.Destroy(), f)
  a.MainLoop()
Example #17
class Script(object):
  ''' The integration program. '''

  def __init__(self):
    '''Initialise the script.'''
    from dials.util.options import OptionParser
    import libtbx.load_env

    # The script usage
    usage = "usage: %s [options] experiment.json" % libtbx.env.dispatcher_name

    # Create the parser
    self.parser = OptionParser(
      usage=usage,
      phil=phil_scope,
      epilog=help_message,
      read_experiments=True)

  def run(self):
    ''' Analyse the background '''
    from dials.util.command_line import heading
    from dials.util.options import flatten_experiments
    from dials.util import log
    from logging import info, debug
    from time import time
    from libtbx.utils import Sorry

    # Parse the command line
    params, options = self.parser.parse_args(show_diff_phil=False)
    experiments = flatten_experiments(params.input.experiments)
    if len(experiments) == 0:
      self.parser.print_help()
      return

    assert len(experiments) == 1

    # Get the imageset
    imageset = experiments[0].imageset

    total_image = None
    total_mask = None
    for i in range(len(imageset)):
      print(i)
      image = imageset.get_raw_data(i)
      mask = imageset.get_mask(i)
      if total_image is None:
        total_image = image[0]
        total_mask = mask[0]
      else:
        total_image += image[0]
    total_image /= len(imageset)
    print(min(total_image))
    print(max(total_image))
    print(sum(total_image) / len(total_image))

    from matplotlib import pylab
    pylab.imshow(total_image.as_numpy_array(), vmin=0,vmax=2)
    pylab.show()
Example #18
def run(args):
  import libtbx.load_env
  from libtbx.utils import Sorry
  from dials.util import log
  from logging import info
  import pickle
  usage = "%s [options] datablock.json strong.pickle" % \
    libtbx.env.dispatcher_name

  # Create the option parser
  parser = OptionParser(
    usage=usage,
    phil=phil_scope,
    read_reflections=True,
    read_datablocks=True,
    check_format=False,
    epilog=help_message)

  # Get the parameters
  params, options = parser.parse_args(show_diff_phil=False)

  # Configure the log
  log.config(
    params.verbosity,
    info='dials.find_hot_pixels.log',
    debug='dials.find_hot_pixels.debug.log')

  # Log the diff phil
  diff_phil = parser.diff_phil.as_str()
  if diff_phil != '':
    info('The following parameters have been modified:\n')
    info(diff_phil)

  datablocks = flatten_datablocks(params.input.datablock)
  reflections = flatten_reflections(params.input.reflections)

  if len(datablocks) == 0 or len(reflections) == 0:
    parser.print_help()
    exit(0)

  if len(datablocks) > 1:
    raise Sorry("Only one DataBlock can be processed at a time")
  else:
    imagesets = datablocks[0].extract_imagesets()
  if len(reflections) == 0:
    raise Sorry("No reflection lists found in input")
  if len(reflections) > 1:
    raise Sorry("Multiple reflections lists provided in input")

  assert len(reflections) == 1
  reflections = reflections[0]

  mask = hot_pixel_mask(imagesets[0], reflections)
  with open(params.output.mask, 'wb') as fh:
    pickle.dump(mask, fh, pickle.HIGHEST_PROTOCOL)

  print('Wrote hot pixel mask to %s' % params.output.mask)
  return
Example #19
def run(args):

  from dials.util.options import OptionParser
  from dials.util.options import flatten_datablocks
  from dials.util.options import flatten_experiments
  from dials.util.options import flatten_reflections
  from libtbx.utils import Sorry

  parser = OptionParser(
    phil=master_phil,
    read_datablocks=True,
    read_experiments=True,
    read_reflections=True,
    check_format=False)

  params, options = parser.parse_args(show_diff_phil=True)
  datablocks = flatten_datablocks(params.input.datablock)
  experiments = flatten_experiments(params.input.experiments)
  reflections = flatten_reflections(params.input.reflections)[0]
  if len(params.input.reflections) == 2:
    reflections2 = flatten_reflections(params.input.reflections)[1]
  else:
    reflections2 = None

  if reflections2 is not None:
    # find the reflections in the second set that DO NOT match those in the
    # first set
    mask, _ = reflections2.match_with_reference(reflections)
    reflections2 = reflections2.select(~mask)
    print("{0} reflections from the second set do not match the first".format(
      len(reflections2)))
  #reflections2 = reflections2.select(reflections2["miller_index"] == (-7,2,-25))

  if len(datablocks) == 0:
    if len(experiments) > 0:
      imagesets = experiments.imagesets()
    else:
      parser.print_help()
      return
  elif len(datablocks) > 1:
    raise Sorry("Only one DataBlock can be processed at a time")
  else:
    imagesets = datablocks[0].extract_imagesets()

  if len(imagesets) > 1:
    raise Sorry("Only one ImageSet can be processed at a time")
  imageset = imagesets[0]

  import wxtbx.app
  a = wxtbx.app.CCTBXApp(0)
  a.settings = params
  f = PredRelpViewer(
    None, -1, "Prediction reciprocal lattice viewer", size=(1024,768))
  f.load_reflections2(reflections2)
  f.load_models(imageset, reflections)
  f.Show()
  a.SetTopWindow(f)
  #a.Bind(wx.EVT_WINDOW_DESTROY, lambda evt: tb_icon.Destroy(), f)
  a.MainLoop()
Example #20
def run(args):

  from dials.util.options import OptionParser
  from dials.util.options import flatten_datablocks
  from dials.util.options import flatten_experiments
  from dials.util.options import flatten_reflections
  import libtbx.load_env

  usage = "%s [options] datablock.json reflections.pickle" %(
    libtbx.env.dispatcher_name)

  parser = OptionParser(
    usage=usage,
    phil=phil_scope,
    read_datablocks=True,
    read_experiments=True,
    read_reflections=True,
    check_format=False,
    epilog=help_message)

  params, options = parser.parse_args(show_diff_phil=True)
  datablocks = flatten_datablocks(params.input.datablock)
  experiments = flatten_experiments(params.input.experiments)
  reflections = flatten_reflections(params.input.reflections)

  if (len(datablocks) == 0 and len(experiments) == 0) or len(reflections) == 0:
    parser.print_help()
    exit(0)

  if len(datablocks) == 0 and len(experiments) > 0:
    imagesets = experiments.imagesets()
  else:
    imagesets = []
    for datablock in datablocks:
      imagesets.extend(datablock.extract_imagesets())

  if len(reflections) > 1:
    assert len(reflections) == len(imagesets)
    from scitbx.array_family import flex
    for i in range(len(reflections)):
      reflections[i]['imageset_id'] = flex.int(len(reflections[i]), i)
      if i > 0:
        reflections[0].extend(reflections[i])

  reflections = reflections[0]

  import wxtbx.app
  a = wxtbx.app.CCTBXApp(0)
  a.settings = params
  f = ReciprocalLatticeViewer(
    None, -1, "Reflection data viewer", size=(1024,768))
  f.load_models(imagesets, reflections)
  f.Show()
  a.SetTopWindow(f)
  #a.Bind(wx.EVT_WINDOW_DESTROY, lambda evt: tb_icon.Destroy(), f)
  a.MainLoop()
Example #21
class Script(object):
  ''' A class to encapsulate the script. '''

  def __init__(self):
    ''' Initialise the script. '''
    from dials.util.options import OptionParser
    import libtbx.load_env

    # Create the parser
    usage = "usage: %s [options] datablock.json" % libtbx.env.dispatcher_name
    self.parser = OptionParser(
      usage=usage,
      epilog=help_message,
      phil=phil_scope,
      read_datablocks=True)

  def run(self):
    ''' Run the script. '''
    from dials.util.options import flatten_datablocks
    from dxtbx.datablock import DataBlockDumper
    from libtbx.utils import Sorry

    # Parse the command line arguments
    params, options = self.parser.parse_args(show_diff_phil=True)
    datablocks = flatten_datablocks(params.input.datablock)

    # Check number of args
    if len(datablocks) == 0:
      self.parser.print_help()
      return

    # Check the mask file is given
    if params.input.mask is None:
      self.parser.print_help()
      return

    # Check number of datablocks
    if len(datablocks) != 1:
      raise Sorry('exactly 1 datablock must be specified')

    # Get the imageset
    datablock = datablocks[0]
    imagesets = datablock.extract_imagesets()
    if len(imagesets) != 1:
      raise Sorry('datablock must contain exactly 1 imageset')
    imageset = imagesets[0]

    # Set the lookup
    imageset.external_lookup.mask.filename = params.input.mask

    # Dump the datablock
    print("Writing datablock to %s" % params.output.datablock)
    dump = DataBlockDumper(datablock)
    dump.as_json(filename=params.output.datablock)
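The phil_scope for this script is not shown. Given the parameters the code reads (params.input.mask and params.output.datablock), a plausible sketch might be:

from libtbx.phil import parse

# Assumed PHIL definitions; the real script's scope may differ.
phil_scope = parse("""
input {
  mask = None
    .type = str
    .help = "The mask filename"
}
output {
  datablock = datablock_with_mask.json
    .type = str
    .help = "The output datablock filename"
}
""")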
Example #22
def run(args):
  import libtbx.load_env
  usage = "%s experiments.json [options]" %libtbx.env.dispatcher_name

  parser = OptionParser(
    usage=usage,
    phil=phil_scope,
    read_experiments=True,
    check_format=False,
    epilog=help_message)

  params, options = parser.parse_args(show_diff_phil=True)
  experiments = flatten_experiments(params.input.experiments)
  if len(experiments) == 0:
    parser.print_help()
    return
  elif len(experiments) > 1:
    raise Sorry("More than one experiment present")

  assert len(params.miller_index), "Must specify at least one miller_index to predict."

  experiment = experiments[0]

  reflections = flex.reflection_table()
  miller_indices = flex.miller_index()
  entering_flags = flex.bool()
  for mi in params.miller_index:
    miller_indices.append(mi)
    miller_indices.append(mi)
    entering_flags.append(True)
    entering_flags.append(False)
  reflections['miller_index'] = miller_indices
  reflections['entering'] = entering_flags
  reflections['id'] = flex.size_t(len(reflections), 0)

  if params.expand_to_p1:
    from cctbx.miller import expand_to_p1_iselection
    proxy = expand_to_p1_iselection(
      experiment.crystal.get_space_group(),
      anomalous_flag=True,
      indices=miller_indices,
      build_iselection=True)
    reflections = reflections.select(proxy.iselection)
    reflections['miller_index'] = proxy.indices

  from dials.algorithms.refinement.prediction.managed_predictors import ExperimentsPredictor
  predictor = ExperimentsPredictor([experiment])
  predicted = predictor.predict(reflections)

  zmin, zmax = experiment.scan.get_array_range()
  z = predicted['xyzcal.px'].parts()[2]
  predicted = predicted.select((z >= zmin) & (z <= zmax))

  show_predictions(predicted)
Example #23
class Script(object):
  ''' A class to encapsulate the script. '''

  def __init__(self):
    ''' Initialise the script. '''
    from dials.util.options import OptionParser
    import libtbx.load_env

    # The script usage
    usage = "usage: %s [options] /path/to/image/reflection/files" % libtbx.env.dispatcher_name
    self.parser = OptionParser(
      epilog=help_message,
      usage=usage,
      phil=phil_scope,
      read_reflections=True)

  def run(self):
    ''' Run the script. '''
    from dials.array_family import flex
    from dials.util.command_line import Command
    from libtbx.utils import Sorry

    # Parse the command line arguments
    params, options = self.parser.parse_args(show_diff_phil=True)
    if len(params.input.reflections) == 0:
      self.parser.print_help()
      return
    if len(params.input.reflections) <= 1:
      raise Sorry('more than 1 reflection table must be specified')
    tables = [p.data for p in params.input.reflections]

    # Get the number of rows and columns
    nrows = [t.nrows() for t in tables]
    ncols = [t.ncols() for t in tables]

    # Merge the reflection lists
    if params.method == "update":
      assert all(n == nrows[0] for n in nrows[1:])
      table = tables[0]
      for t in tables[1:]:
        table.update(t)
    elif params.method == "extend":
      assert all(n == ncols[0] for n in ncols[1:])
      table = tables[0]
      for t in tables[1:]:
        table.extend(t)
    else:
      raise RuntimeError('unknown method, %s' % params.method)

    # Write the reflections to the file
    Command.start('Writing %d reflections to %s' % (len(table), params.output))
    table.as_pickle(params.output)
    Command.end('Wrote %d reflections to %s' % (len(table), params.output))
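The two merge strategies differ: update adds the columns of each subsequent table to the first, so the row counts must match, while extend appends rows, so the column sets must match. A small illustration, assuming a DIALS installation:

from dials.array_family import flex

a = flex.reflection_table()
a['x'] = flex.double([1.0, 2.0])

b = flex.reflection_table()
b['y'] = flex.double([3.0, 4.0])

a.update(b)       # same row count: column 'y' is added to a
print(a.ncols())  # 2

c = flex.reflection_table()
c['x'] = flex.double([5.0])
c['y'] = flex.double([6.0])
a.extend(c)       # same columns: rows of c are appended to a
print(a.nrows())  # 3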
Example #24
def run(args):

  from dials.util.options import OptionParser
  from dials.util.options import flatten_datablocks
  from dials.util.options import flatten_experiments
  from dials.util.options import flatten_reflections
  from dials.util import log
  import libtbx.load_env

  usage = "%s [options] datablock.json reflections.pickle" %(
    libtbx.env.dispatcher_name)

  parser = OptionParser(
    usage=usage,
    phil=phil_scope,
    read_datablocks=True,
    read_experiments=True,
    read_reflections=True,
    check_format=False,
    epilog=help_message)

  params, options = parser.parse_args()
  datablocks = flatten_datablocks(params.input.datablock)
  experiments = flatten_experiments(params.input.experiments)
  reflections = flatten_reflections(params.input.reflections)

  if (len(datablocks) == 0 and len(experiments) == 0) or len(reflections) == 0:
    parser.print_help()
    exit(0)

  ## Configure the logging
  #log.config(info='dials.rl_png.log')

  # Log the diff phil
  diff_phil = parser.diff_phil.as_str()
  if diff_phil != '':
    logger.info('The following parameters have been modified:\n')
    logger.info(diff_phil)

  reflections = reflections[0]

  if len(datablocks) == 0 and len(experiments) > 0:
    imagesets = experiments.imagesets()
  else:
    imagesets = []
    for datablock in datablocks:
      imagesets.extend(datablock.extract_imagesets())

  f = ReciprocalLatticeJson(settings=params)
  f.load_models(imagesets, reflections)
  f.as_json(filename=params.output.json, compact=params.output.compact)
  print()
Example #25
def run(args):

  from dials.util.options import OptionParser
  from dials.util.options import flatten_datablocks
  from dials.util.options import flatten_experiments
  import libtbx.load_env

  usage = "%s [options] datablock.json | experiments.json" %(
    libtbx.env.dispatcher_name)

  parser = OptionParser(
    usage=usage,
    phil=phil_scope,
    read_datablocks=True,
    read_experiments=True,
    check_format=False,
    epilog=help_message)

  params, options = parser.parse_args(show_diff_phil=True)
  experiments = flatten_experiments(params.input.experiments)
  datablocks = flatten_datablocks(params.input.datablock)

  if len(experiments) == 0 and len(datablocks) == 0:
    parser.print_help()
    exit(0)

  from dials.command_line.dials_import import ManualGeometryUpdater
  update_geometry = ManualGeometryUpdater(params)


  if len(experiments):
    imagesets = experiments.imagesets()

  elif len(datablocks):
    assert len(datablocks) == 1
    imagesets = datablocks[0].extract_imagesets()

  for imageset in imagesets:
    imageset_new = update_geometry(imageset)
    imageset.set_detector(imageset_new.get_detector())
    imageset.set_beam(imageset_new.get_beam())
    imageset.set_goniometer(imageset_new.get_goniometer())
    imageset.set_scan(imageset_new.get_scan())

  from dxtbx.serialize import dump
  if len(experiments):
    print("Saving modified experiments to %s" % params.output.experiments)
    dump.experiment_list(experiments, params.output.experiments)
  elif len(datablocks):
    raise NotImplementedError
Example #26
    def read_reflection_file(self, reflection_file):

        ### open DIALS pickle file
        phil_scope_str = '''
            reflections = 'example_refined.pickle'
        '''
        phil_scope = parse(phil_scope_str, process_includes=True)
        parser = OptionParser(
            phil=phil_scope,
            check_format=False,
            read_reflections=True)
        params, options = parser.parse_args(args=[reflection_file], show_diff_phil=True)
        reflections = flatten_reflections(params.input.reflections)
        self.reflections = reflections
Example #27
def run(args):
  import libtbx.load_env
  from dials.array_family import flex
  from dials.util import log
  from dials.util.version import dials_version

  usage = "%s [options] experiment.json indexed.pickle" % \
    libtbx.env.dispatcher_name

  parser = OptionParser(
    usage=usage,
    phil=phil_scope,
    read_reflections=True,
    read_experiments=True,
    check_format=False,
    epilog=help_message)

  params, options = parser.parse_args(show_diff_phil=True)

  # Configure the logging
  log.config(info=params.output.log, debug=params.output.debug_log)
  logger.info(dials_version())

  reflections = flatten_reflections(params.input.reflections)
  experiments = flatten_experiments(params.input.experiments)
  if len(reflections) == 0 or len(experiments) == 0:
    parser.print_help()
    return
  assert len(reflections) == 1
  assert len(experiments) == 1
  experiment = experiments[0]
  reflections = reflections[0]

  # remove reflections with 0, 0, 0 index
  zero = (reflections['miller_index'] == (0, 0, 0))
  logger.info('Removing %d unindexed reflections' % zero.count(True))
  reflections = reflections.select(~zero)

  h, k, l = reflections['miller_index'].as_vec3_double().parts()

  h = h.iround()
  k = k.iround()
  l = l.iround()

  logger.info('Range on h: %d to %d' % (flex.min(h), flex.max(h)))
  logger.info('Range on k: %d to %d' % (flex.min(k), flex.max(k)))
  logger.info('Range on l: %d to %d' % (flex.min(l), flex.max(l)))

  test_P1_crystal_indexing(reflections, experiment, params)
  test_crystal_pointgroup_symmetry(reflections, experiment, params)
Example #28
class Script(object):
  def __init__(self):
    # Create the parser
    self.parser = OptionParser(
      read_experiments=True,
      read_datablocks=True,
      read_reflections=True,
      read_datablocks_from_images=True,
      check_format=False)

  def run(self):
    # load at least two detectors from the command line
    params, options = self.parser.parse_args(show_diff_phil=True)
    datablocks  = flatten_datablocks(params.input.datablock)
    experiments = flatten_experiments(params.input.experiments)

    # collect all detectors found
    all_detectors = []
    for db in datablocks:
      all_detectors.extend(db.unique_detectors())

    all_detectors.extend(experiments.detectors())

    assert len(all_detectors) >= 2

    a = all_detectors[0]
    b = all_detectors[1]

    level = 0
    pga = a.hierarchy()
    pgb = b.hierarchy()

    while True:
      # starting at the top of the hierarchy, show diffs in local origins at each level
      print("Level", level)
      oa = col(pga.get_local_origin())
      ob = col(pgb.get_local_origin())

      print("  Detector a", oa.elems)
      print("  Detector b", ob.elems)
      print("  Diffs", (ob - oa).elems)

      if hasattr(pga, 'children'):
        pga = pga[0]
        pgb = pgb[0]
        level += 1
      else:
        break
Example #29
def run(args):
  import libtbx.load_env
  usage = "%s [options]" %libtbx.env.dispatcher_name

  parser = OptionParser(
    usage=usage,
    phil=phil_scope,
    check_format=False,
    epilog=help_message)

  params, options, args = parser.parse_args(show_diff_phil=True,
                                            return_unhandled=True)

  assert len(args) == 1
  from iotbx.reflection_file_reader import any_reflection_file

  intensities = None

  f = args[0]

  arrays = any_reflection_file(f).as_miller_arrays(merge_equivalents=False)
  for ma in arrays:
    print(ma.info().labels)
    if ma.info().labels == ['I', 'SIGI']:
      intensities = ma
    elif ma.info().labels == ['IMEAN', 'SIGIMEAN']:
      intensities = ma
    elif ma.info().labels == ['I(+)', 'SIGI(+)', 'I(-)', 'SIGI(-)']:
      intensities = ma

  assert intensities is not None

  i_sigi = intensities.data()/intensities.sigmas()

  # set backend before importing pyplot
  import matplotlib
  matplotlib.use('Agg')

  from matplotlib import pyplot
  pyplot.scatter(intensities.data(), i_sigi, marker='+', s=2, alpha=0.5, c='black')
  pyplot.gca().set_xscale('log',basex=10)
  xlim = pyplot.xlim()
  ylim = pyplot.ylim()
  pyplot.ylim(0, ylim[1])
  pyplot.xlim(1, xlim[1])
  pyplot.savefig('i_sigi_vs_i.png')
  pyplot.clf()
Example #30
class Script(object):
  ''' Class to parse the command line options. '''

  def __init__(self):
    ''' Set the expected options. '''
    from dials.util.options import OptionParser
    import libtbx.load_env

    # Create the option parser
    usage = "usage: %s [options] /path/to/refined/json/file" % libtbx.env.dispatcher_name
    self.parser = OptionParser(
      usage=usage,
      sort_options=True,
      phil=phil_scope,
      read_experiments=True,
      read_reflections=True,
      epilog=help_message)

  def run(self):
    ''' Parse the options. '''
    from dials.util.options import flatten_experiments, flatten_reflections
    # Parse the command line arguments
    params, options = self.parser.parse_args(show_diff_phil=True)
    self.params = params
    experiments = flatten_experiments(params.input.experiments)
    reflections = flatten_reflections(params.input.reflections)

    assert len(reflections) == len(experiments) == 1
    reflections = reflections[0]
    exp = experiments[0]

    from dials.algorithms.indexing import index_reflections
    from dials.algorithms.indexing.indexer import indexer_base

    reflections['id'] = flex.int(len(reflections), -1)
    reflections['imageset_id'] = flex.int(len(reflections), 0)
    reflections = indexer_base.map_spots_pixel_to_mm_rad(reflections, exp.detector, exp.scan)

    indexer_base.map_centroids_to_reciprocal_space(
      reflections, exp.detector, exp.beam, exp.goniometer,)

    index_reflections(reflections,
                      experiments, params.d_min,
                      tolerance=0.3)
    indexed_reflections = reflections.select(reflections['miller_index'] != (0,0,0))
    print "Indexed %d reflections out of %d"%(len(indexed_reflections), len(reflections))
    easy_pickle.dump("indexedstrong.pickle", indexed_reflections)
Example #31
def run(args=None):
    usage = "dials.background [options] image_*.cbf"

    parser = OptionParser(
        usage=usage,
        phil=phil_scope,
        read_experiments=True,
        read_experiments_from_images=True,
        epilog=help_message,
    )

    params, options = parser.parse_args(args, show_diff_phil=True)

    # Ensure we have either a data block or an experiment list
    experiments = flatten_experiments(params.input.experiments)
    imagesets = experiments.imagesets()

    if params.output.plot:
        import matplotlib

        matplotlib.use("agg")

        import matplotlib.ticker as mticker
        from matplotlib import pyplot

        fig = pyplot.figure(figsize=params.output.size_inches)
        ax = fig.add_subplot(111)

    for i_imgset, imageset in enumerate(imagesets):
        first, last = imageset.get_scan().get_image_range()
        images = range(first, last + 1)

        if params.images:
            if min(params.images) < first or max(params.images) > last:
                raise Sorry("image outside of scan range")
            images = params.images

        d_spacings = []
        intensities = []
        sigmas = []

        for indx in images:
            print(f"For imageset {i_imgset} image {indx}:")
            d, I, sig = background(
                imageset,
                indx - first,  # indices passed to imageset.get_raw_data start from zero
                n_bins=params.n_bins,
                corrected=params.corrected,
                mask_params=params.masking,
            )

            print(f"{'d':>8} {'I':>8} {'sig':>8}")
            for j in range(len(I)):
                print(f"{d[j]:8.3f} {I[j]:8.3f} {sig[j]:8.3f}")

            d_spacings.append(d)
            intensities.append(I)
            sigmas.append(sig)

        if params.output.plot:
            ax.set_xlabel(r"resolution ($\AA$)")
            ax.set_ylabel(r"$\langle I_b \rangle$")
            for indx, d, I, sig in zip(images, d_spacings, intensities, sigmas):
                filenames = imageset.reader().paths()
                if len(imagesets) > 1:
                    label = (
                        f"{filenames[indx - first]}"
                        if len(filenames) > 1
                        else f"{filenames[0]} image {indx}"
                    )
                else:
                    label = f"image {indx}" if len(images) > 1 else f""
                ds2 = 1 / flex.pow2(d)
                ax.plot(ds2, I, label=label)
            xticks = ax.get_xticks().tolist()
            ax.xaxis.set_major_locator(mticker.FixedLocator(xticks))
            x_tick_labs = [
                "" if e <= 0.0 else f"{math.sqrt(1.0 / e):.2f}" for e in xticks
            ]
            ax.set_xticklabels(x_tick_labs)

    if params.output.plot:
        try:
            if len(imagesets) > 1 or len(images) > 1:
                # Plot a legend if there are fewer lines than the number of colours
                # in the colour cycle
                if len(ax.lines) <= len(
                    pyplot.rcParams["axes.prop_cycle"].by_key()["color"]
                ):
                    pyplot.gca().legend()
            pyplot.savefig(params.output.plot)
        except ValueError:
            raise Sorry(f"Unable to save plot to {params.output.plot}")
Example #32
class Script(object):
    ''' Class to parse the command line options. '''
    def __init__(self):
        ''' Set the expected options. '''
        from dials.util.options import OptionParser
        import libtbx.load_env

        # Create the option parser
        usage = "usage: %s experiment1.json experiment2.json reflections1.pickle reflections2.pickle" % libtbx.env.dispatcher_name
        self.parser = OptionParser(usage=usage,
                                   sort_options=True,
                                   phil=phil_scope,
                                   read_experiments=True,
                                   read_datablocks=True,
                                   read_reflections=True,
                                   epilog=help_message)

    def run(self):
        ''' Parse the options. '''
        from dials.util.options import flatten_experiments, flatten_datablocks, flatten_reflections
        # Parse the command line arguments
        params, options = self.parser.parse_args(show_diff_phil=True)
        self.params = params
        experiments = flatten_experiments(params.input.experiments)
        datablocks = flatten_datablocks(params.input.datablock)
        reflections = flatten_reflections(params.input.reflections)

        # Find all detector objects
        detectors = []
        detectors.extend(experiments.detectors())
        dbs = []
        for datablock in datablocks:
            dbs.extend(datablock.unique_detectors())
        detectors.extend(dbs)

        # Verify inputs
        if len(detectors) != 2:
            print("Please provide two experiments and/or datablocks for comparison")
            return

        # These lines exercise the iterate_detector_at_level and iterate_panels functions
        # for a detector with 4 hierarchy levels
        """
    print "Testing iterate_detector_at_level"
    for level in xrange(4):
      print "iterating at level", level
      for panelg in iterate_detector_at_level(detectors[0].hierarchy(), 0, level):
        print panelg.get_name()

    print "Testing iterate_panels"
    for level in xrange(4):
      print "iterating at level", level
      for panelg in iterate_detector_at_level(detectors[0].hierarchy(), 0, level):
        for panel in iterate_panels(panelg):
          print panel.get_name()
    """
        tmp = []
        for refls in reflections:
            print("N reflections total:", len(refls))
            sel = refls.get_flags(refls.flags.used_in_refinement)
            if sel.count(True) > 0:
                refls = refls.select(sel)
                print("N reflections used in refinement", len(refls))
                print("Reporting only on those reflections used in refinement")

            refls['difference_vector_norms'] = (
                refls['xyzcal.mm'] - refls['xyzobs.mm.value']).norms()
            tmp.append(refls)
        reflections = tmp

        s0 = col(
            flex.vec3_double([col(b.get_s0())
                              for b in experiments.beams()]).mean())

        # Compute a set of radial and transverse displacements for each reflection
        print("Setting up stats...")
        tmp_refls = []
        for refls, expts in zip(
                reflections,
            [wrapper.data for wrapper in params.input.experiments]):
            tmp = flex.reflection_table()
            assert len(expts.detectors()) == 1
            dect = expts.detectors()[0]
            # Need to construct a variety of vectors
            for panel_id, panel in enumerate(dect):
                panel_refls = refls.select(refls['panel'] == panel_id)
                bcl = flex.vec3_double()
                # Compute the beam center in lab space (a vector pointing from the origin to where the beam would intersect
                # the panel, if it did intersect the panel)
                for expt_id in set(panel_refls['id']):
                    beam = expts[expt_id].beam
                    s0_ = beam.get_s0()
                    expt_refls = panel_refls.select(
                        panel_refls['id'] == expt_id)
                    beam_centre = panel.get_beam_centre_lab(s0_)
                    bcl.extend(flex.vec3_double(len(expt_refls), beam_centre))
                panel_refls['beam_centre_lab'] = bcl

                # Compute obs in lab space
                x, y, _ = panel_refls['xyzobs.mm.value'].parts()
                c = flex.vec2_double(x, y)
                panel_refls['obs_lab_coords'] = panel.get_lab_coord(c)
                # Compute deltaXY in panel space. This vector is relative to the panel origin
                x, y, _ = (panel_refls['xyzcal.mm'] -
                           panel_refls['xyzobs.mm.value']).parts()
                # Convert deltaXY to lab space, subtracting off of the panel origin
                panel_refls['delta_lab_coords'] = panel.get_lab_coord(
                    flex.vec2_double(x, y)) - panel.get_origin()
                tmp.extend(panel_refls)
            refls = tmp
            # The radial vector points from the center of the reflection to the beam center
            radial_vectors = (refls['obs_lab_coords'] -
                              refls['beam_centre_lab']).each_normalize()
            # The transverse vector is orthogonal to the radial vector and the beam vector
            transverse_vectors = radial_vectors.cross(
                refls['beam_centre_lab']).each_normalize()
            # Compute the radial and transverse components of each deltaXY
            refls['radial_displacements'] = refls['delta_lab_coords'].dot(
                radial_vectors)
            refls['transverse_displacements'] = refls['delta_lab_coords'].dot(
                transverse_vectors)

            tmp_refls.append(refls)
        reflections = tmp_refls

        # storage for plots
        refl_counts = {}

        # Data for all tables
        pg_bc_dists = flex.double()
        root1 = detectors[0].hierarchy()
        root2 = detectors[1].hierarchy()
        all_weights = flex.double()
        all_refls_count = flex.int()

        # Data for lab space table
        lab_table_data = []
        lab_delta_table_data = []
        all_lab_x = flex.double()
        all_lab_y = flex.double()
        all_lab_z = flex.double()
        pg_lab_x_sigmas = flex.double()
        pg_lab_y_sigmas = flex.double()
        pg_lab_z_sigmas = flex.double()
        all_rotX = flex.double()
        all_rotY = flex.double()
        all_rotZ = flex.double()
        pg_rotX_sigmas = flex.double()
        pg_rotY_sigmas = flex.double()
        pg_rotZ_sigmas = flex.double()
        all_delta_x = flex.double()
        all_delta_y = flex.double()
        all_delta_z = flex.double()
        all_delta_xy = flex.double()
        all_delta_xyz = flex.double()
        all_delta_r = flex.double()
        all_delta_t = flex.double()
        all_delta_norm = flex.double()

        if params.hierarchy_level > 0:
            # Data for local table
            local_table_data = []
            local_delta_table_data = []
            all_local_x = flex.double()
            all_local_y = flex.double()
            all_local_z = flex.double()
            pg_local_x_sigmas = flex.double()
            pg_local_y_sigmas = flex.double()
            pg_local_z_sigmas = flex.double()
            all_local_rotX = flex.double()
            all_local_rotY = flex.double()
            all_local_rotZ = flex.double()
            pg_local_rotX_sigmas = flex.double()
            pg_local_rotY_sigmas = flex.double()
            pg_local_rotZ_sigmas = flex.double()
            all_local_delta_x = flex.double()
            all_local_delta_y = flex.double()
            all_local_delta_z = flex.double()
            all_local_delta_xy = flex.double()
            all_local_delta_xyz = flex.double()

        # Data for RMSD table
        rmsds_table_data = []

        for pg_id, (pg1, pg2) in enumerate(
                zip(
                    iterate_detector_at_level(root1, 0,
                                              params.hierarchy_level),
                    iterate_detector_at_level(root2, 0,
                                              params.hierarchy_level))):
            # Count up the number of reflections in this panel group pair for use as a weighting scheme
            total_refls = 0
            pg1_refls = 0
            pg2_refls = 0
            for p1, p2 in zip(iterate_panels(pg1), iterate_panels(pg2)):
                r1 = len(reflections[0].select(
                    reflections[0]['panel'] == id_from_name(
                        detectors[0], p1.get_name())))
                r2 = len(reflections[1].select(
                    reflections[1]['panel'] == id_from_name(
                        detectors[1], p2.get_name())))
                total_refls += r1 + r2
                pg1_refls += r1
                pg2_refls += r2
            if pg1_refls == 0 and pg2_refls == 0:
                print("No reflections on panel group", pg_id)
                continue
            all_refls_count.append(total_refls)
            all_weights.append(pg1_refls)
            all_weights.append(pg2_refls)

            assert pg1.get_name() == pg2.get_name()
            refl_counts[pg1.get_name()] = total_refls

            # Compute RMSDs
            row = ["%d" % pg_id]
            for pg, refls, det in zip([pg1, pg2], reflections, detectors):
                pg_refls = flex.reflection_table()
                for p in iterate_panels(pg):
                    pg_refls.extend(
                        refls.select(
                            refls['panel'] == id_from_name(det, p.get_name())))
                if len(pg_refls) == 0:
                    rmsd = r_rmsd = t_rmsd = 0
                else:
                    rmsd = math.sqrt(
                        flex.sum_sq(pg_refls['difference_vector_norms']) /
                        len(pg_refls)) * 1000
                    r_rmsd = math.sqrt(
                        flex.sum_sq(pg_refls['radial_displacements']) /
                        len(pg_refls)) * 1000
                    t_rmsd = math.sqrt(
                        flex.sum_sq(pg_refls['transverse_displacements']) /
                        len(pg_refls)) * 1000

                row.extend([
                    "%6.1f" % rmsd,
                    "%6.1f" % r_rmsd,
                    "%6.1f" % t_rmsd,
                    "%8d" % len(pg_refls)
                ])
            rmsds_table_data.append(row)

            dists = flex.double()
            lab_x = flex.double()
            lab_y = flex.double()
            lab_z = flex.double()
            rot_X = flex.double()
            rot_Y = flex.double()
            rot_Z = flex.double()

            for pg in [pg1, pg2]:
                bc = col(pg.get_beam_centre_lab(s0))
                ori = get_center(pg)

                dists.append((ori - bc).length())

                ori_lab = pg.get_origin()
                lab_x.append(ori_lab[0])
                lab_y.append(ori_lab[1])
                lab_z.append(ori_lab[2])

                f = col(pg.get_fast_axis())
                s = col(pg.get_slow_axis())
                n = col(pg.get_normal())
                basis = sqr(
                    [f[0], s[0], n[0], f[1], s[1], n[1], f[2], s[2], n[2]])
                rotX, rotY, rotZ = basis.r3_rotation_matrix_as_x_y_z_angles(
                    deg=True)
                rot_X.append(rotX)
                rot_Y.append(rotY)
                rot_Z.append(rotZ)

            all_lab_x.extend(lab_x)
            all_lab_y.extend(lab_y)
            all_lab_z.extend(lab_z)
            all_rotX.extend(rot_X)
            all_rotY.extend(rot_Y)
            all_rotZ.extend(rot_Z)

            pg_weights = flex.double([pg1_refls, pg2_refls])
            if 0 in pg_weights:
                dist_m = dist_s = 0
                lx_m = lx_s = ly_m = ly_s = lz_m = lz_s = 0
                lrx_m = lrx_s = lry_m = lry_s = lrz_m = lrz_s = 0
                dx = dy = dz = dxy = dxyz = dr = dt = dnorm = 0
            else:
                stats = flex.mean_and_variance(dists, pg_weights)
                dist_m = stats.mean()
                dist_s = stats.gsl_stats_wsd()

                stats = flex.mean_and_variance(lab_x, pg_weights)
                lx_m = stats.mean()
                lx_s = stats.gsl_stats_wsd()

                stats = flex.mean_and_variance(lab_y, pg_weights)
                ly_m = stats.mean()
                ly_s = stats.gsl_stats_wsd()

                stats = flex.mean_and_variance(lab_z, pg_weights)
                lz_m = stats.mean()
                lz_s = stats.gsl_stats_wsd()

                stats = flex.mean_and_variance(rot_X, pg_weights)
                lrx_m = stats.mean()
                lrx_s = stats.gsl_stats_wsd()

                stats = flex.mean_and_variance(rot_Y, pg_weights)
                lry_m = stats.mean()
                lry_s = stats.gsl_stats_wsd()

                stats = flex.mean_and_variance(rot_Z, pg_weights)
                lrz_m = stats.mean()
                lrz_s = stats.gsl_stats_wsd()

                dx = lab_x[0] - lab_x[1]
                dy = lab_y[0] - lab_y[1]
                dz = lab_z[0] - lab_z[1]
                dxy = math.sqrt(dx**2 + dy**2)
                dxyz = math.sqrt(dx**2 + dy**2 + dz**2)

                delta = col([lab_x[0], lab_y[0], lab_z[0]]) - col(
                    [lab_x[1], lab_y[1], lab_z[1]])
                pg1_center = get_center_lab(pg1).normalize()
                transverse = s0.cross(pg1_center).normalize()
                radial = transverse.cross(s0).normalize()
                dr = delta.dot(radial)
                dt = delta.dot(transverse)
                dnorm = col(pg1.get_normal()).angle(col(pg2.get_normal()),
                                                    deg=True)

            pg_bc_dists.append(dist_m)
            pg_lab_x_sigmas.append(lx_s)
            pg_lab_y_sigmas.append(ly_s)
            pg_lab_z_sigmas.append(lz_s)
            pg_rotX_sigmas.append(lrx_s)
            pg_rotY_sigmas.append(lry_s)
            pg_rotZ_sigmas.append(lrz_s)
            all_delta_x.append(dx)
            all_delta_y.append(dy)
            all_delta_z.append(dz)
            all_delta_xy.append(dxy)
            all_delta_xyz.append(dxyz)
            all_delta_r.append(dr)
            all_delta_t.append(dt)
            all_delta_norm.append(dnorm)

            lab_table_data.append([
                "%d" % pg_id,
                "%5.1f" % dist_m,
                "%9.3f" % lx_m,
                "%9.3f" % lx_s,
                "%9.3f" % ly_m,
                "%9.3f" % ly_s,
                "%9.3f" % lz_m,
                "%9.3f" % lz_s,
                "%9.3f" % lrx_m,
                "%9.3f" % lrx_s,
                "%9.3f" % lry_m,
                "%9.3f" % lry_s,
                "%9.3f" % lrz_m,
                "%9.3f" % lrz_s,
                "%6d" % total_refls
            ])

            lab_delta_table_data.append([
                "%d" % pg_id,
                "%5.1f" % dist_m,
                "%9.1f" % (dx * 1000),
                "%9.1f" % (dy * 1000),
                "%9.3f" % dz,
                "%9.1f" % (dxy * 1000),
                "%9.3f" % dxyz,
                "%9.1f" % (dr * 1000),
                "%9.1f" % (dt * 1000),
                "%9.3f" % dnorm,
                "%6d" % total_refls
            ])

            if params.hierarchy_level > 0:
                local_x = flex.double()
                local_y = flex.double()
                local_z = flex.double()
                l_rot_X = flex.double()
                l_rot_Y = flex.double()
                l_rot_Z = flex.double()
                l_dx = flex.double()
                l_dy = flex.double()
                l_dz = flex.double()
                l_dxy = flex.double()
                l_dxyz = flex.double()

                for pg in [pg1, pg2]:

                    l_ori = pg.get_local_origin()
                    local_x.append(l_ori[0])
                    local_y.append(l_ori[1])
                    local_z.append(l_ori[2])

                    f = col(pg.get_local_fast_axis())
                    s = col(pg.get_local_slow_axis())
                    n = f.cross(s)
                    basis = sqr(
                        [f[0], s[0], n[0], f[1], s[1], n[1], f[2], s[2], n[2]])
                    rotX, rotY, rotZ = basis.r3_rotation_matrix_as_x_y_z_angles(
                        deg=True)
                    l_rot_X.append(rotX)
                    l_rot_Y.append(rotY)
                    l_rot_Z.append(rotZ)

                all_local_x.extend(local_x)
                all_local_y.extend(local_y)
                all_local_z.extend(local_z)
                all_local_rotX.extend(l_rot_X)
                all_local_rotY.extend(l_rot_Y)
                all_local_rotZ.extend(l_rot_Z)

                pg_weights = flex.double([pg1_refls, pg2_refls])
                if 0 in pg_weights:
                    lx_m = lx_s = ly_m = ly_s = lz_m = lz_s = 0
                    lrx_m = lrx_s = lry_m = lry_s = lrz_m = lrz_s = 0
                    ldx = ldy = ldz = ldxy = ldxyz = 0
                else:
                    stats = flex.mean_and_variance(local_x, pg_weights)
                    lx_m = stats.mean()
                    lx_s = stats.gsl_stats_wsd()

                    stats = flex.mean_and_variance(local_y, pg_weights)
                    ly_m = stats.mean()
                    ly_s = stats.gsl_stats_wsd()

                    stats = flex.mean_and_variance(local_z, pg_weights)
                    lz_m = stats.mean()
                    lz_s = stats.gsl_stats_wsd()

                    stats = flex.mean_and_variance(l_rot_X, pg_weights)
                    lrx_m = stats.mean()
                    lrx_s = stats.gsl_stats_wsd()

                    stats = flex.mean_and_variance(l_rot_Y, pg_weights)
                    lry_m = stats.mean()
                    lry_s = stats.gsl_stats_wsd()

                    stats = flex.mean_and_variance(l_rot_Z, pg_weights)
                    lrz_m = stats.mean()
                    lrz_s = stats.gsl_stats_wsd()

                    ldx = local_x[0] - local_x[1]
                    ldy = local_y[0] - local_y[1]
                    ldz = local_z[0] - local_z[1]
                    ldxy = math.sqrt(ldx**2 + ldy**2)
                    ldxyz = math.sqrt(ldx**2 + ldy**2 + ldz**2)

                pg_local_x_sigmas.append(lx_s)
                pg_local_y_sigmas.append(ly_s)
                pg_local_z_sigmas.append(lz_s)
                pg_local_rotX_sigmas.append(lrx_s)
                pg_local_rotY_sigmas.append(lry_s)
                pg_local_rotZ_sigmas.append(lrz_s)
                all_local_delta_x.append(ldx)
                all_local_delta_y.append(ldy)
                all_local_delta_z.append(ldz)
                all_local_delta_xy.append(ldxy)
                all_local_delta_xyz.append(ldxyz)

                local_table_data.append([
                    "%d" % pg_id,
                    "%5.1f" % dist_m,
                    "%9.3f" % lx_m,
                    "%9.3f" % lx_s,
                    "%9.3f" % ly_m,
                    "%9.3f" % ly_s,
                    "%9.3f" % lz_m,
                    "%9.3f" % lz_s,
                    "%9.3f" % lrx_m,
                    "%9.3f" % lrx_s,
                    "%9.3f" % lry_m,
                    "%9.3f" % lry_s,
                    "%9.3f" % lrz_m,
                    "%9.3f" % lrz_s,
                    "%6d" % total_refls
                ])

                local_delta_table_data.append([
                    "%d" % pg_id,
                    "%5.1f" % dist_m,
                    "%9.1f" % (ldx * 1000),
                    "%9.1f" % (ldy * 1000),
                    "%9.3f" % ldz,
                    "%9.1f" % (ldxy * 1000),
                    "%9.3f" % ldxyz,
                    "%6d" % total_refls
                ])

        # Set up table output, starting with lab table
        table_d = {d: row for d, row in zip(pg_bc_dists, lab_table_data)}
        table_header = [
            "PanelG", "Radial", "Lab X", "Lab X", "Lab Y", "Lab Y", "Lab Z",
            "Lab Z", "Rot X", "Rot X", "Rot Y", "Rot Y", "Rot Z", "Rot Z", "N"
        ]
        table_header2 = [
            "Id", "Dist", "", "Sigma", "", "Sigma", "", "Sigma", "", "Sigma",
            "", "Sigma", "", "Sigma", "Refls"
        ]
        table_header3 = [
            "", "(mm)", "(mm)", "(mm)", "(mm)", "(mm)", "(mm)", "(mm)",
            "(deg)", "(deg)", "(deg)", "(deg)", "(deg)", "(deg)", ""
        ]
        lab_table_data = [table_header, table_header2, table_header3]
        lab_table_data.extend([table_d[key] for key in sorted(table_d)])

        if len(all_weights) > 1:
            r1 = ["All"]
            r2 = ["Mean"]
            for data, weights, fmt in [
                [None, None, None],
                [all_lab_x, all_weights.as_double(), "%9.3f"],
                [pg_lab_x_sigmas,
                 all_refls_count.as_double(), "%9.3f"],
                [all_lab_y, all_weights.as_double(), "%9.3f"],
                [pg_lab_y_sigmas,
                 all_refls_count.as_double(), "%9.3f"],
                [all_lab_z, all_weights.as_double(), "%9.3f"],
                [pg_lab_z_sigmas,
                 all_refls_count.as_double(), "%9.3f"],
                [all_rotX, all_weights.as_double(), "%9.3f"],
                [pg_rotX_sigmas,
                 all_refls_count.as_double(), "%9.3f"],
                [all_rotY, all_weights.as_double(), "%9.3f"],
                [pg_rotY_sigmas,
                 all_refls_count.as_double(), "%9.3f"],
                [all_rotZ, all_weights.as_double(), "%9.3f"],
                [pg_rotZ_sigmas,
                 all_refls_count.as_double(), "%9.3f"]
            ]:
                r2.append("")
                if data is None and weights is None:
                    r1.append("")
                    continue
                stats = flex.mean_and_variance(data, weights)
                r1.append(fmt % stats.mean())

            r1.append("")
            r2.append("%6.1f" % flex.mean(all_refls_count.as_double()))
            lab_table_data.append(r1)
            lab_table_data.append(r2)

        from libtbx import table_utils
        print "Detector statistics relative to lab origin"
        print table_utils.format(lab_table_data,
                                 has_header=3,
                                 justify='center',
                                 delim=" ")
        print "PanelG Id: panel group id or panel id, depending on hierarchy_level. For each panel group, weighted means and weighted standard deviations (Sigmas) for the properties listed below are computed using the matching panel groups between the input experiments."
        print "Radial dist: distance from center of panel group to the beam center"
        print "Lab X, Y and Z: mean coordinate in lab space"
        print "Rot X, Y and Z: rotation of panel group around lab X, Y and Z axes"
        print "N refls: number of reflections summed between both matching panel groups. This number is used as a weight when computing means and standard deviations."
        print "All: weighted mean of the values shown"
        print

        # Next, deltas in lab space
        table_d = {d: row for d, row in zip(pg_bc_dists, lab_delta_table_data)}
        table_header = [
            "PanelG", "Radial", "Lab dX", "Lab dY", "Lab dZ", "Lab dXY",
            "Lab dXYZ", "Lab dR", "Lab dT", "Lab dNorm", "N"
        ]
        table_header2 = ["Id", "Dist", "", "", "", "", "", "", "", "", "Refls"]
        table_header3 = [
            "", "(mm)", "(microns)", "(microns)", "(mm)", "(microns)", "(mm)",
            "(microns)", "(microns)", "(deg)", ""
        ]
        lab_delta_table_data = [table_header, table_header2, table_header3]
        lab_delta_table_data.extend([table_d[key] for key in sorted(table_d)])

        if len(all_weights) > 1:
            r1 = ["WMean"]
            r2 = ["WStddev"]
            r3 = ["Mean"]
            for data, weights, fmt in [
                [None, None, None],
                [all_delta_x * 1000,
                 all_refls_count.as_double(), "%9.1f"],
                [all_delta_y * 1000,
                 all_refls_count.as_double(), "%9.1f"],
                [all_delta_z,
                 all_refls_count.as_double(), "%9.3f"],
                [all_delta_xy * 1000,
                 all_refls_count.as_double(), "%9.1f"],
                [all_delta_xyz,
                 all_refls_count.as_double(), "%9.3f"],
                [all_delta_r * 1000,
                 all_refls_count.as_double(), "%9.1f"],
                [all_delta_t * 1000,
                 all_refls_count.as_double(), "%9.1f"],
                [all_delta_norm,
                 all_refls_count.as_double(), "%9.3f"]
            ]:
                r3.append("")
                if data is None and weights is None:
                    r1.append("")
                    r2.append("")
                    continue
                stats = flex.mean_and_variance(data, weights)
                r1.append(fmt % stats.mean())
                if len(data) > 1:
                    r2.append(fmt % stats.gsl_stats_wsd())
                else:
                    r2.append("-")

            r1.append("")
            r2.append("")
            r3.append("%6.1f" % flex.mean(all_refls_count.as_double()))
            lab_delta_table_data.append(r1)
            lab_delta_table_data.append(r2)
            lab_delta_table_data.append(r3)

        print "Detector deltas in lab space"
        print table_utils.format(lab_delta_table_data,
                                 has_header=3,
                                 justify='center',
                                 delim=" ")
        print "PanelG Id: panel group id or panel id, depending on hierarchy_level. For each panel group, weighted means and weighted standard deviations (Sigmas) for the properties listed below are computed using the matching panel groups between the input experiments."
        print "Radial dist: distance from center of panel group to the beam center"
        print "Lab dX, dY and dZ: delta between X, Y and Z coordinates in lab space"
        print "Lab dR, dT and dZ: radial and transverse components of dXY in lab space"
        print "Lab dNorm: angle between normal vectors in lab space"
        print "N refls: number of reflections summed between both matching panel groups. This number is used as a weight when computing means and standard deviations."
        print "WMean: weighted mean of the values shown"
        print "WStddev: weighted standard deviation of the values shown"
        print "Mean: mean of the values shown"
        print

        if params.hierarchy_level > 0:
            # Local table
            table_d = {d: row for d, row in zip(pg_bc_dists, local_table_data)}
            table_header = [
                "PanelG", "Radial", "Local X", "Local X", "Local Y", "Local Y",
                "Local Z", "Local Z", "Rot X", "Rot X", "Rot Y", "Rot Y",
                "Rot Z", "Rot Z", "N"
            ]
            table_header2 = [
                "Id", "Dist", "", "Sigma", "", "Sigma", "", "Sigma", "",
                "Sigma", "", "Sigma", "", "Sigma", "Refls"
            ]
            table_header3 = [
                "", "(mm)", "(mm)", "(mm)", "(mm)", "(mm)", "(mm)", "(mm)",
                "(deg)", "(deg)", "(deg)", "(deg)", "(deg)", "(deg)", ""
            ]
            local_table_data = [table_header, table_header2, table_header3]
            local_table_data.extend([table_d[key] for key in sorted(table_d)])

            if len(all_weights) > 1:
                r1 = ["All"]
                r2 = ["Mean"]
                for data, weights, fmt in [
                    [None, None, None],
                    [all_local_x,
                     all_weights.as_double(), "%9.3f"],
                    [pg_local_x_sigmas,
                     all_refls_count.as_double(), "%9.3f"],
                    [all_local_y,
                     all_weights.as_double(), "%9.3f"],
                    [pg_local_y_sigmas,
                     all_refls_count.as_double(), "%9.3f"],
                    [all_local_z,
                     all_weights.as_double(), "%9.3f"],
                    [pg_local_z_sigmas,
                     all_refls_count.as_double(), "%9.3f"],
                    [all_local_rotX,
                     all_weights.as_double(), "%9.3f"],
                    [
                        pg_local_rotX_sigmas,
                        all_refls_count.as_double(), "%9.3f"
                    ], [all_local_rotY,
                        all_weights.as_double(), "%9.3f"],
                    [
                        pg_local_rotY_sigmas,
                        all_refls_count.as_double(), "%9.3f"
                    ], [all_local_rotZ,
                        all_weights.as_double(), "%9.3f"],
                    [
                        pg_local_rotZ_sigmas,
                        all_refls_count.as_double(), "%9.3f"
                    ]
                ]:
                    r2.append("")
                    if data is None and weights is None:
                        r1.append("")
                        continue
                    stats = flex.mean_and_variance(data, weights)
                    r1.append(fmt % stats.mean())

                r1.append("")
                r2.append("%6.1f" % flex.mean(all_refls_count.as_double()))
                local_table_data.append(r1)
                local_table_data.append(r2)

            print "Detector statistics in local frame of each panel group"
            print table_utils.format(local_table_data,
                                     has_header=3,
                                     justify='center',
                                     delim=" ")
            print "PanelG Id: panel group id or panel id, depending on hierarchy_level. For each panel group, weighted means and weighted standard deviations (Sigmas) for the properties listed below are computed using the matching panel groups between the input experiments."
            print "Radial dist: distance from center of panel group to the beam center"
            print "Lab X, Y and Z: mean coordinate in relative to parent panel group"
            print "Rot X, Y and Z: rotation of panel group around parent panel group X, Y and Z axes"
            print "N refls: number of reflections summed between both matching panel groups. This number is used as a weight when computing means and standard deviations."
            print "All: weighted mean of the values shown"
            print

            # Next, deltas in local space
            table_d = {
                d: row
                for d, row in zip(pg_bc_dists, local_delta_table_data)
            }
            table_header = [
                "PanelG", "Radial", "Local dX", "Local dY", "Local dZ",
                "Local dXY", "Local dXYZ", "N"
            ]
            table_header2 = ["Id", "Dist", "", "", "", "", "", "Refls"]
            table_header3 = [
                "", "(mm)", "(microns)", "(microns)", "(mm)", "(microns)",
                "(mm)", ""
            ]
            local_delta_table_data = [
                table_header, table_header2, table_header3
            ]
            local_delta_table_data.extend(
                [table_d[key] for key in sorted(table_d)])

            if len(all_weights) > 1:
                r1 = ["WMean"]
                r2 = ["WStddev"]
                r3 = ["Mean"]
                for data, weights, fmt in [
                    [None, None, None],
                    [
                        all_local_delta_x * 1000,
                        all_refls_count.as_double(), "%9.1f"
                    ],
                    [
                        all_local_delta_y * 1000,
                        all_refls_count.as_double(), "%9.1f"
                    ],
                    [all_local_delta_z,
                     all_refls_count.as_double(), "%9.3f"],
                    [
                        all_local_delta_xy * 1000,
                        all_refls_count.as_double(), "%9.1f"
                    ],
                    [
                        all_local_delta_xyz,
                        all_refls_count.as_double(), "%9.3f"
                    ]
                ]:
                    r3.append("")
                    if data is None and weights is None:
                        r1.append("")
                        r2.append("")
                        continue
                    stats = flex.mean_and_variance(data, weights)
                    r1.append(fmt % stats.mean())
                    r2.append(fmt % stats.gsl_stats_wsd())

                r1.append("")
                r2.append("")
                r3.append("%6.1f" % flex.mean(all_refls_count.as_double()))
                local_delta_table_data.append(r1)
                local_delta_table_data.append(r2)
                local_delta_table_data.append(r3)

            print "Detector deltas relative to panel group origin"
            print table_utils.format(local_delta_table_data,
                                     has_header=3,
                                     justify='center',
                                     delim=" ")
            print "PanelG Id: panel group id or panel id, depending on hierarchy_level. For each panel group, weighted means and weighted standard deviations (Sigmas) for the properties listed below are computed using the matching panel groups between the input experiments."
            print "Radial dist: distance from center of panel group to the beam center"
            print "Local dX, dY and dZ: delta between X, Y and Z coordinates in the local frame of the panel group"
            print "N refls: number of reflections summed between both matching panel groups. This number is used as a weight when computing means and standard deviations."
            print "All: weighted mean of the values shown"
            print

        # RMSD table
        table_d = {d: row for d, row in zip(pg_bc_dists, rmsds_table_data)}
        table_header = ["PanelG"]
        table_header2 = ["Id"]
        table_header3 = [""]
        for i in range(len(detectors)):
            table_header.extend(["D%d" % i] * 4)
            table_header2.extend(["RMSD", "rRMSD", "tRMSD", "N refls"])
            table_header3.extend(["(microns)"] * 3)
            table_header3.append("")
        rmsds_table_data = [table_header, table_header2, table_header3]
        rmsds_table_data.extend([table_d[key] for key in sorted(table_d)])

        row = ["Overall"]
        for refls in reflections:
            row.append("%6.1f" % (math.sqrt(
                flex.sum_sq(refls['difference_vector_norms']) / len(refls)) *
                                  1000))
            row.append("%6.1f" % (math.sqrt(
                flex.sum_sq(refls['radial_displacements']) / len(refls)) *
                                  1000))
            row.append("%6.1f" % (math.sqrt(
                flex.sum_sq(refls['transverse_displacements']) / len(refls)) *
                                  1000))
            row.append("%8d" % len(refls))
        rmsds_table_data.append(row)

        print "RMSDs by detector number"
        print table_utils.format(rmsds_table_data,
                                 has_header=3,
                                 justify='center',
                                 delim=" ")
        print "PanelG Id: panel group id or panel id, depending on hierarchy_level"
        print "RMSD: root mean squared deviation between observed and predicted spot locations"
        print "rRMSD: RMSD of radial components of the observed-predicted vectors"
        print "tRMSD: RMSD of transverse components of the observed-predicted vectors"
        print "N refls: number of reflections"

        if params.tag is None:
            tag = ""
        else:
            tag = "%s " % params.tag

        if params.show_plots:
            # Plot the results
            self.detector_plot_dict(detectors[0],
                                    refl_counts,
                                    u"%sN reflections" % tag,
                                    u"%6d",
                                    show=False)

    def detector_plot_dict(self,
                           detector,
                           data,
                           title,
                           units_str,
                           show=True,
                           reverse_colormap=False):
        """
    Use matplotlib to plot a detector, color coding panels according to data
    @param detector detector reference detector object
    @param data python dictionary of panel names as keys and numbers as values
    @param title title string for plot
    @param units_str string with a formatting statment for units on each panel
    """
        # initialize the color map
        values = flex.double(data.values())
        norm = Normalize(vmin=flex.min(values), vmax=flex.max(values))
        if reverse_colormap:
            cmap = plt.cm.get_cmap(self.params.colormap + "_r")
        else:
            cmap = plt.cm.get_cmap(self.params.colormap)
        sm = cm.ScalarMappable(norm=norm, cmap=cmap)
        if len(values) == 0:
            print "no values"
            return
        elif len(values) == 1:
            sm.set_array(np.arange(values[0], values[0],
                                   1))  # needed for colorbar
        else:
            sm.set_array(
                np.arange(flex.min(values), flex.max(values),
                          (flex.max(values) - flex.min(values)) /
                          20))  # needed for colorbar

        fig = plt.figure()
        ax = fig.add_subplot(111, aspect='equal')
        max_dim = 0
        root = detector.hierarchy()
        rf = col(root.get_fast_axis())
        rs = col(root.get_slow_axis())
        for pg_id, pg in enumerate(
                iterate_detector_at_level(root, 0,
                                          self.params.hierarchy_level)):
            if pg.get_name() not in data:
                continue
            # get panel coordinates
            p0, p1, p2, p3 = get_bounds(root, pg)

            v1 = p1 - p0
            v2 = p3 - p0
            vcen = ((v2 / 2) + (v1 / 2)) + p0

            # add the panel to the plot
            ax.add_patch(
                Polygon((p0[0:2], p1[0:2], p2[0:2], p3[0:2]),
                        closed=True,
                        color=sm.to_rgba(data[pg.get_name()]),
                        fill=True))
            ax.annotate("%d %s" % (pg_id, units_str % data[pg.get_name()]),
                        vcen[0:2],
                        ha='center')

            if self.params.draw_normal_arrows:
                pgn = col(pg.get_normal())
                v = col((rf.dot(pgn), rs.dot(pgn), 0))
                v *= 10000
                ax.arrow(vcen[0],
                         vcen[1],
                         v[0],
                         v[1],
                         head_width=5.0,
                         head_length=10.0,
                         fc='k',
                         ec='k')

            # find the plot maximum dimensions
            for p in [p0, p1, p2, p3]:
                for c in p[0:2]:
                    if abs(c) > max_dim:
                        max_dim = abs(c)

        # plot the results
        ax.set_xlim((-max_dim, max_dim))
        ax.set_ylim((-max_dim, max_dim))
        ax.set_xlabel("mm")
        ax.set_ylabel("mm")
        fig.colorbar(sm)
        plt.title(title)
        if show:
            plt.show()
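
A minimal sketch of the weighted-statistics pattern used throughout the class
above (flex.mean_and_variance with reflection counts as weights), assuming a
cctbx installation; the numbers are invented for illustration:

from scitbx.array_family import flex

values = flex.double([1.0, 2.0, 4.0])       # e.g. per-panel-group quantities
weights = flex.double([10.0, 20.0, 5.0])    # e.g. matched reflection counts
stats = flex.mean_and_variance(values, weights)
print(stats.mean())             # weighted mean
print(stats.gsl_stats_wsd())    # weighted standard deviation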
Example #33
def run(args: List[str] = None, phil: libtbx.phil.scope = phil_scope) -> None:
    usage = "dials.missing_reflections [options] scaled.expt scaled.refl"

    parser = OptionParser(
        usage=usage,
        phil=phil,
        read_reflections=True,
        read_experiments=True,
        check_format=False,
        epilog=__doc__,
    )

    params, options = parser.parse_args(args=args, show_diff_phil=False)

    # Configure the logging.
    dials.util.log.config(options.verbose)
    logger.info(dials_version())

    # Log the difference between the PHIL scope definition and the active PHIL scope,
    # which will include the parsed user inputs.
    diff_phil = parser.diff_phil.as_str()
    if diff_phil:
        logger.info("The following parameters have been modified:\n%s",
                    diff_phil)

    experiments = flatten_experiments(params.input.experiments)
    reflections = flatten_reflections(params.input.reflections)

    if not experiments or not reflections:
        parser.print_help()
        return
    if len(reflections) != 1 and len(experiments) != len(reflections):
        sys.exit(
            "Number of experiments must equal the number of reflection tables")

    from dials.util.multi_dataset_handling import (
        assign_unique_identifiers,
        parse_multiple_datasets,
    )

    reflections = parse_multiple_datasets(reflections)
    experiments, reflections = assign_unique_identifiers(
        experiments, reflections)

    if all("inverse_scale_factor" in refl for refl in reflections):
        # Assume all arrays have been scaled
        miller_array = scaled_data_as_miller_array(reflections,
                                                   experiments,
                                                   anomalous_flag=False)
    else:
        # Else get the integrated intensities
        miller_arrays = filtered_arrays_from_experiments_reflections(
            experiments,
            reflections,
        )
        miller_array = miller_arrays[0]
        for ma in miller_arrays[1:]:
            miller_array = miller_array.concatenate(ma)

    if params.d_min or params.d_max:
        miller_array = miller_array.resolution_filter(d_min=params.d_min,
                                                      d_max=params.d_max)

    # Print overall summary of input miller array
    s = io.StringIO()
    ma_unique = miller_array.unique_under_symmetry()
    ma_unique.show_comprehensive_summary(f=s)
    logger.info(f"\n{s.getvalue()}")

    # Get the regions of missing reflections
    complete_set, unique_ms = missing_reflections.connected_components(
        miller_array)
    unique_ms = [
        ms for ms in unique_ms if ms.size() >= params.min_component_size
    ]

    # Print some output for user
    if len(unique_ms):
        logger.info(
            "The following connected regions of missing reflections have been identified:"
        )
        n_expected = complete_set.size()
        rows = []
        for ms in unique_ms:
            d_max, d_min = (uctbx.d_star_sq_as_d(ds2)
                            for ds2 in ms.min_max_d_star_sq())
            rows.append([
                ms.size(),
                f"{100 * ms.size() / n_expected:.1f}",
                f"{d_max:.2f}-{d_min:.2f}",
            ])
        logger.info(
            tabulate(
                rows,
                headers=["# reflections", "% missing",
                         "Resolution range (Å)"]))
    else:
        logger.info("No connected regions of missing reflections identified")
Example #34
def run(args=None):
    usage = "usage: dxtbx.detector_superpose reference.json moving.json "
    parser = OptionParser(
        usage=usage,
        sort_options=True,
        phil=phil_scope,
        check_format=False,
        epilog=help_message,
    )
    params, options = parser.parse_args(args, show_diff_phil=True)

    reference_experiments = ExperimentListFactory.from_json_file(
        params.reference_experiments, check_format=False)
    if len(reference_experiments.detectors()) != 1:
        sys.exit("Error: Please ensure reference has only 1 detector model")
    reference = reference_experiments.detectors()[0]

    moving_experiments = ExperimentListFactory.from_json_file(
        params.moving_experiments, check_format=False)
    if len(moving_experiments.detectors()) != 1:
        sys.exit("Error: Please ensure moving has only 1 detector model")
    moving = moving_experiments.detectors()[0]

    # Get list of panels to compare
    if params.panel_list is None or len(params.panel_list) == 0:
        assert len(reference) == len(moving), "Detectors not same length"
        panel_ids = list(range(len(reference)))
    else:
        max_p_id = max(params.panel_list)
        assert max_p_id < len(reference), (
            "Reference detector must be at least %d panels long given the panel list"
            % (max_p_id + 1))
        assert max_p_id < len(
            moving
        ), "Moving detector must be at least %d panels long given the panel list" % (
            max_p_id + 1)
        panel_ids = params.panel_list

    if params.fit_target == "centers":
        assert (
            len(panel_ids) >= 3
        ), "When using centers as target for superpose, detector needs at least 3 panels"

    def rmsd_from_centers(a, b):
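        # Collapse each panel's four corner sites to a centroid and return
        # the centroid-to-centroid RMSD in microns.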
        assert len(a) == len(b)
        assert len(a) % 4 == len(b) % 4 == 0
        ca = flex.vec3_double()
        cb = flex.vec3_double()
        for i in range(len(a) // 4):
            ca.append(a[i:i + 4].mean())
            cb.append(b[i:i + 4].mean())
        return 1000 * math.sqrt((ca - cb).sum_sq() / len(ca))

    cycles = 0
    while True:
        cycles += 1

        # Treat panels as a list of 4 sites (corners) or 1 site (centers) for use with lsq superpose
        reference_sites = flex.vec3_double()
        moving_sites = flex.vec3_double()
        for panel_id in panel_ids:
            for detector, sites in zip([reference, moving],
                                       [reference_sites, moving_sites]):
                panel = detector[panel_id]
                size = panel.get_image_size()
                corners = flex.vec3_double([
                    panel.get_pixel_lab_coord(point) for point in [
                        (0, 0),
                        (0, size[1] - 1),
                        (size[0] - 1, size[1] - 1),
                        (size[0] - 1, 0),
                    ]
                ])
                if params.fit_target == "corners":
                    sites.extend(corners)
                elif params.fit_target == "centers":
                    sites.append(corners.mean())

        # Compute super position
        rmsd = 1000 * math.sqrt(
            (reference_sites - moving_sites).sum_sq() / len(reference_sites))
        print("RMSD before fit: %.1f microns" % rmsd)
        if params.fit_target == "corners":
            rmsd = rmsd_from_centers(reference_sites, moving_sites)
            print("RMSD of centers before fit: %.1f microns" % rmsd)
        lsq = least_squares_fit(reference_sites, moving_sites)
        rmsd = 1000 * math.sqrt(
            (reference_sites - lsq.other_sites_best_fit()).sum_sq() /
            len(reference_sites))
        print("RMSD of fit: %.1f microns" % rmsd)
        if params.fit_target == "corners":
            rmsd = rmsd_from_centers(reference_sites,
                                     lsq.other_sites_best_fit())
            print("RMSD of fit of centers: %.1f microns" % rmsd)
        (
            angle,
            axis,
        ) = lsq.r.r3_rotation_matrix_as_unit_quaternion(
        ).unit_quaternion_as_axis_and_angle(deg=True)
        print("Axis and angle of rotation: (%.3f, %.3f, %.3f), %.2f degrees" %
              (axis[0], axis[1], axis[2], angle))
        print("Translation (x, y, z, in microns): (%.3f, %.3f, %.3f)" %
              (1000 * lsq.t).elems)

        # Apply the shifts
        if params.apply_at_hierarchy_level is None:
            iterable = moving
        else:
            iterable = iterate_detector_at_level(
                moving.hierarchy(), level=params.apply_at_hierarchy_level)

        for group in iterable:
            fast = col(group.get_fast_axis())
            slow = col(group.get_slow_axis())
            ori = col(group.get_origin())

            group.set_frame(lsq.r * fast, lsq.r * slow, (lsq.r * ori) + lsq.t)

        if not params.repeat_until_converged:
            break

        if approx_equal(angle, 0.0, out=None) and approx_equal(
            (1000 * lsq.t).length(), 0.0, out=None):
            print("Converged after", cycles, "cycles")
            break
        else:
            print("Movement not close to zero, repeating fit")
            print()

    dump.experiment_list(moving_experiments, params.output_experiments)

    moved_sites = flex.vec3_double()
    for panel_id in panel_ids:
        panel = moving[panel_id]
        size = panel.get_image_size()
        corners = flex.vec3_double([
            panel.get_pixel_lab_coord(point) for point in [
                (0, 0),
                (0, size[1] - 1),
                (size[0] - 1, size[1] - 1),
                (size[0] - 1, 0),
            ]
        ])
        if params.fit_target == "corners":
            moved_sites.extend(corners)
        elif params.fit_target == "centers":
            moved_sites.append(corners.mean())

    # Re-compute RMSD after moving detector components
    rmsd = 1000 * math.sqrt(
        (reference_sites - moved_sites).sum_sq() / len(reference_sites))
    print("RMSD of fit after movement: %.1f microns" % rmsd)
    if params.fit_target == "corners":
        rmsd = rmsd_from_centers(reference_sites, moved_sites)
        print("RMSD of fit of centers after movement: %.1f microns" % rmsd)

    if params.panel_list is not None:
        reference_sites = flex.vec3_double()
        moved_sites = flex.vec3_double()
        for panel_id in range(len(reference)):
            for detector, sites in zip([reference, moving],
                                       [reference_sites, moved_sites]):
                panel = detector[panel_id]
                size = panel.get_image_size()
                corners = flex.vec3_double([
                    panel.get_pixel_lab_coord(point) for point in [
                        (0, 0),
                        (0, size[1] - 1),
                        (size[0] - 1, size[1] - 1),
                        (size[0] - 1, 0),
                    ]
                ])
                if params.fit_target == "corners":
                    sites.extend(corners)
                elif params.fit_target == "centers":
                    sites.append(corners.mean())
        # Re-compute RMSD for full detector after moving detector components
        rmsd = 1000 * math.sqrt(
            (reference_sites - moved_sites).sum_sq() / len(reference_sites))
        print("RMSD of whole detector fit after movement: %.1f microns" % rmsd)
        if params.fit_target == "corners":
            rmsd = rmsd_from_centers(reference_sites, moved_sites)
            print(
                "RMSD of whole detector fit of centers after movement: %.1f microns"
                % rmsd)
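
The core of the superposition above is a rigid-body least-squares fit between
two lists of 3D sites. A toy sketch of the same call pattern, assuming
least_squares_fit here is scitbx.math.superpose.least_squares_fit from cctbx:

from scitbx.array_family import flex
from scitbx.math import superpose

reference = flex.vec3_double([(0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)])
moving = flex.vec3_double([(0.1, 0, 0), (1.1, 0, 0), (0.1, 1, 0), (0.1, 0, 1)])
lsq = superpose.least_squares_fit(reference, moving)
print(lsq.r)                        # best-fit rotation matrix
print(lsq.t)                        # best-fit translation
print(lsq.other_sites_best_fit())   # moving sites after applying the fit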
Example #35
class Script(object):
    '''A class for running the script.'''
    def __init__(self):
        '''Initialise the script.'''
        from dials.util.options import OptionParser
        import libtbx.load_env

        # The script usage
        usage = "usage: %s [options] [param.phil] "\
                "datablock.json" \
                % libtbx.env.dispatcher_name

        # Initialise the base class
        self.parser = OptionParser(usage=usage,
                                   phil=phil_scope,
                                   epilog=help_message,
                                   read_datablocks=True,
                                   read_reflections=True)

    def run(self):
        '''Execute the script.'''
        from dials.array_family import flex
        from dials.util.options import flatten_datablocks
        from dials.util.options import flatten_reflections
        from time import time
        from dials.util import log
        from libtbx.utils import Sorry
        start_time = time()

        # Parse the command line
        params, options = self.parser.parse_args(show_diff_phil=False)

        # Configure the logging
        log.config(params.verbosity,
                   info=params.output.log,
                   debug=params.output.debug_log)

        from dials.util.version import dials_version
        logger.info(dials_version())

        # Log the diff phil
        diff_phil = self.parser.diff_phil.as_str()
        if diff_phil:
            logger.info('The following parameters have been modified:\n')
            logger.info(diff_phil)

        # Ensure we have a data block
        datablocks = flatten_datablocks(params.input.datablock)
        reflections = flatten_reflections(params.input.reflections)
        if len(datablocks) == 0 and len(reflections) == 0:
            self.parser.print_help()
            return
        elif len(datablocks) != len(reflections):
            raise Sorry(
                "Must have same number of datablocks and reflection tables")

        # Combine the datablocks and reflections
        datablock, reflections = combine(datablocks, reflections, params)

        # Save the reflections to file
        logger.info('\n' + '-' * 80)
        reflections.as_pickle(params.output.reflections)
        logger.info('Saved {0} reflections to {1}'.format(
            len(reflections), params.output.reflections))

        # Save the datablock
        from dxtbx.datablock import DataBlockDumper
        logger.info('Saving datablock to {0}'.format(params.output.datablock))
        dump = DataBlockDumper(datablock)
        dump.as_file(params.output.datablock)

        # Print the time
        logger.info("Time Taken: %f" % (time() - start_time))
Example #36
def main():
    usage = "dials.generate_distortion_maps [options] image_*.cbf"

    parser = OptionParser(
        usage=usage,
        phil=scope,
        read_experiments=True,
        read_experiments_from_images=True,
        check_format=False,
        epilog=help_message,
    )

    params, options = parser.parse_args()

    # Configure the logging
    log.config(verbosity=options.verbose, logfile=params.output.log)

    logger.info(dials_version())

    # Log the diff phil
    diff_phil = parser.diff_phil.as_str()
    if diff_phil != "":
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)

    experiments = flatten_experiments(params.input.experiments)

    if len(experiments) == 0:
        parser.print_help()
        exit()

    assert len(experiments) == 1

    imagesets = experiments.imagesets()

    assert len(imagesets) == 1

    imageset = imagesets[0]

    if params.mode == "translate":
        op = params.translate
        logger.info("Generating translation map with dx={0}, dy={1}".format(
            op.dx, op.dy))
        dx, dy = make_dx_dy_translate(imageset, op.dx, op.dy)
    elif params.mode == "ellipse":
        op = params.ellipse
        logger.info("Generating elliptical map with phi={0}, l1={1}, "
                    "l2={2}, centre_xy={3},{4}".format(op.phi, op.l1, op.l2,
                                                       *op.centre_xy))
        dx, dy = make_dx_dy_ellipse(imageset, op.phi, op.l1, op.l2,
                                    op.centre_xy)
    else:
        raise Sorry("Unrecognised mode")

    logger.info("Saving X distortion map to {0}".format(params.output.x_map))
    with open(params.output.x_map, "wb") as f:
        pickle.dump(dx, f, pickle.HIGHEST_PROTOCOL)

    logger.info("Saving Y distortion map to {0}".format(params.output.y_map))
    with open(params.output.y_map, "wb") as f:
        pickle.dump(dy, f, pickle.HIGHEST_PROTOCOL)
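
The distortion maps are written as plain pickles, so reading one back is
symmetric; the file name below stands in for whatever params.output.x_map was
set to:

import pickle

with open("dx.pickle", "rb") as f:
    dx = pickle.load(f)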
Example #37
class Script(object):
    """A class for running the script."""
    def __init__(self):
        """Initialise the script."""
        from libtbx.phil import parse

        # The phil scope
        phil_scope = parse(
            """
            hklin = None
                .type = path
                .help = "MTZ file (containing observed and calculated structure "
                        "factors)"

            Fo = F
                .type = str
                .help = "MTZ column name for Fobs"

            Fc = FC_ALL
                .type = str
                .help = "MTZ column name for Fcalc (FC_ALL from Refmac includes the "
                        "bulk solvent contribution)"

            max_Fc = 300
                .type = float
                .help = "Set plot limits to display data up to this value of Fc"

            plot_filename = Fo_vs_Fc.pdf
                .type = str
                .help = "Filename for plot"

            fit_hyperbola = True
                .type = bool
                .help = "Calculate and show the fit of a hyperbolic function given by "
                        "|Fo|^2 = |Fc|^2 + |Fe|^2, where |Fe| describes the error term "
                        "containing information about dynamic scattering and other "
                        "effects"

            show_y_eq_x = True
                .type = bool
                .help = "Plot y=x as a dashed line"
            """,
            process_includes=True,
        )

        # The script usage
        usage = "usage: dials.plot_Fo_vs_Fc hklin=refined.mtz"

        # Create the parser
        self.parser = OptionParser(usage=usage,
                                   phil=phil_scope,
                                   epilog=__doc__)

        self.model_fit = None

        return

    def _extract_data_from_mtz(self):
        try:
            m = mtz.object(self.params.hklin)
        except RuntimeError:
            raise Sorry("Could not read {0}".format(self.params.hklin))

        mad = m.as_miller_arrays_dict()
        mad = {k[-1]: v for (k, v) in mad.items()}
        fobs = mad.get(self.params.Fo)
        fc = mad.get(self.params.Fc)

        if [fobs, fc].count(None) > 0:
            raise Sorry(
                "Columns {0} not found in available labels: {1}".format(
                    ", ".join([self.params.Fo, self.params.Fc]),
                    ", ".join(m.column_labels()),
                ))

        # Find common reflections (some fobs might be missing)
        fobs, fc = fobs.common_sets(fc)

        self.fobs = fobs.data()
        self.fc = fc.amplitudes().data()

        return

    def _plot(self):
        fig = plt.figure()
        ax = fig.add_subplot(111)
        minor_loc = MultipleLocator(10)
        ax.yaxis.set_minor_locator(minor_loc)
        ax.xaxis.set_minor_locator(minor_loc)
        ax.grid(True, which="minor")
        ax.set_axisbelow(True)
        ax.set_aspect("equal")
        ax.set_xlabel(r"$F_c$")
        ax.set_ylabel(r"$F_o$")
        ax.scatter(self.fc, self.fobs, s=1, c="indianred")

        if self.params.max_Fc:
            ax.set_xlim((0, self.params.max_Fc))
            ax.set_ylim((0, self.params.max_Fc))

        if self.params.show_y_eq_x:
            ax.plot(ax.get_xlim(),
                    ax.get_ylim(),
                    ls="--",
                    c="0.0",
                    linewidth=0.8)

        if self.model_fit:
            x = flex.double_range(0, int(ax.get_xlim()[1]))
            y = self.model_fit(x)
            ax.plot(x, y, c="0.0", linewidth=0.8)

        print("Saving plot to {0}".format(self.params.plot_filename))
        plt.savefig(self.params.plot_filename)

    def run(self):
        """Execute the script."""

        # Parse the command line
        self.params, _ = self.parser.parse_args(show_diff_phil=True)

        if self.params.hklin is None:
            self.parser.print_help()
            sys.exit()

        self._extract_data_from_mtz()

        if self.params.fit_hyperbola:
            # fit by NLLS Levenberg Marquardt algorithm
            hyperbola_fit = HyperbolaFit(self.fc, self.fobs)
            hyperbola_fit.restart()
            normal_eqns_solving.levenberg_marquardt_iterations(
                hyperbola_fit,
                track_all=True,
                gradient_threshold=1e-8,
                step_threshold=1e-8,
                tau=1e-4,
                n_max_iterations=200,
            )
            intercept = hyperbola_fit.param[0]

            print(
                "Model fit described by the formula: |Fo| = sqrt(|Fc|^2 + |Fe|^2)"
            )
            print("where |Fe| = {:.5f}\n".format(sqrt(intercept)))

            print("Goodness of fit:")
            gof = hyperbola_fit.goodness_of_fit()
            print("SSE: {:.5g}".format(gof["SSE"]))
            print("R-square: {:.5f}".format(gof["R-square"]))
            print("RMSE: {:.2f}".format(gof["RMSE"]))
            print()

            # Set the model_fit function using the determined intercept
            def hyperbola(x, c):
                return flex.sqrt(flex.pow2(x) + c)

            from functools import partial

            self.model_fit = partial(hyperbola, c=intercept)

        if self.params.plot_filename:
            self._plot()

        return
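
The hyperbolic model |Fo| = sqrt(|Fc|^2 + |Fe|^2) has a single free parameter,
the intercept |Fe|^2, so the Levenberg-Marquardt result can be cross-checked
outside cctbx. A sketch using SciPy (an assumption: the script itself does not
use SciPy, and the data points are invented):

import numpy as np
from scipy.optimize import curve_fit

def hyperbola(fc, fe_sq):
    # |Fo| = sqrt(|Fc|^2 + |Fe|^2), with fe_sq the error-term intercept
    return np.sqrt(fc ** 2 + fe_sq)

fc = np.array([10.0, 50.0, 120.0, 250.0])
fo = np.array([14.0, 51.5, 120.5, 250.2])
(fe_sq,), _ = curve_fit(hyperbola, fc, fo, p0=[1.0])
print("|Fe| = %.3f" % np.sqrt(fe_sq))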
Example #38
class Script(object):
    """ The integration program. """
    def __init__(self, phil=phil_scope):
        """Initialise the script."""
        from dials.util.options import OptionParser

        # The script usage
        usage = "usage: dials.integrate [options] models.expt"

        # Create the parser
        self.parser = OptionParser(
            usage=usage,
            phil=phil,
            epilog=help_message,
            read_experiments=True,
            read_reflections=True,
        )

    def run(self, args=None):
        """ Perform the integration. """
        from dials.util.command_line import heading
        from dials.util.options import flatten_reflections, flatten_experiments
        from dials.util import log
        from time import time
        from dials.util import Sorry

        # Check the number of arguments is correct
        start_time = time()

        # Parse the command line
        params, options = self.parser.parse_args(args=args,
                                                 show_diff_phil=False)
        reference = flatten_reflections(params.input.reflections)
        experiments = flatten_experiments(params.input.experiments)
        if len(reference) == 0 and len(experiments) == 0:
            self.parser.print_help()
            return
        if len(reference) == 0:
            reference = None
        elif len(reference) != 1:
            raise Sorry("more than 1 reflection file was given")
        else:
            reference = reference[0]
        if len(experiments) == 0:
            raise Sorry("no experiment list was specified")

        # Save phil parameters
        if params.output.phil is not None:
            with open(params.output.phil, "w") as outfile:
                outfile.write(self.parser.diff_phil.as_str())

        if __name__ == "__main__":
            # Configure logging
            log.config(params.verbosity,
                       info=params.output.log,
                       debug=params.output.debug_log)

        from dials.util.version import dials_version

        logger.info(dials_version())

        # Log the diff phil
        diff_phil = self.parser.diff_phil.as_str()
        if diff_phil != "":
            logger.info("The following parameters have been modified:\n")
            logger.info(diff_phil)

        for abs_params in params.absorption_correction:
            if abs_params.apply:
                if not (params.integration.debug.output
                        and not params.integration.debug.separate_files):
                    raise Sorry(
                        "Shoeboxes must be saved to integration intermediates to apply an absorption correction. "
                        +
                        "Set integration.debug.output=True, integration.debug.separate_files=False and "
                        +
                        "integration.debug.delete_shoeboxes=True to temporarily store shoeboxes."
                    )

        # Print if we're using a mask
        for i, exp in enumerate(experiments):
            mask = exp.imageset.external_lookup.mask
            if mask.filename is not None:
                if mask.data:
                    logger.info("Using external mask: %s" % mask.filename)
                    for tile in mask.data:
                        logger.info(" Mask has %d pixels masked" %
                                    tile.data().count(False))

        # Print the experimental models
        for i, exp in enumerate(experiments):
            logger.info("=" * 80)
            logger.info("")
            logger.info("Experiments")
            logger.info("")
            logger.info("Models for experiment %d" % i)
            logger.info("")
            logger.info(str(exp.beam))
            logger.info(str(exp.detector))
            if exp.goniometer:
                logger.info(str(exp.goniometer))
            if exp.scan:
                logger.info(str(exp.scan))
            logger.info(str(exp.crystal))

        logger.info("=" * 80)
        logger.info("")
        logger.info(heading("Initialising"))
        logger.info("")

        # Load the data
        reference, rubbish = self.process_reference(reference)

        # Check pixels don't belong to neighbours
        if reference is not None:
            if exp.goniometer is not None and exp.scan is not None:
                self.filter_reference_pixels(reference, experiments)
        logger.info("")

        # Initialise the integrator
        from dials.algorithms.profile_model.factory import ProfileModelFactory
        from dials.algorithms.integration.integrator import IntegratorFactory

        # Modify experiment list if scan range is set.
        experiments, reference = self.split_for_scan_range(
            experiments, reference, params.scan_range)

        # Modify experiment list if exclude images is set
        experiments = self.exclude_images(experiments, params.exclude_images)

        # Predict the reflections
        logger.info("")
        logger.info("=" * 80)
        logger.info("")
        logger.info(heading("Predicting reflections"))
        logger.info("")
        predicted = flex.reflection_table.from_predictions_multi(
            experiments,
            dmin=params.prediction.d_min,
            dmax=params.prediction.d_max,
            margin=params.prediction.margin,
            force_static=params.prediction.force_static,
            padding=params.prediction.padding,
        )

        # Match reference with predicted
        if reference:
            matched, reference, unmatched = predicted.match_with_reference(
                reference)
            assert len(matched) == len(predicted)
            assert matched.count(True) <= len(reference)
            if matched.count(True) == 0:
                raise Sorry("""
          Invalid input for reference reflections.
          Zero reference spots were matched to predictions
        """)
            elif len(unmatched) != 0:
                logger.info("")
                logger.info("*" * 80)
                logger.info(
                    "Warning: %d reference spots were not matched to predictions"
                    % (len(unmatched)))
                logger.info("*" * 80)
                logger.info("")
            rubbish.extend(unmatched)

            if len(experiments) > 1:
                # filter out any experiments without matched reference reflections
                # f_: filtered
                from dxtbx.model.experiment_list import ExperimentList

                f_reference = flex.reflection_table()
                f_predicted = flex.reflection_table()
                f_rubbish = flex.reflection_table()
                f_experiments = ExperimentList()
                good_expt_count = 0

                def refl_extend(src, dest, eid):
                    tmp = src.select(src["id"] == eid)
                    tmp["id"] = flex.int(len(tmp), good_expt_count)
                    dest.extend(tmp)

                for expt_id, experiment in enumerate(experiments):
                    if len(reference.select(reference["id"] == expt_id)) != 0:
                        refl_extend(reference, f_reference, expt_id)
                        refl_extend(predicted, f_predicted, expt_id)
                        refl_extend(rubbish, f_rubbish, expt_id)
                        f_experiments.append(experiment)
                        good_expt_count += 1
                    else:
                        logger.info(
                            "Removing experiment %d: no reference reflections matched to predictions"
                            % expt_id)

                reference = f_reference
                predicted = f_predicted
                experiments = f_experiments
                rubbish = f_rubbish

        # Select a random sample of the predicted reflections
        if not params.sampling.integrate_all_reflections:
            predicted = self.sample_predictions(experiments, predicted, params)

        # Compute the profile model
        if (params.create_profile_model and reference is not None
                and "shoebox" in reference):
            experiments = ProfileModelFactory.create(params, experiments,
                                                     reference)
        else:
            experiments = ProfileModelFactory.create(params, experiments)
            for expr in experiments:
                if expr.profile is None:
                    raise Sorry("No profile information in experiment list")
        del reference

        # Compute the bounding box
        predicted.compute_bbox(experiments)

        # Create the integrator
        logger.info("")
        integrator = IntegratorFactory.create(params, experiments, predicted)

        # Integrate the reflections
        reflections = integrator.integrate()

        # Append rubbish data onto the end
        if rubbish is not None and params.output.include_bad_reference:
            mask = flex.bool(len(rubbish), True)
            rubbish.unset_flags(mask, rubbish.flags.integrated_sum)
            rubbish.unset_flags(mask, rubbish.flags.integrated_prf)
            rubbish.set_flags(mask, rubbish.flags.bad_reference)
            reflections.extend(rubbish)

        # Correct integrated intensities for absorption correction, if necessary
        for abs_params in params.absorption_correction:
            if abs_params.apply and abs_params.algorithm == "fuller_kapton":
                from dials.algorithms.integration.kapton_correction import (
                    multi_kapton_correction, )

                experiments, reflections = multi_kapton_correction(
                    experiments,
                    reflections,
                    abs_params.fuller_kapton,
                    logger=logger)()

        if params.significance_filter.enable:
            from dials.algorithms.integration.stills_significance_filter import (
                SignificanceFilter, )
            from dxtbx.model.experiment_list import ExperimentList

            sig_filter = SignificanceFilter(params)
            filtered_refls = sig_filter(experiments, reflections)
            accepted_expts = ExperimentList()
            accepted_refls = flex.reflection_table()
            logger.info(
                "Removed %d reflections out of %d when applying significance filter"
                % (len(reflections) - len(filtered_refls), len(reflections)))
            for expt_id, expt in enumerate(experiments):
                refls = filtered_refls.select(filtered_refls["id"] == expt_id)
                if len(refls) > 0:
                    accepted_expts.append(expt)
                    refls["id"] = flex.int(len(refls), len(accepted_expts) - 1)
                    accepted_refls.extend(refls)
                else:
                    logger.info(
                        "Removed experiment %d which has no reflections left after applying significance filter"
                        % expt_id)

            if len(accepted_refls) == 0:
                raise Sorry(
                    "No reflections left after applying significance filter")
            experiments = accepted_expts
            reflections = accepted_refls

        # Delete the shoeboxes used for intermediate calculations, if requested
        if params.integration.debug.delete_shoeboxes and "shoebox" in reflections:
            del reflections["shoebox"]

        # Save the reflections
        self.save_reflections(reflections, params.output.reflections)
        self.save_experiments(experiments, params.output.experiments)

        # Write a report if requested
        if params.output.report is not None:
            integrator.report().as_file(params.output.report)

        # Print the total time taken
        logger.info("\nTotal time taken: %f" % (time() - start_time))

        return experiments, reflections

    def process_reference(self, reference):
        """ Load the reference spots. """
        from time import time
        from dials.util import Sorry

        if reference is None:
            return None, None
        st = time()
        assert "miller_index" in reference
        assert "id" in reference
        logger.info("Processing reference reflections")
        logger.info(" read %d strong spots" % len(reference))
        mask = reference.get_flags(reference.flags.indexed)
        rubbish = reference.select(~mask)
        if mask.count(False) > 0:
            reference.del_selected(~mask)
            logger.info(" removing %d unindexed reflections" %
                        mask.count(False))
        if len(reference) == 0:
            raise Sorry("""
        Invalid input for reference reflections.
        Expected > %d indexed spots, got %d
      """ % (0, len(reference)))
        mask = reference.get_flags(reference.flags.bad_for_refinement,
                                   all=False)
        if mask.count(True) > 0:
            rubbish.extend(reference.select(mask))
            reference.del_selected(mask)
            logger.info(
                " removing %d reflections marked as bad for refinement" %
                mask.count(True))
        mask = reference["miller_index"] == (0, 0, 0)
        if mask.count(True) > 0:
            rubbish.extend(reference.select(mask))
            reference.del_selected(mask)
            logger.info(" removing %d reflections with hkl (0,0,0)" %
                        mask.count(True))
        mask = reference["id"] < 0
        if mask.count(True) > 0:
            raise Sorry("""
        Invalid input for reference reflections.
        %d reference spots have an invalid experiment id
      """ % mask.count(True))
        if (reference["panel"]
                == reference["shoebox"].panels()).count(False) > 0:
            raise RuntimeError(
                'reflection table "panel" column does not match "shoebox" panel'
            )
        logger.info(" using %d indexed reflections" % len(reference))
        logger.info(" found %d junk reflections" % len(rubbish))
        logger.info(" time taken: %g" % (time() - st))
        return reference, rubbish

    def filter_reference_pixels(self, reference, experiments):
        """
        Set any pixel closer to other reflections to background

        """
        modified_count = 0
        for experiment, indices in reference.iterate_experiments_and_indices(
                experiments):
            subset = reference.select(indices)
            modified = subset["shoebox"].mask_neighbouring(
                subset["miller_index"],
                experiment.beam,
                experiment.detector,
                experiment.goniometer,
                experiment.scan,
                experiment.crystal,
            )
            modified_count += modified.count(True)
            reference.set_selected(indices, subset)
        logger.info(" masked neighbouring pixels in %d shoeboxes" %
                    modified_count)
        return reference

    def save_reflections(self, reflections, filename):
        """ Save the reflections to file. """
        from time import time

        st = time()
        logger.info("Saving %d reflections to %s" %
                    (len(reflections), filename))
        reflections.as_file(filename)
        logger.info(" time taken: %g" % (time() - st))

    def save_experiments(self, experiments, filename):
        """ Save the profile model parameters. """
        from time import time

        st = time()
        logger.info("Saving the experiments to %s" % filename)
        experiments.as_file(filename)
        logger.info(" time taken: %g" % (time() - st))

    def sample_predictions(self, experiments, predicted, params):
        """ Select a random sample of the predicted reflections to integrate. """

        nref_per_degree = params.sampling.reflections_per_degree
        min_sample_size = params.sampling.minimum_sample_size
        max_sample_size = params.sampling.maximum_sample_size

        # this code is very similar to David's code in algorithms/refinement/reflection_manager.py!

        # constants
        from math import pi

        RAD2DEG = 180.0 / pi

        working_isel = flex.size_t()
        for iexp, exp in enumerate(experiments):

            sel = predicted["id"] == iexp
            isel = sel.iselection()
            # refs = self._reflections.select(sel)
            nrefs = sample_size = len(isel)

            # set sample size according to nref_per_degree (per experiment)
            if exp.scan and nref_per_degree:
                sweep_range_rad = exp.scan.get_oscillation_range(deg=False)
                width = abs(sweep_range_rad[1] - sweep_range_rad[0]) * RAD2DEG
                sample_size = int(nref_per_degree * width)
            else:
                sweep_range_rad = None

            # adjust sample size if below the chosen limit
            sample_size = max(sample_size, min_sample_size)

            # set maximum sample size if requested
            if max_sample_size:
                sample_size = min(sample_size, max_sample_size)

            # determine subset and collect indices
            if sample_size < nrefs:
                isel = isel.select(flex.random_selection(nrefs, sample_size))
            working_isel.extend(isel)

        # create subset
        return predicted.select(working_isel)

    def exclude_images(self, experiments, exclude_images):

        if exclude_images is not None and len(exclude_images) > 0:
            for experiment in experiments:
                imageset = experiment.imageset
                for index in exclude_images:
                    imageset.mark_for_rejection(index, True)

        return experiments

    def split_for_scan_range(self, experiments, reference, scan_range):
        """ Update experiments when scan range is set. """
        from dxtbx.model.experiment_list import ExperimentList
        from dxtbx.model.experiment_list import Experiment

        # Only do anything if the scan range is set
        if scan_range is not None and len(scan_range) > 0:

            # Ensure that all experiments have the same imageset and scan
            iset = [e.imageset for e in experiments]
            scan = [e.scan for e in experiments]
            assert all(x == iset[0] for x in iset)
            assert all(x == scan[0] for x in scan)

            # Get the imageset and scan
            iset = experiments[0].imageset
            scan = experiments[0].scan

            # Get the array range
            if scan is not None:
                frames_start, frames_end = scan.get_array_range()
                assert scan.get_num_images() == len(iset)
            else:
                frames_start, frames_end = (0, len(iset))

            # Create the new lists
            new_experiments = ExperimentList()
            new_reference_all = reference.split_by_experiment_id()
            new_reference = flex.reflection_table()
            for i in range(len(experiments) - len(new_reference_all)):
                new_reference_all.append(flex.reflection_table())
            assert len(new_reference_all) == len(experiments)

            # Loop through all the scan ranges and create a new experiment list with
            # the requested scan ranges.
            for scan_start, scan_end in scan_range:
                # Validate the requested scan range
                if scan_end == scan_start:
                    raise Sorry(
                        "Scan range end must be higher than start; pass {},{} for single image"
                        .format(scan_start, scan_start + 1))
                if scan_end < scan_start:
                    raise Sorry("Scan range must be in ascending order")
                elif scan_start < frames_start or scan_end > frames_end:
                    raise Sorry(
                        "Scan range must be within image range {}..{}".format(
                            frames_start, frames_end))

                assert scan_end > scan_start
                assert scan_start >= frames_start
                assert scan_end <= frames_end

                index_start = scan_start - frames_start
                index_end = index_start + (scan_end - scan_start)
                assert index_start < index_end
                assert index_start >= 0
                assert index_end <= len(iset)
                new_iset = iset[index_start:index_end]
                if scan is None:
                    new_scan = None
                else:
                    new_scan = scan[index_start:index_end]
                for i, e1 in enumerate(experiments):
                    e2 = Experiment()
                    e2.beam = e1.beam
                    e2.detector = e1.detector
                    e2.goniometer = e1.goniometer
                    e2.crystal = e1.crystal
                    e2.profile = e1.profile
                    e2.imageset = new_iset
                    e2.scan = new_scan
                    new_reference_all[i]["id"] = flex.int(
                        len(new_reference_all[i]), len(new_experiments))
                    new_reference.extend(new_reference_all[i])
                    new_experiments.append(e2)
            experiments = new_experiments
            reference = new_reference

            # Print some information
            logger.info(
                "Modified experiment list to integrate over requested scan range"
            )
            for scan_start, scan_end in scan_range:
                logger.info(" scan_range = %d -> %d" % (scan_start, scan_end))
            logger.info("")

        # Return the experiments
        return experiments, reference
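The pattern used by refl_extend and split_for_scan_range above (select rows by experiment id, renumber the "id" column so it stays contiguous, then extend the output table) can be exercised in isolation. A minimal sketch with dials.array_family.flex; the toy table below is illustrative, not part of the original script:

from dials.array_family import flex

# Toy reflection table with rows belonging to two experiments
refl = flex.reflection_table()
refl["id"] = flex.int([0, 0, 1, 1, 1])

# Keep only experiment 1 and renumber its rows to id 0,
# exactly as refl_extend does for each surviving experiment
subset = refl.select(refl["id"] == 1)
subset["id"] = flex.int(len(subset), 0)
assert list(subset["id"]) == [0, 0, 0]
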
Example #39
0
def run(args):
    import libtbx.load_env
    usage = """\
%s datablock.json reflections.pickle [options]""" % libtbx.env.dispatcher_name
    from dials.util.options import OptionParser
    from dials.util.options import flatten_datablocks
    from dials.util.options import flatten_experiments
    from dials.util.options import flatten_reflections
    from scitbx.array_family import flex
    from scitbx import matrix
    from libtbx.utils import Sorry
    parser = OptionParser(usage=usage,
                          phil=master_phil_scope,
                          read_datablocks=True,
                          read_experiments=True,
                          read_reflections=True,
                          check_format=False)

    params, options = parser.parse_args(show_diff_phil=True)
    datablocks = flatten_datablocks(params.input.datablock)
    reflections = flatten_reflections(params.input.reflections)
    experiments = flatten_experiments(params.input.experiments)
    if len(datablocks) == 1:
        imageset = datablocks[0].extract_imagesets()[0]
    elif len(datablocks) > 1:
        raise Sorry("Only one DataBlock can be processed at a time")
    elif len(experiments.imagesets()) > 0:
        imageset = experiments.imagesets()[0]
        imageset.set_detector(experiments[0].detector)
        imageset.set_beam(experiments[0].beam)
        imageset.set_goniometer(experiments[0].goniometer)
    else:
        parser.print_help()
        return

    detector = imageset.get_detector()
    scan = imageset.get_scan()

    panel_origin_shifts = {0: (0, 0, 0)}
    try:
        hierarchy = detector.hierarchy()
    except AttributeError:
        hierarchy = None
    for i_panel in range(1, len(detector)):
        origin_shift = matrix.col(detector[0].get_origin()) \
          - matrix.col(detector[i_panel].get_origin())
        panel_origin_shifts[i_panel] = origin_shift

    observed_xyz = flex.vec3_double()
    predicted_xyz = flex.vec3_double()

    for reflection_list in reflections:

        if len(params.scan_range):
            sel = flex.bool(len(reflection_list), False)

            xyzcal_px = None
            xyzobs_px = None

            if 'xyzcal.px' in reflection_list:
                xyzcal_px = reflection_list['xyzcal.px']
            if 'xyzobs.px.value' in reflection_list:
                xyzobs_px = reflection_list['xyzobs.px.value']

            if xyzcal_px is not None and not xyzcal_px.norms().all_eq(0):
                centroids_frame = xyzcal_px.parts()[2]
            elif xyzobs_px is not None and not xyzobs_px.norms().all_eq(0):
                centroids_frame = xyzobs_px.parts()[2]
            else:
                raise Sorry("No pixel coordinates given in input reflections.")

            reflections_in_range = False
            for scan_range in params.scan_range:
                if scan_range is None:
                    continue
                range_start, range_end = scan_range
                sel |= ((centroids_frame >= range_start) &
                        (centroids_frame < range_end))
            reflection_list = reflection_list.select(sel)
        if params.first_n_reflections is not None:
            centroid_positions = reflection_list.centroid_position()
            centroids_frame = centroid_positions.parts()[2]
            perm = flex.sort_permutation(centroids_frame)
            perm = perm[:min(reflection_list.size(),
                             params.first_n_reflections)]
            reflection_list = reflection_list.select(perm)
        if params.crystal_id is not None:
            reflection_list = reflection_list.select(
                reflection_list['id'] == params.crystal_id)

        xyzcal_px = None
        xyzobs_px = None
        xyzobs_mm = None
        xyzcal_mm = None

        if 'xyzcal.px' in reflection_list:
            xyzcal_px = reflection_list['xyzcal.px']
        if 'xyzobs.px.value' in reflection_list:
            xyzobs_px = reflection_list['xyzobs.px.value']
        if 'xyzcal.mm' in reflection_list:
            xyzcal_mm = reflection_list['xyzcal.mm']
        if 'xyzobs.mm.value' in reflection_list:
            xyzobs_mm = reflection_list['xyzobs.mm.value']

        panel_ids = reflection_list['panel']
        if xyzobs_mm is None and xyzobs_px is not None:
            xyzobs_mm = flex.vec3_double()
            for i_panel in range(len(detector)):
                xyzobs_px_panel = xyzobs_px.select(panel_ids == i_panel)

                from dials.algorithms.centroid import centroid_px_to_mm_panel
                xyzobs_mm_panel, _, _ = centroid_px_to_mm_panel(
                    detector[i_panel], scan, xyzobs_px_panel,
                    flex.vec3_double(xyzobs_px_panel.size()),
                    flex.vec3_double(xyzobs_px_panel.size()))
                xyzobs_mm.extend(xyzobs_mm_panel)

        if xyzobs_mm is not None:
            observed_xyz.extend(xyzobs_mm)
        if xyzcal_mm is not None:
            predicted_xyz.extend(xyzcal_mm)

    obs_x, obs_y, _ = observed_xyz.parts()
    pred_x, pred_y, _ = predicted_xyz.parts()

    try:
        import matplotlib

        if not params.output.show_plot:
            # http://matplotlib.org/faq/howto_faq.html#generate-images-without-having-a-window-appear
            matplotlib.use('Agg')  # use a non-interactive backend
        from matplotlib import pyplot
    except ImportError:
        raise Sorry("matplotlib must be installed to generate a plot.")

    fig = pyplot.figure()
    fig.set_size_inches(params.output.size_inches)
    fig.set_dpi(params.output.dpi)
    pyplot.axes().set_aspect('equal')
    marker_size = params.output.marker_size
    if obs_x.size():
        pyplot.scatter(obs_x,
                       obs_y,
                       marker='o',
                       c='white',
                       s=marker_size,
                       alpha=1)
    if pred_x.size():
        pyplot.scatter(pred_x, pred_y, marker='+', s=marker_size, c='blue')
    xmax = max([
        detector[i_panel].get_image_size_mm()[0] +
        panel_origin_shifts[i_panel][0] for i_panel in range(len(detector))
    ])
    ymax = max([
        detector[i_panel].get_image_size_mm()[1] +
        panel_origin_shifts[i_panel][1] for i_panel in range(len(detector))
    ])
    try:
        beam_centre = hierarchy.get_beam_centre(imageset.get_beam().get_s0())
    except Exception:
        beam_centre = detector[0].get_beam_centre(imageset.get_beam().get_s0())
    pyplot.scatter([beam_centre[0]], [beam_centre[1]],
                   marker='+',
                   c='blue',
                   s=100)
    pyplot.xlim(0, xmax)
    pyplot.ylim(0, ymax)
    pyplot.gca().invert_yaxis()
    pyplot.title('Centroid x,y-coordinates')
    pyplot.xlabel('x-coordinate (mm)')
    pyplot.ylabel('y-coordinate (mm)')
    if params.output.file_name is not None:
        pyplot.savefig(params.output.file_name,
                       dpi=params.output.dpi,
                       bbox_inches='tight')
    if params.output.show_plot:
        pyplot.show()
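Example #39 leans on flex.vec3_double throughout: centroid coordinates are accumulated into a single array, then split into per-component flex.double arrays with parts(). A minimal sketch of that round trip, with made-up coordinates:

from scitbx.array_family import flex

xyz = flex.vec3_double([(1.0, 2.0, 0.5), (4.0, 5.0, 1.5)])
x, y, frame = xyz.parts()              # three flex.double arrays
sel = (frame >= 0.0) & (frame < 1.0)   # e.g. a scan-range filter
print(list(xyz.select(sel)))           # [(1.0, 2.0, 0.5)]
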
Example #40
0
def run(args=None):
    from dials.util.options import OptionParser, flatten_experiments

    usage = "dials.shadow_plot [options] models.expt"

    parser = OptionParser(
        usage=usage,
        phil=phil_scope,
        read_experiments=True,
        check_format=True,
        epilog=help_message,
    )

    params, options = parser.parse_args(args, show_diff_phil=True)
    experiments = flatten_experiments(params.input.experiments)

    if len(experiments) == 0:
        parser.print_help()
        sys.exit(0)

    assert len(experiments) == 1
    imagesets = experiments.imagesets()

    imageset = imagesets[0]
    goniometer = imageset.get_goniometer()
    detector = imageset.get_detector()
    scan = imageset.get_scan()
    masker = imageset.masker()
    if masker is None:
        raise Sorry("Goniometer model does not support shadowing.")
    angles = goniometer.get_angles()
    names = goniometer.get_names()
    scan_axis = goniometer.get_scan_axis()
    phi = angles[0]

    if params.step_size is libtbx.Auto:
        if params.mode == "1d":
            step = scan.get_oscillation()[1]
        else:
            step = 10
    else:
        step = params.step_size

    if params.mode == "1d":
        if params.oscillation_range is not None:
            start, end = params.oscillation_range
        else:
            start, end = scan.get_oscillation_range()

        scan_points = flex.double(libtbx.utils.frange(start, end, step=step))
        n_px_shadowed = flex.double(scan_points.size(), 0)
        n_px_tot = flex.double(scan_points.size(), 0)

        assert len(angles) == 3
        for i, scan_angle in enumerate(scan_points):
            shadow = masker.project_extrema(detector, scan_angle)
            for p_id in range(len(detector)):
                px_x, px_y = detector[p_id].get_image_size()
                n_px_tot[i] += px_x * px_y
                if shadow[p_id].size() < 4:
                    continue
                n_px_shadowed[i] += polygon_area(shadow[p_id])

    else:
        kappa_values = flex.double(libtbx.utils.frange(0, 360, step=step))
        omega_values = flex.double(libtbx.utils.frange(0, 360, step=step))
        grid = flex.grid(kappa_values.size(), omega_values.size())
        n_px_shadowed = flex.double(grid, 0)
        n_px_tot = flex.double(grid, 0)

        assert len(angles) == 3
        for i, kappa in enumerate(kappa_values):
            for j, omega in enumerate(omega_values):
                masker.set_goniometer_angles((phi, kappa, omega))
                masker.extrema_at_scan_angle(omega)
                shadow = masker.project_extrema(detector, omega)
                for p_id in range(len(detector)):
                    px_x, px_y = detector[p_id].get_image_size()
                    n_px_tot[i, j] += px_x * px_y
                    if shadow[p_id].size() < 4:
                        continue
                    n_px_shadowed[i, j] += polygon_area(shadow[p_id])

    fraction_shadowed = n_px_shadowed / n_px_tot

    if params.output.json is not None:
        if params.mode == "2d":
            raise Sorry("json output not supported for mode=2d")

        print("Writing json output to %s" % params.output.json)
        d = {
            "scan_points": list(scan_points),
            "fraction_shadowed": list(fraction_shadowed),
        }
        with open(params.output.json, "w") as f:
            json.dump(d, f)

    if params.output.plot is not None:
        import matplotlib

        matplotlib.use("Agg")
        from matplotlib import pyplot as plt

        plt.style.use("ggplot")

        if params.mode == "1d":
            plt.plot(
                scan_points.as_numpy_array(), fraction_shadowed.as_numpy_array() * 100
            )
            plt.xlabel("%s angle (degrees)" % names[scan_axis])
            plt.ylabel("Shadowed area (%)")
            if params.y_max is not None:
                plt.ylim(0, params.y_max)
            else:
                plt.ylim(0, plt.ylim()[1])
        else:
            fig = plt.imshow(
                fraction_shadowed.as_numpy_array() * 100, interpolation="bicubic"
            )
            plt.xlabel("%s angle (degrees)" % names[2])
            plt.ylabel("%s angle (degrees)" % names[1])
            plt.xlim(0, 360 / step - 0.5)
            plt.ylim(0, 360 / step - 0.5)

            ticks = (0, 50, 100, 150, 200, 250, 300, 350)
            fig.axes.xaxis.set_major_locator(
                matplotlib.ticker.FixedLocator([k / step for k in ticks])
            )
            fig.axes.yaxis.set_major_locator(
                matplotlib.ticker.FixedLocator([k / step for k in ticks])
            )
            fig.axes.set_xticklabels(["%.0f" % k for k in ticks])
            fig.axes.set_yticklabels(["%.0f" % k for k in ticks])
            cbar = plt.colorbar()
            cbar.set_label("Shadowed area (%)")

        if params.output.size_inches is not None:
            fig = plt.gcf()
            fig.set_size_inches(params.output.size_inches)
        plt.tight_layout()
        print("Saving plot to %s" % params.output.plot)
        plt.savefig(params.output.plot)
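polygon_area is not defined in the excerpt above; a plausible stand-in is the shoelace formula applied to the projected shadow vertices. A self-contained sketch under that assumption (the (x, y) tuple format of the vertices is also assumed):

def polygon_area(vertices):
    # Shoelace formula: area of a simple polygon from its (x, y) vertices
    n = len(vertices)
    area = 0.0
    for i in range(n):
        x0, y0 = vertices[i]
        x1, y1 = vertices[(i + 1) % n]
        area += x0 * y1 - x1 * y0
    return abs(area) / 2.0

# A 2 x 1 rectangle has area 2
assert polygon_area([(0, 0), (2, 0), (2, 1), (0, 1)]) == 2.0
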
Example #41
0
class Script(object):
    """A class for running the script."""
    def __init__(self):
        """Initialise the script."""
        from libtbx.phil import parse

        # The phil scope
        phil_scope = parse(
            """
scale = unit *max_cell ewald_sphere_radius
    .type = choice
    .help = "Choose the scale for the direction vector in orthogonal"
            "coordinates prior to transformation into fractional"
            "coordinates [uvw]"

plot_filename = None
    .type = str
    .help = "Filename for a plot of angle between neighbouring frames"
            "(set to None for no plot)"
""",
            process_includes=True,
        )

        usage = "dials.frame_orientations refined.expt refined.refl"

        # Create the parser
        self.parser = OptionParser(
            usage=usage,
            phil=phil_scope,
            read_experiments=True,
            check_format=False,
            epilog=__doc__,
        )

    def run(self):
        """Execute the script."""

        # Parse the command line
        self.params, _ = self.parser.parse_args(show_diff_phil=True)

        if not self.params.input.experiments:
            self.parser.print_help()
            sys.exit()

        # Try to load the models
        experiments = flatten_experiments(self.params.input.experiments)
        nexp = len(experiments)
        if nexp == 0:
            self.parser.print_help()
            sys.exit("No Experiments found in the input")

        # Set up a plot if requested
        if self.params.plot_filename:
            plt.figure()

        header = [
            "Image",
            "Beam direction (xyz)",
            "Zone axis [uvw]",
            "Angles between beam\nand axes a, b, c (deg)",
            "Angle from\nprevious image (deg)",
        ]
        for iexp, exp in enumerate(experiments):
            print("For Experiment id = {}".format(iexp))
            print(exp.beam)
            print(exp.crystal)
            print(exp.scan)

            if self.params.scale == "ewald_sphere_radius":
                scale = 1.0 / exp.beam.get_wavelength()
            elif self.params.scale == "max_cell":
                uc = exp.crystal.get_unit_cell()
                scale = max(uc.parameters()[0:3])
            else:
                scale = 1.0
            print("Beam direction scaled by {0} = {1:.3f} to "
                  "calculate zone axis\n".format(self.params.scale, scale))

            dat = extract_experiment_data(exp, scale)
            images = dat["images"]
            directions = dat["directions"]
            zone_axes = dat["zone_axes"]
            real_space_axes = dat["real_space_axes"]

            # calculate the angle between the beam and each crystal axis
            axis_angles = []
            for d, rsa in zip(directions, real_space_axes):
                angles = [d.angle(a, deg=True) for a in rsa]
                axis_angles.append("{:.2f} {:.2f} {:.2f}".format(*angles))

            # calculate the orientation offset between each image
            offset = [
                e1.angle(e2, deg=True)
                for e1, e2 in zip(zone_axes[:-1], zone_axes[1:])
            ]
            str_off = ["---"] + ["{:.8f}".format(e) for e in offset]

            rows = []
            for i, d, z, a, o in zip(
                    images,
                    directions,
                    zone_axes,
                    axis_angles,
                    str_off,
            ):
                row = [
                    str(i),
                    "{:.8f} {:.8f} {:.8f}".format(*d.elems),
                    "{:.8f} {:.8f} {:.8f}".format(*z.elems),
                    a,
                    o,
                ]
                rows.append(row)

            # Print the table
            print(tabulate(rows, header))

            # Add to the plot, if requested
            if self.params.plot_filename:
                plt.scatter(images[1:], offset, s=1)

        # Finish and save plot, if requested
        if self.params.plot_filename:
            plt.xlabel("Image number")
            plt.ylabel(r"Angle from previous image $\left(^\circ\right)$")
            plt.title(r"Angle between neighbouring images")
            print("Saving plot to {}".format(self.params.plot_filename))
            plt.savefig(self.params.plot_filename)

        print()
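The angle columns in Example #41 come from scitbx.matrix.col.angle, which returns the unsigned angle between two vectors, optionally in degrees. A quick check:

from scitbx import matrix

a = matrix.col((1.0, 0.0, 0.0))
b = matrix.col((0.0, 1.0, 0.0))
print(a.angle(b, deg=True))  # 90.0
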
Example #42
0
    from dials.util.options import OptionParser
    from dials.util.options import flatten_reflections
    from dials.util.options import flatten_experiments

    dials.util.log.print_banner()
    usage_message = "dials.image_viewer models.expt [observations.refl]"
    parser = OptionParser(
        usage=usage_message,
        phil=phil_scope,
        read_experiments=True,
        read_reflections=True,
        read_experiments_from_images=True,
        epilog=help_message,
    )
    params, options = parser.parse_args(show_diff_phil=True)
    experiments = [x.data for x in params.input.experiments]
    reflections = flatten_reflections(params.input.reflections)

    if len(experiments) == 0:
        parser.print_help()
        exit(0)

    flat_expts = flatten_experiments(params.input.experiments)
    if params.load_models:
        if any(e.detector is None for e in flat_expts):
            sys.exit("Error: experiment has no detector")
        if any(e.beam is None for e in flat_expts):
            sys.exit("Error: experiment has no beam")

    if params.mask is not None:
Example #43
0
class Script(object):
    '''A class for running the script.'''
    def __init__(self):
        self.mpi_helper = mpi_helper()
        self.mpi_logger = mpi_logger()

    def __del__(self):
        self.mpi_helper.finalize()

    def parse_input(self):
        '''Parse input at rank 0 and broadcast the input parameters and options to all ranks'''

        if self.mpi_helper.rank == 0:
            from xfel.merging.application.phil.phil import phil_scope
            help_message = '''Merge xfel data.'''

            # The script usage
            import libtbx.load_env
            self.usage = "usage: %s [options] [param.phil] " % libtbx.env.dispatcher_name
            from dials.util.options import OptionParser
            # Create the parser
            self.parser = OptionParser(usage=self.usage,
                                       phil=phil_scope,
                                       epilog=help_message)

            # Parse the command line. quick_parse is required for MPI compatibility
            params, options = self.parser.parse_args(show_diff_phil=True,
                                                     quick_parse=True)

            # Log the modified phil parameters
            diff_phil_str = self.parser.diff_phil.as_str()
            if diff_phil_str != "":
                self.mpi_logger.main_log(
                    "The following parameters have been modified:\n%s" %
                    diff_phil_str)

            # prepare for transmitting input parameters to all ranks
            transmitted = dict(params=params, options=options)
        else:
            transmitted = None

        # broadcast parameters and options to all ranks
        self.mpi_logger.log("Broadcasting input parameters...")
        self.mpi_logger.log_step_time("BROADCAST_INPUT_PARAMS")

        transmitted = self.mpi_helper.comm.bcast(transmitted, root=0)

        self.params = transmitted['params']
        self.options = transmitted['options']

        self.mpi_logger.set_log_file_paths(self.params)

        self.mpi_logger.log("Received input parameters and options")
        self.mpi_logger.log_step_time("BROADCAST_INPUT_PARAMS", True)

    def run(self):

        import datetime
        time_now = datetime.datetime.now()

        self.mpi_logger.log(str(time_now))
        if self.mpi_helper.rank == 0:
            self.mpi_logger.main_log(str(time_now))

        self.mpi_logger.log_step_time("TOTAL")

        self.mpi_logger.log_step_time("PARSE_INPUT_PARAMS")
        self.parse_input()
        self.mpi_logger.log_step_time("PARSE_INPUT_PARAMS", True)

        # Create the workers using the factories
        self.mpi_logger.log_step_time("CREATE_WORKERS")
        from xfel.merging import application
        import importlib

        workers = []
        steps = default_steps if self.params.dispatch.step_list is None else self.params.dispatch.step_list
        for step in steps:
            step_factory_name = step
            step_additional_info = []

            step_info = step.split(' ')
            assert len(step_info) > 0
            if len(step_info) > 1:
                step_factory_name = step_info[0]
                step_additional_info = step_info[1:]

            factory = importlib.import_module('xfel.merging.application.' +
                                              step_factory_name + '.factory')
            workers.extend(
                factory.factory.from_parameters(self.params,
                                                step_additional_info,
                                                mpi_helper=self.mpi_helper,
                                                mpi_logger=self.mpi_logger))

        # Perform phil validation up front
        for worker in workers:
            worker.validate()
        self.mpi_logger.log_step_time("CREATE_WORKERS", True)

        # Do the work
        experiments = reflections = None
        step = 0
        while (workers):
            worker = workers.pop(0)
            self.mpi_logger.log_step_time("STEP_" + worker.__repr__())
            # Log worker name, i.e. execution step name
            step += 1
            if step > 1:
                self.mpi_logger.log('')
            step_desc = "STEP %d: %s" % (step, worker)
            self.mpi_logger.log(step_desc)

            if self.mpi_helper.rank == 0:
                if step > 1:
                    self.mpi_logger.main_log('')
                self.mpi_logger.main_log(step_desc)

            # Execute worker
            experiments, reflections = worker.run(experiments, reflections)
            self.mpi_logger.log_step_time("STEP_" + worker.__repr__(), True)

        if self.params.output.save_experiments_and_reflections:
            from dxtbx.model.experiment_list import ExperimentListDumper
            import os
            if 'id' not in reflections:
                from dials.array_family import flex
                id_ = flex.int(len(reflections), -1)
                for expt_number, expt in enumerate(experiments):
                    sel = reflections['exp_id'] == expt.identifier
                    id_.set_selected(sel, expt_number)
                reflections['id'] = id_

            reflections.as_pickle(
                os.path.join(
                    self.params.output.output_dir, self.params.output.prefix +
                    "_%06d.refl" % self.mpi_helper.rank))
            dump = ExperimentListDumper(experiments)
            dump.as_file(
                os.path.join(
                    self.params.output.output_dir, self.params.output.prefix +
                    "_%06d.expt" % self.mpi_helper.rank))

        self.mpi_logger.log_step_time("TOTAL", True)
Example #44
0
class Script(object):
    ''' Encapsulate the script in a class. '''
    def __init__(self):
        ''' Initialise the script. '''
        from dials.util.options import OptionParser
        import libtbx.load_env

        # The script usage
        usage  = "usage: %s [options] experiments.json spots.pickle" \
          % libtbx.env.dispatcher_name
        self.parser = OptionParser(usage=usage,
                                   epilog=help_message,
                                   phil=phil_scope,
                                   read_reflections=True,
                                   read_experiments=True,
                                   check_format=False)

    def run(self):
        ''' Run the script. '''
        from dials.algorithms.profile_model.factory import ProfileModelFactory
        from dials.util.command_line import Command
        from dials.array_family import flex
        from dials.util.options import flatten_reflections, flatten_experiments
        from dxtbx.model.experiment_list import ExperimentListDumper
        from libtbx.utils import Sorry
        from dials.util import log

        log.config()

        # Parse the command line
        params, options = self.parser.parse_args(show_diff_phil=True)
        reflections = flatten_reflections(params.input.reflections)
        experiments = flatten_experiments(params.input.experiments)
        if len(reflections) == 0 and len(experiments) == 0:
            self.parser.print_help()
            return
        if len(reflections) != 1:
            raise Sorry('exactly 1 reflection table must be specified')
        if len(experiments) == 0:
            raise Sorry('no experiments were specified')
        if ('background.mean' not in reflections[0]
                and params.subtract_background):
            raise Sorry(
                'for subtract_background need background.mean in reflections')

        reflections, _ = self.process_reference(reflections[0], params)

        # Check pixels don't belong to neighbours
        self.filter_reference_pixels(reflections, experiments)

        # Predict the reflections
        logger.info("")
        logger.info("=" * 80)
        logger.info("")
        logger.info("Predicting reflections")
        logger.info("")
        predicted = flex.reflection_table.from_predictions_multi(
            experiments,
            dmin=params.prediction.d_min,
            dmax=params.prediction.d_max,
            margin=params.prediction.margin,
            force_static=params.prediction.force_static,
            padding=params.prediction.padding)

        # Match with predicted
        matched, reflections, unmatched = predicted.match_with_reference(
            reflections)
        assert (len(matched) == len(predicted))
        assert (matched.count(True) <= len(reflections))
        if matched.count(True) == 0:
            raise Sorry('''
        Invalid input for reference reflections.
        Zero reference spots were matched to predictions
      ''')
        elif len(unmatched) != 0:
            logger.info('')
            logger.info('*' * 80)
            logger.info(
                'Warning: %d reference spots were not matched to predictions' %
                (len(unmatched)))
            logger.info('*' * 80)
            logger.info('')

        # Create the profile model
        experiments = ProfileModelFactory.create(params, experiments,
                                                 reflections)
        for model in experiments:
            sigma_b = model.profile.sigma_b(deg=True)
            sigma_m = model.profile.sigma_m(deg=True)
            if isinstance(sigma_b, float):
                logger.info('Sigma B: %f' % sigma_b)
                logger.info('Sigma M: %f' % sigma_m)
            else:  # scan varying
                mean_sigma_b = sum(sigma_b) / len(sigma_b)
                mean_sigma_m = sum(sigma_m) / len(sigma_m)
                logger.info('Sigma B: %f' % mean_sigma_b)
                logger.info('Sigma M: %f' % mean_sigma_m)

        # Write the parameters
        Command.start("Writing experiments to %s" % params.output)
        dump = ExperimentListDumper(experiments)
        with open(params.output, "w") as outfile:
            outfile.write(dump.as_json())
        Command.end("Wrote experiments to %s" % params.output)

    def process_reference(self, reference, params):
        ''' Load the reference spots. '''
        from dials.array_family import flex
        from time import time
        from libtbx.utils import Sorry
        if reference is None:
            return None, None
        st = time()
        assert ("miller_index" in reference)
        assert ("id" in reference)
        logger.info('Processing reference reflections')
        logger.info(' read %d strong spots' % len(reference))
        mask = reference.get_flags(reference.flags.indexed)
        rubbish = reference.select(~mask)
        if mask.count(False) > 0:
            reference.del_selected(~mask)
            logger.info(' removing %d unindexed reflections' %
                        mask.count(False))
        if len(reference) == 0:
            raise Sorry('''
        Invalid input for reference reflections.
        Expected > %d indexed spots, got %d
      ''' % (0, len(reference)))
        mask = reference.get_flags(reference.flags.centroid_outlier)
        if mask.count(True) > 0:
            rubbish.extend(reference.select(mask))
            reference.del_selected(mask)
            logger.info(
                ' removing %d reflections marked as centroid outliers' %
                mask.count(True))
        mask = reference['miller_index'] == (0, 0, 0)
        if mask.count(True) > 0:
            rubbish.extend(reference.select(mask))
            reference.del_selected(mask)
            logger.info(' removing %d reflections with hkl (0,0,0)' %
                        mask.count(True))
        mask = reference['id'] < 0
        if mask.count(True) > 0:
            raise Sorry('''
        Invalid input for reference reflections.
        %d reference spots have an invalid experiment id
      ''' % mask.count(True))
        logger.info(' using %d indexed reflections' % len(reference))
        logger.info(' found %d junk reflections' % len(rubbish))
        if 'background.mean' in reference and params.subtract_background:
            logger.info(
                ' subtracting background from %d reference reflections' %
                len(reference))
            for spot in reference:
                spot['shoebox'].data -= spot['background.mean']
        logger.info(' time taken: %g' % (time() - st))
        return reference, rubbish

    def filter_reference_pixels(self, reference, experiments):
        '''
    Set any pixel closer to other reflections to background

    '''
        modified_count = 0
        for experiment, indices in reference.iterate_experiments_and_indices(
                experiments):
            subset = reference.select(indices)
            modified = subset['shoebox'].mask_neighbouring(
                subset['miller_index'], experiment.beam, experiment.detector,
                experiment.goniometer, experiment.scan, experiment.crystal)
            modified_count += modified.count(True)
            reference.set_selected(indices, subset)
        logger.info(" masked neighbouring pixels in %d shoeboxes" %
                    modified_count)
        return reference
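process_reference above repeatedly partitions a reflection table with boolean masks: flagged rows are copied into a junk table and deleted from the main one. The same bookkeeping in miniature (the toy table is illustrative):

from dials.array_family import flex

refl = flex.reflection_table()
refl["miller_index"] = flex.miller_index([(1, 0, 0), (0, 0, 0), (2, 1, 0)])

# Move rows with hkl (0,0,0) into a junk table, keep the rest
mask = refl["miller_index"] == (0, 0, 0)
rubbish = refl.select(mask)
refl.del_selected(mask)
assert len(refl) == 2 and len(rubbish) == 1
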
Example #45
0
def run(args):
  import libtbx.load_env
  from scitbx import matrix
  from cctbx.sgtbx import lattice_symmetry_group
  from scitbx.math import r3_rotation_axis_and_angle_from_matrix

  usage = "%s [options] experiment_0.json ..." % \
    libtbx.env.dispatcher_name

  parser = OptionParser(
    usage=usage,
    phil=phil_scope,
    read_experiments=True,
    check_format=False,
    epilog=help_message)

  params, options = parser.parse_args(show_diff_phil=True)
  experiments = params.input.experiments

  # check input
  space_group = None
  for experiment in experiments:
    assert(len(experiment.data.goniometers()) == 1)
    assert(len(experiment.data.crystals()) == 1)
    crystal = experiment.data.crystals()[0]
    if space_group is None:
      space_group = crystal.get_space_group()
    else:
      assert(crystal.get_space_group() == space_group)

  reference_U = None
  reference_space_group = None

  for j, experiment in enumerate(experiments):
    goniometer = experiment.data.goniometers()[0]
    F = matrix.sqr(goniometer.get_fixed_rotation())
    crystal = experiment.data.crystals()[0]
    U = matrix.sqr(crystal.get_U())
    B = matrix.sqr(crystal.get_B())
    UB = F * U * B
    UBt = UB.transpose().elems
    a, b, c = matrix.col(UBt[0:3]), matrix.col(UBt[3:6]), matrix.col(UBt[6:9])
    axis = matrix.col(goniometer.get_rotation_axis())
    from math import pi
    r2d = 180 / pi
    abc = [a, b, c]
    abc_names = 'abc'
    distances = [(r2d * (min(axis.angle(_a), pi - axis.angle(_a))), k)
                 for k, _a in enumerate(abc)]
    close = sorted(distances)[0]
    if reference_U is None:
      reference_U = U
      reference_space_group = lattice_symmetry_group(crystal.get_unit_cell(),
                                                     max_delta=0.0)
      print('%s possible lattice ops' % len(reference_space_group.all_ops()))

    print('Experiment %d' % j)
    print('Closest (original) axis: %s* %.2f' %
          (abc_names[close[1]], close[0]))

    results = []
    for op in reference_space_group.all_ops():
      R = B * matrix.sqr(op.r().as_double()).transpose() * B.inverse()
      relative = (U * R).inverse() * reference_U
      rot = r3_rotation_axis_and_angle_from_matrix(relative)
      results.append((abs(rot.angle()), op.r().as_hkl(), rot))
    results.sort()
    print('Best reindex op for experiment %d: %12s (%.3f)' %
          (j, results[0][1], 180.0 * results[0][2].angle() / pi))

    if results[0][0] > (5 * pi / 180.0):
      print('Rotation: axis: %.4f %.4f %.4f' % results[0][2].axis)
      print('          angle: %.4f degrees' %
            (180.0 * results[0][2].angle() / pi))
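r3_rotation_axis_and_angle_from_matrix, used above to score the candidate reindex operators, decomposes a rotation matrix into an axis and an angle in radians. A quick check on a 90 degree rotation about +z:

from math import pi
from scitbx import matrix
from scitbx.math import r3_rotation_axis_and_angle_from_matrix

R = matrix.sqr((0, -1, 0,
                1,  0, 0,
                0,  0, 1))  # rotation by 90 degrees about +z
rot = r3_rotation_axis_and_angle_from_matrix(R)
print(rot.axis)                  # approximately (0, 0, 1)
print(rot.angle() * 180.0 / pi)  # approximately 90.0
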
Example #46
0
class Script:
    """A class for running the script."""
    def __init__(self):
        """Initialise the script."""
        # The script usage
        usage = ("usage: dials.two_theta_refine [options] [param.phil] "
                 "models.expt observations.refl")

        # Create the parser
        self.parser = OptionParser(
            usage=usage,
            phil=working_phil,
            read_reflections=True,
            read_experiments=True,
            check_format=False,
            epilog=help_message,
        )

    @staticmethod
    def check_input(reflections):
        """Check the input is suitable for refinement. So far just check keys in
        the reflection table. Maybe later check experiments have overlapping models
        etc."""

        msg = (
            "The supplied reflection table does not have the required data " +
            "column: {0}")
        for key in ["xyzobs.mm.value", "xyzobs.mm.variance"]:
            if key not in reflections:
                msg = msg.format(key)
                sys.exit(msg)

        # FIXME add other things to be checked here
        return

    @staticmethod
    def combine_crystals(experiments):
        """Replace all crystals in the experiments list with the first crystal"""

        new_experiments = ExperimentList()
        ref_crystal = experiments[0].crystal
        for exp in experiments:
            new_experiments.append(
                Experiment(
                    beam=exp.beam,
                    detector=exp.detector,
                    scan=exp.scan,
                    goniometer=exp.goniometer,
                    crystal=ref_crystal,
                    imageset=exp.imageset,
                    identifier=exp.identifier,
                ))
        return new_experiments

    @staticmethod
    def filter_integrated_centroids(reflections):
        """Filter reflections to include only those with the integrated and the
        strong flag set, but only if there are apparently some integrated
        reflections"""

        orig_len = len(reflections)
        mask = reflections.get_flags(reflections.flags.integrated)
        if mask.count(True) == 0:
            return reflections
        reflections = reflections.select(mask)
        mask = reflections.get_flags(reflections.flags.strong)
        reflections = reflections.select(mask)

        logger.info(
            "{} out of {} reflections remain after filtering to keep only strong"
            " and integrated centroids".format(len(reflections), orig_len))
        return reflections

    @staticmethod
    def convert_to_P1(reflections, experiments):
        """Convert the input crystals to P 1 and reindex the reflections"""
        for iexp, exp in enumerate(experiments):
            sel = reflections["id"] == iexp
            xl = exp.crystal
            sg = xl.get_space_group()
            op = sg.info().change_of_basis_op_to_primitive_setting()
            exp.crystal = xl.change_basis(op)
            exp.crystal.set_space_group(sgtbx.space_group("P 1"))
            hkl_reindexed = op.apply(reflections["miller_index"].select(sel))
            reflections["miller_index"].set_selected(sel, hkl_reindexed)
        return reflections, experiments

    @staticmethod
    def create_refiner(params, reflections, experiments):
        # Only parameterise the crystal unit cell
        det_params = None
        beam_params = None
        xlo_params = None
        xluc_params = []
        for crystal in experiments.crystals():
            exp_ids = experiments.indices(crystal)
            xluc_params.append(
                CrystalUnitCellParameterisation(crystal,
                                                experiment_ids=exp_ids))

        # Two theta prediction equation parameterisation
        pred_param = TwoThetaPredictionParameterisation(
            experiments, det_params, beam_params, xlo_params, xluc_params)
        param_reporter = ParameterReporter(det_params, beam_params, xlo_params,
                                           xluc_params)

        # ReflectionManager, currently without outlier rejection
        # Note: If not all reflections are used, then the filtering must be
        # communicated to generate_cif/mmcif() to be included in the CIF file!
        refman = TwoThetaReflectionManager(reflections,
                                           experiments,
                                           outlier_detector=None)

        # Reflection predictor
        ref_predictor = TwoThetaExperimentsPredictor(experiments)

        # Two theta target
        target = TwoThetaTarget(experiments, ref_predictor, refman, pred_param)

        # Switch on correlation matrix tracking if a correlation plot is requested
        journal = None
        if params.output.correlation_plot.filename is not None:
            journal = refinery_phil_scope.extract().refinery.journal
            journal.track_parameter_correlation = True

        # Minimisation engine - hardcoded to LevMar for now.
        refinery = Refinery(
            target=target,
            prediction_parameterisation=pred_param,
            log=None,
            tracking=journal,
            max_iterations=20,
        )

        # Refiner
        refiner = Refiner(
            experiments=experiments,
            pred_param=pred_param,
            param_reporter=param_reporter,
            refman=refman,
            target=target,
            refinery=refinery,
        )

        return refiner

    @staticmethod
    def cell_param_table(crystal):
        """Construct a table of cell parameters and their ESDs"""

        cell = crystal.get_unit_cell().parameters()
        esd = crystal.get_cell_parameter_sd()
        vol = crystal.get_unit_cell().volume()
        vol_esd = crystal.get_cell_volume_sd()
        header = ["Parameter", "Value", "Estimated sd"]
        rows = []
        names = ["a", "b", "c", "alpha", "beta", "gamma"]
        for n, p, e in zip(names, cell, esd):
            rows.append([n, f"{p:9.5f}", f"{e:9.5f}"])
        rows.append(["\nvolume", f"\n{vol:9.5f}", f"\n{vol_esd:9.5f}"])
        return tabulate(rows, header)

    @staticmethod
    def generate_p4p(crystal, beam, filename):
        logger.info("Saving P4P info to %s", filename)
        cell = crystal.get_unit_cell().parameters()
        esd = crystal.get_cell_parameter_sd()
        vol = crystal.get_unit_cell().volume()
        vol_esd = crystal.get_cell_volume_sd()

        open(filename, "w").write("\n".join([
            "TITLE    Auto-generated .p4p file from dials.two_theta_refine",
            "CELL     %.4f %.4f %.4f %.4f %.4f %.4f %.4f" % tuple(cell +
                                                                  (vol, )),
            "CELLSD   %.4f %.4f %.4f %.4f %.4f %.4f %.4f" % tuple(esd +
                                                                  (vol_esd, )),
            "SOURCE   SYNCH   %.6f" % beam.get_wavelength(),
            "",
        ]))
        return

    @staticmethod
    def generate_cif(crystal, refiner, filename):
        logger.info("Saving CIF information to %s", filename)

        block = iotbx.cif.model.block()
        block["_audit_creation_method"] = dials_version()
        block["_audit_creation_date"] = datetime.date.today().isoformat()
        #   block["_publ_section_references"] = '' # once there is a reference...

        for cell, esd, cifname in zip(
                crystal.get_unit_cell().parameters(),
                crystal.get_cell_parameter_sd(),
            [
                "length_a",
                "length_b",
                "length_c",
                "angle_alpha",
                "angle_beta",
                "angle_gamma",
            ],
        ):
            block[f"_cell_{cifname}"] = format_float_with_standard_uncertainty(
                cell, esd)
        block["_cell_volume"] = format_float_with_standard_uncertainty(
            crystal.get_unit_cell().volume(), crystal.get_cell_volume_sd())

        used_reflections = refiner.get_matches()
        block["_cell_measurement_reflns_used"] = len(used_reflections)
        block["_cell_measurement_theta_min"] = (
            flex.min(used_reflections["2theta_obs.rad"]) * 180 / math.pi / 2)
        block["_cell_measurement_theta_max"] = (
            flex.max(used_reflections["2theta_obs.rad"]) * 180 / math.pi / 2)
        block["_diffrn_reflns_number"] = len(used_reflections)
        miller_span = miller.index_span(used_reflections["miller_index"])
        min_h, min_k, min_l = miller_span.min()
        max_h, max_k, max_l = miller_span.max()
        block["_diffrn_reflns_limit_h_min"] = min_h
        block["_diffrn_reflns_limit_h_max"] = max_h
        block["_diffrn_reflns_limit_k_min"] = min_k
        block["_diffrn_reflns_limit_k_max"] = max_k
        block["_diffrn_reflns_limit_l_min"] = min_l
        block["_diffrn_reflns_limit_l_max"] = max_l
        block["_diffrn_reflns_theta_min"] = (
            flex.min(used_reflections["2theta_obs.rad"]) * 180 / math.pi / 2)
        block["_diffrn_reflns_theta_max"] = (
            flex.max(used_reflections["2theta_obs.rad"]) * 180 / math.pi / 2)

        cif = iotbx.cif.model.cif()
        cif["two_theta_refine"] = block
        with open(filename, "w") as fh:
            cif.show(out=fh)

    @staticmethod
    def generate_mmcif(crystal, refiner, filename):
        logger.info("Saving mmCIF information to %s", filename)

        block = iotbx.cif.model.block()
        block["_audit.revision_id"] = 1
        block["_audit.creation_method"] = dials_version()
        block["_audit.creation_date"] = datetime.date.today().isoformat()
        block["_entry.id"] = "two_theta_refine"
        #   block["_publ.section_references"] = '' # once there is a reference...

        block["_cell.entry_id"] = "two_theta_refine"
        for cell, esd, cifname in zip(
                crystal.get_unit_cell().parameters(),
                crystal.get_cell_parameter_sd(),
            [
                "length_a",
                "length_b",
                "length_c",
                "angle_alpha",
                "angle_beta",
                "angle_gamma",
            ],
        ):
            block[f"_cell.{cifname}"] = f"{cell:.8f}"
            block[f"_cell.{cifname}_esd"] = f"{esd:.8f}"
        block["_cell.volume"] = f"{crystal.get_unit_cell().volume():f}"
        block["_cell.volume_esd"] = f"{crystal.get_cell_volume_sd():f}"

        used_reflections = refiner.get_matches()
        block["_cell_measurement.entry_id"] = "two_theta_refine"
        block["_cell_measurement.reflns_used"] = len(used_reflections)
        block["_cell_measurement.theta_min"] = (
            flex.min(used_reflections["2theta_obs.rad"]) * 180 / math.pi / 2)
        block["_cell_measurement.theta_max"] = (
            flex.max(used_reflections["2theta_obs.rad"]) * 180 / math.pi / 2)
        block["_exptl_crystal.id"] = 1
        block["_diffrn.id"] = "two_theta_refine"
        block["_diffrn.crystal_id"] = 1
        block["_diffrn_reflns.diffrn_id"] = "two_theta_refine"
        block["_diffrn_reflns.number"] = len(used_reflections)
        miller_span = miller.index_span(used_reflections["miller_index"])
        min_h, min_k, min_l = miller_span.min()
        max_h, max_k, max_l = miller_span.max()
        block["_diffrn_reflns.limit_h_min"] = min_h
        block["_diffrn_reflns.limit_h_max"] = max_h
        block["_diffrn_reflns.limit_k_min"] = min_k
        block["_diffrn_reflns.limit_k_max"] = max_k
        block["_diffrn_reflns.limit_l_min"] = min_l
        block["_diffrn_reflns.limit_l_max"] = max_l
        block["_diffrn_reflns.theta_min"] = (
            flex.min(used_reflections["2theta_obs.rad"]) * 180 / math.pi / 2)
        block["_diffrn_reflns.theta_max"] = (
            flex.max(used_reflections["2theta_obs.rad"]) * 180 / math.pi / 2)

        cif = iotbx.cif.model.cif()
        cif["two_theta_refine"] = block
        with open(filename, "w") as fh:
            cif.show(out=fh)

    def run(self, args=None):
        """Execute the script."""

        # Parse the command line
        params, _ = self.parser.parse_args(args, show_diff_phil=False)

        # set up global reflections list
        reflections = flex.reflection_table()

        # loop through the input, building up the global lists
        reflections_list, input_experiments = reflections_and_experiments_from_files(
            params.input.reflections, params.input.experiments)

        experiments = copy.deepcopy(input_experiments)
        reflections_list = parse_multiple_datasets(reflections_list)
        for refs in reflections_list:
            reflections.extend(refs)

        # Try to load the models and data
        nexp = len(experiments)
        if nexp == 0:
            print("No Experiments found in the input")
            self.parser.print_help()
            return
        if not reflections:
            print("No reflection data found in the input")
            self.parser.print_help()
            return

        self.check_input(reflections)

        # Configure the logging
        log.config(logfile=params.output.log)
        logger.info(dials_version())

        # Log the diff phil
        diff_phil = self.parser.diff_phil.as_str()
        if diff_phil != "":
            logger.info("The following parameters have been modified:\n")
            logger.info(diff_phil)

        # Convert to P 1?
        if params.refinement.triclinic:
            reflections, experiments = self.convert_to_P1(
                reflections, experiments)

        # Combine crystals?
        if params.refinement.combine_crystal_models and len(experiments) > 1:
            logger.info("Combining %s crystal models", len(experiments))
            experiments = self.combine_crystals(experiments)

        # Filter integrated centroids?
        if params.refinement.filter_integrated_centroids:
            reflections = self.filter_integrated_centroids(reflections)

        # Filter data if scaled to remove outliers
        if "inverse_scale_factor" in reflections:
            try:
                reflections = filter_reflection_table(
                    reflections,
                    ["scale"],
                    partiality_threshold=params.refinement.partiality_threshold,
                )
            except ValueError as e:
                logger.warning(e)
                logger.info(
                    "Filtering on scaled data failed, proceeding with integrated data."
                )

        # Get the refiner
        logger.info("Configuring refiner")
        refiner = self.create_refiner(params, reflections, experiments)

        # Refine the geometry
        if nexp == 1:
            logger.info("Performing refinement of a single Experiment...")
        else:
            logger.info(f"Performing refinement of {nexp} Experiments...")
        refiner.run()

        # get the refined experiments
        experiments = copy.deepcopy(input_experiments)
        for expt, refined_expt in zip(experiments, refiner.get_experiments()):
            expt.crystal.set_recalculated_unit_cell(
                refined_expt.crystal.get_unit_cell())
            expt.crystal.set_recalculated_cell_parameter_sd(
                refined_expt.crystal.get_cell_parameter_sd())
            expt.crystal.set_recalculated_cell_volume_sd(
                refined_expt.crystal.get_cell_volume_sd())
        crystals = refiner.get_experiments().crystals()

        if len(crystals) == 1:
            # output the refined model for information
            logger.info("")
            logger.info("Final refined crystal model:")
            logger.info(crystals[0])
            logger.info(self.cell_param_table(crystals[0]))

        # Save the refined experiments to file
        output_experiments_filename = params.output.experiments
        logger.info(
            f"Saving refined experiments to {output_experiments_filename}")
        experiments.as_file(output_experiments_filename)

        # Create correlation plots
        if params.output.correlation_plot.filename is not None:
            create_correlation_plots(refiner, params.output)

        if params.output.cif is not None:
            self.generate_cif(crystals[0], refiner, filename=params.output.cif)

        if params.output.p4p is not None:
            self.generate_p4p(crystals[0],
                              experiments[0].beam,
                              filename=params.output.p4p)

        if params.output.mmcif is not None:
            self.generate_mmcif(crystals[0],
                                refiner,
                                filename=params.output.mmcif)
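Both generate_cif and generate_mmcif above convert observed 2theta values from radians to theta in degrees via the expression * 180 / math.pi / 2. A minimal standalone sketch of that conversion (hypothetical helper name, not part of the script):

import math

def two_theta_rad_to_theta_deg(two_theta_rad):
    # halve 2theta, then convert radians to degrees
    return math.degrees(two_theta_rad) / 2

assert abs(two_theta_rad_to_theta_deg(math.pi) - 90.0) < 1e-9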
Example #47
class Script(object):
  '''A class for running the script.'''

  def __init__(self):
    '''Initialise the script.'''
    from dials.util.options import OptionParser
    import libtbx.load_env

    # The script usage
    usage = "usage: %s [options] [param.phil] filenames" % libtbx.env.dispatcher_name

    # Create the parser
    self.parser = OptionParser(
      usage=usage,
      phil=phil_scope,
      check_format=False,
      read_experiments=True,
      read_reflections=True,
      epilog=help_message
      )

  def run(self):
    '''Execute the script.'''
    # Parse the command line
    params, options = self.parser.parse_args(show_diff_phil=True)

    n_bins = 10
    arbitrary_padding = 1
    legend = []

    # local container for axes
    figures = {}

    for plot in ['deltaXY','isigi']:
      fig, ax1 = plt.subplots()
      fig.suptitle(plot)
      ax2 = ax1.twinx()
      figures[plot] = {'fig':fig, 'ax1': ax1, 'ax2': ax2}

    def plotit(reflections, experiments):
      """
      Make the plots for a set of reflections and experiments.
      """
      detector = experiments.detectors()[0]
      beam = experiments.beams()[0] # only used to compute resolution of 2theta
      reflections = reflections.select(reflections['intensity.sum.variance'] > 0)

      # Setup up deltaXY and two theta bins
      reflections['difference_vector_norms'] = (reflections['xyzcal.mm']-reflections['xyzobs.mm.value']).norms()
      reflections = setup_stats(detector, experiments, reflections, two_theta_only=True) # add two theta to reflection table
      sorted_two_theta = flex.sorted(reflections['two_theta_obs'])
      bin_low = [sorted_two_theta[int((len(sorted_two_theta)/n_bins) * i)] for i in range(n_bins)]
      bin_high = [bin_low[i+1] for i in range(n_bins-1)]
      bin_high.append(sorted_two_theta[-1]+arbitrary_padding)

      x_centers = flex.double()
      n_refls = flex.int()
      rmsds = flex.double()
      p25r = flex.double()
      p50r = flex.double()
      p75r = flex.double()
      p25i = flex.double()
      p50i = flex.double()
      p75i = flex.double()
      print("# 2theta Res N dXY IsigI")

      # Compute stats for each bin
      for i in range(n_bins):
        refls = reflections.select((reflections['two_theta_obs'] >= bin_low[i]) & (reflections['two_theta_obs'] < bin_high[i]))
        # Only compute deltaXY stats on reflections with I/sigI at least 5
        i_sigi = refls['intensity.sum.value']/flex.sqrt(refls['intensity.sum.variance'])
        refls = refls.select(i_sigi >= 5)
        n = len(refls)
        if n < 10: continue
        min_r, q1_r, med_r, q3_r, max_r = five_number_summary(1000*refls['difference_vector_norms'])

        n_refls.append(n)

        rmsds_ = 1000*math.sqrt(flex.sum_sq(refls['difference_vector_norms'])/n)

        min_i, q1_i, med_i, q3_i, max_i = five_number_summary(i_sigi)
        p25i.append(q1_i)
        p50i.append(med_i)
        p75i.append(q3_i)
        # x_center
        c = ((bin_high[i]-bin_low[i])/2) + bin_low[i]
        # resolution
        d = beam.get_wavelength()/(2*math.sin(math.pi*c/(2*180)))
        x_centers.append(c)
        rmsds.append(rmsds_)
        print("%d % 5.1f % 5.1f % 8d %.1f %.1f"%(i, c, d, n, med_r, med_i))
        p25r.append(q1_r)
        p50r.append(med_r)
        p75r.append(q3_r)

      # After binning, plot the results
      for plot in figures:
        ax1 = figures[plot]['ax1']
        ax2 = figures[plot]['ax2']
        if plot == 'isigi':
          line, = ax1.plot(x_centers.as_numpy_array(), p50i.as_numpy_array(), '-')
          line.set_label('Median')
          ax1.fill_between(x_centers.as_numpy_array(), p25i.as_numpy_array(), p75i.as_numpy_array(),
            interpolate = True, alpha = 0.50, color = line.get_color())
          line, = ax2.plot(x_centers.as_numpy_array(), n_refls.as_numpy_array(), '-', color = line.get_color())
          line.set_label('N reflections')
        elif plot == 'deltaXY':
          line, = ax1.plot(x_centers.as_numpy_array(), p50r.as_numpy_array(), '-')
          line.set_label('Median')
          ax1.fill_between(x_centers.as_numpy_array(), p25r.as_numpy_array(), p75r.as_numpy_array(),
            interpolate = True, alpha = 0.50, color = line.get_color())
          line, = ax2.plot(x_centers.as_numpy_array(), n_refls.as_numpy_array(), '-', color = line.get_color())
          line.set_label('N reflections')
        ax1.legend()
        ax2.legend()

    assert len(params.input.experiments) == len(params.input.reflections)

    # Plotit!
    for i in range(len(params.input.experiments)):
      plotit(params.input.reflections[i].data, params.input.experiments[i].data)

    # Set up labels
    for plot in figures:
      fig = figures[plot]['fig']
      ax1 = figures[plot]['ax1']
      ax2 = figures[plot]['ax2']
      if plot == 'isigi':
        ax1.set_ylabel("I/sigI")
        ax2.set_ylabel("N reflections")
        ax1.set_xlabel("Two theta (degrees)")
      elif plot == 'deltaXY':
        ax1.set_ylabel(r"$\Delta$XY")
        ax2.set_ylabel("N reflections")
        ax1.set_xlabel("Two theta (degrees)")
      # NB: params.tag is parsed but not applied to the figure labels in this snippet

    if params.save_pdf:
      from matplotlib.backends.backend_pdf import PdfPages
      pp = PdfPages('reflection_stats.pdf')
      for i in plt.get_fignums():
        pp.savefig(plt.figure(i), dpi=300)
      pp.close()
    else:
      plt.show()
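plotit() above builds equal-count 2theta bins by sorting and slicing. A flex-free sketch of the same binning (hypothetical standalone helper using plain Python lists):

def equal_count_bins(values, n_bins=10, padding=1):
    # choose bin edges so each bin holds roughly the same number of values;
    # pad the last upper edge so the maximum value falls inside a bin
    s = sorted(values)
    low = [s[(len(s) // n_bins) * i] for i in range(n_bins)]
    high = low[1:] + [s[-1] + padding]
    return list(zip(low, high))

print(equal_count_bins(range(100), n_bins=4))
# [(0, 25), (25, 50), (50, 75), (75, 100)]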
Example #48
class Script(object):
    def __init__(self):
        """Initialise the script."""

        # The script usage
        usage = (
            "usage: dials.rs_mapper map_file=output.ccp4 [max_resolution=6] [grid_size=192] "
            "[reverse_phi=False] [param.phil] "
            "{image1.file [image2.file ...]} | imported.expt")

        # Initialise the base class
        self.parser = OptionParser(usage=usage,
                                   phil=phil_scope,
                                   epilog=help_message,
                                   read_experiments=True)

    def run(self):
        # Parse the command line
        params, options = self.parser.parse_args(show_diff_phil=True)

        if not params.rs_mapper.map_file:
            raise RuntimeError("Please specify output map file (map_file=)")
        else:
            self.map_file = params.rs_mapper.map_file

        # Ensure we have either a data block or an experiment list
        self.experiments = flatten_experiments(params.input.experiments)
        if len(self.experiments) != 1:
            self.parser.print_help()
            print("Please pass either an experiment list\n")
            return

        self.reverse_phi = params.rs_mapper.reverse_phi
        self.grid_size = params.rs_mapper.grid_size
        self.max_resolution = params.rs_mapper.max_resolution
        self.ignore_mask = params.rs_mapper.ignore_mask

        self.grid = flex.double(
            flex.grid(self.grid_size, self.grid_size, self.grid_size), 0)
        self.counts = flex.int(
            flex.grid(self.grid_size, self.grid_size, self.grid_size), 0)

        for experiment in self.experiments:
            self.process_imageset(experiment.imageset)

        recviewer.normalize_voxels(self.grid, self.counts)

        # Let's use 1/(100A) as the unit so that the absolute numbers in the
        # "cell dimensions" field of the ccp4 map are typical for normal
        # MX maps. The values in 1/A would give the "cell dimensions" around
        # or below 1 and some MX programs would not handle it well.
        box_size = 100 * 2.0 / self.max_resolution
        uc = uctbx.unit_cell((box_size, box_size, box_size, 90, 90, 90))
        ccp4_map.write_ccp4_map(
            self.map_file,
            uc,
            sgtbx.space_group("P1"),
            (0, 0, 0),
            self.grid.all(),
            self.grid,
            flex.std_string(["cctbx.miller.fft_map"]),
        )

    def process_imageset(self, imageset):
        rec_range = 1 / self.max_resolution

        if len(imageset.get_detector()) != 1:
            raise Sorry("This program does not support multi-panel detectors.")

        panel = imageset.get_detector()[0]
        beam = imageset.get_beam()
        s0 = beam.get_s0()
        pixel_size = panel.get_pixel_size()
        xlim, ylim = imageset.get_raw_data(0)[0].all()
        if pixel_size[0] != pixel_size[1]:
            raise Sorry("This program does not support non-square pixels.")

        # cache transformation
        xy = recviewer.get_target_pixels(panel, s0, xlim, ylim,
                                         self.max_resolution)
        s1 = panel.get_lab_coord(xy * pixel_size[0])
        s1 = s1 / s1.norms() * (1 / beam.get_wavelength())
        S = s1 - s0

        for i in range(len(imageset)):
            axis = imageset.get_goniometer().get_rotation_axis()
            osc_range = imageset.get_scan(i).get_oscillation_range()
            print("Oscillation range: %.2f - %.2f" %
                  (osc_range[0], osc_range[1]))
            angle = (osc_range[0] + osc_range[1]) / 2 / 180 * math.pi
            if not self.reverse_phi:
                # the pixel is in S AFTER rotation. Thus we have to rotate BACK.
                angle *= -1
            rotated_S = S.rotate_around_origin(axis, angle)

            data = imageset.get_raw_data(i)[0]
            if not self.ignore_mask:
                mask = imageset.get_mask(i)[0]
                data.set_selected(~mask, 0)

            recviewer.fill_voxels(data, self.grid, self.counts, rotated_S, xy,
                                  rec_range)
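The unit-choice comment in run() above is easiest to check with numbers; a worked example for the documented default of max_resolution = 6:

max_resolution = 6.0
print(2.0 / max_resolution)        # 0.333... in 1/A: "cell dimensions" below 1
print(100 * 2.0 / max_resolution)  # 33.3 in 1/(100 A): a typical MX-map magnitude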
Example #49
    if os.getenv("DIALS_EXPORT_DO_NOT_CHECK_FORMAT"):
        parser = OptionParser(
            usage=usage,
            read_experiments=True,
            read_reflections=True,
            check_format=False,
            phil=phil_scope,
            epilog=help_message,
        )
    else:
        parser = OptionParser(
            usage=usage,
            read_experiments=True,
            read_reflections=True,
            phil=phil_scope,
            epilog=help_message,
        )

    # Get the parameters
    params, options = parser.parse_args(show_diff_phil=False)

    # Configure the logging
    log.config(info=params.output.log, debug=params.output.debug_log)

    # Print the version number
    logger.info(dials_version())
    if os.getenv("DIALS_EXPORT_DO_NOT_CHECK_FORMAT"):
        logger.info("(format checks disabled due to environment variable)")

    # Log the diff phil
    diff_phil = parser.diff_phil.as_str()
    if diff_phil != "":
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)
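A hedged sketch of the environment-variable gate implied by this fragment (assuming, as the log message above suggests, that DIALS_EXPORT_DO_NOT_CHECK_FORMAT is what disables the parser's format checks):

import os

def format_checks_enabled():
    # any non-empty value of the variable disables format checking
    return not os.getenv("DIALS_EXPORT_DO_NOT_CHECK_FORMAT")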
Example #50
class Script(object):
  '''A class for running the script.'''

  def __init__(self):
    self.mpi_helper = mpi_helper()
    self.mpi_logger = mpi_logger()

  def __del__(self):
    self.mpi_helper.finalize()

  def parse_input(self):
    '''Parse input at rank 0 and broadcast the input parameters and options to all ranks'''

    if self.mpi_helper.rank == 0:
      from xfel.merging.application.phil.phil import phil_scope
      help_message = '''Merge xfel data.'''

      # The script usage
      import libtbx.load_env
      self.usage = "usage: %s [options] [param.phil] " % libtbx.env.dispatcher_name
      from dials.util.options import OptionParser

      # Create the parser
      self.parser = OptionParser(
        usage=self.usage,
        phil=phil_scope,
        epilog=help_message)

      # Parse the command line. quick_parse is required for MPI compatibility
      params, options = self.parser.parse_args(show_diff_phil=True,quick_parse=True)

      # Log the modified phil parameters
      diff_phil_str = self.parser.diff_phil.as_str()
      if diff_phil_str != "":
        self.mpi_logger.main_log("The following parameters have been modified:\n%s"%diff_phil_str)

      # prepare for transmitting input parameters to all ranks
      transmitted = dict(params = params, options = options)

      # make the output folders
      try:
        os.mkdir(params.output.output_dir)
      except FileExistsError:
        pass

    else:
      transmitted = None

    # broadcast parameters and options to all ranks
    self.mpi_logger.log("Broadcasting input parameters...")
    self.mpi_logger.log_step_time("BROADCAST_INPUT_PARAMS")

    transmitted = self.mpi_helper.comm.bcast(transmitted, root = 0)

    self.params = transmitted['params']
    self.options = transmitted['options']

    self.mpi_logger.set_log_file_paths(self.params)

    self.mpi_logger.log("Received input parameters and options")
    self.mpi_logger.log_step_time("BROADCAST_INPUT_PARAMS", True)

  def run(self):
    import datetime
    time_now = datetime.datetime.now()

    self.mpi_logger.log(str(time_now))
    if self.mpi_helper.rank == 0:
      self.mpi_logger.main_log(str(time_now))

    self.mpi_logger.log_step_time("TOTAL")

    self.mpi_logger.log_step_time("PARSE_INPUT_PARAMS")
    self.parse_input()
    self.mpi_logger.log_step_time("PARSE_INPUT_PARAMS", True)

    if self.params.mp.debug.cProfile:
      import cProfile
      pr = cProfile.Profile()
      pr.enable()

    # Create the workers using the factories
    self.mpi_logger.log_step_time("CREATE_WORKERS")
    from xfel.merging import application
    import importlib, copy

    self._resolve_persistent_columns()

    workers = []
    steps = self.params.dispatch.step_list if self.params.dispatch.step_list else default_steps
    for step in steps:
      step_factory_name = step
      step_additional_info = []

      step_info = step.split('_')
      assert len(step_info) > 0
      if len(step_info) > 1:
        step_factory_name = step_info[0]
        step_additional_info = step_info[1:]

      try:
        factory = importlib.import_module('xfel.merging.application.' + step_factory_name + '.factory')
      except ModuleNotFoundError:
        # remember the system path so the custom worker can temporarily modify it
        sys_path = copy.deepcopy(sys.path)
        pathstr = os.path.join(
            '~', '.cctbx.xfel', 'merging', 'application', step_factory_name,
            'factory.py'
        )
        pathstr = os.path.expanduser(pathstr)
        modulename = 'xfel.merging.application.' + step_factory_name + '.factory'
        spec = importlib.util.spec_from_file_location(modulename, pathstr)
        factory = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(factory)
        # reset the path
        sys.path = sys_path

      workers.extend(factory.factory.from_parameters(self.params, step_additional_info, mpi_helper=self.mpi_helper, mpi_logger=self.mpi_logger))

    # Perform phil validation up front
    for worker in workers:
      worker.validate()
    self.mpi_logger.log_step_time("CREATE_WORKERS", True)

    # Do the work
    experiments = reflections = None
    step = 0
    while workers:
      worker = workers.pop(0)
      self.mpi_logger.log_step_time("STEP_" + worker.__repr__())
      # Log worker name, i.e. execution step name
      step += 1
      if step > 1:
        self.mpi_logger.log('')
      step_desc = "STEP %d: %s"%(step, worker)
      self.mpi_logger.log(step_desc)

      if self.mpi_helper.rank == 0:
        if step > 1:
          self.mpi_logger.main_log('')
        self.mpi_logger.main_log(step_desc)

      # Execute worker
      experiments, reflections = worker.run(experiments, reflections)
      self.mpi_logger.log_step_time("STEP_" + worker.__repr__(), True)
      if experiments:
        self.mpi_logger.log("Ending step with %d experiments"%len(experiments))

    if self.params.output.save_experiments_and_reflections:
      if len(reflections) and 'id' not in reflections:
        from dials.array_family import flex
        id_ = flex.int(len(reflections), -1)
        if experiments:
          for expt_number, expt in enumerate(experiments):
            sel = reflections['exp_id'] == expt.identifier
            id_.set_selected(sel, expt_number)
        else:
          for expt_number, exp_id in enumerate(set(reflections['exp_id'])):
            sel = reflections['exp_id'] == exp_id
            id_.set_selected(sel, expt_number)
        reflections['id'] = id_

        assert (reflections['id'] == -1).count(True) == 0, ((reflections['id'] == -1).count(True), len(reflections))

      if self.mpi_helper.size == 1:
        filename_suffix = ""
      else:
        filename_suffix = "_%06d"%self.mpi_helper.rank

      if len(reflections):
        reflections.as_pickle(os.path.join(self.params.output.output_dir, "%s%s.refl"%(self.params.output.prefix, filename_suffix)))
      if experiments:
        experiments.as_file(os.path.join(self.params.output.output_dir, "%s%s.expt"%(self.params.output.prefix, filename_suffix)))

    self.mpi_logger.log_step_time("TOTAL", True)

    if self.params.mp.debug.cProfile:
      pr.disable()
      pr.dump_stats(os.path.join(self.params.output.output_dir, "cpu_%s_%d.prof"%(self.params.output.prefix, self.mpi_helper.rank)))

  def _resolve_persistent_columns(self):
    if self.params.output.expanded_bookkeeping:
      if self.params.input.persistent_refl_cols is None:
        self.params.input.persistent_refl_cols = []
      keysCreatedByMerge = ["input_refl_index", "orig_exp_id", "file_list_mapping", "is_odd_experiment"]
      for key in keysCreatedByMerge:
        if key not in self.params.input.persistent_refl_cols:
          self.params.input.persistent_refl_cols.append(key)
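parse_input() above parses the PHIL input on rank 0 only, then ships the result to every rank. A minimal sketch of that pattern with bare mpi4py (hedged: the dict below is a stand-in for the real parsed parameters, and MPI.COMM_WORLD stands in for self.mpi_helper.comm):

from mpi4py import MPI

comm = MPI.COMM_WORLD
if comm.rank == 0:
    # only rank 0 does the (potentially expensive) parsing
    transmitted = {"params": {"n_bins": 10}, "options": None}  # stand-in
else:
    transmitted = None
# after bcast, every rank holds rank 0's object
transmitted = comm.bcast(transmitted, root=0)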
Example #51
class Script:
    def __init__(self):
        """Initialise the script."""
        # The phil scope
        phil_scope = parse(
            """
      by_detector = False
        .type = bool
        .help = "If True, instead of producing separate files for each"
                "experiment, experiments are grouped by unique detector"
                "model in the input set of experiments. For example, if"
                "there are five detector models in the input data, five"
                "sets of files will be produced, each containing"
                "experiments that reference a single detector model."
      by_wavelength = False
        .type = bool
        .help = "If True, group experiments by wavelength, from low to high"
                "(using a relative tolerance of 1e-4 to match wavelengths)."
      output {
        experiments_prefix = split
          .type = str
          .help = "Filename prefix for the split experimental models"

        reflections_prefix = split
          .type = str
          .help = "Filename prefix for the split reflections"

        template = "{prefix}_{index:0{maxindexlength:d}d}.{extension}"
          .type = str
          .expert_level = 2
          .help = "Template python format string for output filenames."
                  "Replaced variables are prefix (with"
                  "output.{experiments_prefix, reflections_prefix}),"
                  "index (number of split experiment), maxindexlength"
                  "(number of digits of total number of split experiments)"
                  "and extension (default file extension for model and"
                  "reflection files)"

        chunk_size = None
          .type = int
          .expert_level = 2
          .help = "If not None, instead of creating many individual"
                  "files, create composite files with no more than"
                  "chunk_size experiments per file."
        chunk_sizes = None
          .type = ints
          .expert_level = 2
          .help = "If not None, instead of creating many individual"
                  "files, create composite files with the number of"
                  "datasets given in the chunk_sizes list."

      }
    """,
            process_includes=True,
        )

        # The script usage
        usage = ("usage: dials.split_experiments [options] [param.phil] "
                 "experiments1.expt experiments2.expt reflections1.refl "
                 "reflections2.refl...")

        # Create the parser
        self.parser = OptionParser(
            usage=usage,
            phil=phil_scope,
            read_reflections=True,
            read_experiments=True,
            check_format=False,
            epilog=help_message,
        )

    def run(self, args=None):
        """Execute the script."""

        # Parse the command line
        params, _ = self.parser.parse_args(args, show_diff_phil=True)

        # Try to load the models and data
        if not params.input.experiments:
            print("No Experiments found in the input")
            self.parser.print_help()
            return
        if params.input.reflections:
            if len(params.input.reflections) != len(params.input.experiments):
                raise Sorry(
                    "The number of input reflections files does not match the "
                    "number of input experiments")
        reflections, experiments = reflections_and_experiments_from_files(
            params.input.reflections, params.input.experiments)
        if reflections:
            reflections = reflections[0]
        else:
            reflections = None

        experiments_template = functools.partial(
            params.output.template.format,
            prefix=params.output.experiments_prefix,
            maxindexlength=len(str(len(experiments) - 1)),
            extension="expt",
        )

        reflections_template = functools.partial(
            params.output.template.format,
            prefix=params.output.reflections_prefix,
            maxindexlength=len(str(len(experiments) - 1)),
            extension="refl",
        )

        if params.output.chunk_sizes:
            if not sum(params.output.chunk_sizes) == len(experiments):
                raise Sorry(
                    "Sum of chunk sizes list (%s) not equal to number of experiments (%s)"
                    % (sum(params.output.chunk_sizes), len(experiments)))

        if params.by_wavelength:
            if reflections:
                if not reflections.experiment_identifiers():
                    raise Sorry(
                        "Unable to split by wavelength as no experiment "
                        "identifiers are set in the reflection table.")
            if all(experiments.identifiers() == ""):
                raise Sorry("Unable to split by wavelength as no experiment "
                            "identifiers are set in the experiment list.")

            wavelengths = match_wavelengths(experiments)
            for i, wl in enumerate(sorted(wavelengths.keys())):
                expids = []
                new_exps = ExperimentList()
                exp_nos = wavelengths[wl]
                imageset_ids = []  # record imageset ids to set in refl table
                imagesets_found = OrderedSet()
                for j in exp_nos:
                    expids.append(experiments[j].identifier)  # string
                    new_exps.append(experiments[j])
                    imagesets_found.add(experiments[j].imageset)
                    imageset_ids.append(
                        imagesets_found.index(experiments[j].imageset))
                experiment_filename = experiments_template(index=i)
                print(
                    f"Saving experiments with wavelength {wl} to {experiment_filename}"
                )
                new_exps.as_json(experiment_filename)
                if reflections:
                    refls = reflections.select_on_experiment_identifiers(
                        expids)
                    refls["imageset_id"] = flex.int(refls.size(), 0)
                    # now set the imageset ids
                    for k, iset_id in enumerate(imageset_ids):
                        # select the experiment based on id (unique per sweep),
                        # and set the imageset_id (not necessarily unique per sweep
                        # if imageset is shared)
                        sel = refls["id"] == k
                        refls["imageset_id"].set_selected(sel, iset_id)
                    reflections_filename = reflections_template(index=i)
                    print("Saving reflections with wavelength %s to %s" %
                          (wl, reflections_filename))
                    refls.as_file(reflections_filename)

        elif params.by_detector:
            assert (not params.output.chunk_size
                    ), "chunk_size + by_detector is not implemented"
            if reflections is None:
                split_data = {
                    detector: {
                        "experiments": ExperimentList()
                    }
                    for detector in experiments.detectors()
                }
            else:
                split_data = {
                    detector: {
                        "experiments": ExperimentList(),
                        "reflections": flex.reflection_table(),
                        "imagesets_found": OrderedSet(),
                    }
                    for detector in experiments.detectors()
                }
            for i, experiment in enumerate(experiments):
                split_expt_id = experiments.detectors().index(
                    experiment.detector)
                experiment_filename = experiments_template(index=split_expt_id)
                print("Adding experiment %d to %s" % (i, experiment_filename))
                split_data[experiment.detector]["experiments"].append(
                    experiment)
                if reflections is not None:
                    reflections_filename = reflections_template(
                        index=split_expt_id)
                    split_data[experiment.detector]["imagesets_found"].add(
                        experiment.imageset)
                    print("Adding reflections for experiment %d to %s" %
                          (i, reflections_filename))
                    if reflections.experiment_identifiers().keys():
                        # first find which id value corresponds to experiment in question
                        identifier = experiment.identifier
                        id_ = None
                        for k in reflections.experiment_identifiers().keys():
                            if reflections.experiment_identifiers(
                            )[k] == identifier:
                                id_ = k
                                break
                        if id_ is None:
                            raise Sorry(
                                "Unable to find id matching experiment identifier in reflection table."
                            )
                        ref_sel = reflections.select(reflections["id"] == id_)
                        # now reset ids and reset/update identifiers map
                        for k in ref_sel.experiment_identifiers().keys():
                            del ref_sel.experiment_identifiers()[k]
                        new_id = len(
                            split_data[experiment.detector]["experiments"]) - 1
                        ref_sel["id"] = flex.int(len(ref_sel), new_id)
                        ref_sel.experiment_identifiers()[new_id] = identifier
                    else:
                        ref_sel = reflections.select(reflections["id"] == i)
                        ref_sel["id"] = flex.int(
                            len(ref_sel),
                            len(split_data[experiment.detector]["experiments"])
                            - 1,
                        )
                    iset_id = split_data[
                        experiment.detector]["imagesets_found"].index(
                            experiment.imageset)
                    ref_sel["imageset_id"] = flex.int(ref_sel.size(), iset_id)
                    split_data[experiment.detector]["reflections"].extend(
                        ref_sel)

            for i, detector in enumerate(experiments.detectors()):
                experiment_filename = experiments_template(index=i)
                print("Saving experiment %d to %s" % (i, experiment_filename))
                split_data[detector]["experiments"].as_json(
                    experiment_filename)

                if reflections is not None:
                    reflections_filename = reflections_template(index=i)
                    print("Saving reflections for experiment %d to %s" %
                          (i, reflections_filename))
                    split_data[detector]["reflections"].as_file(
                        reflections_filename)
        elif params.output.chunk_size or params.output.chunk_sizes:

            def save_chunk(chunk_id, expts, refls):
                experiment_filename = experiments_template(index=chunk_id)
                print("Saving chunk %d to %s" %
                      (chunk_id, experiment_filename))
                expts.as_json(experiment_filename)
                if refls is not None:
                    reflections_filename = reflections_template(index=chunk_id)
                    print("Saving reflections for chunk %d to %s" %
                          (chunk_id, reflections_filename))
                    refls.as_file(reflections_filename)

            chunk_counter = 0
            chunk_expts = ExperimentList()
            if reflections:
                chunk_refls = flex.reflection_table()
            else:
                chunk_refls = None
            next_iset_id = 0
            imagesets_found = OrderedSet()
            for i, experiment in enumerate(experiments):
                chunk_expts.append(experiment)
                if reflections:
                    if reflections.experiment_identifiers().keys():
                        # first find which id value corresponds to experiment in question
                        identifier = experiment.identifier
                        id_ = None
                        for k in reflections.experiment_identifiers().keys():
                            if reflections.experiment_identifiers(
                            )[k] == identifier:
                                id_ = k
                                break
                        if id_ is None:
                            raise Sorry(
                                "Unable to find id matching experiment identifier in reflection table."
                            )
                        ref_sel = reflections.select(reflections["id"] == id_)
                        # now reset ids and reset/update identifiers map
                        for k in ref_sel.experiment_identifiers().keys():
                            del ref_sel.experiment_identifiers()[k]
                        new_id = len(chunk_expts) - 1
                        ref_sel["id"] = flex.int(len(ref_sel), new_id)
                        ref_sel.experiment_identifiers()[new_id] = identifier
                    else:
                        ref_sel = reflections.select(reflections["id"] == i)
                        ref_sel["id"] = flex.int(len(ref_sel),
                                                 len(chunk_expts) - 1)
                    if experiment.imageset not in imagesets_found:
                        imagesets_found.add(experiment.imageset)
                        ref_sel["imageset_id"] = flex.int(
                            ref_sel.size(), next_iset_id)
                        next_iset_id += 1
                    else:
                        iset_id = imagesets_found.index(experiment.imageset)
                        ref_sel["imageset_id"] = flex.int(
                            ref_sel.size(), iset_id)
                    chunk_refls.extend(ref_sel)
                if params.output.chunk_sizes:
                    chunk_limit = params.output.chunk_sizes[chunk_counter]
                else:
                    chunk_limit = params.output.chunk_size
                if len(chunk_expts) == chunk_limit:
                    save_chunk(chunk_counter, chunk_expts, chunk_refls)
                    chunk_counter += 1
                    chunk_expts = ExperimentList()
                    if reflections:
                        chunk_refls = flex.reflection_table()
                    else:
                        chunk_refls = None
            if len(chunk_expts) > 0:
                save_chunk(chunk_counter, chunk_expts, chunk_refls)
        else:
            for i, experiment in enumerate(experiments):

                experiment_filename = experiments_template(index=i)
                print("Saving experiment %d to %s" % (i, experiment_filename))
                ExperimentList([experiment]).as_json(experiment_filename)

                if reflections is not None:
                    reflections_filename = reflections_template(index=i)
                    print("Saving reflections for experiment %d to %s" %
                          (i, reflections_filename))
                    ref_sel = reflections.select(reflections["id"] == i)
                    if ref_sel.experiment_identifiers().keys():
                        identifier = ref_sel.experiment_identifiers()[i]
                        for k in ref_sel.experiment_identifiers().keys():
                            del ref_sel.experiment_identifiers()[k]
                        ref_sel["id"] = flex.int(ref_sel.size(), 0)
                        ref_sel.experiment_identifiers()[0] = identifier
                    else:
                        ref_sel["id"] = flex.int(len(ref_sel), 0)
                    ref_sel["imageset_id"] = flex.int(len(ref_sel), 0)
                    ref_sel.as_file(reflections_filename)

        return
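The output filenames above come from the output.template format string. A worked example: with 12 experiments, maxindexlength is len(str(11)) = 2, so experiment index 3 is written to split_03.expt:

template = "{prefix}_{index:0{maxindexlength:d}d}.{extension}"
print(template.format(prefix="split", index=3, maxindexlength=2, extension="expt"))
# split_03.expt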
Example #52
def run(args):
    from dials.util.options import OptionParser
    import libtbx.load_env

    usage = "%s [options] find_spots.json" % (libtbx.env.dispatcher_name)

    parser = OptionParser(usage=usage, phil=phil_scope, epilog=help_message)

    params, options, args = parser.parse_args(show_diff_phil=True,
                                              return_unhandled=True)

    positions = None
    if params.positions is not None:
        with open(params.positions) as f:
            positions = flex.vec2_double()
            for line in f.readlines():
                line = line.replace('(', ' ').replace(')', '').replace(
                    ',', ' ').strip().split()
                assert len(line) == 3
                i, x, y = [float(l) for l in line]
                positions.append((x, y))

    assert len(args) == 1
    json_file = args[0]
    import json

    with open(json_file, 'rb') as f:
        results = json.load(f)

    n_indexed = flex.double()
    fraction_indexed = flex.double()
    n_spots = flex.double()
    n_lattices = flex.double()
    crystals = []
    image_names = flex.std_string()

    for r in results:
        n_spots.append(r['n_spots_total'])
        image_names.append(str(r['image']))
        if 'n_indexed' in r:
            n_indexed.append(r['n_indexed'])
            n_lattices.append(len(r['lattices']))
            for d in r['lattices']:
                from dxtbx.serialize.crystal import from_dict
                crystals.append(from_dict(d['crystal']))
        else:
            n_indexed.append(0)
            n_lattices.append(0)

    if n_indexed.size():
        sel = n_spots > 0
        fraction_indexed = flex.double(n_indexed.size(), 0)
        fraction_indexed.set_selected(
            sel,
            n_indexed.select(sel) / n_spots.select(sel))

    import matplotlib
    matplotlib.use('Agg')
    from matplotlib import pyplot

    blue = '#3498db'
    red = '#e74c3c'

    marker = 'o'
    alpha = 0.5
    lw = 0

    plot = True
    table = True
    grid = params.grid

    from libtbx import group_args
    from dials.algorithms.spot_finding.per_image_analysis \
         import plot_stats, print_table

    estimated_d_min = flex.double()
    d_min_distl_method_1 = flex.double()
    d_min_distl_method_2 = flex.double()
    n_spots_total = flex.int()
    n_spots_no_ice = flex.int()
    total_intensity = flex.double()

    for d in results:
        estimated_d_min.append(d['estimated_d_min'])
        d_min_distl_method_1.append(d['d_min_distl_method_1'])
        d_min_distl_method_2.append(d['d_min_distl_method_2'])
        n_spots_total.append(d['n_spots_total'])
        n_spots_no_ice.append(d['n_spots_no_ice'])
        total_intensity.append(d['total_intensity'])

    stats = group_args(image=image_names,
                       n_spots_total=n_spots_total,
                       n_spots_no_ice=n_spots_no_ice,
                       n_spots_4A=None,
                       n_indexed=n_indexed,
                       fraction_indexed=fraction_indexed,
                       total_intensity=total_intensity,
                       estimated_d_min=estimated_d_min,
                       d_min_distl_method_1=d_min_distl_method_1,
                       d_min_distl_method_2=d_min_distl_method_2,
                       noisiness_method_1=None,
                       noisiness_method_2=None)

    if plot:
        plot_stats(stats)
        pyplot.clf()
    if table:
        print_table(stats)

    n_rows = 10
    n_rows = min(n_rows, len(n_spots_total))
    perm_n_spots_total = flex.sort_permutation(n_spots_total, reverse=True)
    print('Top %i images sorted by number of spots:' % n_rows)
    print_table(stats, perm=perm_n_spots_total, n_rows=n_rows)
    if flex.max(n_indexed) > 0:
        perm_n_indexed = flex.sort_permutation(n_indexed, reverse=True)
        print('Top %i images sorted by number of indexed reflections:' % n_rows)
        print_table(stats, perm=perm_n_indexed, n_rows=n_rows)

    print "Number of indexed lattices: ", (n_indexed > 0).count(True)

    print "Number with valid d_min but failed indexing: ", (
        (d_min_distl_method_1 > 0) & (d_min_distl_method_2 > 0) &
        (estimated_d_min > 0) & (n_indexed == 0)).count(True)

    n_bins = 20
    spot_count_histogram(n_spots_total,
                         n_bins=n_bins,
                         filename='hist_n_spots_total.png',
                         log=True)
    spot_count_histogram(n_spots_no_ice,
                         n_bins=n_bins,
                         filename='hist_n_spots_no_ice.png',
                         log=True)
    spot_count_histogram(n_indexed.select(n_indexed > 0),
                         n_bins=n_bins,
                         filename='hist_n_indexed.png',
                         log=False)

    if len(crystals):
        plot_unit_cell_histograms(crystals)

    if params.stereographic_projections and len(crystals):
        from dxtbx.datablock import DataBlockFactory
        datablocks = DataBlockFactory.from_filenames([image_names[0]],
                                                     verbose=False)
        assert len(datablocks) == 1
        imageset = datablocks[0].extract_imagesets()[0]
        s0 = imageset.get_beam().get_s0()
        # XXX what if no goniometer?
        rotation_axis = imageset.get_goniometer().get_rotation_axis()

        indices = ((1, 0, 0), (0, 1, 0), (0, 0, 1))
        for i, index in enumerate(indices):

            from cctbx import crystal, miller
            from scitbx import matrix
            miller_indices = flex.miller_index([index])
            symmetry = crystal.symmetry(
                unit_cell=crystals[0].get_unit_cell(),
                space_group=crystals[0].get_space_group())
            miller_set = miller.set(symmetry, miller_indices)
            d_spacings = miller_set.d_spacings()
            d_spacings = d_spacings.as_non_anomalous_array().expand_to_p1()
            d_spacings = d_spacings.generate_bijvoet_mates()
            miller_indices = d_spacings.indices()

            # plane normal
            d0 = matrix.col(s0).normalize()
            d1 = d0.cross(matrix.col(rotation_axis)).normalize()
            d2 = d1.cross(d0).normalize()
            reference_poles = (d0, d1, d2)

            from dials.command_line.stereographic_projection import stereographic_projection
            projections = []

            for cryst in crystals:
                reciprocal_space_points = list(
                    cryst.get_U() *
                    cryst.get_B()) * miller_indices.as_vec3_double()
                projections.append(
                    stereographic_projection(reciprocal_space_points,
                                             reference_poles))

                #from dials.algorithms.indexing.compare_orientation_matrices import \
                #  difference_rotation_matrix_and_euler_angles
                #R_ij, euler_angles, cb_op = difference_rotation_matrix_and_euler_angles(
                #  crystals[0], cryst)
                #print max(euler_angles)

            from dials.command_line.stereographic_projection import plot_projections
            plot_projections(projections,
                             filename='projections_%s.png' % ('hkl'[i]))
            pyplot.clf()

    def plot_grid(values,
                  grid,
                  file_name,
                  cmap=pyplot.cm.Reds,
                  vmin=None,
                  vmax=None,
                  invalid='white'):
        values = values.as_double()
        # At DLS, fast direction appears to be largest direction
        if grid[0] > grid[1]:
            values.reshape(flex.grid(reversed(grid)))
            values = values.matrix_transpose()
        else:
            values.reshape(flex.grid(grid))

        Z = values.as_numpy_array()

        #f, (ax1, ax2) = pyplot.subplots(2)
        f, ax1 = pyplot.subplots(1)

        mesh1 = ax1.pcolormesh(values.as_numpy_array(),
                               cmap=cmap,
                               vmin=vmin,
                               vmax=vmax)
        mesh1.cmap.set_under(color=invalid, alpha=None)
        mesh1.cmap.set_over(color=invalid, alpha=None)
        #mesh2 = ax2.contour(Z, cmap=cmap, vmin=vmin, vmax=vmax)
        #mesh2 = ax2.contourf(Z, cmap=cmap, vmin=vmin, vmax=vmax)
        ax1.set_aspect('equal')
        ax1.invert_yaxis()
        #ax2.set_aspect('equal')
        #ax2.invert_yaxis()
        pyplot.colorbar(mesh1, ax=ax1)
        #pyplot.colorbar(mesh2, ax=ax2)
        pyplot.savefig(file_name, dpi=600)
        pyplot.clf()

    def plot_positions(values,
                       positions,
                       file_name,
                       cmap=pyplot.cm.Reds,
                       vmin=None,
                       vmax=None,
                       invalid='white'):
        values = values.as_double()
        assert positions.size() >= values.size()
        positions = positions[:values.size()]

        if vmin is None:
            vmin = flex.min(values)
        if vmax is None:
            vmax = flex.max(values)

        x, y = positions.parts()
        dx = flex.abs(x[1:] - x[:-1])
        dy = flex.abs(y[1:] - y[:-1])
        dx = dx.select(dx > 0)
        dy = dy.select(dy > 0)

        scale = 1 / flex.min(dx)
        #print scale
        x = (x * scale).iround()
        y = (y * scale).iround()

        from libtbx.math_utils import iceil
        z = flex.double(
            flex.grid(iceil(flex.max(y)) + 1,
                      iceil(flex.max(x)) + 1), -2)
        #print z.all()
        for x_, y_, z_ in zip(x, y, values):
            z[y_, x_] = z_

        plot_grid(z.as_1d(),
                  z.all(),
                  file_name,
                  cmap=cmap,
                  vmin=vmin,
                  vmax=vmax,
                  invalid=invalid)
        return

    if grid is not None or positions is not None:
        if grid is not None:
            positions = tuple(reversed(grid))
            plotter = plot_grid
        else:
            plotter = plot_positions

        cmap = pyplot.get_cmap(params.cmap)
        plotter(n_spots_total,
                positions,
                'grid_spot_count_total.png',
                cmap=cmap,
                invalid=params.invalid)
        plotter(n_spots_no_ice,
                positions,
                'grid_spot_count_no_ice.png',
                cmap=cmap,
                invalid=params.invalid)
        plotter(total_intensity,
                positions,
                'grid_total_intensity.png',
                cmap=cmap,
                invalid=params.invalid)
        if flex.max(n_indexed) > 0:
            plotter(n_indexed,
                    positions,
                    'grid_n_indexed.png',
                    cmap=cmap,
                    invalid=params.invalid)
            plotter(fraction_indexed,
                    positions,
                    'grid_fraction_indexed.png',
                    cmap=cmap,
                    vmin=0,
                    vmax=1,
                    invalid=params.invalid)

        for i, d_min in enumerate(
            (estimated_d_min, d_min_distl_method_1, d_min_distl_method_2)):
            from cctbx import uctbx
            d_star_sq = uctbx.d_as_d_star_sq(d_min)
            d_star_sq.set_selected(d_star_sq == 1, 0)
            vmin = flex.min(d_star_sq.select(d_star_sq > 0))
            vmax = flex.max(d_star_sq)

            vmin = flex.min(d_min.select(d_min > 0))
            vmax = flex.max(d_min)
            cmap = pyplot.get_cmap('%s_r' % params.cmap)
            d_min.set_selected(d_min <= 0, vmax)

            if i == 0:
                plotter(d_min,
                        positions,
                        'grid_d_min.png',
                        cmap=cmap,
                        vmin=vmin,
                        vmax=vmax,
                        invalid=params.invalid)
            else:
                plotter(d_min,
                        positions,
                        'grid_d_min_method_%i.png' % i,
                        cmap=cmap,
                        vmin=vmin,
                        vmax=vmax,
                        invalid=params.invalid)

    if flex.max(n_indexed) > 0:
        pyplot.hexbin(n_spots,
                      n_indexed,
                      bins='log',
                      cmap=pyplot.cm.jet,
                      gridsize=50)
        pyplot.colorbar()
        #pyplot.scatter(n_spots, n_indexed, marker=marker, alpha=alpha, c=blue, lw=lw)
        xlim = pyplot.xlim()
        ylim = pyplot.ylim()
        pyplot.plot([0, max(n_spots)], [0, max(n_spots)], c=red)
        pyplot.xlim(0, xlim[1])
        pyplot.ylim(0, ylim[1])
        pyplot.xlabel('# spots')
        pyplot.ylabel('# indexed')
        pyplot.savefig('n_spots_vs_n_indexed.png')
        pyplot.clf()

        pyplot.hexbin(n_spots,
                      fraction_indexed,
                      bins='log',
                      cmap=pyplot.cm.jet,
                      gridsize=50)
        pyplot.colorbar()
        #pyplot.scatter(
        #n_spots, fraction_indexed, marker=marker, alpha=alpha, c=blue, lw=lw)
        pyplot.xlim(0, pyplot.xlim()[1])
        pyplot.ylim(0, pyplot.ylim()[1])
        pyplot.xlabel('# spots')
        pyplot.ylabel('Fraction indexed')
        pyplot.savefig('n_spots_vs_fraction_indexed.png')
        pyplot.clf()

        pyplot.hexbin(n_indexed,
                      fraction_indexed,
                      bins='log',
                      cmap=pyplot.cm.jet,
                      gridsize=50)
        pyplot.colorbar()
        #pyplot.scatter(
        #n_indexed, fraction_indexed, marker=marker, alpha=alpha, c=blue, lw=lw)
        pyplot.xlim(0, pyplot.xlim()[1])
        pyplot.ylim(0, pyplot.ylim()[1])
        pyplot.xlabel('# indexed')
        pyplot.ylabel('Fraction indexed')
        pyplot.savefig('n_indexed_vs_fraction_indexed.png')
        pyplot.clf()

        pyplot.hexbin(n_spots,
                      n_lattices,
                      bins='log',
                      cmap=pyplot.cm.jet,
                      gridsize=50)
        pyplot.colorbar()
        #pyplot.scatter(
        #n_spots, n_lattices, marker=marker, alpha=alpha, c=blue, lw=lw)
        pyplot.xlim(0, pyplot.xlim()[1])
        pyplot.ylim(0, pyplot.ylim()[1])
        pyplot.xlabel('# spots')
        pyplot.ylabel('# lattices')
        pyplot.savefig('n_spots_vs_n_lattices.png')
        pyplot.clf()

    #pyplot.scatter(
    #  estimated_d_min, d_min_distl_method_1, marker=marker, alpha=alpha, c=blue, lw=lw)
    pyplot.hexbin(estimated_d_min,
                  d_min_distl_method_1,
                  bins='log',
                  cmap=pyplot.cm.jet,
                  gridsize=50)
    pyplot.colorbar()
    #pyplot.gca().set_aspect('equal')
    xlim = pyplot.xlim()
    ylim = pyplot.ylim()
    m = max(max(estimated_d_min), max(d_min_distl_method_1))
    pyplot.plot([0, m], [0, m], c=red)
    pyplot.xlim(0, xlim[1])
    pyplot.ylim(0, ylim[1])
    pyplot.xlabel('estimated_d_min')
    pyplot.ylabel('d_min_distl_method_1')
    pyplot.savefig('d_min_vs_distl_method_1.png')
    pyplot.clf()

    #pyplot.scatter(
    #  estimated_d_min, d_min_distl_method_2, marker=marker, alpha=alpha, c=blue, lw=lw)
    pyplot.hexbin(estimated_d_min,
                  d_min_distl_method_2,
                  bins='log',
                  cmap=pyplot.cm.jet,
                  gridsize=50)
    pyplot.colorbar()
    #pyplot.gca().set_aspect('equal')
    xlim = pyplot.xlim()
    ylim = pyplot.ylim()
    m = max(max(estimated_d_min), max(d_min_distl_method_2))
    pyplot.plot([0, m], [0, m], c=red)
    pyplot.xlim(0, xlim[1])
    pyplot.ylim(0, ylim[1])
    pyplot.xlabel('estimated_d_min')
    pyplot.ylabel('d_min_distl_method_2')
    pyplot.savefig('d_min_vs_distl_method_2.png')
    pyplot.clf()

    #pyplot.scatter(
    #  d_min_distl_method_1, d_min_distl_method_2, marker=marker, alpha=alpha, c=blue, lw=lw)
    pyplot.hexbin(d_min_distl_method_1,
                  d_min_distl_method_2,
                  bins='log',
                  cmap=pyplot.cm.jet,
                  gridsize=50)
    pyplot.colorbar()
    #pyplot.gca().set_aspect('equal')
    xlim = pyplot.xlim()
    ylim = pyplot.ylim()
    m = max(max(d_min_distl_method_1), max(d_min_distl_method_2))
    pyplot.plot([0, m], [0, m], c=red)
    pyplot.xlim(0, xlim[1])
    pyplot.ylim(0, ylim[1])
    pyplot.xlabel('d_min_distl_method_1')
    pyplot.ylabel('d_min_distl_method_2')
    pyplot.savefig('distl_method_1_vs_distl_method_2.png')
    pyplot.clf()

    pyplot.hexbin(n_spots,
                  estimated_d_min,
                  bins='log',
                  cmap=pyplot.cm.jet,
                  gridsize=50)
    pyplot.colorbar()
    #pyplot.scatter(
    #n_spots, estimated_d_min, marker=marker, alpha=alpha, c=blue, lw=lw)
    pyplot.xlim(0, pyplot.xlim()[1])
    pyplot.ylim(0, pyplot.ylim()[1])
    pyplot.xlabel('# spots')
    pyplot.ylabel('estimated_d_min')
    pyplot.savefig('n_spots_vs_d_min.png')
    pyplot.clf()

    pyplot.hexbin(n_spots,
                  d_min_distl_method_1,
                  bins='log',
                  cmap=pyplot.cm.jet,
                  gridsize=50)
    pyplot.colorbar()
    #pyplot.scatter(
    #n_spots, d_min_distl_method_1, marker=marker, alpha=alpha, c=blue, lw=lw)
    pyplot.xlim(0, pyplot.xlim()[1])
    pyplot.ylim(0, pyplot.ylim()[1])
    pyplot.xlabel('# spots')
    pyplot.ylabel('d_min_distl_method_1')
    pyplot.savefig('n_spots_vs_distl_method_1.png')
    pyplot.clf()

    pyplot.hexbin(n_spots,
                  d_min_distl_method_2,
                  bins='log',
                  cmap=pyplot.cm.jet,
                  gridsize=50)
    pyplot.colorbar()
    #pyplot.scatter(
    #n_spots, d_min_distl_method_2, marker=marker, alpha=alpha, c=blue, lw=lw)
    pyplot.xlim(0, pyplot.xlim()[1])
    pyplot.ylim(0, pyplot.ylim()[1])
    pyplot.xlabel('# spots')
    pyplot.ylabel('d_min_distl_method_2')
    pyplot.savefig('n_spots_vs_distl_method_2.png')
    pyplot.clf()
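The hexbin / identity-line / axis-limit / savefig sequence above is repeated verbatim for every pair of metrics. A small helper could express each panel in one call; this is a hypothetical refactoring sketch, not part of the original script (it assumes the module-level pyplot import and the red colour constant used above):

def hexbin_compare(x, y, xlabel, ylabel, filename, identity_line=True):
    # One log-scaled hexbin panel comparing two per-image metrics,
    # following the same conventions as the inline plots above.
    pyplot.hexbin(x, y, bins='log', cmap=pyplot.cm.jet, gridsize=50)
    pyplot.colorbar()
    if identity_line:
        m = max(max(x), max(y))
        pyplot.plot([0, m], [0, m], c=red)
    pyplot.xlim(0, pyplot.xlim()[1])
    pyplot.ylim(0, pyplot.ylim()[1])
    pyplot.xlabel(xlabel)
    pyplot.ylabel(ylabel)
    pyplot.savefig(filename)
    pyplot.clf()

For example, the first panel above would become hexbin_compare(estimated_d_min, d_min_distl_method_1, 'estimated_d_min', 'd_min_distl_method_1', 'd_min_vs_distl_method_1.png').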
Example #53
class Script(ParentScript):
  ''' Class to parse the command line options. '''

  def __init__(self):
    ''' Set the expected options. '''
    from dials.util.options import OptionParser
    import libtbx.load_env

    # Create the option parser
    usage = "usage: %s experiment1.json experiment2.json reflections1.pickle reflections2.pickle" % libtbx.env.dispatcher_name
    self.parser = OptionParser(
      usage=usage,
      sort_options=True,
      phil=phil_scope,
      read_experiments=True,
      read_reflections=True,
      check_format=False,
      epilog=help_message)

  def run(self):
    ''' Parse the options. '''
    from dials.util.options import flatten_experiments, flatten_reflections
    # Parse the command line arguments
    params, options = self.parser.parse_args(show_diff_phil=True)
    self.params = params
    experiments = flatten_experiments(params.input.experiments)
    reflections = flatten_reflections(params.input.reflections)

    # Find all detector objects
    detectors = []
    detectors.extend(experiments.detectors())

    # Verify inputs
    if len(detectors) != 2:
      raise Sorry("Please provide a reference and a moving set of experiments")

    reflections = reflections[1]
    detector = detectors[1]

    if not hasattr(detector, 'hierarchy'):
      raise Sorry("Script intended for hierarchical detectors")

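    # Auto-detect the hierarchy depth by walking down the first child at each
    # level until a leaf (non-group) node is reached.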
    if params.max_hierarchy_level is None or str(params.max_hierarchy_level).lower() == 'auto':
      params.max_hierarchy_level = 0
      root = detector.hierarchy()
      while root.is_group():
        root = root[0]
        params.max_hierarchy_level += 1
      print "Found", params.max_hierarchy_level+1, "hierarchy levels"

    reference_root = detectors[0].hierarchy()
    moving_root = detector.hierarchy()
    rori = get_center(reference_root)
    rf = col(reference_root.get_fast_axis())
    rs = col(reference_root.get_slow_axis())
    r_norm = col(reference_root.get_normal())
    s0 = col(flex.vec3_double([col(b.get_s0()) for b in experiments.beams()]).mean())

    summary_table_header = ["Hierarchy","Delta XY","Delta XY","R Offsets","R Offsets","T Offsets","T Offsets","Z Offsets","Z Offsets","dR Norm","dR Norm","dT Norm","dT Norm","Local dNorm", "Local dNorm", "Rot Z","Rot Z"]
    summary_table_header2 = ["Level","","Sigma","","Sigma","","Sigma","","Sigma","","Sigma","","Sigma","","Sigma","","Sigma"]
    summary_table_header3 = ["","(microns)","(microns)","(microns)","(microns)","(microns)","(microns)","(microns)","(microns)","(deg)","(deg)","(deg)","(deg)","(deg)","(deg)","(deg)","(deg)"]
    summary_table_data = []
    summary_table_data.append(summary_table_header)
    summary_table_data.append(summary_table_header2)
    summary_table_data.append(summary_table_header3)

    table_header = ["PanelG","BC dist","Delta XY","R Offsets","T Offsets","Z Offsets","dR Norm","dT Norm","Local dNorm","Rot Z","N Refls"]
    table_header2 = ["ID","(mm)","(microns)","(microns)","(microns)","(microns)","(deg)","(deg)","(deg)","(deg)",""]

    from xfel.cftbx.detector.cspad_cbf_tbx import basis
    def get_full_basis_shift(pg):
      """Compute basis shift from pg to lab space"""
      shift = basis(panelgroup=pg)
      while True:
        parent = pg.parent()
        if parent is None:
          break
        shift = basis(panelgroup=parent) * shift
        pg = parent
      return shift
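    # Note: basis(panelgroup=...) from xfel.cftbx is assumed here to hold a
    # panel group's local-to-parent transform, so composing transforms up the
    # parent chain yields the full local-to-lab transform.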

    # Iterate through the hierarchy levels
    for level in range(params.max_hierarchy_level+1):
      delta_xy = flex.double()
      r_offsets = flex.double()
      t_offsets = flex.double()
      z_offsets = flex.double()
      rot_z = flex.double()
      delta_r_norm = flex.double()
      delta_t_norm = flex.double()
      local_dnorm = flex.double()
      bc_dists = flex.double()
      weights = flex.double()

      rows = []

      for pg_id, (pg1, pg2) in enumerate(zip(iterate_detector_at_level(reference_root, 0, level),
                                             iterate_detector_at_level(moving_root, 0, level))):
        weight = 0
        for panel_id, p in enumerate(iterate_panels(pg2)):
          weight += len(reflections.select(reflections['panel'] == id_from_name(detector, p.get_name())))
        weights.append(weight)

        bc = col(pg1.get_beam_centre_lab(s0))
        ori = get_center(pg1)
        bc_dist = (ori-bc).length()
        bc_dists.append(bc_dist)

        z_dists = []
        ori_xy = []
        for pg in [pg1,pg2]:
          ori = pg.get_local_origin()
          ori_xy.append(col((ori[0], ori[1])))
          z_dists.append(ori[2]*1000)
        dxy = (ori_xy[1]-ori_xy[0]).length()*1000
        delta_xy.append(dxy)

        z_off = z_dists[1]-z_dists[0]
        z_offsets.append(z_off)

        pgo1 = col(pg1.get_origin())
        ro_pgo = pgo1 - rori # vector from the detector origin to the panel group origin
        if ro_pgo.length() == 0:
          radial = col((0,0,0))
          transverse = col((0,0,0))
        else:
          radial = ((rf.dot(ro_pgo) * rf) + (rs.dot(ro_pgo) * rs)).normalize() # component of ro_pgo in rf rs plane
          transverse = r_norm.cross(radial).normalize()
        # now radial and transverse are vectors orthogonal to each other and the detector normal, such that
        # radial points at the panel group origin

        # compute shift in local frame, then convert that shift to lab space, then make it relative to the reference's origin, in lab space
        lpgo1 = col(pg1.get_local_origin())
        lpgo2 = col(pg2.get_local_origin())
        delta_pgo = (get_full_basis_shift(pg1) * (lpgo2-lpgo1)) - pgo1

        # v is the component of delta_pgo along the radial vector
        v = (radial.dot(delta_pgo) * radial)
        r_offset = v.length() * 1000
        angle = r_norm.angle(v, deg=True)
        if r_norm.cross(v).dot(transverse) < 0:
          r_offset = -r_offset
        r_offsets.append(r_offset)
        # v is the component of delta_pgo along the transverse vector
        v = (transverse.dot(delta_pgo) * transverse)
        t_offset = v.length() * 1000
        angle = r_norm.angle(v, deg=True)
        if r_norm.cross(v).dot(radial) < 0:
          t_offset = -t_offset
        t_offsets.append(t_offset)

        pgn1 = col(pg1.get_normal())
        pgf1 = col(pg1.get_fast_axis())
        pgs1 = col(pg1.get_slow_axis())
        pgn2 = col(pg2.get_normal())
        pgf2 = col(pg2.get_fast_axis())

        # v1 and v2 are the component of pgf1 and pgf2 in the rf rs plane
        v1 = (rf.dot(pgf1) * rf) + (rs.dot(pgf1) * rs)
        v2 = (rf.dot(pgf2) * rf) + (rs.dot(pgf2) * rs)
        rz = v1.angle(v2, deg=True)
        rot_z.append(rz)

        # v1 and v2 are the components of pgn1 and pgn2 in the r_norm radial plane
        v1 = (r_norm.dot(pgn1) * r_norm) + (radial.dot(pgn1) * radial)
        v2 = (r_norm.dot(pgn2) * r_norm) + (radial.dot(pgn2) * radial)
        drn = v1.angle(v2, deg=True)
        if v2.cross(v1).dot(transverse) < 0:
          drn = -drn
        delta_r_norm.append(drn)

        # v1 and v2 are the components of pgn1 and pgn2 in the r_norm transverse plane
        v1 = (r_norm.dot(pgn1) * r_norm) + (transverse.dot(pgn1) * transverse)
        v2 = (r_norm.dot(pgn2) * r_norm) + (transverse.dot(pgn2) * transverse)
        dtn = v1.angle(v2, deg=True)
        if v2.cross(v1).dot(radial) < 0:
          dtn = -dtn
        delta_t_norm.append(dtn)

        # Determine angle between normals in local space
        lpgf1 = col(pg1.get_local_fast_axis())
        lpgs1 = col(pg1.get_local_slow_axis())
        lpgn1 = lpgf1.cross(lpgs1)
        lpgf2 = col(pg2.get_local_fast_axis())
        lpgs2 = col(pg2.get_local_slow_axis())
        lpgn2 = lpgf2.cross(lpgs2)
        ldn = lpgn1.angle(lpgn2, deg=True)
        local_dnorm.append(ldn)

        row = ["%3d"%pg_id, "%6.1f"%bc_dist, "%6.1f"%dxy,
               "%6.1f"%r_offset, "%6.1f"%t_offset, "%6.1f"%z_off,
               "%.4f"%drn, "%.4f"%dtn, "%.4f"%ldn, "%.4f"%rz, "%8d"%weight]
        rows.append(row)

      wm_row = ["Weighted mean", ""]
      ws_row = ["Weighted stddev", ""]
      s_row = ["%d"%level]
      iterable = zip([delta_xy, r_offsets, t_offsets, z_offsets, delta_r_norm, delta_t_norm, local_dnorm, rot_z],
                     ["%6.1f","%6.1f","%6.1f","%6.1f","%.4f","%.4f","%.4f","%.4f"])
      if len(z_offsets) == 0:
        wm_row.extend(["%6.1f"%0]*8)
        ws_row.extend(["%6.1f"%0]*8)
        # the summary row carries mean and sigma for each of the 8 statistics
        s_row.extend(["%6.1f"%0]*16)
      elif len(z_offsets) == 1:
        for data, fmt in iterable:
          wm_row.append(fmt%data[0])
          ws_row.append(fmt%0)
          s_row.append(fmt%data[0])
          s_row.append(fmt%0)
      else:
        for data, fmt in iterable:
          stats = flex.mean_and_variance(data, weights)
          wm_row.append(fmt%stats.mean())
          ws_row.append(fmt%stats.gsl_stats_wsd())
          s_row.append(fmt%stats.mean())
          s_row.append(fmt%stats.gsl_stats_wsd())
      wm_row.append("")
      ws_row.append("")
      summary_table_data.append(s_row)

      table_data = [table_header, table_header2]
      table_d = {d:row for d, row in zip(bc_dists, rows)}
      table_data.extend([table_d[key] for key in sorted(table_d)])
      table_data.append(wm_row)
      table_data.append(ws_row)

      from libtbx import table_utils
      print "Hierarchy level %d Detector shifts"%level
      print table_utils.format(table_data,has_header=2,justify='center',delim=" ")

    print "Detector shifts summary"
    print table_utils.format(summary_table_data,has_header=3,justify='center',delim=" ")

    print
    print """
Example #54
class Script(object):
    '''A class for running the script.'''
    def __init__(self):
        from dials.util.options import OptionParser
        import libtbx.load_env

        usage = 'usage: %s [options] experiments.json indexed.pickle' \
                % libtbx.env.dispatcher_name

        self.parser = OptionParser(usage=usage,
                                   phil=phil_scope,
                                   epilog=help_message,
                                   check_format=False,
                                   read_experiments=True,
                                   read_reflections=True)

    def run(self):
        from dials.array_family import flex  # import dependency
        from scitbx import matrix
        from dials.util.options import flatten_experiments
        from dials.util.options import flatten_reflections

        params, options = self.parser.parse_args(show_diff_phil=True)

        experiments = flatten_experiments(params.input.experiments)
        reflections = flatten_reflections(params.input.reflections)

        if len(experiments) == 0:
            self.parser.print_help()
            return

        if len(reflections) != 1:
            self.parser.print_help()
            return

        reflections = reflections[0]

        print('Read %d reflections' % len(reflections))

        indexed = reflections.select(
            reflections.get_flags(reflections.flags.indexed))

        print('Kept %d indexed reflections' % len(indexed))

        for name in sorted(indexed.keys()):
            print('Found column %s' % name)

        for reflection in indexed[:3]:
            print(reflection)

        # verify that these experiments correspond to exactly one imageset, one
        # detector, one beam (obviously)
        for experiment in experiments[1:]:
            assert experiment.imageset == experiments[0].imageset
            assert experiment.beam == experiments[0].beam
            assert experiment.detector == experiments[0].detector

        # now perform some calculations - the only things different from one
        # experiment to the next will be crystal models
        crystals = [experiment.crystal for experiment in experiments]
        detector = experiments[0].detector
        beam = experiments[0].beam
        imageset = experiments[0].imageset

        # derived quantities
        wavelength = beam.get_wavelength()
        s0 = matrix.col(beam.get_s0())

        # cope with this potentially being a rotation image: if a scan and
        # goniometer are present, derive the goniometer rotation matrix at the
        # mid-point of the scan; otherwise use the identity

        scan = experiments[0].scan
        goniometer = experiments[0].goniometer

        if scan and goniometer:
            angle = scan.get_angle_from_array_index(
                0.5 * sum(imageset.get_array_range()))
            axis = matrix.col(goniometer.get_rotation_axis_datum())
            F = matrix.sqr(goniometer.get_fixed_rotation())
            S = matrix.sqr(goniometer.get_setting_rotation())
            R = S * axis.axis_and_angle_as_r3_rotation_matrix(angle,
                                                              deg=True) * F
        else:
            R = matrix.sqr((1, 0, 0, 0, 1, 0, 0, 0, 1))

        assert (len(detector) == 1)
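The excerpt ends here. A plausible continuation (an illustration only, not the original code) would rotate each crystal's reciprocal lattice into the laboratory frame with the R computed above, reusing crystals, R and matrix from this example:

        for crystal in crystals:
            UB = matrix.sqr(crystal.get_A())  # A matrix = U * B in dxtbx
            RUB = R * UB
            # lab-frame reciprocal-lattice vector of reflection (1, 0, 0)
            rlv = RUB * matrix.col((1, 0, 0))
            print('(1,0,0) -> (%.4f, %.4f, %.4f)' % rlv.elems)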
Example #55
def run(args):
    from dials.util.options import OptionParser
    from dials.util.options import flatten_experiments

    # The script usage
    usage = "dials.stereographic_projection [options] [param.phil] indexed.expt"

    parser = OptionParser(
        usage=usage,
        phil=phil_scope,
        read_experiments=True,
        check_format=False,
        epilog=help_message,
    )

    params, options = parser.parse_args(args=args, show_diff_phil=True)
    experiments = flatten_experiments(params.input.experiments)

    if not experiments:
        parser.print_help()
        return

    if not params.hkl and params.hkl_limit is None:
        sys.exit("Please provide hkl or hkl_limit parameters.")

    if params.plot.labels and len(params.plot.labels) != len(experiments):
        sys.exit(
            "Number of labels (%i) must equal number of experiments (%i)" %
            (len(params.plot.labels), len(experiments)))

    if params.hkl is not None and len(params.hkl):
        miller_indices = flex.miller_index(params.hkl)
    elif params.hkl_limit is not None:
        limit = params.hkl_limit
        miller_indices = flex.miller_index()
        for h in range(-limit, limit + 1):
            for k in range(-limit, limit + 1):
                for l in range(-limit, limit + 1):
                    if (h, k, l) == (0, 0, 0):
                        continue
                    miller_indices.append((h, k, l))

    crystals = experiments.crystals()

    symmetry = crystal.symmetry(unit_cell=crystals[0].get_unit_cell(),
                                space_group=crystals[0].get_space_group())
    miller_set = miller.set(symmetry, miller_indices)
    d_spacings = miller_set.d_spacings()
    if params.eliminate_sys_absent:
        d_spacings = d_spacings.eliminate_sys_absent()
    if params.expand_to_p1:
        d_spacings = d_spacings.as_non_anomalous_array().expand_to_p1()
    miller_indices = d_spacings.indices()

    # reduce each Miller index by the greatest common divisor of its
    # components and drop duplicates (see the gcd_list sketch after this
    # example)
    miller_indices_unique = flex.miller_index()
    for hkl in miller_indices:
        gcd = gcd_list(hkl)
        if gcd > 1:
            miller_indices_unique.append(tuple(int(h / gcd) for h in hkl))
        elif gcd < 1:
            pass
        else:
            miller_indices_unique.append(hkl)
    miller_indices = miller_indices_unique
    miller_indices = flex.miller_index(list(set(miller_indices)))

    ref_crystal = crystals[0]
    U = matrix.sqr(ref_crystal.get_U())
    B = matrix.sqr(ref_crystal.get_B())
    R = matrix.identity(3)

    if params.frame == "laboratory":
        reference_poles = reference_poles_perpendicular_to_beam(
            experiments[0].beam, experiments[0].goniometer)
        if params.use_starting_angle:
            rotation_axis = matrix.col(
                experiments[0].goniometer.get_rotation_axis())
            R = rotation_axis.axis_and_angle_as_r3_rotation_matrix(
                experiments[0].scan.get_oscillation()[0], deg=True)
        elif params.phi_angle != 0:
            rotation_axis = matrix.col(
                experiments[0].goniometer.get_rotation_axis())
            R = rotation_axis.axis_and_angle_as_r3_rotation_matrix(
                params.phi_angle, deg=True)
    else:
        if params.plane_normal is not None:
            plane_normal = params.plane_normal
        else:
            plane_normal = (0, 0, 1)
        reference_poles = reference_poles_crystal(ref_crystal,
                                                  plane_normal=plane_normal)

    if params.frame == "crystal":
        U = matrix.identity(3)

    reciprocal_space_points = list(R * U * B) * miller_indices.as_vec3_double()
    projections_ref = stereographic_projection(reciprocal_space_points,
                                               reference_poles)

    projections_all = [projections_ref]

    if experiments:
        from dials.algorithms.indexing.compare_orientation_matrices import (
            difference_rotation_matrix_axis_angle, )

        for expt in experiments[1:]:
            cryst = expt.crystal
            if params.frame == "crystal":
                R_ij, axis, angle, cb_op = difference_rotation_matrix_axis_angle(
                    ref_crystal, cryst)
                U = R_ij
            elif params.use_starting_angle:
                rotation_axis = matrix.col(
                    expt.goniometer.get_rotation_axis())
                R = rotation_axis.axis_and_angle_as_r3_rotation_matrix(
                    expt.scan.get_oscillation()[0], deg=True)
            else:
                U = matrix.sqr(cryst.get_U())
            reciprocal_space_points = (
                list(R * U * matrix.sqr(cryst.get_B())) *
                miller_indices.as_vec3_double())
            projections = stereographic_projection(reciprocal_space_points,
                                                   reference_poles)
            projections_all.append(projections)

    if params.save_coordinates:
        with open("projections.txt", "w") as f:
            f.write("crystal h k l x y" + os.linesep)
            for i_cryst, projections in enumerate(projections_all):
                for hkl, proj in zip(miller_indices, projections):
                    f.write("%i " % (i_cryst + 1))
                    f.write("%i %i %i " % hkl)
                    f.write(("%f %f" + os.linesep) % proj)

    if params.plot.filename:
        epochs = None
        if params.plot.colour_map is not None:
            if experiments[0].scan is not None:
                epochs = [expt.scan.get_epochs()[0] for expt in experiments]
            else:
                epochs = [i for i, expt in enumerate(experiments)]
        plot_projections(
            projections_all,
            filename=params.plot.filename,
            colours=params.plot.colours,
            marker_size=params.plot.marker_size,
            font_size=params.plot.font_size,
            gridsize=params.plot.gridsize,
            label_indices=miller_indices
            if params.plot.label_indices else False,
            epochs=epochs,
            colour_map=params.plot.colour_map,
        )

    if params.json.filename:
        projections_as_json(projections_all,
                            filename=params.json.filename,
                            labels=params.plot.labels)
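gcd_list is not defined in this excerpt; judging from how it is used above, a minimal equivalent (an assumption, not the original implementation) is:

from functools import reduce
from math import gcd

def gcd_list(values):
    # gcd of the absolute integer components, e.g. (2, 4, 6) -> 2
    return reduce(gcd, (abs(int(v)) for v in values))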
Example #56
def run(args):
    usage = "dials.plot_reflections models.expt observations.refl [options]"
    from dials.util.options import OptionParser, reflections_and_experiments_from_files
    from scitbx.array_family import flex
    from scitbx import matrix

    parser = OptionParser(
        usage=usage,
        phil=master_phil_scope,
        read_experiments=True,
        read_reflections=True,
        check_format=False,
    )

    params, options = parser.parse_args(show_diff_phil=True)
    reflections, experiments = reflections_and_experiments_from_files(
        params.input.reflections, params.input.experiments
    )
    if len(experiments.imagesets()) > 0:
        imageset = experiments.imagesets()[0]
        imageset.set_detector(experiments[0].detector)
        imageset.set_beam(experiments[0].beam)
        imageset.set_goniometer(experiments[0].goniometer)
    else:
        parser.print_help()
        return

    detector = imageset.get_detector()
    scan = imageset.get_scan()

    panel_origin_shifts = {0: (0, 0, 0)}
    try:
        hierarchy = detector.hierarchy()
    except AttributeError:
        hierarchy = None
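    # panel_origin_shifts maps panel index to the offset of panel 0's origin
    # from that panel's origin; it is used below to set common axis limits.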
    for i_panel in range(1, len(detector)):
        origin_shift = matrix.col(detector[0].get_origin()) - matrix.col(
            detector[i_panel].get_origin()
        )
        panel_origin_shifts[i_panel] = origin_shift

    observed_xyz = flex.vec3_double()
    predicted_xyz = flex.vec3_double()

    for reflection_list in reflections:

        if len(params.scan_range):
            sel = flex.bool(len(reflection_list), False)

            xyzcal_px = None
            xyzobs_px = None

            if "xyzcal.px" in reflection_list:
                xyzcal_px = reflection_list["xyzcal.px"]
            if "xyzobs.px.value" in reflection_list:
                xyzobs_px = reflection_list["xyzobs.px.value"]

            if xyzcal_px is not None and not xyzcal_px.norms().all_eq(0):
                centroids_frame = xyzcal_px.parts()[2]
            elif xyzobs_px is not None and not xyzobs_px.norms().all_eq(0):
                centroids_frame = xyzobs_px.parts()[2]
            else:
                raise Sorry("No pixel coordinates given in input reflections.")

            for scan_range in params.scan_range:
                if scan_range is None:
                    continue
                range_start, range_end = scan_range
                sel |= (centroids_frame >= range_start) & (centroids_frame < range_end)
            reflection_list = reflection_list.select(sel)
        if params.first_n_reflections is not None:
            centroid_positions = reflection_list.centroid_position()
            centroids_frame = centroid_positions.parts()[2]
            perm = flex.sort_permutation(centroids_frame)
            perm = perm[: min(reflection_list.size(), params.first_n_reflections)]
            reflection_list = reflection_list.select(perm)
        if params.crystal_id is not None:
            reflection_list = reflection_list.select(
                reflection_list["id"] == params.crystal_id
            )

        xyzobs_px = None
        xyzobs_mm = None
        xyzcal_mm = None

        if "xyzobs.px.value" in reflection_list:
            xyzobs_px = reflection_list["xyzobs.px.value"]
        if "xyzcal.mm" in reflection_list:
            xyzcal_mm = reflection_list["xyzcal.mm"]
        if "xyzobs.mm.value" in reflection_list:
            xyzobs_mm = reflection_list["xyzobs.mm.value"]

        panel_ids = reflection_list["panel"]
        if xyzobs_mm is None and xyzobs_px is not None:
            xyzobs_mm = flex.vec3_double()
            for i_panel in range(len(detector)):
                xyzobs_px_panel = xyzobs_px.select(panel_ids == i_panel)

                from dials.algorithms.centroid import centroid_px_to_mm_panel

                xyzobs_mm_panel, _, _ = centroid_px_to_mm_panel(
                    detector[i_panel],
                    scan,
                    xyzobs_px_panel,
                    flex.vec3_double(xyzobs_px_panel.size()),
                    flex.vec3_double(xyzobs_px_panel.size()),
                )
                xyzobs_mm.extend(xyzobs_mm_panel)

        if xyzobs_mm is not None:
            observed_xyz.extend(xyzobs_mm)
        if xyzcal_mm is not None:
            predicted_xyz.extend(xyzcal_mm)

    obs_x, obs_y, _ = observed_xyz.parts()
    pred_x, pred_y, _ = predicted_xyz.parts()

    try:
        import matplotlib

        if not params.output.show_plot:
            # http://matplotlib.org/faq/howto_faq.html#generate-images-without-having-a-window-appear
            matplotlib.use("Agg")  # use a non-interactive backend
        import matplotlib.pyplot as plt
    except ImportError:
        raise Sorry("matplotlib must be installed to generate a plot.")

    fig = plt.figure()
    fig.set_size_inches(params.output.size_inches)
    fig.set_dpi(params.output.dpi)
    plt.axes().set_aspect("equal")
    marker_size = params.output.marker_size
    if obs_x.size():
        plt.scatter(
            obs_x,
            obs_y,
            marker="o",
            edgecolors="black",
            c="white",
            s=marker_size,
            alpha=1,
        )
    if pred_x.size():
        plt.scatter(pred_x, pred_y, marker="+", s=marker_size, c="blue")
    xmax = max(
        [
            detector[i_panel].get_image_size_mm()[0] + panel_origin_shifts[i_panel][0]
            for i_panel in range(len(detector))
        ]
    )
    ymax = max(
        [
            detector[i_panel].get_image_size_mm()[1] + panel_origin_shifts[i_panel][1]
            for i_panel in range(len(detector))
        ]
    )
    try:
        beam_centre = hierarchy.get_beam_centre(imageset.get_beam().get_s0())
    except Exception:
        beam_centre = detector[0].get_beam_centre(imageset.get_beam().get_s0())
    plt.scatter([beam_centre[0]], [beam_centre[1]], marker="+", c="blue", s=100)
    plt.xlim(0, xmax)
    plt.ylim(0, ymax)
    plt.gca().invert_yaxis()
    plt.title("Centroid x,y-coordinates")
    plt.xlabel("x-coordinate (mm)")
    plt.ylabel("y-coordinate (mm)")
    if params.output.file_name is not None:
        plt.savefig(
            params.output.file_name,
            dpi=params.output.dpi,
            bbox_inches="tight",
        )
    if params.output.show_plot:
        plt.show()
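For reference, the px -> mm conversion performed by centroid_px_to_mm_panel above amounts roughly to the following single-position sketch (an approximation: the real implementation may also apply parallax and scan-dependent corrections):

def px_to_mm(panel, scan, x_px, y_px, frame):
    # Map pixel coordinates to mm via the panel model, and the frame
    # number to a rotation angle via the scan model (0.0 for stills).
    x_mm, y_mm = panel.pixel_to_millimeter((x_px, y_px))
    phi = scan.get_angle_from_array_index(frame, deg=False) if scan else 0.0
    return x_mm, y_mm, phi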
Example #57
def run(args=None):
    from dials.util.options import OptionParser, reflections_and_experiments_from_files
    from dials.util.version import dials_version

    usage = "dials.export models.expt reflections.refl [options]"

    # Create the option parser
    parser = OptionParser(
        usage=usage,
        read_experiments=True,
        read_reflections=True,
        check_format=False,
        phil=phil_scope,
        epilog=help_message,
    )

    # Get the parameters
    params, options = parser.parse_args(args, show_diff_phil=False)

    # Configure the logging
    log.config(logfile=params.output.log)

    # Print the version number
    logger.info(dials_version())

    # Log the diff phil
    diff_phil = parser.diff_phil.as_str()
    if diff_phil != "":
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)

    if not params.input.experiments and not params.input.reflections:
        parser.print_help()
        sys.exit()

    # Get the experiments and reflections
    reflections, experiments = reflections_and_experiments_from_files(
        params.input.reflections, params.input.experiments
    )

    # do auto interpreting of intensity choice:
    # note that this may still fail certain checks further down the processing,
    # but these are the defaults to try
    if params.intensity in ([None], [Auto], ["auto"], Auto) and reflections:
        if ("intensity.scale.value" in reflections[0]) and (
            "intensity.scale.variance" in reflections[0]
        ):
            params.intensity = ["scale"]
            logger.info("Data appears to be scaled, setting intensity = scale")
        else:
            params.intensity = []
            if "intensity.sum.value" in reflections[0]:
                params.intensity.append("sum")
            if "intensity.prf.value" in reflections[0]:
                params.intensity.append("profile")
            logger.info(
                "Data appears to be unscaled, setting intensity = "
                + "+".join(params.intensity)
            )

    # Choose the exporter
    exporter = {
        "mtz": export_mtz,
        "sadabs": export_sadabs,
        "xds_ascii": export_xdsascii,
        "nxs": export_nexus,
        "mmcif": export_mmcif,
        "mosflm": export_mosflm,
        "xds": export_xds,
        "json": export_json,
    }.get(params.format)
    if not exporter:
        sys.exit(f"Unknown format: {params.format}")

    # Export the data
    try:
        exporter(params, experiments, reflections)
    except Exception as e:
        logger.error(f"Error: {e}")
        sys.exit(1)
Example #58
def run(args):
    import libtbx.load_env
    from libtbx.utils import Sorry
    usage = "%s [options] experiments.json indexed.pickle" % libtbx.env.dispatcher_name

    parser = OptionParser(usage=usage,
                          phil=phil_scope,
                          read_reflections=True,
                          read_experiments=True,
                          check_format=False,
                          epilog=help_message)

    params, options = parser.parse_args(show_diff_phil=True)

    reflections = flatten_reflections(params.input.reflections)
    experiments = flatten_experiments(params.input.experiments)
    if len(experiments) == 0 and len(reflections) == 0:
        parser.print_help()
        return
    elif len(experiments.crystals()) > 1:
        raise Sorry("Only one crystal can be processed at a time")
    if params.change_of_basis_op is None:
        raise Sorry("Please provide a change_of_basis_op.")

    reference_crystal = None
    if params.reference is not None:
        from dxtbx.serialize import load
        reference_experiments = load.experiment_list(params.reference,
                                                     check_format=False)
        assert len(reference_experiments.crystals()) == 1
        reference_crystal = reference_experiments.crystals()[0]

    if len(experiments) and params.change_of_basis_op is libtbx.Auto:
        if reference_crystal is not None:
            from dials.algorithms.indexing.compare_orientation_matrices \
                 import difference_rotation_matrix_axis_angle

            cryst = experiments.crystals()[0]
            R, axis, angle, change_of_basis_op = difference_rotation_matrix_axis_angle(
                cryst, reference_crystal)
            print "Change of basis op: %s" % change_of_basis_op
            print "Rotation matrix to transform input crystal to reference::"
            print R.mathematica_form(format="%.3f", one_row_per_line=True)
            print "Rotation of %.3f degrees" % angle, "about axis (%.3f, %.3f, %.3f)" % axis

        elif len(reflections):
            assert len(reflections) == 1

            # always re-map reflections to reciprocal space
            from dials.algorithms.indexing import indexer
            refl_copy = flex.reflection_table()
            for i, imageset in enumerate(experiments.imagesets()):
                if 'imageset_id' in reflections[0]:
                    sel = (reflections[0]['imageset_id'] == i)
                else:
                    sel = (reflections[0]['id'] == i)
                refl = indexer.indexer_base.map_spots_pixel_to_mm_rad(
                    reflections[0].select(sel), imageset.get_detector(),
                    imageset.get_scan())

                indexer.indexer_base.map_centroids_to_reciprocal_space(
                    refl, imageset.get_detector(), imageset.get_beam(),
                    imageset.get_goniometer())
                refl_copy.extend(refl)

            # index the reflection list using the input experiments list
            refl_copy['id'] = flex.int(len(refl_copy), -1)
            from dials.algorithms.indexing import index_reflections
            index_reflections(refl_copy, experiments, tolerance=0.2)
            hkl_expt = refl_copy['miller_index']
            hkl_input = reflections[0]['miller_index']

            change_of_basis_op = derive_change_of_basis_op(hkl_input, hkl_expt)

            # reset experiments list since we don't want to reindex this
            experiments = []

    else:
        change_of_basis_op = sgtbx.change_of_basis_op(
            params.change_of_basis_op)

    if len(experiments):
        experiment = experiments[0]
        cryst_orig = copy.deepcopy(experiment.crystal)
        cryst_reindexed = cryst_orig.change_basis(change_of_basis_op)
        if params.space_group is not None:
            a, b, c = cryst_reindexed.get_real_space_vectors()
            cryst_reindexed = crystal_model(
                a, b, c, space_group=params.space_group.group())
        experiment.crystal.update(cryst_reindexed)

        print "Old crystal:"
        print cryst_orig
        print
        print "New crystal:"
        print cryst_reindexed
        print

        print "Saving reindexed experimental models to %s" % params.output.experiments
        dump.experiment_list(experiments, params.output.experiments)

    if len(reflections):
        assert (len(reflections) == 1)
        reflections = reflections[0]

        miller_indices = reflections['miller_index']

        if params.hkl_offset is not None:
            h, k, l = miller_indices.as_vec3_double().parts()
            h += params.hkl_offset[0]
            k += params.hkl_offset[1]
            l += params.hkl_offset[2]
            miller_indices = flex.miller_index(h.iround(), k.iround(),
                                               l.iround())
        non_integral_indices = change_of_basis_op.apply_results_in_non_integral_indices(
            miller_indices)
        if non_integral_indices.size() > 0:
            print "Removing %i/%i reflections (change of basis results in non-integral indices)" % (
                non_integral_indices.size(), miller_indices.size())
        sel = flex.bool(miller_indices.size(), True)
        sel.set_selected(non_integral_indices, False)
        miller_indices_reindexed = change_of_basis_op.apply(
            miller_indices.select(sel))
        reflections['miller_index'].set_selected(sel, miller_indices_reindexed)
        reflections['miller_index'].set_selected(~sel, (0, 0, 0))

        print "Saving reindexed reflections to %s" % params.output.reflections
        easy_pickle.dump(params.output.reflections, reflections)
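The core reindexing step above is sgtbx.change_of_basis_op.apply on a flex.miller_index array. A self-contained sketch (the operator string here is an arbitrary example, not taken from the text):

from cctbx import sgtbx
from dials.array_family import flex

cb_op = sgtbx.change_of_basis_op('-b,a,c')
hkl = flex.miller_index([(1, 0, 0), (0, 2, 0), (1, 1, 1)])
print(list(cb_op.apply(hkl)))  # reindexed (h, k, l) tuples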
Example #59
class Script(object):
    ''' The integration program. '''
    def __init__(self):
        '''Initialise the script.'''
        from dials.util.options import OptionParser
        import libtbx.load_env

        # The script usage
        usage = "usage: %s [options] experiment.json" % libtbx.env.dispatcher_name

        # Create the parser
        self.parser = OptionParser(usage=usage,
                                   phil=phil_scope,
                                   epilog=help_message,
                                   read_experiments=True,
                                   read_reflections=True)

    def run(self):
        ''' Perform the integration. '''
        from dials.util.command_line import heading
        from dials.util.options import flatten_reflections, flatten_experiments
        from dials.util import log
        from time import time
        from libtbx.utils import Sorry

        # Check the number of arguments is correct
        start_time = time()

        # Parse the command line
        params, options = self.parser.parse_args(show_diff_phil=False)
        reference = flatten_reflections(params.input.reflections)
        experiments = flatten_experiments(params.input.experiments)
        if len(reference) == 0 and len(experiments) == 0:
            self.parser.print_help()
            return
        if len(reference) == 0:
            reference = None
        elif len(reference) != 1:
            raise Sorry('more than 1 reflection file was given')
        else:
            reference = reference[0]
        if len(experiments) == 0:
            raise Sorry('no experiment list was specified')

        # Save phil parameters
        if params.output.phil is not None:
            with open(params.output.phil, "w") as outfile:
                outfile.write(self.parser.diff_phil.as_str())

        # Configure logging
        log.config(params.verbosity,
                   info=params.output.log,
                   debug=params.output.debug_log)

        from dials.util.version import dials_version
        logger.info(dials_version())

        # Log the diff phil
        diff_phil = self.parser.diff_phil.as_str()
        if diff_phil != '':
            logger.info('The following parameters have been modified:\n')
            logger.info(diff_phil)

        # Print if we're using a mask
        for i, exp in enumerate(experiments):
            mask = exp.imageset.external_lookup.mask
            if mask.filename is not None:
                if mask.data:
                    logger.info('Using external mask: %s' % mask.filename)
                    logger.info(' Mask has %d pixels masked' %
                                mask.data.count(False))

        # Print the experimental models
        for i, exp in enumerate(experiments):
            logger.debug("Models for experiment %d" % i)
            logger.debug("")
            logger.debug(str(exp.beam))
            logger.debug(str(exp.detector))
            if exp.goniometer:
                logger.debug(str(exp.goniometer))
            if exp.scan:
                logger.debug(str(exp.scan))
            logger.debug(str(exp.crystal))

        logger.info("=" * 80)
        logger.info("")
        logger.info(heading("Initialising"))
        logger.info("")

        # Load the data
        reference, rubbish = self.process_reference(reference)
        logger.info("")

        # Initialise the integrator
        from dials.algorithms.profile_model.factory import ProfileModelFactory
        from dials.algorithms.integration.integrator import IntegratorFactory
        from dials.array_family import flex

        # Modify experiment list if scan range is set.
        experiments, reference = self.split_for_scan_range(
            experiments, reference, params.scan_range)

        # Predict the reflections
        logger.info("")
        logger.info("=" * 80)
        logger.info("")
        logger.info(heading("Predicting reflections"))
        logger.info("")
        predicted = flex.reflection_table.from_predictions_multi(
            experiments,
            dmin=params.prediction.d_min,
            dmax=params.prediction.d_max,
            margin=params.prediction.margin,
            force_static=params.prediction.force_static)

        # Match reference with predicted
        if reference:
            matched, reference, unmatched = predicted.match_with_reference(
                reference)
            assert (len(matched) == len(predicted))
            assert (matched.count(True) <= len(reference))
            if matched.count(True) == 0:
                raise Sorry('''
          Invalid input for reference reflections.
          Zero reference spots were matched to predictions
        ''')
            elif len(unmatched) != 0:
                logger.info('')
                logger.info('*' * 80)
                logger.info(
                    'Warning: %d reference spots were not matched to predictions'
                    % (len(unmatched)))
                logger.info('*' * 80)
                logger.info('')
            rubbish.extend(unmatched)

            if len(experiments) > 1:
                # filter out any experiments without matched reference reflections
                # f_: filtered
                from dxtbx.model.experiment.experiment_list import ExperimentList
                f_reference = flex.reflection_table()
                f_predicted = flex.reflection_table()
                f_rubbish = flex.reflection_table()
                f_experiments = ExperimentList()
                good_expt_count = 0

                def refl_extend(src, dest, eid):
                    tmp = src.select(src['id'] == eid)
                    tmp['id'] = flex.int(len(tmp), good_expt_count)
                    dest.extend(tmp)

                for expt_id, experiment in enumerate(experiments):
                    if len(reference.select(reference['id'] == expt_id)) != 0:
                        refl_extend(reference, f_reference, expt_id)
                        refl_extend(predicted, f_predicted, expt_id)
                        refl_extend(rubbish, f_rubbish, expt_id)
                        f_experiments.append(experiment)
                        good_expt_count += 1
                    else:
                        logger.info(
                            "Removing experiment %d: no reference reflections matched to predictions"
                            % expt_id)

                reference = f_reference
                predicted = f_predicted
                experiments = f_experiments
                rubbish = f_rubbish

        # Select a random sample of the predicted reflections
        if not params.sampling.integrate_all_reflections:
            predicted = self.sample_predictions(experiments, predicted, params)

        # Compute the profile model
        if (params.create_profile_model and reference is not None
                and "shoebox" in reference):
            experiments = ProfileModelFactory.create(params, experiments,
                                                     reference)
        else:
            for expr in experiments:
                if expr.profile is None:
                    raise Sorry('No profile information in experiment list')
                expr.profile.params = params.profile
        del reference

        # Compute the bounding box
        predicted.compute_bbox(experiments)

        # Create the integrator
        logger.info("")
        integrator = IntegratorFactory.create(params, experiments, predicted)

        # Integrate the reflections
        reflections = integrator.integrate()

        # Append rubbish data onto the end
        if rubbish is not None and params.output.include_bad_reference:
            mask = flex.bool(len(rubbish), True)
            rubbish.unset_flags(mask, rubbish.flags.integrated_sum)
            rubbish.unset_flags(mask, rubbish.flags.integrated_prf)
            rubbish.set_flags(mask, rubbish.flags.bad_reference)
            reflections.extend(rubbish)

        # Save the reflections
        self.save_reflections(reflections, params.output.reflections)
        self.save_experiments(experiments, params.output.experiments)

        # Write a report if requested
        if params.output.report is not None:
            integrator.report().as_file(params.output.report)

        # Print the total time taken
        logger.info("\nTotal time taken: %f" % (time() - start_time))

    def process_reference(self, reference):
        ''' Load the reference spots. '''
        from dials.array_family import flex
        from time import time
        from libtbx.utils import Sorry
        if reference is None:
            return None, None
        st = time()
        assert ("miller_index" in reference)
        assert ("id" in reference)
        logger.info('Processing reference reflections')
        logger.info(' read %d strong spots' % len(reference))
        mask = reference.get_flags(reference.flags.indexed)
        rubbish = reference.select(mask == False)
        if mask.count(False) > 0:
            reference.del_selected(mask == False)
            logger.info(' removing %d unindexed reflections' %
                        mask.count(False))
        if len(reference) == 0:
            raise Sorry('''
        Invalid input for reference reflections.
        Expected > %d indexed spots, got %d
      ''' % (0, len(reference)))
        mask = reference.get_flags(reference.flags.centroid_outlier)
        if mask.count(True) > 0:
            rubbish.extend(reference.select(mask))
            reference.del_selected(mask)
            logger.info(
                ' removing %d reflections marked as centroid outliers' %
                mask.count(True))
        mask = reference['miller_index'] == (0, 0, 0)
        if mask.count(True) > 0:
            rubbish.extend(reference.select(mask))
            reference.del_selected(mask)
            logger.info(' removing %d reflections with hkl (0,0,0)' %
                        mask.count(True))
        mask = reference['id'] < 0
        if mask.count(True) > 0:
            raise Sorry('''
        Invalid input for reference reflections.
        %d reference spots have an invalid experiment id
      ''' % mask.count(True))
        logger.info(' using %d indexed reflections' % len(reference))
        logger.info(' found %d junk reflections' % len(rubbish))
        logger.info(' time taken: %g' % (time() - st))
        return reference, rubbish

    def save_reflections(self, reflections, filename):
        ''' Save the reflections to file. '''
        from time import time
        st = time()
        logger.info('Saving %d reflections to %s' %
                    (len(reflections), filename))
        reflections.as_pickle(filename)
        logger.info(' time taken: %g' % (time() - st))

    def save_experiments(self, experiments, filename):
        ''' Save the profile model parameters. '''
        from time import time
        from dxtbx.model.experiment.experiment_list import ExperimentListDumper
        st = time()
        logger.info('Saving the experiments to %s' % filename)
        dump = ExperimentListDumper(experiments)
        with open(filename, "w") as outfile:
            outfile.write(dump.as_json())
        logger.info(' time taken: %g' % (time() - st))

    def sample_predictions(self, experiments, predicted, params):
        ''' Select a random sample of the predicted reflections to integrate. '''
        from dials.array_family import flex
        nref_per_degree = params.sampling.reflections_per_degree
        min_sample_size = params.sampling.minimum_sample_size
        max_sample_size = params.sampling.maximum_sample_size

        # this code is very similar to David's code in algorithms/refinement/reflection_manager.py!

        # constants
        from math import pi
        RAD2DEG = 180. / pi
        DEG2RAD = pi / 180.

        working_isel = flex.size_t()
        for iexp, exp in enumerate(experiments):

            sel = predicted['id'] == iexp
            isel = sel.iselection()
            #refs = self._reflections.select(sel)
            nrefs = sample_size = len(isel)

            # set sample size according to nref_per_degree (per experiment)
            if exp.scan and nref_per_degree:
                sweep_range_rad = exp.scan.get_oscillation_range(deg=False)
                width = abs(sweep_range_rad[1] - sweep_range_rad[0]) * RAD2DEG
                sample_size = int(nref_per_degree * width)
            else:
                sweep_range_rad = None

            # adjust sample size if below the chosen limit
            sample_size = max(sample_size, min_sample_size)

            # set maximum sample size if requested
            if max_sample_size:
                sample_size = min(sample_size, max_sample_size)

            # determine subset and collect indices
            if sample_size < nrefs:
                isel = isel.select(flex.random_selection(nrefs, sample_size))
            working_isel.extend(isel)

        # create subset
        return predicted.select(working_isel)

    def split_for_scan_range(self, experiments, reference, scan_range):
        ''' Update experiments when scan range is set. '''
        from dxtbx.model.experiment.experiment_list import ExperimentList
        from dxtbx.model.experiment.experiment_list import Experiment
        from dials.array_family import flex

        # Only do anything if the scan range is set
        if scan_range is not None and len(scan_range) > 0:

            # Ensure that all experiments have the same imageset and scan
            iset = [e.imageset for e in experiments]
            scan = [e.scan for e in experiments]
            assert (all(x == iset[0] for x in iset))
            assert (all(x == scan[0] for x in scan))

            # Get the imageset and scan
            iset = experiments[0].imageset
            scan = experiments[0].scan

            # Get the array range
            if scan is not None:
                frame10, frame11 = scan.get_array_range()
                assert (scan.get_num_images() == len(iset))
            else:
                frame10, frame11 = (0, len(iset))

            # Create the new lists
            new_experiments = ExperimentList()
            new_reference_all = reference.split_by_experiment_id()
            new_reference = flex.reflection_table()
            for i in range(len(experiments) - len(new_reference_all)):
                new_reference_all.append(flex.reflection_table())
            assert (len(new_reference_all) == len(experiments))

            # Loop through all the scan ranges and create a new experiment list with
            # the requested scan ranges.
            for frame00, frame01 in scan_range:
                assert (frame01 > frame00)
                assert (frame00 >= frame10)
                assert (frame01 <= frame11)
                index0 = frame00 - frame10
                index1 = index0 + (frame01 - frame00)
                assert (index0 < index1)
                assert (index0 >= 0)
                assert (index1 <= len(iset))
                new_iset = iset[index0:index1]
                if scan is None:
                    new_scan = None
                else:
                    new_scan = scan[index0:index1]
                for i, e1 in enumerate(experiments):
                    e2 = Experiment()
                    e2.beam = e1.beam
                    e2.detector = e1.detector
                    e2.goniometer = e1.goniometer
                    e2.crystal = e1.crystal
                    e2.imageset = new_iset
                    e2.scan = new_scan
                    new_reference_all[i]['id'] = flex.int(
                        len(new_reference_all[i]), len(new_experiments))
                    new_reference.extend(new_reference_all[i])
                    new_experiments.append(e2)
            experiments = new_experiments
            reference = new_reference

            # Print some information
            logger.info(
                'Modified experiment list to integrate over requested scan range'
            )
            for frame00, frame01 in scan_range:
                logger.info(' scan_range = %d -> %d' % (frame00, frame01))
            logger.info('')

        # Return the experiments
        return experiments, reference
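The excerpt does not show how this Script is invoked; a minimal entry point in the style of the other examples (an assumption, the original may differ) would be:

if __name__ == '__main__':
    script = Script()
    script.run()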
Example #60
def run(args):

    from dials.util.options import OptionParser
    from dials.util.options import flatten_experiments
    from dials.util.options import flatten_datablocks
    from dials.util.options import flatten_reflections
    import libtbx.load_env

    usage = '%s [options] experiments.json integrated.pickle' % (
        libtbx.env.dispatcher_name)

    parser = OptionParser(usage=usage,
                          phil=phil_scope,
                          read_experiments=True,
                          read_reflections=True,
                          check_format=False,
                          epilog=help_message)

    params, options = parser.parse_args(show_diff_phil=True)
    experiments = flatten_experiments(params.input.experiments)
    reflections = flatten_reflections(params.input.reflections)

    if len(experiments) == 0 or len(reflections) == 0:
        parser.print_help()
        exit()

    experiment = experiments[0]
    reflections = reflections[0]

    from dials.algorithms.profile_model.gaussian_rs import CoordinateSystem

    sel = reflections.get_flags(reflections.flags.integrated)
    reflections = reflections.select(sel)

    goniometer = experiment.goniometer
    beam = experiment.beam

    m2 = goniometer.get_rotation_axis()
    s0 = beam.get_s0()

    from dials.array_family import flex
    phi1 = flex.double()
    phi2 = flex.double()

    phi_range = reflections.compute_phi_range(
        goniometer.get_rotation_axis(), beam.get_s0(),
        experiment.profile.sigma_m(deg=False), experiment.profile.n_sigma())
    phi1, phi2 = phi_range.parts()

    scan = experiment.scan
    exposure_time = scan.get_exposure_times()[0]
    assert scan.get_exposure_times().all_eq(exposure_time)
    phi_start, phi_width = scan.get_oscillation(deg=False)
    phi_range_dead = phi_width * (params.dead_time / 1000) / exposure_time

    sel_good = flex.bool(len(reflections), True)

    start, end = scan.get_array_range()
    for i in range(start, end):
        phi_dead_start = phi_start + (i + 1) * phi_width - phi_range_dead
        phi_dead_end = phi_dead_start + phi_range_dead

        left = phi1.deep_copy()
        left.set_selected(left < phi_dead_start, phi_dead_start)

        right = phi2.deep_copy()
        right.set_selected(right > phi_dead_end, phi_dead_end)

        overlap = (right - left) / (phi2 - phi1)

        sel = overlap > params.reject_fraction

        sel_good.set_selected(sel, False)
        print('Rejecting %i reflections from image %i' % (sel.count(True), i))

    print('Keeping %i reflections (rejected %i)' % (sel_good.count(True),
                                                    sel_good.count(False)))

    from libtbx import easy_pickle
    easy_pickle.dump(params.output.reflections, reflections.select(sel_good))
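As a worked example of the dead-time arithmetic above: with a 0.1 s exposure, a 0.1 degree oscillation per image and dead_time = 2 (milliseconds, as the division by 1000 implies), phi_range_dead = 0.1 * (2 / 1000) / 0.1 = 0.002 degrees at the end of each image; any reflection whose phi extent overlaps that window by more than reject_fraction is rejected.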