Example #1
  def run(self):
    '''Execute the script.'''
    from dials.array_family import flex
    from dials.util.options import flatten_datablocks
    from dials.util.options import flatten_reflections
    from time import time
    from dials.util import log
    from logging import info, debug
    from libtbx.utils import Sorry
    start_time = time()

    # Parse the command line
    params, options = self.parser.parse_args(show_diff_phil=False)

    # Configure the logging
    log.config(
      params.verbosity,
      info=params.output.log,
      debug=params.output.debug_log)

    from dials.util.version import dials_version
    info(dials_version())

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil != '':
      info('The following parameters have been modified:\n')
      info(diff_phil)

    # Ensure we have a data block
    datablocks = flatten_datablocks(params.input.datablock)
    reflections = flatten_reflections(params.input.reflections)
    if len(datablocks) == 0 and len(reflections) == 0:
      self.parser.print_help()
      return
    elif len(datablocks) != len(reflections):
      raise Sorry("Must have same number of datablocks and reflection tables")

    # Combine the datablocks and reflections
    datablock, reflections = combine(
      datablocks,
      reflections,
      params)

    # Save the reflections to file
    info('\n' + '-' * 80)
    reflections.as_pickle(params.output.reflections)
    info('Saved {0} reflections to {1}'.format(
        len(reflections), params.output.reflections))

    # Save the datablock
    from dxtbx.datablock import DataBlockDumper
    info('Saving datablocks to {0}'.format(
      params.output.datablock))
    dump = DataBlockDumper(datablocks)
    dump.as_file(params.output.datablock)


    # Print the time
    info("Time Taken: %f" % (time() - start_time))
Example #2
def run(args):
  import libtbx.load_env
  from libtbx.utils import Sorry
  from dials.util import log
  from dials.util.options import OptionParser
  from dials.util.options import flatten_datablocks
  from dials.util.options import flatten_reflections
  from logging import info
  usage = "%s [options] datablock.json strong.pickle" % libtbx.env.dispatcher_name

  parser = OptionParser(
    usage=usage,
    phil=phil_scope,
    read_datablocks=True,
    read_reflections=True,
    check_format=False,
    epilog=help_message)

  params, options = parser.parse_args(show_diff_phil=False)
  datablocks = flatten_datablocks(params.input.datablock)
  reflections = flatten_reflections(params.input.reflections)

  if len(datablocks) == 0 or len(reflections) == 0:
    parser.print_help()
    exit(0)

  # Configure the logging
  log.config(
    info=params.output.log,
    debug=params.output.debug_log)

  # Log the diff phil
  diff_phil = parser.diff_phil.as_str()
  if diff_phil != '':
    info('The following parameters have been modified:\n')
    info(diff_phil)

  imagesets = []
  for datablock in datablocks:
    imagesets.extend(datablock.extract_imagesets())

  assert len(imagesets) > 0
  assert len(reflections) == len(imagesets)

  if params.scan_range is not None and len(params.scan_range) > 0:
    reflections = [
      filter_reflections_by_scan_range(refl, params.scan_range)
      for refl in reflections]

  dps_params = dps_phil_scope.extract()
  # for development, we want an exhaustive plot of beam probability map:
  dps_params.indexing.plot_search_scope = params.plot_search_scope
  dps_params.indexing.mm_search_scope = params.mm_search_scope

  new_detector, new_beam = discover_better_experimental_model(
    imagesets, reflections, params, dps_params, nproc=params.nproc,
    wide_search_binning=params.wide_search_binning)
  for imageset in imagesets:
    imageset.set_detector(new_detector)
    imageset.set_beam(new_beam)
  from dxtbx.serialize import dump
  dump.datablock(datablock, params.output.datablock)
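A plausible sketch of the filter_reflections_by_scan_range helper used above (hypothetical implementation; the real DIALS helper may differ):

def filter_reflections_by_scan_range(reflections, scan_range):
  '''Keep reflections whose observed z centroid (frame number) lies in
  one of the given (start, end) scan ranges.'''
  from dials.array_family import flex
  sel = flex.bool(len(reflections), False)
  # xyzobs.px.value holds the observed centroid in pixel/frame units
  _, _, z = reflections['xyzobs.px.value'].parts()
  for start, end in scan_range:
    sel = sel | ((z >= start) & (z < end))
  return reflections.select(sel)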
Example #3
def run(args):
  import libtbx.load_env
  from libtbx.utils import Sorry
  from dials.util import log
  from logging import info
  import cPickle as pickle
  usage = "%s [options] datablock.json strong.pickle" % \
    libtbx.env.dispatcher_name

  # Create the option parser
  parser = OptionParser(
    usage=usage,
    phil=phil_scope,
    read_reflections=True,
    read_datablocks=True,
    check_format=False,
    epilog=help_message)

  # Get the parameters
  params, options = parser.parse_args(show_diff_phil=False)

  # Configure the log
  log.config(
    params.verbosity,
    info='dials.find_hot_pixels.log',
    debug='dials.find_hot_pixels.debug.log')

  # Log the diff phil
  diff_phil = parser.diff_phil.as_str()
  if diff_phil != '':
    info('The following parameters have been modified:\n')
    info(diff_phil)

  datablocks = flatten_datablocks(params.input.datablock)
  reflections = flatten_reflections(params.input.reflections)

  if len(datablocks) == 0 and len(reflections) == 0:
    parser.print_help()
    exit(0)

  if len(datablocks) > 1:
    raise Sorry("Only one DataBlock can be processed at a time")
  else:
    imagesets = datablocks[0].extract_imagesets()
  if len(reflections) == 0:
    raise Sorry("No reflection lists found in input")
  if len(reflections) > 1:
    raise Sorry("Multiple reflections lists provided in input")

  assert len(reflections) == 1
  reflections = reflections[0]

  mask = hot_pixel_mask(imagesets[0], reflections)
  pickle.dump(mask, open(params.output.mask, 'wb'), pickle.HIGHEST_PROTOCOL)

  print 'Wrote hot pixel mask to %s' % params.output.mask
  return
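Reading the mask back is the mirror of the dump above (a minimal sketch; the file name stands in for whatever params.output.mask was set to):

import cPickle as pickle
with open('hot_mask.pickle', 'rb') as fh:
  mask = pickle.load(fh)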
Example #4
  def run(self):
    ''' Perform the integration. '''
    from dials.util.command_line import heading
    from dials.util.options import flatten_experiments
    from dials.util import log
    from time import time
    from libtbx.utils import Sorry
    from dials.array_family import flex

    # Parse the command line
    params, options = self.parser.parse_args(show_diff_phil=False)
    experiments = flatten_experiments(params.input.experiments)
    if len(experiments) == 0:
      self.parser.print_help()
      return

    assert len(experiments) == 1
    imageset = experiments[0].imageset
    beam = experiments[0].beam
    detector = experiments[0].detector
    goniometer = experiments[0].goniometer
    assert len(detector) == 1

    # Configure logging
    log.config()

    from dials.algorithms.background.gmodel import PolarTransform
    import cPickle as pickle
    model = pickle.load(open(params.model, 'rb'))
    image = model.data(0)
    mask = flex.bool(image.accessor(), True)

    # Do the transformation
    transform = PolarTransform(beam, detector[0], goniometer)
    result = transform.to_polar(image, mask)
    data = result.data()
    mask = result.mask()

    pickle.dump((data, mask), open(params.output.data, "wb"))

    from matplotlib import pylab
    vmax = sorted(list(data))[int(0.99 * len(data))]
    figure = pylab.figure(figsize=(6,4))
    pylab.imshow(
      data.as_numpy_array(),
      interpolation='none',
      vmin=0,
      vmax=vmax)
    ax1 = pylab.gca()
    ax1.get_xaxis().set_visible(False)
    ax1.get_yaxis().set_visible(False)
    cb = pylab.colorbar()
    cb.ax.tick_params(labelsize=8)
    logger.info("Saving polar model %s" % (params.output.image))
    pylab.savefig("%s" % (params.output.image), dpi=600, bbox_inches='tight')
Example #5
File: scale.py Project: dwpaley/dials
def run(args=None, phil=phil_scope):  # type: (List[str], phil.scope) -> None
    """Run the scaling from the command-line."""
    usage = """Usage: dials.scale integrated.refl integrated.expt
[integrated.refl(2) integrated.expt(2) ....] [options]"""

    parser = OptionParser(
        usage=usage,
        read_experiments=True,
        read_reflections=True,
        phil=phil,
        check_format=False,
        epilog=__doc__,
    )
    params, options = parser.parse_args(args=args, show_diff_phil=False)

    if not params.input.experiments or not params.input.reflections:
        parser.print_help()
        sys.exit()

    reflections, experiments = reflections_and_experiments_from_files(
        params.input.reflections, params.input.experiments)

    log.config(verbosity=options.verbose, logfile=params.output.log)
    logger.info(dials_version())

    diff_phil = parser.diff_phil.as_str()
    if diff_phil:
        logger.info("The following parameters have been modified:\n%s",
                    diff_phil)

    try:
        scaled_experiments, joint_table = run_scaling(params, experiments,
                                                      reflections)
    except ValueError as e:
        raise Sorry(e)
    else:
        # Note, cross validation mode does not produce scaled datafiles
        if scaled_experiments and joint_table:
            logger.info("Saving the experiments to %s",
                        params.output.experiments)
            scaled_experiments.as_file(params.output.experiments)
            logger.info("Saving the scaled reflections to %s",
                        params.output.reflections)
            joint_table.as_file(params.output.reflections)

            if params.output.unmerged_mtz:
                _export_unmerged_mtz(params, scaled_experiments, joint_table)

            if params.output.merged_mtz:
                _export_merged_mtz(params, scaled_experiments, joint_table)

    logger.info(
        "See dials.github.io/dials_scale_user_guide.html for more info on scaling options"
    )
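The entry point can also be driven programmatically from another script (a sketch; assumes the scaled input files exist and that output.log is a valid parameter here):

run(args=["integrated.refl", "integrated.expt", "output.log=scale.log"])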
Example #6
def run(args=None):
    usage = "dials.estimate_resolution [options] (scaled.expt scaled.refl | scaled_unmerged.mtz)"

    parser = OptionParser(
        usage=usage,
        phil=phil_scope,
        read_reflections=True,
        read_experiments=True,
        check_format=False,
        epilog=__doc__,
    )

    params, options, unhandled = parser.parse_args(args=args,
                                                   return_unhandled=True,
                                                   show_diff_phil=True)

    reflections, experiments = reflections_and_experiments_from_files(
        params.input.reflections, params.input.experiments)
    if (not reflections or not experiments) and not unhandled:
        parser.print_help()
        return

    if reflections and experiments and unhandled:
        sys.exit(
            "Must provide either scaled unmerged mtz OR dials-format scaled reflections and experiments files"
        )

    # Configure the logging
    log.config(logfile=params.output.log, verbosity=options.verbose)
    logger.info(dials_version())

    if len(unhandled) == 1:
        scaled_unmerged = unhandled[0]
        m = resolution_analysis.Resolutionizer.from_unmerged_mtz(
            scaled_unmerged, params.resolution)
    else:
        reflections = parse_multiple_datasets(reflections)
        if len(experiments) != len(reflections):
            sys.exit(
                f"Mismatched number of experiments and reflection tables found: {len(experiments)} & {len(reflections)}."
            )
        m = resolution_analysis.Resolutionizer.from_reflections_and_experiments(
            reflections, experiments, params.resolution)

    plots = m.resolution_auto()

    if params.output.html:
        output_html_report(plots, params.output.html)

    if params.output.json:
        with open(params.output.json, "w") as fh:
            json.dump(plots, fh)

    return plots
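Because run() returns the plots dictionary, it can be consumed directly rather than through the html/json outputs (a sketch; the available keys depend on the resolution_analysis implementation):

plots = run(["scaled.expt", "scaled.refl"])
if plots:
    print(sorted(plots))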
Example #7
    def run(self):
        """
        Perform the integration.

        """
        from time import time

        # Check the number of arguments is correct
        start_time = time()

        # Parse the command line
        params, options = self.parser.parse_args(show_diff_phil=False)
        reflections = flatten_reflections(params.input.reflections)
        experiments = flatten_experiments(params.input.experiments)
        if len(reflections) == 0 or len(experiments) == 0:
            self.parser.print_help()
            return
        elif len(reflections) != 1:
            raise Sorry("more than 1 reflection file was given")
        elif len(experiments) == 0:
            raise Sorry("no experiment list was specified")
        reflections = reflections[0]

        # Configure logging
        if __name__ == "__main__":
            # Configure the logging
            log.config(verbosity=options.verbose, logfile="dials.potato.log")

        # Log the diff phil
        diff_phil = self.parser.diff_phil.as_str()
        if diff_phil != "":
            logger.info("The following parameters have been modified:\n")
            logger.info(diff_phil)

        # Construct the integrator
        integrator = Integrator(experiments, reflections, params)

        # Do cycles of indexing and refinement
        for i in range(params.refinement.n_macro_cycles):
            integrator.reindex_strong_spots()
            integrator.integrate_strong_spots()
            integrator.refine()

        # Do the integration
        integrator.predict()
        integrator.integrate()

        # Get the reflections
        reflections = integrator.reflections
        experiments = integrator.experiments

        # Save the reflections
        reflections.as_pickle(params.output.reflections)
        experiments.as_file(params.output.experiments)
Example #8
def run(args=None, phil=phil_scope):
    """Run the command-line script."""

    usage = "dials.compute_delta_cchalf [options] scaled.expt scaled.refl"

    parser = ArgumentParser(
        usage=usage,
        phil=phil,
        epilog=help_message,
        read_experiments=True,
        read_reflections=True,
        check_format=False,
    )

    params, _ = parser.parse_args(args=args, show_diff_phil=False)

    log.config(logfile=params.output.log)

    reflections, experiments = reflections_and_experiments_from_files(
        params.input.reflections, params.input.experiments)

    if not experiments and not reflections:
        if not params.input.mtzfile:
            parser.print_help()
            return
        else:
            try:
                script = CCHalfFromMTZ(params, params.input.mtzfile)
            except ValueError as e:
                sys.exit(f"Error: {e}")
    else:
        if not experiments or not reflections:
            parser.print_help()
            return
        else:
            if len(reflections) != 1:
                sys.exit("Only one reflection table can be provided")
            n_datasets = len(set(reflections[0]["id"]).difference({-1}))
            if n_datasets != len(experiments):
                sys.exit(
                    "The number of experiments (%s) does not match the number "
                    "of datasets in the reflection table (%s)"
                    % (len(experiments), n_datasets)
                )
            try:
                script = CCHalfFromDials(params, experiments, reflections[0])
            except ValueError as e:
                sys.exit(f"Error: {e}")

    script.run()
    script.output()
Example #9
    def run(self):
        '''Execute the script.'''
        from dials.array_family import flex
        from dials.util.options import flatten_datablocks
        from dials.util.options import flatten_reflections
        from time import time
        from dials.util import log
        from libtbx.utils import Sorry
        start_time = time()

        # Parse the command line
        params, options = self.parser.parse_args(show_diff_phil=False)

        # Configure the logging
        log.config(params.verbosity,
                   info=params.output.log,
                   debug=params.output.debug_log)

        from dials.util.version import dials_version
        logger.info(dials_version())

        # Log the diff phil
        diff_phil = self.parser.diff_phil.as_str()
        if diff_phil != '':
            logger.info('The following parameters have been modified:\n')
            logger.info(diff_phil)

        # Ensure we have a data block
        datablocks = flatten_datablocks(params.input.datablock)
        reflections = flatten_reflections(params.input.reflections)
        if len(datablocks) == 0 and len(reflections) == 0:
            self.parser.print_help()
            return
        elif len(datablocks) != len(reflections):
            raise Sorry(
                "Must have same number of datablocks and reflection tables")

        # Combine the datablocks and reflections
        datablock, reflections = combine(datablocks, reflections, params)

        # Save the reflections to file
        logger.info('\n' + '-' * 80)
        reflections.as_pickle(params.output.reflections)
        logger.info('Saved {0} reflections to {1}'.format(
            len(reflections), params.output.reflections))

        # Save the datablock
        from dxtbx.datablock import DataBlockDumper
        logger.info('Saving datablocks to {0}'.format(params.output.datablock))
        dump = DataBlockDumper(datablocks)
        dump.as_file(params.output.datablock)

        # Print the time
        logger.info("Time Taken: %f" % (time() - start_time))
Example #10
def run(args=None, phil=phil_scope):  # type: (List[str], phil.scope) -> None
    """Run the command-line script."""

    usage = "dials.damage_analysis [options] scaled.expt scaled.refl | scaled.mtz"

    parser = OptionParser(
        usage=usage,
        phil=phil,
        epilog=__doc__,
        read_experiments=True,
        read_reflections=True,
        check_format=False,
    )

    params, _, unhandled = parser.parse_args(args=args,
                                             show_diff_phil=False,
                                             return_unhandled=True)

    log.config(logfile=params.output.log)
    logger.info(dials_version())

    reflections, experiments = reflections_and_experiments_from_files(
        params.input.reflections, params.input.experiments)
    try:
        if experiments and reflections:
            if len(reflections) != 1:
                raise ValueError(
                    "A single input reflections datafile is required")
            if "inverse_scale_factor" not in reflections[0]:
                raise KeyError("Input data must be scaled.")
            script = PychefRunner.from_dials_data_files(
                params,
                experiments,
                reflections[0],
            )

        elif unhandled and os.path.isfile(unhandled[0]):
            try:
                mtz_object = mtz.object(file_name=unhandled[0])
            except RuntimeError:
                # The file could not be read as an MTZ file, so fail
                # with a clearer combined message
                raise ValueError(
                    "Input file cannot be read as a valid experiment/reflection file or MTZ file"
                )
            else:
                script = PychefRunner.from_mtz(params, mtz_object)
        else:
            parser.print_help()
            raise ValueError("Suitable input datafiles not provided")
    except (ValueError, KeyError) as e:
        sys.exit("Error: %s" % str(e))
    else:
        script.run()
        script.make_html_report(params.output.html, params.output.json)
Example #11
File: symmetry.py Project: hattne/dials
def run(args):
    usage = "dials.symmetry [options] models.expt observations.refl"

    parser = OptionParser(
        usage=usage,
        phil=phil_scope,
        read_reflections=True,
        read_experiments=True,
        check_format=False,
        epilog=help_message,
    )

    params, _, args = parser.parse_args(args=args,
                                        show_diff_phil=False,
                                        return_unhandled=True)

    # Configure the logging
    log.config(params.verbosity,
               info=params.output.log,
               debug=params.output.debug_log)

    from dials.util.version import dials_version

    logger.info(dials_version())

    # Log the diff phil
    diff_phil = parser.diff_phil.as_str()
    if diff_phil != "":
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)

    if params.seed is not None:
        flex.set_random_seed(params.seed)
        random.seed(params.seed)

    if not params.input.experiments or not params.input.reflections:
        parser.print_help()
        sys.exit()

    experiments = flatten_experiments(params.input.experiments)
    reflections = flatten_reflections(params.input.reflections)

    reflections = parse_multiple_datasets(reflections)
    if len(experiments) != len(reflections):
        raise Sorry(
            "Mismatched number of experiments and reflection tables found: %s & %s."
            % (len(experiments), len(reflections)))
    try:
        experiments, reflections = assign_unique_identifiers(
            experiments, reflections)
        symmetry(experiments, reflections, params=params)
    except ValueError as e:
        raise Sorry(e)
Example #12
def run(args=None):
    """Run the script from the command-line."""
    usage = """Usage: dials.systematic_absences scaled.refl scaled.expt [options]"""

    parser = OptionParser(
        usage=usage,
        read_experiments=True,
        read_reflections=True,
        phil=phil_scope,
        check_format=False,
        epilog=help_message,
    )
    params, _ = parser.parse_args(args=args, show_diff_phil=False)

    if not params.input.experiments or not params.input.reflections:
        parser.print_help()
        sys.exit()

    reflections = flatten_reflections(params.input.reflections)
    experiments = flatten_experiments(params.input.experiments)

    log.config(verbosity=1, info=params.output.log)
    logger.info(dials_version())

    diff_phil = parser.diff_phil.as_str()
    if diff_phil != "":
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)

    # All data must have been scaled with dials: only one reflection table
    # and one matching experiment list, scaled together, can be given.
    if len(reflections) != 1:
        raise Sorry("Only one reflection table can be given as input.")

    if (not "intensity.scale.value" in reflections[0]) and (
            not "intensity.prf.value" in reflections[0]):
        raise Sorry(
            "Unable to find integrated or scaled reflections in the reflection table."
        )

    try:
        run_sys_abs_checks(experiments, reflections, params.d_min)
    except ValueError as e:
        raise Sorry(e)

    if params.output.html:
        ScrewAxisObserver().generate_html_report(params.output.html)

    if params.output.experiments:
        dump = ExperimentListDumper(experiments)
        with open(params.output.experiments, "w") as outfile:
            outfile.write(dump.as_json(split=True))
Example #13
    def run(self):
        ''' Perform the integration. '''
        from dials.util.options import flatten_experiments
        from dials.util import log
        from dials.array_family import flex

        # Parse the command line
        params, options = self.parser.parse_args(show_diff_phil=False)
        experiments = flatten_experiments(params.input.experiments)
        if len(experiments) == 0:
            self.parser.print_help()
            return

        assert len(experiments) == 1
        imageset = experiments[0].imageset
        beam = experiments[0].beam
        detector = experiments[0].detector
        goniometer = experiments[0].goniometer
        assert len(detector) == 1

        # Configure logging
        log.config()

        from dials.algorithms.background.gmodel import PolarTransform
        import cPickle as pickle
        model = pickle.load(open(params.model, 'rb'))
        image = model.data(0)
        mask = flex.bool(image.accessor(), True)

        # Do the transformation
        transform = PolarTransform(beam, detector[0], goniometer)
        result = transform.to_polar(image, mask)
        data = result.data()
        mask = result.mask()

        pickle.dump((data, mask), open(params.output.data, "wb"))

        from matplotlib import pylab
        vmax = sorted(list(data))[int(0.99 * len(data))]
        figure = pylab.figure(figsize=(6, 4))
        pylab.imshow(data.as_numpy_array(),
                     interpolation='none',
                     vmin=0,
                     vmax=vmax)
        ax1 = pylab.gca()
        ax1.get_xaxis().set_visible(False)
        ax1.get_yaxis().set_visible(False)
        cb = pylab.colorbar()
        cb.ax.tick_params(labelsize=8)
        logger.info("Saving polar model %s" % (params.output.image))
        pylab.savefig("%s" % (params.output.image),
                      dpi=600,
                      bbox_inches='tight')
Example #14
def run(args=None):
    import six.moves.cPickle as pickle

    from dials.util import Sorry, log

    usage = "dials.find_hot_pixels [options] models.expt strong.refl"

    # Create the option parser
    parser = OptionParser(
        usage=usage,
        phil=phil_scope,
        read_reflections=True,
        read_experiments=True,
        check_format=False,
        epilog=help_message,
    )

    # Get the parameters
    params, options = parser.parse_args(args, show_diff_phil=False)

    # Configure the log
    log.config(verbosity=options.verbose, logfile="dials.find_hot_pixels.log")

    # Log the diff phil
    diff_phil = parser.diff_phil.as_str()
    if diff_phil != "":
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)

    reflections, experiments = reflections_and_experiments_from_files(
        params.input.reflections, params.input.experiments)

    if len(experiments) == 0 and len(reflections) == 0:
        parser.print_help()
        exit(0)

    if len(experiments) > 1:
        raise Sorry("Only one experiment can be processed at a time")
    else:
        imagesets = experiments.imagesets()
    if len(reflections) == 0:
        raise Sorry("No reflection lists found in input")
    if len(reflections) > 1:
        raise Sorry("Multiple reflections lists provided in input")

    assert len(reflections) == 1
    reflections = reflections[0]

    mask = hot_pixel_mask(imagesets[0], reflections)
    with open(params.output.mask, "wb") as fh:
        pickle.dump(mask, fh, pickle.HIGHEST_PROTOCOL)

    print("Wrote hot pixel mask to %s" % params.output.mask)
Example #15
File: symmetry.py Project: dwpaley/dials
def run(args=None):
    """Run symmetry analysis from the command-line."""
    usage = "dials.symmetry [options] models.expt observations.refl"

    parser = OptionParser(
        usage=usage,
        phil=phil_scope,
        read_reflections=True,
        read_experiments=True,
        check_format=False,
        epilog=help_message,
    )

    params, options, args = parser.parse_args(
        args=args, show_diff_phil=False, return_unhandled=True
    )

    # Configure the logging
    log.config(verbosity=options.verbose, logfile=params.output.log)

    logger.info(dials_version())

    # Log the diff phil
    diff_phil = parser.diff_phil.as_str()
    if diff_phil != "":
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)

    if params.seed is not None:
        flex.set_random_seed(params.seed)
        random.seed(params.seed)

    if not params.input.experiments or not params.input.reflections:
        parser.print_help()
        sys.exit()

    reflections, experiments = reflections_and_experiments_from_files(
        params.input.reflections, params.input.experiments
    )

    reflections = parse_multiple_datasets(reflections)

    if len(experiments) != len(reflections):
        sys.exit(
            "Mismatched number of experiments and reflection tables found: %s & %s."
            % (len(experiments), len(reflections))
        )
    try:
        experiments, reflections = assign_unique_identifiers(experiments, reflections)
        symmetry(experiments, reflections, params=params)
    except ValueError as e:
        sys.exit(e)
Example #16
File: tst.py Project: kek-pf-mx/dials
def run(args):

    import random

    interp = phil_scope.command_line_argument_interpreter()
    params, unhandled = interp.process_and_fetch(
        args, custom_processor='collect_remaining')
    params = params.extract()

    if params.seed is not None:
        flex.set_random_seed(params.seed)
        random.seed(params.seed)

    from dials.util import log
    # Configure the logging
    log.config()

    datasets, expected_reindexing_ops = generate_test_data(
        space_group=params.space_group.group(),
        lattice_group=params.lattice_group,
        unit_cell=params.unit_cell,
        unit_cell_volume=params.unit_cell_volume,
        seed=params.seed,
        d_min=params.d_min,
        sigma=params.sigma,
        sample_size=params.sample_size,
        map_to_p1=params.map_to_p1,
        twin_fractions=params.twin_fractions)

    result = analyse_datasets(datasets, params)

    space_groups = {}
    reindexing_ops = {}
    for dataset_id in result.reindexing_ops.keys():
        if 0 in result.reindexing_ops[dataset_id]:
            cb_op = result.reindexing_ops[dataset_id][0]
            reindexing_ops.setdefault(cb_op, [])
            reindexing_ops[cb_op].append(dataset_id)
        if dataset_id in result.space_groups:
            space_groups.setdefault(result.space_groups[dataset_id], [])
            space_groups[result.space_groups[dataset_id]].append(dataset_id)

    logger.info('Space groups:')
    for sg, datasets in space_groups.items():
        logger.info(str(sg.info().reference_setting()))
        logger.info(datasets)

    logger.info('Reindexing operators:')
    for cb_op, datasets in reindexing_ops.items():
        logger.info(cb_op)
        logger.info(datasets)
Example #17
def run(args):
  import libtbx.load_env
  from dials.array_family import flex
  from dials.util import log
  from dials.util.version import dials_version

  usage = "%s [options] experiment.json indexed.pickle" % \
    libtbx.env.dispatcher_name

  parser = OptionParser(
    usage=usage,
    phil=phil_scope,
    read_reflections=True,
    read_experiments=True,
    check_format=False,
    epilog=help_message)

  params, options = parser.parse_args(show_diff_phil=True)

  # Configure the logging
  log.config(info=params.output.log, debug=params.output.debug_log)
  logger.info(dials_version())

  reflections = flatten_reflections(params.input.reflections)
  experiments = flatten_experiments(params.input.experiments)
  if len(reflections) == 0 or len(experiments) == 0:
    parser.print_help()
    return
  assert len(reflections) == 1
  assert len(experiments) == 1
  experiment = experiments[0]
  reflections = reflections[0]

  # remove reflections with 0, 0, 0 index
  zero = (reflections['miller_index'] == (0, 0, 0))
  logger.info('Removing %d unindexed reflections' % zero.count(True))
  reflections = reflections.select(~zero)

  h, k, l = reflections['miller_index'].as_vec3_double().parts()

  h = h.iround()
  k = k.iround()
  l = l.iround()

  logger.info('Range on h: %d to %d' % (flex.min(h), flex.max(h)))
  logger.info('Range on k: %d to %d' % (flex.min(k), flex.max(k)))
  logger.info('Range on l: %d to %d' % (flex.min(l), flex.max(l)))

  test_P1_crystal_indexing(reflections, experiment, params)
  test_crystal_pointgroup_symmetry(reflections, experiment, params)
Example #19
    def run(self, experiments, reflections):
        from dials.util import log
        self.logger.log_step_time("INTEGRATE")

        logfile = os.path.splitext(
            self.logger.rank_log_file_path)[0] + "_integrate.log"
        log.config(logfile=logfile)
        processor = integrate_only_processor(self.params)

        # Re-generate the image sets using their format classes so we can read the raw data
        # Integrate the experiments one at a time to not use up memory
        all_integrated_expts = ExperimentList()
        all_integrated_refls = flex.reflection_table()
        current_imageset = None
        current_imageset_path = None
        for expt_id, expt in enumerate(experiments):
            assert len(expt.imageset.paths()) == 1 and len(expt.imageset) == 1
            self.logger.log("Starting integration experiment %d" % expt_id)
            refls = reflections.select(
                reflections['exp_id'] == expt.identifier)
            if expt.imageset.paths()[0] != current_imageset_path:
                current_imageset_path = expt.imageset.paths()[0]
                current_imageset = ImageSetFactory.make_imageset(
                    expt.imageset.paths())
            idx = expt.imageset.indices()[0]
            expt.imageset = current_imageset[idx:idx + 1]
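            # Re-map identifiers so this experiment's reflections appear as id 0,
            # allowing it to be integrated in isolation from the rest of the set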
            idents = refls.experiment_identifiers()
            del idents[expt_id]
            idents[0] = expt.identifier
            refls['id'] = flex.int(len(refls), 0)

            try:
                integrated = processor.integrate(
                    experiments[expt_id:expt_id + 1], refls)
            except RuntimeError:
                self.logger.log("Error integrating expt %d" % expt_id)
                continue

            all_integrated_expts.append(expt)
            idents = integrated.experiment_identifiers()
            del idents[0]
            idents[expt_id] = expt.identifier
            integrated['id'] = flex.int(len(integrated),
                                        len(all_integrated_expts) - 1)
            all_integrated_refls.extend(integrated)

        self.logger.log("Integration done, %d experiments, %d reflections" %
                        (len(all_integrated_expts), len(all_integrated_refls)))
        return all_integrated_expts, all_integrated_refls
Example #20
def run(phil=working_phil, args=None):
    usage = "dials.index [options] models.expt strong.refl"

    parser = OptionParser(
        usage=usage,
        phil=phil,
        read_reflections=True,
        read_experiments=True,
        check_format=False,
        epilog=help_message,
    )

    params, options = parser.parse_args(args=args, show_diff_phil=False)

    # Configure the logging
    log.config(verbosity=options.verbose, logfile=params.output.log)
    logger.info(dials_version())

    # Log the diff phil
    diff_phil = parser.diff_phil.as_str()
    if diff_phil != "":
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)

    experiments = flatten_experiments(params.input.experiments)
    reflections = flatten_reflections(params.input.reflections)

    if len(experiments) == 0:
        parser.print_help()
        return

    try:
        indexed_experiments, indexed_reflections = index(
            experiments, reflections, params)
    except (DialsIndexError, ValueError) as e:
        sys.exit(str(e))

    # Save experiments
    if params.output.split_experiments:
        logger.info("Splitting experiments before output")
        indexed_experiments = ExperimentList(
            [copy.deepcopy(re) for re in indexed_experiments])
    logger.info("Saving refined experiments to %s" % params.output.experiments)
    assert indexed_experiments.is_consistent()
    indexed_experiments.as_file(params.output.experiments)

    # Save reflections
    logger.info("Saving refined reflections to %s" % params.output.reflections)
    indexed_reflections.as_msgpack_file(filename=params.output.reflections)
Example #21
File: import.py Project: biochem-fan/dials
  def run(self):
    ''' Parse the options. '''
    from dxtbx.datablock import DataBlockFactory
    from dxtbx.datablock import DataBlockTemplateImporter
    from dials.util.options import flatten_datablocks
    from dials.util import log
    from logging import info, debug
    import cPickle as pickle
    from libtbx.utils import Sorry

    # Parse the command line arguments
    params, options = self.parser.parse_args(show_diff_phil=False)
    datablocks = flatten_datablocks(params.input.datablock)

    # Configure logging
    log.config(
      params.verbosity,
      info=params.output.log,
      debug=params.output.debug_log)
    from dials.util.version import dials_version
    info(dials_version())

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil != '':
      info('The following parameters have been modified:\n')
      info(diff_phil)

    # Load reference geometry
    reference_detector = None
    reference_beam = None
    if params.input.reference_geometry is not None:
      from dxtbx.serialize import load
      try:
        experiments = load.experiment_list(
          params.input.reference_geometry, check_format=False)
        assert len(experiments.detectors()) == 1
        assert len(experiments.beams()) == 1
        reference_detector = experiments.detectors()[0]
        reference_beam = experiments.beams()[0]
      except Exception, e:
        datablock = load.datablock(params.input.reference_geometry)
        assert len(datablock) == 1
        imageset = datablock[0].extract_imagesets()[0]
        reference_detector = imageset.get_detector()
        reference_beam = imageset.get_beam()
Example #22
    def run(self):
        ''' Run the script. '''
        from dials.util.masking import MaskGenerator
        from dials.util.options import flatten_datablocks
        from libtbx.utils import Sorry
        import six.moves.cPickle as pickle
        from dials.util import log
        from dxtbx.format.image import ImageBool

        # Parse the command line arguments
        params, options = self.parser.parse_args(show_diff_phil=True)
        datablocks = flatten_datablocks(params.input.datablock)

        # Configure logging
        log.config()

        # Check number of args
        if len(datablocks) == 0:
            self.parser.print_help()
            return

        if len(datablocks) != 1:
            raise Sorry('exactly 1 datablock must be specified')
        datablock = datablocks[0]
        imagesets = datablock.extract_imagesets()
        if len(imagesets) != 1:
            raise Sorry('datablock must contain exactly 1 imageset')
        imageset = imagesets[0]

        # Generate the mask
        generator = MaskGenerator(params)
        mask = generator.generate(imageset)

        # Save the mask to file
        print("Writing mask to %s" % params.output.mask)
        with open(params.output.mask, "wb") as fh:
            pickle.dump(mask, fh)

        # Save the datablock
        if params.output.datablock is not None:
            imageset.external_lookup.mask.data = ImageBool(mask)
            imageset.external_lookup.mask.filename = params.output.mask
            from dxtbx.datablock import DataBlockDumper
            print('Saving datablocks to {0}'.format(params.output.datablock))
            dump = DataBlockDumper(datablocks)
            dump.as_file(params.output.datablock)
Example #23
def run(args):
    usage = "dials.check_indexing_symmetry [options] indexed.expt indexed.refl"

    parser = OptionParser(
        usage=usage,
        phil=phil_scope,
        read_reflections=True,
        read_experiments=True,
        check_format=False,
        epilog=help_message,
    )

    params, options = parser.parse_args(show_diff_phil=True)

    # Configure the logging
    log.config(logfile=params.output.log)
    logger.info(dials_version())

    reflections, experiments = reflections_and_experiments_from_files(
        params.input.reflections, params.input.experiments)
    if len(reflections) == 0 or len(experiments) == 0:
        parser.print_help()
        return
    assert len(reflections) == 1
    assert len(experiments) == 1
    experiment = experiments[0]
    reflections = reflections[0]

    # remove reflections with 0, 0, 0 index
    zero = reflections["miller_index"] == (0, 0, 0)
    logger.info("Removing %d unindexed reflections" % zero.count(True))
    reflections = reflections.select(~zero)

    h, k, l = reflections["miller_index"].as_vec3_double().parts()

    h = h.iround()
    k = k.iround()
    l = l.iround()

    logger.info("Range on h: %d to %d" % (flex.min(h), flex.max(h)))
    logger.info("Range on k: %d to %d" % (flex.min(k), flex.max(k)))
    logger.info("Range on l: %d to %d" % (flex.min(l), flex.max(l)))

    test_P1_crystal_indexing(reflections, experiment, params)
    test_crystal_pointgroup_symmetry(reflections, experiment, params)
Example #24
def run():
    """Run the command line filtering script."""

    flags = list(flex.reflection_table.flags.names.items())
    flags.sort(key=itemgetter(0))

    phil_scope = parse(phil_str, process_includes=True)

    # The script usage
    usage = "usage: dials.filter_reflections [options] experiment.expt"

    # Create the parser
    parser = OptionParser(
        usage=usage,
        phil=phil_scope,
        epilog=help_message,
        read_reflections=True,
        read_experiments=True,
        check_format=False,
    )

    params, options = parser.parse_args(show_diff_phil=True)
    reflections = flatten_reflections(params.input.reflections)
    experiments = flatten_experiments(params.input.experiments)

    log.config(verbosity=options.verbose)

    if not reflections:
        parser.print_help()
        raise Sorry("No valid reflection file given")
    if len(reflections) != 1:
        parser.print_help()
        raise Sorry("Exactly 1 reflection file must be specified")
    reflections = reflections[0]

    # Check if any filter has been set using diff_phil
    filter_def = [
        o for o in parser.diff_phil.objects
        if o.name not in ["input", "output"]
    ]
    if not filter_def:
        print("No filter specified. Performing analysis instead.")
        run_analysis(flags, reflections)
    else:
        run_filtering(params, experiments, reflections)
Example #25
def run(args=None, phil=working_phil):
    usage = "dials.index [options] models.expt strong.refl"

    parser = ArgumentParser(
        usage=usage,
        phil=phil,
        read_reflections=True,
        read_experiments=True,
        check_format=False,
        epilog=help_message,
    )

    params, options = parser.parse_args(args=args, show_diff_phil=False)

    # Configure the logging
    log.config(verbosity=options.verbose, logfile=params.output.log)
    logger.info(dials_version())

    # Log the diff phil
    diff_phil = parser.diff_phil.as_str()
    if diff_phil != "":
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)

    reflections, experiments = reflections_and_experiments_from_files(
        params.input.reflections, params.input.experiments)

    if len(experiments) == 0:
        parser.print_help()
        return

    try:
        indexed_experiments, indexed_reflections = index(
            experiments, reflections, params)
    except (DialsIndexError, ValueError) as e:
        sys.exit(str(e))

    # Save experiments
    logger.info("Saving refined experiments to %s", params.output.experiments)
    assert indexed_experiments.is_consistent()
    indexed_experiments.as_file(params.output.experiments)

    # Save reflections
    logger.info("Saving refined reflections to %s", params.output.reflections)
    indexed_reflections.as_file(filename=params.output.reflections)
Example #26
def run(args):

    from dials.util.options import OptionParser
    from dials.util.options import flatten_experiments
    import libtbx.load_env

    usage = "%s [options] models.expt" % (libtbx.env.dispatcher_name)

    parser = OptionParser(
        usage=usage,
        phil=phil_scope,
        read_experiments=True,
        check_format=False,
        epilog=help_message,
    )

    from dials.util.version import dials_version

    logger.info(dials_version())

    params, options = parser.parse_args(show_diff_phil=False)
    experiments = flatten_experiments(params.input.experiments)

    if len(experiments) == 0:
        parser.print_help()
        exit(0)

    log.config()

    # Log the diff phil
    diff_phil = parser.diff_phil.as_str()
    if diff_phil != "":
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)

    uc_params = uc_params_from_experiments(experiments)
    panel_distances = panel_distances_from_experiments(experiments)
    outliers = outlier_selection(uc_params, iqr_ratio=params.iqr_ratio)
    plot_uc_histograms(uc_params, outliers, params.steps_per_angstrom)
    plot_uc_vs_detector_distance(
        uc_params, panel_distances, outliers, params.steps_per_angstrom
    )
    plot_number_of_crystals(experiments)
Example #27
    def run(self):
        """
        Perform the integration.

        """
        from time import time

        # Check the number of arguments is correct
        start_time = time()

        # Parse the command line
        params, options = self.parser.parse_args(show_diff_phil=False)
        reflections = flatten_reflections(params.input.reflections)
        experiments = flatten_experiments(params.input.experiments)
        if len(reflections) == 0 or len(experiments) == 0:
            self.parser.print_help()
            return
        elif len(reflections) != 1:
            raise Sorry("more than 1 reflection file was given")
        elif len(experiments) == 0:
            raise Sorry("no experiment list was specified")
        reflections = reflections[0]

        # Configure logging
        log.config(info="dials.merge_stills.log",
                   debug="dials.merge_stills.debug.log")

        # Log the diff phil
        diff_phil = self.parser.diff_phil.as_str()
        if diff_phil != "":
            logger.info("The following parameters have been modified:\n")
            logger.info(diff_phil)

        # Read the reference
        if params.input.reference:
            reference = self._read_reference(params.input.reference)
        else:
            reference = None

        # Do the merging
        reflections = scale_and_merge(experiments, reflections, params,
                                      reference)
Example #28
def run(args):
    usage = (
        "dials.resolutionizer [options] (scaled.expt scaled.refl | scaled_unmerged.mtz)"
    )

    parser = OptionParser(
        usage=usage,
        phil=phil_scope,
        read_reflections=True,
        read_experiments=True,
        check_format=False,
        epilog=help_message,
    )

    params, options, unhandled = parser.parse_args(return_unhandled=True,
                                                   show_diff_phil=True)

    reflections, experiments = reflections_and_experiments_from_files(
        params.input.reflections, params.input.experiments)
    if (not reflections or not experiments) and not unhandled:
        parser.print_help()
        return

    if reflections and experiments and unhandled:
        sys.exit(
            "Must provide either scaled unmerged mtz OR dials-format scaled reflections and experiments files"
        )

    # Configure the logging
    log.config(logfile=params.output.log, verbosity=options.verbose)
    logger.info(dials_version())

    if len(unhandled) == 1:
        scaled_unmerged = unhandled[0]
        m = resolutionizer.Resolutionizer.from_unmerged_mtz(
            scaled_unmerged, params.resolutionizer)
    else:
        reflections = parse_multiple_datasets(reflections)
        m = resolutionizer.Resolutionizer.from_reflections_and_experiments(
            reflections, experiments, params.resolutionizer)

    m.resolution_auto()
Example #29
def run(args=None):
    from dials.util.options import (
        ArgumentParser,
        reflections_and_experiments_from_files,
    )
    from dials.util.version import dials_version

    usage = "dials.export models.expt reflections.refl [options]"

    parser = ArgumentParser(
        usage=usage,
        read_experiments=True,
        read_reflections=True,
        phil=phil_scope,
        epilog=help_message,
    )

    # Get the parameters
    params, options = parser.parse_args(args, show_diff_phil=False)

    # Configure the logging
    log.config(logfile=params.output.log)

    # Print the version number
    logger.info(dials_version())

    # Log the diff phil
    diff_phil = parser.diff_phil.as_str()
    if diff_phil != "":
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)

    if not params.input.experiments and not params.input.reflections:
        parser.print_help()
        sys.exit()

    # Get the experiments and reflections
    reflections, experiments = reflections_and_experiments_from_files(
        params.input.reflections, params.input.experiments)

    exporter = BestExporter(params, experiments, reflections)
    exporter.export()
Example #30
    def __init__(self, params_filename, output_tag, logfile=None):
        """
    @param params_filename cctbx.xfel/DIALS parameter file for processing
    @output_tag String that will prefix output files
    @logfile File name for logging
    """
        self.parsed_params = parse(file_name=params_filename)
        dials_params = phil_scope.fetch(self.parsed_params).extract()
        super(CctbxPsanaEventProcessor, self).__init__(dials_params,
                                                       output_tag)
        self.update_geometry = ManualGeometryUpdater(dials_params)
        simple_script = SimpleScript(dials_params)
        simple_script.load_reference_geometry()
        self.reference_detector = getattr(simple_script, 'reference_detector',
                                          None)
        self.output_tag = output_tag
        self.detector_params = None

        if logfile is not None:
            log.config(logfile=logfile)
Example #31
def run(phil=working_phil, args=None):
    usage = "dials.index [options] models.expt strong.refl"

    parser = OptionParser(
        usage=usage,
        phil=phil,
        read_reflections=True,
        read_experiments=True,
        check_format=False,
        epilog=help_message,
    )

    params, options = parser.parse_args(args=args, show_diff_phil=False)

    from dials.util import log

    # Configure the logging
    log.config(verbosity=options.verbose, logfile=params.output.log)

    from dials.util.version import dials_version

    logger.info(dials_version())

    # Log the diff phil
    diff_phil = parser.diff_phil.as_str()
    if diff_phil != "":
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)

    experiments = flatten_experiments(params.input.experiments)
    reflections = flatten_reflections(params.input.reflections)

    if len(experiments) == 0:
        parser.print_help()
        return

    indexed = Index(experiments, reflections, params)
    indexed.export_experiments(params.output.experiments)
    indexed.export_reflections(params.output.reflections)
Example #32
def run(args=None):
    usage = "dials.unit_cell_histogram [options] models.expt"

    parser = ArgumentParser(
        usage=usage,
        phil=phil_scope,
        read_experiments=True,
        check_format=False,
        epilog=help_message,
    )

    logger.info(dials_version())

    params, options = parser.parse_args(args, show_diff_phil=False)
    experiments = flatten_experiments(params.input.experiments)

    if len(experiments) == 0:
        parser.print_help()
        exit(0)

    log.config()

    # Log the diff phil
    diff_phil = parser.diff_phil.as_str()
    if diff_phil != "":
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)

    uc_params = uc_params_from_experiments(experiments)
    panel_distances = panel_distances_from_experiments(experiments)
    outliers = outlier_selection(uc_params, iqr_ratio=params.iqr_ratio)
    plot_uc_histograms(uc_params, outliers, params.steps_per_angstrom)
    plot_uc_vs_detector_distance(
        uc_params, panel_distances, outliers, params.steps_per_angstrom
    )
    plot_number_of_crystals(experiments)
Example #33
  def run(self):
    '''Execute the script.'''
    from dials.util import log
    from time import time
    from libtbx import easy_mp
    import copy

    # Parse the command line
    params, options, all_paths = self.parser.parse_args(show_diff_phil=False, return_unhandled=True)

    # Check we have some filenames
    if not all_paths:
      self.parser.print_help()
      return

    # Save the options
    self.options = options
    self.params = params

    st = time()

    # Configure logging
    log.config(
      params.verbosity,
      info='dials.process.log',
      debug='dials.process.debug.log')

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil != '':
      logger.info('The following parameters have been modified:\n')
      logger.info(diff_phil)

    self.load_reference_geometry()
    from dials.command_line.dials_import import ManualGeometryUpdater
    update_geometry = ManualGeometryUpdater(params)

    # Import stuff
    logger.info("Loading files...")
    pre_import = params.dispatch.pre_import or len(all_paths) == 1
    if pre_import:
      # Handle still imagesets by breaking them apart into multiple datablocks
      # Further handle single file still imagesets (like HDF5) by tagging each
      # frame using its index

      datablocks = [do_import(path) for path in all_paths]
      if self.reference_detector is not None:
        from dxtbx.model import Detector
        for datablock in datablocks:
          for imageset in datablock.extract_imagesets():
            for i in range(len(imageset)):
              imageset.set_detector(
                Detector.from_dict(self.reference_detector.to_dict()),
                index=i)

      for datablock in datablocks:
        for imageset in datablock.extract_imagesets():
          update_geometry(imageset)

      indices = []
      basenames = []
      split_datablocks = []
      for datablock in datablocks:
        for imageset in datablock.extract_imagesets():
          paths = imageset.paths()
          for i in xrange(len(imageset)):
            subset = imageset[i:i+1]
            split_datablocks.append(DataBlockFactory.from_imageset(subset)[0])
            indices.append(i)
            basenames.append(os.path.splitext(os.path.basename(paths[i]))[0])
      tags = []
      for i, basename in zip(indices, basenames):
        if basenames.count(basename) > 1:
          tags.append("%s_%05d"%(basename, i))
        else:
          tags.append(basename)

      # Wrapper function
      def do_work(item):
        Processor(copy.deepcopy(params)).process_datablock(item[0], item[1])

      iterable = zip(tags, split_datablocks)

    else:
      basenames = [os.path.splitext(os.path.basename(filename))[0] for filename in all_paths]
      tags = []
      for i, basename in enumerate(basenames):
        if basenames.count(basename) > 1:
          tags.append("%s_%05d"%(basename, i))
        else:
          tags.append(basename)

      # Wrapper function
      def do_work(item):
        tag, filename = item

        datablock = do_import(filename)
        imagesets = datablock.extract_imagesets()
        if len(imagesets) == 0 or len(imagesets[0]) == 0:
          logger.info("Zero length imageset in file: %s"%filename)
          return
        if len(imagesets) > 1:
          raise Abort("Found more than one imageset in file: %s"%filename)
        if len(imagesets[0]) > 1:
          raise Abort("Found a multi-image file. Run again with pre_import=True")

        if self.reference_detector is not None:
          from dxtbx.model import Detector
          imagesets[0].set_detector(Detector.from_dict(self.reference_detector.to_dict()))

        update_geometry(imagesets[0])

        Processor(copy.deepcopy(params)).process_datablock(tag, datablock)

      iterable = zip(tags, all_paths)

    # Process the data
    if params.mp.method == 'mpi':
      from mpi4py import MPI
      comm = MPI.COMM_WORLD
      rank = comm.Get_rank() # each process in MPI has a unique id, 0-indexed
      size = comm.Get_size() # size: number of processes running in this job

      for i, item in enumerate(iterable):
        if (i+rank)%size == 0:
          do_work(item)
    else:
      easy_mp.parallel_map(
        func=do_work,
        iterable=iterable,
        processes=params.mp.nproc,
        method=params.mp.method,
        preserve_order=True,
        preserve_exception_message=True)

    # Total Time
    logger.info("")
    logger.info("Total Time Taken = %f seconds" % (time() - st))
Example #34
def run(args):
  from dials.util import log
  from logging import info
  import libtbx.load_env
  usage = "%s experiments.json indexed.pickle [options]" %libtbx.env.dispatcher_name

  parser = OptionParser(
    usage=usage,
    phil=phil_scope,
    read_experiments=True,
    read_reflections=True,
    check_format=False,
    epilog=help_message)

  params, options = parser.parse_args(show_diff_phil=False)

  # Configure the logging
  log.config(info=params.output.log, debug=params.output.debug_log)

  from dials.util.version import dials_version
  info(dials_version())

  # Log the diff phil
  diff_phil = parser.diff_phil.as_str()
  if diff_phil != '':
    info('The following parameters have been modified:\n')
    info(diff_phil)

  experiments = flatten_experiments(params.input.experiments)
  reflections = flatten_reflections(params.input.reflections)
  assert(len(reflections) == 1)
  reflections = reflections[0]

  if len(experiments) == 0:
    parser.print_help()
    return
  elif len(experiments.crystals()) > 1:
    if params.crystal_id is not None:
      assert params.crystal_id < len(experiments.crystals())
      experiment_ids = experiments.where(crystal=experiments.crystals()[params.crystal_id])
      from dxtbx.model.experiment.experiment_list import ExperimentList
      experiments = ExperimentList([experiments[i] for i in experiment_ids])
      refl_selections = [reflections['id'] == i for i in experiment_ids]
      reflections['id'] = flex.int(len(reflections), -1)
      for i, sel in enumerate(refl_selections):
        reflections['id'].set_selected(sel, i)
      reflections = reflections.select(reflections['id'] > -1)
    else:
      raise Sorry("Only one crystal can be processed at a time: set crystal_id to choose experiment.")

  if params.refinement.reflections.outlier.algorithm in ('auto', libtbx.Auto):
    if experiments[0].goniometer is None:
      params.refinement.reflections.outlier.algorithm = 'sauter_poon'
    else:
      # different default to dials.refine
      # tukey is faster and more appropriate at the indexing step
      params.refinement.reflections.outlier.algorithm = 'tukey'

  from dials.algorithms.indexing.symmetry \
       import refined_settings_factory_from_refined_triclinic

  cb_op_to_primitive = experiments[0].crystal.get_space_group().info()\
    .change_of_basis_op_to_primitive_setting()
  if experiments[0].crystal.get_space_group().n_ltr() > 1:
    effective_group = experiments[0].crystal.get_space_group()\
      .build_derived_reflection_intensity_group(anomalous_flag=True)
    sys_absent_flags = effective_group.is_sys_absent(
      reflections['miller_index'])
    reflections = reflections.select(~sys_absent_flags)
  experiments[0].crystal.update(experiments[0].crystal.change_basis(cb_op_to_primitive))
  miller_indices = reflections['miller_index']
  miller_indices = cb_op_to_primitive.apply(miller_indices)
  reflections['miller_index'] = miller_indices

  Lfat = refined_settings_factory_from_refined_triclinic(
    params, experiments, reflections, lepage_max_delta=params.lepage_max_delta,
    nproc=params.nproc, refiner_verbosity=params.verbosity)
  s = StringIO()
  Lfat.labelit_printout(out=s)
  info(s.getvalue())
  from json import dumps
  from os.path import join
  open(join(params.output.directory, 'bravais_summary.json'), 'wb').write(dumps(Lfat.as_dict()))
  from dxtbx.serialize import dump
  import copy
  for subgroup in Lfat:
    expts = copy.deepcopy(experiments)
    for expt in expts:
      expt.crystal.update(subgroup.refined_crystal)
      expt.detector = subgroup.detector
      expt.beam = subgroup.beam
    dump.experiment_list(
      expts, join(params.output.directory, 'bravais_setting_%i.json' % (int(subgroup.setting_number))))
  return
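
The crystal_id branch in the example above uses an idiom worth isolating: keep only the reflections belonging to a chosen set of experiments and renumber their 'id' column to a contiguous 0..n-1. A minimal sketch of that idiom (assuming a DIALS installation; remap_ids is an illustrative name):

from dials.array_family import flex

def remap_ids(reflections, experiment_ids):
  # record which rows belong to each selected experiment
  selections = [reflections['id'] == eid for eid in experiment_ids]
  # mark every row as unselected, then renumber the kept experiments
  reflections['id'] = flex.int(len(reflections), -1)
  for new_id, sel in enumerate(selections):
    reflections['id'].set_selected(sel, new_id)
  # drop rows that belonged to experiments outside the selection
  return reflections.select(reflections['id'] > -1)
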
Example #35
  def run(self):
    """ Process all images assigned to this thread """

    params, options = self.parser.parse_args(
      show_diff_phil=True)

    # Check inputs
    if params.input.experiment is None or \
       params.input.run_num is None or \
       params.input.address is None:
      raise Usage(self.usage)

    if params.format.file_format == "cbf":
      if params.format.cbf.detz_offset is None:
        raise Usage(self.usage)
    elif params.format.file_format == "pickle":
      if params.input.cfg is None:
        raise Usage(self.usage)
    else:
      raise Usage(self.usage)

    if not os.path.exists(params.output.output_dir):
      raise Sorry("Output path not found:" + params.output.output_dir)

    self.params = params
    self.load_reference_geometry()

    # The convention is to put %s in the phil parameter to add a time stamp to
    # each output datafile. Save the initial templates here.
    self.strong_filename_template              = params.output.strong_filename
    self.indexed_filename_template             = params.output.indexed_filename
    self.refined_experiments_filename_template = params.output.refined_experiments_filename
    self.integrated_filename_template          = params.output.integrated_filename
    self.reindexedstrong_filename_template     = params.output.reindexedstrong_filename

    # Don't allow the strong reflections to be written unless there are enough to
    # process
    params.output.strong_filename = None

    # Save the parameters
    self.params_cache = copy.deepcopy(params)
    self.options = options

    if params.mp.method == "mpi":
      from mpi4py import MPI
      comm = MPI.COMM_WORLD
      rank = comm.Get_rank() # each process in MPI has a unique id, 0-indexed
      size = comm.Get_size() # size: number of processes running in this job
    elif params.mp.method == "sge" and \
        'SGE_TASK_ID'    in os.environ and \
        'SGE_TASK_FIRST' in os.environ and \
        'SGE_TASK_LAST'  in os.environ:
      if 'SGE_STEP_SIZE' in os.environ:
        assert int(os.environ['SGE_STEP_SIZE']) == 1
      if os.environ['SGE_TASK_ID'] == 'undefined' or os.environ['SGE_TASK_FIRST'] == 'undefined' or os.environ['SGE_TASK_LAST'] == 'undefined':
        rank = 0
        size = 1
      else:
        rank = int(os.environ['SGE_TASK_ID']) - int(os.environ['SGE_TASK_FIRST'])
        size = int(os.environ['SGE_TASK_LAST']) - int(os.environ['SGE_TASK_FIRST']) + 1
    else:
      rank = 0
      size = 1

    # Configure the logging
    if params.output.logging_dir is None:
      info_path = ''
      debug_path = ''
    else:
      log_path = os.path.join(params.output.logging_dir, "log_rank%04d.out"%rank)
      error_path = os.path.join(params.output.logging_dir, "error_rank%04d.out"%rank)
      print "Redirecting stdout to %s"%log_path
      print "Redirecting stderr to %s"%error_path
      sys.stdout = open(log_path,'a', buffering=0)
      sys.stderr = open(error_path,'a',buffering=0)
      print "Should be redirected now"

      info_path = os.path.join(params.output.logging_dir, "info_rank%04d.out"%rank)
      debug_path = os.path.join(params.output.logging_dir, "debug_rank%04d.out"%rank)

    from dials.util import log
    log.config(params.verbosity, info=info_path, debug=debug_path)

    debug_dir = os.path.join(params.output.output_dir, "debug")
    if not os.path.exists(debug_dir):
      try:
        os.makedirs(debug_dir)
      except OSError, e:
        pass # due to multiprocessing, makedirs can sometimes fail
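
The SGE branch above derives a rank and pool size from the task-array environment variables, mirroring what MPI provides for free. A standard-library sketch of that computation (the function name is illustrative):

import os

def sge_rank_and_size():
  # SGE array jobs export SGE_TASK_ID/FIRST/LAST; outside an array job
  # SGE_TASK_ID is the literal string 'undefined'
  if os.environ.get('SGE_TASK_ID', 'undefined') == 'undefined':
    return 0, 1
  first = int(os.environ['SGE_TASK_FIRST'])
  last = int(os.environ['SGE_TASK_LAST'])
  rank = int(os.environ['SGE_TASK_ID']) - first
  size = last - first + 1
  return rank, size
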
Example #36
    def run(self, args=None):
        params, options = self.parser.parse_args(args, show_diff_phil=True)
        log.config(logfile="dials.complete_full_sphere.log")

        model_shadow = params.shadow

        experiments = flatten_experiments(params.input.experiments)

        if len(experiments) != 1:
            self.parser.print_help()
            return

        expt = experiments[0]

        axes = expt.goniometer.get_axes()

        if len(axes) != 3:
            sys.exit("This will only work with 3-axis goniometers")

        if not expt.imageset.reader().get_format():
            sys.exit("This will only work with images available")

        if not expt.imageset.reader().get_format().get_goniometer_shadow_masker():
            model_shadow = False

        beam = expt.beam
        det = expt.detector

        if params.resolution:
            resolution = params.resolution
        else:
            resolution = det.get_max_inscribed_resolution(expt.beam.get_s0())

        # at this point, predict all of the reflections in the scan possible (i.e.
        # extend scan to 360 degrees) - this points back to expt

        self.make_scan_360(expt.scan)

        # now get a full set of all unique miller indices
        all_indices = miller.build_set(
            crystal_symmetry=crystal.symmetry(
                space_group=expt.crystal.get_space_group(),
                unit_cell=expt.crystal.get_unit_cell(),
            ),
            anomalous_flag=True,
            d_min=resolution,
        )

        if model_shadow:
            obs, shadow = self.predict_to_miller_set_with_shadow(expt, resolution)
        else:
            obs = self.predict_to_miller_set(expt, resolution)

        logger.info(
            "Fraction of unique observations at datum: %.1f%%",
            100.0 * len(obs.indices()) / len(all_indices.indices()),
        )

        missing = all_indices.lone_set(other=obs)

        logger.info("%d unique reflections in blind region", len(missing.indices()))

        e1 = matrix.col(axes[0])
        e2 = matrix.col(axes[1])
        e3 = matrix.col(axes[2])

        s0n = matrix.col(beam.get_s0()).normalize()

        # rotate blind region about beam by +/- two theta
        two_theta = 2.0 * math.asin(0.5 * beam.get_wavelength() / resolution)
        R_ptt = s0n.axis_and_angle_as_r3_rotation_matrix(two_theta)
        R_ntt = s0n.axis_and_angle_as_r3_rotation_matrix(-two_theta)

        # now decompose to e3, e2, e1
        sol_plus = rotation_decomposition.solve_r3_rotation_for_angles_given_axes(
            R_ptt, e3, e2, e1, return_both_solutions=True, deg=True
        )

        sol_minus = rotation_decomposition.solve_r3_rotation_for_angles_given_axes(
            R_ntt, e3, e2, e1, return_both_solutions=True, deg=True
        )

        solutions = []
        if sol_plus:
            solutions.extend(sol_plus)
        if sol_minus:
            solutions.extend(sol_minus)

        if not solutions:
            sys.exit(f"Impossible two theta: {two_theta * 180.0 / math.pi:.3f},")

        logger.info("Maximum two theta: %.3f,", two_theta * 180.0 / math.pi)
        logger.info("%d solutions found", len(solutions))

        names = tuple(
            [n.replace("GON_", "").lower() for n in expt.goniometer.get_names()]
        )

        logger.info(" %8s %8s %8s  coverage expt.expt" % names)
        self.write_expt(experiments, "solution_0.expt")
        for j, s in enumerate(solutions):
            expt.goniometer.set_angles(s)
            if model_shadow:
                obs, shadow = self.predict_to_miller_set_with_shadow(expt, resolution)
            else:
                obs = self.predict_to_miller_set(expt, resolution)
            new = missing.common_set(obs)
            fout = "solution_%d.expt" % (j + 1)
            f = len(new.indices()) / len(missing.indices())

            logger.info("%8.3f %8.3f %8.3f %4.2f %s", s[0], s[1], s[2], f, fout)
            self.write_expt(experiments, fout)
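
The blind-region rotation above rests on one piece of geometry: a reflection at resolution d diffracts at scattering angle 2θ = 2·asin(λ/2d), so rotating the blind region by ±2θ about the beam brings it onto the Ewald sphere. A worked example with the standard library (wavelength and resolution values are illustrative):

import math

wavelength = 0.9795   # angstroms
resolution = 1.5      # angstroms

# Bragg's law: lambda = 2 d sin(theta), so 2theta = 2 asin(lambda / 2d)
two_theta = 2.0 * math.asin(0.5 * wavelength / resolution)
print("two theta: %.3f degrees" % math.degrees(two_theta))
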
Example #37
  def run(self):
    """ Process all images assigned to this thread """

    params, options = self.parser.parse_args(
      show_diff_phil=True)

    # Configure the logging
    from dials.util import log
    log.config(params.verbosity)

    # Check inputs
    if params.input.experiment is None or \
       params.input.run_num is None or \
       params.input.address is None:
      raise Usage(self.usage)

    if params.format.file_format == "cbf":
      if params.format.cbf.detz_offset is None:
        raise Usage(self.usage)
    elif params.format.file_format == "pickle":
      if params.format.pickle.cfg is None:
        raise Usage(self.usage)
    else:
      raise Usage(self.usage)

    if not os.path.exists(params.output.output_dir):
      raise Sorry("Output path not found:" + params.output.output_dir)

    # The convention is to put %s in the phil parameter to add a time stamp to
    # each output datafile. Save the initial templates here.
    self.strong_filename_template              = params.output.strong_filename
    self.indexed_filename_template             = params.output.indexed_filename
    self.refined_experiments_filename_template = params.output.refined_experiments_filename
    self.integrated_filename_template          = params.output.integrated_filename

    # Don't allow the strong reflections to be written unless there are enough to
    # process
    params.output.strong_filename = None

    # Save the parameters
    self.params_cache = copy.deepcopy(params)
    self.options = options

    if params.mp.method == "mpi":
      from mpi4py import MPI
      comm = MPI.COMM_WORLD
      rank = comm.Get_rank() # each process in MPI has a unique id, 0-indexed
      size = comm.Get_size() # size: number of processes running in this job
    elif params.mp.method == "sge" and \
        'SGE_TASK_ID'    in os.environ and \
        'SGE_TASK_FIRST' in os.environ and \
        'SGE_TASK_LAST'  in os.environ:
      if 'SGE_STEP_SIZE' in os.environ:
        assert int(os.environ['SGE_STEP_SIZE']) == 1
      if os.environ['SGE_TASK_ID'] == 'undefined' or os.environ['SGE_TASK_FIRST'] == 'undefined' or os.environ['SGE_TASK_LAST'] == 'undefined':
        rank = 0
        size = 1
      else:
        rank = int(os.environ['SGE_TASK_ID']) - int(os.environ['SGE_TASK_FIRST'])
        size = int(os.environ['SGE_TASK_LAST']) - int(os.environ['SGE_TASK_FIRST']) + 1
    else:
      rank = 0
      size = 1

    if params.output.logging_dir is not None:
      log_path = os.path.join(params.output.logging_dir, "log_rank%04d.out"%rank)
      error_path = os.path.join(params.output.logging_dir, "error_rank%04d.out"%rank)
      print "Redirecting stdout to %s"%log_path
      print "Redirecting stderr to %s"%error_path
      assert os.path.exists(log_path)
      sys.stdout = open(log_path,'a', buffering=0)
      sys.stderr = open(error_path,'a',buffering=0)
      print "Should be redirected now"

    debug_dir = os.path.join(params.output.output_dir, "debug")
    if not os.path.exists(debug_dir):
      os.makedirs(debug_dir)

    if params.debug.skip_processed_events or params.debug.skip_unprocessed_events or params.debug.skip_bad_events:
      print "Reading debug files..."
      self.known_events = {}
      for filename in os.listdir(debug_dir):
        # format: hostname,timestamp,status
        for line in open(os.path.join(debug_dir, filename)):
          vals = line.strip().split(',')
          if len(vals) == 2:
            self.known_events[vals[1]] = "unknown"
          elif len(vals) == 3:
            self.known_events[vals[1]] = vals[2]

    debug_file_path = os.path.join(debug_dir, "debug_%d.txt"%rank)
    write_newline = os.path.exists(debug_file_path)
    self.debug_file_handle = open(debug_file_path, 'a', 0) # 0 for unbuffered
    if write_newline: # needed if there was a crash
      self.debug_file_handle.write("\n")

    # set up psana
    if params.format.file_format=="pickle":
      psana.setConfigFile(params.format.pickle.cfg)
    dataset_name = "exp=%s:run=%s:idx"%(params.input.experiment,params.input.run_num)
    if params.input.xtc_dir is not None:
      if params.input.use_ffb:
        raise Sorry("Cannot specify the xtc_dir and use SLAC's ffb system")
      dataset_name += ":dir=%s"%params.input.xtc_dir
    elif params.input.use_ffb:
      # as ffb is only at SLAC, ok to hardcode /reg/d here
      dataset_name += ":dir=/reg/d/ffb/%s/%s/xtc"%(params.input.experiment[0:3],params.input.experiment)
    ds = psana.DataSource(dataset_name)

    if params.format.file_format == "cbf":
      self.psana_det = psana.Detector(params.input.address, ds.env())

    # set this to sys.maxint to analyze all events
    if params.dispatch.max_events is None:
      max_events = sys.maxint
    else:
      max_events = params.dispatch.max_events

    for run in ds.runs():
      if params.format.file_format == "cbf":
        # load a header only cspad cbf from the slac metrology
        self.base_dxtbx = cspad_cbf_tbx.env_dxtbx_from_slac_metrology(run, params.input.address)
        if self.base_dxtbx is None:
          raise Sorry("Couldn't load calibration file for run %d"%run.run())

        if params.format.cbf.gain_mask_value is not None:
          self.gain_mask = self.psana_det.gain_mask(gain=params.format.cbf.gain_mask_value)

      # list of all events
      times = run.times()
      nevents = min(len(times),max_events)
      if params.mp.method == "mpi" and size > 2:
        # use a client/server approach to be sure every process is busy as much as possible
        # only do this if there are more than 2 processes, as one process will be a server
        if rank == 0:
          # server process
          for t in times[:nevents]:
            # a client process will indicate it's ready by sending its rank
            rankreq = comm.recv(source=MPI.ANY_SOURCE)
            comm.send(t,dest=rankreq)
          # send a stop command to each process
          for rankreq in range(size-1):
            rankreq = comm.recv(source=MPI.ANY_SOURCE)
            comm.send('endrun',dest=rankreq)
        else:
          # client process
          while True:
            # inform the server this process is ready for an event
            comm.send(rank,dest=0)
            evttime = comm.recv(source=0)
            if evttime == 'endrun': break
            self.process_event(run, evttime)
      else:
        # chop the list into pieces, depending on rank. This assigns each process
        # events such that they get every Nth event, where N is the number of processes
        mytimes = [times[i] for i in xrange(nevents) if (i+rank)%size == 0]

        for i in xrange(len(mytimes)):
          self.process_event(run, mytimes[i])

      run.end()
    ds.end()
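
The client/server branch above is a generic master-worker pattern: rank 0 answers each "ready" message with the next task and finally sends one sentinel per worker. A minimal sketch of the same pattern (assuming mpi4py and at least two processes; the task list is illustrative):

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
tasks = list(range(20))

if rank == 0:
  # server: hand out one task per request
  for task in tasks:
    worker = comm.recv(source=MPI.ANY_SOURCE)
    comm.send(task, dest=worker)
  # one sentinel per worker shuts the pool down
  for _ in range(size - 1):
    worker = comm.recv(source=MPI.ANY_SOURCE)
    comm.send('endrun', dest=worker)
else:
  # worker: announce readiness, process until the sentinel arrives
  while True:
    comm.send(rank, dest=0)
    task = comm.recv(source=0)
    if task == 'endrun':
      break
    print("rank %d processing task %s" % (rank, task))
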
Example #38
  def run(self):
    ''' Parse the options. '''
    from dials.util import log
    import libtbx
    from uuid import uuid4
    from dials.util.stream import ZMQStream, Decoder
    from os.path import join, exists
    import os
    import json
    from dxtbx.datablock import DataBlock

    # Parse the command line arguments in two passes to set up logging early
    params, options = self.parser.parse_args(show_diff_phil=False, quick_parse=True)

    # Configure logging
    log.config(
      params.verbosity,
      info=params.output.log,
      debug=params.output.debug_log)
    from dials.util.version import dials_version
    logger.info(dials_version())

    # Parse the command line arguments completely
    params, options = self.parser.parse_args(show_diff_phil=False)

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil != '':
      logger.info('The following parameters have been modified:\n')
      logger.info(diff_phil)

    # Check a stream is given
    if params.input.host is None:
      raise Sorry("An input host needs to be given")

    # Check the directory
    if params.output.directory is None:
      raise Sorry("An output directory needs to be given")
    elif params.output.directory is libtbx.Auto:
      params.output.directory = "/dev/shm/dials-%s" % uuid4()

    # Make the output directory
    if exists(params.output.directory):
      raise Sorry('Directory "%s" already exists' % (params.output.directory))

    # Make the directory
    os.mkdir(params.output.directory)

    # Initialise the stream
    stream = ZMQStream(params.input.host, params.input.port)
    decoder = Decoder(
      params.output.directory,
      params.output.image_template)
    imageset = None
    while True:

      # Get the frames from zmq
      frames = stream.receive()

      # Decode the frames
      obj = decoder.decode(frames)

      # Process the object
      if obj.is_header():
        filename = join(params.output.directory, "metadata.json")
        with open(filename, "w") as outfile:
          json.dump(obj.header, outfile)
        imageset = obj.as_imageset(filename)
        datablocks = [DataBlock([imageset])]
        self.write_datablocks(datablocks, params)
      elif obj.is_image():
        assert imageset is not None
        filename = join(
          params.output.directory,
          params.output.image_template % obj.count)
        with open(filename, "wb") as outfile:
          outfile.write(obj.data)
        filename = join(
          params.output.directory,
          "%s.info" % (params.output.image_template % obj.count))
        with open(filename, "w") as outfile:
          json.dump(obj.info, outfile)
      elif obj.is_endofseries():
        assert imageset is not None
        break
      else:
        raise RuntimeError("Unknown object")

    # Close the stream
    stream.close()
Example #39
    def run(self):

        print("Parsing input")
        params, options = self.parser.parse_args(show_diff_phil=True)

        #Configure the logging
        log.config(params.detector_phase.refinement.verbosity,
                   info='dials.refine.log',
                   debug='dials.refine.debug.log')

        # Try to obtain the models and data
        if not params.input.experiments:
            raise Sorry("No Experiments found in the input")
        if not params.input.reflections:
            raise Sorry("No reflection data found in the input")
        try:
            assert len(params.input.reflections) == len(
                params.input.experiments)
        except AssertionError:
            raise Sorry(
                "The number of input reflections files does not match the "
                "number of input experiments")

        # set up global experiments and reflections lists
        from dials.array_family import flex
        reflections = flex.reflection_table()
        global_id = 0
        from dxtbx.model.experiment_list import ExperimentList
        experiments = ExperimentList()

        if params.reference_detector == "first":
            # Use the first experiment of the first experiment list as the reference detector
            ref_exp = params.input.experiments[0].data[0]
        else:
            # Average all the detectors to generate a reference detector
            assert params.detector_phase.refinement.parameterisation.detector.hierarchy_level == 0
            from scitbx.matrix import col
            panel_fasts = []
            panel_slows = []
            panel_oris = []
            for exp_wrapper in params.input.experiments:
                exp = exp_wrapper.data[0]
                if panel_oris:
                    for i, panel in enumerate(exp.detector):
                        panel_fasts[i] += col(panel.get_fast_axis())
                        panel_slows[i] += col(panel.get_slow_axis())
                        panel_oris[i] += col(panel.get_origin())
                else:
                    for i, panel in enumerate(exp.detector):
                        panel_fasts.append(col(panel.get_fast_axis()))
                        panel_slows.append(col(panel.get_slow_axis()))
                        panel_oris.append(col(panel.get_origin()))

            ref_exp = copy.deepcopy(params.input.experiments[0].data[0])
            for i, panel in enumerate(ref_exp.detector):
                # Averaging the fast and slow axes can make them non-orthogonal.
                # Fix this by finding the vector that bisects them, then rotating
                # 45 degrees from that bisector about their cross product in either direction
                vf = panel_fasts[i] / len(params.input.experiments)
                vs = panel_slows[i] / len(params.input.experiments)
                c = vf.cross(vs)
                angle = vf.angle(vs, deg=True)
                v45 = vf.rotate(c, angle / 2, deg=True)
                vf = v45.rotate(c, -45, deg=True)
                vs = v45.rotate(c, 45, deg=True)
                panel.set_frame(vf, vs,
                                panel_oris[i] / len(params.input.experiments))

            print("Reference detector (averaged):", str(ref_exp.detector))

        # set the experiment factory that combines a crystal with the reference beam
        # and the reference detector
        experiment_from_crystal = ExperimentFromCrystal(
            ref_exp.beam, ref_exp.detector)

        # keep track of the number of refl per accepted experiment for a table
        nrefs_per_exp = []

        # loop through the input, building up the global lists
        for ref_wrapper, exp_wrapper in zip(params.input.reflections,
                                            params.input.experiments):
            refs = ref_wrapper.data
            exps = exp_wrapper.data

            # there might be multiple experiments already here. Loop through them
            for i, exp in enumerate(exps):

                # select the relevant reflections
                sel = refs['id'] == i
                sub_ref = refs.select(sel)

                ## DGW commented out as reflections.minimum_number_of_reflections no longer exists
                #if len(sub_ref) < params.crystals_phase.refinement.reflections.minimum_number_of_reflections:
                #  print "skipping experiment", i, "in", exp_wrapper.filename, "due to insufficient strong reflections in", ref_wrapper.filename
                #  continue

                # build an experiment with this crystal plus the reference models
                combined_exp = experiment_from_crystal(exp.crystal)

                # next experiment ID in series
                exp_id = len(experiments)

                # check this experiment
                if not check_experiment(combined_exp, sub_ref):
                    print("skipping experiment", i, "in", exp_wrapper.filename,
                          "due to poor RMSDs")
                    continue

                # set reflections ID
                sub_ref['id'] = flex.int(len(sub_ref), exp_id)

                # keep number of reflections for the table
                nrefs_per_exp.append(len(sub_ref))

                # obtain mm positions on the reference detector
                sub_ref = indexer_base.map_spots_pixel_to_mm_rad(
                    sub_ref, combined_exp.detector, combined_exp.scan)

                # extend refl and experiments lists
                reflections.extend(sub_ref)
                experiments.append(combined_exp)

        # print number of reflections per accepted experiment
        from libtbx.table_utils import simple_table
        header = ["Experiment", "Nref"]
        rows = [(str(i), str(n)) for (i, n) in enumerate(nrefs_per_exp)]
        st = simple_table(rows, header)
        print("Number of reflections per experiment")
        print(st.format())

        for cycle in range(params.n_macrocycles):

            print("MACROCYCLE %02d" % (cycle + 1))
            print("=============\n")
            # first run: multi experiment joint refinement of detector with fixed beam and
            # crystals
            print("PHASE 1")

            # SET THIS TEST TO FALSE TO REFINE WHOLE DETECTOR AS SINGLE JOB
            if params.detector_phase.refinement.parameterisation.detector.hierarchy_level > 0:
                experiments = detector_parallel_refiners(
                    params.detector_phase, experiments, reflections)
            else:
                experiments = detector_refiner(params.detector_phase,
                                               experiments, reflections)

            # second run
            print("PHASE 2")
            experiments = crystals_refiner(params.crystals_phase, experiments,
                                           reflections)

        # Save the refined experiments to file
        output_experiments_filename = params.output.experiments_filename
        print('Saving refined experiments to {0}'.format(
            output_experiments_filename))
        from dxtbx.model.experiment_list import ExperimentListDumper
        dump = ExperimentListDumper(experiments)
        dump.as_json(output_experiments_filename)

        # Write out refined reflections, if requested
        if params.output.reflections_filename:
            print('Saving refined reflections to {0}'.format(
                params.output.reflections_filename))
            reflections.as_pickle(params.output.reflections_filename)

        return
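
The detector-averaging branch above contains a subtle correction worth spelling out: element-wise averages of the panel fast and slow axes are generally no longer exactly perpendicular, so the code takes their bisector and rotates it by -45 and +45 degrees about their cross product to recover an orthogonal pair. A NumPy sketch of that correction (assuming numpy; function names are illustrative):

import numpy as np

def rotate(v, axis, deg):
  # Rodrigues rotation of v about the (normalised) axis by deg degrees
  axis = axis / np.linalg.norm(axis)
  t = np.radians(deg)
  return (v * np.cos(t)
          + np.cross(axis, v) * np.sin(t)
          + axis * np.dot(axis, v) * (1.0 - np.cos(t)))

def orthogonalize(vf, vs):
  # vf, vs: averaged fast/slow axes, close to but not exactly perpendicular
  c = np.cross(vf, vs)
  cos_angle = np.dot(vf, vs) / (np.linalg.norm(vf) * np.linalg.norm(vs))
  angle = np.degrees(np.arccos(cos_angle))
  v45 = rotate(vf, c, angle / 2.0)  # bisector of vf and vs
  return rotate(v45, c, -45.0), rotate(v45, c, 45.0)

vf, vs = orthogonalize(np.array([1.0, 0.02, 0.0]), np.array([0.0, 1.0, 0.01]))
print(abs(np.dot(vf, vs)))  # ~0: the pair is orthogonal again
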
Example #40
File: integrate.py Project: dials/dials
  def run(self):
    ''' Perform the integration. '''
    from dials.util.command_line import heading
    from dials.util.options import flatten_reflections, flatten_experiments
    from dials.util import log
    from time import time
    from libtbx.utils import Sorry

    # Check the number of arguments is correct
    start_time = time()

    # Parse the command line
    params, options = self.parser.parse_args(show_diff_phil=False)
    reference = flatten_reflections(params.input.reflections)
    experiments = flatten_experiments(params.input.experiments)
    if len(reference) == 0 and len(experiments) == 0:
      self.parser.print_help()
      return
    if len(reference) == 0:
      reference = None
    elif len(reference) != 1:
      raise Sorry('more than 1 reflection file was given')
    else:
      reference = reference[0]
    if len(experiments) == 0:
      raise Sorry('no experiment list was specified')

    # Save phil parameters
    if params.output.phil is not None:
      with open(params.output.phil, "w") as outfile:
        outfile.write(self.parser.diff_phil.as_str())

    # Configure logging
    log.config(
      params.verbosity,
      info=params.output.log,
      debug=params.output.debug_log)

    from dials.util.version import dials_version
    logger.info(dials_version())

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil != '':
      logger.info('The following parameters have been modified:\n')
      logger.info(diff_phil)

    # Print if we're using a mask
    for i, exp in enumerate(experiments):
      mask = exp.imageset.external_lookup.mask
      if mask.filename is not None:
        if mask.data:
          logger.info('Using external mask: %s' % mask.filename)
          logger.info(' Mask has %d pixels masked' % mask.data.count(False))

    # Print the experimental models
    for i, exp in enumerate(experiments):
      logger.debug("Models for experiment %d" % i)
      logger.debug("")
      logger.debug(str(exp.beam))
      logger.debug(str(exp.detector))
      if exp.goniometer:
        logger.debug(str(exp.goniometer))
      if exp.scan:
        logger.debug(str(exp.scan))
      logger.debug(str(exp.crystal))

    logger.info("=" * 80)
    logger.info("")
    logger.info(heading("Initialising"))
    logger.info("")

    # Load the data
    reference, rubbish = self.process_reference(reference)
    logger.info("")

    # Initialise the integrator
    from dials.algorithms.profile_model.factory import ProfileModelFactory
    from dials.algorithms.integration.integrator import IntegratorFactory
    from dials.array_family import flex

    # Modify experiment list if scan range is set.
    experiments, reference = self.split_for_scan_range(
      experiments,
      reference,
      params.scan_range)

    # Predict the reflections
    logger.info("")
    logger.info("=" * 80)
    logger.info("")
    logger.info(heading("Predicting reflections"))
    logger.info("")
    predicted = flex.reflection_table.from_predictions_multi(
      experiments,
      dmin=params.prediction.d_min,
      dmax=params.prediction.d_max,
      margin=params.prediction.margin,
      force_static=params.prediction.force_static)

    # Match reference with predicted
    if reference:
      matched, reference, unmatched = predicted.match_with_reference(reference)
      assert(len(matched) == len(predicted))
      assert(matched.count(True) <= len(reference))
      if matched.count(True) == 0:
        raise Sorry('''
          Invalid input for reference reflections.
          Zero reference spots were matched to predictions
        ''')
      elif len(unmatched) != 0:
        logger.info('')
        logger.info('*' * 80)
        logger.info('Warning: %d reference spots were not matched to predictions' % (
          len(unmatched)))
        logger.info('*' * 80)
        logger.info('')
      rubbish.extend(unmatched)

      if len(experiments) > 1:
        # filter out any experiments without matched reference reflections
        # f_: filtered
        from dxtbx.model.experiment.experiment_list import ExperimentList
        f_reference = flex.reflection_table()
        f_predicted = flex.reflection_table()
        f_rubbish = flex.reflection_table()
        f_experiments = ExperimentList()
        good_expt_count = 0
        def refl_extend(src, dest, eid):
          tmp = src.select(src['id'] == eid)
          tmp['id'] = flex.int(len(tmp), good_expt_count)
          dest.extend(tmp)

        for expt_id, experiment in enumerate(experiments):
          if len(reference.select(reference['id'] == expt_id)) != 0:
            refl_extend(reference, f_reference, expt_id)
            refl_extend(predicted, f_predicted, expt_id)
            refl_extend(rubbish, f_rubbish, expt_id)
            f_experiments.append(experiment)
            good_expt_count += 1
          else:
            logger.info("Removing experiment %d: no reference reflections matched to predictions"%expt_id)

        reference = f_reference
        predicted = f_predicted
        experiments = f_experiments
        rubbish = f_rubbish

    # Select a random sample of the predicted reflections
    if not params.sampling.integrate_all_reflections:
      predicted = self.sample_predictions(experiments, predicted, params)

    # Compute the profile model
    if (params.create_profile_model and
        reference is not None and
        "shoebox" in reference):
      experiments = ProfileModelFactory.create(params, experiments, reference)
    else:
      for expr in experiments:
        if expr.profile is None:
          raise Sorry('No profile information in experiment list')
        expr.profile.params = params.profile
    del reference

    # Compute the bounding box
    predicted.compute_bbox(experiments)

    # Create the integrator
    logger.info("")
    integrator = IntegratorFactory.create(params, experiments, predicted)

    # Integrate the reflections
    reflections = integrator.integrate()

    # Append rubbish data onto the end
    if rubbish is not None and params.output.include_bad_reference:
      mask = flex.bool(len(rubbish), True)
      rubbish.unset_flags(mask, rubbish.flags.integrated_sum)
      rubbish.unset_flags(mask, rubbish.flags.integrated_prf)
      rubbish.set_flags(mask, rubbish.flags.bad_reference)
      reflections.extend(rubbish)

    # Save the reflections
    self.save_reflections(reflections, params.output.reflections)
    self.save_experiments(experiments, params.output.experiments)

    # Write a report if requested
    if params.output.report is not None:
      integrator.report().as_file(params.output.report)

    # Print the total time taken
    logger.info("\nTotal time taken: %f" % (time() - start_time))
Example #41
File: rl_png.py Project: biochem-fan/dials
def run(args):

  from dials.util.options import OptionParser
  from dials.util.options import flatten_datablocks
  from dials.util.options import flatten_experiments
  from dials.util.options import flatten_reflections
  from dials.util import log
  import libtbx.load_env

  usage = "%s [options] datablock.json reflections.pickle" %(
    libtbx.env.dispatcher_name)

  parser = OptionParser(
    usage=usage,
    phil=phil_scope,
    read_datablocks=True,
    read_experiments=True,
    read_reflections=True,
    check_format=False,
    epilog=help_message)

  params, options = parser.parse_args()
  datablocks = flatten_datablocks(params.input.datablock)
  experiments = flatten_experiments(params.input.experiments)
  reflections = flatten_reflections(params.input.reflections)

  if (len(datablocks) == 0 and len(experiments) == 0) or len(reflections) == 0:
    parser.print_help()
    exit(0)

  # Configure the logging
  log.config(info='dials.rl_png.log')

  # Log the diff phil
  diff_phil = parser.diff_phil.as_str()
  if diff_phil != '':
    info('The following parameters have been modified:\n')
    info(diff_phil)

  reflections = reflections[0]

  if len(datablocks) == 0 and len(experiments) > 0:
    imagesets = experiments.imagesets()
  else:
    imagesets = []
    for datablock in datablocks:
      imagesets.extend(datablock.extract_imagesets())

  f = ReciprocalLatticePng(settings=params)
  f.load_models(imagesets, reflections)

  imageset = imagesets[0]
  rotation_axis = matrix.col(imageset.get_goniometer().get_rotation_axis())
  s0 = matrix.col(imageset.get_beam().get_s0())

  e1 = rotation_axis.normalize()
  e2 = s0.normalize()
  e3 = e1.cross(e2).normalize()
  #print e1
  #print e2
  #print e3

  f.viewer.plot('rl_rotation_axis.png', n=e1.elems)
  f.viewer.plot('rl_beam_vector.png', n=e2.elems)
  f.viewer.plot('rl_e3.png', n=e3.elems)

  n_solutions = params.basis_vector_search.n_solutions

  if len(experiments):
    for i, c in enumerate(experiments.crystals()):
      A = c.get_A()
      astar = A[:3]
      bstar = A[3:6]
      cstar = A[6:9]

      direct_matrix = A.inverse()
      a = direct_matrix[:3]
      b = direct_matrix[3:6]
      c = direct_matrix[6:9]

      prefix = ''
      if len(experiments.crystals()) > 1:
        prefix = '%i_' %(i+1)

      f.viewer.plot('rl_%sa.png' %prefix, n=a)
      f.viewer.plot('rl_%sb.png' %prefix, n=b)
      f.viewer.plot('rl_%sc.png' %prefix, n=c)

  elif n_solutions:
    from dials.command_line.discover_better_experimental_model \
         import run_dps, dps_phil_scope

    hardcoded_phil = dps_phil_scope.extract()
    hardcoded_phil.d_min = params.d_min
    result = run_dps((imagesets[0], reflections, hardcoded_phil))
    solutions = [matrix.col(v) for v in result['solutions']]
    for i in range(min(n_solutions, len(solutions))):
      v = solutions[i]
      #if i > 0:
        #for v1 in solutions[:i-1]:
          #angle = v.angle(v1, deg=True)
          #print angle
      f.viewer.plot('rl_solution_%s.png' %(i+1), n=v.elems)
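
The three plot normals above form a natural viewing frame for the reciprocal lattice: along the rotation axis, along the beam, and along their mutual normal. A NumPy sketch of the construction (the axis and beam vectors are illustrative):

import numpy as np

rotation_axis = np.array([1.0, 0.0, 0.0])  # illustrative goniometer axis
s0 = np.array([0.0, 0.0, -1.0])            # illustrative incident beam vector

e1 = rotation_axis / np.linalg.norm(rotation_axis)  # view along rotation axis
e2 = s0 / np.linalg.norm(s0)                        # view along the beam
e3 = np.cross(e1, e2)
e3 = e3 / np.linalg.norm(e3)                        # view along their normal
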
Example #42
File: index.py Project: biochem-fan/dials
def run(args):
  import libtbx.load_env
  from libtbx.utils import Sorry
  from dials.util import log
  from logging import info
  usage = "%s [options] datablock.json strong.pickle" %libtbx.env.dispatcher_name

  parser = OptionParser(
    usage=usage,
    phil=phil_scope,
    read_reflections=True,
    read_datablocks=True,
    read_experiments=True,
    check_format=False,
    epilog=help_message)

  params, options = parser.parse_args(show_diff_phil=False)

  # Configure the logging
  log.config(
    params.verbosity,
    info=params.output.log,
    debug=params.output.debug_log)

  from dials.util.version import dials_version
  info(dials_version())

  # Log the diff phil
  diff_phil = parser.diff_phil.as_str()
  if diff_phil != '':
    info('The following parameters have been modified:\n')
    info(diff_phil)

  datablocks = flatten_datablocks(params.input.datablock)
  experiments = flatten_experiments(params.input.experiments)
  reflections = flatten_reflections(params.input.reflections)

  if len(datablocks) == 0:
    if len(experiments) > 0:
      imagesets = experiments.imagesets()
    else:
      parser.print_help()
      return
  #elif len(datablocks) > 1:
    #raise Sorry("Only one DataBlock can be processed at a time")
  else:
    imagesets = []
    for datablock in datablocks:
      imagesets.extend(datablock.extract_imagesets())
  if len(experiments):
    known_crystal_models = experiments.crystals()
  else:
    known_crystal_models = None

  if len(reflections) == 0:
    raise Sorry("No reflection lists found in input")
  if len(reflections) > 1:
    #raise Sorry("Multiple reflections lists provided in input")
    assert len(reflections) == len(imagesets)
    from scitbx.array_family import flex
    for i in range(len(reflections)):
      reflections[i]['imageset_id'] = flex.int(len(reflections[i]), i)
      if i > 0:
        reflections[0].extend(reflections[i])

  #assert(len(reflections) == 1)
  reflections = reflections[0]

  for imageset in imagesets:
    if (imageset.get_goniometer() is not None and
        imageset.get_scan() is not None and
        imageset.get_scan().get_oscillation()[1] == 0):
      imageset.set_goniometer(None)
      imageset.set_scan(None)

  from dials.algorithms.indexing.indexer import indexer_base
  idxr = indexer_base.from_parameters(
    reflections, imagesets,
    known_crystal_models=known_crystal_models,
    params=params)
  refined_experiments = idxr.refined_experiments
  reflections = copy.deepcopy(idxr.refined_reflections)
  reflections.extend(idxr.unindexed_reflections)
  if len(refined_experiments):
    info("Saving refined experiments to %s" %params.output.experiments)
    idxr.export_as_json(refined_experiments,
                        file_name=params.output.experiments)
    info("Saving refined reflections to %s" %params.output.reflections)
    idxr.export_reflections(
      reflections, file_name=params.output.reflections)

    if params.output.unindexed_reflections is not None:
      info("Saving unindexed reflections to %s"
           %params.output.unindexed_reflections)
      idxr.export_reflections(idxr.unindexed_reflections,
                              file_name=params.output.unindexed_reflections)

  return
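
When several strong-spot lists are supplied, the block above tags each reflection table with an imageset_id column and concatenates everything onto the first table. A minimal sketch of that merge (assuming a DIALS/cctbx installation; the function name is illustrative):

from scitbx.array_family import flex

def merge_with_imageset_ids(tables):
  # tag each table with the index of the imageset it came from,
  # then concatenate everything onto the first table
  for i, table in enumerate(tables):
    table['imageset_id'] = flex.int(len(table), i)
    if i > 0:
      tables[0].extend(table)
  return tables[0]
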
Example #43
File: screen.py Project: xia2/i19
  def run(self, args):
    from dials.util.version import dials_version
    from i19.util.version import i19_version
    version_information = "%s using %s (%s)" % (i19_version(), dials_version(), time.strftime("%Y-%m-%d %H:%M:%S"))
    start = timeit.default_timer()

    if len(args) == 0:
      print help_message
      print version_information
      return

    # Configure the logging
    from dials.util import log
    log.config(1, info='i19.screen.log', debug='i19.screen.debug.log')

    info(version_information)
    debug('Run with %s' % str(args))

    # FIXME use proper optionparser here. This works for now
    nproc = None
    if len(args) >= 1 and args[0].startswith('nproc='):
      nproc = args[0][6:]
      args = args[1:]
    self._count_processors(nproc=nproc)
    debug('Using %s processors' % self.nproc)

    if len(args) == 1 and args[0].endswith('.json'):
      self.json_file = args[0]
    else:
      self._import(args)
      self.json_file = 'datablock.json'

    self._find_spots()
    if not self._index():
      info("\nRetrying for stronger spots only...")
      os.rename("strong.pickle", "all_spots.pickle")
      self._find_spots(['sigma_strong=15'])
      if not self._index():
        warn("Giving up.")
        info("""
Could not find an indexing solution. You may want to have a look
at the reciprocal space by running:

  dials.reciprocal_lattice_viewer datablock.json all_spots.pickle

or, to only include stronger spots:

  dials.reciprocal_lattice_viewer datablock.json strong.pickle
""")
        sys.exit(1)

    if not self._create_profile_model():
      info("\nRefining model to attempt to increase number of valid spots...")
      self._refine()
      if not self._create_profile_model():
        warn("Giving up.")
        info("""
The identified indexing solution may not be correct. You may want to have a look
at the reciprocal space by running:

  dials.reciprocal_lattice_viewer experiments.json indexed.pickle
""")
        sys.exit(1)
    self._report()
    self._predict()
    self._check_intensities()
    self._refine_bravais()

    i19screen_runtime = timeit.default_timer() - start
    debug("Finished at %s, total runtime: %.1f" % (time.strftime("%Y-%m-%d %H:%M:%S"), i19screen_runtime))
    info("i19.screen successfully completed (%.1f sec)" % i19screen_runtime)
Example #44
  def run(self):
    '''Execute the script.'''
    from dials.algorithms.refinement.two_theta_refiner import \
      TwoThetaReflectionManager, TwoThetaTarget, \
      TwoThetaPredictionParameterisation

    start_time = time()

    # Parse the command line
    params, options = self.parser.parse_args(show_diff_phil=False)

    # set up global experiments and reflections lists
    from dials.array_family import flex
    reflections = flex.reflection_table()
    global_id = 0
    from dxtbx.model.experiment.experiment_list import ExperimentList
    experiments=ExperimentList()

    # loop through the input, building up the global lists
    nrefs_per_exp = []
    for ref_wrapper, exp_wrapper in zip(params.input.reflections,
                                        params.input.experiments):
      refs = ref_wrapper.data
      exps = exp_wrapper.data
      for i, exp in enumerate(exps):
        sel = refs['id'] == i
        sub_ref = refs.select(sel)
        nrefs_per_exp.append(len(sub_ref))
        sub_ref['id'] = flex.int(len(sub_ref), global_id)
        reflections.extend(sub_ref)
        experiments.append(exp)
        global_id += 1

    # Try to load the models and data
    nexp = len(experiments)
    if nexp == 0:
      print "No Experiments found in the input"
      self.parser.print_help()
      return
    if len(reflections) == 0:
      print "No reflection data found in the input"
      self.parser.print_help()
      return

    self.check_input(reflections)

    # Configure the logging
    log.config(info=params.output.log,
      debug=params.output.debug_log)
    logger.info(dials_version())

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil != '':
      logger.info('The following parameters have been modified:\n')
      logger.info(diff_phil)

    # Convert to P 1?
    if params.refinement.triclinic:
      reflections, experiments = self.convert_to_P1(reflections, experiments)

    # Combine crystals?
    if params.refinement.combine_crystal_models and len(experiments) > 1:
      logger.info('Combining {0} crystal models'.format(len(experiments)))
      experiments = self.combine_crystals(experiments)

    # Filter integrated centroids?
    if params.refinement.filter_integrated_centroids:
      reflections = self.filter_integrated_centroids(reflections)

    # Get the refiner
    logger.info('Configuring refiner')
    refiner = self.create_refiner(params, reflections, experiments)

    # Refine the geometry
    if nexp == 1:
      logger.info('Performing refinement of a single Experiment...')
    else:
      logger.info('Performing refinement of {0} Experiments...'.format(nexp))

    # Refine and get the refinement history
    history = refiner.run()

    # get the refined experiments
    experiments = refiner.get_experiments()
    crystals = experiments.crystals()

    if len(crystals) == 1:
      # output the refined model for information
      logger.info('')
      logger.info('Final refined crystal model:')
      logger.info(crystals[0])
      logger.info(self.cell_param_table(crystals[0]))

    # Save the refined experiments to file
    output_experiments_filename = params.output.experiments
    logger.info('Saving refined experiments to {0}'.format(output_experiments_filename))
    from dxtbx.model.experiment.experiment_list import ExperimentListDumper
    dump = ExperimentListDumper(experiments)
    dump.as_json(output_experiments_filename)

    # Correlation plot
    if params.output.correlation_plot.filename is not None:
      from os.path import splitext
      root, ext = splitext(params.output.correlation_plot.filename)
      if not ext: ext = ".pdf"

      steps = params.output.correlation_plot.steps
      if steps is None: steps = [history.get_nrows()-1]

      # extract individual column names or indices
      col_select = params.output.correlation_plot.col_select

      num_plots = 0
      for step in steps:
        fname_base = root
        if len(steps) > 1: fname_base += "_step%02d" % step
        plot_fname = fname_base + ext
        corrmat, labels = refiner.get_parameter_correlation_matrix(step, col_select)
        if [corrmat, labels].count(None) == 0:
          from dials.algorithms.refinement.refinement_helpers import corrgram
          plt = corrgram(corrmat, labels)
          if plt is not None:
            logger.info('Saving parameter correlation plot to {}'.format(plot_fname))
            plt.savefig(plot_fname)
            num_plots += 1
          mat_fname = fname_base + ".pickle"
          with open(mat_fname, 'wb') as handle:
            py_mat = corrmat.as_scitbx_matrix() #convert to pickle-friendly form
            logger.info('Saving parameter correlation matrix to {0}'.format(mat_fname))
            pickle.dump({'corrmat':py_mat, 'labels':labels}, handle)

      if num_plots == 0:
        msg = "Sorry, no parameter correlation plots were produced. Please set " \
              "track_parameter_correlation=True to ensure correlations are " \
              "tracked, and make sure correlation_plot.col_select is valid."
        logger.info(msg)

    if params.output.cif is not None:
      self.generate_cif(crystals[0], refiner, file=params.output.cif)

    if params.output.p4p is not None:
      self.generate_p4p(crystals[0], experiments[0].beam,
                        file=params.output.p4p)

    if params.output.mmcif is not None:
      self.generate_mmcif(crystals[0], refiner, file=params.output.mmcif)

    # Log the total time taken
    logger.info("\nTotal time taken: {0:.2f}s".format(time() - start_time))
Example #45
  def run(self):
    '''Execute the script.'''
    from dials.array_family import flex
    from dials.util.options import flatten_datablocks
    from time import time
    from dials.util import log
    from libtbx.utils import Sorry
    import datetime
    start_time = time()

    # Parse the command line
    params, options = self.parser.parse_args(show_diff_phil=False)

    # Configure the logging
    log.config(
      params.verbosity,
      info=params.output.log,
      debug=params.output.debug_log)

    from dials.util.version import dials_version
    logger.info(dials_version())

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil != '':
      logger.info('The following parameters have been modified:\n')
      logger.info(diff_phil)

    # Ensure we have a data block
    datablocks = flatten_datablocks(params.input.datablock)
    if len(datablocks) == 0:
      self.parser.print_help()
      return

    # Extend the first datablock
    datablock = datablocks[0]
    for db in datablocks[1:]:
      if datablock.format_class() != db.format_class():
        raise Sorry("Datablocks must have the same format")
      datablock.extend(db)

    # Get the imagesets and sweeps
    stills = datablock.extract_stills()
    sweeps = datablock.extract_sweeps()
    if len(stills) > 0:
      raise Sorry("Sets of still images are currently unsupported")
    logger.info("Number of sweeps = %d" % len(sweeps))

    # Sort the sweeps by timestamps
    logger.info("Sorting sweeps based on timestamp")
    sweeps = sorted(sweeps, key=lambda x: x.get_scan().get_epochs()[0])

    # Count the number of datasets from each day
    from collections import Counter
    counter = Counter()
    for s in sweeps:
      timestamp = s.get_scan().get_epochs()[0]
      timestamp = datetime.datetime.fromtimestamp(timestamp)
      timestamp = timestamp.strftime('%Y-%m-%d')
      counter[timestamp] += 1

    # Print the number of datasets on each day
    for timestamp in sorted(counter.keys()):
      logger.info("%d datasets collected on %s" % (counter[timestamp], timestamp))

    # Loop though and see if any models might be shared
    b_list = [ s.get_beam() for s in sweeps ]
    d_list = [ s.get_detector() for s in sweeps ]
    g_list = [ s.get_goniometer() for s in sweeps ]
    b_index = []
    d_index = []
    g_index = []
    for i in range(len(sweeps)):
      b = b_list[i]
      d = d_list[i]
      g = g_list[i]
      bn = i
      dn = i
      gn = i
      if i > 0:
        bj = b_index[-1]
        dj = d_index[-1]
        gj = g_index[-1]
        if b.is_similar_to(b_list[bj]):
          bn = bj
        if d.is_similar_to(d_list[dj]):
          dn = dj
        if g.is_similar_to(g_list[gj]):
          gn = gj
      b_index.append(bn)
      d_index.append(dn)
      g_index.append(gn)

    # Print a table of possibly shared models
    from libtbx.table_utils import format as table
    rows = [["Sweep", "ID", "Beam", "Detector", "Goniometer", "Date", "Time"]]
    for i in range(len(sweeps)):
      timestamp = sweeps[i].get_scan().get_epochs()[0]
      timestamp = datetime.datetime.fromtimestamp(timestamp)
      date_str = timestamp.strftime('%Y-%m-%d')
      time_str = timestamp.strftime('%H:%M:%S')
      row = [
        '%s' % sweeps[i].get_template(),
        '%s' % i,
        '%s' % b_index[i],
        '%s' % d_index[i],
        '%s' % g_index[i],
        '%s' % date_str,
        '%s' % time_str]
      rows.append(row)
    logger.info(table(rows, has_header=True, justify='left', prefix=' '))

    # Print the time
    logger.info("Time Taken: %f" % (time() - start_time))
Example #46
  def run(self):

    print "Parsing input"
    params, options = self.parser.parse_args(show_diff_phil=True)

    #Configure the logging
    log.config(params.detector_phase.refinement.verbosity,
      info='dials.refine.log', debug='dials.refine.debug.log')

    # Try to obtain the models and data
    if not params.input.experiments:
      raise Sorry("No Experiments found in the input")
    if not params.input.reflections:
      raise Sorry("No reflection data found in the input")
    try:
      assert len(params.input.reflections) == len(params.input.experiments)
    except AssertionError:
      raise Sorry("The number of input reflections files does not match the "
        "number of input experiments")

    # set up global experiments and reflections lists
    from dials.array_family import flex
    reflections = flex.reflection_table()
    global_id = 0
    from dxtbx.model.experiment.experiment_list import ExperimentList
    experiments=ExperimentList()

    if params.reference_detector == "first":
      # Use the first experiment of the first experiment list as the reference detector
      ref_exp = params.input.experiments[0].data[0]
    else:
      # Average all the detectors to generate a reference detector
      assert params.detector_phase.refinement.parameterisation.detector.hierarchy_level == 0
      from scitbx.matrix import col
      panel_fasts = []
      panel_slows = []
      panel_oris = []
      for exp_wrapper in params.input.experiments:
        exp = exp_wrapper.data[0]
        if panel_oris:
          for i, panel in enumerate(exp.detector):
            panel_fasts[i] += col(panel.get_fast_axis())
            panel_slows[i] += col(panel.get_slow_axis())
            panel_oris[i] += col(panel.get_origin())
        else:
          for i, panel in enumerate(exp.detector):
            panel_fasts.append(col(panel.get_fast_axis()))
            panel_slows.append(col(panel.get_slow_axis()))
            panel_oris.append(col(panel.get_origin()))

      ref_exp = copy.deepcopy(params.input.experiments[0].data[0])
      for i, panel in enumerate(ref_exp.detector):
        # Averaging the fast and slow axes can make them non-orthogonal.
        # Fix this by finding the vector that bisects them, then rotating
        # 45 degrees from that bisector about their cross product in either direction
        vf = panel_fasts[i]/len(params.input.experiments)
        vs = panel_slows[i]/len(params.input.experiments)
        c = vf.cross(vs)
        angle = vf.angle(vs, deg=True)
        v45 = vf.rotate(c, angle/2, deg=True)
        vf = v45.rotate(c, -45, deg=True)
        vs = v45.rotate(c, 45, deg=True)
        panel.set_frame(vf, vs,
                        panel_oris[i]/len(params.input.experiments))

      print "Reference detector (averaged):", str(ref_exp.detector)

    # set the experiment factory that combines a crystal with the reference beam
    # and the reference detector
    experiment_from_crystal = ExperimentFromCrystal(ref_exp.beam, ref_exp.detector)

    # keep track of the number of refl per accepted experiment for a table
    nrefs_per_exp = []

    # loop through the input, building up the global lists
    for ref_wrapper, exp_wrapper in zip(params.input.reflections,
                                        params.input.experiments):
      refs = ref_wrapper.data
      exps = exp_wrapper.data

      # there might be multiple experiments already here. Loop through them
      for i, exp in enumerate(exps):

        # select the relevant reflections
        sel = refs['id'] == i
        sub_ref = refs.select(sel)

        ## DGW commented out as reflections.minimum_number_of_reflections no longer exists
        #if len(sub_ref) < params.crystals_phase.refinement.reflections.minimum_number_of_reflections:
        #  print "skipping experiment", i, "in", exp_wrapper.filename, "due to insufficient strong reflections in", ref_wrapper.filename
        #  continue

        # build an experiment with this crystal plus the reference models
        combined_exp = experiment_from_crystal(exp.crystal)

        # next experiment ID in series
        exp_id = len(experiments)

        # check this experiment
        if not check_experiment(combined_exp, sub_ref):
          print "skipping experiment", i, "in", exp_wrapper.filename, "due to poor RMSDs"
          continue

        # set reflections ID
        sub_ref['id'] = flex.int(len(sub_ref), exp_id)

        # keep number of reflections for the table
        nrefs_per_exp.append(len(sub_ref))

        # obtain mm positions on the reference detector
        sub_ref = indexer_base.map_spots_pixel_to_mm_rad(sub_ref,
          combined_exp.detector, combined_exp.scan)

        # extend refl and experiments lists
        reflections.extend(sub_ref)
        experiments.append(combined_exp)

    # print number of reflections per accepted experiment
    from libtbx.table_utils import simple_table
    header = ["Experiment", "Nref"]
    rows = [(str(i), str(n)) for (i, n) in enumerate(nrefs_per_exp)]
    st = simple_table(rows, header)
    print "Number of reflections per experiment"
    print st.format()

    for cycle in range(params.n_macrocycles):

      print "MACROCYCLE %02d" % (cycle + 1)
      print "=============\n"
      # first run: multi experiment joint refinement of detector with fixed beam and
      # crystals
      print "PHASE 1"

      # SET THIS TEST TO FALSE TO REFINE WHOLE DETECTOR AS SINGLE JOB
      if params.detector_phase.refinement.parameterisation.detector.hierarchy_level > 0:
        experiments = detector_parallel_refiners(params.detector_phase, experiments, reflections)
      else:
        experiments = detector_refiner(params.detector_phase, experiments, reflections)

      # second run
      print "PHASE 2"
      experiments = crystals_refiner(params.crystals_phase, experiments, reflections)

    # Save the refined experiments to file
    output_experiments_filename = params.output.experiments_filename
    print 'Saving refined experiments to {0}'.format(output_experiments_filename)
    from dxtbx.model.experiment.experiment_list import ExperimentListDumper
    dump = ExperimentListDumper(experiments)
    dump.as_json(output_experiments_filename)

    # Write out refined reflections, if requested
    if params.output.reflections_filename:
      print 'Saving refined reflections to {0}'.format(
        params.output.reflections_filename)
      reflections.as_pickle(params.output.reflections_filename)

    return
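
The detector-averaging branch above re-orthogonalises the averaged axes with a 45-degree trick. A minimal standalone sketch of just that step, using the same scitbx.matrix.col methods the snippet itself calls, with invented input vectors:

# Re-orthogonalise two nearly perpendicular axes: rotate their bisector
# back and forth by 45 degrees around their cross product. Inputs invented.
from scitbx.matrix import col

vf = col((1.0, 0.05, 0.0)).normalize()   # slightly skewed fast axis
vs = col((0.05, 1.0, 0.0)).normalize()   # slightly skewed slow axis

c = vf.cross(vs)                         # common normal
angle = vf.angle(vs, deg=True)           # current, non-90 angle
v45 = vf.rotate(c, angle / 2, deg=True)  # bisector of vf and vs
vf = v45.rotate(c, -45, deg=True)        # step 45 degrees either side
vs = v45.rotate(c, 45, deg=True)

print(vf.angle(vs, deg=True))            # exactly 90.0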
Example #47
    def run(self):
        """ Perform the integration. """
        from dials.util.command_line import heading
        from dials.util.options import flatten_reflections, flatten_experiments
        from dials.util import log
        from logging import info, debug
        from time import time
        from libtbx.utils import Sorry

        # Record the start time
        start_time = time()

        # Parse the command line
        params, options = self.parser.parse_args(show_diff_phil=False)
        reference = flatten_reflections(params.input.reflections)
        experiments = flatten_experiments(params.input.experiments)
        if len(reference) == 0 and len(experiments) == 0:
            self.parser.print_help()
            return
        if len(reference) == 0:
            reference = None
        elif len(reference) != 1:
            raise Sorry("more than 1 reflection file was given")
        else:
            reference = reference[0]
        if len(experiments) == 0:
            raise Sorry("no experiment list was specified")

        # Save phil parameters
        if params.output.phil is not None:
            with open(params.output.phil, "w") as outfile:
                outfile.write(self.parser.diff_phil.as_str())

        # Configure logging
        log.config(params.verbosity, info=params.output.log, debug=params.output.debug_log)

        from dials.util.version import dials_version

        info(dials_version())

        # Log the diff phil
        diff_phil = self.parser.diff_phil.as_str()
        if diff_phil:
            info("The following parameters have been modified:\n")
            info(diff_phil)

        # Print if we're using a mask
        for i, exp in enumerate(experiments):
            mask = exp.imageset.external_lookup.mask
            if mask.filename is not None:
                info("Using external mask: %s" % mask.filename)
                info(" Mask has %d pixels masked" % mask.data.count(False))

        # Print the experimental models
        for i, exp in enumerate(experiments):
            debug("Models for experiment %d" % i)
            debug("")
            debug(str(exp.beam))
            debug(str(exp.detector))
            if exp.goniometer:
                debug(str(exp.goniometer))
            if exp.scan:
                debug(str(exp.scan))
            debug(str(exp.crystal))

        info("=" * 80)
        info("")
        info(heading("Initialising"))
        info("")

        # Load the data
        reference, rubbish = self.process_reference(reference)
        info("")

        # Initialise the integrator
        from dials.algorithms.profile_model.factory import ProfileModelFactory
        from dials.algorithms.integration.integrator import IntegratorFactory
        from dials.array_family import flex

        # Modify experiment list if scan range is set.
        experiments, reference = self.split_for_scan_range(experiments, reference, params.scan_range)

        # Predict the reflections
        info("")
        info("=" * 80)
        info("")
        info(heading("Predicting reflections"))
        info("")
        predicted = flex.reflection_table.from_predictions_multi(
            experiments,
            dmin=params.prediction.d_min,
            dmax=params.prediction.d_max,
            margin=params.prediction.margin,
            force_static=params.prediction.force_static,
        )

        # Match reference with predicted
        if reference:
            matched, reference, unmatched = predicted.match_with_reference(reference)
            assert len(matched) == len(predicted)
            assert matched.count(True) <= len(reference)
            if matched.count(True) == 0:
                raise Sorry(
                    "Invalid input for reference reflections.\n"
                    "Zero reference spots were matched to predictions"
                )
            elif len(unmatched) != 0:
                info("")
                info("*" * 80)
                info("Warning: %d reference spots were not matched to predictions" % (len(unmatched)))
                info("*" * 80)
                info("")
            rubbish.extend(unmatched)

        # Select a random sample of the predicted reflections
        if not params.sampling.integrate_all_reflections:
            predicted = self.sample_predictions(experiments, predicted, params)

        # Compute the profile model
        if reference is not None and params.create_profile_model:
            experiments = ProfileModelFactory.create(params, experiments, reference)
        else:
            for expr in experiments:
                expr.profile.params = params.profile
        del reference

        # Compute the bounding box
        predicted.compute_bbox(experiments)

        # Create the integrator
        info("")
        integrator = IntegratorFactory.create(params, experiments, predicted)

        # Integrate the reflections
        reflections = integrator.integrate()

        # Append rubbish data onto the end
        if rubbish is not None and params.output.include_bad_reference:
            mask = flex.bool(len(rubbish), True)
            rubbish.unset_flags(mask, rubbish.flags.integrated_sum)
            rubbish.unset_flags(mask, rubbish.flags.integrated_prf)
            rubbish.set_flags(mask, rubbish.flags.bad_reference)
            reflections.extend(rubbish)

        # Save the reflections
        self.save_reflections(reflections, params.output.reflections)
        self.save_experiments(experiments, params.output.experiments)

        # Write a report if requested
        if params.output.report is not None:
            integrator.report().as_file(params.output.report)

        # Print the total time taken
        info("\nTotal time taken: %f" % (time() - start_time))
Example #48
File: refine.py Project: biochem-fan/dials
  def run(self):
    '''Execute the script.'''
    from time import time
    import cPickle as pickle
    from logging import info
    from dials.util import log
    from dials.algorithms.refinement import RefinerFactory
    from dials.util.options import flatten_reflections, flatten_experiments
    from dials.array_family import flex
    from libtbx.utils import Sorry

    start_time = time()

    # Parse the command line
    params, options = self.parser.parse_args(show_diff_phil=False)
    reflections = flatten_reflections(params.input.reflections)
    experiments = flatten_experiments(params.input.experiments)

    # Try to load the models and data
    nexp = len(experiments)
    if nexp == 0:
      print "No Experiments found in the input"
      self.parser.print_help()
      return
    if len(reflections) == 0:
      print "No reflection data found in the input"
      self.parser.print_help()
      return
    if len(reflections) > 1:
      raise Sorry("Only one reflections list can be imported at present")
    reflections = reflections[0]

    self.check_input(reflections)

    # Configure the logging
    log.config(info=params.output.log,
      debug=params.output.debug_log)
    from dials.util.version import dials_version
    info(dials_version())

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil:
      info('The following parameters have been modified:\n')
      info(diff_phil)

    # Modify options if necessary
    if params.output.correlation_plot.filename is not None:
      params.refinement.refinery.track_parameter_correlation = True

    # Get the refiner
    info('Configuring refiner')
    refiner = RefinerFactory.from_parameters_data_experiments(params,
        reflections, experiments)

    # Refine the geometry
    if nexp == 1:
      info('Performing refinement of a single Experiment...')
    else:
      info('Performing refinement of {0} Experiments...'.format(nexp))

    # Refine and get the refinement history
    history = refiner.run()

    if params.output.centroids:
      info("Writing table of centroids to '{0}'".format(
        params.output.centroids))
      self.write_centroids_table(refiner, params.output.centroids)

    # Write scan-varying parameters to file, if there were any
    if params.output.parameter_table:
      scan = refiner.get_scan()
      if scan:
        text = refiner.get_param_reporter().varying_params_vs_image_number(
            scan.get_array_range())
        if text:
          info("Writing scan-varying parameter table to {0}".format(
            params.output.parameter_table))
          with open(params.output.parameter_table, "w") as f:
            f.write(text)
        else:
          info("No scan-varying parameter table to write")

    # get the refined experiments
    experiments = refiner.get_experiments()
    crystals = experiments.crystals()

    if len(crystals) == 1:
      # output the refined model for information
      info('')
      info('Final refined crystal model:')
      info(crystals[0])

    # Save the refined experiments to file
    output_experiments_filename = params.output.experiments
    info('Saving refined experiments to {0}'.format(output_experiments_filename))
    from dxtbx.model.experiment.experiment_list import ExperimentListDumper
    dump = ExperimentListDumper(experiments)
    dump.as_json(output_experiments_filename)

    # Save reflections with updated predictions if requested (allow to switch
    # this off if it is a time-consuming step)
    if params.output.reflections:
      # Update predictions for all indexed reflections
      info('Updating predictions for indexed reflections')
      preds = refiner.predict_for_indexed()

      # just copy over the columns of interest, leaving behind things
      # added by e.g. scan-varying refinement such as 'block', the
      # U, B and UB matrices and gradients.
      reflections['s1'] = preds['s1']
      reflections['xyzcal.mm'] = preds['xyzcal.mm']
      reflections['xyzcal.px'] = preds['xyzcal.px']
      if 'entering' in preds:
        reflections['entering'] = preds['entering']

      # set used_in_refinement and centroid_outlier flags
      assert len(preds) == len(reflections)
      reflections.unset_flags(flex.size_t_range(len(reflections)),
        reflections.flags.used_in_refinement | reflections.flags.centroid_outlier)
      mask = preds.get_flags(preds.flags.centroid_outlier)
      reflections.set_flags(mask, reflections.flags.centroid_outlier)
      mask = preds.get_flags(preds.flags.used_in_refinement)
      reflections.set_flags(mask, reflections.flags.used_in_refinement)

      info('Saving reflections with updated predictions to {0}'.format(
        params.output.reflections))
      if params.output.include_unused_reflections:
        reflections.as_pickle(params.output.reflections)
      else:
        sel = reflections.get_flags(reflections.flags.used_in_refinement)
        reflections.select(sel).as_pickle(params.output.reflections)

    # For debugging, if requested save matches to file
    if params.output.matches:
      matches = refiner.get_matches()
      info('Saving matches (use for debugging purposes) to {0}'.format(
        params.output.matches))
      matches.as_pickle(params.output.matches)

    # Correlation plot
    if params.output.correlation_plot.filename is not None:
      from os.path import splitext
      root, ext = splitext(params.output.correlation_plot.filename)
      if not ext: ext = ".pdf"

      steps = params.output.correlation_plot.steps
      if steps is None: steps = [history.get_nrows()-1]

      # extract individual column names or indices
      col_select = params.output.correlation_plot.col_select

      num_plots = 0
      for step in steps:
        fname_base = root + "_step%02d" % step
        plot_fname = fname_base + ext
        corrmat, labels = refiner.get_parameter_correlation_matrix(step, col_select)
        if corrmat is not None and labels is not None:
          from dials.algorithms.refinement.refinement_helpers import corrgram
          plt = corrgram(corrmat, labels)
          if plt is not None:
            info('Saving parameter correlation plot to {}'.format(plot_fname))
            plt.savefig(plot_fname)
            num_plots += 1
          mat_fname = fname_base + ".pickle"
          with open(mat_fname, 'wb') as handle:
            py_mat = corrmat.as_scitbx_matrix() #convert to pickle-friendly form
            info('Saving parameter correlation matrix to {0}'.format(mat_fname))
            pickle.dump({'corrmat':py_mat, 'labels':labels}, handle)

      if num_plots == 0:
        msg = "Sorry, no parameter correlation plots were produced. Please set " \
              "track_parameter_correlation=True to ensure correlations are " \
              "tracked, and make sure correlation_plot.col_select is valid."
        info(msg)

    # Write out refinement history, if requested
    if params.output.history:
      with open(params.output.history, 'wb') as handle:
        info('Saving refinement step history to {0}'.format(
          params.output.history))
        pickle.dump(history, handle)

    # Log the total time taken
    info("\nTotal time taken: {0:.2f}s".format(time() - start_time))

    return
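
The correlation matrices written above are plain pickles of a dict with 'corrmat' and 'labels' keys, so they can be inspected offline. A hedged sketch, with a hypothetical filename standing in for fname_base + ".pickle":

# Read back a correlation-matrix pickle written by the script above.
# 'refined_step00.pickle' is a hypothetical example filename.
import cPickle as pickle

with open('refined_step00.pickle', 'rb') as handle:
    d = pickle.load(handle)

corrmat = d['corrmat']  # written via corrmat.as_scitbx_matrix()
labels = d['labels']    # one parameter name per row/column
print('correlation matrix covers %d parameters' % len(labels))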
Example #49
  def run(self):
    '''Execute the script.'''
    from dxtbx.datablock import DataBlockTemplateImporter
    from dials.util.options import flatten_datablocks
    from dials.util import log
    from logging import info
    from time import time
    from libtbx.utils import Abort

    # Parse the command line
    params, options = self.parser.parse_args(show_diff_phil=False)
    datablocks = flatten_datablocks(params.input.datablock)

    # Check we have some filenames
    if len(datablocks) == 0:

      # Check if a template has been set and print help if not, otherwise try to
      # import the images based on the template input
      if len(params.input.template) == 0:
        self.parser.print_help()
        exit(0)
      else:
        importer = DataBlockTemplateImporter(
          params.input.template,
          options.verbose)
        datablocks = importer.datablocks

    # Save the options
    self.options = options
    self.params = params
    self.load_reference_geometry()

    st = time()

    # Import stuff
    if len(datablocks) == 0:
      raise Abort('No datablocks specified')
    elif len(datablocks) > 1:
      raise Abort('Only 1 datablock can be processed at a time.')
    datablock = datablocks[0]

    if self.reference_detector is not None:
      for imageset in datablock.extract_imagesets():
        imageset.set_detector(self.reference_detector)

    # Configure logging
    log.config(
      params.verbosity,
      info='dials.process.log',
      debug='dials.process.debug.log')

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil:
      info('The following parameters have been modified:\n')
      info(diff_phil)

    if self.params.output.datablock_filename:
      from dxtbx.datablock import DataBlockDumper
      dump = DataBlockDumper(datablock)
      dump.as_json(self.params.output.datablock_filename)

    # Do the processing
    observed = self.find_spots(datablock)
    experiments, indexed = self.index(datablock, observed)
    experiments = self.refine(experiments, indexed)
    integrated = self.integrate(experiments, indexed)

    # Total Time
    info("")
    info("Total Time Taken = %f seconds" % (time() - st))
Example #50
def run(args):

    from dials.util.options import OptionParser
    from dials.util.options import flatten_datablocks
    from dials.util.options import flatten_experiments
    from dials.util.options import flatten_reflections
    from dials.util import log
    import libtbx.load_env

    usage = "%s [options] datablock.json reflections.pickle" % (libtbx.env.dispatcher_name)

    parser = OptionParser(
        usage=usage,
        phil=phil_scope,
        read_datablocks=True,
        read_experiments=True,
        read_reflections=True,
        check_format=False,
        epilog=help_message,
    )

    params, options = parser.parse_args()
    datablocks = flatten_datablocks(params.input.datablock)
    experiments = flatten_experiments(params.input.experiments)
    reflections = flatten_reflections(params.input.reflections)

    if (len(datablocks) == 0 and len(experiments) == 0) or len(reflections) == 0:
        parser.print_help()
        exit(0)

    # Configure the logging
    log.config(info="dials.detect_blanks.log")

    # Log the diff phil
    diff_phil = parser.diff_phil.as_str()
    if diff_phil:
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)

    reflections = reflections[0]

    if len(datablocks) == 0 and len(experiments) > 0:
        imagesets = experiments.imagesets()
    else:
        imagesets = []
        for datablock in datablocks:
            imagesets.extend(datablock.extract_imagesets())

    assert len(imagesets) == 1
    imageset = imagesets[0]
    scan = imageset.get_scan()

    integrated_sel = reflections.get_flags(reflections.flags.integrated)
    indexed_sel = reflections.get_flags(reflections.flags.indexed)
    centroid_outlier_sel = reflections.get_flags(reflections.flags.centroid_outlier)
    strong_sel = reflections.get_flags(reflections.flags.strong)
    indexed_sel &= ~centroid_outlier_sel

    logger.info("Analysis of %i strong reflections:" % strong_sel.count(True))
    strong_results = blank_counts_analysis(
        reflections.select(strong_sel), scan, phi_step=params.phi_step, fractional_loss=params.counts_fractional_loss
    )
    for blank_start, blank_end in strong_results["blank_regions"]:
        logger.info("Potential blank images: %i -> %i" % (blank_start + 1, blank_end))

    indexed_results = None
    if indexed_sel.count(True) > 0:
        logger.info("Analysis of %i indexed reflections:" % indexed_sel.count(True))
        indexed_results = blank_counts_analysis(
            reflections.select(indexed_sel),
            scan,
            phi_step=params.phi_step,
            fractional_loss=params.counts_fractional_loss,
        )
        for blank_start, blank_end in indexed_results["blank_regions"]:
            logger.info("Potential blank images: %i -> %i" % (blank_start + 1, blank_end))

    integrated_results = None
    if integrated_sel.count(True) > 0:
        logger.info("Analysis of %i integrated reflections:" % integrated_sel.count(True))
        integrated_results = blank_integrated_analysis(
            reflections.select(integrated_sel),
            scan,
            phi_step=params.phi_step,
            fractional_loss=params.misigma_fractional_loss,
        )
        for blank_start, blank_end in integrated_results["blank_regions"]:
            logger.info("Potential blank images: %i -> %i" % (blank_start + 1, blank_end))

    d = {"strong": strong_results, "indexed": indexed_results, "integrated": integrated_results}

    if params.output.json is not None:
        import json

        with open(params.output.json, "w") as f:
            json.dump(d, f)

    if params.output.plot:
        from matplotlib import pyplot

        plots = [(strong_results, "-")]
        if indexed_results:
            plots.append((indexed_results, "--"))
        if integrated_results:
            plots.append((integrated_results, ":"))

        for results, linestyle in plots:
            xs = results["data"][0]["x"]
            ys = results["data"][0]["y"]
            xmax = max(xs)
            ymax = max(ys)
            xs = [x / xmax for x in xs]
            ys = [y / ymax for y in ys]
            blanks = results["data"][0]["blank"]
            pyplot.plot(xs, ys, color="blue", linestyle=linestyle)
            pyplot.plot(
                *zip(*[(x, y) for x, y, blank in zip(xs, ys, blanks) if blank]), color="red", linestyle=linestyle
            )
        pyplot.ylim(0)
        pyplot.show()
        pyplot.clf()
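
Because the results dict d above is dumped as JSON, the blank regions can be re-read later without rerunning the analysis. A short sketch, assuming the output went to dials.detect_blanks.json:

# Re-read the blank-region analysis written by the script above. The
# filename is an assumption; it is whatever params.output.json was set to.
import json

with open('dials.detect_blanks.json') as fh:
    results = json.load(fh)

for key in ('strong', 'indexed', 'integrated'):
    res = results.get(key)
    if res is None:
        continue  # that analysis was not run
    for blank_start, blank_end in res['blank_regions']:
        print('%s: potential blank images %i -> %i'
              % (key, blank_start + 1, blank_end))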
Example #51
  def run(self):
    '''Execute the script.'''
    from dials.util.command_line import heading
    from dials.array_family import flex
    from dials.util.options import flatten_experiments
    from time import time
    from dials.util import log
    from libtbx.utils import Sorry
    from dials.algorithms.background.modeller import BackgroundModeller
    start_time = time()

    # Parse the command line
    params, options = self.parser.parse_args(show_diff_phil=False)

    # Configure the logging
    log.config(
      params.verbosity,
      info=params.output.log,
      debug=params.output.debug_log)

    from dials.util.version import dials_version
    logger.info(dials_version())

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil:
      logger.info('The following parameters have been modified:\n')
      logger.info(diff_phil)

    # Ensure we have a data block
    experiments = flatten_experiments(params.input.experiments)
    if len(experiments) == 0:
      self.parser.print_help()
      return

    # Only handle a single imageset at once
    imagesets = set(expr.imageset for expr in experiments)
    if len(imagesets) != 1:
      raise Sorry("Can only process a single imageset at a time")

    # Predict the reflections
    logger.info("")
    logger.info("=" * 80)
    logger.info("")
    logger.info(heading("Predicting reflections"))
    logger.info("")
    predicted = flex.reflection_table.from_predictions_multi(
      experiments,
      dmin=params.prediction.d_min,
      dmax=params.prediction.d_max,
      margin=params.prediction.margin,
      force_static=params.prediction.force_static)

    # Create the modeller
    modeller = BackgroundModeller(experiments, predicted, params)
    model = modeller.compute()

    # Save the background model
    logger.info("Saving background model to %s" % params.output.model)
    from dials.algorithms.background.gmodel import StaticBackgroundModel
    static_model = StaticBackgroundModel()
    for i in range(len(model)):
      static_model.add(model[i].model)
    with open(params.output.model, "wb") as outfile:
      import cPickle as pickle
      pickle.dump(static_model, outfile, protocol=pickle.HIGHEST_PROTOCOL)

    # Output some diagnostic images
    image_generator = ImageGenerator(model)
    image_generator.save_mean(params.output.mean_image_prefix)
    image_generator.save_variance(params.output.variance_image_prefix)
    image_generator.save_dispersion(params.output.dispersion_image_prefix)
    image_generator.save_mask(params.output.mask_image_prefix)
    image_generator.save_min(params.output.min_image_prefix)
    image_generator.save_max(params.output.max_image_prefix)
    image_generator.save_model(params.output.model_image_prefix)
    #image_generator.save_polar_model(params.output.polar_model_image_prefix)

    # Print the time
    logger.info("Time Taken: %f" % (time() - start_time))
Example #52
  def run(self):
    ''' Perform the integration. '''
    from dials.util.command_line import heading
    from dials.util.options import flatten_datablocks, flatten_experiments
    from dials.util import log
    from time import time
    from libtbx.utils import Sorry
    from dials.array_family import flex

    # Parse the command line
    params, options = self.parser.parse_args(show_diff_phil=False)
    experiments = flatten_experiments(params.input.experiments)
    datablocks = flatten_datablocks(params.input.datablock)
    if len(experiments) == 0 and len(datablocks) == 0:
      self.parser.print_help()
      return

    if len(datablocks) > 0:
      assert len(datablocks) == 1
      imagesets = datablocks[0].extract_imagesets()
      assert len(imagesets) == 1
      imageset = imagesets[0]
      beam = imageset.get_beam()
      detector = imageset.get_detector()
    else:
      assert len(experiments) == 1
      imageset = experiments[0].imageset
      beam = experiments[0].beam
      detector = experiments[0].detector

    # Configure logging
    log.config()

    # Set the scan range
    if params.scan_range is None:
      scan_range = (0, len(imageset))
    else:
      scan_range = params.scan_range
      i0, i1 = scan_range
      if i0 < 0 or i1 > len(imageset):
        raise RuntimeError('Scan range outside image range')
      if i0 >= i1:
        raise RuntimeError('Invalid scan range')

    summed_data = None
    summed_mask = None

    # Loop through images
    for i in range(*scan_range):
      logger.info("Reading image %d" % i)

      # Read image
      data = imageset.get_raw_data(i)
      mask = imageset.get_mask(i)
      assert isinstance(data, tuple)
      assert isinstance(mask, tuple)

      if summed_data is None:
        summed_mask = mask
        summed_data = data
      else:
        summed_data = [ sd + d for sd, d in zip(summed_data, data) ]
        summed_mask = [ sm & m for sm, m in zip(summed_mask, mask) ]

    # Compute min and max and num
    if params.num_bins is None:
      num_bins = sum(sum(p.get_image_size()) for p in detector)
    else:
      num_bins = params.num_bins
    if params.d_max is None:
      vmin = 0
    else:
      vmin = (1.0 / params.d_max)**2
    if params.d_min is None:
      params.d_min = detector.get_max_resolution(beam.get_s0())
    vmax = (1.0 / params.d_min)**2

    # Print some info
    logger.info("Min 1/d^2: %f" % vmin)
    logger.info("Max 1/d^2: %f" % vmax)
    logger.info("Num bins:  %d" % num_bins)

    # Compute the radial average
    from dials.algorithms.background import RadialAverage
    radial_average = RadialAverage(beam, detector, vmin, vmax, num_bins)
    for d, m in zip(summed_data, summed_mask):
      radial_average.add(d.as_double(), m)
    mean = radial_average.mean()
    reso = radial_average.inv_d2()

    logger.info("Writing to %s" % params.output.filename)
    with open(params.output.filename, "w") as outfile:
      for r, m in zip(reso, mean):
        outfile.write("%f, %f\n" % (r, m))
Example #53
File: export.py Project: dials/dials
  # Create the option parser
  parser = OptionParser(
    usage=usage,
    read_experiments=True,
    read_reflections=True,
    read_datablocks=True,
    phil=phil_scope,
    epilog=help_message)

  # Get the parameters
  params, options = parser.parse_args(show_diff_phil=False)

  # Configure the logging
  log.config(
    info=params.output.log,
    debug=params.output.debug_log)

  # Print the version number
  logger.info(dials_version())

  # Log the diff phil
  diff_phil = parser.diff_phil.as_str()
  if diff_phil:
    logger.info('The following parameters have been modified:\n')
    logger.info(diff_phil)

  # Get the experiments and reflections
  datablocks = flatten_datablocks(params.input.datablock)

  experiments = flatten_experiments(params.input.experiments)
Example #54
  def run(self):
    '''Execute the script.'''
    from dials.util import log
    from logging import info
    from time import time
    from libtbx.utils import Abort
    from libtbx import easy_mp
    import os, copy
    from dxtbx.datablock import DataBlockFactory

    # Parse the command line
    params, options, all_paths = self.parser.parse_args(show_diff_phil=False, return_unhandled=True)

    # Check we have some filenames
    if len(all_paths) == 0:
      self.parser.print_help()
      return

    # Save the options
    self.options = options
    self.params = params

    st = time()

    # Configure logging
    log.config(
      params.verbosity,
      info='dials.process.log',
      debug='dials.process.debug.log')

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil:
      info('The following parameters have been modified:\n')
      info(diff_phil)

    # Import stuff
    info("Loading files...")
    if len(all_paths) == 1:
      datablocks = DataBlockFactory.from_filenames(all_paths)
    else:
      def do_import(filename):
        info("Loading %s"%os.path.basename(filename))
        datablocks = DataBlockFactory.from_filenames([filename])
        if len(datablocks) == 0:
          raise Abort("Could not load %s"%filename)
        if len(datablocks) > 1:
          raise Abort("Got multiple datablocks from file %s"%filename)
        return datablocks[0]

      datablocks = easy_mp.parallel_map(
        func=do_import,
        iterable=all_paths,
        processes=params.mp.nproc,
        method=params.mp.method,
        preserve_order=True,
        preserve_exception_message=True)

    if len(datablocks) == 0:
      raise Abort('No datablocks specified')

    # Handle still imagesets by breaking them apart into multiple datablocks
    # Further handle single file still imagesets (like HDF5) by tagging each
    # frame using its index
    indices = []
    basenames = []
    split_datablocks = []
    for datablock in datablocks:
      for imageset in datablock.extract_imagesets():
        for i in xrange(len(imageset)):
          subset = imageset[i:i+1]
          split_datablocks.append(DataBlockFactory.from_imageset(subset)[0])
          indices.append(i)
          basenames.append(os.path.splitext(os.path.basename(subset.paths()[0]))[0])
    tags = []
    for i, basename in zip(indices, basenames):
      if basenames.count(basename) > 1:
        tags.append("%s_%d"%(basename, i))
      else:
        tags.append(basename)

    # Wrapper function
    def do_work(item):
      Processor(copy.deepcopy(params)).process_datablock(item[0], item[1])

    # Process the data
    easy_mp.parallel_map(
      func=do_work,
      iterable=zip(tags, split_datablocks),
      processes=params.mp.nproc,
      method=params.mp.method,
      preserve_order=True,
      preserve_exception_message=True)

    # Total Time
    info("")
    info("Total Time Taken = %f seconds" % (time() - st))
Example #55
File: dials_import.py Project: dials/dials
  def run(self):
    ''' Parse the options. '''
    from dials.util import log

    # Parse the command line arguments in two passes to set up logging early
    params, options = self.parser.parse_args(show_diff_phil=False, quick_parse=True)

    # Configure logging
    log.config(
      params.verbosity,
      info=params.output.log,
      debug=params.output.debug_log)
    from dials.util.version import dials_version
    logger.info(dials_version())

    # Parse the command line arguments completely
    if params.input.ignore_unhandled:
      params, options, unhandled = self.parser.parse_args(
        show_diff_phil=False,
        return_unhandled=True)
    else:
      params, options = self.parser.parse_args(show_diff_phil=False)
      unhandled = None

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil:
      logger.info('The following parameters have been modified:\n')
      logger.info(diff_phil)

    # Print a warning if something unhandled
    if unhandled is not None and len(unhandled) > 0:
      msg = 'Unable to handle the following arguments:\n'
      msg += '\n'.join(['  %s' % a for a in unhandled])
      msg += '\n'
      logger.warning(msg)

    # Print help if no input
    if (len(params.input.datablock) == 0 and not
        (params.input.template or params.input.directory)):
      self.parser.print_help()
      return

    # Setup the datablock importer
    datablock_importer = DataBlockImporter(params)

    # Setup the metadata updater
    metadata_updater = MetaDataUpdater(params)

    # Get the datablocks
    datablock = metadata_updater(datablock_importer())

    # Extract any sweeps
    sweeps = datablock.extract_sweeps()

    # Extract any stills
    stills = datablock.extract_stills()
    if not stills:
      num_stills = 0
    else:
      num_stills = sum([len(s) for s in stills])

    # Print some data block info - override the output of image range
    # if appropriate
    image_range = params.geometry.scan.image_range

    logger.info("-" * 80)
    logger.info("  format: %s" % str(datablock.format_class()))
    if image_range is None:
      logger.info("  num images: %d" % datablock.num_images())
    else:
      logger.info("  num images: %d" % (image_range[1] - image_range[0] + 1))
    logger.info("  num sweeps: %d" % len(sweeps))
    logger.info("  num stills: %d" % num_stills)

    # Loop through all the sweeps
    for j, sweep in enumerate(sweeps):
      logger.debug("")
      logger.debug("Sweep %d" % j)
      logger.debug("  Length %d" % len(sweep))
      logger.debug(sweep.get_beam())
      logger.debug(sweep.get_goniometer())
      logger.debug(sweep.get_detector())
      logger.debug(sweep.get_scan())

    # Only allow a single sweep
    if params.input.allow_multiple_sweeps is False:
      self.assert_single_sweep(sweeps, params)

    # Write the datablocks to file
    self.write_datablocks([datablock], params)
Example #56
File: find_spots.py Project: dials/dials
  def run(self):
    '''Execute the script.'''
    from dials.array_family import flex
    from dials.util.options import flatten_datablocks
    from time import time
    from dials.util import log
    from libtbx.utils import Sorry
    start_time = time()

    # Parse the command line
    params, options = self.parser.parse_args(show_diff_phil=False)

    # Configure the logging
    log.config(
      params.verbosity,
      info=params.output.log,
      debug=params.output.debug_log)

    from dials.util.version import dials_version
    logger.info(dials_version())

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil:
      logger.info('The following parameters have been modified:\n')
      logger.info(diff_phil)

    # Ensure we have a data block
    datablocks = flatten_datablocks(params.input.datablock)
    if len(datablocks) == 0:
      self.parser.print_help()
      return
    elif len(datablocks) != 1:
      raise Sorry('only 1 datablock can be processed at a time')

    # Loop through all the imagesets and find the strong spots
    reflections = flex.reflection_table.from_observations(
      datablocks[0], params)

    # Delete the shoeboxes
    if not params.output.shoeboxes:
      del reflections['shoebox']

    # ascii spot count per image plot
    from dials.util.ascii_art import spot_counts_per_image_plot

    for i, imageset in enumerate(datablocks[0].extract_imagesets()):
      ascii_plot = spot_counts_per_image_plot(
        reflections.select(reflections['id'] == i))
      if len(ascii_plot):
        logger.info('\nHistogram of per-image spot count for imageset %i:' %i)
        logger.info(ascii_plot)

    # Save the reflections to file
    logger.info('\n' + '-' * 80)
    reflections.as_pickle(params.output.reflections)
    logger.info('Saved {0} reflections to {1}'.format(
        len(reflections), params.output.reflections))

    # Save the datablock
    if params.output.datablock:
      from dxtbx.datablock import DataBlockDumper
      logger.info('Saving datablocks to {0}'.format(
        params.output.datablock))
      dump = DataBlockDumper(datablocks)
      dump.as_file(params.output.datablock)

    # Print some per image statistics
    if params.per_image_statistics:
      from dials.algorithms.spot_finding import per_image_analysis
      from cStringIO import StringIO
      s = StringIO()
      for i, imageset in enumerate(datablocks[0].extract_imagesets()):
        print >> s, "Number of centroids per image for imageset %i:" %i
        stats = per_image_analysis.stats_imageset(
          imageset, reflections.select(reflections['id'] == i),
          resolution_analysis=False)
        per_image_analysis.print_table(stats, out=s)
      logger.info(s.getvalue())

    # Print the time
    logger.info("Time Taken: %f" % (time() - start_time))
Example #57
    def run(self, args=None):
        """ Parse the options. """

        # Parse the command line arguments in two passes to set up logging early
        params, options = self.parser.parse_args(args=args,
                                                 show_diff_phil=False,
                                                 quick_parse=True)

        # Configure logging, if this is the main process
        if __name__ == "__main__":
            from dials.util import log

            log.config(verbosity=options.verbose, logfile=params.output.log)

        from dials.util.version import dials_version

        logger.info(dials_version())

        # Parse the command line arguments completely
        if params.input.ignore_unhandled:
            params, options, unhandled = self.parser.parse_args(
                args=args, show_diff_phil=False, return_unhandled=True)
            # Remove any False values from unhandled (eliminate empty strings)
            unhandled = [x for x in unhandled if x]
        else:
            params, options = self.parser.parse_args(args=args,
                                                     show_diff_phil=False)
            unhandled = None

        # Log the diff phil
        diff_phil = self.parser.diff_phil.as_str()
        if diff_phil:
            logger.info("The following parameters have been modified:\n")
            logger.info(diff_phil)

        # Print a warning if something unhandled
        if unhandled:
            msg = "Unable to handle the following arguments:\n"
            msg += "\n".join(["  %s" % a for a in unhandled])
            msg += "\n"
            logger.warning(msg)

        # Print help if no input
        if len(params.input.experiments) == 0 and not (params.input.template or
                                                       params.input.directory):
            self.parser.print_help()
            return

        # Setup the experiments importer
        imageset_importer = ImageSetImporter(params)

        # Setup the metadata updater
        metadata_updater = MetaDataUpdater(params)

        # Extract the experiments and loop through
        experiments = metadata_updater(imageset_importer())

        # Compute some numbers
        num_sweeps = 0
        num_stills = 0
        num_images = 0
        for e in experiments:
            if isinstance(e.imageset, ImageSweep):
                num_sweeps += 1
            else:
                num_stills += 1
            num_images += len(e.imageset)
        format_list = {str(e.imageset.get_format_class()) for e in experiments}

        # Print out some bulk info
        logger.info("-" * 80)
        for f in format_list:
            logger.info("  format: %s" % f)
        logger.info("  num images: %d" % num_images)
        logger.info("  num sweeps: %d" % num_sweeps)
        logger.info("  num stills: %d" % num_stills)

        # Print out info for all experiments
        for experiment in experiments:

            # Print some experiment info - override the output of image range
            # if appropriate
            image_range = params.geometry.scan.image_range
            if isinstance(experiment.imageset, ImageSweep):
                imageset_type = "sweep"
            else:
                imageset_type = "stills"

            logger.debug("-" * 80)
            logger.debug("  format: %s" %
                         str(experiment.imageset.get_format_class()))
            logger.debug("  imageset type: %s" % imageset_type)
            if image_range is None:
                logger.debug("  num images:    %d" % len(experiment.imageset))
            else:
                logger.debug("  num images:    %d" %
                             (image_range[1] - image_range[0] + 1))

            logger.debug("")
            logger.debug(experiment.imageset.get_beam())
            logger.debug(experiment.imageset.get_goniometer())
            logger.debug(experiment.imageset.get_detector())
            logger.debug(experiment.imageset.get_scan())

        # Only allow a single sweep
        if params.input.allow_multiple_sweeps is False:
            self.assert_single_sweep(experiments, params)

        # Write the experiments to file
        self.write_experiments(experiments, params)
Example #58
  def run(self):
    ''' Run the script. '''
    from dials.algorithms.profile_model.factory import ProfileModelFactory
    from dials.util.command_line import Command
    from dials.array_family import flex
    from dials.util.options import flatten_reflections, flatten_experiments
    from dxtbx.model.experiment.experiment_list import ExperimentListDumper
    from libtbx.utils import Sorry
    from dials.util import log

    log.config()

    # Parse the command line
    params, options = self.parser.parse_args(show_diff_phil=True)
    reflections = flatten_reflections(params.input.reflections)
    experiments = flatten_experiments(params.input.experiments)
    if len(reflections) == 0 and len(experiments) == 0:
      self.parser.print_help()
      return
    if len(reflections) != 1:
      raise Sorry('exactly 1 reflection table must be specified')
    if len(experiments) == 0:
      raise Sorry('no experiments were specified')
    if 'background.mean' not in reflections[0] and params.subtract_background:
      raise Sorry('subtract_background requires background.mean in the reflections')

    reflections, _ = self.process_reference(reflections[0], params)

    # Predict the reflections
    logger.info("")
    logger.info("=" * 80)
    logger.info("")
    logger.info("Predicting reflections")
    logger.info("")
    predicted = flex.reflection_table.from_predictions_multi(
      experiments,
      dmin=params.prediction.d_min,
      dmax=params.prediction.d_max,
      margin=params.prediction.margin,
      force_static=params.prediction.force_static)

    # Match with predicted
    matched, reflections, unmatched = predicted.match_with_reference(reflections)
    assert(len(matched) == len(predicted))
    assert(matched.count(True) <= len(reflections))
    if matched.count(True) == 0:
      raise Sorry(
        "Invalid input for reference reflections.\n"
        "Zero reference spots were matched to predictions")
    elif len(unmatched) != 0:
      logger.info('')
      logger.info('*' * 80)
      logger.info('Warning: %d reference spots were not matched to predictions' % (
        len(unmatched)))
      logger.info('*' * 80)
      logger.info('')

    # Create the profile model
    experiments = ProfileModelFactory.create(params, experiments, reflections)
    for model in experiments:
      sigma_b = model.profile.sigma_b(deg=True)
      sigma_m = model.profile.sigma_m(deg=True)
      if isinstance(sigma_b, float):
        logger.info('Sigma B: %f' % sigma_b)
        logger.info('Sigma M: %f' % sigma_m)
      else: # scan varying
        mean_sigma_b = sum(sigma_b) / len(sigma_b)
        mean_sigma_m = sum(sigma_m) / len(sigma_m)
        logger.info('Sigma B: %f' % mean_sigma_b)
        logger.info('Sigma M: %f' % mean_sigma_m)

    # Write the parameters
    Command.start("Writing experiments to %s" % params.output)
    dump = ExperimentListDumper(experiments)
    with open(params.output, "w") as outfile:
      outfile.write(dump.as_json())
    Command.end("Wrote experiments to %s" % params.output)
Example #59
def run(args):
    from dials.util import log
    usage = "%s [options] datablock.json strong.pickle" % libtbx.env.dispatcher_name

    parser = OptionParser(usage=usage,
                          phil=phil_scope,
                          read_datablocks=True,
                          read_reflections=True,
                          check_format=False,
                          epilog=help_message)

    params, options = parser.parse_args(show_diff_phil=False)
    datablocks = flatten_datablocks(params.input.datablock)
    reflections = flatten_reflections(params.input.reflections)

    if len(datablocks) == 0 or len(reflections) == 0:
        parser.print_help()
        exit(0)

    # Configure the logging
    log.config(info=params.output.log, debug=params.output.debug_log)

    # Log the diff phil
    diff_phil = parser.diff_phil.as_str()
    if diff_phil:
        logger.info('The following parameters have been modified:\n')
        logger.info(diff_phil)

    if params.seed is not None:
        import random
        flex.set_random_seed(params.seed)
        random.seed(params.seed)

    imagesets = []
    for datablock in datablocks:
        imagesets.extend(datablock.extract_imagesets())

    assert len(imagesets) > 0
    assert len(reflections) == len(imagesets)

    if params.scan_range is not None and len(params.scan_range) > 0:
        reflections = [
            filter_reflections_by_scan_range(refl, params.scan_range)
            for refl in reflections
        ]

    dps_params = dps_phil_scope.extract()
    # for development, we want an exhaustive plot of beam probability map:
    dps_params.indexing.plot_search_scope = params.plot_search_scope
    dps_params.indexing.mm_search_scope = params.mm_search_scope

    for i in range(params.n_macro_cycles):
        if params.n_macro_cycles > 1:
            logger.info('Starting macro cycle %i' % (i + 1))
        new_detector, new_beam = discover_better_experimental_model(
            imagesets,
            reflections,
            params,
            dps_params,
            nproc=params.nproc,
            wide_search_binning=params.wide_search_binning)
        for imageset in imagesets:
            imageset.set_detector(new_detector)
            imageset.set_beam(new_beam)
        logger.info('')

    from dxtbx.serialize import dump
    logger.info("Saving optimized datablock to %s" % params.output.datablock)
    dump.datablock(datablock, params.output.datablock)
Example #60
  def run(self):
    ''' Extract the shoeboxes. '''
    from dials.util.options import flatten_reflections
    from dials.util.options import flatten_experiments
    from dials.util.options import flatten_datablocks
    from dials.util import log
    from dials.array_family import flex
    from libtbx.utils import Sorry
    from logging import info

    # Parse the command line
    params, options = self.parser.parse_args(show_diff_phil=False)

    # Configure logging
    log.config()

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil:
      info('The following parameters have been modified:\n')
      info(diff_phil)

    # Get the data
    reflections = flatten_reflections(params.input.reflections)
    experiments = flatten_experiments(params.input.experiments)
    datablocks = flatten_datablocks(params.input.datablock)
    if len(experiments) == 0 and len(datablocks) == 0 and len(reflections) == 0:
      self.parser.print_help()
      exit(0)
    elif (len(experiments) != 0 and len(datablocks) != 0):
      raise Sorry('Both experiment list and datablocks set')
    elif len(experiments) > 1:
      raise Sorry('More than 1 experiment set')
    elif len(datablocks) > 1:
      raise Sorry('More than 1 datablock set')
    elif len(experiments) == 1:
      imageset = experiments[0].imageset
    elif len(datablocks) == 1:
      imagesets = datablocks[0].extract_imagesets()
      if len(imagesets) != 1:
        raise Sorry('Need 1 imageset, got %d' % len(imagesets))
      imageset = imagesets[0]
    if len(reflections) != 1:
      raise Sorry('Need 1 reflection table, got %d' % len(reflections))
    else:
      reflections = reflections[0]

    # Check the reflections contain the necessary stuff
    assert("bbox" in reflections)
    assert("panel" in reflections)

    # Get some models
    detector = imageset.get_detector()
    scan = imageset.get_scan()
    frame0, frame1 = scan.get_array_range()

    # Add some padding but limit to image volume
    if params.padding > 0:
      info('Adding %d pixels as padding' % params.padding)
      x0, x1, y0, y1, z0, z1 = reflections['bbox'].parts()
      x0 -= params.padding
      x1 += params.padding
      y0 -= params.padding
      y1 += params.padding
      z0 -= params.padding
      z1 += params.padding
      panel = reflections['panel']
      for i in range(len(reflections)):
        width, height = detector[panel[i]].get_image_size()
        if x0[i] < 0: x0[i] = 0
        if x1[i] > width: x1[i] = width
        if y0[i] < 0: y0[i] = 0
        if y1[i] > height: y1[i] = height
        if z0[i] < frame0: z0[i] = frame0
        if z1[i] > frame1: z1[i] = frame1
      reflections['bbox'] = flex.int6(x0, x1, y0, y1, z0, z1)

    # Save the old shoeboxes
    if "shoebox" in reflections:
      old_shoebox = reflections['shoebox']
    else:
      old_shoebox = None

    # Allocate the shoeboxes
    reflections["shoebox"] = flex.shoebox(
      reflections["panel"],
      reflections["bbox"],
      allocate=True)

    # Extract the shoeboxes
    reflections.extract_shoeboxes(imageset, verbose=True)

    # Preserve masking
    if old_shoebox is not None:
      info("Applying old shoebox mask")
      new_shoebox = reflections['shoebox']
      for i in range(len(reflections)):
        bbox0 = old_shoebox[i].bbox
        bbox1 = new_shoebox[i].bbox
        mask0 = old_shoebox[i].mask
        mask1 = new_shoebox[i].mask
        mask2 = flex.int(mask1.accessor(), 0)
        x0 = bbox0[0] - bbox1[0]
        x1 = bbox0[1] - bbox0[0] + x0
        y0 = bbox0[2] - bbox1[2]
        y1 = bbox0[3] - bbox0[2] + y0
        z0 = bbox0[4] - bbox1[4]
        z1 = bbox0[5] - bbox0[4] + z0
        mask2[z0:z1,y0:y1,x0:x1] = mask0
        mask1 = mask1.as_1d() | mask2.as_1d()
        mask1.reshape(new_shoebox[i].mask.accessor())
        new_shoebox[i].mask = mask1

    # Saving the reflections to disk
    filename = params.output.reflections
    info('Saving %d reflections to %s' % (len(reflections), filename))
    reflections.as_pickle(filename)
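
The mask-preservation loop above pastes each old mask into its padded replacement using bbox offsets. A small numeric check of that arithmetic, with an invented bbox pair:

# Numeric check of the bbox offset arithmetic in the mask-preservation loop.
# old_bbox sits inside new_bbox after 2 pixels of x/y padding (invented).
old_bbox = (10, 14, 20, 25, 0, 3)    # x0, x1, y0, y1, z0, z1
new_bbox = (8, 16, 18, 27, 0, 3)

x0 = old_bbox[0] - new_bbox[0]       # 2: x offset of old box in new box
x1 = old_bbox[1] - old_bbox[0] + x0  # 6: offset plus old width
y0 = old_bbox[2] - new_bbox[2]       # 2
y1 = old_bbox[3] - old_bbox[2] + y0  # 7
z0 = old_bbox[4] - new_bbox[4]       # 0
z1 = old_bbox[5] - old_bbox[4] + z0  # 3

# mask2[z0:z1, y0:y1, x0:x1] = mask0 would then copy the old mask verbatim
print((x0, x1, y0, y1, z0, z1))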