Example #1

    def run(self, args=None):
        """ Perform the integration. """
        from dials.util.command_line import heading
        from dials.util.options import flatten_reflections, flatten_experiments
        from dials.util import log
        from time import time
        from dials.util import Sorry
        from dials.array_family import flex

        # Record the start time
        start_time = time()

        # Parse the command line
        params, options = self.parser.parse_args(args=args,
                                                 show_diff_phil=False)
        reference = flatten_reflections(params.input.reflections)
        experiments = flatten_experiments(params.input.experiments)
        if len(reference) == 0 and len(experiments) == 0:
            self.parser.print_help()
            return
        if len(reference) == 0:
            reference = None
        elif len(reference) != 1:
            raise Sorry("more than 1 reflection file was given")
        else:
            reference = reference[0]
        if len(experiments) == 0:
            raise Sorry("no experiment list was specified")

        # Save phil parameters
        if params.output.phil is not None:
            with open(params.output.phil, "w") as outfile:
                outfile.write(self.parser.diff_phil.as_str())

        if __name__ == "__main__":
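            # NB: this guard means logging is configured only when the module
            # is executed directly as a script; library callers are unaffected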
            # Configure logging
            log.config(params.verbosity,
                       info=params.output.log,
                       debug=params.output.debug_log)

        from dials.util.version import dials_version

        logger.info(dials_version())

        # Log the diff phil
        diff_phil = self.parser.diff_phil.as_str()
        if diff_phil != "":
            logger.info("The following parameters have been modified:\n")
            logger.info(diff_phil)

        for abs_params in params.absorption_correction:
            if abs_params.apply:
                if not (params.integration.debug.output
                        and not params.integration.debug.separate_files):
                    raise Sorry(
                        "Shoeboxes must be saved to integration intermediates to apply an absorption correction. "
                        "Set integration.debug.output=True, integration.debug.separate_files=False and "
                        "integration.debug.delete_shoeboxes=True to temporarily store shoeboxes."
                    )

        # Print if we're using a mask
        for i, exp in enumerate(experiments):
            mask = exp.imageset.external_lookup.mask
            if mask.filename is not None:
                if mask.data:
                    logger.info("Using external mask: %s" % mask.filename)
                    for tile in mask.data:
                        logger.info(" Mask has %d pixels masked" %
                                    tile.data().count(False))

        # Print the experimental models
        for i, exp in enumerate(experiments):
            logger.info("=" * 80)
            logger.info("")
            logger.info("Experiments")
            logger.info("")
            logger.info("Models for experiment %d" % i)
            logger.info("")
            logger.info(str(exp.beam))
            logger.info(str(exp.detector))
            if exp.goniometer:
                logger.info(str(exp.goniometer))
            if exp.scan:
                logger.info(str(exp.scan))
            logger.info(str(exp.crystal))

        logger.info("=" * 80)
        logger.info("")
        logger.info(heading("Initialising"))
        logger.info("")

        # Load the data
        reference, rubbish = self.process_reference(reference)

        # Check pixels don't belong to neighbours
        if reference is not None:
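            # NB: 'exp' is the last experiment left over from the loop above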
            if exp.goniometer is not None and exp.scan is not None:
                self.filter_reference_pixels(reference, experiments)
        logger.info("")

        # Initialise the integrator
        from dials.algorithms.profile_model.factory import ProfileModelFactory
        from dials.algorithms.integration.integrator import IntegratorFactory

        # Modify experiment list if scan range is set.
        experiments, reference = self.split_for_scan_range(
            experiments, reference, params.scan_range)

        # Modify experiment list if exclude images is set
        experiments = self.exclude_images(experiments, params.exclude_images)

        # Predict the reflections
        logger.info("")
        logger.info("=" * 80)
        logger.info("")
        logger.info(heading("Predicting reflections"))
        logger.info("")
        predicted = flex.reflection_table.from_predictions_multi(
            experiments,
            dmin=params.prediction.d_min,
            dmax=params.prediction.d_max,
            margin=params.prediction.margin,
            force_static=params.prediction.force_static,
            padding=params.prediction.padding,
        )

        # Match reference with predicted
        if reference:
            matched, reference, unmatched = predicted.match_with_reference(
                reference)
            assert len(matched) == len(predicted)
            assert matched.count(True) <= len(reference)
            if matched.count(True) == 0:
                raise Sorry("""
          Invalid input for reference reflections.
          Zero reference spots were matched to predictions
        """)
            elif len(unmatched) != 0:
                logger.info("")
                logger.info("*" * 80)
                logger.info(
                    "Warning: %d reference spots were not matched to predictions"
                    % (len(unmatched)))
                logger.info("*" * 80)
                logger.info("")
            rubbish.extend(unmatched)

            if len(experiments) > 1:
                # filter out any experiments without matched reference reflections
                # f_: filtered
                from dxtbx.model.experiment_list import ExperimentList

                f_reference = flex.reflection_table()
                f_predicted = flex.reflection_table()
                f_rubbish = flex.reflection_table()
                f_experiments = ExperimentList()
                good_expt_count = 0

                def refl_extend(src, dest, eid):
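                    # Select one experiment's reflections and renumber their
                    # 'id' column to the new (filtered) experiment index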
                    tmp = src.select(src["id"] == eid)
                    tmp["id"] = flex.int(len(tmp), good_expt_count)
                    dest.extend(tmp)

                for expt_id, experiment in enumerate(experiments):
                    if len(reference.select(reference["id"] == expt_id)) != 0:
                        refl_extend(reference, f_reference, expt_id)
                        refl_extend(predicted, f_predicted, expt_id)
                        refl_extend(rubbish, f_rubbish, expt_id)
                        f_experiments.append(experiment)
                        good_expt_count += 1
                    else:
                        logger.info(
                            "Removing experiment %d: no reference reflections matched to predictions"
                            % expt_id)

                reference = f_reference
                predicted = f_predicted
                experiments = f_experiments
                rubbish = f_rubbish

        # Select a random sample of the predicted reflections
        if not params.sampling.integrate_all_reflections:
            predicted = self.sample_predictions(experiments, predicted, params)

        # Compute the profile model
        if (params.create_profile_model and reference is not None
                and "shoebox" in reference):
            experiments = ProfileModelFactory.create(params, experiments,
                                                     reference)
        else:
            experiments = ProfileModelFactory.create(params, experiments)
            for expr in experiments:
                if expr.profile is None:
                    raise Sorry("No profile information in experiment list")
        del reference

        # Compute the bounding box
        predicted.compute_bbox(experiments)

        # Create the integrator
        logger.info("")
        integrator = IntegratorFactory.create(params, experiments, predicted)

        # Integrate the reflections
        reflections = integrator.integrate()

        # Append rubbish data onto the end
        if rubbish is not None and params.output.include_bad_reference:
            mask = flex.bool(len(rubbish), True)
            rubbish.unset_flags(mask, rubbish.flags.integrated_sum)
            rubbish.unset_flags(mask, rubbish.flags.integrated_prf)
            rubbish.set_flags(mask, rubbish.flags.bad_reference)
            reflections.extend(rubbish)

        # Correct integrated intensities for absorption correction, if necessary
        for abs_params in params.absorption_correction:
            if abs_params.apply and abs_params.algorithm == "fuller_kapton":
                from dials.algorithms.integration.kapton_correction import (
                    multi_kapton_correction, )

                experiments, reflections = multi_kapton_correction(
                    experiments,
                    reflections,
                    abs_params.fuller_kapton,
                    logger=logger)()

        if params.significance_filter.enable:
            from dials.algorithms.integration.stills_significance_filter import (
                SignificanceFilter, )
            from dxtbx.model.experiment_list import ExperimentList

            sig_filter = SignificanceFilter(params)
            filtered_refls = sig_filter(experiments, reflections)
            accepted_expts = ExperimentList()
            accepted_refls = flex.reflection_table()
            logger.info(
                "Removed %d reflections out of %d when applying significance filter"
                % (len(reflections) - len(filtered_refls), len(reflections)))
            for expt_id, expt in enumerate(experiments):
                refls = filtered_refls.select(filtered_refls["id"] == expt_id)
                if len(refls) > 0:
                    accepted_expts.append(expt)
                    refls["id"] = flex.int(len(refls), len(accepted_expts) - 1)
                    accepted_refls.extend(refls)
                else:
                    logger.info(
                        "Removed experiment %d which has no reflections left after applying significance filter"
                        % expt_id)

            if len(accepted_refls) == 0:
                raise Sorry(
                    "No reflections left after applying significance filter")
            experiments = accepted_expts
            reflections = accepted_refls

        # Delete the shoeboxes used for intermediate calculations, if requested
        if params.integration.debug.delete_shoeboxes and "shoebox" in reflections:
            del reflections["shoebox"]

        # Save the reflections
        self.save_reflections(reflections, params.output.reflections)
        self.save_experiments(experiments, params.output.experiments)

        # Write a report if requested
        if params.output.report is not None:
            integrator.report().as_file(params.output.report)

        # Print the total time taken
        logger.info("\nTotal time taken: %f" % (time() - start_time))

        return experiments, reflections
Example #2
  def compute(self):
    '''
    Integrate the data

    '''
    from dials.algorithms.integration.image_integrator import ProcessorImage
    from dials.util.command_line import heading

    # Init the report
    self.profile_model_report = None
    self.integration_report = None

    # Create summary format
    fmt = (
      ' Processing the following experiments:\n'
      '\n'
      ' Experiments: %d\n'
      ' Beams:       %d\n'
      ' Detectors:   %d\n'
      ' Goniometers: %d\n'
      ' Scans:       %d\n'
      ' Crystals:    %d\n'
      ' Imagesets:   %d\n'
    )

    # Print the summary
    logger.info(fmt % (
      len(self.experiments),
      len(self.experiments.beams()),
      len(self.experiments.detectors()),
      len(self.experiments.goniometers()),
      len(self.experiments.scans()),
      len(self.experiments.crystals()),
      len(self.experiments.imagesets())))

    # Print a heading
    logger.info("=" * 80)
    logger.info("")
    logger.info(heading("Modelling background"))
    logger.info("")

    # Compute some reflection properties
    self.reflections.compute_zeta_multi(self.experiments)
    self.reflections.compute_d(self.experiments)
    self.reflections.compute_bbox(self.experiments)

    # Construct the image integrator processor
    processor = ProcessorImage(
      self.experiments,
      self.reflections,
      self.params)
    processor.executor = BackgroundModellerExecutor(
      self.experiments,
      self.params)

    # Do the processing
    _, time_info = processor.process()

    # Compute the model
    self.model = processor.executor.finalize_model()

    # Print the time info
    logger.info(str(time_info))
    logger.info("")

    # Return the reflections
    return self.model
Example #3
    def run(self, args=None):
        """Execute the script."""
        import pickle
        import sys

        import dials.util.log
        from dials.algorithms.background.modeller import BackgroundModeller
        from dials.array_family import flex
        from dials.util.command_line import heading
        from dials.util.options import flatten_experiments

        # Parse the command line
        params, options = self.parser.parse_args(args, show_diff_phil=False)

        # Configure the logging
        dials.util.log.config(verbosity=options.verbose,
                              logfile=params.output.log)

        if params.integration.mp.nproc != 1 or params.integration.mp.njobs != 1:
            # https://github.com/dials/dials/issues/1083
            logger.warning("Multiprocessing is currently disabled. "
                           "Setting nproc = njobs = 1")
            params.integration.mp.nproc = 1
            params.integration.mp.njobs = 1

        from dials.util.version import dials_version

        logger.info(dials_version())

        # Log the diff phil
        diff_phil = self.parser.diff_phil.as_str()
        if diff_phil != "":
            logger.info("The following parameters have been modified:\n")
            logger.info(diff_phil)

        # Ensure we have a data block
        experiments = flatten_experiments(params.input.experiments)
        if len(experiments) == 0:
            self.parser.print_help()
            return

        # Only handle a single imageset at once
        imagesets = {expr.imageset for expr in experiments}
        if len(imagesets) != 1:
            sys.exit("Can only process a single imageset at a time")

        # Predict the reflections
        logger.info("")
        logger.info("=" * 80)
        logger.info("")
        logger.info(heading("Predicting reflections"))
        logger.info("")
        predicted = flex.reflection_table.from_predictions_multi(
            experiments,
            dmin=params.prediction.d_min,
            dmax=params.prediction.d_max,
            margin=params.prediction.margin,
            force_static=params.prediction.force_static,
        )

        # Create the modeller
        modeller = BackgroundModeller(experiments, predicted, params)
        model = modeller.compute()

        # Save the background model
        logger.info("Saving background model to %s" % params.output.model)
        from dials.algorithms.background.gmodel import StaticBackgroundModel

        static_model = StaticBackgroundModel()
        for m in model:
            static_model.add(m.model)
        with open(params.output.model, "wb") as outfile:
            pickle.dump(static_model,
                        outfile,
                        protocol=pickle.HIGHEST_PROTOCOL)

        # Output some diagnostic images
        image_generator = ImageGenerator(model)
        image_generator.save_mean(params.output.mean_image_prefix)
        image_generator.save_variance(params.output.variance_image_prefix)
        image_generator.save_dispersion(params.output.dispersion_image_prefix)
        image_generator.save_mask(params.output.mask_image_prefix)
        image_generator.save_min(params.output.min_image_prefix)
        image_generator.save_max(params.output.max_image_prefix)
        image_generator.save_model(params.output.model_image_prefix)
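
A minimal usage sketch (not part of the example above; the file name is assumed) for reading back the background model that this script pickles:

    import pickle

    # Load the StaticBackgroundModel written via pickle.dump above
    with open("background_model.pickle", "rb") as infile:
        static_model = pickle.load(infile)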
Example #4
    def compute(self):
        """
        Integrate the data
        """
        from dials.algorithms.integration.image_integrator import ProcessorImage
        from dials.util.command_line import heading

        # Init the report
        self.profile_model_report = None
        self.integration_report = None

        # Create summary format
        fmt = (" Processing the following experiments:\n"
               "\n"
               " Experiments: %d\n"
               " Beams:       %d\n"
               " Detectors:   %d\n"
               " Goniometers: %d\n"
               " Scans:       %d\n"
               " Crystals:    %d\n"
               " Imagesets:   %d\n")

        # Print the summary
        logger.info(fmt % (
            len(self.experiments),
            len(self.experiments.beams()),
            len(self.experiments.detectors()),
            len(self.experiments.goniometers()),
            len(self.experiments.scans()),
            len(self.experiments.crystals()),
            len(self.experiments.imagesets()),
        ))

        # Print a heading
        logger.info("=" * 80)
        logger.info("")
        logger.info(heading("Modelling background"))
        logger.info("")

        # Expand n_sigma: widen each profile model by two sigma (via the
        # private _n_sigma attribute) before modelling the background
        for expt in self.experiments:
            expt.profile._n_sigma += 2

        # Compute some reflection properties
        self.reflections.compute_zeta_multi(self.experiments)
        self.reflections.compute_d(self.experiments)
        self.reflections.compute_bbox(self.experiments)

        # Construct the image integrator processor
        processor = ProcessorImage(self.experiments, self.reflections,
                                   self.params)
        processor.executor = BackgroundModellerExecutor(
            self.experiments, self.params)

        # Do the processing
        _, time_info = processor.process()

        # Compute the model
        self.model = processor.executor.finalize_model()

        # Print the time info
        logger.info(str(time_info))
        logger.info("")

        # Return the reflections
        return self.model
Example #5
  def run(self):
    '''Execute the script.'''
    from dials.util.command_line import heading
    from dials.array_family import flex
    from dials.util.options import flatten_experiments
    from time import time
    from dials.util import log
    from libtbx.utils import Sorry
    from dials.algorithms.background.modeller import BackgroundModeller
    start_time = time()

    # Parse the command line
    params, options = self.parser.parse_args(show_diff_phil=False)

    # Configure the logging
    log.config(
      params.verbosity,
      info=params.output.log,
      debug=params.output.debug_log)

    from dials.util.version import dials_version
    logger.info(dials_version())

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil != '':
      logger.info('The following parameters have been modified:\n')
      logger.info(diff_phil)

    # Ensure we have a data block
    experiments = flatten_experiments(params.input.experiments)
    if len(experiments) == 0:
      self.parser.print_help()
      return

    # Only handle a single imageset at once
    imagesets = set(expr.imageset for expr in experiments)
    if len(imagesets) != 1:
      raise Sorry("Can only process a single imageset at a time")

    # Predict the reflections
    logger.info("")
    logger.info("=" * 80)
    logger.info("")
    logger.info(heading("Predicting reflections"))
    logger.info("")
    predicted = flex.reflection_table.from_predictions_multi(
      experiments,
      dmin=params.prediction.d_min,
      dmax=params.prediction.d_max,
      margin=params.prediction.margin,
      force_static=params.prediction.force_static)

    # Create the modeller
    modeller = BackgroundModeller(experiments, predicted, params)
    model = modeller.compute()

    # Save the background model
    logger.info("Saving background model to %s" % params.output.model)
    from dials.algorithms.background.gmodel import StaticBackgroundModel
    static_model = StaticBackgroundModel()
    for m in model:
      static_model.add(m.model)
    with open(params.output.model, "w") as outfile:
      import cPickle as pickle
      pickle.dump(static_model, outfile, protocol=pickle.HIGHEST_PROTOCOL)

    # Output some diagnostic images
    image_generator = ImageGenerator(model)
    image_generator.save_mean(params.output.mean_image_prefix)
    image_generator.save_variance(params.output.variance_image_prefix)
    image_generator.save_dispersion(params.output.dispersion_image_prefix)
    image_generator.save_mask(params.output.mask_image_prefix)
    image_generator.save_min(params.output.min_image_prefix)
    image_generator.save_max(params.output.max_image_prefix)
    image_generator.save_model(params.output.model_image_prefix)
    #image_generator.save_polar_model(params.output.polar_model_image_prefix)

    # Print the time
    logger.info("Time Taken: %f" % (time() - start_time))
Example #6
def run_integration(params, experiments, reference=None):
    """Perform the integration.

    Returns:
        experiments: The integrated experiments
        reflections: The integrated reflections
        report(optional): An integration report.

    Raises:
        ValueError: For a number of bad inputs
        RuntimeError: If the profile model creation fails
    """
    predicted = None
    rubbish = None

    for abs_params in params.absorption_correction:
        if abs_params.apply:
            if not (params.integration.debug.output
                    and not params.integration.debug.separate_files):
                raise ValueError(
                    "Shoeboxes must be saved to integration intermediates to apply an absorption correction. "
                    "Set integration.debug.output=True, integration.debug.separate_files=False and "
                    "integration.debug.delete_shoeboxes=True to temporarily store shoeboxes."
                )

    # Print if we're using a mask
    for i, exp in enumerate(experiments):
        mask = exp.imageset.external_lookup.mask
        if mask.filename is not None:
            if mask.data:
                logger.info("Using external mask: %s", mask.filename)
                for tile in mask.data:
                    logger.info(" Mask has %d pixels masked",
                                tile.data().count(False))

    # Print the experimental models
    for i, exp in enumerate(experiments):
        summary = "\n".join((
            "",
            "=" * 80,
            "",
            "Experiments",
            "",
            "Models for experiment %d" % i,
            "",
            str(exp.beam),
            str(exp.detector),
        ))
        if exp.goniometer:
            summary += str(exp.goniometer) + "\n"
        if exp.scan:
            summary += str(exp.scan) + "\n"
        summary += str(exp.crystal)
        logger.info(summary)

    logger.info("\n".join(("", "=" * 80, "")))
    logger.info(heading("Initialising"))

    # Load the data
    if reference:
        reference, rubbish = process_reference(reference)

        # Check pixels don't belong to neighbours
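        # NB: 'exp' is the last experiment left over from the loop above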
        if exp.goniometer is not None and exp.scan is not None:
            reference = filter_reference_pixels(reference, experiments)

        # Modify experiment list if scan range is set.
        experiments, reference = split_for_scan_range(experiments, reference,
                                                      params.scan_range)

    # Modify experiment list if exclude images is set
    if params.exclude_images:
        for experiment in experiments:
            for index in params.exclude_images:
                experiment.imageset.mark_for_rejection(index, True)

    # Predict the reflections
    logger.info("\n".join(("", "=" * 80, "")))
    logger.info(heading("Predicting reflections"))
    predicted = flex.reflection_table.from_predictions_multi(
        experiments,
        dmin=params.prediction.d_min,
        dmax=params.prediction.d_max,
        margin=params.prediction.margin,
        force_static=params.prediction.force_static,
        padding=params.prediction.padding,
    )

    # Match reference with predicted
    if reference:
        matched, reference, unmatched = predicted.match_with_reference(
            reference)
        assert len(matched) == len(predicted)
        assert matched.count(True) <= len(reference)
        if matched.count(True) == 0:
            raise ValueError("""
        Invalid input for reference reflections.
        Zero reference spots were matched to predictions
    """)
        elif unmatched:
            msg = (
                "Warning: %d reference spots were not matched to predictions" %
                unmatched.size())
            border = "\n".join(("", "*" * 80, ""))
            logger.info("".join((border, msg, border)))
            rubbish.extend(unmatched)

        if len(experiments) > 1:
            # filter out any experiments without matched reference reflections
            # f_: filtered

            f_reference = flex.reflection_table()
            f_predicted = flex.reflection_table()
            f_rubbish = flex.reflection_table()
            f_experiments = ExperimentList()
            good_expt_count = 0

            def refl_extend(src, dest, eid):
                old_id = eid
                new_id = good_expt_count
                tmp = src.select(src["id"] == old_id)
                tmp["id"] = flex.int(len(tmp), good_expt_count)
                if old_id in tmp.experiment_identifiers():
                    identifier = tmp.experiment_identifiers()[old_id]
                    del tmp.experiment_identifiers()[old_id]
                    tmp.experiment_identifiers()[new_id] = identifier
                dest.extend(tmp)

            for expt_id, experiment in enumerate(experiments):
                if len(reference.select(reference["id"] == expt_id)) != 0:
                    refl_extend(reference, f_reference, expt_id)
                    refl_extend(predicted, f_predicted, expt_id)
                    refl_extend(rubbish, f_rubbish, expt_id)
                    f_experiments.append(experiment)
                    good_expt_count += 1
                else:
                    logger.info(
                        "Removing experiment %d: no reference reflections matched to predictions",
                        expt_id,
                    )

            reference = f_reference
            predicted = f_predicted
            experiments = f_experiments
            rubbish = f_rubbish

    # Select a random sample of the predicted reflections
    if not params.sampling.integrate_all_reflections:
        predicted = sample_predictions(experiments, predicted, params)

    # Compute the profile model - either load existing or compute
    # can raise RuntimeError
    experiments = ProfileModelFactory.create(params, experiments, reference)
    for expr in experiments:
        if expr.profile is None:
            raise ValueError("No profile information in experiment list")
    del reference

    # Compute the bounding box
    predicted.compute_bbox(experiments)

    # Create the integrator
    integrator = create_integrator(params, experiments, predicted)

    # Integrate the reflections
    reflections = integrator.integrate()

    # Remove unintegrated reflections
    if not params.output.output_unintegrated_reflections:
        keep = reflections.get_flags(reflections.flags.integrated, all=False)
        logger.info(
            "Removing %d unintegrated reflections of %d total",
            keep.count(False),
            keep.size(),
        )

        reflections = reflections.select(keep)

    # Append rubbish data onto the end
    if rubbish is not None and params.output.include_bad_reference:
        mask = flex.bool(len(rubbish), True)
        rubbish.unset_flags(mask, rubbish.flags.integrated_sum)
        rubbish.unset_flags(mask, rubbish.flags.integrated_prf)
        rubbish.set_flags(mask, rubbish.flags.bad_reference)
        reflections.extend(rubbish)

    # Correct integrated intensities for absorption correction, if necessary
    for abs_params in params.absorption_correction:
        if abs_params.apply and abs_params.algorithm == "fuller_kapton":
            from dials.algorithms.integration.kapton_correction import (
                multi_kapton_correction, )

            experiments, reflections = multi_kapton_correction(
                experiments,
                reflections,
                abs_params.fuller_kapton,
                logger=logger)()

    if params.significance_filter.enable:
        from dials.algorithms.integration.stills_significance_filter import (
            SignificanceFilter, )

        sig_filter = SignificanceFilter(params)
        filtered_refls = sig_filter(experiments, reflections)
        accepted_expts = ExperimentList()
        accepted_refls = flex.reflection_table()
        logger.info(
            "Removed %d reflections out of %d when applying significance filter",
            (reflections.size() - filtered_refls.size()),
            reflections.size(),
        )
        for expt_id, expt in enumerate(experiments):
            refls = filtered_refls.select(filtered_refls["id"] == expt_id)
            if refls:
                accepted_expts.append(expt)
                current_id = expt_id
                new_id = len(accepted_expts) - 1
                refls["id"] = flex.int(len(refls), new_id)
                if expt.identifier:
                    del refls.experiment_identifiers()[current_id]
                    refls.experiment_identifiers()[new_id] = expt.identifier
                accepted_refls.extend(refls)
            else:
                logger.info(
                    "Removed experiment %d which has no reflections left after applying significance filter",
                    expt_id,
                )

        if not accepted_refls:
            raise ValueError(
                "No reflections left after applying significance filter")
        experiments = accepted_expts
        reflections = accepted_refls

    # Write a report if requested
    report = None
    if params.output.report is not None:
        report = integrator.report()

    return experiments, reflections, report
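
A minimal sketch of calling run_integration() directly; phil_scope and the input file names are assumptions, not shown in the example above:

    from dials.array_family import flex
    from dxtbx.model.experiment_list import ExperimentListFactory

    params = phil_scope.extract()  # assumption: the defining module's phil scope
    experiments = ExperimentListFactory.from_json_file("indexed.expt")  # assumed input
    reference = flex.reflection_table.from_file("indexed.refl")  # assumed input
    experiments, reflections, report = run_integration(params, experiments, reference)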
Example #7
  def integrate(self):
    '''
    Integrate the data

    '''
    from dials.algorithms.integration.report import IntegrationReport
    from dials.algorithms.integration.report import ProfileModelReport
    from dials.algorithms.integration.report import ProfileValidationReport
    from dials.util.command_line import heading
    from logging import info, debug
    from dials.util import pprint
    from random import shuffle, seed
    from math import floor, ceil
    from dials.array_family import flex
    from dials.algorithms.profile_model.modeller import MultiExpProfileModeller
    from dials.algorithms.integration.validation import ValidatedMultiExpProfileModeller

    # Ensure we get the same random sample each time
    seed(0)

    # Init the report
    self.profile_model_report = None
    self.integration_report = None

    # Heading
    info("=" * 80)
    info("")
    info(heading("Processing reflections"))
    info("")

    # Create summary format
    fmt = (
      ' Processing the following experiments:\n'
      '\n'
      ' Experiments: %d\n'
      ' Beams:       %d\n'
      ' Detectors:   %d\n'
      ' Goniometers: %d\n'
      ' Scans:       %d\n'
      ' Crystals:    %d\n'
      ' Imagesets:   %d\n'
    )

    # Print the summary
    info(fmt % (
      len(self.experiments),
      len(self.experiments.beams()),
      len(self.experiments.detectors()),
      len(self.experiments.goniometers()),
      len(self.experiments.scans()),
      len(self.experiments.crystals()),
      len(self.experiments.imagesets())))

    # Initialize the reflections
    initialize = self.InitializerClass(
      self.experiments,
      self.params)
    initialize(self.reflections)

    # Check if we want to do some profile fitting
    fitting_class = [e.profile.fitting_class() for e in self.experiments]
    fitting_avail = all([c is not None for c in fitting_class])
    if self.params.profile.fitting and fitting_avail:
      profile_fitting = True
      profile_fitter = None
    else:
      profile_fitting = False
      profile_fitter = None

    # Do profile modelling
    if profile_fitting:

      info("=" * 80)
      info("")
      info(heading("Modelling reflection profiles"))
      info("")

      # Get the selection
      selection = self.reflections.get_flags(
        self.reflections.flags.reference_spot)

      # Get the reference spots
      reference = self.reflections.select(selection)

      # Check if we need to skip
      if len(reference) == 0:
        info("** Skipping profile modelling - no reference profiles given **")
      else:

        # Try to set up the validation
        if self.params.profile.validation.number_of_partitions > 1:
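          # Cap the number of cross-validation folds so that every partition
          # holds at least min_partition_size reference reflections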
          n = len(reference)
          k_max = int(floor(n / self.params.profile.validation.min_partition_size))
          if k_max < self.params.profile.validation.number_of_partitions:
            num_folds = k_max
          else:
            num_folds = self.params.profile.validation.number_of_partitions
          if num_folds > 1:
            indices = (list(range(num_folds)) * int(ceil(n/num_folds)))[0:n]
            shuffle(indices)
            reference['profile.index'] = flex.size_t(indices)
          if num_folds < 1:
            num_folds = 1
        else:
          num_folds = 1

        # Create the profile fitter
        profile_fitter = ValidatedMultiExpProfileModeller()
        for i in range(num_folds):
          profile_fitter_single = MultiExpProfileModeller()  # (num_folds)
          for expr in self.experiments:
            profile_fitter_single.add(expr.profile.fitting_class()(expr))
          profile_fitter.add(profile_fitter_single)

        # Create the data processor
        executor = ProfileModellerExecutor(
          self.experiments,
          profile_fitter)
        processor = ProcessorBuilder(
          self.ProcessorClass,
          self.experiments,
          reference,
          self.params.modelling).build()
        processor.executor = executor

        # Process the reference profiles
        reference, profile_fitter_list, time_info = processor.process()

        # Set the reference spots info
        #self.reflections.set_selected(selection, reference)

        # Finalize the profile models for validation
        assert len(profile_fitter_list) > 0, "No profile fitters"
        profile_fitter = None
        for index, pf in profile_fitter_list.iteritems():
          if pf is None:
            continue
          if profile_fitter is None:
            profile_fitter = pf
          else:
            profile_fitter.accumulate(pf)
        profile_fitter.finalize()

        # Get the finalized modeller
        finalized_profile_fitter = profile_fitter.finalized_model()

        # Print profiles
        if self.params.debug_reference_output:
          reference_debug = []
          for i in range(len(finalized_profile_fitter)):
            m = finalized_profile_fitter[i]
            p = []
            for j in range(len(m)):
              try:
                p.append(m.data(j))
              except Exception:
                p.append(None)
            reference_debug.append(p)
          with open("reference_profiles.pickle", "wb") as outfile:
            import cPickle as pickle
            pickle.dump(reference_debug, outfile)

        for i in range(len(finalized_profile_fitter)):
          m = finalized_profile_fitter[i]
          debug("")
          debug("Profiles for experiment %d" % i)
          for j in range(len(m)):
            debug("Profile %d" % j)
            try:
              debug(pprint.profile3d(m.data(j)))
            except Exception:
              debug("** NO PROFILE **")

        # Print the modeller report
        self.profile_model_report = ProfileModelReport(
          self.experiments,
          finalized_profile_fitter,
          reference)
        info("")
        info(self.profile_model_report.as_str(prefix=' '))

        # Print the time info
        info("")
        info(str(time_info))
        info("")

        # If we have more than 1 fold then do the validation
        if num_folds > 1:

          # Create the data processor
          executor = ProfileValidatorExecutor(
            self.experiments,
            profile_fitter)
          processor = ProcessorBuilder(
            self.ProcessorClass,
            self.experiments,
            reference,
            self.params.modelling).build()
          processor.executor = executor

          # Process the reference profiles
          reference, validation, time_info = processor.process()

          # Print the modeller report
          self.profile_validation_report = ProfileValidationReport(
            self.experiments,
            profile_fitter,
            reference,
            num_folds)
          info("")
          info(self.profile_validation_report.as_str(prefix=' '))

          # Print the time info
          info("")
          info(str(time_info))
          info("")

        # Set to the finalized fitter
        profile_fitter = finalized_profile_fitter

    info("=" * 80)
    info("")
    info(heading("Integrating reflections"))
    info("")

    # Create the data processor
    executor = IntegratorExecutor(
      self.experiments,
      profile_fitter)
    processor = ProcessorBuilder(
      self.ProcessorClass,
      self.experiments,
      self.reflections,
      self.params.integration).build()
    processor.executor = executor

    # Process the reflections
    self.reflections, _, time_info = processor.process()

    # Finalize the reflections
    finalize = self.FinalizerClass(
      self.experiments,
      self.params)
    finalize(self.reflections)

    # Create the integration report
    self.integration_report = IntegrationReport(
      self.experiments,
      self.reflections)
    info("")
    info(self.integration_report.as_str(prefix=' '))

    # Print the time info
    info(str(time_info))
    info("")

    # Return the reflections
    return self.reflections
Example #8
    def integrate(self):
        '''
        Integrate the data
        '''
        from dials.algorithms.integration.report import IntegrationReport
        from dials.util.command_line import heading

        # Init the report
        self.profile_model_report = None
        self.integration_report = None

        # Heading
        logger.info("=" * 80)
        logger.info("")
        logger.info(heading("Processing reflections"))
        logger.info("")

        # Create summary format
        fmt = (' Processing the following experiments:\n'
               '\n'
               ' Experiments: %d\n'
               ' Beams:       %d\n'
               ' Detectors:   %d\n'
               ' Goniometers: %d\n'
               ' Scans:       %d\n'
               ' Crystals:    %d\n'
               ' Imagesets:   %d\n')

        # Print the summary
        logger.info(fmt % (
            len(self.experiments),
            len(self.experiments.beams()),
            len(self.experiments.detectors()),
            len(self.experiments.goniometers()),
            len(self.experiments.scans()),
            len(self.experiments.crystals()),
            len(self.experiments.imagesets())))

        # Print a heading
        logger.info("=" * 80)
        logger.info("")
        logger.info(heading("Integrating reflections"))
        logger.info("")

        # Initialise the processing
        initialize = InitializerRot(self.experiments, self.params)
        initialize(self.reflections)

        # Construct the image integrator processor
        processor = ProcessorImage(self.experiments, self.reflections,
                                   self.params)
        processor.executor = ImageIntegratorExecutor()

        # Do the processing
        self.reflections, time_info = processor.process()

        # Finalise the processing
        finalize = FinalizerRot(self.experiments, self.params)
        finalize(self.reflections)

        # Create the integration report
        self.integration_report = IntegrationReport(self.experiments,
                                                    self.reflections)
        logger.info("")
        logger.info(self.integration_report.as_str(prefix=' '))

        # Print the time info
        logger.info(str(time_info))
        logger.info("")

        # Return the reflections
        return self.reflections
Example #9
    def integrate(self):
        '''
        Integrate the data
        '''
        from dials.algorithms.integration.report import IntegrationReport
        from dials.algorithms.integration.report import ProfileModelReport
        from dials.algorithms.integration.report import ProfileValidationReport
        from dials.util.command_line import heading
        from dials.util import pprint
        from random import shuffle, seed
        from math import floor, ceil
        from dials.array_family import flex
        from dials.algorithms.profile_model.modeller import MultiExpProfileModeller
        from dials.algorithms.integration.validation import ValidatedMultiExpProfileModeller

        # Ensure we get the same random sample each time
        seed(0)

        # Init the report
        self.profile_model_report = None
        self.integration_report = None

        # Heading
        logger.info("=" * 80)
        logger.info("")
        logger.info(heading("Processing reflections"))
        logger.info("")

        # Create summary format
        fmt = (' Processing the following experiments:\n'
               '\n'
               ' Experiments: %d\n'
               ' Beams:       %d\n'
               ' Detectors:   %d\n'
               ' Goniometers: %d\n'
               ' Scans:       %d\n'
               ' Crystals:    %d\n'
               ' Imagesets:   %d\n')

        # Print the summary
        logger.info(fmt % (
            len(self.experiments),
            len(self.experiments.beams()),
            len(self.experiments.detectors()),
            len(self.experiments.goniometers()),
            len(self.experiments.scans()),
            len(self.experiments.crystals()),
            len(self.experiments.imagesets())))

        # Initialize the reflections
        initialize = self.InitializerClass(self.experiments, self.params)
        initialize(self.reflections)

        # Check if we want to do some profile fitting
        fitting_class = [e.profile.fitting_class() for e in self.experiments]
        fitting_avail = all(c is not None for c in fitting_class)
        if self.params.profile.fitting and fitting_avail:
            profile_fitting = True
            profile_fitter = None
        else:
            profile_fitting = False
            profile_fitter = None

        # Do profile modelling
        if profile_fitting:

            logger.info("=" * 80)
            logger.info("")
            logger.info(heading("Modelling reflection profiles"))
            logger.info("")

            # Get the selection
            selection = self.reflections.get_flags(
                self.reflections.flags.reference_spot)

            # Get the reference spots
            reference = self.reflections.select(selection)

            # Check if we need to skip
            if len(reference) == 0:
                logger.info(
                    "** Skipping profile modelling - no reference profiles given **"
                )
            else:

                # Try to set up the validation
                if self.params.profile.validation.number_of_partitions > 1:
                    n = len(reference)
                    k_max = int(floor(
                        n / self.params.profile.validation.min_partition_size))
                    if k_max < self.params.profile.validation.number_of_partitions:
                        num_folds = k_max
                    else:
                        num_folds = self.params.profile.validation.number_of_partitions
                    if num_folds > 1:
                        indices = (list(range(num_folds)) *
                                   int(ceil(n / num_folds)))[0:n]
                        shuffle(indices)
                        reference['profile.index'] = flex.size_t(indices)
                    if num_folds < 1:
                        num_folds = 1
                else:
                    num_folds = 1

                # Create the profile fitter
                profile_fitter = ValidatedMultiExpProfileModeller()
                for i in range(num_folds):
                    profile_fitter_single = MultiExpProfileModeller()  # (num_folds)
                    for expr in self.experiments:
                        profile_fitter_single.add(
                            expr.profile.fitting_class()(expr))
                    profile_fitter.add(profile_fitter_single)

                # Create the data processor
                executor = ProfileModellerExecutor(self.experiments,
                                                   profile_fitter)
                processor = ProcessorBuilder(self.ProcessorClass,
                                             self.experiments, reference,
                                             self.params.modelling).build()
                processor.executor = executor

                # Process the reference profiles
                reference, profile_fitter_list, time_info = processor.process()

                # Set the reference spots info
                #self.reflections.set_selected(selection, reference)

                # Finalize the profile models for validation
                assert len(profile_fitter_list) > 0, "No profile fitters"
                profile_fitter = None
                for index, pf in profile_fitter_list.items():
                    if pf is None:
                        continue
                    if profile_fitter is None:
                        profile_fitter = pf
                    else:
                        profile_fitter.accumulate(pf)
                profile_fitter.finalize()

                # Get the finalized modeller
                finalized_profile_fitter = profile_fitter.finalized_model()

                # Print profiles
                if self.params.debug_reference_output:
                    reference_debug = []
                    for i in range(len(finalized_profile_fitter)):
                        m = finalized_profile_fitter[i]
                        p = []
                        for j in range(len(m)):
                            try:
                                p.append(m.data(j))
                            except Exception:
                                p.append(None)
                        reference_debug.append(p)
                    with open("reference_profiles.pickle", "wb") as outfile:
                        import pickle
                        pickle.dump(reference_debug, outfile)

                for i in range(len(finalized_profile_fitter)):
                    m = finalized_profile_fitter[i]
                    logger.debug("")
                    logger.debug("Profiles for experiment %d" % i)
                    for j in range(len(m)):
                        logger.debug("Profile %d" % j)
                        try:
                            logger.debug(pprint.profile3d(m.data(j)))
                        except Exception:
                            logger.debug("** NO PROFILE **")

                # Print the modeller report
                self.profile_model_report = ProfileModelReport(
                    self.experiments, finalized_profile_fitter, reference)
                logger.info("")
                logger.info(self.profile_model_report.as_str(prefix=' '))

                # Print the time info
                logger.info("")
                logger.info(str(time_info))
                logger.info("")

                # If we have more than 1 fold then do the validation
                if num_folds > 1:

                    # Create the data processor
                    executor = ProfileValidatorExecutor(
                        self.experiments, profile_fitter)
                    processor = ProcessorBuilder(
                        self.ProcessorClass, self.experiments, reference,
                        self.params.modelling).build()
                    processor.executor = executor

                    # Process the reference profiles
                    reference, validation, time_info = processor.process()

                    # Print the modeller report
                    self.profile_validation_report = ProfileValidationReport(
                        self.experiments, profile_fitter, reference, num_folds)
                    logger.info("")
                    logger.info(
                        self.profile_validation_report.as_str(prefix=' '))

                    # Print the time info
                    logger.info("")
                    logger.info(str(time_info))
                    logger.info("")

                # Set to the finalized fitter
                profile_fitter = finalized_profile_fitter

        logger.info("=" * 80)
        logger.info("")
        logger.info(heading("Integrating reflections"))
        logger.info("")

        # Create the data processor
        executor = IntegratorExecutor(self.experiments, profile_fitter)
        processor = ProcessorBuilder(self.ProcessorClass, self.experiments,
                                     self.reflections,
                                     self.params.integration).build()
        processor.executor = executor

        # Process the reflections
        self.reflections, _, time_info = processor.process()

        # Finalize the reflections
        finalize = self.FinalizerClass(self.experiments, self.params)
        finalize(self.reflections)

        # Create the integration report
        self.integration_report = IntegrationReport(self.experiments,
                                                    self.reflections)
        logger.info("")
        logger.info(self.integration_report.as_str(prefix=' '))

        # Print the time info
        logger.info(str(time_info))
        logger.info("")

        # Return the reflections
        return self.reflections
Example #10
    def run(self):
        """ Perform the integration. """
        from dials.util.command_line import heading
        from dials.util.options import flatten_reflections, flatten_experiments
        from dials.util import log
        from logging import info, debug
        from time import time
        from libtbx.utils import Sorry

        # Record the start time
        start_time = time()

        # Parse the command line
        params, options = self.parser.parse_args(show_diff_phil=False)
        reference = flatten_reflections(params.input.reflections)
        experiments = flatten_experiments(params.input.experiments)
        if len(reference) == 0 and len(experiments) == 0:
            self.parser.print_help()
            return
        if len(reference) == 0:
            reference = None
        elif len(reference) != 1:
            raise Sorry("more than 1 reflection file was given")
        else:
            reference = reference[0]
        if len(experiments) == 0:
            raise Sorry("no experiment list was specified")

        # Save phil parameters
        if params.output.phil is not None:
            with open(params.output.phil, "w") as outfile:
                outfile.write(self.parser.diff_phil.as_str())

        # Configure logging
        log.config(params.verbosity, info=params.output.log, debug=params.output.debug_log)

        from dials.util.version import dials_version

        info(dials_version())

        # Log the diff phil
        diff_phil = self.parser.diff_phil.as_str()
        if diff_phil != "":
            info("The following parameters have been modified:\n")
            info(diff_phil)

        # Print if we're using a mask
        for i, exp in enumerate(experiments):
            mask = exp.imageset.external_lookup.mask
            if mask.filename is not None:
                info("Using external mask: %s" % mask.filename)
                info(" Mask has %d pixels masked" % mask.data.count(False))

        # Print the experimental models
        for i, exp in enumerate(experiments):
            debug("Models for experiment %d" % i)
            debug("")
            debug(str(exp.beam))
            debug(str(exp.detector))
            if exp.goniometer:
                debug(str(exp.goniometer))
            if exp.scan:
                debug(str(exp.scan))
            debug(str(exp.crystal))

        info("=" * 80)
        info("")
        info(heading("Initialising"))
        info("")

        # Load the data
        reference, rubbish = self.process_reference(reference)
        info("")

        # Initialise the integrator
        from dials.algorithms.profile_model.factory import ProfileModelFactory
        from dials.algorithms.integration.integrator import IntegratorFactory
        from dials.array_family import flex

        # Modify experiment list if scan range is set.
        experiments, reference = self.split_for_scan_range(experiments, reference, params.scan_range)

        # Predict the reflections
        info("")
        info("=" * 80)
        info("")
        info(heading("Predicting reflections"))
        info("")
        predicted = flex.reflection_table.from_predictions_multi(
            experiments,
            dmin=params.prediction.d_min,
            dmax=params.prediction.d_max,
            margin=params.prediction.margin,
            force_static=params.prediction.force_static,
        )

        # Match reference with predicted
        if reference:
            matched, reference, unmatched = predicted.match_with_reference(reference)
            assert len(matched) == len(predicted)
            assert matched.count(True) <= len(reference)
            if matched.count(True) == 0:
                raise Sorry(
                    """
          Invalid input for reference reflections.
          Zero reference spots were matched to predictions
        """
                )
            elif len(unmatched) != 0:
                info("")
                info("*" * 80)
                info("Warning: %d reference spots were not matched to predictions" % (len(unmatched)))
                info("*" * 80)
                info("")
            rubbish.extend(unmatched)

        # Select a random sample of the predicted reflections
        if not params.sampling.integrate_all_reflections:
            predicted = self.sample_predictions(experiments, predicted, params)

        # Compute the profile model
        if reference is not None and params.create_profile_model:
            experiments = ProfileModelFactory.create(params, experiments, reference)
        else:
            for expr in experiments:
                expr.profile.params = params.profile
        del reference

        # Compute the bounding box
        predicted.compute_bbox(experiments)

        # Create the integrator
        info("")
        integrator = IntegratorFactory.create(params, experiments, predicted)

        # Integrate the reflections
        reflections = integrator.integrate()

        # Append rubbish data onto the end
        if rubbish is not None and params.output.include_bad_reference:
            mask = flex.bool(len(rubbish), True)
            rubbish.unset_flags(mask, rubbish.flags.integrated_sum)
            rubbish.unset_flags(mask, rubbish.flags.integrated_prf)
            rubbish.set_flags(mask, rubbish.flags.bad_reference)
            reflections.extend(rubbish)

        # Save the reflections
        self.save_reflections(reflections, params.output.reflections)
        self.save_experiments(experiments, params.output.experiments)

        # Write a report if requested
        if params.output.report is not None:
            integrator.report().as_file(params.output.report)

        # Print the total time taken
        info("\nTotal time taken: %f" % (time() - start_time))
Example #11
  def run(self):
    ''' Perform the integration. '''
    from dials.util.command_line import heading
    from dials.util.options import flatten_reflections, flatten_experiments
    from dials.util import log
    from time import time
    from libtbx.utils import Sorry

    # Check the number of arguments is correct
    start_time = time()

    # Parse the command line
    params, options = self.parser.parse_args(show_diff_phil=False)
    reference = flatten_reflections(params.input.reflections)
    experiments = flatten_experiments(params.input.experiments)
    if len(reference) == 0 and len(experiments) == 0:
      self.parser.print_help()
      return
    if len(reference) == 0:
      reference = None
    elif len(reference) != 1:
      raise Sorry('more than 1 reflection file was given')
    else:
      reference = reference[0]
    if len(experiments) == 0:
      raise Sorry('no experiment list was specified')

    # Save phil parameters
    if params.output.phil is not None:
      with open(params.output.phil, "w") as outfile:
        outfile.write(self.parser.diff_phil.as_str())

    # Configure logging
    log.config(
      params.verbosity,
      info=params.output.log,
      debug=params.output.debug_log)

    from dials.util.version import dials_version
    logger.info(dials_version())

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil != '':
      logger.info('The following parameters have been modified:\n')
      logger.info(diff_phil)

    # Print if we're using a mask
    for i, exp in enumerate(experiments):
      mask = exp.imageset.external_lookup.mask
      if mask.filename is not None:
        if mask.data:
          logger.info('Using external mask: %s' % mask.filename)
          logger.info(' Mask has %d pixels masked' % mask.data.count(False))

    # Print the experimental models
    for i, exp in enumerate(experiments):
      logger.debug("Models for experiment %d" % i)
      logger.debug("")
      logger.debug(str(exp.beam))
      logger.debug(str(exp.detector))
      if exp.goniometer:
        logger.debug(str(exp.goniometer))
      if exp.scan:
        logger.debug(str(exp.scan))
      logger.debug(str(exp.crystal))

    logger.info("=" * 80)
    logger.info("")
    logger.info(heading("Initialising"))
    logger.info("")

    # Load the data
    reference, rubbish = self.process_reference(reference)
    logger.info("")

    # Initialise the integrator
    from dials.algorithms.profile_model.factory import ProfileModelFactory
    from dials.algorithms.integration.integrator import IntegratorFactory
    from dials.array_family import flex

    # Modify experiment list if scan range is set.
    experiments, reference = self.split_for_scan_range(
      experiments,
      reference,
      params.scan_range)

    # Predict the reflections
    logger.info("")
    logger.info("=" * 80)
    logger.info("")
    logger.info(heading("Predicting reflections"))
    logger.info("")
    predicted = flex.reflection_table.from_predictions_multi(
      experiments,
      dmin=params.prediction.d_min,
      dmax=params.prediction.d_max,
      margin=params.prediction.margin,
      force_static=params.prediction.force_static)

    # Match reference with predicted
    if reference:
      matched, reference, unmatched = predicted.match_with_reference(reference)
      assert len(matched) == len(predicted)
      assert matched.count(True) <= len(reference)
      if matched.count(True) == 0:
        raise Sorry('''
          Invalid input for reference reflections.
          Zero reference spots were matched to predictions
        ''')
      elif len(unmatched) != 0:
        logger.info('')
        logger.info('*' * 80)
        logger.info('Warning: %d reference spots were not matched to predictions' % (
          len(unmatched)))
        logger.info('*' * 80)
        logger.info('')
      rubbish.extend(unmatched)

      if len(experiments) > 1:
        # filter out any experiments without matched reference reflections
        # f_: filtered
        from dxtbx.model.experiment.experiment_list import ExperimentList
        f_reference = flex.reflection_table()
        f_predicted = flex.reflection_table()
        f_rubbish = flex.reflection_table()
        f_experiments = ExperimentList()
        good_expt_count = 0
        def refl_extend(src, dest, eid):
          tmp = src.select(src['id'] == eid)
          tmp['id'] = flex.int(len(tmp), good_expt_count)
          dest.extend(tmp)

        for expt_id, experiment in enumerate(experiments):
          if len(reference.select(reference['id'] == expt_id)) != 0:
            refl_extend(reference, f_reference, expt_id)
            refl_extend(predicted, f_predicted, expt_id)
            refl_extend(rubbish, f_rubbish, expt_id)
            f_experiments.append(experiment)
            good_expt_count += 1
          else:
            logger.info("Removing experiment %d: no reference reflections matched to predictions"%expt_id)

        reference = f_reference
        predicted = f_predicted
        experiments = f_experiments
        rubbish = f_rubbish

    # Select a random sample of the predicted reflections
    if not params.sampling.integrate_all_reflections:
      predicted = self.sample_predictions(experiments, predicted, params)

    # Compute the profile model
    if (params.create_profile_model and
        reference is not None and
        "shoebox" in reference):
      experiments = ProfileModelFactory.create(params, experiments, reference)
    else:
      for expr in experiments:
        if expr.profile is None:
          raise Sorry('No profile information in experiment list')
        expr.profile.params = params.profile
    del reference

    # Compute the bounding box
    predicted.compute_bbox(experiments)

    # Create the integrator
    logger.info("")
    integrator = IntegratorFactory.create(params, experiments, predicted)

    # Integrate the reflections
    reflections = integrator.integrate()

    # Append rubbish data onto the end
    if rubbish is not None and params.output.include_bad_reference:
      mask = flex.bool(len(rubbish), True)
      rubbish.unset_flags(mask, rubbish.flags.integrated_sum)
      rubbish.unset_flags(mask, rubbish.flags.integrated_prf)
      rubbish.set_flags(mask, rubbish.flags.bad_reference)
      reflections.extend(rubbish)

    # Save the reflections
    self.save_reflections(reflections, params.output.reflections)
    self.save_experiments(experiments, params.output.experiments)

    # Write a report if requested
    if params.output.report is not None:
      integrator.report().as_file(params.output.report)

    # Print the total time taken
    logger.info("\nTotal time taken: %f" % (time() - start_time))
Example #12
  def integrate(self):
    '''
    Integrate the data

    '''
    from dials.algorithms.integration.report import IntegrationReport
    from dials.util.command_line import heading

    # Init the report
    self.profile_model_report = None
    self.integration_report = None

    # Heading
    logger.info("=" * 80)
    logger.info("")
    logger.info(heading("Processing reflections"))
    logger.info("")

    # Create summary format
    fmt = (
      ' Processing the following experiments:\n'
      '\n'
      ' Experiments: %d\n'
      ' Beams:       %d\n'
      ' Detectors:   %d\n'
      ' Goniometers: %d\n'
      ' Scans:       %d\n'
      ' Crystals:    %d\n'
      ' Imagesets:   %d\n'
    )

    # Print the summary
    logger.info(fmt % (
      len(self.experiments),
      len(self.experiments.beams()),
      len(self.experiments.detectors()),
      len(self.experiments.goniometers()),
      len(self.experiments.scans()),
      len(self.experiments.crystals()),
      len(self.experiments.imagesets())))

    # Print a heading
    logger.info("=" * 80)
    logger.info("")
    logger.info(heading("Integrating reflections"))
    logger.info("")

    # Initialise the processing
    initialize = InitializerRot(
      self.experiments,
      self.params)
    initialize(self.reflections)

    # Construct the image integrator processor
    processor = ProcessorImage(
      self.experiments,
      self.reflections,
      self.params)
    processor.executor = ImageIntegratorExecutor()

    # Do the processing
    self.reflections, time_info = processor.process()

    # Finalise the processing
    finalize = FinalizerRot(
      self.experiments,
      self.params)
    finalize(self.reflections)

    # Create the integration report
    self.integration_report = IntegrationReport(
      self.experiments,
      self.reflections)
    logger.info("")
    logger.info(self.integration_report.as_str(prefix=' '))

    # Print the time info
    logger.info(str(time_info))
    logger.info("")

    # Return the reflections
    return self.reflections
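The integrate() method above is a three-stage pipeline: an initialiser prepares the reflection table, a processor performs the per-image work, and a finaliser cleans up. A sketch of that shape with hypothetical stand-in classes (InitializerRot, ProcessorImage and FinalizerRot are the actual DIALS components used above; everything below is illustrative only):

class Initializer:
    """Hypothetical stand-in: prepare reflections, e.g. compute bounding boxes."""
    def __call__(self, reflections):
        pass

class Processor:
    """Hypothetical stand-in: perform the per-image integration work."""
    def __init__(self, experiments, reflections, params):
        self.reflections = reflections
    def process(self):
        # Returns the updated reflections plus timing information
        return self.reflections, 'time info'

class Finalizer:
    """Hypothetical stand-in: release shoebox memory, apply corrections."""
    def __call__(self, reflections):
        pass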
Example #13
    def run(self):
        ''' Perform the integration. '''
        from dials.util.command_line import heading
        from dials.util.options import flatten_reflections, flatten_experiments
        from dials.util import log
        from time import time
        from libtbx.utils import Sorry

        # Check the number of arguments is correct
        start_time = time()

        # Parse the command line
        params, options = self.parser.parse_args(show_diff_phil=False)
        reference = flatten_reflections(params.input.reflections)
        experiments = flatten_experiments(params.input.experiments)
        if len(reference) == 0 and len(experiments) == 0:
            self.parser.print_help()
            return
        if len(reference) == 0:
            reference = None
        elif len(reference) != 1:
            raise Sorry('more than 1 reflection file was given')
        else:
            reference = reference[0]
        if len(experiments) == 0:
            raise Sorry('no experiment list was specified')

        # Save phil parameters
        if params.output.phil is not None:
            with open(params.output.phil, "w") as outfile:
                outfile.write(self.parser.diff_phil.as_str())

        # Configure logging
        log.config(params.verbosity,
                   info=params.output.log,
                   debug=params.output.debug_log)

        from dials.util.version import dials_version
        logger.info(dials_version())

        # Log the diff phil
        diff_phil = self.parser.diff_phil.as_str()
        if diff_phil != '':
            logger.info('The following parameters have been modified:\n')
            logger.info(diff_phil)

        # Print if we're using a mask
        for i, exp in enumerate(experiments):
            mask = exp.imageset.external_lookup.mask
            if mask.filename is not None:
                if mask.data:
                    logger.info('Using external mask: %s' % mask.filename)
                    logger.info(' Mask has %d pixels masked' %
                                mask.data.count(False))

        # Print the experimental models
        for i, exp in enumerate(experiments):
            logger.debug("Models for experiment %d" % i)
            logger.debug("")
            logger.debug(str(exp.beam))
            logger.debug(str(exp.detector))
            if exp.goniometer:
                logger.debug(str(exp.goniometer))
            if exp.scan:
                logger.debug(str(exp.scan))
            logger.debug(str(exp.crystal))

        logger.info("=" * 80)
        logger.info("")
        logger.info(heading("Initialising"))
        logger.info("")

        # Load the data
        reference, rubbish = self.process_reference(reference)
        logger.info("")

        # Initialise the integrator
        from dials.algorithms.profile_model.factory import ProfileModelFactory
        from dials.algorithms.integration.integrator import IntegratorFactory
        from dials.array_family import flex

        # Modify experiment list if scan range is set.
        experiments, reference = self.split_for_scan_range(
            experiments, reference, params.scan_range)

        # Predict the reflections
        logger.info("")
        logger.info("=" * 80)
        logger.info("")
        logger.info(heading("Predicting reflections"))
        logger.info("")
        predicted = flex.reflection_table.from_predictions_multi(
            experiments,
            dmin=params.prediction.d_min,
            dmax=params.prediction.d_max,
            margin=params.prediction.margin,
            force_static=params.prediction.force_static)

        # Match reference with predicted
        if reference:
            matched, reference, unmatched = predicted.match_with_reference(
                reference)
            assert len(matched) == len(predicted)
            assert matched.count(True) <= len(reference)
            if matched.count(True) == 0:
                raise Sorry('''
          Invalid input for reference reflections.
          Zero reference spots were matched to predictions
        ''')
            elif len(unmatched) != 0:
                logger.info('')
                logger.info('*' * 80)
                logger.info(
                    'Warning: %d reference spots were not matched to predictions'
                    % (len(unmatched)))
                logger.info('*' * 80)
                logger.info('')
            rubbish.extend(unmatched)

            if len(experiments) > 1:
                # filter out any experiments without matched reference reflections
                # f_: filtered
                from dxtbx.model.experiment.experiment_list import ExperimentList
                f_reference = flex.reflection_table()
                f_predicted = flex.reflection_table()
                f_rubbish = flex.reflection_table()
                f_experiments = ExperimentList()
                good_expt_count = 0

                def refl_extend(src, dest, eid):
                    tmp = src.select(src['id'] == eid)
                    tmp['id'] = flex.int(len(tmp), good_expt_count)
                    dest.extend(tmp)

                for expt_id, experiment in enumerate(experiments):
                    if len(reference.select(reference['id'] == expt_id)) != 0:
                        refl_extend(reference, f_reference, expt_id)
                        refl_extend(predicted, f_predicted, expt_id)
                        refl_extend(rubbish, f_rubbish, expt_id)
                        f_experiments.append(experiment)
                        good_expt_count += 1
                    else:
                        logger.info(
                            "Removing experiment %d: no reference reflections matched to predictions"
                            % expt_id)

                reference = f_reference
                predicted = f_predicted
                experiments = f_experiments
                rubbish = f_rubbish

        # Select a random sample of the predicted reflections
        if not params.sampling.integrate_all_reflections:
            predicted = self.sample_predictions(experiments, predicted, params)

        # Compute the profile model
        if (params.create_profile_model and reference is not None
                and "shoebox" in reference):
            experiments = ProfileModelFactory.create(params, experiments,
                                                     reference)
        else:
            for expr in experiments:
                if expr.profile is None:
                    raise Sorry('No profile information in experiment list')
                expr.profile.params = params.profile
        del reference

        # Compute the bounding box
        predicted.compute_bbox(experiments)

        # Create the integrator
        logger.info("")
        integrator = IntegratorFactory.create(params, experiments, predicted)

        # Integrate the reflections
        reflections = integrator.integrate()

        # Append rubbish data onto the end
        if rubbish is not None and params.output.include_bad_reference:
            mask = flex.bool(len(rubbish), True)
            rubbish.unset_flags(mask, rubbish.flags.integrated_sum)
            rubbish.unset_flags(mask, rubbish.flags.integrated_prf)
            rubbish.set_flags(mask, rubbish.flags.bad_reference)
            reflections.extend(rubbish)

        # Save the reflections
        self.save_reflections(reflections, params.output.reflections)
        self.save_experiments(experiments, params.output.experiments)

        # Write a report if requested
        if params.output.report is not None:
            integrator.report().as_file(params.output.report)

        # Print the total time taken
        logger.info("\nTotal time taken: %f" % (time() - start_time))