def run(self):
    ''' Run the script. '''
    from dials.algorithms.profile_model.factory import ProfileModelFactory
    from dials.util.command_line import Command
    from dials.array_family import flex
    from dials.util.options import flatten_reflections, flatten_experiments
    from dxtbx.model.experiment_list import ExperimentListDumper
    from libtbx.utils import Sorry
    from dials.util import log

    log.config()

    # Parse the command line
    params, options = self.parser.parse_args(show_diff_phil=True)
    reflections = flatten_reflections(params.input.reflections)
    experiments = flatten_experiments(params.input.experiments)
    if len(reflections) == 0 and len(experiments) == 0:
      self.parser.print_help()
      return
    if len(reflections) != 1:
      raise Sorry('exactly 1 reflection table must be specified')
    if len(experiments) == 0:
      raise Sorry('no experiments were specified')
    if params.subtract_background and 'background.mean' not in reflections[0]:
      raise Sorry('for subtract_background, need background.mean in reflections')

    reflections, _ = self.process_reference(reflections[0], params)

    # Check pixels don't belong to neighbours
    self.filter_reference_pixels(reflections, experiments)

    # Predict the reflections
    logger.info("")
    logger.info("=" * 80)
    logger.info("")
    logger.info("Predicting reflections")
    logger.info("")
    predicted = flex.reflection_table.from_predictions_multi(
      experiments,
      dmin=params.prediction.d_min,
      dmax=params.prediction.d_max,
      margin=params.prediction.margin,
      force_static=params.prediction.force_static,
      padding=params.prediction.padding)

    # Match with predicted
    matched, reflections, unmatched = predicted.match_with_reference(reflections)
    assert len(matched) == len(predicted)
    assert matched.count(True) <= len(reflections)
    if matched.count(True) == 0:
      raise Sorry('''
        Invalid input for reference reflections.
        Zero reference spots were matched to predictions
      ''')
    elif len(unmatched) != 0:
      logger.info('')
      logger.info('*' * 80)
      logger.info('Warning: %d reference spots were not matched to predictions' % (
        len(unmatched)))
      logger.info('*' * 80)
      logger.info('')

    # Create the profile model
    experiments = ProfileModelFactory.create(params, experiments, reflections)
    for model in experiments:
      sigma_b = model.profile.sigma_b(deg=True)
      sigma_m = model.profile.sigma_m(deg=True)
      if isinstance(sigma_b, float):  # scan static: a single value per model
        logger.info('Sigma B: %f' % sigma_b)
        logger.info('Sigma M: %f' % sigma_m)
      else: # scan varying
        mean_sigma_b = sum(sigma_b) / len(sigma_b)
        mean_sigma_m = sum(sigma_m) / len(sigma_m)
        logger.info('Sigma B: %f' % mean_sigma_b)
        logger.info('Sigma M: %f' % mean_sigma_m)

    # Write the experiments
    Command.start("Writing experiments to %s" % params.output)
    dump = ExperimentListDumper(experiments)
    with open(params.output, "w") as outfile:
      outfile.write(dump.as_json())
    Command.end("Wrote experiments to %s" % params.output)
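
The experiment list written above, with profile models attached, can be read straight back for downstream use. A minimal sketch, assuming the output path was "models_with_profiles.json" (a stand-in for whatever params.output held):

from dxtbx.model.experiment_list import ExperimentListFactory

# Load the experiments written by the script above; check_format=False
# avoids needing access to the original image data.
experiments = ExperimentListFactory.from_json_file(
    "models_with_profiles.json", check_format=False)
for experiment in experiments:
    # each experiment now carries the profile model created above
    print(experiment.profile)
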
Example #2
    def run(self):
        ''' Parse the options. '''
        from dials.util.options import flatten_experiments, flatten_reflections
        from dials.array_family import flex
        from dxtbx.model import ExperimentList
        from scitbx.math import five_number_summary
        import math
        from matplotlib import pyplot as plt
        # Parse the command line arguments
        params, options = self.parser.parse_args(show_diff_phil=True)
        self.params = params
        experiments = flatten_experiments(params.input.experiments)
        reflections = flatten_reflections(params.input.reflections)

        assert len(reflections) == 1
        reflections = reflections[0]
        print("Found", len(reflections), "reflections", "and",
              len(experiments), "experiments")

        filtered_reflections = flex.reflection_table()
        filtered_experiments = ExperimentList()

        skipped_reflections = flex.reflection_table()
        skipped_experiments = ExperimentList()

        if params.detector is not None:
            culled_reflections = flex.reflection_table()
            culled_experiments = ExperimentList()
            detector = experiments.detectors()[params.detector]
            for expt_id, experiment in enumerate(experiments):
                refls = reflections.select(reflections['id'] == expt_id)
                if experiment.detector is detector:
                    culled_experiments.append(experiment)
                    refls['id'] = flex.int(len(refls),
                                           len(culled_experiments) - 1)
                    culled_reflections.extend(refls)
                else:
                    skipped_experiments.append(experiment)
                    refls['id'] = flex.int(len(refls),
                                           len(skipped_experiments) - 1)
                    skipped_reflections.extend(refls)

            print(
                "RMSD filtering %d experiments using detector %d, out of %d" %
                (len(culled_experiments), params.detector, len(experiments)))
            reflections = culled_reflections
            experiments = culled_experiments

        difference_vector_norms = (reflections['xyzcal.mm'] -
                                   reflections['xyzobs.mm.value']).norms()

        if params.max_delta is not None:
            sel = difference_vector_norms <= params.max_delta
            reflections = reflections.select(sel)
            difference_vector_norms = difference_vector_norms.select(sel)

        data = flex.double()
        counts = flex.double()
        for i in range(len(experiments)):
            dvns = difference_vector_norms.select(reflections['id'] == i)
            counts.append(len(dvns))
            if len(dvns) == 0:
                data.append(0)
                continue
            rmsd = math.sqrt(flex.sum_sq(dvns) / len(dvns))
            data.append(rmsd)
        data *= 1000
        subset = data.select(counts > 0)
        print(len(subset), "experiments with > 0 reflections")

        if params.show_plots:
            h = flex.histogram(subset, n_slots=40)
            fig = plt.figure()
            ax = fig.add_subplot(111)
            ax.plot(h.slot_centers().as_numpy_array(),
                    h.slots().as_numpy_array(), '-')
            plt.title("Histogram of %d image RMSDs" % len(subset))

            fig = plt.figure()
            plt.boxplot(subset, vert=False)
            plt.title("Boxplot of %d image RMSDs" % len(subset))
            plt.show()

        outliers = counts == 0
        min_x, q1_x, med_x, q3_x, max_x = five_number_summary(subset)
        print(
            "Five number summary of RMSDs (microns): min %.1f, q1 %.1f, med %.1f, q3 %.1f, max %.1f"
            % (min_x, q1_x, med_x, q3_x, max_x))
        iqr_x = q3_x - q1_x
        cut_x = params.iqr_multiplier * iqr_x
        outliers.set_selected(data > q3_x + cut_x, True)
        #outliers.set_selected(col < q1_x - cut_x, True) # Don't throw away the images that are outliers in the 'good' direction!

        for i in range(len(experiments)):
            if outliers[i]:
                continue
            refls = reflections.select(reflections['id'] == i)
            refls['id'] = flex.int(len(refls), len(filtered_experiments))
            filtered_reflections.extend(refls)
            filtered_experiments.append(experiments[i])

        zeroes = counts == 0
        n_zero = len(counts.select(zeroes))
        print(
            "Removed %d bad experiments and %d experiments with zero reflections, out of %d (%%%.1f)"
            % (len(experiments) - len(filtered_experiments) - n_zero, n_zero,
               len(experiments), 100 *
               ((len(experiments) - len(filtered_experiments)) /
                len(experiments))))

        if params.detector is not None:
            crystals = filtered_experiments.crystals()
            for expt_id, experiment in enumerate(skipped_experiments):
                if experiment.crystal in crystals:
                    filtered_experiments.append(experiment)
                    refls = skipped_reflections.select(
                        skipped_reflections['id'] == expt_id)
                    refls['id'] = flex.int(len(refls),
                                           len(filtered_experiments) - 1)
                    filtered_reflections.extend(refls)

        if params.delta_psi_filter is not None:
            delta_psi = filtered_reflections['delpsical.rad'] * 180 / math.pi
            sel = (delta_psi <= params.delta_psi_filter) & (
                delta_psi >= -params.delta_psi_filter)
            n_before = len(filtered_reflections)
            filtered_reflections = filtered_reflections.select(sel)
            print("Filtering by delta psi, removing %d out of %d reflections" %
                  (n_before - len(filtered_reflections), n_before))

        print("Final experiment count", len(filtered_experiments))

        from dxtbx.model.experiment_list import ExperimentListDumper
        dump = ExperimentListDumper(filtered_experiments)
        dump.as_json(params.output.filtered_experiments)

        filtered_reflections.as_pickle(params.output.filtered_reflections)
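
The heart of this script is a one-sided Tukey fence: images whose RMSD sits more than iqr_multiplier interquartile ranges above the third quartile are dropped, while unusually good images are kept. A minimal sketch of that rule, with made-up RMSDs in microns and the common multiplier 1.5:

from dials.array_family import flex
from scitbx.math import five_number_summary

rmsds = flex.double([5.2, 6.1, 5.9, 6.4, 30.0, 5.7])
min_x, q1_x, med_x, q3_x, max_x = five_number_summary(rmsds)
cut = 1.5 * (q3_x - q1_x)
# only cut the high side: images with small RMSDs are good, not outliers
outliers = rmsds > q3_x + cut
print(list(outliers))  # the 30 micron image is flagged
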
Example #3
def refine_expanding(params, merged_scope, combine_phil):
    # six, easy_run (libtbx), parse (libtbx.phil), refine_scope and
    # output_geometry are module-level imports/helpers in the original source
    import six
    from libtbx import easy_run
    from libtbx.phil import parse

    assert params.start_at_hierarchy_level == 0
    if params.rmsd_filter.enable:
        input_name = "filtered"
        command = "cctbx.xfel.filter_experiments_by_rmsd %s %s output.filtered_experiments=%s output.filtered_reflections=%s"
        command = command % ("%s_combined_experiments.json" % params.tag,
                             "%s_combined_reflections.pickle" % params.tag,
                             "%s_filtered_experiments.json" % params.tag,
                             "%s_filtered_reflections.pickle" % params.tag)
        command += " iqr_multiplier=%f" % params.rmsd_filter.iqr_multiplier
        print(command)
        result = easy_run.fully_buffered(command=command).raise_if_errors()
        result.show_stdout()
    else:
        input_name = "combined"
    # --------------------------
    if params.panel_filter is not None:
        from libtbx import easy_pickle
        print("Filtering out all reflections except those on panels %s" %
              (", ".join(["%d" % p for p in params.panel_filter])))
        combined_path = "%s_combined_reflections.pickle" % params.tag
        data = easy_pickle.load(combined_path)
        sel = None
        for panel_id in params.panel_filter:
            if sel is None:
                sel = data['panel'] == panel_id
            else:
                sel |= data['panel'] == panel_id
        print("Retaining", len(data.select(sel)), "out of", len(data),
              "reflections")
        easy_pickle.dump(combined_path, data.select(sel))
    # ----------------------------------
    # this is the order to refine the CSPAD in
    steps = {}
    steps[0] = [2, 3]
    steps[1] = steps[0] + [0, 1]
    steps[2] = steps[1] + [14, 15]
    steps[3] = steps[2] + [6, 7]
    steps[4] = steps[3] + [4, 5]
    steps[5] = steps[4] + [12, 13]
    steps[6] = steps[5] + [8, 9]
    steps[7] = steps[6] + [10, 11]

    for s, panels in six.iteritems(steps):
        rest = []
        for p in panels:
            rest.append(p + 16)
            rest.append(p + 32)
            rest.append(p + 48)
        panels.extend(rest)

    levels = {0: (0, 1)}  # levels 0 and 1
    for i in range(7):
        levels[i + 1] = (2, )  # level 2

    previous_step_and_level = None
    for j in range(8):
        from libtbx import easy_pickle
        print("Filtering out all reflections except those on panels %s" %
              (", ".join(["%d" % p for p in steps[j]])))
        combined_path = "%s_%s_reflections.pickle" % (params.tag, input_name)
        output_path = "%s_reflections_step%d.pickle" % (params.tag, j)
        data = easy_pickle.load(combined_path)
        sel = None
        for panel_id in steps[j]:
            if sel is None:
                sel = data['panel'] == panel_id
            else:
                sel |= data['panel'] == panel_id
        print("Retaining", len(data.select(sel)), "out of", len(data),
              "reflections")
        easy_pickle.dump(output_path, data.select(sel))

        for i in levels[j]:
            print("Step", j, "refining at hierarchy level", i)
            refine_phil_file = "%s_refine_step%d_level%d.phil" % (params.tag,
                                                                  j, i)
            if i == 0:
                if params.refine_distance:
                    diff_phil = "refinement.parameterisation.detector.fix_list=Tau1"  # fix detector rotz
                else:
                    diff_phil = "refinement.parameterisation.detector.fix_list=Dist,Tau1"  # fix detector rotz, distance
                if params.flat_refinement:
                    diff_phil += ",Tau2,Tau3"  # Also fix x and y rotations
                diff_phil += "\n"
                if params.refine_energy:
                    diff_phil += "refinement.parameterisation.beam.fix=in_spindle_plane+out_spindle_plane\n"  # allow energy to refine
            else:
                # Note, always need to fix something, so pick a panel group and fix its Tau1 (rotation around Z) always
                if params.flat_refinement and params.flat_refinement_with_distance:
                    diff_phil = "refinement.parameterisation.detector.fix_list=Group1Tau1,Tau2,Tau3\n"  # refine distance, rotz and xy translation
                    diff_phil += "refinement.parameterisation.detector.constraints.parameter=Dist\n"  # constrain distance to be refined identically for all panels at this hierarchy level
                elif params.flat_refinement:
                    diff_phil = "refinement.parameterisation.detector.fix_list=Dist,Group1Tau1,Tau2,Tau3\n"  # refine only rotz and xy translation
                else:
                    diff_phil = "refinement.parameterisation.detector.fix_list=Group1Tau1\n"  # refine almost everything

            if previous_step_and_level is None:
                command = "dials.refine %s %s_%s_experiments.json %s_reflections_step%d.pickle"%( \
                  refine_phil_file, params.tag, input_name, params.tag, j)
            else:
                p_step, p_level = previous_step_and_level
                if p_step == j:
                    command = "dials.refine %s %s_refined_experiments_step%d_level%d.json %s_refined_reflections_step%d_level%d.pickle"%( \
                      refine_phil_file, params.tag, p_step, p_level, params.tag, p_step, p_level)
                else:
                    command = "dials.refine %s %s_refined_experiments_step%d_level%d.json %s_reflections_step%d.pickle"%( \
                      refine_phil_file, params.tag, p_step, p_level, params.tag, j)

            diff_phil += "refinement.parameterisation.detector.hierarchy_level=%d\n" % i

            output_experiments = "%s_refined_experiments_step%d_level%d.json" % (
                params.tag, j, i)
            command += " output.experiments=%s output.reflections=%s_refined_reflections_step%d_level%d.pickle"%( \
              output_experiments, params.tag, j, i)

            scope = merged_scope.fetch(parse(diff_phil))
            # refine_scope is the master dials.refine PHIL scope, defined at
            # module level in the original source
            with open(refine_phil_file, 'w') as f:
                f.write(refine_scope.fetch_diff(scope).as_str())

            print(command)
            result = easy_run.fully_buffered(command=command).raise_if_errors()
            result.show_stdout()

            # In expanding mode, if using flat refinement with distance, after having refined this step as a block, unrefined
            # panels will have been left behind. Read back the new metrology, compute the shift applied to the panels refined
            # in this step, and apply that shift to the unrefined panels in this step
            if params.flat_refinement and params.flat_refinement_with_distance and i > 0:
                from dxtbx.model.experiment_list import ExperimentListFactory, ExperimentListDumper
                from xfel.command_line.cspad_detector_congruence import iterate_detector_at_level, iterate_panels
                from scitbx.array_family import flex
                from scitbx.matrix import col
                from libtbx.test_utils import approx_equal
                experiments = ExperimentListFactory.from_json_file(
                    output_experiments, check_format=False)
                assert len(experiments.detectors()) == 1
                detector = experiments.detectors()[0]
                # Displacements: deltas along the vector normal to the detector
                displacements = flex.double()
                # Iterate through the panel groups at this level
                for panel_group in iterate_detector_at_level(
                        detector.hierarchy(), 0, i):
                    # Were there panels refined in this step in this panel group?
                    if params.panel_filter:
                        test = [
                            list(detector).index(panel) in steps[j]
                            for panel in iterate_panels(panel_group) if list(
                                detector).index(panel) in params.panel_filter
                        ]
                    else:
                        test = [
                            list(detector).index(panel) in steps[j]
                            for panel in iterate_panels(panel_group)
                        ]
                    if not any(test): continue
                    # Compute the translation along the normal of this panel group.  This is defined as distance in dials.refine
                    displacements.append(
                        col(panel_group.get_local_fast_axis()).cross(
                            col(panel_group.get_local_slow_axis())).dot(
                                col(panel_group.get_local_origin())))

                # Even though the panels are constrained to move the same amount, there is a bit of variation.
                stats = flex.mean_and_variance(displacements)
                displacement = stats.mean()
                print("Average displacement along normals: %f +/- %f" %
                      (stats.mean(),
                       stats.unweighted_sample_standard_deviation()))

                # Verify the variation isn't significant
                for k in range(1, len(displacements)):
                    assert approx_equal(displacements[0], displacements[k])
                # If all of the panel groups in this level moved, no need to do anything.
                if len(displacements) != len(
                        list(
                            iterate_detector_at_level(detector.hierarchy(), 0,
                                                      i))):
                    for panel_group in iterate_detector_at_level(
                            detector.hierarchy(), 0, i):
                        if params.panel_filter:
                            test = [
                                list(detector).index(panel) in steps[j]
                                and list(detector).index(panel)
                                in params.panel_filter
                                for panel in iterate_panels(panel_group)
                            ]
                        else:
                            test = [
                                list(detector).index(panel) in steps[j]
                                for panel in iterate_panels(panel_group)
                            ]
                        # If any of the panels in this panel group moved, no need to do anything
                        if any(test): continue

                        # None of the panels in this panel group moved in this step, so need to apply displacement from other panel
                        # groups at this level
                        fast = col(panel_group.get_local_fast_axis())
                        slow = col(panel_group.get_local_slow_axis())
                        ori = col(panel_group.get_local_origin())
                        normal = fast.cross(slow)
                        panel_group.set_local_frame(
                            fast, slow, (ori.dot(fast) * fast) +
                            (ori.dot(slow) * slow) + (normal * displacement))

                # Check the new displacements. Should be the same across all panels.
                displacements = []
                for panel_group in iterate_detector_at_level(
                        detector.hierarchy(), 0, i):
                    displacements.append(
                        col(panel_group.get_local_fast_axis()).cross(
                            col(panel_group.get_local_slow_axis())).dot(
                                col(panel_group.get_local_origin())))

                for k in range(1, len(displacements)):
                    assert approx_equal(displacements[0], displacements[k])

                dump = ExperimentListDumper(experiments)
                dump.as_json(output_experiments)

            previous_step_and_level = j, i

    output_geometry(params)
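
A note on the displacement computed in the block above: for a panel group with local fast and slow axes and a local origin, the dot product of the origin with fast x slow is the group's offset along its own normal, which is what dials.refine parameterises as "distance". A minimal sketch:

from scitbx.matrix import col

fast = col((1.0, 0.0, 0.0))
slow = col((0.0, -1.0, 0.0))
origin = col((5.0, 3.0, -100.0))
normal = fast.cross(slow)          # (0, 0, -1) for this frame
displacement = normal.dot(origin)  # 100.0: offset along the normal
print(displacement)
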
Example #4
    def run(self):
        """Execute the script."""

        from dials.util.options import flatten_reflections, flatten_experiments
        from os.path import basename, splitext
        from dxtbx.model.experiment_list import ExperimentList
        from libtbx.utils import Sorry
        # slice_experiments, slice_reflections, calculate_frame_numbers and
        # calculate_block_ranges are module-level helpers in the original source

        # Parse the command line
        params, options = self.parser.parse_args(show_diff_phil=True)
        reflections = flatten_reflections(params.input.reflections)
        experiments = flatten_experiments(params.input.experiments)

        # Try to load the models and data
        slice_exps = len(experiments) > 0
        slice_refs = len(reflections) > 0

        # Catch case of nothing to do
        if not any([slice_exps, slice_refs]):
            print("No suitable input provided")
            self.parser.print_help()
            return

        if reflections:
            if len(reflections) > 1:
                raise Sorry(
                    "Only one reflections list can be imported at present")
            reflections = reflections[0]

            # calculate frame numbers if needed
            if experiments:
                reflections = calculate_frame_numbers(reflections, experiments)

            # if we still don't have the right column give up
            if "xyzobs.px.value" not in reflections:
                raise Sorry(
                    "These reflections do not have frame numbers set, and "
                    "there are no experiments provided to calculate these.")

        # set trivial case where no scan range is provided at all
        if not params.image_range:
            params.image_range = [None]

        # check if slicing into blocks
        if params.block_size is not None:
            if not slice_exps:
                raise Sorry("For slicing into blocks please provide "
                            "experiments containing the scan")
            if len(experiments) > 1:
                raise Sorry(
                    "For slicing into blocks please provide a single "
                    "scan only")
            scan = experiments[0].scan

            # Having extracted the scan, calculate the blocks
            params.image_range = calculate_block_ranges(
                scan, params.block_size)

            # Do the slicing then recombine
            if slice_exps:
                sliced = [
                    slice_experiments(experiments, [sr])[0]
                    for sr in params.image_range
                ]
                sliced_experiments = ExperimentList()
                for exp in sliced:
                    sliced_experiments.append(exp)

            # slice reflections if present
            if slice_refs:
                sliced = [
                    slice_reflections(reflections, [sr])
                    for sr in params.image_range
                ]
                sliced_reflections = sliced[0]
                for i, rt in enumerate(sliced[1:]):
                    rt["id"] += i + 1  # set id
                    sliced_reflections.extend(rt)

        else:
            # slice each dataset into the requested subset
            if slice_exps:
                sliced_experiments = slice_experiments(experiments,
                                                       params.image_range)
            if slice_refs:
                sliced_reflections = slice_reflections(reflections,
                                                       params.image_range)

        # Save sliced experiments
        if slice_exps:
            output_experiments_filename = params.output.experiments_filename
            if output_experiments_filename is None:
                # take first filename as template
                bname = basename(params.input.experiments[0].filename)
                bname = splitext(bname)[0]
                if not bname:
                    bname = "experiments"
                if len(params.image_range) == 1 and params.image_range[0] is not None:
                    ext = "_{0}_{1}.expt".format(*params.image_range[0])
                else:
                    ext = "_sliced.expt"
                output_experiments_filename = bname + ext
            print("Saving sliced experiments to {}".format(
                output_experiments_filename))

            from dxtbx.model.experiment_list import ExperimentListDumper

            dump = ExperimentListDumper(sliced_experiments)
            dump.as_json(output_experiments_filename)

        # Save sliced reflections
        if slice_refs:
            output_reflections_filename = params.output.reflections_filename
            if output_reflections_filename is None:
                # take first filename as template
                bname = basename(params.input.reflections[0].filename)
                bname = splitext(bname)[0]
                if not bname:
                    bname = "reflections"
                if len(params.image_range) == 1 and params.image_range[0] is not None:
                    ext = "_{0}_{1}.refl".format(*params.image_range[0])
                else:
                    ext = "_sliced.refl"
                output_reflections_filename = bname + ext

            print("Saving sliced reflections to {0}".format(
                output_reflections_filename))
            sliced_reflections.as_file(output_reflections_filename)

        return
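
calculate_block_ranges is not shown in this example. A hypothetical stand-in, assuming the real helper splits a scan's image range into contiguous blocks of roughly block_size images:

def block_ranges(image_range, block_size):
    """Split (first, last) into contiguous blocks of ~block_size images."""
    first, last = image_range
    n_images = last - first + 1
    n_blocks = max(1, int(round(n_images / block_size)))
    # distribute the images as evenly as possible over the blocks
    edges = [first + (n_images * k) // n_blocks for k in range(n_blocks + 1)]
    return [(edges[k], edges[k + 1] - 1) for k in range(n_blocks)]

print(block_ranges((1, 100), 30))  # [(1, 33), (34, 66), (67, 100)]
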
Example #5
  def run(self):
    '''Execute the script.'''

    from dials.util.options import flatten_reflections, flatten_experiments, \
      flatten_datablocks
    # Sorry, ExperimentList, DataBlock, basename, splitext and the slice_*
    # helpers are module-level imports in the original source

    # Parse the command line
    params, options = self.parser.parse_args(show_diff_phil=True)
    reflections = flatten_reflections(params.input.reflections)
    experiments = flatten_experiments(params.input.experiments)
    datablocks = flatten_datablocks(params.input.datablock)

    # Try to load the models and data
    slice_exps = len(experiments) > 0
    slice_refs = len(reflections) > 0
    slice_dbs = len(datablocks) > 0

    # Catch case of nothing to do
    if not any([slice_exps, slice_refs, slice_dbs]):
      print "No suitable input provided"
      self.parser.print_help()
      return

    if reflections:
      if len(reflections) > 1:
        raise Sorry("Only one reflections list can be imported at present")
      reflections = reflections[0]

      # calculate frame numbers if needed
      if experiments:
        reflections = calculate_frame_numbers(reflections, experiments)

      # if we still don't have the right column give up
      if 'xyzobs.px.value' not in reflections:
        raise Sorry("These reflections do not have frame numbers set, and "
          "there are no experiments provided to calculate these.")

    # set trivial case where no scan range is provided at all
    if not params.image_range:
      params.image_range = [None]

    # check if slicing into blocks
    if params.block_size is not None:
      # in this case for simplicity, ensure that there is either an
      # an experiment list or datablocks, but not both. Ensure there is only
      # a single scan contained within.
      if [slice_exps, slice_dbs].count(True) != 1:
        raise Sorry("For slicing into blocks please provide either datablocks"
          " or experiments, but not both.")
      if slice_exps:
        if len(experiments) > 1:
          raise Sorry("For slicing into blocks please provide a single "
                      "scan only")
        scan = experiments[0].scan
      if slice_dbs:
        scans = datablocks[0].unique_scans()
        if len(scans) > 1 or len(datablocks) > 1:
          raise Sorry("For slicing into blocks please provide a single "
                      "scan only")
        scan = scans[0]

      # Having extracted the scan, calculate the blocks
      params.image_range = calculate_block_ranges(scan, params.block_size)

      # Do the slicing then recombine
      if slice_exps:
        sliced = [slice_experiments(experiments, [sr])[0] \
          for sr in params.image_range]
        sliced_experiments = ExperimentList()
        for exp in sliced:
          sliced_experiments.append(exp)

      if slice_dbs:
        sliced = [slice_datablocks(datablocks, [sr])[0] \
          for sr in params.image_range]
        imagesets = [db.extract_imagesets()[0] for db in sliced]
        sliced_datablocks = DataBlock(imagesets)

      # slice reflections if present
      if slice_refs:
        sliced = [slice_reflections(reflections, [sr]) \
          for sr in params.image_range]
        sliced_reflections = sliced[0]
        for i, rt in enumerate(sliced[1:]):
          rt['id'] += (i + 1) # set id
          sliced_reflections.extend(rt)

    else:
      # slice each dataset into the requested subset
      if slice_exps:
        sliced_experiments = slice_experiments(experiments, params.image_range)
      if slice_refs:
        sliced_reflections = slice_reflections(reflections, params.image_range)
      if slice_dbs:
        sliced_datablocks = slice_datablocks(datablocks, params.image_range)

    # Save sliced experiments
    if slice_exps:
      output_experiments_filename = params.output.experiments_filename
      if output_experiments_filename is None:
        # take first filename as template
        bname = basename(params.input.experiments[0].filename)
        bname = splitext(bname)[0]
        if not bname: bname = "experiments"
        if len(params.image_range) == 1 and params.image_range[0] is not None:
          ext = "_{0}_{1}.json".format(*params.image_range[0])
        else:
          ext = "_sliced.json"
        output_experiments_filename = bname + ext
      print('Saving sliced experiments to {0}'.format(
        output_experiments_filename))

      from dxtbx.model.experiment_list import ExperimentListDumper
      dump = ExperimentListDumper(sliced_experiments)
      dump.as_json(output_experiments_filename)

    # Save sliced reflections
    if slice_refs:
      output_reflections_filename = params.output.reflections_filename
      if output_reflections_filename is None:
        # take first filename as template
        bname = basename(params.input.reflections[0].filename)
        bname = splitext(bname)[0]
        if not bname: bname = "reflections"
        if len(params.image_range) == 1 and params.image_range[0] is not None:
          ext = "_{0}_{1}.pickle".format(*params.image_range[0])
        else:
          ext = "_sliced.pickle"
        output_reflections_filename = bname + ext

      print('Saving sliced reflections to {0}'.format(
        output_reflections_filename))
      sliced_reflections.as_pickle(output_reflections_filename)

    # Save sliced datablocks
    if slice_dbs:
      output_datablocks_filename = params.output.datablocks_filename
      if output_datablocks_filename is None:
        # take first filename as template
        bname = basename(params.input.datablock[0].filename)
        bname = splitext(bname)[0]
        if not bname: bname = "datablock"
        if len(params.image_range) == 1 and params.image_range[0] is not None:
          ext = "_{0}_{1}.json".format(*params.image_range[0])
        else:
          ext = "_sliced.json"
        output_datablocks_filename = bname + ext
      print('Saving sliced datablocks to {0}'.format(
        output_datablocks_filename))

      from dxtbx.datablock import DataBlockDumper
      dump = DataBlockDumper(sliced_datablocks)
      dump.as_file(output_datablocks_filename)

    return
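
slice_reflections is likewise defined elsewhere. A minimal sketch of the idea, assuming frame numbers live in the z component of xyzobs.px.value (the column the checks above insist on) and that image i covers the half-open frame interval [i-1, i) — the exact convention is an assumption here:

from dials.array_family import flex

def slice_by_frames(reflections, first, last):
    # split the observed centroids into x, y and frame (z) components
    x, y, frame = reflections['xyzobs.px.value'].parts()
    sel = (frame >= first - 1) & (frame < last)
    return reflections.select(sel)
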
Example #6
  def run(self):
    '''Execute the script.'''
    from time import time
    import six.moves.cPickle as pickle
    from dials.util import log
    from dials.algorithms.refinement import RefinerFactory
    from dials.util.options import flatten_reflections, flatten_experiments
    from dials.array_family import flex
    from libtbx.utils import Sorry

    start_time = time()

    # Parse the command line
    params, options = self.parser.parse_args(show_diff_phil=False)
    reflections = flatten_reflections(params.input.reflections)
    experiments = flatten_experiments(params.input.experiments)

    # Try to load the models and data
    nexp = len(experiments)
    if nexp == 0:
      print("No Experiments found in the input")
      self.parser.print_help()
      return
    if len(reflections) == 0:
      print("No reflection data found in the input")
      self.parser.print_help()
      return
    if len(reflections) > 1:
      raise Sorry("Only one reflections list can be imported at present")
    reflections = reflections[0]

    self.check_input(reflections)

    # Configure the logging
    log.config(info=params.output.log,
      debug=params.output.debug_log)
    from dials.util.version import dials_version
    logger.info(dials_version())

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil:
      logger.info('The following parameters have been modified:\n')
      logger.info(diff_phil)

    # Modify options if necessary
    if params.output.correlation_plot.filename is not None:
      params.refinement.refinery.journal.track_parameter_correlation = True

    # Warn about potentially unhelpful options
    if params.refinement.mp.nproc > 1:
      logger.warning("WARNING: setting nproc > 1 is only helpful in rare "
        "circumstances. It is not recommended for typical data processing "
        "tasks.\n")

    # Get the refiner
    logger.info('Configuring refiner')
    refiner = RefinerFactory.from_parameters_data_experiments(params,
        reflections, experiments)

    # Refine the geometry
    if nexp == 1:
      logger.info('Performing refinement of a single Experiment...')
    else:
      logger.info('Performing refinement of {0} Experiments...'.format(nexp))

    # Refine and get the refinement history
    history = refiner.run()

    if params.output.centroids:
      logger.info("Writing table of centroids to '{0}'".format(
        params.output.centroids))
      self.write_centroids_table(refiner, params.output.centroids)

    # Get the refined experiments
    experiments = refiner.get_experiments()

    # Write scan-varying parameters to file, if there were any
    if params.output.parameter_table:
      scans = experiments.scans()
      if len(scans) > 1:
        logger.info("Writing a scan-varying parameter table is only supported "
             "for refinement of a single scan")
      else:
        scan = scans[0]
        text = refiner.get_param_reporter().varying_params_vs_image_number(
            scan.get_array_range())
        if text:
          logger.info("Writing scan-varying parameter table to {0}".format(
            params.output.parameter_table))
          f = open(params.output.parameter_table,"w")
          f.write(text)
          f.close()
        else:
          logger.info("No scan-varying parameter table to write")

    crystals = experiments.crystals()
    if len(crystals) == 1:
      # output the refined model for information
      logger.info('')
      logger.info('Final refined crystal model:')
      logger.info(crystals[0])

    # Save the refined experiments to file
    output_experiments_filename = params.output.experiments
    logger.info('Saving refined experiments to {0}'.format(output_experiments_filename))
    from dxtbx.model.experiment_list import ExperimentListDumper
    dump = ExperimentListDumper(experiments)
    dump.as_json(output_experiments_filename)

    # Save reflections with updated predictions if requested (allow to switch
    # this off if it is a time-consuming step)
    if params.output.reflections:
      # Update predictions for all indexed reflections
      logger.info('Updating predictions for indexed reflections')
      preds = refiner.predict_for_indexed()

      # just copy over the columns of interest, leaving behind things
      # added by e.g. scan-varying refinement such as 'block', the
      # U, B and UB matrices and gradients.
      reflections['s1'] = preds['s1']
      reflections['xyzcal.mm'] = preds['xyzcal.mm']
      reflections['xyzcal.px'] = preds['xyzcal.px']
      if 'entering' in preds:
        reflections['entering'] = preds['entering']

      # set used_in_refinement and centroid_outlier flags
      assert len(preds) == len(reflections)
      reflections.unset_flags(flex.size_t_range(len(reflections)),
        reflections.flags.used_in_refinement | reflections.flags.centroid_outlier)
      mask = preds.get_flags(preds.flags.centroid_outlier)
      reflections.set_flags(mask, reflections.flags.centroid_outlier)
      mask = preds.get_flags(preds.flags.used_in_refinement)
      reflections.set_flags(mask, reflections.flags.used_in_refinement)

      logger.info('Saving reflections with updated predictions to {0}'.format(
        params.output.reflections))
      if params.output.include_unused_reflections:
        reflections.as_pickle(params.output.reflections)
      else:
        sel = reflections.get_flags(reflections.flags.used_in_refinement)
        reflections.select(sel).as_pickle(params.output.reflections)

    # For debugging, if requested save matches to file
    if params.output.matches:
      matches = refiner.get_matches()
      logger.info('Saving matches (use for debugging purposes) to {0}'.format(
        params.output.matches))
      matches.as_pickle(params.output.matches)

    # Correlation plot
    if params.output.correlation_plot.filename is not None:
      from os.path import splitext
      root, ext = splitext(params.output.correlation_plot.filename)
      if not ext: ext = ".pdf"

      steps = params.output.correlation_plot.steps
      if steps is None: steps = [history.get_nrows()-1]

      # extract individual column names or indices
      col_select = params.output.correlation_plot.col_select

      num_plots = 0
      for step in steps:
        fname_base = root
        if len(steps) > 1: fname_base += "_step%02d" % step

        corrmats, labels = refiner.get_parameter_correlation_matrix(step, col_select)
        if [corrmats, labels].count(None) == 0:
          from dials.algorithms.refinement.refinement_helpers import corrgram
          for resid_name, corrmat in corrmats.items():
            plot_fname = fname_base + "_" + resid_name + ext
            plt = corrgram(corrmat, labels)
            if plt is not None:
              logger.info('Saving parameter correlation plot to {}'.format(plot_fname))
              plt.savefig(plot_fname)
              plt.close()
              num_plots += 1
          mat_fname = fname_base + ".pickle"
          with open(mat_fname, 'wb') as handle:
            for k, corrmat in corrmats.items():
              corrmats[k] = corrmat.as_scitbx_matrix()
            logger.info('Saving parameter correlation matrices to {0}'.format(mat_fname))
            pickle.dump({'corrmats':corrmats, 'labels':labels}, handle)

      if num_plots == 0:
        msg = "Sorry, no parameter correlation plots were produced. Please set " \
              "track_parameter_correlation=True to ensure correlations are " \
              "tracked, and make sure correlation_plot.col_select is valid."
        logger.info(msg)

    # Write out refinement history, if requested
    if params.output.history:
      with open(params.output.history, 'wb') as handle:
        logger.info('Saving refinement step history to {0}'.format(
          params.output.history))
        pickle.dump(history, handle)

    # Log the total time taken
    logger.info("\nTotal time taken: {0:.2f}s".format(time() - start_time))

    return
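
The correlation matrices pickled above can be inspected later. A minimal sketch, with "corr.pickle" standing in for the fname_base + ".pickle" path written by the script:

import six.moves.cPickle as pickle

with open("corr.pickle", "rb") as handle:
    data = pickle.load(handle)
# the dict mirrors what the script dumped: scitbx matrices plus labels
corrmats, labels = data['corrmats'], data['labels']
print(labels)
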
Example #7
    def run(self):

        print "Parsing input"
        params, options = self.parser.parse_args(show_diff_phil=True)

        #Configure the logging
        log.config(params.detector_phase.refinement.verbosity,
                   info='dials.refine.log',
                   debug='dials.refine.debug.log')

        # Try to obtain the models and data
        if not params.input.experiments:
            raise Sorry("No Experiments found in the input")
        if not params.input.reflections:
            raise Sorry("No reflection data found in the input")
        if len(params.input.reflections) != len(params.input.experiments):
            raise Sorry(
                "The number of input reflections files does not match the "
                "number of input experiments")

        # set up global experiments and reflections lists
        from dials.array_family import flex
        reflections = flex.reflection_table()
        global_id = 0
        from dxtbx.model.experiment_list import ExperimentList
        experiments = ExperimentList()

        if params.reference_detector == "first":
            # Use the first experiment of the first experiment list as the reference detector
            ref_exp = params.input.experiments[0].data[0]
        else:
            # Average all the detectors to generate a reference detector
            assert params.detector_phase.refinement.parameterisation.detector.hierarchy_level == 0
            from scitbx.matrix import col
            panel_fasts = []
            panel_slows = []
            panel_oris = []
            for exp_wrapper in params.input.experiments:
                exp = exp_wrapper.data[0]
                if panel_oris:
                    for i, panel in enumerate(exp.detector):
                        panel_fasts[i] += col(panel.get_fast_axis())
                        panel_slows[i] += col(panel.get_slow_axis())
                        panel_oris[i] += col(panel.get_origin())
                else:
                    for i, panel in enumerate(exp.detector):
                        panel_fasts.append(col(panel.get_fast_axis()))
                        panel_slows.append(col(panel.get_slow_axis()))
                        panel_oris.append(col(panel.get_origin()))

            ref_exp = copy.deepcopy(params.input.experiments[0].data[0])
            for i, panel in enumerate(ref_exp.detector):
                # Averaging the fast and slow axes can make them non-orthogonal. Fix by finding
                # the vector that lies exactly between them, then rotating 45 degrees from that
                # vector in either direction around their cross product
                vf = panel_fasts[i] / len(params.input.experiments)
                vs = panel_slows[i] / len(params.input.experiments)
                c = vf.cross(vs)
                angle = vf.angle(vs, deg=True)
                v45 = vf.rotate(c, angle / 2, deg=True)
                vf = v45.rotate(c, -45, deg=True)
                vs = v45.rotate(c, 45, deg=True)
                panel.set_frame(vf, vs,
                                panel_oris[i] / len(params.input.experiments))

            print "Reference detector (averaged):", str(ref_exp.detector)

        # set the experiment factory that combines a crystal with the reference beam
        # and the reference detector
        experiment_from_crystal = ExperimentFromCrystal(
            ref_exp.beam, ref_exp.detector)

        # keep track of the number of refl per accepted experiment for a table
        nrefs_per_exp = []

        # loop through the input, building up the global lists
        for ref_wrapper, exp_wrapper in zip(params.input.reflections,
                                            params.input.experiments):
            refs = ref_wrapper.data
            exps = exp_wrapper.data

            # there might be multiple experiments already here. Loop through them
            for i, exp in enumerate(exps):

                # select the relevant reflections
                sel = refs['id'] == i
                sub_ref = refs.select(sel)

                ## DGW commented out as reflections.minimum_number_of_reflections no longer exists
                #if len(sub_ref) < params.crystals_phase.refinement.reflections.minimum_number_of_reflections:
                #  print "skipping experiment", i, "in", exp_wrapper.filename, "due to insufficient strong reflections in", ref_wrapper.filename
                #  continue

                # build an experiment with this crystal plus the reference models
                combined_exp = experiment_from_crystal(exp.crystal)

                # next experiment ID in series
                exp_id = len(experiments)

                # check this experiment
                if not check_experiment(combined_exp, sub_ref):
                    print "skipping experiment", i, "in", exp_wrapper.filename, "due to poor RMSDs"
                    continue

                # set reflections ID
                sub_ref['id'] = flex.int(len(sub_ref), exp_id)

                # keep number of reflections for the table
                nrefs_per_exp.append(len(sub_ref))

                # obtain mm positions on the reference detector
                sub_ref = indexer_base.map_spots_pixel_to_mm_rad(
                    sub_ref, combined_exp.detector, combined_exp.scan)

                # extend refl and experiments lists
                reflections.extend(sub_ref)
                experiments.append(combined_exp)

        # print number of reflections per accepted experiment
        from libtbx.table_utils import simple_table
        header = ["Experiment", "Nref"]
        rows = [(str(i), str(n)) for (i, n) in enumerate(nrefs_per_exp)]
        st = simple_table(rows, header)
        print "Number of reflections per experiment"
        print st.format()

        for cycle in range(params.n_macrocycles):

            print "MACROCYCLE %02d" % (cycle + 1)
            print "=============\n"
            # first run: multi experiment joint refinement of detector with fixed beam and
            # crystals
            print "PHASE 1"

            # SET THIS TEST TO FALSE TO REFINE WHOLE DETECTOR AS SINGLE JOB
            if params.detector_phase.refinement.parameterisation.detector.hierarchy_level > 0:
                experiments = detector_parallel_refiners(
                    params.detector_phase, experiments, reflections)
            else:
                experiments = detector_refiner(params.detector_phase,
                                               experiments, reflections)

            # second run
            print "PHASE 2"
            experiments = crystals_refiner(params.crystals_phase, experiments,
                                           reflections)

        # Save the refined experiments to file
        output_experiments_filename = params.output.experiments_filename
        print('Saving refined experiments to {0}'.format(
            output_experiments_filename))
        from dxtbx.model.experiment_list import ExperimentListDumper
        dump = ExperimentListDumper(experiments)
        dump.as_json(output_experiments_filename)

        # Write out refined reflections, if requested
        if params.output.reflections_filename:
            print('Saving refined reflections to {0}'.format(
                params.output.reflections_filename))
            reflections.as_pickle(params.output.reflections_filename)

        return
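
The axis-averaging fix above is worth isolating: averaged fast/slow axes are generally slightly non-orthogonal, so the code rotates +/-45 degrees about their cross product, starting from the bisector of the two axes. A minimal sketch with a deliberately skewed fast axis, using the same col calls as the example:

from scitbx.matrix import col

vf = col((1.0, 0.02, 0.0)).normalize()  # slightly skewed towards slow
vs = col((0.0, 1.0, 0.0))
c = vf.cross(vs)
angle = vf.angle(vs, deg=True)           # slightly less than 90
v45 = vf.rotate(c, angle / 2, deg=True)  # bisector of vf and vs
vf = v45.rotate(c, -45, deg=True)
vs = v45.rotate(c, 45, deg=True)
print(vf.dot(vs))  # ~0: the axes are orthogonal again
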
Example #8
def run(args):
    # os, parse, phil_scope, help_str, Sorry, Usage, Detector and matrix are
    # module-level imports/definitions in the original source
    if '-h' in args or '--help' in args or '-c' in args:
        print(help_str)
        phil_scope.show(attributes_level=2)
        return

    user_phil = []
    for arg in args:
        if os.path.isfile(arg):
            user_phil.append(parse("geom_path=%s" % arg))
        else:
            try:
                user_phil.append(parse(arg))
            except Exception:
                raise Sorry("Unrecognized argument: %s" % arg)
    params = phil_scope.fetch(sources=user_phil).extract()
    if params.distance is None:
        raise Usage("Please specify detector distance")

    geom = {}
    with open(params.geom_path) as geom_file:
        for line in geom_file:
            if len(line.split("=")) != 2: continue
            if "rigid_group" in line and "collection" not in line:
                geom[line.split("=")[1].strip()] = {}
            else:
                for key in geom:
                    if line.startswith("%s/" % key):
                        geom[key][line.split("=")[0].split("/")
                                  [1].strip()] = line.split("=")[-1].strip()

    detector = Detector()
    root = detector.hierarchy()
    root.set_frame((1, 0, 0), (0, 1, 0), (0, 0, -params.distance))

    for i, key in enumerate(sorted(geom)):
        fs_x, fs_y = geom[key]['fs'].split(" ")
        ss_x, ss_y = geom[key]['ss'].split(" ")
        fast = matrix.col(
            (-float(fs_x.rstrip('x')), float(fs_y.rstrip('y')), 0.0))
        slow = matrix.col(
            (-float(ss_x.rstrip('x')), float(ss_y.rstrip('y')), 0.0))

        origin = matrix.col(
            (-float(geom[key]['corner_x']) * params.pixel_size,
             float(geom[key]['corner_y']) * params.pixel_size, 0.0))

        # OBS! you need to set the panel to a root before set local frame...
        p = root.add_panel()
        p.set_name('panel-%s' % key)
        p.set_image_size((512, 1024))
        p.set_trusted_range((-1, 1000000))
        p.set_pixel_size((params.pixel_size, params.pixel_size))
        p.set_local_frame(fast.elems, slow.elems, origin.elems)

    from dxtbx.model import BeamFactory
    wavelength = params.wavelength
    beam = BeamFactory.simple(wavelength)

    from dxtbx.model import Experiment, ExperimentList
    from dxtbx.model.experiment_list import ExperimentListDumper
    experiments = ExperimentList()
    experiment = Experiment(detector=detector, beam=beam)
    experiments.append(experiment)
    dump = ExperimentListDumper(experiments)
    dump.as_json("geometry.json")
Example #9
    def run(self):
        '''Execute the script.'''
        from dials.algorithms.refinement.two_theta_refiner import \
          TwoThetaReflectionManager, TwoThetaTarget, \
          TwoThetaPredictionParameterisation
        from time import time
        from dials.util import log
        from dials.util.version import dials_version
        import six.moves.cPickle as pickle

        start_time = time()

        # Parse the command line
        params, options = self.parser.parse_args(show_diff_phil=False)

        # set up global experiments and reflections lists
        from dials.array_family import flex
        reflections = flex.reflection_table()
        global_id = 0
        from dxtbx.model.experiment_list import ExperimentList
        experiments = ExperimentList()

        # loop through the input, building up the global lists
        nrefs_per_exp = []
        for ref_wrapper, exp_wrapper in zip(params.input.reflections,
                                            params.input.experiments):
            refs = ref_wrapper.data
            exps = exp_wrapper.data
            for i, exp in enumerate(exps):
                sel = refs['id'] == i
                sub_ref = refs.select(sel)
                nrefs_per_exp.append(len(sub_ref))
                sub_ref['id'] = flex.int(len(sub_ref), global_id)
                reflections.extend(sub_ref)
                experiments.append(exp)
                global_id += 1

        # Try to load the models and data
        nexp = len(experiments)
        if nexp == 0:
            print "No Experiments found in the input"
            self.parser.print_help()
            return
        if len(reflections) == 0:
            print "No reflection data found in the input"
            self.parser.print_help()
            return

        self.check_input(reflections)

        # Configure the logging
        log.config(info=params.output.log, debug=params.output.debug_log)
        logger.info(dials_version())

        # Log the diff phil
        diff_phil = self.parser.diff_phil.as_str()
        if diff_phil:
            logger.info('The following parameters have been modified:\n')
            logger.info(diff_phil)

        # Convert to P 1?
        if params.refinement.triclinic:
            reflections, experiments = self.convert_to_P1(
                reflections, experiments)

        # Combine crystals?
        if params.refinement.combine_crystal_models and len(experiments) > 1:
            logger.info('Combining {0} crystal models'.format(
                len(experiments)))
            experiments = self.combine_crystals(experiments)

        # Filter integrated centroids?
        if params.refinement.filter_integrated_centroids:
            reflections = self.filter_integrated_centroids(reflections)

        # Get the refiner
        logger.info('Configuring refiner')
        refiner = self.create_refiner(params, reflections, experiments)

        # Refine the geometry
        if nexp == 1:
            logger.info('Performing refinement of a single Experiment...')
        else:
            logger.info(
                'Performing refinement of {0} Experiments...'.format(nexp))

        # Refine and get the refinement history
        history = refiner.run()

        # get the refined experiments
        experiments = refiner.get_experiments()
        crystals = experiments.crystals()

        if len(crystals) == 1:
            # output the refined model for information
            logger.info('')
            logger.info('Final refined crystal model:')
            logger.info(crystals[0])
            logger.info(self.cell_param_table(crystals[0]))

        # Save the refined experiments to file
        output_experiments_filename = params.output.experiments
        logger.info('Saving refined experiments to {0}'.format(
            output_experiments_filename))
        from dxtbx.model.experiment_list import ExperimentListDumper
        dump = ExperimentListDumper(experiments)
        dump.as_json(output_experiments_filename)

        # Correlation plot
        if params.output.correlation_plot.filename is not None:
            from os.path import splitext
            root, ext = splitext(params.output.correlation_plot.filename)
            if not ext: ext = ".pdf"

            steps = params.output.correlation_plot.steps
            if steps is None: steps = [history.get_nrows() - 1]

            # extract individual column names or indices
            col_select = params.output.correlation_plot.col_select

            num_plots = 0
            for step in steps:
                fname_base = root
                if len(steps) > 1: fname_base += "_step%02d" % step

                corrmats, labels = refiner.get_parameter_correlation_matrix(
                    step, col_select)
                if [corrmats, labels].count(None) == 0:
                    from dials.algorithms.refinement.refinement_helpers import corrgram

                    for resid_name, corrmat in corrmats.items():
                        # include the residual name so each plot gets its own file
                        plot_fname = fname_base + "_" + resid_name + ext
                        plt = corrgram(corrmat, labels)
                        if plt is not None:
                            logger.info(
                                'Saving parameter correlation plot to {}'.
                                format(plot_fname))
                            plt.savefig(plot_fname)
                            plt.close()
                            num_plots += 1
                    mat_fname = fname_base + ".pickle"
                    with open(mat_fname, 'wb') as handle:
                        for k, corrmat in corrmats.items():
                            corrmats[k] = corrmat.as_scitbx_matrix()
                        logger.info(
                            'Saving parameter correlation matrices to {0}'.
                            format(mat_fname))
                        pickle.dump({
                            'corrmats': corrmats,
                            'labels': labels
                        }, handle)

            if num_plots == 0:
                msg = "Sorry, no parameter correlation plots were produced. Please set " \
                      "track_parameter_correlation=True to ensure correlations are " \
                      "tracked, and make sure correlation_plot.col_select is valid."
                logger.info(msg)

        if params.output.cif is not None:
            self.generate_cif(crystals[0], refiner, file=params.output.cif)

        if params.output.p4p is not None:
            self.generate_p4p(crystals[0],
                              experiments[0].beam,
                              file=params.output.p4p)

        if params.output.mmcif is not None:
            self.generate_mmcif(crystals[0], refiner, file=params.output.mmcif)

        # Log the total time taken
        logger.info("\nTotal time taken: {0:.2f}s".format(time() - start_time))
Example #10
def run(args=None, phil=working_phil):
    """
    Set up refinement from command line options, files and PHIL parameters.
    Run refinement and save output files as specified.

    Called when running dials.refine as a command-line program

    Args:
        args (list): Additional command-line arguments
        phil: The working PHIL parameters

    Returns:
        None
    """

    import dials.util.log
    from dials.util.options import OptionParser
    from dials.util.options import flatten_reflections
    from dials.util.options import flatten_experiments
    import libtbx.load_env

    start_time = time()

    # The script usage
    usage = ("usage: %s [options] [param.phil] "
             "models.expt observations.refl" % libtbx.env.dispatcher_name)

    # Create the parser
    parser = OptionParser(
        usage=usage,
        phil=phil,
        read_reflections=True,
        read_experiments=True,
        check_format=False,
        epilog=__doc__,
    )

    # Parse the command line
    params, options = parser.parse_args(args=args, show_diff_phil=False)
    reflections = flatten_reflections(params.input.reflections)
    experiments = flatten_experiments(params.input.experiments)

    # Configure the logging
    dials.util.log.config(verbosity=options.verbose, logfile=params.output.log)

    # Try to load the models and data
    nexp = len(experiments)
    if nexp == 0:
        sys.exit("No Experiments found in the input")
    if len(reflections) == 0:
        sys.exit("No reflection data found in the input")
    if len(reflections) > 1:
        sys.exit("Only one reflections list can be imported at present")
    reflections = reflections[0]

    # check input is suitable
    msg = ("The supplied reflection table does not have the required data " +
           "column: {0}")
    for key in ["xyzobs.mm.value", "xyzobs.mm.variance"]:
        if key not in reflections:
            msg = msg.format(key)
            raise dials.util.Sorry(msg)

    from dials.util.version import dials_version

    logger.info(dials_version())

    # Log the diff phil
    diff_phil = parser.diff_phil.as_str()
    if diff_phil:
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)

    # Warn about potentially unhelpful options
    if params.refinement.mp.nproc > 1:
        logger.warning(
            "WARNING: setting nproc > 1 is only helpful in rare "
            "circumstances. It is not recommended for typical data processing "
            "tasks.\n")

    # Run refinement
    experiments, reflections, refiner, history = run_dials_refine(
        experiments, reflections, params)

    # For the usual case of refinement of one crystal, print that model for information
    crystals = experiments.crystals()
    if len(crystals) == 1:
        logger.info("")
        logger.info("Final refined crystal model:")
        logger.info(crystals[0])

    # Write table of centroids to file, if requested
    if params.output.centroids:
        logger.info("Writing table of centroids to '{}'".format(
            params.output.centroids))
        write_centroids_table(refiner, params.output.centroids)

    # Write scan-varying parameters to file, if there were any
    if params.output.parameter_table:
        scans = experiments.scans()
        if len(scans) > 1:
            logger.info(
                "Writing a scan-varying parameter table is only supported "
                "for refinement of a single scan")
        else:
            scan = scans[0]
            text = refiner.get_param_reporter().varying_params_vs_image_number(
                scan.get_array_range())
            if text:
                logger.info(
                    "Writing scan-varying parameter table to {}".format(
                        params.output.parameter_table))
                f = open(params.output.parameter_table, "w")
                f.write(text)
                f.close()
            else:
                logger.info("No scan-varying parameter table to write")

    # Save the refined experiments to file
    output_experiments_filename = params.output.experiments
    logger.info(
        "Saving refined experiments to {}".format(output_experiments_filename))
    from dxtbx.model.experiment_list import ExperimentListDumper

    dump = ExperimentListDumper(experiments)
    dump.as_json(output_experiments_filename)

    # Save reflections with updated predictions if requested (allow to switch
    # this off if it is a time-consuming step)
    if params.output.reflections:
        logger.info("Saving reflections with updated predictions to {}".format(
            params.output.reflections))
        if params.output.include_unused_reflections:
            reflections.as_file(params.output.reflections)
        else:
            sel = reflections.get_flags(reflections.flags.used_in_refinement)
            reflections.select(sel).as_file(params.output.reflections)

    # Save matches to file for debugging
    if params.output.matches:
        matches = refiner.get_matches()
        logger.info("Saving matches (use for debugging purposes) to {}".format(
            params.output.matches))
        matches.as_file(params.output.matches)

    # Create correlation plots
    if params.output.correlation_plot.filename is not None:
        create_correlation_plots(refiner, params.output)

    # Save refinement history
    if params.output.history:
        logger.info("Saving refinement step history to {}".format(
            params.output.history))
        history.to_json_file(params.output.history)

    # Log the total time taken
    logger.info("\nTotal time taken: {:.2f}s".format(time() - start_time))