def test_experimentlist_dumper_dump_empty_sweep(tmpdir):
  """Round-trip an experiment list built on a sweep with no real image files."""
  tmpdir.chdir()
  from dxtbx.format.Format import Format
  from dxtbx.model import Beam, Detector, Goniometer, Scan
  from dxtbx.model import Crystal

  # Two fake cbf filenames; the files never need to exist on disk.
  paths = ["filename_%01d.cbf" % (index + 1) for index in range(2)]

  sweep = Format.get_imageset(
    paths,
    beam=Beam((1, 0, 0)),
    detector=Detector(),
    goniometer=Goniometer(),
    scan=Scan((1, 2), (0.0, 1.0)),
    as_sweep=True)

  xtal = Crystal((1, 0, 0), (0, 1, 0), (0, 0, 1), space_group_symbol="P1")

  elist = ExperimentListFactory.from_imageset_and_crystal(sweep, xtal)

  # Dump to JSON and reload without checking the (non-existent) image data.
  ExperimentListDumper(elist).as_json('temp.json')
  reloaded = ExperimentListFactory.from_json_file('temp.json',
                                                  check_format=False)
  check(elist, reloaded)
    def tst_dump_scan_varying(self):
        """Check scan-varying crystal and beam models survive a JSON round trip."""
        import os
        from os.path import join
        from uuid import uuid4

        os.environ['DIALS_REGRESSION'] = self.path

        # Locate the reference experiment file in the regression data
        input_path = join(self.path, 'experiment_test_data',
                          'experiment_1.json')

        # Load the reference experiment list
        original = ExperimentListFactory.from_json_file(input_path)

        # Attach trivial (constant) scan-varying states to the models
        xtal = original[0].crystal
        beam = original[0].beam
        xtal.set_A_at_scan_points([xtal.get_A()] * 5)
        beam.set_s0_at_scan_points([beam.get_s0()] * 5)

        # Serialise to a uniquely-named JSON file and read it back
        out_name = 'temp%s.json' % uuid4().hex
        ExperimentListDumper(original).as_json(out_name)
        reloaded = ExperimentListFactory.from_json_file(out_name)
        self.check(original, reloaded)
# Example #3
    def run(self):
        """Execute the script: combine the input experiments into a single
        experiment with one extended scan and write experiments/reflections."""

        # Parse the command line
        self.params, options = self.parser.parse_args(show_diff_phil=True)

        if not self.params.input.experiments:
            self.parser.print_help()
            sys.exit()

        # Load the experiment models
        experiments = flatten_experiments(self.params.input.experiments)
        if len(experiments) == 0:
            print("No Experiments found in the input")
            self.parser.print_help()
            return

        # Shared models are taken from the first experiment
        first = experiments[0]
        ref_beam = first.beam
        ref_goniometer = first.goniometer
        ref_detector = first.detector

        # Build a scan covering all input experiments and a combined crystal
        scan = self.extended_scan(experiments)
        crystal = self.combine_crystals(experiments, scan)

        combined = Experiment(
            beam=ref_beam,
            detector=ref_detector,
            scan=scan,
            goniometer=ref_goniometer,
            crystal=crystal,
        )
        experiments = ExperimentList([combined])

        # All reflections now belong to the single combined experiment (id 0)
        reflections = flatten_reflections(self.params.input.reflections)
        assert len(reflections) == 1
        reflections = reflections[0]
        reflections["id"] *= 0

        # Save the experiments to file
        print(
            "Saving the combined experiment to {0}".format(
                self.params.output.experiments
            )
        )
        from dxtbx.model.experiment_list import ExperimentListDumper

        ExperimentListDumper(experiments).as_json(self.params.output.experiments)

        # Save the reflections to file
        print(
            "Saving the combined reflections to {0}".format(
                self.params.output.reflections
            )
        )
        reflections.as_pickle(self.params.output.reflections)
    def tst_dump_empty_sweep(self):
        """Round-trip an experiment list built on a NullReader-backed sweep
        (no real image data) through JSON and compare the result."""
        from dxtbx.imageset import ImageSweep, NullReader, SweepFileList
        from dxtbx.model import Beam, Detector, Goniometer, Scan
        from dxtbx.model import Crystal
        from uuid import uuid4

        # Sweep over a filename template; the files never need to exist.
        imageset = ImageSweep(
            NullReader(SweepFileList("filename%01d.cbf", (0, 3))))
        imageset.set_beam(Beam((1, 0, 0)))
        imageset.set_detector(Detector())
        imageset.set_goniometer(Goniometer())
        imageset.set_scan(Scan((1, 3), (0.0, 1.0)))

        crystal = Crystal((1, 0, 0), (0, 1, 0), (0, 0, 1),
                          space_group_symbol="P1")

        experiments = ExperimentListFactory.from_imageset_and_crystal(
            imageset, crystal)

        # Dump to a uniquely-named JSON file and reload without format checks
        dump = ExperimentListDumper(experiments)
        filename = 'temp%s.json' % uuid4().hex
        dump.as_json(filename)
        experiments2 = ExperimentListFactory.from_json_file(filename,
                                                            check_format=False)
        self.check(experiments, experiments2)

        # print() as a function call: the original used the Python 2-only
        # print statement, which is a SyntaxError on Python 3.
        print('OK')
def test_experimentlist_dumper_dump_scan_varying(dials_regression, tmpdir):
  """Round-trip an experiment list whose crystal, beam and goniometer carry
  scan-varying states (including B-matrix covariances at scan points)."""
  tmpdir.chdir()
  os.environ['DIALS_REGRESSION'] = dials_regression

  # Get all the filenames
  filename1 = os.path.join(dials_regression, 'experiment_test_data', 'experiment_1.json')

  # Read the experiment list in
  elist1 = ExperimentListFactory.from_json_file(filename1)

  # Make trivial scan-varying models
  crystal = elist1[0].crystal
  beam = elist1[0].beam
  goniometer = elist1[0].goniometer
  crystal.set_A_at_scan_points([crystal.get_A()] * 5)
  from scitbx.array_family import flex
  # 9x9 covariance of the B matrix elements, every entry a small constant
  cov_B = flex.double([1e-5]*9*9)
  crystal.set_B_covariance(cov_B)
  # NOTE(review): cov_B is reshaped to (1, 9, 9) only *after* being passed
  # to set_B_covariance — presumably the setter accepts the flat array;
  # confirm against the dxtbx Crystal API.
  cov_B.reshape(flex.grid(1, 9, 9))
  # Replicate the same covariance block at each of the 5 scan points
  cov_B_array = flex.double(flex.grid(5, 9, 9))
  for i in range(5):
    cov_B_array[i:(i+1), :, :] = cov_B
  crystal.set_B_covariance_at_scan_points(cov_B_array)

  beam.set_s0_at_scan_points([beam.get_s0()] * 5)
  goniometer.set_setting_rotation_at_scan_points([goniometer.get_setting_rotation()] * 5)

  # Create the experiment list dumper
  dump = ExperimentListDumper(elist1)

  # Dump as JSON file and reload
  filename = 'temp.json'
  dump.as_json(filename)
  elist2 = ExperimentListFactory.from_json_file(filename)
  check(elist1, elist2)
def test_experimentlist_dumper_dump_with_bad_lookup(dials_regression, tmpdir):
  """External-lookup entries that have filenames but no loadable data must
  survive a JSON round trip unchanged."""
  tmpdir.chdir()
  from dxtbx.model import Beam, Detector, Goniometer, Scan
  from dxtbx.model import Crystal

  json_path = os.path.join(dials_regression, "centroid_test_data",
                  "experiments_with_bad_lookup.json")

  experiments = ExperimentListFactory.from_json_file(
    json_path, check_format=False)

  def assert_lookup_state(imset):
    # Data blocks are absent, but the filenames are still recorded
    assert imset.external_lookup.mask.data.empty()
    assert imset.external_lookup.gain.data.empty()
    assert imset.external_lookup.pedestal.data.empty()
    assert imset.external_lookup.mask.filename is not None
    assert imset.external_lookup.gain.filename is not None
    assert imset.external_lookup.pedestal.filename is not None

  assert_lookup_state(experiments[0].imageset)

  out_name = 'temp.json'
  ExperimentListDumper(experiments).as_json(out_name)

  experiments = ExperimentListFactory.from_json_file(
    out_name, check_format=False)

  assert_lookup_state(experiments[0].imageset)
# Example #7
    def refine(self, experiments, centroids):
        """Optionally refine the models against the indexed centroids,
        re-estimate mosaic parameters, then either accumulate the results
        (composite output) or dump them to disk.

        Returns the (possibly refined) experiments and the filtered
        centroids reflection table.
        """
        if self.params.dispatch.refine:
            from dials.algorithms.refinement import RefinerFactory
            from time import time
            st = time()  # start time; reported at the end under the same flag

            logger.info('*' * 80)
            logger.info('Refining Model')
            logger.info('*' * 80)

            refiner = RefinerFactory.from_parameters_data_experiments(
                self.params, centroids, experiments)

            refiner.run()
            experiments = refiner.get_experiments()
            predicted = refiner.predict_for_indexed()
            # Copy predicted positions onto the observations and keep only
            # the reflections actually used during refinement
            centroids['xyzcal.mm'] = predicted['xyzcal.mm']
            centroids['entering'] = predicted['entering']
            centroids = centroids.select(
                refiner.selection_used_for_refinement())

            # Re-estimate mosaic estimates
            from dials.algorithms.indexing.nave_parameters import nave_parameters
            nv = nave_parameters(params=self.params,
                                 experiments=experiments,
                                 reflections=centroids,
                                 refinery=refiner,
                                 graph_verbose=False)
            nv()
            acceptance_flags_nv = nv.nv_acceptance_flags
            centroids = centroids.select(acceptance_flags_nv)

        if self.params.output.composite_output:
            # Composite mode: accumulate into self.all_indexed_* rather than
            # writing per-run files; both output filenames must be set together
            if self.params.output.refined_experiments_filename or self.params.output.indexed_filename:
                assert self.params.output.refined_experiments_filename is not None and self.params.output.indexed_filename is not None
                from dials.array_family import flex
                # Renumber reflection ids to follow the experiments already
                # accumulated in self.all_indexed_experiments
                n = len(self.all_indexed_experiments)
                self.all_indexed_experiments.extend(experiments)
                for i, experiment in enumerate(experiments):
                    refls = centroids.select(centroids['id'] == i)
                    refls['id'] = flex.int(len(refls), n)
                    self.all_indexed_reflections.extend(refls)
                    n += 1
        else:
            # Dump experiments to disk
            if self.params.output.refined_experiments_filename:
                from dxtbx.model.experiment_list import ExperimentListDumper
                dump = ExperimentListDumper(experiments)
                dump.as_json(self.params.output.refined_experiments_filename)

            if self.params.output.indexed_filename:
                self.save_reflections(centroids,
                                      self.params.output.indexed_filename)

        if self.params.dispatch.refine:
            logger.info('')
            logger.info('Time Taken = %f seconds' % (time() - st))

        return experiments, centroids
# Example #8
 def save_output(experiments, reflections, exp_name, refl_name):
     """Write experiments as JSON and reflections as a pickle file."""
     from dxtbx.model.experiment_list import ExperimentListDumper
     print('Saving combined experiments to {0}'.format(exp_name))
     ExperimentListDumper(experiments).as_json(exp_name)
     print('Saving combined reflections to {0}'.format(refl_name))
     reflections.as_pickle(refl_name)
def save_experiments(experiments, filename):
    """Write *experiments* to *filename* as split JSON, logging the time taken."""
    start = time()
    logger.info("Saving the experiments to %s", filename)
    serialiser = ExperimentListDumper(experiments)
    with open(filename, "w") as fh:
        fh.write(serialiser.as_json(split=True))
    logger.info("Time taken: %g", (time() - start))
# Example #10
def run(args=None):
    """Run the dials.space_group script from the command-line.

    Parses experiments/reflections from the command line, runs the
    systematic-absence checks, and optionally writes an HTML report and
    an updated experiment list.
    """
    usage = """Usage: dials.space_group scaled.refl scaled.expt [options]"""

    parser = OptionParser(
        usage=usage,
        read_experiments=True,
        read_reflections=True,
        phil=phil_scope,
        check_format=False,
        epilog=help_message,
    )
    params, _ = parser.parse_args(args=args, show_diff_phil=False)

    if not params.input.experiments or not params.input.reflections:
        parser.print_help()
        sys.exit()

    reflections = flatten_reflections(params.input.reflections)
    experiments = flatten_experiments(params.input.experiments)

    log.config(verbosity=1, info=params.output.log)
    logger.info(dials_version())

    diff_phil = parser.diff_phil.as_str()
    if diff_phil != "":
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)

    ### Assert that all data have been scaled with dials - should only be
    # able to input one reflection table and experimentlist that are
    # matching and scaled together.
    if len(reflections) != 1:
        raise Sorry("Only one reflection table can be given as input.")

    if ("intensity.scale.value" not in reflections[0]) and (
        "intensity.prf.value" not in reflections[0]
    ):
        raise Sorry(
            "Unable to find integrated or scaled reflections in the reflection table."
        )

    try:
        run_sys_abs_checks(
            experiments, reflections, params.d_min, float(params.significance_level)
        )
    except ValueError as e:
        # Chain the cause so the original traceback is preserved
        raise Sorry(e) from e

    if params.output.html:
        ScrewAxisObserver().generate_html_report(params.output.html)

    if params.output.experiments:
        dump = ExperimentListDumper(experiments)
        with open(params.output.experiments, "w") as outfile:
            outfile.write(dump.as_json(split=True))
# Example #11
 def save_experiments(self, experiments, filename):
     """Write the experiments (with profile model parameters) to a JSON file."""
     from time import time
     from dxtbx.model.experiment_list import ExperimentListDumper
     start = time()
     logger.info('Saving the experiments to %s' % filename)
     serialiser = ExperimentListDumper(experiments)
     with open(filename, "w") as fh:
         fh.write(serialiser.as_json())
     logger.info(' time taken: %g' % (time() - start))
# Example #12
    def run(self):
        ''' Parse the options, re-fit each crystal with nave_parameters,
        record the change in mosaic parameters, and save the filtered
        reflections and updated experiments. '''
        from dials.util.options import flatten_experiments, flatten_reflections
        # Parse the command line arguments
        params, options = self.parser.parse_args(show_diff_phil=True)
        experiments = flatten_experiments(params.input.experiments)
        reflections = flatten_reflections(params.input.reflections)
        assert len(reflections) == 1
        reflections = reflections[0]

        # Per-experiment change in domain size / mosaic angle
        domain_size = flex.double()
        mosaic_angle = flex.double()
        filtered_reflections = flex.reflection_table()

        for i in range(len(experiments)):
            refls = reflections.select(reflections['id'] == i)
            try:
                nv = nave_parameters(params=None,
                                     experiments=experiments[i:i + 1],
                                     reflections=refls,
                                     refinery=None,
                                     graph_verbose=False)
                crystal_model_nv = nv()
            except Exception:
                # Refinement can fail for individual experiments; skip them
                continue
            domain_size.append(experiments[i].crystal.get_domain_size_ang() -
                               crystal_model_nv.get_domain_size_ang())
            mosaic_angle.append(
                experiments[i].crystal.get_half_mosaicity_deg() -
                crystal_model_nv.get_half_mosaicity_deg())
            experiments[i].crystal = crystal_model_nv

            refls = refls.select(nv.nv_acceptance_flags)
            filtered_reflections.extend(refls)

        # print() calls: the original used Python 2-only print statements
        print("Saving new experiments as %s" % params.output.experiments)
        dump = ExperimentListDumper(experiments)
        dump.as_json(params.output.experiments)

        print("Removed %d out of %d reflections as outliers" % (
            len(reflections) - len(filtered_reflections), len(reflections)))
        # BUGFIX: the original printed params.output.experiments here, but
        # the reflections are actually written to params.output.reflections.
        print("Saving filtered reflections as %s" % params.output.reflections)
        filtered_reflections.as_msgpack_file(params.output.reflections)

        if params.plot_changes:
            from matplotlib import pyplot as plt
            # Restrict the histograms to a sensible range around zero
            domain_size = domain_size.select((domain_size >= -10)
                                             & (domain_size <= 10))
            mosaic_angle = mosaic_angle.select((mosaic_angle >= -0.1)
                                               & (mosaic_angle <= 0.1))

            for d in [domain_size, mosaic_angle]:
                f = plt.figure()
                plt.hist(d, bins=30)
            plt.show()
    def write_experiments(self, experiments, params):
        """
        Output the experiments to file.

        """
        if not params.output.experiments:
            return
        logger.info("-" * 80)
        logger.info("Writing experiments to %s" %
                    params.output.experiments)
        serialiser = ExperimentListDumper(experiments)
        serialiser.as_file(params.output.experiments,
                           compact=params.output.compact)
def test_experimentlist_dumper_dump_formats(dials_regression, tmpdir):
  """Dump an experiment list as plain JSON, split JSON and pickle, and
  verify each format reloads to an equivalent list."""
  tmpdir.chdir()
  os.environ['DIALS_REGRESSION'] = dials_regression

  # Reference experiment file from the regression data
  src = os.path.join(dials_regression, 'experiment_test_data', 'experiment_1.json')
  elist1 = ExperimentListFactory.from_json_file(src)

  dump = ExperimentListDumper(elist1)

  # Plain JSON round trip
  dump.as_json('temp1.json')
  check(elist1, ExperimentListFactory.from_json_file('temp1.json'))

  # Split JSON round trip
  dump.as_json('temp2.json', split=True)
  check(elist1, ExperimentListFactory.from_json_file('temp2.json'))

  # Pickle round trip
  dump.as_pickle('temp.pickle')
  check(elist1, ExperimentListFactory.from_pickle_file('temp.pickle'))
# Example #15
    def tst_dump_formats(self):
        """Exercise plain-JSON, split-JSON and pickle round trips."""
        import os
        from os.path import join
        from uuid import uuid4

        os.environ['DIALS_REGRESSION'] = self.path

        # Reference experiment file from the regression data
        src = join(self.path, 'experiment_test_data',
                   'experiment_1.json')
        elist1 = ExperimentListFactory.from_json_file(src)

        dump = ExperimentListDumper(elist1)

        # Plain JSON round trip
        name = 'temp%s.json' % uuid4().hex
        dump.as_json(name)
        self.check(elist1, ExperimentListFactory.from_json_file(name))

        # Split JSON round trip
        name = 'temp%s.json' % uuid4().hex
        dump.as_json(name, split=True)
        self.check(elist1, ExperimentListFactory.from_json_file(name))

        # Pickle round trip
        name = 'temp%s.pickle' % uuid4().hex
        dump.as_pickle(name)
        self.check(elist1, ExperimentListFactory.from_pickle_file(name))
# Example #16
    def tst_dump_with_lookup(self):
        """Round-trip an experiment list whose imageset carries external
        lookup data (mask/gain/pedestal) and check the data is preserved."""
        from dxtbx.model import Beam, Detector, Goniometer, Scan
        from dxtbx.model import Crystal
        from uuid import uuid4
        import libtbx.load_env
        import os
        from os.path import join

        try:
            dials_regression = libtbx.env.dist_path('dials_regression')
        except KeyError:
            # print() as a function call: the original used the Python 2-only
            # print statement, which is a SyntaxError on Python 3.
            print('FAIL: dials_regression not configured')
            exit(0)

        filename = join(dials_regression, "centroid_test_data",
                        "experiments_with_lookup.json")

        experiments = ExperimentListFactory.from_json_file(filename,
                                                           check_format=True)

        # Lookup data and filenames should be populated after loading
        imageset = experiments[0].imageset
        assert not imageset.external_lookup.mask.data.empty()
        assert not imageset.external_lookup.gain.data.empty()
        assert not imageset.external_lookup.pedestal.data.empty()
        assert imageset.external_lookup.mask.filename is not None
        assert imageset.external_lookup.gain.filename is not None
        assert imageset.external_lookup.pedestal.filename is not None
        assert imageset.external_lookup.mask.data.tile(0).data().all_eq(True)
        assert imageset.external_lookup.gain.data.tile(0).data().all_eq(1)
        assert imageset.external_lookup.pedestal.data.tile(0).data().all_eq(0)

        # Dump to a uniquely-named JSON file and reload
        dump = ExperimentListDumper(experiments)
        filename = 'temp%s.json' % uuid4().hex
        dump.as_json(filename)

        experiments = ExperimentListFactory.from_json_file(filename,
                                                           check_format=True)

        # The same lookup state must survive the round trip
        imageset = experiments[0].imageset
        assert not imageset.external_lookup.mask.data.empty()
        assert not imageset.external_lookup.gain.data.empty()
        assert not imageset.external_lookup.pedestal.data.empty()
        assert imageset.external_lookup.mask.filename is not None
        assert imageset.external_lookup.gain.filename is not None
        assert imageset.external_lookup.pedestal.filename is not None
        assert imageset.external_lookup.mask.data.tile(0).data().all_eq(True)
        assert imageset.external_lookup.gain.data.tile(0).data().all_eq(1)
        assert imageset.external_lookup.pedestal.data.tile(0).data().all_eq(0)
# Example #17
def exercise_worker(worker_class):
  """ Boilerplate code for testing a worker class """
  from xfel.merging.application.phil.phil import phil_scope
  from dials.util.options import OptionParser
  from dxtbx.model.experiment_list import ExperimentListDumper

  # Build the parser and parse; quick_parse is required for MPI compatibility
  parser = OptionParser(phil=phil_scope)
  params, options = parser.parse_args(show_diff_phil=True, quick_parse=True)

  # The file-loader worker gets no input; every other worker is fed data
  # produced by simple_file_loader.
  if 'simple_file_loader' in str(worker_class):
    experiments = reflections = None
  else:
    from xfel.merging.application.input.file_loader import simple_file_loader
    loader = simple_file_loader(params)
    loader.validate()
    experiments, reflections = loader.run(None, None)

  # Validate and run the worker under test, then persist its outputs
  worker = worker_class(params)
  worker.validate()
  experiments, reflections = worker.run(experiments, reflections)

  prefix = worker_class.__name__
  reflections.as_msgpack_file(prefix + ".mpack")
  ExperimentListDumper(experiments).as_file(prefix + ".json")
    def write_experiments_and_reflections(self):
        """Save the reflections and experiments data."""
        if not (self.experiments and len(self.reflections) == 1):
            return
        logger.info(
            "Saving %d reflections to %s",
            len(self.reflections[0]),
            self.params.output.reflections,
        )
        self.reflections[0].as_file(self.params.output.reflections)

        from dxtbx.model.experiment_list import ExperimentListDumper

        logger.info("Saving the experiments to %s",
                    self.params.output.experiments)
        with open(self.params.output.experiments, "w") as fh:
            fh.write(ExperimentListDumper(self.experiments).as_json())
def run(xds_inp,
        xparm=None,
        integrate_lp=None,
        integrate_hkl=None,
        spot_xds=None,
        space_group=None,
        reindex_op=None,
        out_prefix=None,
        out_dir="."):
    """Convert XDS processing results into still-experiment JSON plus
    reflection pickles, optionally re-indexing to a new space group.

    Writes <out_prefix>experiments.json (and optionally
    integrate_hkl.pickle / spot_xds.pickle) into out_dir.
    """
    out_prefix = out_prefix + "_" if out_prefix else ""

    if integrate_lp is not None:
        # Recover XPARM objects from INTEGRATE.LP and write the first one out
        xparm_objs = prep_xparm_objects_from_integrate_lp(integrate_lp,
                                                          xparm_ref=xparm)
        rr, xp = xparm_objs[0]
        xparm = os.path.join(os.path.dirname(xds_inp),
                             "XPARM.XDS_%.6d-%.6d" % rr)
        # BUGFIX: use a context manager so the handle is closed deterministically
        # (the original open(...).write(...) leaked the file object).
        with open(xparm, "w") as f:
            f.write(xp.xparm_str())

    # FIXME template of experiment.imageset could be wrong when relative path
    #       and ######.h5 may need to be replaced with master.h5
    #experiments = ExperimentListFactory.from_xds(xds_inp, xparm) # XDS.INP needed for sweep info
    experiments = import_xds_as_still(xds_inp, xparm)

    assert len(experiments) == 1
    experiment = experiments[0]

    # I don't know what invalid X/Y/ZOBS values should be when multi-panel detector
    assert len(experiment.detector) == 1

    if None not in (space_group, reindex_op):
        # Re-index the crystal model into the requested space group
        cryst_orig = copy.deepcopy(experiment.crystal)
        cryst_reindexed = cryst_orig.change_basis(reindex_op)
        a, b, c = cryst_reindexed.get_real_space_vectors()
        cryst_reindexed = Crystal(a, b, c, space_group=space_group)
        experiment.crystal.update(cryst_reindexed)

    # Very dirty fix.. but no way to change template after object creation??
    json_str = ExperimentListDumper(experiments).as_json().replace(
        "_######.h5", "_master.h5")
    # BUGFIX: context manager instead of an unclosed open(...).write(...)
    with open(os.path.join(out_dir, out_prefix + "experiments.json"),
              "w") as f:
        f.write(json_str)

    if integrate_hkl is not None:
        table = import_integrated(integrate_hkl)
        px_to_mm(experiment, table)
        if None not in (space_group, reindex_op):
            table["miller_index"] = reindex_op.apply(table["miller_index"])
        table.as_pickle(
            os.path.join(out_dir, out_prefix + "integrate_hkl.pickle"))

    if spot_xds is not None:
        table = import_spot_xds(spot_xds)
        px_to_mm(experiment, table)
        if None not in (space_group, reindex_op):
            table["miller_index"] = reindex_op.apply(table["miller_index"])
        table.as_pickle(os.path.join(out_dir, out_prefix + "spot_xds.pickle"))
# Example #20
  def refine(self, experiments, centroids):
    """Optionally refine the models against the indexed centroids and
    re-estimate mosaic parameters, then dump the results to disk.

    Returns the (possibly refined) experiments and filtered centroids.
    """
    if self.params.dispatch.refine:
      from dials.algorithms.refinement import RefinerFactory
      from time import time
      st = time()  # start time; reported at the end under the same flag

      logger.info('*' * 80)
      logger.info('Refining Model')
      logger.info('*' * 80)

      refiner = RefinerFactory.from_parameters_data_experiments(
        self.params, centroids, experiments)

      refiner.run()
      experiments = refiner.get_experiments()
      predicted = refiner.predict_for_indexed()
      # Copy predicted positions onto the observations and keep only the
      # reflections actually used during refinement
      centroids['xyzcal.mm'] = predicted['xyzcal.mm']
      centroids['entering'] = predicted['entering']
      centroids = centroids.select(refiner.selection_used_for_refinement())

      # Re-estimate mosaic estimates
      from dials.algorithms.indexing.nave_parameters import nave_parameters
      nv = nave_parameters(params = self.params, experiments=experiments, reflections=centroids, refinery=refiner, graph_verbose=False)
      nv()
      acceptance_flags_nv = nv.nv_acceptance_flags
      centroids = centroids.select(acceptance_flags_nv)

    # Dump experiments to disk
    if self.params.output.refined_experiments_filename:
      from dxtbx.model.experiment_list import ExperimentListDumper
      dump = ExperimentListDumper(experiments)
      dump.as_json(self.params.output.refined_experiments_filename)

    if self.params.dispatch.refine:
      if self.params.output.indexed_filename:
        self.save_reflections(centroids, self.params.output.indexed_filename)

      logger.info('')
      logger.info('Time Taken = %f seconds' % (time() - st))

    return experiments, centroids
# Example #21
    def run(self):
        """ Attach per-imageset mask files to the experiments and write the
        updated experiment list to disk. """
        from dials.util.options import flatten_experiments
        from dxtbx.model.experiment_list import ExperimentListDumper
        from dials.util import Sorry

        # Parse the command line arguments
        params, options = self.parser.parse_args(show_diff_phil=True)
        experiments = flatten_experiments(params.input.experiments)

        # Check that an experiment list and at least one mask file have been provided
        if not (experiments and params.input.mask):
            self.parser.print_help()
            return

        # Check number of experiments
        n_expts = len(experiments)
        n_masks = len(params.input.mask)
        if n_expts != n_masks:
            raise Sorry(
                "The number of masks provided must match the number of imagesets "
                "(sweeps).\n"
                "You have provided an experiment list containing {} imageset(s).\n"
                "You have provided {} mask file(s).".format(n_expts, n_masks))

        # Get the imageset
        imagesets = experiments.imagesets()

        for i, imageset in enumerate(imagesets):
            # Set the lookup. BUGFIX: pickle files must be opened in binary
            # mode; the original used text mode, which fails on Python 3.
            with open(params.input.mask[i], "rb") as f:
                mask = pickle.load(f)
            imageset.external_lookup.mask.filename = params.input.mask[i]
            imageset.external_lookup.mask.data = ImageBool(mask)

        # Dump the experiments
        print("Writing experiments to %s" % params.output.experiments)
        dump = ExperimentListDumper(experiments)
        dump.as_json(filename=params.output.experiments)
# Example #22
    def finalize(self):
        ''' Perform any final operations: in composite-output mode, dump the
        accumulated experiments/reflections and tar up the integration
        pickles.

        NOTE(review): the tarball section uses Python 2-only modules
        (StringIO, cPickle) and StringIO.buf — this method will not run
        under Python 3 as written.
        '''
        if self.params.output.composite_output:
            # Dump composite files to disk
            if len(self.all_indexed_experiments
                   ) > 0 and self.params.output.refined_experiments_filename:
                from dxtbx.model.experiment_list import ExperimentListDumper
                dump = ExperimentListDumper(self.all_indexed_experiments)
                dump.as_json(self.params.output.refined_experiments_filename)

            if len(self.all_indexed_reflections
                   ) > 0 and self.params.output.indexed_filename:
                self.save_reflections(self.all_indexed_reflections,
                                      self.params.output.indexed_filename)

            if len(
                    self.all_integrated_experiments
            ) > 0 and self.params.output.integrated_experiments_filename:
                from dxtbx.model.experiment_list import ExperimentListDumper
                dump = ExperimentListDumper(self.all_integrated_experiments)
                dump.as_json(
                    self.params.output.integrated_experiments_filename)

            if len(self.all_integrated_reflections
                   ) > 0 and self.params.output.integrated_filename:
                self.save_reflections(self.all_integrated_reflections,
                                      self.params.output.integrated_filename)

            # Create a tar archive of the integration dictionary pickles
            if len(self.all_int_pickles
                   ) > 0 and self.params.output.integration_pickle:
                import tarfile, StringIO, time, cPickle as pickle
                # The '%d' placeholder in the template is replaced by '%s' so
                # the composite tag (a string) can be substituted below
                tar_template_integration_pickle = self.params.output.integration_pickle.replace(
                    '%d', '%s')
                outfile = os.path.join(
                    self.params.output.output_dir,
                    tar_template_integration_pickle %
                    ('x', self.composite_tag)) + ".tar"
                tar = tarfile.TarFile(outfile, "w")
                # Each pickle is serialised in memory and added as a tar member
                for i, (fname, d) in enumerate(
                        zip(self.all_int_pickle_filenames,
                            self.all_int_pickles)):
                    string = StringIO.StringIO(pickle.dumps(d, protocol=2))
                    info = tarfile.TarInfo(name=fname)
                    info.size = len(string.buf)
                    info.mtime = time.time()
                    tar.addfile(tarinfo=info, fileobj=string)
                tar.close()
# Example #23
def test_experimentlist_dumper_dump_with_lookup(dials_regression, tmpdir):
    """Check that external lookup data survives a JSON dump/reload cycle.

    Loads an experiment list whose imageset carries mask, gain and pedestal
    external lookups, verifies them, round-trips the list through
    ExperimentListDumper JSON output, and verifies the lookups again on the
    reloaded list.
    """
    tmpdir.chdir()

    filename = os.path.join(dials_regression, "centroid_test_data",
                            "experiments_with_lookup.json")

    experiments = ExperimentListFactory.from_json_file(filename,
                                                       check_format=True)

    def assert_lookup_intact(imageset):
        # Each external lookup must be present (non-empty data with a
        # filename) and hold the trivial values expected for this dataset:
        # all-True mask, unit gain, zero pedestal.
        assert not imageset.external_lookup.mask.data.empty()
        assert not imageset.external_lookup.gain.data.empty()
        assert not imageset.external_lookup.pedestal.data.empty()
        assert imageset.external_lookup.mask.filename is not None
        assert imageset.external_lookup.gain.filename is not None
        assert imageset.external_lookup.pedestal.filename is not None
        assert imageset.external_lookup.mask.data.tile(0).data().all_eq(True)
        assert imageset.external_lookup.gain.data.tile(0).data().all_eq(1)
        assert imageset.external_lookup.pedestal.data.tile(0).data().all_eq(0)

    assert_lookup_intact(experiments[0].imageset)

    # Round-trip through JSON and check the lookups are preserved.
    dump = ExperimentListDumper(experiments)
    filename = "temp.json"
    dump.as_json(filename)

    experiments = ExperimentListFactory.from_json_file(filename,
                                                       check_format=True)

    assert_lookup_intact(experiments[0].imageset)
Пример #24
0
    def run(self):
        """Jointly refine a shared detector and per-image crystal models.

        Parses command-line options, builds a single reference detector and
        beam (either taken from the first input experiment or averaged over
        all inputs), combines every input crystal with those reference
        models, then alternates detector-phase and crystal-phase refinement
        for the requested number of macrocycles. The refined experiments
        (and optionally reflections) are written to file.
        """
        print("Parsing input")
        params, options = self.parser.parse_args(show_diff_phil=True)

        # Configure the logging
        log.config(params.detector_phase.refinement.verbosity,
                   info='dials.refine.log',
                   debug='dials.refine.debug.log')

        # Try to obtain the models and data
        if not params.input.experiments:
            raise Sorry("No Experiments found in the input")
        if not params.input.reflections:
            raise Sorry("No reflection data found in the input")
        try:
            assert len(params.input.reflections) == len(
                params.input.experiments)
        except AssertionError:
            raise Sorry(
                "The number of input reflections files does not match the "
                "number of input experiments")

        # set up global experiments and reflections lists
        from dials.array_family import flex
        reflections = flex.reflection_table()
        global_id = 0
        from dxtbx.model.experiment_list import ExperimentList
        experiments = ExperimentList()

        if params.reference_detector == "first":
            # Use the first experiment of the first experiment list as the reference detector
            ref_exp = params.input.experiments[0].data[0]
        else:
            # Average all the detectors to generate a reference detector
            # (only valid for a flat, non-hierarchical parameterisation)
            assert params.detector_phase.refinement.parameterisation.detector.hierarchy_level == 0
            from scitbx.matrix import col
            panel_fasts = []
            panel_slows = []
            panel_oris = []
            for exp_wrapper in params.input.experiments:
                exp = exp_wrapper.data[0]
                if panel_oris:
                    # Accumulate axis/origin sums panel-by-panel
                    for i, panel in enumerate(exp.detector):
                        panel_fasts[i] += col(panel.get_fast_axis())
                        panel_slows[i] += col(panel.get_slow_axis())
                        panel_oris[i] += col(panel.get_origin())
                else:
                    # First experiment initialises the accumulators
                    for i, panel in enumerate(exp.detector):
                        panel_fasts.append(col(panel.get_fast_axis()))
                        panel_slows.append(col(panel.get_slow_axis()))
                        panel_oris.append(col(panel.get_origin()))

            ref_exp = copy.deepcopy(params.input.experiments[0].data[0])
            for i, panel in enumerate(ref_exp.detector):
                # Averaging the fast and slow axes can make them be non-orthagonal. Fix by finding
                # the vector that goes exactly between them and rotate
                # around their cross product 45 degrees from that vector in either direction
                vf = panel_fasts[i] / len(params.input.experiments)
                vs = panel_slows[i] / len(params.input.experiments)
                c = vf.cross(vs)
                angle = vf.angle(vs, deg=True)
                v45 = vf.rotate(c, angle / 2, deg=True)
                vf = v45.rotate(c, -45, deg=True)
                vs = v45.rotate(c, 45, deg=True)
                panel.set_frame(vf, vs,
                                panel_oris[i] / len(params.input.experiments))

            print("Reference detector (averaged):", str(ref_exp.detector))

        # set the experiment factory that combines a crystal with the reference beam
        # and the reference detector
        experiment_from_crystal = ExperimentFromCrystal(
            ref_exp.beam, ref_exp.detector)

        # keep track of the number of refl per accepted experiment for a table
        nrefs_per_exp = []

        # loop through the input, building up the global lists
        for ref_wrapper, exp_wrapper in zip(params.input.reflections,
                                            params.input.experiments):
            refs = ref_wrapper.data
            exps = exp_wrapper.data

            # there might be multiple experiments already here. Loop through them
            for i, exp in enumerate(exps):

                # select the relevant reflections
                sel = refs['id'] == i
                sub_ref = refs.select(sel)

                ## DGW commented out as reflections.minimum_number_of_reflections no longer exists
                #if len(sub_ref) < params.crystals_phase.refinement.reflections.minimum_number_of_reflections:
                #  print "skipping experiment", i, "in", exp_wrapper.filename, "due to insufficient strong reflections in", ref_wrapper.filename
                #  continue

                # build an experiment with this crystal plus the reference models
                combined_exp = experiment_from_crystal(exp.crystal)

                # next experiment ID in series
                exp_id = len(experiments)

                # check this experiment
                if not check_experiment(combined_exp, sub_ref):
                    print("skipping experiment", i, "in", exp_wrapper.filename,
                          "due to poor RMSDs")
                    continue

                # set reflections ID
                sub_ref['id'] = flex.int(len(sub_ref), exp_id)

                # keep number of reflections for the table
                nrefs_per_exp.append(len(sub_ref))

                # obtain mm positions on the reference detector
                sub_ref = indexer_base.map_spots_pixel_to_mm_rad(
                    sub_ref, combined_exp.detector, combined_exp.scan)

                # extend refl and experiments lists
                reflections.extend(sub_ref)
                experiments.append(combined_exp)

        # print number of reflections per accepted experiment
        from libtbx.table_utils import simple_table
        header = ["Experiment", "Nref"]
        rows = [(str(i), str(n)) for (i, n) in enumerate(nrefs_per_exp)]
        st = simple_table(rows, header)
        print("Number of reflections per experiment")
        print(st.format())

        for cycle in range(params.n_macrocycles):

            print("MACROCYCLE %02d" % (cycle + 1))
            print("=============\n")
            # first run: multi experiment joint refinement of detector with fixed beam and
            # crystals
            print("PHASE 1")

            # SET THIS TEST TO FALSE TO REFINE WHOLE DETECTOR AS SINGLE JOB
            if params.detector_phase.refinement.parameterisation.detector.hierarchy_level > 0:
                experiments = detector_parallel_refiners(
                    params.detector_phase, experiments, reflections)
            else:
                experiments = detector_refiner(params.detector_phase,
                                               experiments, reflections)

            # second run
            print("PHASE 2")
            experiments = crystals_refiner(params.crystals_phase, experiments,
                                           reflections)

        # Save the refined experiments to file
        output_experiments_filename = params.output.experiments_filename
        print('Saving refined experiments to {0}'.format(
            output_experiments_filename))
        from dxtbx.model.experiment_list import ExperimentListDumper
        dump = ExperimentListDumper(experiments)
        dump.as_json(output_experiments_filename)

        # Write out refined reflections, if requested
        if params.output.reflections_filename:
            print('Saving refined reflections to {0}'.format(
                params.output.reflections_filename))
            reflections.as_pickle(params.output.reflections_filename)

        return
Пример #25
0
    def run(self):
        '''Execute the script.

        Parses command-line input, configures logging, refines the
        experimental geometry with RefinerFactory, then writes the refined
        experiments plus (optionally) updated reflections, scan-varying
        parameter tables, correlation plots and the refinement history.
        '''
        from time import time
        import cPickle as pickle
        from dials.util import log
        from dials.algorithms.refinement import RefinerFactory
        from dials.util.options import flatten_reflections, flatten_experiments

        start_time = time()

        # Parse the command line
        params, options = self.parser.parse_args(show_diff_phil=False)
        reflections = flatten_reflections(params.input.reflections)
        experiments = flatten_experiments(params.input.experiments)

        # Try to load the models and data
        nexp = len(experiments)
        if nexp == 0:
            print("No Experiments found in the input")
            self.parser.print_help()
            return
        if len(reflections) == 0:
            print("No reflection data found in the input")
            self.parser.print_help()
            return
        if len(reflections) > 1:
            raise Sorry("Only one reflections list can be imported at present")
        reflections = reflections[0]

        self.check_input(reflections)

        # Configure the logging
        log.config(info=params.output.log, debug=params.output.debug_log)
        from dials.util.version import dials_version
        logger.info(dials_version())

        # Log the diff phil. NB compare by equality: 'is not' with a string
        # literal relies on interning and is not guaranteed to be correct.
        diff_phil = self.parser.diff_phil.as_str()
        if diff_phil != '':
            logger.info('The following parameters have been modified:\n')
            logger.info(diff_phil)

        # Modify options if necessary
        if params.output.correlation_plot.filename is not None:
            params.refinement.refinery.track_parameter_correlation = True

        # Warn about potentially unhelpful options
        if params.refinement.mp.nproc > 1:
            logger.warning(
                "WARNING: setting nproc > 1 is only helpful in rare "
                "circumstances. It is not recommended for typical data processing "
                "tasks.\n")

        # Get the refiner
        logger.info('Configuring refiner')
        refiner = RefinerFactory.from_parameters_data_experiments(
            params, reflections, experiments)

        # Refine the geometry
        if nexp == 1:
            logger.info('Performing refinement of a single Experiment...')
        else:
            logger.info(
                'Performing refinement of {0} Experiments...'.format(nexp))

        # Refine and get the refinement history
        history = refiner.run()

        if params.output.centroids:
            logger.info("Writing table of centroids to '{0}'".format(
                params.output.centroids))
            self.write_centroids_table(refiner, params.output.centroids)

        # Get the refined experiments
        experiments = refiner.get_experiments()

        # Write scan-varying parameters to file, if there were any
        if params.output.parameter_table:
            scans = experiments.scans()
            if len(scans) > 1:
                logger.info(
                    "Writing a scan-varying parameter table is only supported "
                    "for refinement of a single scan")
            else:
                scan = scans[0]
                text = refiner.get_param_reporter(
                ).varying_params_vs_image_number(scan.get_array_range())
                if text:
                    logger.info(
                        "Writing scan-varying parameter table to {0}".format(
                            params.output.parameter_table))
                    # Use a context manager so the file is closed even if
                    # the write raises
                    with open(params.output.parameter_table, "w") as f:
                        f.write(text)
                else:
                    logger.info("No scan-varying parameter table to write")

        crystals = experiments.crystals()
        if len(crystals) == 1:
            # output the refined model for information
            logger.info('')
            logger.info('Final refined crystal model:')
            logger.info(crystals[0])

        # Save the refined experiments to file
        output_experiments_filename = params.output.experiments
        logger.info('Saving refined experiments to {0}'.format(
            output_experiments_filename))
        from dxtbx.model.experiment_list import ExperimentListDumper
        dump = ExperimentListDumper(experiments)
        dump.as_json(output_experiments_filename)

        # Save reflections with updated predictions if requested (allow to switch
        # this off if it is a time-consuming step)
        if params.output.reflections:
            # Update predictions for all indexed reflections
            logger.info('Updating predictions for indexed reflections')
            preds = refiner.predict_for_indexed()

            # just copy over the columns of interest, leaving behind things
            # added by e.g. scan-varying refinement such as 'block', the
            # U, B and UB matrices and gradients.
            reflections['s1'] = preds['s1']
            reflections['xyzcal.mm'] = preds['xyzcal.mm']
            reflections['xyzcal.px'] = preds['xyzcal.px']
            if 'entering' in preds:
                reflections['entering'] = preds['entering']

            # set used_in_refinement and centroid_outlier flags
            assert len(preds) == len(reflections)
            reflections.unset_flags(
                flex.size_t_range(len(reflections)),
                reflections.flags.used_in_refinement
                | reflections.flags.centroid_outlier)
            mask = preds.get_flags(preds.flags.centroid_outlier)
            reflections.set_flags(mask, reflections.flags.centroid_outlier)
            mask = preds.get_flags(preds.flags.used_in_refinement)
            reflections.set_flags(mask, reflections.flags.used_in_refinement)

            logger.info(
                'Saving reflections with updated predictions to {0}'.format(
                    params.output.reflections))
            if params.output.include_unused_reflections:
                reflections.as_pickle(params.output.reflections)
            else:
                sel = reflections.get_flags(
                    reflections.flags.used_in_refinement)
                reflections.select(sel).as_pickle(params.output.reflections)

        # For debugging, if requested save matches to file
        if params.output.matches:
            matches = refiner.get_matches()
            logger.info(
                'Saving matches (use for debugging purposes) to {0}'.format(
                    params.output.matches))
            matches.as_pickle(params.output.matches)

        # Correlation plot
        if params.output.correlation_plot.filename is not None:
            from os.path import splitext
            root, ext = splitext(params.output.correlation_plot.filename)
            if not ext:
                ext = ".pdf"

            steps = params.output.correlation_plot.steps
            if steps is None:
                # default: only the final refinement step
                steps = [history.get_nrows() - 1]

            # extract individual column names or indices
            col_select = params.output.correlation_plot.col_select

            num_plots = 0
            for step in steps:
                fname_base = root
                if len(steps) > 1:
                    fname_base += "_step%02d" % step

                corrmats, labels = refiner.get_parameter_correlation_matrix(
                    step, col_select)
                if [corrmats, labels].count(None) == 0:
                    from dials.algorithms.refinement.refinement_helpers import corrgram
                    for resid_name, corrmat in corrmats.items():
                        plot_fname = fname_base + "_" + resid_name + ext
                        plt = corrgram(corrmat, labels)
                        if plt is not None:
                            logger.info(
                                'Saving parameter correlation plot to {}'.
                                format(plot_fname))
                            plt.savefig(plot_fname)
                            plt.close()
                            num_plots += 1
                    mat_fname = fname_base + ".pickle"
                    with open(mat_fname, 'wb') as handle:
                        for k, corrmat in corrmats.items():
                            corrmats[k] = corrmat.as_scitbx_matrix()
                        logger.info(
                            'Saving parameter correlation matrices to {0}'.
                            format(mat_fname))
                        pickle.dump({
                            'corrmats': corrmats,
                            'labels': labels
                        }, handle)

            if num_plots == 0:
                msg = "Sorry, no parameter correlation plots were produced. Please set " \
                      "track_parameter_correlation=True to ensure correlations are " \
                      "tracked, and make sure correlation_plot.col_select is valid."
                logger.info(msg)

        # Write out refinement history, if requested
        if params.output.history:
            with open(params.output.history, 'wb') as handle:
                logger.info('Saving refinement step history to {0}'.format(
                    params.output.history))
                pickle.dump(history, handle)

        # Log the total time taken
        logger.info("\nTotal time taken: {0:.2f}s".format(time() - start_time))

        return
Пример #26
0
    def run(self):
        """Run the MPI merging pipeline.

        Parses input parameters, builds the worker chain from the configured
        step list via per-step factories, validates every worker, then
        executes the workers in order, threading (experiments, reflections)
        from one step to the next. Optionally saves the final experiments
        and reflections per MPI rank. Step timings are logged throughout.
        """
        import datetime
        time_now = datetime.datetime.now()

        self.mpi_logger.log(str(time_now))
        if self.mpi_helper.rank == 0:
            self.mpi_logger.main_log(str(time_now))

        self.mpi_logger.log_step_time("TOTAL")

        self.mpi_logger.log_step_time("PARSE_INPUT_PARAMS")
        self.parse_input()
        self.mpi_logger.log_step_time("PARSE_INPUT_PARAMS", True)

        # Create the workers using the factories
        self.mpi_logger.log_step_time("CREATE_WORKERS")
        from xfel.merging import application
        import importlib

        workers = []
        steps = default_steps if self.params.dispatch.step_list is None else self.params.dispatch.step_list
        for step in steps:
            step_factory_name = step
            step_additional_info = []

            # A step may carry extra whitespace-separated arguments after
            # its factory name
            step_info = step.split(' ')
            assert len(step_info) > 0
            if len(step_info) > 1:
                step_factory_name = step_info[0]
                step_additional_info = step_info[1:]

            factory = importlib.import_module('xfel.merging.application.' +
                                              step_factory_name + '.factory')
            workers.extend(
                factory.factory.from_parameters(self.params,
                                                step_additional_info,
                                                mpi_helper=self.mpi_helper,
                                                mpi_logger=self.mpi_logger))

        # Perform phil validation up front
        for worker in workers:
            worker.validate()
        self.mpi_logger.log_step_time("CREATE_WORKERS", True)

        # Do the work
        experiments = reflections = None
        step = 0
        while workers:
            worker = workers.pop(0)
            # Use the idiomatic repr() rather than calling __repr__ directly
            self.mpi_logger.log_step_time("STEP_" + repr(worker))
            # Log worker name, i.e. execution step name
            step += 1
            if step > 1:
                self.mpi_logger.log('')
            step_desc = "STEP %d: %s" % (step, worker)
            self.mpi_logger.log(step_desc)

            if self.mpi_helper.rank == 0:
                if step > 1:
                    self.mpi_logger.main_log('')
                self.mpi_logger.main_log(step_desc)

            # Execute worker
            experiments, reflections = worker.run(experiments, reflections)
            self.mpi_logger.log_step_time("STEP_" + repr(worker), True)

        if self.params.output.save_experiments_and_reflections:
            from dxtbx.model.experiment_list import ExperimentListDumper
            import os
            if 'id' not in reflections:
                # Reconstruct the integer 'id' column by matching each
                # reflection's experiment identifier string against the
                # experiment list; unmatched reflections keep id == -1
                from dials.array_family import flex
                id_ = flex.int(len(reflections), -1)
                for expt_number, expt in enumerate(experiments):
                    sel = reflections['exp_id'] == expt.identifier
                    id_.set_selected(sel, expt_number)
                reflections['id'] = id_

            reflections.as_pickle(
                os.path.join(
                    self.params.output.output_dir, self.params.output.prefix +
                    "_%06d.pickle" % self.mpi_helper.rank))
            dump = ExperimentListDumper(experiments)
            dump.as_file(
                os.path.join(
                    self.params.output.output_dir, self.params.output.prefix +
                    "_%06d.json" % self.mpi_helper.rank))

        self.mpi_logger.log_step_time("TOTAL", True)
# RANSAC: keep only the experiments whose first image is in the CBF
# intersection, remapping reflection ids to the new experiment numbering
indexed_ransac = flex.reflection_table()
experiments_ransac = ExperimentList()

for idx, expt in enumerate(explist_ransac):
    basename = expt.imageset.get_image_identifier(0).split('/')[-1]
    if basename == 'idx-20180501170143366.cbf':
        print(idx, 'case-ransac')
    if basename not in cbf_intersection:
        continue
    experiments_ransac.append(expt)
    subset = refl_ransac.select(refl_ransac['id'] == idx)
    subset['id'] = flex.int(len(subset), len(experiments_ransac) - 1)
    indexed_ransac.extend(subset)
print('done getting new refl tables for case-ransac')

# NOTE(review): the three experiment lists are not asserted to be the same
# length here; check carefully whether that invariant is required
print('NUmber of experiments = ', len(experiments_0))
# Dump json/pickle pairs for the conventional, SRS and RANSAC selections
for tag, expts, refls in (
        ('conventional', experiments_0, indexed_0),
        ('srs', experiments_srs, indexed_srs),
        ('ransac', experiments_ransac, indexed_ransac)):
    dumper = ExperimentListDumper(expts)
    dumper.as_json('intersection_%s_experiments.json' % tag)
    dump('intersection_%s_reflections.pickle' % tag, refls)
Пример #28
0
    def __call__(self, params, options):
        """Import experiments from an XDS processing directory.

        Locates the best XDS output file (or the one named in params),
        builds an ExperimentList from it plus XDS.INP, optionally attaches
        a scan-varying crystal model from INTEGRATE.LP, prints a summary of
        each experiment, and writes the list to a JSON/pickle file (and
        optionally a datablock file).
        """
        from dxtbx.model.experiment_list import ExperimentListFactory
        from dxtbx.model.experiment_list import ExperimentListDumper
        import os
        # Get the XDS.INP file
        xds_inp = os.path.join(self.args[0], 'XDS.INP')
        if params.input.xds_file is None:
            xds_file = XDSFileImporter.find_best_xds_file(self.args[0])
        else:
            xds_file = os.path.join(self.args[0], params.input.xds_file)

        # Check a file is given
        if xds_file is None:
            raise RuntimeError('No XDS file found')

        # Load the experiment list
        # NOTE(review): 'unhandled' is never populated, so the report branch
        # below is dead code — confirm whether from_xds once filled it in
        unhandled = []
        experiments = ExperimentListFactory.from_xds(xds_inp, xds_file)

        # Print out any unhandled files
        if len(unhandled) > 0:
            print('-' * 80)
            print('The following command line arguments were not handled:')
            for filename in unhandled:
                print('  %s' % filename)

        # Print some general info
        print('-' * 80)
        print('Read %d experiments from %s' % (len(experiments), xds_file))

        # Attempt to create scan-varying crystal model if requested
        if params.read_varying_crystal:
            integrate_lp = os.path.join(self.args[0], 'INTEGRATE.LP')
            if os.path.isfile(integrate_lp):
                self.extract_varying_crystal(integrate_lp, experiments)
            else:
                print(
                    "No INTEGRATE.LP to extract varying crystal model. Skipping"
                )

        # Loop through the data blocks
        for i, exp in enumerate(experiments):

            # Print some experiment info
            print("-" * 80)
            print("Experiment %d" % i)
            print("  format: %s" % str(exp.imageset.get_format_class()))
            print("  type: %s" % type(exp.imageset))
            print("  num images: %d" % len(exp.imageset))

            # Print some model info
            if options.verbose > 1:
                print("")
                if exp.beam: print(exp.beam)
                else: print("no beam!")
                if exp.detector: print(exp.detector)
                else: print("no detector!")
                if exp.goniometer: print(exp.goniometer)
                else: print("no goniometer!")
                if exp.scan: print(exp.scan)
                else: print("no scan!")
                if exp.crystal: print(exp.crystal)
                else: print("no crystal!")

        # Write the experiment list to a JSON or pickle file
        if params.output.filename is None:
            params.output.filename = 'experiments.json'
        print("-" * 80)
        print('Writing experiments to %s' % params.output.filename)
        dump = ExperimentListDumper(experiments)
        dump.as_file(params.output.filename)

        # Optionally save as a data block
        if params.output.xds_datablock:
            print("-" * 80)
            print("Writing data block to %s" % params.output.xds_datablock)
            dump = DataBlockDumper(experiments.to_datablocks())
            dump.as_file(params.output.xds_datablock)
    def write_expt(self, experiments, filename):
        """Serialise *experiments* to *filename* as JSON."""
        from dxtbx.model.experiment_list import ExperimentListDumper

        ExperimentListDumper(experiments).as_json(filename)
Пример #30
0
def refine_expanding(params, merged_scope, combine_phil):
  """Iteratively refine CSPAD detector geometry, expanding the refined
  panel set one step at a time.

  Shells out to external programs (cctbx.xfel.filter_experiments_by_rmsd,
  dials.refine) via libtbx.easy_run, passing data between steps as
  tag-prefixed JSON/pickle files in the working directory.  Python 2 code
  (print statements, dict.iteritems).

  :param params: phil extract; uses tag, start_at_hierarchy_level,
    rmsd_filter, panel_filter, refine_distance, refine_energy,
    flat_refinement and flat_refinement_with_distance
  :param merged_scope: phil scope used to generate the per-step .phil files
  :param combine_phil: unused in this function -- TODO confirm callers
  """
  # Expanding refinement only supports starting from the hierarchy root.
  assert params.start_at_hierarchy_level == 0
  if params.rmsd_filter.enable:
    input_name = "filtered"
    # Pre-filter outlier reflections by positional RMSD (external command).
    command = "cctbx.xfel.filter_experiments_by_rmsd %s %s output.filtered_experiments=%s output.filtered_reflections=%s"
    command = command%("%s_combined_experiments.json"%params.tag, "%s_combined_reflections.pickle"%params.tag,
                       "%s_filtered_experiments.json"%params.tag, "%s_filtered_reflections.pickle"%params.tag)
    command += " iqr_multiplier=%f"%params.rmsd_filter.iqr_multiplier
    print command
    result = easy_run.fully_buffered(command=command).raise_if_errors()
    result.show_stdout()
  else:
    input_name = "combined"
  # --------------------------
  if params.panel_filter is not None:
    from libtbx import easy_pickle
    print "Filtering out all reflections except those on panels %s"%(", ".join(["%d"%p for p in params.panel_filter]))
    combined_path = "%s_combined_reflections.pickle"%params.tag
    data = easy_pickle.load(combined_path)
    # Build the union of per-panel selections over the requested panel ids.
    sel = None
    for panel_id in params.panel_filter:
      if sel is None:
        sel = data['panel'] == panel_id
      else:
        sel |= data['panel'] == panel_id
    print "Retaining", len(data.select(sel)), "out of", len(data), "reflections"
    # NOTE: overwrites the combined reflections file in place.
    easy_pickle.dump(combined_path, data.select(sel))
  # ----------------------------------
  # this is the order to refine the CSPAD in
  # Each step's panel list is cumulative: step N refines everything from
  # step N-1 plus one more pair of panels.
  steps = {}
  steps[0] = [2, 3]
  steps[1] = steps[0] + [0, 1]
  steps[2] = steps[1] + [14, 15]
  steps[3] = steps[2] + [6, 7]
  steps[4] = steps[3] + [4, 5]
  steps[5] = steps[4] + [12, 13]
  steps[6] = steps[5] + [8, 9]
  steps[7] = steps[6] + [10, 11]

  # Expand each step to the corresponding panels in the other three
  # quadrants (ids offset by 16, 32 and 48 -- presumably 16 panels per
  # quadrant; TODO confirm against the detector model).
  for s, panels in steps.iteritems():
    rest = []
    for p in panels:
      rest.append(p+16)
      rest.append(p+32)
      rest.append(p+48)
    panels.extend(rest)

  # Hierarchy levels refined at each step: step 0 refines at levels 0 and
  # 1, all subsequent steps only at level 2.
  levels = {0: (0,1)} # levels 0 and 1
  for i in range(7):
    levels[i+1] = (2,) # level 2

  previous_step_and_level = None
  for j in range(8):
    from libtbx import easy_pickle
    print "Filtering out all reflections except those on panels %s"%(", ".join(["%d"%p for p in steps[j]]))
    combined_path = "%s_%s_reflections.pickle"%(params.tag, input_name)
    output_path = "%s_reflections_step%d.pickle"%(params.tag, j)
    data = easy_pickle.load(combined_path)
    # Keep only reflections on the panels being refined in this step.
    sel = None
    for panel_id in steps[j]:
      if sel is None:
        sel = data['panel'] == panel_id
      else:
        sel |= data['panel'] == panel_id
    print "Retaining", len(data.select(sel)), "out of", len(data), "reflections"
    easy_pickle.dump(output_path, data.select(sel))

    for i in levels[j]:
      print "Step", j , "refining at hierarchy level", i
      refine_phil_file = "%s_refine_step%d_level%d.phil"%(params.tag, j, i)
      # Build phil overrides controlling which detector parameters are
      # fixed at this hierarchy level.
      if i == 0:
        if params.refine_distance:
          diff_phil = "refinement.parameterisation.detector.fix_list=Tau1" # fix detector rotz
        else:
          diff_phil = "refinement.parameterisation.detector.fix_list=Dist,Tau1" # fix detector rotz, distance
        if params.flat_refinement:
          diff_phil += ",Tau2,Tau3" # Also fix x and y rotations
        diff_phil += "\n"
        if params.refine_energy:
          diff_phil += "refinement.parameterisation.beam.fix=in_spindle_plane+out_spindle_plane\n" # allow energy to refine
      else:
        # Note, always need to fix something, so pick a panel group and fix its Tau1 (rotation around Z) always
        if params.flat_refinement and params.flat_refinement_with_distance:
          diff_phil = "refinement.parameterisation.detector.fix_list=Group1Tau1,Tau2,Tau3\n" # refine distance, rotz and xy translation
          diff_phil += "refinement.parameterisation.detector.constraints.parameter=Dist\n" # constrain distance to be refined identically for all panels at this hierarchy level
        elif params.flat_refinement:
          diff_phil = "refinement.parameterisation.detector.fix_list=Dist,Group1Tau1,Tau2,Tau3\n" # refine only rotz and xy translation
        else:
          diff_phil = "refinement.parameterisation.detector.fix_list=Group1Tau1\n" # refine almost everything

      # Chain the refinement inputs: the first run starts from the
      # combined/filtered experiments; later runs reuse the previous
      # output, switching to fresh per-step reflections when the step
      # number changes.
      if previous_step_and_level is None:
        command = "dials.refine %s %s_%s_experiments.json %s_reflections_step%d.pickle"%( \
          refine_phil_file, params.tag, input_name, params.tag, j)
      else:
        p_step, p_level = previous_step_and_level
        if p_step == j:
          command = "dials.refine %s %s_refined_experiments_step%d_level%d.json %s_refined_reflections_step%d_level%d.pickle"%( \
            refine_phil_file, params.tag, p_step, p_level, params.tag, p_step, p_level)
        else:
          command = "dials.refine %s %s_refined_experiments_step%d_level%d.json %s_reflections_step%d.pickle"%( \
            refine_phil_file, params.tag, p_step, p_level, params.tag, j)


      diff_phil += "refinement.parameterisation.detector.hierarchy_level=%d\n"%i

      output_experiments = "%s_refined_experiments_step%d_level%d.json"%(params.tag, j, i)
      command += " output.experiments=%s output.reflections=%s_refined_reflections_step%d_level%d.pickle"%( \
        output_experiments, params.tag, j, i)

      # Write the per-step phil file.  NOTE(review): refine_scope and
      # parse are module-level names, not defined in this function.
      scope = merged_scope.fetch(parse(diff_phil))
      f = open(refine_phil_file, 'w')
      f.write(refine_scope.fetch_diff(scope).as_str())
      f.close()

      print command
      result = easy_run.fully_buffered(command=command).raise_if_errors()
      result.show_stdout()

      # In expanding mode, if using flat refinement with distance, after having refined this step as a block, unrefined
      # panels will have been left behind.  Read back the new metrology, compute the shift applied to the panels refined
      # in this step,and apply that shift to the unrefined panels in this step
      if params.flat_refinement and params.flat_refinement_with_distance and i > 0:
        from dxtbx.model.experiment_list import ExperimentListFactory, ExperimentListDumper
        from xfel.command_line.cspad_detector_congruence import iterate_detector_at_level, iterate_panels
        from scitbx.array_family import flex
        from scitbx.matrix import col
        from libtbx.test_utils import approx_equal
        experiments = ExperimentListFactory.from_json_file(output_experiments, check_format=False)
        assert len(experiments.detectors()) == 1
        detector = experiments.detectors()[0]
        # Displacements: deltas along the vector normal to the detector
        displacements = flex.double()
        # Iterate through the panel groups at this level
        for panel_group in iterate_detector_at_level(detector.hierarchy(), 0, i):
          # Were there panels refined in this step in this panel group?
          if params.panel_filter:
            test = [list(detector).index(panel) in steps[j] for panel in iterate_panels(panel_group) if list(detector).index(panel) in params.panel_filter]
          else:
            test = [list(detector).index(panel) in steps[j] for panel in iterate_panels(panel_group)]
          if not any(test): continue
          # Compute the translation along the normal of this panel group.  This is defined as distance in dials.refine
          displacements.append(col(panel_group.get_local_fast_axis()).cross(col(panel_group.get_local_slow_axis())).dot(col(panel_group.get_local_origin())))

        # Even though the panels are constrained to move the same amount, there is a bit a variation.
        stats = flex.mean_and_variance(displacements)
        displacement = stats.mean()
        print "Average displacement along normals: %f +/- %f"%(stats.mean(), stats.unweighted_sample_standard_deviation())

        # Verify the variation isn't significant
        for k in range(1, len(displacements)):
          assert approx_equal(displacements[0], displacements[k])
        # If all of the panel groups in this level moved, no need to do anything.
        if len(displacements) != len(list(iterate_detector_at_level(detector.hierarchy(), 0, i))):
          for panel_group in iterate_detector_at_level(detector.hierarchy(), 0, i):
            if params.panel_filter:
              test = [list(detector).index(panel) in steps[j] and list(detector).index(panel) in params.panel_filter for panel in iterate_panels(panel_group)]
            else:
              test = [list(detector).index(panel) in steps[j] for panel in iterate_panels(panel_group)]
            # If any of the panels in this panel group moved, no need to do anything
            if any(test): continue

            # None of the panels in this panel group moved in this step, so need to apply displacement from other panel
            # groups at this level
            fast = col(panel_group.get_local_fast_axis())
            slow = col(panel_group.get_local_slow_axis())
            ori = col(panel_group.get_local_origin())
            normal = fast.cross(slow)
            # Replace the normal component of the origin with the refined
            # displacement; in-plane components are kept unchanged.
            panel_group.set_local_frame(fast, slow, (ori.dot(fast)*fast) + (ori.dot(slow)*slow) + (normal*displacement))

        # Check the new displacements. Should be the same across all panels.
        displacements = []
        for panel_group in iterate_detector_at_level(detector.hierarchy(), 0, i):
          displacements.append(col(panel_group.get_local_fast_axis()).cross(col(panel_group.get_local_slow_axis())).dot(col(panel_group.get_local_origin())))

        for k in range(1, len(displacements)):
          assert approx_equal(displacements[0], displacements[k])

        # Persist the adjusted metrology over the refined experiments file.
        dump = ExperimentListDumper(experiments)
        dump.as_json(output_experiments)

      previous_step_and_level = j,i

  # NOTE(review): output_geometry is a module-level helper (not shown here).
  output_geometry(params)