Example #1
def tst_dump_empty_sweep(self):
    from dxtbx.imageset import ImageSweep, NullReader, SweepFileList
    from dxtbx.model import Beam, Detector, Goniometer, Scan
    from dxtbx.model.crystal import crystal_model
    from uuid import uuid4

    imageset = ImageSweep(NullReader(SweepFileList("filename%01d.cbf", (0, 3))))
    imageset.set_beam(Beam((1, 0, 0)))
    imageset.set_detector(Detector())
    imageset.set_goniometer(Goniometer())
    imageset.set_scan(Scan((1, 3), (0.0, 1.0)))

    crystal = crystal_model((1, 0, 0), (0, 1, 0), (0, 0, 1), space_group_symbol=1)

    experiments = ExperimentListFactory.from_imageset_and_crystal(
      imageset, crystal)

    dump = ExperimentListDumper(experiments)
    filename = 'temp%s.json' % uuid4().hex
    dump.as_json(filename)
    experiments2 = ExperimentListFactory.from_json_file(filename,
                                                        check_format=False)
    self.check(experiments, experiments2)

    print('OK')
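The round trip in Example #1 can be distilled to a few lines. A minimal sketch using only calls that appear in these examples; the input path is hypothetical, and ExperimentListFactory is assumed to live in the same dxtbx.model.experiment.experiment_list module as the dumper:

from dxtbx.model.experiment.experiment_list import (
    ExperimentListDumper, ExperimentListFactory)

# Load, dump to JSON, and reload without re-reading the image headers
experiments = ExperimentListFactory.from_json_file('experiments.json')  # hypothetical file
dump = ExperimentListDumper(experiments)
dump.as_json('experiments_copy.json')
experiments2 = ExperimentListFactory.from_json_file('experiments_copy.json',
                                                    check_format=False)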
Example #2
    def refine(self, experiments, centroids):
        print "Skipping refinement because the crystal orientation is refined during indexing"
        # TODO add dispatch.refine as option and use this code
        #    from dials.algorithms.refinement import RefinerFactory
        #    from time import time
        #    st = time()
        #
        #    logger.info('*' * 80)
        #    logger.info('Refining Model')
        #    logger.info('*' * 80)
        #
        #    refiner = RefinerFactory.from_parameters_data_experiments(
        #      self.params, centroids, experiments)
        #
        #    refiner.run()
        #    experiments = refiner.get_experiments()

        # Dump experiments to disk
        if self.params.output.refined_experiments_filename:
            from dxtbx.model.experiment.experiment_list import ExperimentListDumper
            dump = ExperimentListDumper(experiments)
            dump.as_json(self.params.output.refined_experiments_filename)


        #    logger.info('')
        #    logger.info('Time Taken = %f seconds' % (time() - st))

        return experiments
Example #3
  def tst_dump_formats(self):
    from uuid import uuid4
    from os.path import join
    import os

    os.environ['DIALS_REGRESSION'] = self.path

    # Get all the filenames
    filename1 = join(self.path, 'experiment_test_data', 'experiment_1.json')

    # Read all the experiment lists in
    elist1 = ExperimentListFactory.from_json_file(filename1)

    # Create the experiment list dumper
    dump = ExperimentListDumper(elist1)

    # Dump as JSON file and reload
    filename = 'temp%s.json' % uuid4().hex
    dump.as_json(filename)
    elist2 = ExperimentListFactory.from_json_file(filename)
    self.check(elist1, elist2)

    # Dump as split JSON file and reload
    filename = 'temp%s.json' % uuid4().hex
    dump.as_json(filename, split=True)
    elist2 = ExperimentListFactory.from_json_file(filename)
    self.check(elist1, elist2)

    # Dump as pickle and reload
    filename = 'temp%s.pickle' % uuid4().hex
    dump.as_pickle(filename)
    elist2 = ExperimentListFactory.from_pickle_file(filename)
    self.check(elist1, elist2)
Example #4
    def refine(self):
        # From Aaron Brewster: refinement step skipped as it's done in indexing
        # This writes out experiments to disc
        if self.phil.output.refined_experiments_filename:
            from dxtbx.model.experiment.experiment_list import ExperimentListDumper
            dump = ExperimentListDumper(self.experiments)
            dump.as_json(self.phil.output.refined_experiments_filename)
Example #5
    def save_experiments(self, experiments, filename):
        ''' Save the experiments to file. '''
        from time import time
        from dxtbx.model.experiment.experiment_list import ExperimentListDumper
        st = time()
        logger.info('Saving the experiments to %s' % filename)
        dump = ExperimentListDumper(experiments)
        with open(filename, "w") as outfile:
            outfile.write(dump.as_json())
        logger.info(' time taken: %g' % (time() - st))
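Note the two modes of as_json used across these examples: passed a filename it writes the file itself (Examples #1 and #4), while called with no argument, as here, it returns the JSON text for the caller to write. A short sketch of both, assuming an experiments list is already in hand:

dump = ExperimentListDumper(experiments)
dump.as_json('experiments.json')   # writes the file directly

text = dump.as_json()              # returns the JSON string instead
with open('experiments.json', 'w') as outfile:
    outfile.write(text)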
Example #6
        dr = DetectorRefiner()
    else:
        experiments = None
        reflections = None
        dr = None
    cr = CrystalRefiners()

    for cycle in range(working_params.n_macrocycles):

        if rank == 0:
            print("MACROCYCLE %02d" % (cycle + 1))
            print("=============\n")
            # first run: multi experiment joint refinement of detector with fixed beam and
            # crystals
            experiments = dr(experiments, reflections)
        else:
            experiments = None

        # second run
        experiments = cr(experiments, reflections)

    if rank == 0:
        # save the refined experiments
        from dxtbx.model.experiment.experiment_list import ExperimentListDumper

        dump = ExperimentListDumper(experiments)
        experiments_filename = "refined_experiments.json"
        dump.as_json(experiments_filename)
        print("refined geometry written to {0}".format(experiments_filename))
Example #7
class TestExperimentListDumper(object):

  def __init__(self, path):
    self.path = path

  def run(self):
    self.tst_dump_formats()
    self.tst_dump_empty_sweep()
    self.tst_dump_with_lookup()
    self.tst_dump_with_bad_lookup()

  def tst_dump_formats(self):
    from uuid import uuid4
    from os.path import join
    import os

    os.environ['DIALS_REGRESSION'] = self.path

    # Get all the filenames
    filename1 = join(self.path, 'experiment_test_data', 'experiment_1.json')

    # Read all the experiment lists in
    elist1 = ExperimentListFactory.from_json_file(filename1)

    # Create the experiment list dumper
    dump = ExperimentListDumper(elist1)

    # Dump as JSON file and reload
    filename = 'temp%s.json' % uuid4().hex
    dump.as_json(filename)
    elist2 = ExperimentListFactory.from_json_file(filename)
    self.check(elist1, elist2)

    # Dump as split JSON file and reload
    filename = 'temp%s.json' % uuid4().hex
    dump.as_json(filename, split=True)
    elist2 = ExperimentListFactory.from_json_file(filename)
    self.check(elist1, elist2)

    # Dump as pickle and reload
    filename = 'temp%s.pickle' % uuid4().hex
    dump.as_pickle(filename)
    elist2 = ExperimentListFactory.from_pickle_file(filename)
    self.check(elist1, elist2)

  def tst_dump_empty_sweep(self):
    from dxtbx.imageset import ImageSweep, NullReader, SweepFileList
    from dxtbx.model import Beam, Detector, Goniometer, Scan
    from dxtbx.model.crystal import crystal_model
    from uuid import uuid4

    imageset = ImageSweep(NullReader(SweepFileList("filename%01d.cbf", (0, 3))))
    imageset.set_beam(Beam((1, 0, 0)))
    imageset.set_detector(Detector())
    imageset.set_goniometer(Goniometer())
    imageset.set_scan(Scan((1, 3), (0.0, 1.0)))

    crystal = crystal_model((1, 0, 0), (0, 1, 0), (0, 0, 1), space_group_symbol=1)

    experiments = ExperimentListFactory.from_imageset_and_crystal(
      imageset, crystal)

    dump = ExperimentListDumper(experiments)
    filename = 'temp%s.json' % uuid4().hex
    dump.as_json(filename)
    experiments2 = ExperimentListFactory.from_json_file(filename,
                                                        check_format=False)
    self.check(experiments, experiments2)

    print('OK')

  def tst_dump_with_lookup(self):
    from dxtbx.imageset import ImageSweep, NullReader, SweepFileList
    from dxtbx.model import Beam, Detector, Goniometer, Scan
    from dxtbx.model.crystal import crystal_model
    from uuid import uuid4
    import libtbx.load_env
    import os
    from os.path import join

    try:
      dials_regression = libtbx.env.dist_path('dials_regression')
    except KeyError:
      print('FAIL: dials_regression not configured')
      exit(0)

    filename = join(dials_regression, "centroid_test_data",
                    "experiments_with_lookup.json")

    experiments = ExperimentListFactory.from_json_file(
      filename,
      check_format=True)

    imageset = experiments[0].imageset
    assert imageset.external_lookup.mask.data is not None
    assert imageset.external_lookup.gain.data is not None
    assert imageset.external_lookup.pedestal.data is not None
    assert imageset.external_lookup.mask.filename is not None
    assert imageset.external_lookup.gain.filename is not None
    assert imageset.external_lookup.pedestal.filename is not None
    assert imageset.external_lookup.mask.data.all_eq(True)
    assert imageset.external_lookup.gain.data.all_eq(1)
    assert imageset.external_lookup.pedestal.data.all_eq(0)

    dump = ExperimentListDumper(experiments)
    filename = 'temp%s.json' % uuid4().hex
    dump.as_json(filename)

    experiments = ExperimentListFactory.from_json_file(
      filename,
      check_format=True)

    imageset = experiments[0].imageset
    assert imageset.external_lookup.mask.data is not None
    assert imageset.external_lookup.gain.data is not None
    assert imageset.external_lookup.pedestal.data is not None
    assert imageset.external_lookup.mask.filename is not None
    assert imageset.external_lookup.gain.filename is not None
    assert imageset.external_lookup.pedestal.filename is not None
    assert imageset.external_lookup.mask.data.all_eq(True)
    assert imageset.external_lookup.gain.data.all_eq(1)
    assert imageset.external_lookup.pedestal.data.all_eq(0)
Example #8
  def run(self):
    ''' Run the script. '''
    from dials.algorithms.profile_model.factory import ProfileModelFactory
    from dials.util.command_line import Command
    from dials.array_family import flex
    from dials.util.options import flatten_reflections, flatten_experiments
    from dxtbx.model.experiment.experiment_list import ExperimentListDumper
    from libtbx.utils import Sorry
    from dials.util import log

    log.config()

    # Parse the command line
    params, options = self.parser.parse_args(show_diff_phil=True)
    reflections = flatten_reflections(params.input.reflections)
    experiments = flatten_experiments(params.input.experiments)
    if len(reflections) == 0 and len(experiments) == 0:
      self.parser.print_help()
      return
    if len(reflections) != 1:
      raise Sorry('exactly 1 reflection table must be specified')
    if len(experiments) == 0:
      raise Sorry('no experiments were specified')
    if 'background.mean' not in reflections[0] and params.subtract_background:
      raise Sorry('subtract_background requires background.mean in reflections')

    reflections, _ = self.process_reference(reflections[0], params)

    # Predict the reflections
    logger.info("")
    logger.info("=" * 80)
    logger.info("")
    logger.info("Predicting reflections")
    logger.info("")
    predicted = flex.reflection_table.from_predictions_multi(
      experiments,
      dmin=params.prediction.d_min,
      dmax=params.prediction.d_max,
      margin=params.prediction.margin,
      force_static=params.prediction.force_static)

    # Match with predicted
    matched, reflections, unmatched = predicted.match_with_reference(reflections)
    assert len(matched) == len(predicted)
    assert matched.count(True) <= len(reflections)
    if matched.count(True) == 0:
      raise Sorry('''
        Invalid input for reference reflections.
        Zero reference spots were matched to predictions
      ''')
    elif len(unmatched) != 0:
      logger.info('')
      logger.info('*' * 80)
      logger.info('Warning: %d reference spots were not matched to predictions' % (
        len(unmatched)))
      logger.info('*' * 80)
      logger.info('')

    # Create the profile model
    experiments = ProfileModelFactory.create(params, experiments, reflections)
    for model in experiments:
      sigma_b = model.profile.sigma_b(deg=True)
      sigma_m = model.profile.sigma_m(deg=True)
      if isinstance(sigma_b, float):
        logger.info('Sigma B: %f' % sigma_b)
        logger.info('Sigma M: %f' % sigma_m)
      else: # scan varying
        mean_sigma_b = sum(sigma_b) / len(sigma_b)
        mean_sigma_m = sum(sigma_m) / len(sigma_m)
        logger.info('Sigma B: %f' % mean_sigma_b)
        logger.info('Sigma M: %f' % mean_sigma_m)

    # Write the parameters
    Command.start("Writing experiments to %s" % params.output)
    dump = ExperimentListDumper(experiments)
    with open(params.output, "w") as outfile:
      outfile.write(dump.as_json())
    Command.end("Wrote experiments to %s" % params.output)
Example #9
    def run(self):

        print "Parsing input"
        params, options = self.parser.parse_args(show_diff_phil=True)

        # Configure the logging
        log.config(params.detector_phase.refinement.verbosity,
                   info='dials.refine.log',
                   debug='dials.refine.debug.log')

        # Try to obtain the models and data
        if not params.input.experiments:
            raise Sorry("No Experiments found in the input")
        if not params.input.reflections:
            raise Sorry("No reflection data found in the input")
        try:
            assert len(params.input.reflections) == len(
                params.input.experiments)
        except AssertionError:
            raise Sorry(
                "The number of input reflections files does not match the "
                "number of input experiments")

        # set up global experiments and reflections lists
        from dials.array_family import flex
        reflections = flex.reflection_table()
        global_id = 0
        from dxtbx.model.experiment.experiment_list import ExperimentList
        experiments = ExperimentList()

        if params.reference_detector == "first":
            # Use the first experiment of the first experiment list as the reference detector
            ref_exp = params.input.experiments[0].data[0]
        else:
            # Average all the detectors to generate a reference detector
            assert params.detector_phase.refinement.parameterisation.detector.hierarchy_level == 0
            from scitbx.matrix import col
            panel_fasts = []
            panel_slows = []
            panel_oris = []
            for exp_wrapper in params.input.experiments:
                exp = exp_wrapper.data[0]
                if panel_oris:
                    for i, panel in enumerate(exp.detector):
                        panel_fasts[i] += col(panel.get_fast_axis())
                        panel_slows[i] += col(panel.get_slow_axis())
                        panel_oris[i] += col(panel.get_origin())
                else:
                    for i, panel in enumerate(exp.detector):
                        panel_fasts.append(col(panel.get_fast_axis()))
                        panel_slows.append(col(panel.get_slow_axis()))
                        panel_oris.append(col(panel.get_origin()))

            ref_exp = copy.deepcopy(params.input.experiments[0].data[0])
            for i, panel in enumerate(ref_exp.detector):
                # Averaging the fast and slow axes can make them non-orthogonal.
                # Fix this by finding the vector that bisects them, then rotating
                # 45 degrees either side of it around their cross product
                vf = panel_fasts[i] / len(params.input.experiments)
                vs = panel_slows[i] / len(params.input.experiments)
                c = vf.cross(vs)
                angle = vf.angle(vs, deg=True)
                v45 = vf.rotate(c, angle / 2, deg=True)
                vf = v45.rotate(c, -45, deg=True)
                vs = v45.rotate(c, 45, deg=True)
                panel.set_frame(vf, vs,
                                panel_oris[i] / len(params.input.experiments))

            print "Reference detector (averaged):", str(ref_exp.detector)

        # set the experiment factory that combines a crystal with the reference beam
        # and the reference detector
        experiment_from_crystal = ExperimentFromCrystal(
            ref_exp.beam, ref_exp.detector)

        # keep track of the number of refl per accepted experiment for a table
        nrefs_per_exp = []

        # loop through the input, building up the global lists
        for ref_wrapper, exp_wrapper in zip(params.input.reflections,
                                            params.input.experiments):
            refs = ref_wrapper.data
            exps = exp_wrapper.data

            # there might be multiple experiments already here. Loop through them
            for i, exp in enumerate(exps):

                # select the relevant reflections
                sel = refs['id'] == i
                sub_ref = refs.select(sel)

                ## DGW commented out as reflections.minimum_number_of_reflections no longer exists
                #if len(sub_ref) < params.crystals_phase.refinement.reflections.minimum_number_of_reflections:
                #  print "skipping experiment", i, "in", exp_wrapper.filename, "due to insufficient strong reflections in", ref_wrapper.filename
                #  continue

                # build an experiment with this crystal plus the reference models
                combined_exp = experiment_from_crystal(exp.crystal)

                # next experiment ID in series
                exp_id = len(experiments)

                # check this experiment
                if not check_experiment(combined_exp, sub_ref):
                    print "skipping experiment", i, "in", exp_wrapper.filename, "due to poor RMSDs"
                    continue

                # set reflections ID
                sub_ref['id'] = flex.int(len(sub_ref), exp_id)

                # keep number of reflections for the table
                nrefs_per_exp.append(len(sub_ref))

                # obtain mm positions on the reference detector
                sub_ref = indexer_base.map_spots_pixel_to_mm_rad(
                    sub_ref, combined_exp.detector, combined_exp.scan)

                # extend refl and experiments lists
                reflections.extend(sub_ref)
                experiments.append(combined_exp)

        # print number of reflections per accepted experiment
        from libtbx.table_utils import simple_table
        header = ["Experiment", "Nref"]
        rows = [(str(i), str(n)) for (i, n) in enumerate(nrefs_per_exp)]
        st = simple_table(rows, header)
        print "Number of reflections per experiment"
        print st.format()

        for cycle in range(params.n_macrocycles):

            print "MACROCYCLE %02d" % (cycle + 1)
            print "=============\n"
            # first run: multi experiment joint refinement of detector with fixed beam and
            # crystals
            print "PHASE 1"

            # SET THIS TEST TO FALSE TO REFINE WHOLE DETECTOR AS SINGLE JOB
            if params.detector_phase.refinement.parameterisation.detector.hierarchy_level > 0:
                experiments = detector_parallel_refiners(
                    params.detector_phase, experiments, reflections)
            else:
                experiments = detector_refiner(params.detector_phase,
                                               experiments, reflections)

            # second run
            print "PHASE 2"
            experiments = crystals_refiner(params.crystals_phase, experiments,
                                           reflections)

        # Save the refined experiments to file
        output_experiments_filename = params.output.experiments_filename
        print('Saving refined experiments to {0}'.format(
            output_experiments_filename))
        from dxtbx.model.experiment.experiment_list import ExperimentListDumper
        dump = ExperimentListDumper(experiments)
        dump.as_json(output_experiments_filename)

        # Write out refined reflections, if requested
        if params.output.reflections_filename:
            print('Saving refined reflections to {0}'.format(
                params.output.reflections_filename))
            reflections.as_pickle(params.output.reflections_filename)

        return
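The 45-degree re-orthogonalization used when averaging detector axes above (and again in Example #15) can be checked in isolation. A small sketch with scitbx.matrix.col, using the same cross/angle/rotate calls as the snippet; the input vectors are made up:

from scitbx.matrix import col

vf = col((1.0, 0.05, 0.0)).normalize()  # averaged fast axis, slightly skewed
vs = col((0.0, 1.0, 0.0))               # averaged slow axis

c = vf.cross(vs)                         # rotation axis perpendicular to both
angle = vf.angle(vs, deg=True)           # current, not quite 90 degree, separation
v45 = vf.rotate(c, angle / 2, deg=True)  # bisector of the two axes
vf = v45.rotate(c, -45, deg=True)        # step back 45 degrees from the bisector
vs = v45.rotate(c, 45, deg=True)         # and forward 45 degrees

print(vf.angle(vs, deg=True))  # exactly 90.0 again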
Example #10
    def run(self):
        '''Execute the script.'''
        from dials.algorithms.refinement.two_theta_refiner import \
          TwoThetaReflectionManager, TwoThetaTarget, \
          TwoThetaPredictionParameterisation

        start_time = time()

        # Parse the command line
        params, options = self.parser.parse_args(show_diff_phil=False)

        # set up global experiments and reflections lists
        from dials.array_family import flex
        reflections = flex.reflection_table()
        global_id = 0
        from dxtbx.model.experiment.experiment_list import ExperimentList
        experiments = ExperimentList()

        # loop through the input, building up the global lists
        nrefs_per_exp = []
        for ref_wrapper, exp_wrapper in zip(params.input.reflections,
                                            params.input.experiments):
            refs = ref_wrapper.data
            exps = exp_wrapper.data
            for i, exp in enumerate(exps):
                sel = refs['id'] == i
                sub_ref = refs.select(sel)
                nrefs_per_exp.append(len(sub_ref))
                sub_ref['id'] = flex.int(len(sub_ref), global_id)
                reflections.extend(sub_ref)
                experiments.append(exp)
                global_id += 1

        # Try to load the models and data
        nexp = len(experiments)
        if nexp == 0:
            print "No Experiments found in the input"
            self.parser.print_help()
            return
        if len(reflections) == 0:
            print "No reflection data found in the input"
            self.parser.print_help()
            return

        self.check_input(reflections)

        # Configure the logging
        log.config(info=params.output.log, debug=params.output.debug_log)
        logger.info(dials_version())

        # Log the diff phil
        diff_phil = self.parser.diff_phil.as_str()
        if diff_phil != '':
            logger.info('The following parameters have been modified:\n')
            logger.info(diff_phil)

        # Convert to P 1?
        if params.refinement.triclinic:
            reflections, experiments = self.convert_to_P1(
                reflections, experiments)

        # Combine crystals?
        if params.refinement.combine_crystal_models and len(experiments) > 1:
            logger.info('Combining {0} crystal models'.format(
                len(experiments)))
            experiments = self.combine_crystals(experiments)

        # Filter integrated centroids?
        if params.refinement.filter_integrated_centroids:
            reflections = self.filter_integrated_centroids(reflections)

        # Get the refiner
        logger.info('Configuring refiner')
        refiner = self.create_refiner(params, reflections, experiments)

        # Refine the geometry
        if nexp == 1:
            logger.info('Performing refinement of a single Experiment...')
        else:
            logger.info(
                'Performing refinement of {0} Experiments...'.format(nexp))

        # Refine and get the refinement history
        history = refiner.run()

        # get the refined experiments
        experiments = refiner.get_experiments()
        crystals = experiments.crystals()

        if len(crystals) == 1:
            # output the refined model for information
            logger.info('')
            logger.info('Final refined crystal model:')
            logger.info(crystals[0])
            logger.info(self.cell_param_table(crystals[0]))

        # Save the refined experiments to file
        output_experiments_filename = params.output.experiments
        logger.info('Saving refined experiments to {0}'.format(
            output_experiments_filename))
        from dxtbx.model.experiment.experiment_list import ExperimentListDumper
        dump = ExperimentListDumper(experiments)
        dump.as_json(output_experiments_filename)

        # Correlation plot
        if params.output.correlation_plot.filename is not None:
            from os.path import splitext
            root, ext = splitext(params.output.correlation_plot.filename)
            if not ext: ext = ".pdf"

            steps = params.output.correlation_plot.steps
            if steps is None: steps = [history.get_nrows() - 1]

            # extract individual column names or indices
            col_select = params.output.correlation_plot.col_select

            num_plots = 0
            for step in steps:
                fname_base = root
                if len(steps) > 1: fname_base += "_step%02d" % step
                plot_fname = fname_base + ext
                corrmat, labels = refiner.get_parameter_correlation_matrix(
                    step, col_select)
                if [corrmat, labels].count(None) == 0:
                    from dials.algorithms.refinement.refinement_helpers import corrgram
                    plt = corrgram(corrmat, labels)
                    if plt is not None:
                        logger.info(
                            'Saving parameter correlation plot to {}'.format(
                                plot_fname))
                        plt.savefig(plot_fname)
                        num_plots += 1
                    mat_fname = fname_base + ".pickle"
                    with open(mat_fname, 'wb') as handle:
                        py_mat = corrmat.as_scitbx_matrix()  # convert to pickle-friendly form
                        logger.info(
                            'Saving parameter correlation matrix to {0}'.
                            format(mat_fname))
                        pickle.dump({
                            'corrmat': py_mat,
                            'labels': labels
                        }, handle)

            if num_plots == 0:
                msg = "Sorry, no parameter correlation plots were produced. Please set " \
                      "track_parameter_correlation=True to ensure correlations are " \
                      "tracked, and make sure correlation_plot.col_select is valid."
                logger.info(msg)

        if params.output.cif is not None:
            self.generate_cif(crystals[0], refiner, file=params.output.cif)

        if params.output.p4p is not None:
            self.generate_p4p(crystals[0],
                              experiments[0].beam,
                              file=params.output.p4p)

        if params.output.mmcif is not None:
            self.generate_mmcif(crystals[0], refiner, file=params.output.mmcif)

        # Log the total time taken
        logger.info("\nTotal time taken: {0:.2f}s".format(time() - start_time))
Example #11
    def run(self):
        '''Execute the script.'''

        from dials.util.options import flatten_reflections, flatten_experiments, \
          flatten_datablocks
        import pickle

        # Parse the command line
        params, options = self.parser.parse_args(show_diff_phil=True)
        reflections = flatten_reflections(params.input.reflections)
        experiments = flatten_experiments(params.input.experiments)
        datablocks = flatten_datablocks(params.input.datablock)

        # Try to load the models and data
        slice_exps = len(experiments) > 0
        slice_refs = len(reflections) > 0
        slice_dbs = len(datablocks) > 0

        # Catch case of nothing to do
        if not any([slice_exps, slice_refs, slice_dbs]):
            print "No suitable input provided"
            self.parser.print_help()
            return

        if reflections:
            if len(reflections) > 1:
                raise Sorry(
                    "Only one reflections list can be imported at present")
            reflections = reflections[0]

            # calculate frame numbers if needed
            if experiments:
                reflections = calculate_frame_numbers(reflections, experiments)

            # if we still don't have the right column give up
            if 'xyzobs.px.value' not in reflections:
                raise Sorry(
                    "These reflections do not have frame numbers set, and "
                    "there are no experiments provided to calculate these.")

        # set trivial case where no scan range is provided at all
        if not params.scan_range:
            params.scan_range = [None]

        # check if slicing into blocks
        if params.block_size is not None:
            # in this case for simplicity, ensure that there is either an
            # an experiment list or datablocks, but not both. Ensure there is only
            # a single scan contained within.
            if [slice_exps, slice_dbs].count(True) != 1:
                raise Sorry(
                    "For slicing into blocks please provide either datablocks"
                    " or experiments, but not both.")
            if slice_exps:
                if len(experiments) > 1:
                    raise Sorry(
                        "For slicing into blocks please provide a single "
                        "scan only")
                scan = experiments[0].scan
            if slice_dbs:
                scans = datablocks[0].unique_scans()
                if len(scans) > 1 or len(datablocks) > 1:
                    raise Sorry(
                        "For slicing into blocks please provide a single "
                        "scan only")
                scan = scans[0]

            # Having extracted the scan, calculate the blocks
            params.scan_range = calculate_block_ranges(scan, params.block_size)

            # Do the slicing then recombine
            if slice_exps:
                sliced = [slice_experiments(experiments, [sr])[0] \
                  for sr in params.scan_range]
                sliced_experiments = ExperimentList()
                for exp in sliced:
                    sliced_experiments.append(exp)

            if slice_dbs:
                sliced = [slice_datablocks(datablocks, [sr])[0] \
                  for sr in params.scan_range]
                imagesets = [db.extract_imagesets()[0] for db in sliced]
                sliced_datablocks = DataBlock(imagesets)

            # slice reflections if present
            if slice_refs:
                sliced = [slice_reflections(reflections, [sr]) \
                  for sr in params.scan_range]
                sliced_reflections = sliced[0]
                for i, rt in enumerate(sliced[1:]):
                    rt['id'] += (i + 1)  # set id
                    sliced_reflections.extend(rt)

        else:
            # slice each dataset into the requested subset
            if slice_exps:
                sliced_experiments = slice_experiments(experiments,
                                                       params.scan_range)
            if slice_refs:
                sliced_reflections = slice_reflections(reflections,
                                                       params.scan_range)
            if slice_dbs:
                sliced_datablocks = slice_datablocks(datablocks,
                                                     params.scan_range)

        # Save sliced experiments
        if slice_exps:
            output_experiments_filename = params.output.experiments_filename
            if output_experiments_filename is None:
                # take first filename as template
                bname = basename(params.input.experiments[0].filename)
                bname = splitext(bname)[0]
                if not bname: bname = "experiments"
                if len(params.scan_range) == 1 and params.scan_range[0] is not None:
                    ext = "_{0}_{1}.json".format(*params.scan_range[0])
                else:
                    ext = "_sliced.json"
                output_experiments_filename = bname + ext
            print('Saving sliced experiments to {0}'.format(
                output_experiments_filename))

            from dxtbx.model.experiment.experiment_list import ExperimentListDumper
            dump = ExperimentListDumper(sliced_experiments)
            dump.as_json(output_experiments_filename)

        # Save sliced reflections
        if slice_refs:
            output_reflections_filename = params.output.reflections_filename
            if output_reflections_filename is None:
                # take first filename as template
                bname = basename(params.input.reflections[0].filename)
                bname = splitext(bname)[0]
                if not bname: bname = "reflections"
                if len(params.scan_range) == 1 and params.scan_range[0] is not None:
                    ext = "_{0}_{1}.pickle".format(*params.scan_range[0])
                else:
                    ext = "_sliced.pickle"
                output_reflections_filename = bname + ext

            print('Saving sliced reflections to {0}'.format(
                output_reflections_filename))
            sliced_reflections.as_pickle(output_reflections_filename)

        # Save sliced datablocks
        if slice_dbs:
            output_datablocks_filename = params.output.datablocks_filename
            if output_datablocks_filename is None:
                # take first filename as template
                bname = basename(params.input.datablock[0].filename)
                bname = splitext(bname)[0]
                if not bname: bname = "datablock"
                if len(params.scan_range) == 1 and params.scan_range[0] is not None:
                    ext = "_{0}_{1}.json".format(*params.scan_range[0])
                else:
                    ext = "_sliced.json"
                output_datablocks_filename = bname + ext
            print('Saving sliced datablocks to {0}'.format(
                output_datablocks_filename))

            from dxtbx.datablock import DataBlockDumper
            dump = DataBlockDumper(sliced_datablocks)
            dump.as_file(output_datablocks_filename)

        return
Example #12
    def run(self):
        ''' Parse the options. '''
        from dials.util.options import flatten_experiments, flatten_reflections
        from dxtbx.model.experiment.experiment_list import ExperimentList
        from scitbx.math import five_number_summary
        # Parse the command line arguments
        params, options = self.parser.parse_args(show_diff_phil=True)
        self.params = params
        experiments = flatten_experiments(params.input.experiments)
        reflections = flatten_reflections(params.input.reflections)

        assert len(reflections) == 1
        reflections = reflections[0]
        print "Found", len(reflections), "reflections", "and", len(
            experiments), "experiments"

        difference_vector_norms = (reflections['xyzcal.mm'] -
                                   reflections['xyzobs.mm.value']).norms()

        data = flex.double()
        counts = flex.double()
        for i in range(len(experiments)):
            dvns = difference_vector_norms.select(reflections['id'] == i)
            counts.append(len(dvns))
            if len(dvns) == 0:
                data.append(0)
                continue
            rmsd = math.sqrt(flex.sum_sq(dvns) / len(dvns))
            data.append(rmsd)
        data *= 1000
        subset = data.select(counts > 0)
        print(len(subset), "experiments with > 0 reflections")

        if params.show_plots:
            h = flex.histogram(subset, n_slots=40)
            fig = plt.figure()
            ax = fig.add_subplot(111)
            ax.plot(h.slot_centers().as_numpy_array(),
                    h.slots().as_numpy_array(), '-')
            plt.title("Histogram of %d image RMSDs" % len(subset))

            fig = plt.figure()
            plt.boxplot(subset, vert=False)
            plt.title("Boxplot of %d image RMSDs" % len(subset))
            plt.show()

        outliers = counts == 0
        min_x, q1_x, med_x, q3_x, max_x = five_number_summary(subset)
        print "Five number summary of RMSDs (microns): min %.1f, q1 %.1f, med %.1f, q3 %.1f, max %.1f" % (
            min_x, q1_x, med_x, q3_x, max_x)
        iqr_x = q3_x - q1_x
        cut_x = params.iqr_multiplier * iqr_x
        outliers.set_selected(data > q3_x + cut_x, True)
        #outliers.set_selected(col < q1_x - cut_x, True) # Don't throw away the images that are outliers in the 'good' direction!

        filtered_reflections = flex.reflection_table()
        filtered_experiments = ExperimentList()
        for i in range(len(experiments)):
            if outliers[i]:
                continue
            refls = reflections.select(reflections['id'] == i)
            refls['id'] = flex.int(len(refls), len(filtered_experiments))
            filtered_reflections.extend(refls)
            filtered_experiments.append(experiments[i])

        zeroes = counts == 0
        n_zero = len(counts.select(zeroes))
        print "Removed %d bad experiments and %d experiments with zero reflections, out of %d (%%%.1f)" % (
            len(experiments) - len(filtered_experiments) - n_zero, n_zero,
            len(experiments), 100 *
            ((len(experiments) - len(filtered_experiments)) /
             len(experiments)))
        from dxtbx.model.experiment.experiment_list import ExperimentListDumper
        dump = ExperimentListDumper(filtered_experiments)
        dump.as_json(params.output.filtered_experiments)

        filtered_reflections.as_pickle(params.output.filtered_reflections)
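The outlier cut in Example #12 is one-sided: an experiment is dropped only when its RMSD exceeds q3 plus iqr_multiplier times the interquartile range, while unusually low RMSDs are deliberately kept (see the commented-out line). A plain-Python sketch of the same rule, with made-up numbers and an assumed multiplier of 1.5:

import statistics

rmsds = [12.0, 13.5, 14.0, 15.0, 80.0]           # per-image RMSDs in microns (made up)
q1, _med, q3 = statistics.quantiles(rmsds, n=4)  # quartiles, as five_number_summary gives above
cut = 1.5 * (q3 - q1)                            # iqr_multiplier assumed to be 1.5
kept = [r for r in rmsds if r <= q3 + cut]       # only the high side is trimmed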
Example #13
def experiment_list(obj, outfile):
  ''' Dump an experiment list. '''
  from dxtbx.model.experiment.experiment_list import ExperimentListDumper
  dumper = ExperimentListDumper(obj)
  dumper.as_file(outfile)
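A usage sketch for the helper above; the paths are hypothetical and ExperimentListFactory is assumed to come from the same module as the dumper:

from dxtbx.model.experiment.experiment_list import ExperimentListFactory

elist = ExperimentListFactory.from_json_file('experiments.json')  # hypothetical input
experiment_list(elist, 'experiments_out.json')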
Example #14
    def run(self):
        '''Execute the script.'''
        from time import time
        import pickle
        from dials.util import log
        from dials.algorithms.refinement import RefinerFactory
        from dials.util.options import flatten_reflections, flatten_experiments

        start_time = time()

        # Parse the command line
        params, options = self.parser.parse_args(show_diff_phil=False)
        reflections = flatten_reflections(params.input.reflections)
        experiments = flatten_experiments(params.input.experiments)

        # Try to load the models and data
        nexp = len(experiments)
        if nexp == 0:
            print "No Experiments found in the input"
            self.parser.print_help()
            return
        if len(reflections) == 0:
            print "No reflection data found in the input"
            self.parser.print_help()
            return
        if len(reflections) > 1:
            raise Sorry("Only one reflections list can be imported at present")
        reflections = reflections[0]

        self.check_input(reflections)

        # Configure the logging
        log.config(info=params.output.log, debug=params.output.debug_log)
        from dials.util.version import dials_version
        logger.info(dials_version())

        # Log the diff phil
        diff_phil = self.parser.diff_phil.as_str()
        if diff_phil != '':
            logger.info('The following parameters have been modified:\n')
            logger.info(diff_phil)

        # Modify options if necessary
        if params.output.correlation_plot.filename is not None:
            params.refinement.refinery.track_parameter_correlation = True

        # Warn about potentially unhelpful options
        if params.refinement.mp.nproc > 1:
            logger.warning(
                "WARNING: setting nproc > 1 is only helpful in rare "
                "circumstances. It is not recommended for typical data processing "
                "tasks.\n")

        # Get the refiner
        logger.info('Configuring refiner')
        refiner = RefinerFactory.from_parameters_data_experiments(
            params, reflections, experiments)

        # Refine the geometry
        if nexp == 1:
            logger.info('Performing refinement of a single Experiment...')
        else:
            logger.info(
                'Performing refinement of {0} Experiments...'.format(nexp))

        # Refine and get the refinement history
        history = refiner.run()

        if params.output.centroids:
            logger.info("Writing table of centroids to '{0}'".format(
                params.output.centroids))
            self.write_centroids_table(refiner, params.output.centroids)

        # Get the refined experiments
        experiments = refiner.get_experiments()

        # Write scan-varying parameters to file, if there were any
        if params.output.parameter_table:
            scans = experiments.scans()
            if len(scans) > 1:
                logger.info(
                    "Writing a scan-varying parameter table is only supported "
                    "for refinement of a single scan")
            else:
                scan = scans[0]
                text = refiner.get_param_reporter().varying_params_vs_image_number(
                    scan.get_array_range())
                if text:
                    logger.info(
                        "Writing scan-varying parameter table to {0}".format(
                            params.output.parameter_table))
                    with open(params.output.parameter_table, "w") as f:
                        f.write(text)
                else:
                    logger.info("No scan-varying parameter table to write")

        crystals = experiments.crystals()
        if len(crystals) == 1:
            # output the refined model for information
            logger.info('')
            logger.info('Final refined crystal model:')
            logger.info(crystals[0])

        # Save the refined experiments to file
        output_experiments_filename = params.output.experiments
        logger.info('Saving refined experiments to {0}'.format(
            output_experiments_filename))
        from dxtbx.model.experiment.experiment_list import ExperimentListDumper
        dump = ExperimentListDumper(experiments)
        dump.as_json(output_experiments_filename)

        # Save reflections with updated predictions if requested (allow to switch
        # this off if it is a time-consuming step)
        if params.output.reflections:
            # Update predictions for all indexed reflections
            logger.info('Updating predictions for indexed reflections')
            preds = refiner.predict_for_indexed()

            # just copy over the columns of interest, leaving behind things
            # added by e.g. scan-varying refinement such as 'block', the
            # U, B and UB matrices and gradients.
            reflections['s1'] = preds['s1']
            reflections['xyzcal.mm'] = preds['xyzcal.mm']
            reflections['xyzcal.px'] = preds['xyzcal.px']
            if 'entering' in preds:
                reflections['entering'] = preds['entering']

            # set used_in_refinement and centroid_outlier flags
            assert len(preds) == len(reflections)
            reflections.unset_flags(
                flex.size_t_range(len(reflections)),
                reflections.flags.used_in_refinement
                | reflections.flags.centroid_outlier)
            mask = preds.get_flags(preds.flags.centroid_outlier)
            reflections.set_flags(mask, reflections.flags.centroid_outlier)
            mask = preds.get_flags(preds.flags.used_in_refinement)
            reflections.set_flags(mask, reflections.flags.used_in_refinement)

            logger.info(
                'Saving reflections with updated predictions to {0}'.format(
                    params.output.reflections))
            if params.output.include_unused_reflections:
                reflections.as_pickle(params.output.reflections)
            else:
                sel = reflections.get_flags(
                    reflections.flags.used_in_refinement)
                reflections.select(sel).as_pickle(params.output.reflections)

        # For debugging, if requested save matches to file
        if params.output.matches:
            matches = refiner.get_matches()
            logger.info(
                'Saving matches (use for debugging purposes) to {0}'.format(
                    params.output.matches))
            matches.as_pickle(params.output.matches)

        # Correlation plot
        if params.output.correlation_plot.filename is not None:
            from os.path import splitext
            root, ext = splitext(params.output.correlation_plot.filename)
            if not ext: ext = ".pdf"

            steps = params.output.correlation_plot.steps
            if steps is None: steps = [history.get_nrows() - 1]

            # extract individual column names or indices
            col_select = params.output.correlation_plot.col_select

            num_plots = 0
            for step in steps:
                fname_base = root
                if len(steps) > 1: fname_base += "_step%02d" % step
                plot_fname = fname_base + ext
                corrmat, labels = refiner.get_parameter_correlation_matrix(
                    step, col_select)
                if [corrmat, labels].count(None) == 0:
                    from dials.algorithms.refinement.refinement_helpers import corrgram
                    plt = corrgram(corrmat, labels)
                    if plt is not None:
                        logger.info(
                            'Saving parameter correlation plot to {}'.format(
                                plot_fname))
                        plt.savefig(plot_fname)
                        num_plots += 1
                    mat_fname = fname_base + ".pickle"
                    with open(mat_fname, 'wb') as handle:
                        py_mat = corrmat.as_scitbx_matrix()  # convert to pickle-friendly form
                        logger.info(
                            'Saving parameter correlation matrix to {0}'.
                            format(mat_fname))
                        pickle.dump({
                            'corrmat': py_mat,
                            'labels': labels
                        }, handle)

            if num_plots == 0:
                msg = "Sorry, no parameter correlation plots were produced. Please set " \
                      "track_parameter_correlation=True to ensure correlations are " \
                      "tracked, and make sure correlation_plot.col_select is valid."
                logger.info(msg)

        # Write out refinement history, if requested
        if params.output.history:
            with open(params.output.history, 'wb') as handle:
                logger.info('Saving refinement step history to {0}'.format(
                    params.output.history))
                pickle.dump(history, handle)

        # Log the total time taken
        logger.info("\nTotal time taken: {0:.2f}s".format(time() - start_time))

        return
Example #15
    def run(self):
        '''Execute the script.'''

        from dials.util.options import flatten_experiments
        from libtbx.utils import Sorry

        # Parse the command line
        params, options = self.parser.parse_args(show_diff_phil=True)

        # Try to load the models and data
        if len(params.input.experiments) == 0:
            print "No Experiments found in the input"
            self.parser.print_help()
            return
        if len(params.input.reflections) == 0:
            print "No reflection data found in the input"
            self.parser.print_help()
            return
        try:
            assert len(params.input.reflections) == len(
                params.input.experiments)
        except AssertionError:
            raise Sorry(
                "The number of input reflections files does not match the "
                "number of input experiments")

        flat_exps = flatten_experiments(params.input.experiments)

        ref_beam = params.reference_from_experiment.beam
        ref_goniometer = params.reference_from_experiment.goniometer
        ref_scan = params.reference_from_experiment.scan
        ref_crystal = params.reference_from_experiment.crystal
        ref_detector = params.reference_from_experiment.detector

        if ref_beam is not None:
            try:
                ref_beam = flat_exps[ref_beam].beam
            except IndexError:
                raise Sorry(
                    "{0} is not a valid experiment ID".format(ref_beam))

        if ref_goniometer is not None:
            try:
                ref_goniometer = flat_exps[ref_goniometer].goniometer
            except IndexError:
                raise Sorry(
                    "{0} is not a valid experiment ID".format(ref_goniometer))

        if ref_scan is not None:
            try:
                ref_scan = flat_exps[ref_scan].scan
            except IndexError:
                raise Sorry(
                    "{0} is not a valid experiment ID".format(ref_scan))

        if ref_crystal is not None:
            try:
                ref_crystal = flat_exps[ref_crystal].crystal
            except IndexError:
                raise Sorry(
                    "{0} is not a valid experiment ID".format(ref_crystal))

        if ref_detector is not None:
            assert not params.reference_from_experiment.average_detector
            try:
                ref_detector = flat_exps[ref_detector].detector
            except IndexError:
                raise Sorry(
                    "{0} is not a valid experiment ID".format(ref_detector))
        elif params.reference_from_experiment.average_detector:
            # Average all of the detectors together
            from scitbx.matrix import col

            def average_detectors(target, panelgroups, depth):
                # Recursive function to do the averaging

                if params.reference_from_experiment.average_hierarchy_level is None or \
                    depth == params.reference_from_experiment.average_hierarchy_level:
                    n = len(panelgroups)
                    sum_fast = col((0.0, 0.0, 0.0))
                    sum_slow = col((0.0, 0.0, 0.0))
                    sum_ori = col((0.0, 0.0, 0.0))

                    # Average the d matrix vectors
                    for pg in panelgroups:
                        sum_fast += col(pg.get_local_fast_axis())
                        sum_slow += col(pg.get_local_slow_axis())
                        sum_ori += col(pg.get_local_origin())
                    sum_fast /= n
                    sum_slow /= n
                    sum_ori /= n

                    # Re-orthogonalize the slow and the fast vectors by rotating around the cross product
                    c = sum_fast.cross(sum_slow)
                    a = sum_fast.angle(sum_slow, deg=True) / 2
                    sum_fast = sum_fast.rotate(c, a - 45, deg=True)
                    sum_slow = sum_slow.rotate(c, -(a - 45), deg=True)

                    target.set_local_frame(sum_fast, sum_slow, sum_ori)

                if target.is_group():
                    # Recurse
                    for i, target_pg in enumerate(target):
                        average_detectors(target_pg,
                                          [pg[i] for pg in panelgroups],
                                          depth + 1)

            ref_detector = flat_exps[0].detector
            average_detectors(ref_detector.hierarchy(),
                              [e.detector.hierarchy() for e in flat_exps], 0)

        combine = CombineWithReference(beam=ref_beam,
                                       goniometer=ref_goniometer,
                                       scan=ref_scan,
                                       crystal=ref_crystal,
                                       detector=ref_detector,
                                       params=params)

        # set up global experiments and reflections lists
        from dials.array_family import flex
        reflections = flex.reflection_table()
        global_id = 0
        from dxtbx.model.experiment.experiment_list import ExperimentList
        experiments = ExperimentList()

        # loop through the input, building up the global lists
        nrefs_per_exp = []
        for ref_wrapper, exp_wrapper in zip(params.input.reflections,
                                            params.input.experiments):
            refs = ref_wrapper.data
            exps = exp_wrapper.data
            for i, exp in enumerate(exps):
                sel = refs['id'] == i
                sub_ref = refs.select(sel)
                nrefs_per_exp.append(len(sub_ref))
                sub_ref['id'] = flex.int(len(sub_ref), global_id)
                reflections.extend(sub_ref)
                experiments.append(combine(exp))
                global_id += 1

        # print number of reflections per experiment
        from libtbx.table_utils import simple_table
        header = ["Experiment", "Nref"]
        rows = [(str(i), str(n)) for (i, n) in enumerate(nrefs_per_exp)]
        st = simple_table(rows, header)
        print(st.format())

        # save output
        from dxtbx.model.experiment.experiment_list import ExperimentListDumper
        print('Saving combined experiments to {0}'.format(
            params.output.experiments_filename))
        dump = ExperimentListDumper(experiments)
        dump.as_json(params.output.experiments_filename)
        print('Saving combined reflections to {0}'.format(
            params.output.reflections_filename))
        reflections.as_pickle(params.output.reflections_filename)

        return
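Examples #9, #10 and #15 all build their global lists with the same id-renumbering idiom. Distilled into a sketch, where tables and n_exp are hypothetical stand-ins for the loop over input wrappers:

from dials.array_family import flex

reflections = flex.reflection_table()
global_id = 0
for refs in tables:          # hypothetical list of input reflection tables
    for i in range(n_exp):   # n_exp: number of experiments behind this table (assumed)
        sub_ref = refs.select(refs['id'] == i)             # reflections of experiment i
        sub_ref['id'] = flex.int(len(sub_ref), global_id)  # renumber into the global series
        reflections.extend(sub_ref)
        global_id += 1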