Example #1
    def scale(self):
        """Actually perform the scaling - this is delegated to the
        implementation."""

        if not self._scalr_integraters:
            raise RuntimeError(
                "no Integrater implementations assigned for scaling")

        xname = self._scalr_xcrystal.get_name()

        # Nested "done flag" loops: each stage marks itself done before it
        # runs; a stage may clear an earlier flag to force that work to be
        # repeated on the next pass.
        while not self.get_scaler_finish_done():
            while not self.get_scaler_done():
                while not self.get_scaler_prepare_done():

                    logger.notice(banner("Preparing %s" % xname))

                    self._scalr_prepare_done = True
                    self._scale_prepare()

                logger.notice(banner("Scaling %s" % xname))

                self._scalr_done = True
                self._scalr_result = self._scale()

            self._scalr_finish_done = True
            self._scale_finish()

        return self._scalr_result
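The nested while loops above form a small retry state machine: each stage marks its own "done" flag before running, and a later stage can clear an earlier flag so the outer loops repeat that work. A minimal, self-contained sketch of the same pattern (the class and names below are illustrative, not xia2 API):

class DoneFlagPipeline:
    """Illustrative retry loop: a stage may clear an earlier flag to force a rerun."""

    def __init__(self):
        self._prepare_done = False
        self._work_done = False
        self._attempts = 0

    def _prepare(self):
        self._attempts += 1

    def _work(self):
        # Pretend the first attempt finds a problem and demands re-preparation.
        if self._attempts < 2:
            self._prepare_done = False
            self._work_done = False
            return None
        return "result after %d preparation passes" % self._attempts

    def run(self):
        result = None
        while not self._work_done:
            while not self._prepare_done:
                self._prepare_done = True
                self._prepare()
            self._work_done = True
            result = self._work()
        return result


print(DoneFlagPipeline().run())  # result after 2 preparation passes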
Example #2
    def _index_prepare(self):
        logger.notice(banner("Spotfinding %s" % self.get_indexer_sweep_name()))
        super(XDSIndexerII, self)._index_prepare()

        reflections_file = spot_xds_to_reflection_file(
            self._indxr_payload["SPOT.XDS"],
            working_directory=self.get_working_directory(),
        )
        refl = flex.reflection_table.from_file(reflections_file)
        logger.info(spot_counts_per_image_plot(refl))
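This example converts SPOT.XDS into a DIALS reflection file, reads it back with the flex API, and logs a per-image spot-count plot. A minimal sketch of inspecting such a file directly, assuming the usual DIALS import path and an illustrative file name ("strong.refl"), neither of which is taken from the example above:

from dials.array_family import flex  # assumed import path for the DIALS flex API

refl = flex.reflection_table.from_file("strong.refl")  # illustrative file name
print("Read %d reflections" % len(refl))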
Example #3
    def get_output(self):

        result = "Crystal: %s\n" % self._name

        if self._aa_sequence:
            result += "Sequence: %s\n" % self._aa_sequence.get_sequence()
        for wavelength in self._wavelengths.keys():
            result += self._wavelengths[wavelength].get_output()

        scaler = self._get_scaler()
        if scaler.get_scaler_finish_done():
            for wname, xwav in self._wavelengths.items():
                for xsweep in xwav.get_sweeps():
                    idxr = xsweep._get_indexer()
                    if PhilIndex.params.xia2.settings.show_template:
                        result += "%s\n" % banner(
                            "Autoindexing %s (%s)" %
                            (idxr.get_indexer_sweep_name(),
                             idxr.get_template()))
                    else:
                        result += "%s\n" % banner(
                            "Autoindexing %s" % idxr.get_indexer_sweep_name())
                    result += "%s\n" % idxr.show_indexer_solutions()

                    intgr = xsweep._get_integrater()
                    if PhilIndex.params.xia2.settings.show_template:
                        result += "%s\n" % banner(
                            "Integrating %s (%s)" %
                            (intgr.get_integrater_sweep_name(),
                             intgr.get_template()))
                    else:
                        result += "%s\n" % banner(
                            "Integrating %s" %
                            intgr.get_integrater_sweep_name())
                    result += "%s\n" % intgr.show_per_image_statistics()

            result += "%s\n" % banner("Scaling %s" % self.get_name())

            for (
                (dname, sname),
                (limit, suggested),
            ) in scaler.get_scaler_resolution_limits().items():
                if suggested is None or limit == suggested:
                    result += "Resolution limit for %s/%s: %5.2f\n" % (
                        dname,
                        sname,
                        limit,
                    )
                else:
                    result += (
                        "Resolution limit for %s/%s: %5.2f (%5.2f suggested)\n"
                        % (dname, sname, limit, suggested))

        # this is now deprecated - be explicit in what you are
        # asking for...
        reflections_all = self.get_scaled_merged_reflections()
        statistics_all = self._get_scaler().get_scaler_statistics()

        # print some of these statistics, perhaps?

        for key in statistics_all.keys():
            result += format_statistics(statistics_all[key],
                                        caption="For %s/%s/%s" % key)

        # then print out some "derived" information based on the
        # scaling - this is presented through the Scaler interface
        # explicitly...

        cell = self._get_scaler().get_scaler_cell()
        cell_esd = self._get_scaler().get_scaler_cell_esd()
        spacegroups = self._get_scaler().get_scaler_likely_spacegroups()

        spacegroup = spacegroups[0]
        resolution = self._get_scaler().get_scaler_highest_resolution()

        from cctbx import sgtbx

        sg = sgtbx.space_group_type(str(spacegroup))
        spacegroup = sg.lookup_symbol()
        CIF.set_spacegroup(sg)
        mmCIF.set_spacegroup(sg)

        if len(self._wavelengths) == 1:
            CIF.set_wavelengths(
                [w.get_wavelength() for w in self._wavelengths.values()])
            mmCIF.set_wavelengths(
                [w.get_wavelength() for w in self._wavelengths.values()])
        else:
            for wavelength in self._wavelengths.keys():
                full_wave_name = "%s_%s_%s" % (
                    self._project._name,
                    self._name,
                    wavelength,
                )
                CIF.get_block(full_wave_name)[
                    "_diffrn_radiation_wavelength"] = self._wavelengths[
                        wavelength].get_wavelength()
                mmCIF.get_block(full_wave_name)[
                    "_diffrn_radiation_wavelength"] = self._wavelengths[
                        wavelength].get_wavelength()
            CIF.set_wavelengths({
                name: wave.get_wavelength()
                for name, wave in self._wavelengths.items()
            })
            mmCIF.set_wavelengths({
                name: wave.get_wavelength()
                for name, wave in self._wavelengths.items()
            })

        result += "Assuming spacegroup: %s\n" % spacegroup
        if len(spacegroups) > 1:
            result += "Other likely alternatives are:\n"
            for sg in spacegroups[1:]:
                result += "%s\n" % sg

        if cell_esd:
            from libtbx.utils import format_float_with_standard_uncertainty

            def match_formatting(dimA, dimB):
                def conditional_split(s):
                    return ((s[:s.index(".")],
                             s[s.index("."):]) if "." in s else (s, ""))

                A, B = conditional_split(dimA), conditional_split(dimB)
                maxlen = (max(len(A[0]), len(B[0])), max(len(A[1]), len(B[1])))
                return (
                    A[0].rjust(maxlen[0]) + A[1].ljust(maxlen[1]),
                    B[0].rjust(maxlen[0]) + B[1].ljust(maxlen[1]),
                )

            formatted_cell_esds = tuple(
                format_float_with_standard_uncertainty(v, sd)
                for v, sd in zip(cell, cell_esd))
            formatted_rows = (formatted_cell_esds[0:3],
                              formatted_cell_esds[3:6])
            formatted_rows = list(
                zip(*(match_formatting(l, a)
                      for l, a in zip(*formatted_rows))))
            result += "Unit cell (with estimated std devs):\n"
            result += "%s %s %s\n%s %s %s\n" % (formatted_rows[0] +
                                                formatted_rows[1])
        else:
            result += "Unit cell:\n"
            result += "%7.3f %7.3f %7.3f\n%7.3f %7.3f %7.3f\n" % tuple(cell)

        # now, use this information and the sequence (if provided)
        # and also matthews_coef (should I be using this directly, here?)
        # to compute a likely number of molecules in the ASU and also
        # the solvent content...

        if self._aa_sequence:
            residues = self._aa_sequence.get_sequence()
            if residues:
                nres = len(residues)

                # first compute the number of molecules using the K&R
                # method

                nmol = compute_nmol(
                    cell[0],
                    cell[1],
                    cell[2],
                    cell[3],
                    cell[4],
                    cell[5],
                    spacegroup,
                    resolution,
                    nres,
                )

                # then compute the solvent fraction

                solvent = compute_solvent(
                    cell[0],
                    cell[1],
                    cell[2],
                    cell[3],
                    cell[4],
                    cell[5],
                    spacegroup,
                    nmol,
                    nres,
                )

                result += "Likely number of molecules in ASU: %d\n" % nmol
                result += "Giving solvent fraction:        %4.2f\n" % solvent

                self._nmol = nmol

        if isinstance(reflections_all, dict):
            for format in reflections_all.keys():
                result += "%s format:\n" % format
                reflections = reflections_all[format]

                if isinstance(reflections, dict):
                    for wavelength in reflections.keys():
                        target = FileHandler.get_data_file(
                            reflections[wavelength])
                        result += "Scaled reflections (%s): %s\n" % (
                            wavelength, target)

                else:
                    target = FileHandler.get_data_file(reflections)
                    result += "Scaled reflections: %s\n" % target

        CIF.write_cif()
        mmCIF.write_cif()

        return result
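The unit-cell block above formats each cell parameter together with its estimated standard deviation and then column-aligns the two printed rows. A standalone sketch of that alignment step, using made-up, pre-formatted "value(esd)" strings so it runs without cctbx/libtbx:

def conditional_split(s):
    # Split "78.35(2)" into ("78", ".35(2)") so columns align on the decimal point.
    return (s[: s.index(".")], s[s.index(".") :]) if "." in s else (s, "")


def match_formatting(dim_a, dim_b):
    a, b = conditional_split(dim_a), conditional_split(dim_b)
    widths = (max(len(a[0]), len(b[0])), max(len(a[1]), len(b[1])))
    return (
        a[0].rjust(widths[0]) + a[1].ljust(widths[1]),
        b[0].rjust(widths[0]) + b[1].ljust(widths[1]),
    )


lengths = ["78.35(2)", "78.35(2)", "37.28(1)"]  # illustrative a, b, c
angles = ["90.0", "90.0", "90.0"]               # illustrative alpha, beta, gamma
rows = list(zip(*(match_formatting(l, a) for l, a in zip(lengths, angles))))
print("%s %s %s\n%s %s %s" % (rows[0] + rows[1]))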
Example #4
    def index(self):

        if not self.get_indexer_finish_done():
            f = inspect.currentframe().f_back.f_back
            m = f.f_code.co_filename
            l = f.f_lineno

            logger.debug("Index in %s called from %s %d" %
                         (self.__class__.__name__, m, l))

        while not self.get_indexer_finish_done():
            while not self.get_indexer_done():
                while not self.get_indexer_prepare_done():

                    # --------------
                    # call prepare()
                    # --------------

                    self.set_indexer_prepare_done(True)
                    self._index_prepare()

                # --------------------------------------------
                # then do the proper indexing - using the best
                # solution already stored if available (c/f
                # eliminate above)
                # --------------------------------------------

                self.set_indexer_done(True)

                if self.get_indexer_sweeps():
                    xsweeps = [s.get_name() for s in self.get_indexer_sweeps()]
                    if len(xsweeps) > 1:
                        # find "SWEEPn, SWEEP(n+1), (..), SWEEPm" and aggregate to "SWEEPS n-m"
                        xsweeps = [(int(x[5:]),
                                    int(x[5:])) if x.startswith("SWEEP") else x
                                   for x in xsweeps]
                        xsweeps[0] = [xsweeps[0]]

                        def compress(seen, nxt):
                            if (isinstance(seen[-1], tuple)
                                    and isinstance(nxt, tuple)
                                    and (seen[-1][1] + 1 == nxt[0])):
                                seen[-1] = (seen[-1][0], nxt[1])
                            else:
                                seen.append(nxt)
                            return seen

                        xsweeps = reduce(compress, xsweeps)
                        xsweeps = [
                            ("SWEEP%d" %
                             x[0] if x[0] == x[1] else "SWEEPS %d to %d" %
                             (x[0], x[1])) if isinstance(x, tuple) else x
                            for x in xsweeps
                        ]
                    if len(xsweeps) > 1:
                        sweep_names = ", ".join(xsweeps[:-1])
                        sweep_names += " & " + xsweeps[-1]
                    else:
                        sweep_names = xsweeps[0]

                    if PhilIndex.params.xia2.settings.show_template:
                        template = self.get_indexer_sweep().get_template()
                        logger.notice(
                            banner("Autoindexing %s (%s)" %
                                   (sweep_names, template)))
                    else:
                        logger.notice(banner("Autoindexing %s" % sweep_names))

                if not self._indxr_helper:
                    self._index()

                    if not self._indxr_done:
                        logger.debug("Looks like indexing failed - try again!")
                        continue

                    solutions = {
                        k: c["cell"]
                        for k, c in self._indxr_other_lattice_cell.items()
                    }

                    # create a helper for the indexer to manage solutions
                    self._indxr_helper = _IndexerHelper(solutions)

                    solution = self._indxr_helper.get()

                    # compare these against the final solution, if different
                    # reject solution and return - correct solution will
                    # be used next cycle

                    if (self._indxr_lattice != solution[0]
                            and not self._indxr_input_cell and
                            not PhilIndex.params.xia2.settings.integrate_p1):
                        logger.info(
                            "Rerunning indexing lattice %s to %s",
                            self._indxr_lattice,
                            solution[0],
                        )
                        self.set_indexer_done(False)

                else:
                    # rerun autoindexing with the best known current solution

                    solution = self._indxr_helper.get()
                    self._indxr_input_lattice = solution[0]
                    self._indxr_input_cell = solution[1]
                    self._index()

            # next finish up...

            self.set_indexer_finish_done(True)
            self._index_finish()

            if self._indxr_print:
                logger.info(self.show_indexer_solutions())
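The block that collapses consecutive sweep names ("SWEEP1, SWEEP2, SWEEP3" into "SWEEPS 1 to 3") can be exercised on its own. A standalone version of that reduce-based compression, with illustrative input names:

from functools import reduce


def compress(seen, nxt):
    # Merge nxt into the last range if the numbering is contiguous.
    if isinstance(seen[-1], tuple) and isinstance(nxt, tuple) and seen[-1][1] + 1 == nxt[0]:
        seen[-1] = (seen[-1][0], nxt[1])
    else:
        seen.append(nxt)
    return seen


names = ["SWEEP1", "SWEEP2", "SWEEP3", "SWEEP7"]  # illustrative
items = [(int(n[5:]), int(n[5:])) if n.startswith("SWEEP") else n for n in names]
items[0] = [items[0]]  # seed the accumulator for reduce()
merged = reduce(compress, items)
print([
    ("SWEEP%d" % x[0] if x[0] == x[1] else "SWEEPS %d to %d" % x)
    if isinstance(x, tuple) else x
    for x in merged
])  # ['SWEEPS 1 to 3', 'SWEEP7']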
Example #5
    def _index_prepare(self):

        Citations.cite("dials")

        # all_images = self.get_matching_images()
        # first = min(all_images)
        # last = max(all_images)

        spot_lists = []
        experiments_filenames = []

        for imageset, xsweep in zip(self._indxr_imagesets, self._indxr_sweeps):

            logger.notice(banner("Spotfinding %s" % xsweep.get_name()))

            first, last = imageset.get_scan().get_image_range()

            # at this stage, break out to run the DIALS code: this sets itself
            # up; now cheat and pass in some information to save re-reading
            # all of the image headers

            # FIXME need to adjust this to allow (say) three chunks of images

            from dxtbx.model.experiment_list import ExperimentListFactory

            sweep_filename = os.path.join(self.get_working_directory(),
                                          "%s_import.expt" % xsweep.get_name())
            ExperimentListFactory.from_imageset_and_crystal(
                imageset, None).as_file(sweep_filename)

            genmask = self.GenerateMask()
            genmask.set_input_experiments(sweep_filename)
            genmask.set_output_experiments(
                os.path.join(
                    self.get_working_directory(),
                    "%s_%s_masked.expt" %
                    (genmask.get_xpid(), xsweep.get_name()),
                ))
            genmask.set_params(PhilIndex.params.dials.masking)
            sweep_filename, mask_pickle = genmask.run()
            logger.debug("Generated mask for %s: %s", xsweep.get_name(),
                         mask_pickle)

            gain = PhilIndex.params.xia2.settings.input.gain
            if gain is libtbx.Auto:
                gain_estimater = self.EstimateGain()
                gain_estimater.set_sweep_filename(sweep_filename)
                gain_estimater.run()
                gain = gain_estimater.get_gain()
                logger.info("Estimated gain: %.2f", gain)
                PhilIndex.params.xia2.settings.input.gain = gain

            # FIXME this should really use the assigned spot finding regions
            # offset = self.get_frame_offset()
            dfs_params = PhilIndex.params.dials.find_spots
            spotfinder = self.Spotfinder()
            if last - first > 10:
                spotfinder.set_write_hot_mask(True)
            spotfinder.set_input_sweep_filename(sweep_filename)
            spotfinder.set_output_sweep_filename(
                "%s_%s_strong.expt" %
                (spotfinder.get_xpid(), xsweep.get_name()))
            spotfinder.set_input_spot_filename(
                "%s_%s_strong.refl" %
                (spotfinder.get_xpid(), xsweep.get_name()))
            if PhilIndex.params.dials.fast_mode:
                wedges = self._index_select_images_i(imageset)
                spotfinder.set_scan_ranges(wedges)
            else:
                spotfinder.set_scan_ranges([(first, last)])
            if dfs_params.phil_file is not None:
                spotfinder.set_phil_file(dfs_params.phil_file)
            if dfs_params.min_spot_size is libtbx.Auto:
                if imageset.get_detector()[0].get_type() == "SENSOR_PAD":
                    dfs_params.min_spot_size = 3
                else:
                    dfs_params.min_spot_size = None
            if dfs_params.min_spot_size is not None:
                spotfinder.set_min_spot_size(dfs_params.min_spot_size)
            if dfs_params.min_local is not None:
                spotfinder.set_min_local(dfs_params.min_local)
            if dfs_params.sigma_strong:
                spotfinder.set_sigma_strong(dfs_params.sigma_strong)
            gain = PhilIndex.params.xia2.settings.input.gain
            if gain:
                spotfinder.set_gain(gain)
            if dfs_params.filter_ice_rings:
                spotfinder.set_filter_ice_rings(dfs_params.filter_ice_rings)
            if dfs_params.kernel_size:
                spotfinder.set_kernel_size(dfs_params.kernel_size)
            if dfs_params.global_threshold is not None:
                spotfinder.set_global_threshold(dfs_params.global_threshold)
            if dfs_params.threshold.algorithm is not None:
                spotfinder.set_threshold_algorithm(
                    dfs_params.threshold.algorithm)
            spotfinder.run()

            spot_filename = spotfinder.get_spot_filename()
            if not os.path.exists(spot_filename):
                raise RuntimeError("Spotfinding failed: %s does not exist." %
                                   os.path.basename(spot_filename))

            spot_lists.append(spot_filename)
            experiments_filenames.append(
                spotfinder.get_output_sweep_filename())

            refl = flex.reflection_table.from_file(spot_filename)
            if not len(refl):
                raise RuntimeError("No spots found in sweep %s" %
                                   xsweep.get_name())
            logger.info(spot_counts_per_image_plot(refl))

            if not PhilIndex.params.dials.fast_mode:
                detectblanks = self.DetectBlanks()
                detectblanks.set_sweep_filename(experiments_filenames[-1])
                detectblanks.set_reflections_filename(spot_filename)
                detectblanks.run()
                json = detectblanks.get_results()
                blank_regions = json["strong"]["blank_regions"]
                if len(blank_regions):
                    blank_regions = [(int(s), int(e))
                                     for s, e in blank_regions]
                    for blank_start, blank_end in blank_regions:
                        logger.info(
                            "WARNING: Potential blank images: %i -> %i",
                            blank_start + 1,
                            blank_end,
                        )

                    if PhilIndex.params.xia2.settings.remove_blanks:
                        non_blanks = []
                        start, end = imageset.get_array_range()
                        last_blank_end = start
                        for blank_start, blank_end in blank_regions:
                            if blank_start > start:
                                non_blanks.append(
                                    (last_blank_end, blank_start))
                            last_blank_end = blank_end

                        if last_blank_end + 1 < end:
                            non_blanks.append((last_blank_end, end))

                        xsweep = self.get_indexer_sweep()
                        xwav = xsweep.get_wavelength()
                        xsample = xsweep.sample

                        sweep_name = xsweep.get_name()

                        for i, (nb_start, nb_end) in enumerate(non_blanks):
                            assert i < 26
                            if i == 0:
                                sub_imageset = imageset[nb_start -
                                                        start:nb_end - start]
                                xsweep._frames_to_process = (nb_start + 1,
                                                             nb_end + 1)
                                self.set_indexer_prepare_done(done=False)
                                self._indxr_imagesets[
                                    self._indxr_imagesets.index(
                                        imageset)] = sub_imageset
                                xsweep._integrater._setup_from_imageset(
                                    sub_imageset)
                            else:
                                min_images = (PhilIndex.params.xia2.settings.
                                              input.min_images)
                                if (nb_end - nb_start) < min_images:
                                    continue
                                new_name = "_".join(
                                    (sweep_name, string.ascii_lowercase[i]))
                                new_sweep = xwav.add_sweep(
                                    new_name,
                                    xsample,
                                    directory=os.path.join(
                                        os.path.basename(
                                            xsweep.get_directory()),
                                        new_name,
                                    ),
                                    image=imageset.get_path(nb_start - start),
                                    frames_to_process=(nb_start + 1, nb_end),
                                    beam=xsweep.get_beam_centre(),
                                    reversephi=xsweep.get_reversephi(),
                                    distance=xsweep.get_distance(),
                                    gain=xsweep.get_gain(),
                                    dmin=xsweep.get_resolution_high(),
                                    dmax=xsweep.get_resolution_low(),
                                    polarization=xsweep.get_polarization(),
                                    user_lattice=xsweep.get_user_lattice(),
                                    user_cell=xsweep.get_user_cell(),
                                    epoch=xsweep._epoch,
                                    ice=xsweep._ice,
                                    excluded_regions=xsweep._excluded_regions,
                                )
                                logger.info(
                                    "Generating new sweep: %s (%s:%i:%i)",
                                    new_sweep.get_name(),
                                    new_sweep.get_image(),
                                    new_sweep.get_frames_to_process()[0],
                                    new_sweep.get_frames_to_process()[1],
                                )
                        return

            if not PhilIndex.params.xia2.settings.trust_beam_centre:
                discovery = self.SearchBeamPosition()
                discovery.set_sweep_filename(experiments_filenames[-1])
                discovery.set_spot_filename(spot_filename)

                # set scan_range to correspond to not more than 180 degrees
                # if we have > 20000 reflections
                width = imageset.get_scan().get_oscillation()[1]
                if (last - first) * width > 180.0 and len(refl) > 20000:
                    end = first + int(round(180.0 / width)) - 1
                    logger.debug("Using %d to %d for beam search", first, end)
                    discovery.set_image_range((first, end))

                try:
                    discovery.run()
                    result = discovery.get_optimized_experiments_filename()
                    # overwrite indexed.expt in experiments list
                    experiments_filenames[-1] = result
                except Exception as e:
                    logger.debug("DIALS beam centre search failed: %s",
                                 str(e),
                                 exc_info=True)

        self.set_indexer_payload("spot_lists", spot_lists)
        self.set_indexer_payload("experiments", experiments_filenames)
Example #6
    def integrate(self):
        """Actually perform integration until we think we are done..."""

        while not self.get_integrater_finish_done():
            while not self.get_integrater_done():
                while not self.get_integrater_prepare_done():

                    logger.debug("Preparing to do some integration...")
                    self.set_integrater_prepare_done(True)

                    # if this raises an exception, perhaps the autoindexing
                    # solution has too high symmetry. if this is the case, then
                    # perform a self._intgr_indexer.eliminate() - this should
                    # reset the indexing system

                    try:
                        self._integrate_prepare()

                    except BadLatticeError as e:
                        logger.info("Rejecting bad lattice %s", str(e))
                        self._intgr_refiner.eliminate()
                        self._integrater_reset()

                # FIXME x1698 - may be the case that _integrate() returns the
                # raw intensities, _integrate_finish() returns intensities
                # which may have been adjusted or corrected. See #1698 below.

                logger.debug("Doing some integration...")

                self.set_integrater_done(True)

                template = self.get_integrater_sweep().get_template()

                if self._intgr_sweep_name:
                    if PhilIndex.params.xia2.settings.show_template:
                        logger.notice(
                            banner("Integrating %s (%s)" %
                                   (self._intgr_sweep_name, template)))
                    else:
                        logger.notice(
                            banner("Integrating %s" % self._intgr_sweep_name))
                try:

                    # 1698
                    self._intgr_hklout_raw = self._integrate()

                except BadLatticeError as e:
                    logger.info("Rejecting bad lattice %s", str(e))
                    self._intgr_refiner.eliminate()
                    self._integrater_reset()

            self.set_integrater_finish_done(True)
            try:
                # allow for the fact that postrefinement may be used
                # to reject the lattice...
                self._intgr_hklout = self._integrate_finish()

            except BadLatticeError as e:
                logger.info("Bad Lattice Error: %s", str(e))
                self._intgr_refiner.eliminate()
                self._integrater_reset()
        return self._intgr_hklout
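When _integrate_prepare() or _integrate() raises BadLatticeError, the integrater eliminates the offending lattice and resets itself, which clears the done flags so the loops run again with the next candidate. A condensed, self-contained sketch of that eliminate-and-retry idea (the exception class and candidate handling below are illustrative, not xia2 API):

class BadLatticeError(Exception):
    """Stand-in for the xia2 exception of the same name."""


candidates = ["tP", "oC", "aP"]  # lattice candidates, best first (illustrative)


def integrate(lattice):
    if lattice != "aP":
        raise BadLatticeError("refinement failed for %s" % lattice)
    return "integrated in %s" % lattice


result = None
done = False
while not done:
    done = True
    try:
        result = integrate(candidates[0])
    except BadLatticeError as e:
        print("Rejecting bad lattice: %s" % e)
        candidates.pop(0)  # analogue of eliminate()
        done = False       # analogue of the reset clearing the done flags
print(result)  # integrated in aP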