Example #1
File: DialsIndexer.py Project: xia2/xia2
  def _do_indexing(self, method=None):
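    """Run dials.index on the spot lists and sweeps gathered into the
    indexer payload, record the log file and HTML report, and return
    the indexer wrapper for later inspection."""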
    indexer = self.Index()
    for spot_list in self._indxr_payload["spot_lists"]:
      indexer.add_spot_filename(spot_list)
    for datablock in self._indxr_payload["datablocks"]:
      indexer.add_sweep_filename(datablock)
    if PhilIndex.params.dials.index.phil_file is not None:
      indexer.set_phil_file(PhilIndex.params.dials.index.phil_file)
    if PhilIndex.params.dials.index.max_cell:
      indexer.set_max_cell(PhilIndex.params.dials.index.max_cell)
    if PhilIndex.params.xia2.settings.small_molecule:
      indexer.set_min_cell(3)
    if PhilIndex.params.dials.fix_geometry:
      indexer.set_detector_fix('all')
      indexer.set_beam_fix('all')
    indexer.set_close_to_spindle_cutoff(
      PhilIndex.params.dials.close_to_spindle_cutoff)

    if self._indxr_input_lattice:
      indexer.set_indexer_input_lattice(self._indxr_input_lattice)
      Debug.write('Set lattice: %s' % self._indxr_input_lattice)

    if self._indxr_input_cell:
      indexer.set_indexer_input_cell(self._indxr_input_cell)
      Debug.write('Set cell: %f %f %f %f %f %f' % \
                  self._indxr_input_cell)
      original_cell = self._indxr_input_cell

    if method is None:
      if PhilIndex.params.dials.index.method is None:
        method = 'fft3d'
        Debug.write('Choosing indexing method: %s' % method)
      else:
        method = PhilIndex.params.dials.index.method

    FileHandler.record_log_file('%s INDEX' % self.get_indexer_full_name(),
                                indexer.get_log_file())
    indexer.run(method)

    if not os.path.exists(indexer.get_experiments_filename()):
      raise RuntimeError("Indexing has failed: see %s for more details."
                         %indexer.get_log_file())
    elif not os.path.exists(indexer.get_indexed_filename()):
      raise RuntimeError("Indexing has failed: %s does not exist."
                         %indexer.get_indexed_filename())

    report = self.Report()
    report.set_experiments_filename(indexer.get_experiments_filename())
    report.set_reflections_filename(indexer.get_indexed_filename())
    html_filename = os.path.join(
      self.get_working_directory(),
      '%i_dials.index.report.html' %report.get_xpid())
    report.set_html_filename(html_filename)
    report.run()
    assert os.path.exists(html_filename)
    FileHandler.record_html_file(
      '%s INDEX' %self.get_indexer_full_name(), html_filename)

    return indexer
Example #2
    def _scale_finish_chunk_3_truncate(self):
        for wavelength in self._scalr_scaled_refl_files.keys():

            hklin = self._scalr_scaled_refl_files[wavelength]

            truncate = self._factory.Truncate()
            truncate.set_hklin(hklin)

            if self.get_scaler_anomalous():
                truncate.set_anomalous(True)
            else:
                truncate.set_anomalous(False)

            FileHandler.record_log_file('%s %s %s truncate' % \
                                        (self._scalr_pname,
                                         self._scalr_xname,
                                         wavelength),
                                        truncate.get_log_file())

            hklout = os.path.join(self.get_working_directory(),
                                  '%s_truncated.mtz' % wavelength)

            truncate.set_hklout(hklout)
            truncate.truncate()

            xmlout = truncate.get_xmlout()
            if xmlout is not None:
                FileHandler.record_xml_file('%s %s %s truncate' % \
                                            (self._scalr_pname,
                                             self._scalr_xname,
                                             wavelength),
                                            xmlout)

            Debug.write('%d absent reflections in %s removed' % \
                        (truncate.get_nabsent(), wavelength))

            b_factor = truncate.get_b_factor()

            # record the b factor somewhere (hopefully) useful...

            self._scalr_statistics[(self._scalr_pname, self._scalr_xname,
                                    wavelength)]['Wilson B factor'] = [
                                        b_factor
                                    ]

            # and record the reflection file..
            self._scalr_scaled_refl_files[wavelength] = hklout
Example #3
    def dials_symmetry_decide_pointgroup(self, experiments, reflections):
        """Run the symmetry analyser and return it for later inspection."""
        symmetry_analyser = DialsSymmetry()
        symmetry_analyser.set_working_directory(self.get_working_directory())
        auto_logfiler(symmetry_analyser)

        FileHandler.record_log_file(
            "%s %s SYMMETRY" % (self._scalr_pname, self._scalr_xname),
            symmetry_analyser.get_log_file(),
        )

        for (exp, refl) in zip(experiments, reflections):
            symmetry_analyser.add_experiments(exp)
            symmetry_analyser.add_reflections(refl)
        symmetry_analyser.decide_pointgroup()

        return symmetry_analyser
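
A minimal call-site sketch for the method above; the accessor names on the
returned analyser are assumptions, not confirmed by this listing:

    # hypothetical usage - accessor names assumed, not taken from this listing
    analyser = scaler.dials_symmetry_decide_pointgroup(experiments, reflections)
    pointgroup = analyser.get_pointgroup()        # assumed accessor
    reindex_op = analyser.get_reindex_operator()  # assumed accessor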
Example #4
    def _index(self):
        """Actually do the autoindexing using the data prepared by the
        previous method."""

        images_str = "%d to %d" % tuple(self._indxr_images[0])
        for i in self._indxr_images[1:]:
            images_str += ", %d to %d" % tuple(i)

        cell_str = None
        if self._indxr_input_cell:
            cell_str = "%.2f %.2f %.2f %.2f %.2f %.2f" % self._indxr_input_cell

        # then this is a proper autoindexing run - describe this
        # to the journal entry

        dirname = self.get_directory()

        Journal.block(
            "autoindexing",
            self._indxr_sweep_name,
            "XDS",
            {
                "images": images_str,
                "target cell": cell_str,
                "target lattice": self._indxr_input_lattice,
                "template": self.get_template(),
                "directory": dirname,
            },
        )

        idxref = self.Idxref()

        # edit SPOT.XDS to remove reflections in untrusted regions of the detector
        self._index_remove_masked_regions()

        for file in ["SPOT.XDS"]:
            idxref.set_input_data_file(file, self._indxr_payload[file])

        idxref.set_data_range(self._indxr_images[0][0],
                              self._indxr_images[0][1])
        idxref.set_background_range(self._indxr_images[0][0],
                                    self._indxr_images[0][1])

        # set the phi start etc correctly

        for block in self._indxr_images[:1]:
            starting_frame = block[0]
            starting_angle = self.get_scan().get_angle_from_image_index(
                starting_frame)

            idxref.set_starting_frame(starting_frame)
            idxref.set_starting_angle(starting_angle)

            idxref.add_spot_range(block[0], block[1])

        for block in self._indxr_images[1:]:
            idxref.add_spot_range(block[0], block[1])

        if self._indxr_user_input_lattice:
            idxref.set_indexer_user_input_lattice(True)

        if self._indxr_input_lattice and self._indxr_input_cell:
            idxref.set_indexer_input_lattice(self._indxr_input_lattice)
            idxref.set_indexer_input_cell(self._indxr_input_cell)

            Debug.write("Set lattice: %s" % self._indxr_input_lattice)
            Debug.write("Set cell: %f %f %f %f %f %f" % self._indxr_input_cell)

            original_cell = self._indxr_input_cell
        elif self._indxr_input_lattice:
            idxref.set_indexer_input_lattice(self._indxr_input_lattice)
            original_cell = None
        else:
            original_cell = None

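        # convert the experiment geometry to the XDS frame to obtain the
        # beam centre in the convention IDXREF expects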
        from dxtbx.serialize.xds import to_xds

        converter = to_xds(self.get_imageset())
        xds_beam_centre = converter.detector_origin

        idxref.set_beam_centre(xds_beam_centre[0], xds_beam_centre[1])

        # fixme need to check if the lattice, cell have been set already,
        # and if they have, pass these in as input to the indexing job.

        done = False

        while not done:
            try:
                done = idxref.run()

                # N.B. in here if the IDXREF step was being run in the first
                # pass done is FALSE however there should be a refined
                # P1 orientation matrix etc. available - so keep it!

            except XDSException as e:
                # inspect this - if we have complaints about not
                # enough reflections indexed, and we have a target
                # unit cell, and they are the same, well ignore it

                if "solution is inaccurate" in str(e):
                    Debug.write("XDS complains solution inaccurate - ignoring")
                    done = idxref.continue_from_error()
                elif ("insufficient percentage (< 70%)" in str(e)
                      or "insufficient percentage (< 50%)"
                      in str(e)) and original_cell:
                    done = idxref.continue_from_error()
                    lattice, cell, mosaic = idxref.get_indexing_solution()
                    # compare solutions FIXME should use xds_cell_deviation
                    check = PhilIndex.params.xia2.settings.xds_check_cell_deviation
                    for j in range(3):
                        # allow two percent variation in unit cell length
                        if (math.fabs(
                            (cell[j] - original_cell[j]) / original_cell[j]) >
                                0.02 and check):
                            Debug.write("XDS unhappy and solution wrong")
                            raise e
                        # and two degree difference in angle
                        if (math.fabs(cell[j + 3] - original_cell[j + 3]) > 2.0
                                and check):
                            Debug.write("XDS unhappy and solution wrong")
                            raise e
                    Debug.write("XDS unhappy but solution ok")
                elif "insufficient percentage (< 70%)" in str(
                        e) or "insufficient percentage (< 50%)" in str(e):
                    Debug.write("XDS unhappy but solution probably ok")
                    done = idxref.continue_from_error()
                else:
                    raise e

        FileHandler.record_log_file(
            "%s INDEX" % self.get_indexer_full_name(),
            os.path.join(self.get_working_directory(), "IDXREF.LP"),
        )

        for file in ["SPOT.XDS", "XPARM.XDS"]:
            self._indxr_payload[file] = idxref.get_output_data_file(file)

        # need to get the indexing solutions out somehow...

        self._indxr_other_lattice_cell = idxref.get_indexing_solutions()

        self._indxr_lattice, self._indxr_cell, self._indxr_mosaic = (
            idxref.get_indexing_solution())

        import dxtbx
        from dxtbx.serialize.xds import to_crystal

        xparm_file = os.path.join(self.get_working_directory(), "XPARM.XDS")
        models = dxtbx.load(xparm_file)
        crystal_model = to_crystal(xparm_file)

        from dxtbx.model import Experiment, ExperimentList

        experiment = Experiment(
            beam=models.get_beam(),
            detector=models.get_detector(),
            goniometer=models.get_goniometer(),
            scan=models.get_scan(),
            crystal=crystal_model,
            # imageset=self.get_imageset(),
        )

        experiment_list = ExperimentList([experiment])
        self.set_indexer_experiment_list(experiment_list)

        # I will want this later on to check that the lattice was ok
        self._idxref_subtree_problem = idxref.get_index_tree_problem()

        return
Example #5
    def _scale(self):
        "Perform all of the operations required to deliver the scaled data."

        epochs = self._sweep_handler.get_epochs()

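        # first pass: a trial round of scaling, used below to assess the
        # resolution limits before the production scaling runs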
        sc = self._updated_aimless()
        sc.set_hklin(self._prepared_reflections)
        sc.set_chef_unmerged(True)
        sc.set_new_scales_file("%s.scales" % self._scalr_xname)

        user_resolution_limits = {}

        for epoch in epochs:
            si = self._sweep_handler.get_sweep_information(epoch)
            pname, xname, dname = si.get_project_info()
            sname = si.get_sweep_name()
            intgr = si.get_integrater()

            if intgr.get_integrater_user_resolution():
                dmin = intgr.get_integrater_high_resolution()

                if (dname, sname) not in user_resolution_limits:
                    user_resolution_limits[(dname, sname)] = dmin
                elif dmin < user_resolution_limits[(dname, sname)]:
                    user_resolution_limits[(dname, sname)] = dmin

            start, end = si.get_batch_range()

            if (dname, sname) in self._scalr_resolution_limits:
                resolution, _ = self._scalr_resolution_limits[(dname, sname)]
                sc.add_run(start, end, exclude=False, resolution=resolution, name=sname)
            else:
                sc.add_run(start, end, name=sname)

        sc.set_hklout(
            os.path.join(
                self.get_working_directory(),
                f"{self._scalr_pname}_{self._scalr_xname}_scaled_test.mtz",
            )
        )

        if self.get_scaler_anomalous():
            sc.set_anomalous()

        # what follows, sucks

        failover = PhilIndex.params.xia2.settings.failover
        if failover:

            try:
                sc.scale()
            except RuntimeError as e:

                es = str(e)

                if (
                    "bad batch" in es
                    or "negative scales run" in es
                    or "no observations" in es
                ):

                    # first ID the sweep from the batch no

                    batch = int(es.split()[-1])
                    epoch = self._identify_sweep_epoch(batch)
                    sweep = self._scalr_integraters[epoch].get_integrater_sweep()

                    # then remove it from my parent xcrystal

                    self.get_scaler_xcrystal().remove_sweep(sweep)

                    # then remove it from the scaler list of integraters
                    # - this should really be a scaler interface method

                    del self._scalr_integraters[epoch]

                    # then tell the user what is happening

                    logger.info(
                        "Sweep %s gave negative scales - removing", sweep.get_name()
                    )

                    # then reset the prepare, do, finish flags

                    self.set_scaler_prepare_done(False)
                    self.set_scaler_done(False)
                    self.set_scaler_finish_done(False)

                    # and return
                    return

                else:

                    raise e

        else:
            sc.scale()

        # then gather up all of the resulting reflection files
        # and convert them into the required formats (.sca, .mtz).

        loggraph = sc.parse_ccp4_loggraph()

        resolution_info = {}

        reflection_files = sc.get_scaled_reflection_files()

        for dataset in reflection_files:
            FileHandler.record_temporary_file(reflection_files[dataset])

        for key in loggraph:
            if "Analysis against resolution" in key:
                dataset = key.split(",")[-1].strip()
                resolution_info[dataset] = transpose_loggraph(loggraph[key])

        # check in here that there is actually some data to scale..!

        if not resolution_info:
            raise RuntimeError("no resolution info")

        highest_suggested_resolution = self.assess_resolution_limits(
            sc.get_unmerged_reflection_file(), user_resolution_limits
        )

        if not self.get_scaler_done():
            logger.debug("Returning as scaling not finished...")
            return

        batch_info = {}

        for key in loggraph:
            if "Analysis against Batch" in key:
                dataset = key.split(",")[-1].strip()
                batch_info[dataset] = transpose_loggraph(loggraph[key])

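        # second pass: scale again with the decided resolution limits to
        # produce the final merged MTZ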
        sc = self._updated_aimless()

        FileHandler.record_log_file(
            f"{self._scalr_pname} {self._scalr_xname} aimless", sc.get_log_file()
        )

        sc.set_hklin(self._prepared_reflections)
        sc.set_new_scales_file("%s_final.scales" % self._scalr_xname)

        for epoch in epochs:

            si = self._sweep_handler.get_sweep_information(epoch)
            pname, xname, dname = si.get_project_info()
            sname = si.get_sweep_name()
            start, end = si.get_batch_range()

            resolution_limit, _ = self._scalr_resolution_limits[(dname, sname)]

            sc.add_run(
                start, end, exclude=False, resolution=resolution_limit, name=sname
            )

        sc.set_hklout(
            os.path.join(
                self.get_working_directory(),
                f"{self._scalr_pname}_{self._scalr_xname}_scaled.mtz",
            )
        )

        if self.get_scaler_anomalous():
            sc.set_anomalous()

        sc.scale()

        FileHandler.record_xml_file(
            f"{self._scalr_pname} {self._scalr_xname} aimless", sc.get_xmlout()
        )

        data = sc.get_summary()
        scales_file = sc.get_new_scales_file()
        loggraph = sc.parse_ccp4_loggraph()

        standard_deviation_info = {}

        for key in loggraph:
            if "standard deviation v. Intensity" in key:
                dataset = key.split(",")[-1].strip()
                standard_deviation_info[dataset] = transpose_loggraph(loggraph[key])

        resolution_info = {}

        for key in loggraph:
            if "Analysis against resolution" in key:
                dataset = key.split(",")[-1].strip()
                resolution_info[dataset] = transpose_loggraph(loggraph[key])

        batch_info = {}

        for key in loggraph:
            if "Analysis against Batch" in key:
                dataset = key.split(",")[-1].strip()
                batch_info[dataset] = transpose_loggraph(loggraph[key])

        # finally put all of the results "somewhere useful"

        self._scalr_statistics = data

        self._scalr_scaled_refl_files = copy.deepcopy(sc.get_scaled_reflection_files())

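        # third pass: recycle the refined scales to regenerate the scaled
        # data, this time also writing unmerged scalepack output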
        sc = self._updated_aimless()
        sc.set_hklin(self._prepared_reflections)
        sc.set_scales_file(scales_file)

        self._wavelengths_in_order = []

        for epoch in epochs:
            si = self._sweep_handler.get_sweep_information(epoch)
            pname, xname, dname = si.get_project_info()
            sname = si.get_sweep_name()
            start, end = si.get_batch_range()

            resolution_limit, _ = self._scalr_resolution_limits[(dname, sname)]

            sc.add_run(
                start, end, exclude=False, resolution=resolution_limit, name=sname
            )

            if dname not in self._wavelengths_in_order:
                self._wavelengths_in_order.append(dname)

        sc.set_hklout(
            os.path.join(
                self.get_working_directory(),
                f"{self._scalr_pname}_{self._scalr_xname}_scaled.mtz",
            )
        )

        sc.set_scalepack()

        if self.get_scaler_anomalous():
            sc.set_anomalous()
        sc.scale()

        self._update_scaled_unit_cell()

        self._scalr_scaled_reflection_files = {}
        self._scalr_scaled_reflection_files["sca"] = {}
        self._scalr_scaled_reflection_files["sca_unmerged"] = {}
        self._scalr_scaled_reflection_files["mtz_unmerged"] = {}

        for key in self._scalr_scaled_refl_files:
            hklout = self._scalr_scaled_refl_files[key]

            scaout = "%s.sca" % hklout[:-4]
            self._scalr_scaled_reflection_files["sca"][key] = scaout
            FileHandler.record_data_file(scaout)
            scalepack = os.path.join(
                os.path.split(hklout)[0],
                os.path.split(hklout)[1]
                .replace("_scaled", "_scaled_unmerged")
                .replace(".mtz", ".sca"),
            )
            self._scalr_scaled_reflection_files["sca_unmerged"][key] = scalepack
            FileHandler.record_data_file(scalepack)
            mtz_unmerged = os.path.splitext(scalepack)[0] + ".mtz"
            self._scalr_scaled_reflection_files["mtz_unmerged"][key] = mtz_unmerged
            FileHandler.record_data_file(mtz_unmerged)

            if self._scalr_cell_esd is not None:
                # patch .mtz and overwrite unit cell information
                import xia2.Modules.Scaler.tools as tools

                override_cell = self._scalr_cell_dict.get(
                    f"{self._scalr_pname}_{self._scalr_xname}_{key}"
                )[0]
                tools.patch_mtz_unit_cell(mtz_unmerged, override_cell)
                tools.patch_mtz_unit_cell(hklout, override_cell)

            self._scalr_scaled_reflection_files["mtz_unmerged"][key] = mtz_unmerged
            FileHandler.record_data_file(mtz_unmerged)

        if PhilIndex.params.xia2.settings.merging_statistics.source == "cctbx":
            for key in self._scalr_scaled_refl_files:
                stats = self._compute_scaler_statistics(
                    self._scalr_scaled_reflection_files["mtz_unmerged"][key],
                    selected_band=(highest_suggested_resolution, None),
                    wave=key,
                )
                self._scalr_statistics[
                    (self._scalr_pname, self._scalr_xname, key)
                ] = stats

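        # final pass: recycle the scales once more to write the unmerged
        # reflections used for chef radiation damage analysis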
        sc = self._updated_aimless()
        sc.set_hklin(self._prepared_reflections)
        sc.set_scales_file(scales_file)

        self._wavelengths_in_order = []

        for epoch in epochs:

            si = self._sweep_handler.get_sweep_information(epoch)
            pname, xname, dname = si.get_project_info()
            sname = si.get_sweep_name()
            start, end = si.get_batch_range()

            resolution_limit, _ = self._scalr_resolution_limits[(dname, sname)]

            sc.add_run(
                start, end, exclude=False, resolution=resolution_limit, name=sname
            )

            if dname not in self._wavelengths_in_order:
                self._wavelengths_in_order.append(dname)

        sc.set_hklout(
            os.path.join(
                self.get_working_directory(),
                f"{self._scalr_pname}_{self._scalr_xname}_chef.mtz",
            )
        )

        sc.set_chef_unmerged(True)

        if self.get_scaler_anomalous():
            sc.set_anomalous()
        sc.scale()
        if not PhilIndex.params.dials.fast_mode:
            try:
                self._generate_absorption_map(sc)
            except Exception as e:
                # Map generation may fail for a number of reasons, e.g. matplotlib broken
                logger.debug("Could not generate absorption map (%s)", e)
Example #6
    def _index(self):
        """Actually do the autoindexing using the data prepared by the
        previous method."""

        self._index_remove_masked_regions()

        if self._i_or_ii is None:
            self._i_or_ii = self.decide_i_or_ii()
            logger.debug("Selecting I or II, chose %s", self._i_or_ii)

        idxref = self.Idxref()

        for file in ["SPOT.XDS"]:
            idxref.set_input_data_file(file, self._indxr_payload[file])

        # set the phi start etc correctly

        idxref.set_data_range(self._indxr_images[0][0],
                              self._indxr_images[0][1])
        idxref.set_background_range(self._indxr_images[0][0],
                                    self._indxr_images[0][1])

        if self._i_or_ii == "i":
            blocks = self._index_select_images_i()
            for block in blocks[:1]:
                starting_frame = block[0]
                starting_angle = self.get_scan().get_angle_from_image_index(
                    starting_frame)

                idxref.set_starting_frame(starting_frame)
                idxref.set_starting_angle(starting_angle)

                idxref.add_spot_range(block[0], block[1])

            for block in blocks[1:]:
                idxref.add_spot_range(block[0], block[1])
        else:
            for block in self._indxr_images[:1]:
                starting_frame = block[0]
                starting_angle = self.get_scan().get_angle_from_image_index(
                    starting_frame)

                idxref.set_starting_frame(starting_frame)
                idxref.set_starting_angle(starting_angle)

                idxref.add_spot_range(block[0], block[1])

            for block in self._indxr_images[1:]:
                idxref.add_spot_range(block[0], block[1])

        # FIXME need to also be able to pass in the known unit
        # cell and lattice if already available e.g. from
        # the helper... indirectly

        if self._indxr_user_input_lattice:
            idxref.set_indexer_user_input_lattice(True)

        if self._indxr_input_lattice and self._indxr_input_cell:
            idxref.set_indexer_input_lattice(self._indxr_input_lattice)
            idxref.set_indexer_input_cell(self._indxr_input_cell)

            logger.debug("Set lattice: %s", self._indxr_input_lattice)
            logger.debug("Set cell: %f %f %f %f %f %f" %
                         self._indxr_input_cell)

            original_cell = self._indxr_input_cell
        elif self._indxr_input_lattice:
            idxref.set_indexer_input_lattice(self._indxr_input_lattice)
            original_cell = None
        else:
            original_cell = None

        # FIXED need to set the beam centre here - this needs to come
        # from the input .xinfo object or header, and be converted
        # to the XDS frame... done.

        from dxtbx.serialize.xds import to_xds

        converter = to_xds(self.get_imageset())
        xds_beam_centre = converter.detector_origin

        idxref.set_beam_centre(xds_beam_centre[0], xds_beam_centre[1])

        # fixme need to check if the lattice, cell have been set already,
        # and if they have, pass these in as input to the indexing job.

        done = False

        while not done:
            try:
                done = idxref.run()

                # N.B. in here if the IDXREF step was being run in the first
                # pass done is FALSE however there should be a refined
                # P1 orientation matrix etc. available - so keep it!

            except XDSException as e:
                # inspect this - if we have complaints about not
                # enough reflections indexed, and we have a target
                # unit cell, and they are the same, well ignore it

                if "solution is inaccurate" in str(e):
                    logger.debug(
                        "XDS complains solution inaccurate - ignoring")
                    done = idxref.continue_from_error()
                elif ("insufficient percentage (< 70%)" in str(e)
                      or "insufficient percentage (< 50%)"
                      in str(e)) and original_cell:
                    done = idxref.continue_from_error()
                    lattice, cell, mosaic = idxref.get_indexing_solution()
                    # compare solutions
                    check = PhilIndex.params.xia2.settings.xds_check_cell_deviation
                    for j in range(3):
                        # allow two percent variation in unit cell length
                        if (math.fabs(
                            (cell[j] - original_cell[j]) / original_cell[j]) >
                                0.02 and check):
                            logger.debug("XDS unhappy and solution wrong")
                            raise e
                        # and two degree difference in angle
                        if (math.fabs(cell[j + 3] - original_cell[j + 3]) > 2.0
                                and check):
                            logger.debug("XDS unhappy and solution wrong")
                            raise e
                    logger.debug("XDS unhappy but solution ok")
                elif "insufficient percentage (< 70%)" in str(
                        e) or "insufficient percentage (< 50%)" in str(e):
                    logger.debug("XDS unhappy but solution probably ok")
                    done = idxref.continue_from_error()
                else:
                    raise e

        FileHandler.record_log_file(
            "%s INDEX" % self.get_indexer_full_name(),
            os.path.join(self.get_working_directory(), "IDXREF.LP"),
        )

        for file in ["SPOT.XDS", "XPARM.XDS"]:
            self._indxr_payload[file] = idxref.get_output_data_file(file)

        # need to get the indexing solutions out somehow...

        self._indxr_other_lattice_cell = idxref.get_indexing_solutions()

        (
            self._indxr_lattice,
            self._indxr_cell,
            self._indxr_mosaic,
        ) = idxref.get_indexing_solution()

        xparm_file = os.path.join(self.get_working_directory(), "XPARM.XDS")
        models = dxtbx.load(xparm_file)
        crystal_model = to_crystal(xparm_file)

        # this information gets lost when re-creating the models from the
        # XDS results - however is not refined so can simply copy from the
        # input - https://github.com/xia2/xia2/issues/372
        models.get_detector()[0].set_thickness(
            converter.get_detector()[0].get_thickness())

        experiment = Experiment(
            beam=models.get_beam(),
            detector=models.get_detector(),
            goniometer=models.get_goniometer(),
            scan=models.get_scan(),
            crystal=crystal_model,
            # imageset=self.get_imageset(),
        )

        experiment_list = ExperimentList([experiment])
        self.set_indexer_experiment_list(experiment_list)

        # I will want this later on to check that the lattice was ok
        self._idxref_subtree_problem = idxref.get_index_tree_problem()
Example #7
File: CCP4ScalerA.py Project: hainm/xia2
    if not self.get_scaler_done():
      Debug.write('Returning as scaling not finished...')
      return

    batch_info = { }

    for key in loggraph:
      if 'Analysis against Batch' in key:
        dataset = key.split(',')[-1].strip()
        batch_info[dataset] = transpose_loggraph(
            loggraph[key])

    sc = self._updated_aimless()

    FileHandler.record_log_file('%s %s aimless' % (self._scalr_pname,
                                                   self._scalr_xname),
                                sc.get_log_file())

    highest_resolution = 100.0

    sc.set_hklin(self._prepared_reflections)
    sc.set_intensities(PhilIndex.params.ccp4.aimless.intensities)
    sc.set_new_scales_file('%s_final.scales' % self._scalr_xname)

    for epoch in epochs:

      si = self._sweep_handler.get_sweep_information(epoch)
      pname, xname, dname = si.get_project_info()
      sname = si.get_sweep_name()
      start, end = si.get_batch_range()
Example #8
  def _integrate_finish(self):
    '''Finish off the integration by running correct.'''

    # first run the postrefinement etc with spacegroup P1
    # and the current unit cell - this will be used to
    # obtain a benchmark rmsd in pixels / phi and also
    # cell deviations (this is working towards spotting bad
    # indexing solutions) - only do this if we have no
    # reindex matrix... and no postrefined cell...

    p1_deviations = None

    # fix for bug # 3264 -
    # if we have not run integration with refined parameters, make it so...
    # erm? shouldn't this therefore return if this is the principle, or
    # set the flag after we have tested the lattice?

    if 'GXPARM.XDS' not in self._xds_data_files and \
      PhilIndex.params.xds.integrate.reintegrate:
      Debug.write(
          'Resetting integrater, to ensure refined orientation is used')
      self.set_integrater_done(False)

    if not self.get_integrater_reindex_matrix() and not self._intgr_cell \
           and not Flags.get_no_lattice_test() and \
           not self.get_integrater_sweep().get_user_lattice():
      correct = self.Correct()

      correct.set_data_range(self._intgr_wedge[0],
                             self._intgr_wedge[1])

      if self.get_polarization() > 0.0:
        correct.set_polarization(self.get_polarization())

      # FIXME should this be using the correctly transformed
      # cell or are the results ok without it?!

      correct.set_spacegroup_number(1)
      correct.set_cell(self._intgr_refiner_cell)

      correct.run()

      # record the log file -

      pname, xname, dname = self.get_integrater_project_info()
      sweep = self.get_integrater_sweep_name()
      FileHandler.record_log_file('%s %s %s %s CORRECT' % \
                                  (pname, xname, dname, sweep),
                                  os.path.join(
          self.get_working_directory(),
          'CORRECT.LP'))

      FileHandler.record_more_data_file(
          '%s %s %s %s CORRECT' % (pname, xname, dname, sweep),
          os.path.join(self.get_working_directory(), 'XDS_ASCII.HKL'))

      cell = correct.get_result('cell')
      cell_esd = correct.get_result('cell_esd')

      Debug.write('Postrefinement in P1 results:')
      Debug.write('%7.3f %7.3f %7.3f %7.3f %7.3f %7.3f' % \
                  tuple(cell))
      Debug.write('%7.3f %7.3f %7.3f %7.3f %7.3f %7.3f' % \
                  tuple(cell_esd))
      Debug.write('Deviations: %.2f pixels %.2f degrees' % \
                  (correct.get_result('rmsd_pixel'),
                   correct.get_result('rmsd_phi')))

      p1_deviations = (correct.get_result('rmsd_pixel'),
                       correct.get_result('rmsd_phi'))

    # next run the postrefinement etc with the given
    # cell / lattice - this will be the assumed result...

    correct = self.Correct()

    correct.set_data_range(self._intgr_wedge[0],
                           self._intgr_wedge[1])

    if self.get_polarization() > 0.0:
      correct.set_polarization(self.get_polarization())

    # BUG # 2695 probably comes from here - need to check...
    # if the pointless interface comes back with a different
    # crystal setting then the unit cell stored in self._intgr_cell
    # needs to be set to None...

    if self.get_integrater_spacegroup_number():
      correct.set_spacegroup_number(
          self.get_integrater_spacegroup_number())
      if not self._intgr_cell:
        raise RuntimeError('no unit cell to recycle')
      correct.set_cell(self._intgr_cell)

    # BUG # 3113 - new version of XDS will try and figure the
    # best spacegroup out from the intensities (and get it wrong!)
    # unless we set the spacegroup and cell explicitly

    if not self.get_integrater_spacegroup_number():
      cell = self._intgr_refiner_cell
      lattice = self._intgr_refiner.get_refiner_lattice()
      spacegroup_number = lattice_to_spacegroup_number(lattice)

      # this should not prevent the postrefinement from
      # working correctly, else what is above would not
      # work correctly (the postrefinement test)

      correct.set_spacegroup_number(spacegroup_number)
      correct.set_cell(cell)

      Debug.write('Setting spacegroup to: %d' % spacegroup_number)
      Debug.write(
        'Setting cell to: %.2f %.2f %.2f %.2f %.2f %.2f' % tuple(cell))

    if self.get_integrater_reindex_matrix():

      # bug! if the lattice is not primitive the values in this
      # reindex matrix need to be multiplied by a constant which
      # depends on the Bravais lattice centering.

      lattice = self._intgr_refiner.get_refiner_lattice()

      import scitbx.matrix
      matrix = self.get_integrater_reindex_matrix()
      matrix = scitbx.matrix.sqr(matrix).transpose().elems
      matrix = r_to_rt(matrix)

      if lattice[1] == 'P':
        mult = 1
      elif lattice[1] == 'C' or lattice[1] == 'I':
        mult = 2
      elif lattice[1] == 'R':
        mult = 3
      elif lattice[1] == 'F':
        mult = 4
      else:
        raise RuntimeError('unknown multiplier for lattice %s' % lattice)

      Debug.write('REIDX multiplier for lattice %s: %d' % \
                  (lattice, mult))

      mult_matrix = [mult * m for m in matrix]

      Debug.write('REIDX set to %d %d %d %d %d %d %d %d %d %d %d %d' % \
                  tuple(mult_matrix))
      correct.set_reindex_matrix(mult_matrix)

    correct.run()

    # erm. just to be sure
    if self.get_integrater_reindex_matrix() and \
           correct.get_reindex_used():
      raise RuntimeError('Reindex panic!')

    # get the reindex operation used, which may be useful if none was
    # set but XDS decided to apply one, e.g. #419.

    if not self.get_integrater_reindex_matrix() and \
           correct.get_reindex_used():
      # convert this reindex operation to h, k, l form: n.b. this
      # will involve dividing through by the lattice centring multiplier

      matrix = rt_to_r(correct.get_reindex_used())
      import scitbx.matrix
      matrix = scitbx.matrix.sqr(matrix).transpose().elems

      lattice = self._intgr_refiner.get_refiner_lattice()

      if lattice[1] == 'P':
        mult = 1.0
      elif lattice[1] == 'C' or lattice[1] == 'I':
        mult = 2.0
      elif lattice[1] == 'R':
        mult = 3.0
      elif lattice[1] == 'F':
        mult = 4.0
      else:
        raise RuntimeError('unknown multiplier for lattice %s' % lattice)

      matrix = [m / mult for m in matrix]

      reindex_op = mat_to_symop(matrix)

      # assign this to self: will this reset?! make for a leaky
      # abstraction and just assign this...

      # self.set_integrater_reindex_operator(reindex)

      self._intgr_reindex_operator = reindex_op


    # record the log file -

    pname, xname, dname = self.get_integrater_project_info()
    sweep = self.get_integrater_sweep_name()
    FileHandler.record_log_file('%s %s %s %s CORRECT' % \
                                (pname, xname, dname, sweep),
                                os.path.join(self.get_working_directory(),
                                             'CORRECT.LP'))

    # should get some interesting stuff from the XDS correct file
    # here, for instance the resolution range to use in integration
    # (which should be fed back if not fast) and so on...

    self._intgr_corrected_hklout = os.path.join(self.get_working_directory(),
                                                'XDS_ASCII.HKL')

    # also record the batch range - needed for the analysis of the
    # radiation damage in chef...

    self._intgr_batches_out = (self._intgr_wedge[0],
                               self._intgr_wedge[1])

    # FIXME perhaps I should also feedback the GXPARM file here??
    for file in ['GXPARM.XDS']:
      self._xds_data_files[file] = correct.get_output_data_file(file)

    # record the postrefined cell parameters
    self._intgr_cell = correct.get_result('cell')
    self._intgr_n_ref = correct.get_result('n_ref')

    Debug.write('Postrefinement in "correct" spacegroup results:')
    Debug.write('%7.3f %7.3f %7.3f %7.3f %7.3f %7.3f' % \
                tuple(correct.get_result('cell')))
    Debug.write('%7.3f %7.3f %7.3f %7.3f %7.3f %7.3f' % \
                tuple(correct.get_result('cell_esd')))
    Debug.write('Deviations: %.2f pixels %.2f degrees' % \
                (correct.get_result('rmsd_pixel'),
                 correct.get_result('rmsd_phi')))

    Debug.write('Error correction parameters: A=%.3f B=%.3f' % \
                correct.get_result('sdcorrection'))

    # compute misorientation of axes

    xparm_file = os.path.join(self.get_working_directory(), 'GXPARM.XDS')

    from iotbx.xds import xparm
    handle = xparm.reader()
    handle.read_file(xparm_file)

    rotn = handle.rotation_axis
    beam = handle.beam_vector

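    # deviation of the rotation-axis / beam angle from 90 degrees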
    dot = sum([rotn[j] * beam[j] for j in range(3)])
    r = math.sqrt(sum([rotn[j] * rotn[j] for j in range(3)]))
    b = math.sqrt(sum([beam[j] * beam[j] for j in range(3)]))

    rtod = 180.0 / math.pi

    angle = rtod * math.fabs(0.5 * math.pi - math.acos(dot / (r * b)))

    Debug.write('Axis misalignment %.2f degrees' % angle)

    correct_deviations = (correct.get_result('rmsd_pixel'),
                          correct.get_result('rmsd_phi'))

    if p1_deviations:
      # compare and reject if both > 50% higher - though adding a little
      # flexibility - 0.5 pixel / osc width slack.

      pixel = p1_deviations[0]
      phi = math.sqrt(0.05 * 0.05 + \
                      p1_deviations[1] * p1_deviations[1])

      threshold = Flags.get_rejection_threshold()

      Debug.write('RMSD ratio: %.2f' % (correct_deviations[0] / pixel))
      Debug.write('RMSPhi ratio: %.2f' % (correct_deviations[1] / phi))

      if correct_deviations[0] / pixel > threshold and \
             correct_deviations[1] / phi > threshold:

        Chatter.write(
            'Eliminating this indexing solution as postrefinement')
        Chatter.write(
            'deviations rather high relative to triclinic')
        raise BadLatticeError(
            'high relative deviations in postrefinement')

    if not Flags.get_quick() and Flags.get_remove():
      # check for alien reflections and perhaps recycle - removing them
      if len(correct.get_remove()) > 0:

        correct_remove = correct.get_remove()
        current_remove = []
        final_remove = []

        # first ensure that there are no duplicate entries...
        if os.path.exists(os.path.join(
            self.get_working_directory(),
            'REMOVE.HKL')):
          for line in open(os.path.join(
              self.get_working_directory(),
              'REMOVE.HKL'), 'r').readlines():
            h, k, l = map(int, line.split()[:3])
            z = float(line.split()[3])

            if (h, k, l, z) not in current_remove:
              current_remove.append((h, k, l, z))

          for c in correct_remove:
            if c in current_remove:
              continue
            final_remove.append(c)

          Debug.write(
              '%d alien reflections are already removed' % \
              (len(correct_remove) - len(final_remove)))
        else:
          # we want to remove all of the new dodgy reflections
          final_remove = correct_remove

        remove_hkl = open(os.path.join(
            self.get_working_directory(),
            'REMOVE.HKL'), 'w')

        z_min = Flags.get_z_min()
        rejected = 0

        # write in the old reflections
        for remove in current_remove:
          z = remove[3]
          if z >= z_min:
            remove_hkl.write('%d %d %d %f\n' % remove)
          else:
            rejected += 1
        Debug.write('Wrote %d old reflections to REMOVE.HKL' % \
                    (len(current_remove) - rejected))
        Debug.write('Rejected %d as z < %f' % \
                    (rejected, z_min))

        # and the new reflections
        rejected = 0
        used = 0
        for remove in final_remove:
          z = remove[3]
          if z >= z_min:
            used += 1
            remove_hkl.write('%d %d %d %f\n' % remove)
          else:
            rejected += 1
        Debug.write('Wrote %d new reflections to REMOVE.HKL' % \
                    (len(final_remove) - rejected))
        Debug.write('Rejected %d as z < %f' % \
                    (rejected, z_min))

        remove_hkl.close()

        # we want to rerun the finishing step so...
        # unless we have added no new reflections... or unless we
        # have not confirmed the point group (see SCI-398)

        if used and self.get_integrater_reindex_matrix():
          self.set_integrater_finish_done(False)

    else:
      Debug.write(
          'Going quickly so not removing %d outlier reflections...' % \
          len(correct.get_remove()))

    # Convert INTEGRATE.HKL to MTZ format and reapply any reindexing operations
    # spacegroup changes to allow use with CCP4 / Aimless for scaling

    integrate_hkl = os.path.join(
      self.get_working_directory(), 'INTEGRATE.HKL')

    hklout = os.path.splitext(integrate_hkl)[0] + ".mtz"
    self._factory.set_working_directory(self.get_working_directory())
    pointless = self._factory.Pointless()
    pointless.set_xdsin(integrate_hkl)
    pointless.set_hklout(hklout)
    pointless.xds_to_mtz()

    integrate_mtz = hklout

    if self.get_integrater_reindex_operator() or \
       self.get_integrater_spacegroup_number():

      Debug.write('Reindexing things to MTZ')

      reindex = Reindex()
      reindex.set_working_directory(self.get_working_directory())
      auto_logfiler(reindex)

      if self.get_integrater_reindex_operator():
        reindex.set_operator(self.get_integrater_reindex_operator())

      if self.get_integrater_spacegroup_number():
        reindex.set_spacegroup(self.get_integrater_spacegroup_number())

      hklout = '%s_reindex.mtz' % os.path.splitext(integrate_mtz)[0]

      reindex.set_hklin(integrate_mtz)
      reindex.set_hklout(hklout)
      reindex.reindex()
      integrate_mtz = hklout

    return integrate_mtz
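
The centring multiplier mapping above appears twice (once as integers to scale
the REIDX matrix, once as floats to undo it); a minimal consolidating helper,
sketched here under a hypothetical name:

  def lattice_centring_multiplier(lattice):
    '''Return the REIDX multiplier implied by the Bravais centring, taken
    from the second character of the lattice symbol (e.g. 'aP' -> 1).'''
    multipliers = {'P': 1, 'C': 2, 'I': 2, 'R': 3, 'F': 4}
    if lattice[1] not in multipliers:
      raise RuntimeError('unknown multiplier for lattice %s' % lattice)
    return multipliers[lattice[1]]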
Example #9
    def _mosflm_parallel_integrate(self):
        '''Perform the integration as before, but this time as a
        number of parallel Mosflm jobs (hence, in separate directories)
        and including a step of pre-refinement of the mosaic spread and
        missets. This will all be kind of explicit and hence probably
        messy!'''

        refinr = self.get_integrater_refiner()

        lattice = refinr.get_refiner_lattice()
        spacegroup_number = lattice_to_spacegroup(lattice)
        mosaic = refinr.get_refiner_payload('mosaic')
        beam = refinr.get_refiner_payload('beam')
        distance = refinr.get_refiner_payload('distance')
        matrix = refinr.get_refiner_payload('mosflm_orientation_matrix')

        integration_params = refinr.get_refiner_payload(
            'mosflm_integration_parameters')

        if integration_params:
            if 'separation' in integration_params:
                self.set_integrater_parameter(
                    'mosflm', 'separation',
                    '%s %s' % tuple(integration_params['separation']))
            if 'raster' in integration_params:
                self.set_integrater_parameter(
                    'mosflm', 'raster',
                    '%d %d %d %d %d' % tuple(integration_params['raster']))

        refinr.set_refiner_payload('mosflm_integration_parameters', None)
        pname, xname, dname = self.get_integrater_project_info()

        # what follows below should (i) be run in separate directories
        # and (ii) be repeated N=parallel times.

        nproc = PhilIndex.params.xia2.settings.multiprocessing.nproc
        parallel = nproc

        # FIXME this is something of a kludge - if too few frames refinement
        # and integration does not work well... ideally want at least 15
        # frames / chunk (say)
        nframes = self._intgr_wedge[1] - self._intgr_wedge[0] + 1

        if parallel > nframes // 15:
            parallel = nframes // 15

        if not parallel:
            raise RuntimeError('parallel not set')
        if parallel < 2:
            raise RuntimeError('parallel not parallel: %s' % parallel)

        jobs = []
        hklouts = []
        nref = 0

        # calculate the chunks to use
        offset = self.get_frame_offset()
        start = self._intgr_wedge[0] - offset
        end = self._intgr_wedge[1] - offset

        left_images = 1 + end - start
        left_chunks = parallel
        chunks = []

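        # hand out the remaining images as evenly as possible across the
        # remaining chunks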
        while left_images > 0:
            size = left_images // left_chunks
            chunks.append((start, start + size - 1))
            start += size
            left_images -= size
            left_chunks -= 1

        summary_files = []

        for j in range(parallel):

            # make some working directories, as necessary - chunk-(0:N-1)
            wd = os.path.join(self.get_working_directory(), 'chunk-%d' % j)
            if not os.path.exists(wd):
                os.makedirs(wd)

            job = MosflmIntegrate()
            job.set_working_directory(wd)

            auto_logfiler(job)

            l = refinr.get_refiner_lattice()

            # create the starting point
            f = open(os.path.join(wd, 'xiaintegrate-%s.mat' % l), 'w')
            for m in matrix:
                f.write(m)
            f.close()

            spacegroup_number = lattice_to_spacegroup(lattice)

            job.set_refine_profiles(self._mosflm_refine_profiles)

            # N.B. for harvesting need to append N to dname.

            if pname is not None and xname is not None and dname is not None:
                Debug.write('Harvesting: %s/%s/%s' % (pname, xname, dname))
                harvest_dir = self.get_working_directory()
                temp_dname = '%s_%s' % \
                             (dname, self.get_integrater_sweep_name())
                job.set_pname_xname_dname(pname, xname, temp_dname)

            job.set_template(os.path.basename(self.get_template()))
            job.set_directory(self.get_directory())

            # check for ice - and if so, exclude (ranges taken from
            # XDS documentation)
            if self.get_integrater_ice() != 0:
                Debug.write('Excluding ice rings')
                job.set_exclude_ice(True)

            # exclude specified resolution ranges
            if len(self.get_integrater_excluded_regions()) != 0:
                regions = self.get_integrater_excluded_regions()
                Debug.write('Excluding regions: %s' % repr(regions))
                job.set_exclude_regions(regions)

            mask = standard_mask(self.get_detector())
            for m in mask:
                job.add_instruction(m)

            job.set_input_mat_file('xiaintegrate-%s.mat' % l)

            job.set_beam_centre(beam)
            job.set_distance(distance)
            job.set_space_group_number(spacegroup_number)
            job.set_mosaic(mosaic)

            if self.get_wavelength_prov() == 'user':
                job.set_wavelength(self.get_wavelength())

            parameters = self.get_integrater_parameters('mosflm')
            job.update_parameters(parameters)

            if self._mosflm_gain:
                job.set_gain(self._mosflm_gain)

            # check for resolution limits
            if self._intgr_reso_high > 0.0:
                job.set_d_min(self._intgr_reso_high)
            if self._intgr_reso_low:
                job.set_d_max(self._intgr_reso_low)

            if PhilIndex.params.general.backstop_mask:
                from xia2.Toolkit.BackstopMask import BackstopMask
                mask = BackstopMask(PhilIndex.params.general.backstop_mask)
                mask = mask.calculate_mask_mosflm(self.get_header())
                job.set_mask(mask)

            detector = self.get_detector()
            detector_width, detector_height = detector[0].get_image_size_mm()

            lim_x = 0.5 * detector_width
            lim_y = 0.5 * detector_height

            Debug.write('Scanner limits: %.1f %.1f' % (lim_x, lim_y))
            job.set_limits(lim_x, lim_y)

            job.set_fix_mosaic(self._mosflm_postref_fix_mosaic)

            job.set_pre_refinement(True)
            job.set_image_range(chunks[j])

            # these are now running so ...

            jobs.append(job)

            continue

        # ok, at this stage I need to ...
        #
        # (i) accumulate the statistics as a function of batch
        # (ii) merge them into a single block
        #
        # This is likely to be a pain in the arse!

        first_integrated_batch = 1.0e6
        last_integrated_batch = -1.0e6

        all_residuals = []

        threads = []

        for j in range(parallel):
            job = jobs[j]

            # now wait for them to finish - first wait will really be the
            # first one, then all should be finished...

            thread = Background(job, 'run')
            thread.start()
            threads.append(thread)

        mosaics = []
        postref_result = {}

        integrated_images_first = 1.0e6
        integrated_images_last = -1.0e6
        self._intgr_per_image_statistics = {}

        for j in range(parallel):
            thread = threads[j]
            thread.stop()
            job = jobs[j]

            # get the log file
            output = job.get_all_output()

            # record a copy of it, perhaps - though not if parallel
            if self.get_integrater_sweep_name() and False:
                pname, xname, dname = self.get_integrater_project_info()
                FileHandler.record_log_file(
                    '%s %s %s %s mosflm integrate' % \
                    (self.get_integrater_sweep_name(),
                     pname, xname, '%s_%d' % (dname, j)),
                    job.get_log_file())

            # look for things that we want to know...
            # that is, the output reflection file name, the updated
            # value for the gain (if present,) any warnings, errors,
            # or just interesting facts.

            batches = job.get_batches_out()
            integrated_images_first = min(batches[0], integrated_images_first)
            integrated_images_last = max(batches[1], integrated_images_last)

            mosaics.extend(job.get_mosaic_spreads())

            if min(mosaics) < 0:
                raise IntegrationError('negative mosaic spread: %s' %
                                       min(mosaics))

            if (job.get_detector_gain_error()
                    and not (self.get_imageset().get_detector()[0].get_type()
                             == 'SENSOR_PAD')):
                gain = job.get_suggested_gain()
                if gain is not None:
                    self.set_integrater_parameter('mosflm', 'gain', gain)
                    self.set_integrater_export_parameter(
                        'mosflm', 'gain', gain)
                    if self._mosflm_gain:
                        Debug.write('GAIN updated to %f' % gain)
                    else:
                        Debug.write('GAIN found to be %f' % gain)

                    self._mosflm_gain = gain
                    self._mosflm_rerun_integration = True

            hklout = job.get_hklout()
            Debug.write('Integration output: %s' % hklout)
            hklouts.append(hklout)

            nref += job.get_nref()

            # if a BGSIG error happened try not refining the
            # profile and running again...

            if job.get_bgsig_too_large():
                if not self._mosflm_refine_profiles:
                    raise RuntimeError('BGSIG error with profiles fixed')

                Debug.write('BGSIG error detected - try fixing profile...')

                self._mosflm_refine_profiles = False
                self.set_integrater_done(False)

                return

            if job.get_getprof_error():
                Debug.write('GETPROF error detected - try fixing profile...')
                self._mosflm_refine_profiles = False
                self.set_integrater_done(False)

                return

            # here
            # write the report for each image as .*-#$ to Chatter -
            # detailed report will be written automagically to science...

            self._intgr_per_image_statistics.update(
                job.get_per_image_statistics())
            postref_result.update(job.get_postref_result())

            # inspect the output for e.g. very high weighted residuals

            all_residuals.extend(job.get_residuals())

        self._intgr_batches_out = (integrated_images_first,
                                   integrated_images_last)

        if mosaics:
            self.set_integrater_mosaic_min_mean_max(
                min(mosaics), sum(mosaics) / len(mosaics), max(mosaics))
        else:
            # fall back on the single mosaic estimate from the refiner payload
            m = mosaic
            self.set_integrater_mosaic_min_mean_max(m, m, m)

        Chatter.write(self.show_per_image_statistics())

        Chatter.write('Mosaic spread: %.3f < %.3f < %.3f' % \
                      self.get_integrater_mosaic_min_mean_max())

        # gather the statistics from the postrefinement for all sweeps
        # now write this to a postrefinement log

        postref_log = os.path.join(self.get_working_directory(),
                                   'postrefinement.log')

        with open(postref_log, 'w') as fout:
            fout.write('$TABLE: Postrefinement for %s:\n' % \
                       self._intgr_sweep_name)
            fout.write('$GRAPHS: Missetting angles:A:1, 2, 3, 4: $$\n')
            fout.write('Batch PhiX PhiY PhiZ $$ Batch PhiX PhiY PhiZ $$\n')

            for image in sorted(postref_result):
                phix = postref_result[image].get('phix', 0.0)
                phiy = postref_result[image].get('phiy', 0.0)
                phiz = postref_result[image].get('phiz', 0.0)

                fout.write('%d %5.2f %5.2f %5.2f\n' % \
                           (image, phix, phiy, phiz))

            fout.write('$$\n')

        if self.get_integrater_sweep_name():
            pname, xname, dname = self.get_integrater_project_info()
            FileHandler.record_log_file('%s %s %s %s postrefinement' % \
                                        (self.get_integrater_sweep_name(),
                                         pname, xname, dname),
                                        postref_log)

        hklouts.sort()

        hklout = os.path.join(self.get_working_directory(),
                              os.path.split(hklouts[0])[-1])

        Debug.write('Sorting data to %s' % hklout)
        for hklin in hklouts:
            Debug.write('<= %s' % hklin)

        sortmtz = Sortmtz()
        sortmtz.set_hklout(hklout)
        for hklin in hklouts:
            sortmtz.add_hklin(hklin)

        sortmtz.sort()

        self._mosflm_hklout = hklout

        return self._mosflm_hklout
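
Note: the postrefinement log written above follows the CCP4 loggraph $TABLE convention (title, graph definitions, a column-header line, data rows, closing $$). A minimal sketch of reading such a table back, e.g. for plotting - the helper is an assumption for illustration, not part of xia2:

    def read_missetting_table(path):
        """Parse (batch, phix, phiy, phiz) rows from a loggraph $TABLE
        block of the shape written above."""
        rows = []
        in_data = False
        with open(path) as fh:
            for line in fh:
                line = line.strip()
                if line.startswith('Batch'):
                    in_data = True  # data rows follow the column headers
                    continue
                if in_data:
                    if line == '$$':
                        break  # terminator written after the data rows
                    batch, phix, phiy, phiz = line.split()
                    rows.append((int(batch), float(phix),
                                 float(phiy), float(phiz)))
        return rows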
Example #10
    def _do_indexing(self, method=None):
        indexer = self.Index()
        for spot_list in self._indxr_payload["spot_lists"]:
            indexer.add_spot_filename(spot_list)
        for filename in self._indxr_payload["experiments"]:
            indexer.add_sweep_filename(filename)
        if PhilIndex.params.dials.index.phil_file is not None:
            indexer.set_phil_file(PhilIndex.params.dials.index.phil_file)
        indexer.set_max_cell(
            max_cell=PhilIndex.params.dials.index.max_cell,
            max_height_fraction=(
                PhilIndex.params.dials.index.max_cell_estimation.max_height_fraction
            ),
        )
        if PhilIndex.params.xia2.settings.small_molecule:
            indexer.set_min_cell(3)
        if PhilIndex.params.dials.fix_geometry:
            indexer.set_detector_fix("all")
            indexer.set_beam_fix("all")
        elif PhilIndex.params.dials.fix_distance:
            indexer.set_detector_fix("distance")
        indexer.set_close_to_spindle_cutoff(
            PhilIndex.params.dials.close_to_spindle_cutoff)

        if self._indxr_input_lattice:
            indexer.set_indexer_input_lattice(self._indxr_input_lattice)
            logger.debug("Set lattice: %s", self._indxr_input_lattice)

        if self._indxr_input_cell:
            indexer.set_indexer_input_cell(self._indxr_input_cell)
            logger.debug("Set cell: %f %f %f %f %f %f" %
                         self._indxr_input_cell)

        if method is None:
            if PhilIndex.params.dials.index.method is None:
                method = "fft3d"
                logger.debug("Choosing indexing method: %s", method)
            else:
                method = PhilIndex.params.dials.index.method

        FileHandler.record_log_file("%s INDEX" % self.get_indexer_full_name(),
                                    indexer.get_log_file())
        indexer.run(method)

        if not os.path.exists(indexer.get_experiments_filename()):
            raise RuntimeError(
                "Indexing has failed: see %s for more details." %
                indexer.get_log_file())
        elif not os.path.exists(indexer.get_indexed_filename()):
            raise RuntimeError("Indexing has failed: %s does not exist." %
                               indexer.get_indexed_filename())

        report = self.Report()
        report.set_experiments_filename(indexer.get_experiments_filename())
        report.set_reflections_filename(indexer.get_indexed_filename())
        html_filename = os.path.join(
            self.get_working_directory(),
            "%i_dials.index.report.html" % report.get_xpid(),
        )
        report.set_html_filename(html_filename)
        report.run()
        FileHandler.record_html_file("%s INDEX" % self.get_indexer_full_name(),
                                     html_filename)

        return indexer
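
For orientation, the wrapper above ultimately drives dials.index; the equivalent command line has roughly the following shape (file names hypothetical, and the exact phil parameter paths should be checked against the installed DIALS version):

    dials.index imported.expt strong.refl \
        indexing.method=fft3d \
        max_cell=200

The fft3d fallback applies only when neither the caller nor the phil input specifies an indexing method.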
Example #11
    def _sort_together_data_ccp4(self):
        '''Sort together in the right order (rebatching as we go) the sweeps
    we want to scale together.'''

        max_batches = 0

        for epoch in self._sweep_handler.get_epochs():

            si = self._sweep_handler.get_sweep_information(epoch)
            pname, xname, dname = si.get_project_info()
            sname = si.get_sweep_name()
            # limit the reflections - e.g. if we are re-running the scaling step
            # on just a subset of the integrated data

            hklin = si.get_reflections()
            limit_batch_range = None
            for sweep in PhilIndex.params.xia2.settings.sweep:
                if sweep.id == sname and sweep.range is not None:
                    limit_batch_range = sweep.range
                    break

            if limit_batch_range is not None:
                Debug.write('Limiting batch range for %s: %s' %
                            (sname, limit_batch_range))
                start, end = limit_batch_range
                hklout = os.path.splitext(hklin)[0] + '_tmp.mtz'
                FileHandler.record_temporary_file(hklout)
                rb = self._factory.Pointless()
                rb.set_hklin(hklin)
                rb.set_hklout(hklout)
                rb.limit_batches(start, end)
                si.set_reflections(hklout)
                si.set_batches(limit_batch_range)

            # keep a count of the maximum number of batches in a block -
            # this will be used to make rebatch work below.

            hklin = si.get_reflections()

            batches = MtzUtils.batches_from_mtz(hklin)
            if 1 + max(batches) - min(batches) > max_batches:
                max_batches = max(batches) - min(batches) + 1

        Debug.write('Biggest sweep has %d batches' % max_batches)
        max_batches = nifty_power_of_ten(max_batches)

        # then rebatch the files, to make sure that the batch numbers are
        # in the same order as the epochs of data collection.

        counter = 0

        for epoch in self._sweep_handler.get_epochs():

            si = self._sweep_handler.get_sweep_information(epoch)

            hklin = si.get_reflections()

            pname, xname, dname = si.get_project_info()
            sname = si.get_sweep_name()

            hklout = os.path.join(self.get_working_directory(),
                                  '%s_%s_%s_%s_integrated.mtz' % \
                                  (pname, xname, dname, sname))

            first_batch = min(si.get_batches())
            si.set_batch_offset(counter * max_batches - first_batch + 1)

            from xia2.Modules.Scaler.rebatch import rebatch
            new_batches = rebatch(hklin,
                                  hklout,
                                  first_batch=counter * max_batches + 1,
                                  pname=pname,
                                  xname=xname,
                                  dname=dname)

            # update the "input information"

            si.set_reflections(hklout)
            si.set_batches(new_batches)

            # update the counter & recycle

            counter += 1

        s = self._factory.Sortmtz()

        hklout = os.path.join(self.get_working_directory(),
                              '%s_%s_sorted.mtz' % \
                              (self._scalr_pname, self._scalr_xname))

        s.set_hklout(hklout)

        for epoch in self._sweep_handler.get_epochs():
            s.add_hklin(
                self._sweep_handler.get_sweep_information(
                    epoch).get_reflections())

        s.sort()

        # verify that the measurements are in the correct setting
        # choice for the spacegroup

        hklin = hklout
        hklout = hklin.replace('sorted.mtz', 'temp.mtz')

        if not self.get_scaler_reference_reflection_file():

            if PhilIndex.params.xia2.settings.symmetry.program == 'dials':
                p = self._factory.dials_symmetry()
            else:
                p = self._factory.Pointless()

            FileHandler.record_log_file('%s %s pointless' % \
                                        (self._scalr_pname,
                                         self._scalr_xname),
                                        p.get_log_file())

            if len(self._sweep_handler.get_epochs()) > 1:
                p.set_hklin(hklin)
            else:
                # permit the use of pointless preparation...
                epoch = self._sweep_handler.get_epochs()[0]
                p.set_hklin(
                    self._prepare_pointless_hklin(
                        hklin,
                        self._sweep_handler.get_sweep_information(
                            epoch).get_integrater().get_phi_width()))

            if self._scalr_input_spacegroup:
                Debug.write('Assigning user input spacegroup: %s' % \
                            self._scalr_input_spacegroup)

                p.decide_spacegroup()
                spacegroup = p.get_spacegroup()
                reindex_operator = p.get_spacegroup_reindex_operator()

                Debug.write('Pointless thought %s (reindex as %s)' % \
                            (spacegroup, reindex_operator))

                spacegroup = self._scalr_input_spacegroup
                reindex_operator = 'h,k,l'
                self._spacegroup_reindex_operator = reindex_operator

            else:
                p.decide_spacegroup()
                spacegroup = p.get_spacegroup()
                reindex_operator = p.get_spacegroup_reindex_operator()
                self._spacegroup_reindex_operator = clean_reindex_operator(
                    reindex_operator)
                Debug.write('Pointless thought %s (reindex as %s)' % \
                            (spacegroup, reindex_operator))

            if self._scalr_input_spacegroup:
                self._scalr_likely_spacegroups = [self._scalr_input_spacegroup]
            else:
                self._scalr_likely_spacegroups = p.get_likely_spacegroups()

            Chatter.write('Likely spacegroups:')
            for spag in self._scalr_likely_spacegroups:
                Chatter.write('%s' % spag)

            Chatter.write(
                'Reindexing to first spacegroup setting: %s (%s)' % \
                (spacegroup, clean_reindex_operator(reindex_operator)))

        else:
            spacegroup = MtzUtils.space_group_name_from_mtz(
                self.get_scaler_reference_reflection_file())
            reindex_operator = 'h,k,l'

            self._scalr_likely_spacegroups = [spacegroup]

            Debug.write('Assigning spacegroup %s from reference' % \
                        spacegroup)

        # then run reindex to set the correct spacegroup

        ri = self._factory.Reindex()
        ri.set_hklin(hklin)
        ri.set_hklout(hklout)
        ri.set_spacegroup(spacegroup)
        ri.set_operator(reindex_operator)
        ri.reindex()

        FileHandler.record_temporary_file(hklout)

        # then resort the reflections (one last time!)

        s = self._factory.Sortmtz()

        temp = hklin
        hklin = hklout
        hklout = temp

        s.add_hklin(hklin)
        s.set_hklout(hklout)

        s.sort()

        # done preparing!

        self._prepared_reflections = s.get_hklout()
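
The batch bookkeeping above relies on nifty_power_of_ten to give every sweep its own non-overlapping block of batch numbers. A plausible sketch of what such a helper computes (the real implementation lives in xia2 and may differ in detail):

    import math

    def nifty_power_of_ten(n):
        """Smallest power of ten strictly greater than n (n >= 1),
        e.g. 360 -> 1000, so rebatched sweeps cannot collide."""
        return int(10 ** (math.floor(math.log10(n)) + 1))

With two 360-image sweeps this gives max_batches = 1000 and batch blocks 1-360 and 1001-1360.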
Example #12
  def _refine(self):

    for epoch, idxr in self._refinr_indexers.items():

      # get the unit cell from this indexer to initiate processing
      # if it is new... and also copy out all of the information for
      # the Dials indexer if not...

      experiments = idxr.get_indexer_experiment_list()

      indexed_experiments = idxr.get_indexer_payload("experiments_filename")
      indexed_reflections = idxr.get_indexer_payload("indexed_filename")

      if len(experiments) > 1:
        xsweeps = idxr._indxr_sweeps
        assert len(xsweeps) == len(experiments)
        assert len(self._refinr_sweeps) == 1 # don't currently support joint refinement
        xsweep = self._refinr_sweeps[0]
        i = xsweeps.index(xsweep)
        experiments = experiments[i:i+1]

        # Extract and output experiment and reflections for current sweep
        indexed_experiments = os.path.join(
          self.get_working_directory(),
          "%s_indexed_experiments.json" %xsweep.get_name())
        indexed_reflections = os.path.join(
          self.get_working_directory(),
          "%s_indexed_reflections.pickle" %xsweep.get_name())

        from dxtbx.serialize import dump
        dump.experiment_list(experiments, indexed_experiments)

        from libtbx import easy_pickle
        from scitbx.array_family import flex
        reflections = easy_pickle.load(
          idxr.get_indexer_payload("indexed_filename"))
        sel = reflections['id'] == i
        assert sel.count(True) > 0
        imageset_id = reflections['imageset_id'].select(sel)
        assert imageset_id.all_eq(imageset_id[0])
        sel = reflections['imageset_id'] == imageset_id[0]
        reflections = reflections.select(sel)
        # set indexed reflections to id == 0 and imageset_id == 0
        reflections['id'].set_selected(reflections['id'] == i, 0)
        reflections['imageset_id'] = flex.int(len(reflections), 0)
        easy_pickle.dump(indexed_reflections, reflections)

      assert len(experiments.crystals()) == 1 # currently only handle one lattice/sweep
      crystal_model = experiments.crystals()[0]
      lattice = idxr.get_indexer_lattice()

      # check if the lattice was user assigned...
      user_assigned = idxr.get_indexer_user_input_lattice()

      # XXX check that the indexer is an Dials indexer - if not then
      # create one...

      # copy the data across
      from dxtbx.serialize import load

      refiner = self.Refine()
      refiner.set_experiments_filename(indexed_experiments)
      refiner.set_indexed_filename(indexed_reflections)

      # XXX Temporary workaround for dials.refine error for scan_varying
      # refinement with smaller wedges
      total_phi_range = idxr._indxr_imagesets[0].get_scan().get_oscillation_range()[1]
      if total_phi_range < 5: # arbitrary value
        refiner.set_scan_varying(False)
      elif total_phi_range < 36:
        refiner.set_interval_width_degrees(total_phi_range/2)

      FileHandler.record_log_file('%s REFINE' % idxr.get_indexer_full_name(),
                                  refiner.get_log_file())
      refiner.run()
      self._refinr_experiments_filename \
        = refiner.get_refined_experiments_filename()
      experiments = load.experiment_list(self._refinr_experiments_filename)
      self._refinr_indexed_filename = refiner.get_refined_filename()
      self.set_refiner_payload("experiments.json", self._refinr_experiments_filename)
      self.set_refiner_payload("reflections.pickle", self._refinr_indexed_filename)

      # this is the result of the cell refinement
      self._refinr_cell = experiments.crystals()[0].get_unit_cell().parameters()
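
The scan-varying workaround above amounts to a small policy on the total oscillation range; as a standalone sketch (thresholds copied from the code, function name hypothetical):

    def scan_varying_policy(total_phi_range):
        """Return (scan_varying, interval_width_degrees) for dials.refine,
        following the wedge-width thresholds used above."""
        if total_phi_range < 5:  # arbitrary cutoff: too little data
            return False, None
        if total_phi_range < 36:  # narrow wedge: coarsen the interval width
            return True, total_phi_range / 2
        return True, None  # defaults are fine for wide scans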
Example #13
    def _scale(self):
        """Perform all of the operations required to deliver the scaled
        data."""
        sweep_infos = [
            self._sweep_handler.get_sweep_information(e)
            for e in self._sweep_handler.get_epochs()
        ]

        if self._scalr_corrections:
            Journal.block(
                "scaling",
                self.get_scaler_xcrystal().get_name(),
                "Dials",
                {
                    "scaling model": "automatic",
                    "absorption": self._scalr_correct_absorption,
                    "decay": self._scalr_correct_decay,
                },
            )

        else:
            Journal.block(
                "scaling",
                self.get_scaler_xcrystal().get_name(),
                "Dials",
                {"scaling model": "default"},
            )

        ### Set the parameters and datafiles for dials.scale

        self._scaler = DialsScale()
        self._scaler = self._updated_dials_scaler()

        if self._scaled_experiments and self._scaled_reflections:
            # going to continue-where-left-off
            self._scaler.add_experiments_json(self._scaled_experiments)
            self._scaler.add_reflections_file(self._scaled_reflections)
        else:
            for si in sweep_infos:
                self._scaler.add_experiments_json(si.get_experiments())
                self._scaler.add_reflections_file(si.get_reflections())

        ### Set the unmerged mtz filepath

        self._scalr_scaled_reflection_files = {}
        self._scalr_scaled_reflection_files["mtz_unmerged"] = {}

        # First set the unmerged mtz output filename. Note that this is the
        # same for MAD datasets too, as we need a single unmerged file for
        # the merging statistics calculation. For the merged mtz this differs.
        scaled_unmerged_mtz_path = os.path.join(
            self.get_working_directory(),
            "%s_%s_scaled_unmerged.mtz" % (self._scalr_pname, self._scalr_xname),
        )
        self._scaler.set_scaled_unmerged_mtz([scaled_unmerged_mtz_path])
        self._scaler.set_crystal_name(self._scalr_xname)  # Name goes in mtz

        ### Set the merged mtz filepath(s), taking into account the MAD case.

        # Find number of dnames (i.e. number of wavelengths)
        dnames_set = OrderedSet()
        for si in sweep_infos:
            dnames_set.add(si.get_project_info()[2])

        scaled_mtz_path = os.path.join(
            self.get_working_directory(),
            "%s_%s_scaled.mtz" % (self._scalr_pname, self._scalr_xname),
        )
        if len(dnames_set) == 1:
            self._scaler.set_scaled_mtz([scaled_mtz_path])
            self._scalr_scaled_reflection_files["mtz"] = {
                dnames_set[0]: scaled_mtz_path
            }
            self._scalr_scaled_reflection_files["mtz_unmerged"] = {
                dnames_set[0]: scaled_unmerged_mtz_path
            }
        else:
            merged_mtz_files = []
            self._scalr_scaled_reflection_files["mtz"] = {}
            for dname in dnames_set:
                this_mtz_path = os.path.splitext(scaled_mtz_path)[0] + (
                    "_%s.mtz" % dname)
                merged_mtz_files.append(this_mtz_path)
                self._scalr_scaled_reflection_files["mtz"][dname] = this_mtz_path
                # Note - we aren't logging individual unmerged here as not
                # generating until later.
            self._scaler.set_scaled_mtz(merged_mtz_files)

        ### Set the resolution limit if applicable

        user_resolution_limits = {}
        highest_resolution = 100.0
        for si in sweep_infos:
            dname = si.get_project_info()[2]
            sname = si.get_sweep_name()
            intgr = si.get_integrater()

            if intgr.get_integrater_user_resolution():
                # record user resolution here but don't use it until later - why?
                dmin = intgr.get_integrater_high_resolution()

                if (dname, sname) not in user_resolution_limits:
                    user_resolution_limits[(dname, sname)] = dmin
                elif dmin < user_resolution_limits[(dname, sname)]:
                    user_resolution_limits[(dname, sname)] = dmin

            if (dname, sname) in self._scalr_resolution_limits:
                d_min, _ = self._scalr_resolution_limits[(dname, sname)]
                if d_min < highest_resolution:
                    highest_resolution = d_min
        if highest_resolution < 99.9:
            self._scaler.set_resolution(d_min=highest_resolution)

        ### Setup final job details and run scale

        self._scaler.set_working_directory(self.get_working_directory())
        auto_logfiler(self._scaler)
        FileHandler.record_log_file(
            "%s %s SCALE" % (self._scalr_pname, self._scalr_xname),
            self._scaler.get_log_file(),
        )
        self._scaler.scale()
        self._scaled_experiments = self._scaler.get_scaled_experiments()
        self._scaled_reflections = self._scaler.get_scaled_reflections()

        FileHandler.record_data_file(scaled_unmerged_mtz_path)

        # make it so that only scaled.expt and scaled.refl are
        # the files that dials.scale knows about, so that if scale is called again,
        # scaling resumes from where it left off.
        self._scaler.clear_datafiles()

        # log datafiles here, picked up from here in commonscaler methods.
        if len(dnames_set) == 1:
            hklout = copy.deepcopy(self._scaler.get_scaled_mtz()[0])
            self._scalr_scaled_refl_files = {dnames_set[0]: hklout}
            FileHandler.record_data_file(hklout)
        else:
            self._scalr_scaled_refl_files = {}
            for i, dname in enumerate(dnames_set):
                hklout = copy.deepcopy(self._scaler.get_scaled_mtz()[i])
                self._scalr_scaled_refl_files[dname] = hklout
                FileHandler.record_data_file(hklout)

        ### Calculate the resolution limit and set done False if applicable

        highest_suggested_resolution = self.assess_resolution_limits(
            self._scaler.get_unmerged_reflection_file(),
            user_resolution_limits,
            use_misigma=False,
        )

        if not self.get_scaler_done():
            # reset for when resolution limit applied
            Debug.write("Returning as scaling not finished...")
            return

        ### For MAD case, generate individual unmerged mtz for stats.

        if len(dnames_set) > 1:
            unmerged_mtz_files = []
            scaler = DialsScale()
            scaler.set_working_directory(self.get_working_directory())
            scaler.set_export_mtz_only()
            scaler.add_experiments_json(self._scaled_experiments)
            scaler.add_reflections_file(self._scaled_reflections)
            for dname in dnames_set:
                this_mtz_path = os.path.splitext(scaled_unmerged_mtz_path)[0] + (
                    "_%s.mtz" % dname
                )
                unmerged_mtz_files.append(this_mtz_path)
                self._scalr_scaled_reflection_files["mtz_unmerged"][
                    dname
                ] = this_mtz_path
            scaler.set_scaled_unmerged_mtz(unmerged_mtz_files)
            scaler.scale()
            for f in scaler.get_scaled_unmerged_mtz():  # a list
                FileHandler.record_data_file(f)
            # set refls, exps & unmerged mtz names

        if PhilIndex.params.xia2.settings.merging_statistics.source == "cctbx":
            for key in self._scalr_scaled_refl_files:
                stats = self._compute_scaler_statistics(
                    self._scalr_scaled_reflection_files["mtz_unmerged"][key],
                    selected_band=(highest_suggested_resolution, None),
                    wave=key,
                )
                self._scalr_statistics[
                    (self._scalr_pname, self._scalr_xname, key)
                ] = stats

        # Run twotheta refine
        self._update_scaled_unit_cell()
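
A side note on the derived filenames in this method: str.rstrip takes a set of characters rather than a suffix, so os.path.splitext is used above to drop the extension. rstrip happens to behave for names ending in "scaled"/"unmerged" but silently corrupts others:

    >>> "data_z.mtz".rstrip(".mtz")  # strips characters, not a suffix
    'data_'
    >>> import os
    >>> os.path.splitext("data_z.mtz")[0]  # safe way to drop the extension
    'data_z'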
Example #14
    def _integrate_finish(self):
        """Finish off the integration by running correct."""

        # first run the postrefinement etc with spacegroup P1
        # and the current unit cell - this will be used to
        # obtain a benchmark rmsd in pixels / phi and also
        # cell deviations (this is working towards spotting bad
        # indexing solutions) - only do this if we have no
        # reindex matrix... and no postrefined cell...

        p1_deviations = None

        # fix for bug # 3264 -
        # if we have not run integration with refined parameters, make it so...
        # erm? shouldn't this therefore return if this is the principle, or
        # set the flag after we have tested the lattice?

        if ("GXPARM.XDS" not in self._xds_data_files
                and PhilIndex.params.xds.integrate.reintegrate):
            logger.debug(
                "Resetting integrater, to ensure refined orientation is used")
            self.set_integrater_done(False)

        if (not self.get_integrater_reindex_matrix() and not self._intgr_cell
                and PhilIndex.params.xia2.settings.lattice_rejection
                and not self.get_integrater_sweep().get_user_lattice()):
            correct = self.Correct()

            correct.set_data_range(
                self._intgr_wedge[0] + self.get_frame_offset(),
                self._intgr_wedge[1] + self.get_frame_offset(),
            )

            if self.get_polarization() > 0.0:
                correct.set_polarization(self.get_polarization())

            # FIXME should this be using the correctly transformed
            # cell or are the results ok without it?!

            correct.set_spacegroup_number(1)
            correct.set_cell(self._intgr_refiner_cell)

            correct.run()

            cell = correct.get_result("cell")
            cell_esd = correct.get_result("cell_esd")

            logger.debug("Postrefinement in P1 results:")
            logger.debug("%7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % tuple(cell))
            logger.debug("%7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" %
                         tuple(cell_esd))
            logger.debug("Deviations: %.2f pixels %.2f degrees" %
                         (correct.get_result("rmsd_pixel"),
                          correct.get_result("rmsd_phi")))

            p1_deviations = (
                correct.get_result("rmsd_pixel"),
                correct.get_result("rmsd_phi"),
            )

        # next run the postrefinement etc with the given
        # cell / lattice - this will be the assumed result...

        integrate_hkl = os.path.join(self.get_working_directory(),
                                     "INTEGRATE.HKL")

        if PhilIndex.params.xia2.settings.input.format.dynamic_shadowing:
            from dxtbx.serialize import load
            from dials.algorithms.shadowing.filter import filter_shadowed_reflections

            experiments_json = xparm_xds_to_experiments_json(
                os.path.join(self.get_working_directory(), "XPARM.XDS"),
                self.get_working_directory(),
            )
            experiments = load.experiment_list(experiments_json,
                                               check_format=True)
            imageset = experiments[0].imageset
            masker = (imageset.get_format_class().get_instance(
                imageset.paths()[0]).get_masker())
            if masker is not None:
                integrate_filename = integrate_hkl_to_reflection_file(
                    integrate_hkl, experiments_json,
                    self.get_working_directory())
                reflections = flex.reflection_table.from_file(
                    integrate_filename)

                t0 = time.time()
                sel = filter_shadowed_reflections(experiments, reflections)
                shadowed = reflections.select(sel)
                t1 = time.time()
                logger.debug("Filtered %i reflections in %.1f seconds" %
                             (sel.count(True), t1 - t0))

                filter_hkl = os.path.join(self.get_working_directory(),
                                          "FILTER.HKL")
                with open(filter_hkl, "wb") as f:
                    detector = experiments[0].detector
                    for ref in shadowed:
                        p = detector[ref["panel"]]
                        ox, oy = p.get_raw_image_offset()
                        h, k, l = ref["miller_index"]
                        x, y, z = ref["xyzcal.px"]
                        dx, dy, dz = (2, 2, 2)
                        print(
                            "%i %i %i %.1f %.1f %.1f %.1f %.1f %.1f" %
                            (h, k, l, x + ox, y + oy, z, dx, dy, dz),
                            file=f,
                        )
                t2 = time.time()
                logger.debug("Written FILTER.HKL in %.1f seconds" % (t2 - t1))

        correct = self.Correct()

        correct.set_data_range(
            self._intgr_wedge[0] + self.get_frame_offset(),
            self._intgr_wedge[1] + self.get_frame_offset(),
        )

        if self.get_polarization() > 0.0:
            correct.set_polarization(self.get_polarization())

        # BUG # 2695 probably comes from here - need to check...
        # if the pointless interface comes back with a different
        # crystal setting then the unit cell stored in self._intgr_cell
        # needs to be set to None...

        if self.get_integrater_spacegroup_number():
            correct.set_spacegroup_number(
                self.get_integrater_spacegroup_number())
            if not self._intgr_cell:
                raise RuntimeError("no unit cell to recycle")
            correct.set_cell(self._intgr_cell)

        # BUG # 3113 - new version of XDS will try and figure the
        # best spacegroup out from the intensities (and get it wrong!)
        # unless we set the spacegroup and cell explicitly

        if not self.get_integrater_spacegroup_number():
            cell = self._intgr_refiner_cell
            lattice = self._intgr_refiner.get_refiner_lattice()
            spacegroup_number = lattice_to_spacegroup_number(lattice)

            # this should not prevent the postrefinement from
            # working correctly, else what is above would not
            # work correctly (the postrefinement test)

            correct.set_spacegroup_number(spacegroup_number)
            correct.set_cell(cell)

            logger.debug("Setting spacegroup to: %d" % spacegroup_number)
            logger.debug("Setting cell to: %.2f %.2f %.2f %.2f %.2f %.2f" %
                         tuple(cell))

        if self.get_integrater_reindex_matrix():

            # bug! if the lattice is not primitive the values in this
            # reindex matrix need to be multiplied by a constant which
            # depends on the Bravais lattice centering.

            lattice = self._intgr_refiner.get_refiner_lattice()

            matrix = self.get_integrater_reindex_matrix()
            matrix = scitbx.matrix.sqr(matrix).transpose().elems
            matrix = r_to_rt(matrix)

            if lattice[1] == "P":
                mult = 1
            elif lattice[1] == "C" or lattice[1] == "I":
                mult = 2
            elif lattice[1] == "R":
                mult = 3
            elif lattice[1] == "F":
                mult = 4
            else:
                raise RuntimeError("unknown multiplier for lattice %s" %
                                   lattice)

            logger.debug("REIDX multiplier for lattice %s: %d" %
                         (lattice, mult))

            mult_matrix = [mult * m for m in matrix]

            logger.debug("REIDX set to %d %d %d %d %d %d %d %d %d %d %d %d" %
                         tuple(mult_matrix))
            correct.set_reindex_matrix(mult_matrix)

        correct.run()

        # record the log file -

        pname, xname, dname = self.get_integrater_project_info()
        sweep = self.get_integrater_sweep_name()
        FileHandler.record_log_file(
            f"{pname} {xname} {dname} {sweep} CORRECT",
            os.path.join(self.get_working_directory(), "CORRECT.LP"),
        )

        FileHandler.record_more_data_file(
            f"{pname} {xname} {dname} {sweep} CORRECT",
            os.path.join(self.get_working_directory(), "XDS_ASCII.HKL"),
        )

        # erm. just to be sure
        if self.get_integrater_reindex_matrix() and correct.get_reindex_used():
            raise RuntimeError("Reindex panic!")

        # get the reindex operation used, which may be useful if none was
        # set but XDS decided to apply one, e.g. #419.

        if (not self.get_integrater_reindex_matrix()
                and correct.get_reindex_used()):
            # convert this reindex operation to h, k, l form: n.b. this
            # will involve dividing through by the lattice centring multiplier

            matrix = rt_to_r(correct.get_reindex_used())

            matrix = scitbx.matrix.sqr(matrix).transpose().elems

            lattice = self._intgr_refiner.get_refiner_lattice()

            if lattice[1] == "P":
                mult = 1.0
            elif lattice[1] == "C" or lattice[1] == "I":
                mult = 2.0
            elif lattice[1] == "R":
                mult = 3.0
            elif lattice[1] == "F":
                mult = 4.0

            matrix = [m / mult for m in matrix]

            reindex_op = mat_to_symop(matrix)

            # assign this to self: will this reset?! make for a leaky
            # abstraction and just assign this...

            # self.set_integrater_reindex_operator(reindex)

            self._intgr_reindex_operator = reindex_op

        # should get some interesting stuff from the XDS correct file
        # here, for instance the resolution range to use in integration
        # (which should be fed back if not fast) and so on...

        self._intgr_corrected_hklout = os.path.join(
            self.get_working_directory(), "XDS_ASCII.HKL")

        # also record the batch range - needed for the analysis of the
        # radiation damage in chef...

        self._intgr_batches_out = (self._intgr_wedge[0], self._intgr_wedge[1])

        # FIXME perhaps I should also feedback the GXPARM file here??
        for file in ["GXPARM.XDS"]:
            self._xds_data_files[file] = correct.get_output_data_file(file)

        # record the postrefined cell parameters
        self._intgr_cell = correct.get_result("cell")
        self._intgr_n_ref = correct.get_result("n_ref")

        logger.debug('Postrefinement in "correct" spacegroup results:')
        logger.debug("%7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" %
                     tuple(correct.get_result("cell")))
        logger.debug("%7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" %
                     tuple(correct.get_result("cell_esd")))
        logger.debug(
            "Deviations: %.2f pixels %.2f degrees" %
            (correct.get_result("rmsd_pixel"), correct.get_result("rmsd_phi")))

        logger.debug("Error correction parameters: A=%.3f B=%.3f" %
                     correct.get_result("sdcorrection"))

        # compute misorientation of axes

        xparm_file = os.path.join(self.get_working_directory(), "GXPARM.XDS")

        handle = xparm.reader()
        handle.read_file(xparm_file)

        rotn = handle.rotation_axis
        beam = handle.beam_vector

        dot = sum(rotn[j] * beam[j] for j in range(3))
        r = math.sqrt(sum(rotn[j] * rotn[j] for j in range(3)))
        b = math.sqrt(sum(beam[j] * beam[j] for j in range(3)))

        rtod = 180.0 / math.pi

        angle = rtod * math.fabs(0.5 * math.pi - math.acos(dot / (r * b)))

        logger.debug("Axis misalignment %.2f degrees" % angle)

        correct_deviations = (
            correct.get_result("rmsd_pixel"),
            correct.get_result("rmsd_phi"),
        )

        if p1_deviations:
            # compare and reject if both > 50% higher - though adding a little
            # flexibility - 0.5 pixel / osc width slack.

            pixel = p1_deviations[0]
            phi = math.sqrt(0.05 * 0.05 + p1_deviations[1] * p1_deviations[1])

            threshold = PhilIndex.params.xia2.settings.lattice_rejection_threshold
            logger.debug("RMSD ratio: %.2f" % (correct_deviations[0] / pixel))
            logger.debug("RMSPhi ratio: %.2f" % (correct_deviations[1] / phi))

            if (correct_deviations[0] / pixel > threshold
                    and correct_deviations[1] / phi > threshold):

                logger.info(
                    "Eliminating this indexing solution as postrefinement")
                logger.info("deviations rather high relative to triclinic")
                raise BadLatticeError(
                    "high relative deviations in postrefinement")

        if (not PhilIndex.params.dials.fast_mode
                and not PhilIndex.params.xds.keep_outliers):
            # check for alien reflections and perhaps recycle - removing them
            correct_remove = correct.get_remove()
            if correct_remove:
                current_remove = set()
                final_remove = []

                # first ensure that there are no duplicate entries...
                if os.path.exists(
                        os.path.join(self.get_working_directory(),
                                     "REMOVE.HKL")):
                    with open(
                            os.path.join(self.get_working_directory(),
                                         "REMOVE.HKL")) as fh:
                        for line in fh.readlines():
                            h, k, l = list(map(int, line.split()[:3]))
                            z = float(line.split()[3])

                            if (h, k, l, z) not in current_remove:
                                current_remove.add((h, k, l, z))

                    for c in correct_remove:
                        if c in current_remove:
                            continue
                        final_remove.append(c)

                    logger.debug("%d alien reflections are already removed" %
                                 (len(correct_remove) - len(final_remove)))
                else:
                    # we want to remove all of the new dodgy reflections
                    final_remove = correct_remove

                z_min = PhilIndex.params.xds.z_min
                rejected = 0

                with open(
                        os.path.join(self.get_working_directory(),
                                     "REMOVE.HKL"), "w") as remove_hkl:

                    # write in the old reflections
                    for remove in current_remove:
                        z = remove[3]
                        if z >= z_min:
                            remove_hkl.write("%d %d %d %f\n" % remove)
                        else:
                            rejected += 1
                    logger.debug("Wrote %d old reflections to REMOVE.HKL" %
                                 (len(current_remove) - rejected))
                    logger.debug("Rejected %d as z < %f" % (rejected, z_min))

                    # and the new reflections
                    rejected = 0
                    used = 0
                    for remove in final_remove:
                        z = remove[3]
                        if z >= z_min:
                            used += 1
                            remove_hkl.write("%d %d %d %f\n" % remove)
                        else:
                            rejected += 1
                    logger.debug("Wrote %d new reflections to REMOVE.HKL" %
                                 (len(final_remove) - rejected))
                    logger.debug("Rejected %d as z < %f" % (rejected, z_min))

                # we want to rerun the finishing step so...
                # unless we have added no new reflections... or unless we
                # have not confirmed the point group (see SCI-398)

                if used and self.get_integrater_reindex_matrix():
                    self.set_integrater_finish_done(False)

        else:
            logger.debug(
                "Going quickly so not removing %d outlier reflections..." %
                len(correct.get_remove()))

        # Convert INTEGRATE.HKL to MTZ format and reapply any reindexing
        # operations or spacegroup changes, to allow use with CCP4 / Aimless
        # for scaling

        hklout = os.path.splitext(integrate_hkl)[0] + ".mtz"
        self._factory.set_working_directory(self.get_working_directory())
        pointless = self._factory.Pointless()
        pointless.set_xdsin(integrate_hkl)
        pointless.set_hklout(hklout)
        pointless.xds_to_mtz()

        integrate_mtz = hklout

        if (self.get_integrater_reindex_operator()
                or self.get_integrater_spacegroup_number()):

            logger.debug("Reindexing things to MTZ")

            reindex = Reindex()
            reindex.set_working_directory(self.get_working_directory())
            auto_logfiler(reindex)

            if self.get_integrater_reindex_operator():
                reindex.set_operator(self.get_integrater_reindex_operator())

            if self.get_integrater_spacegroup_number():
                reindex.set_spacegroup(self.get_integrater_spacegroup_number())

            hklout = "%s_reindex.mtz" % os.path.splitext(integrate_mtz)[0]

            reindex.set_hklin(integrate_mtz)
            reindex.set_hklout(hklout)
            reindex.reindex()
            integrate_mtz = hklout

        experiments_json = xparm_xds_to_experiments_json(
            self._xds_data_files["GXPARM.XDS"], self.get_working_directory())
        pname, xname, dname = self.get_integrater_project_info()
        sweep = self.get_integrater_sweep_name()
        FileHandler.record_more_data_file(f"{pname} {xname} {dname} {sweep}",
                                          experiments_json)
        FileHandler.record_more_data_file(
            f"{pname} {xname} {dname} {sweep} INTEGRATE", integrate_mtz)

        self._intgr_experiments_filename = experiments_json

        return integrate_mtz
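
The lattice-centring multiplier appears twice in this method (once as an int, once as a float); it could be factored into a helper along these lines (a hypothetical refactoring, keyed on the second letter of the lattice symbol exactly as above):

    def centring_multiplier(lattice):
        """REIDX multiplier for a Bravais lattice symbol such as 'mC':
        primitive 1, face/body centred 2, rhombohedral 3, all-face centred 4."""
        multipliers = {"P": 1.0, "C": 2.0, "I": 2.0, "R": 3.0, "F": 4.0}
        try:
            return multipliers[lattice[1]]
        except (IndexError, KeyError):
            raise RuntimeError("unknown multiplier for lattice %s" % lattice)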
Example #15
    def _integrate(self):
        '''Actually do the integration - in XDS terms this will mean running
    DEFPIX and INTEGRATE to measure all the reflections.'''

        experiment = self._intgr_refiner.get_refined_experiment_list(
            self.get_integrater_epoch())[0]
        crystal_model = experiment.crystal
        self._intgr_refiner_cell = crystal_model.get_unit_cell().parameters()

        images_str = '%d to %d' % tuple(self._intgr_wedge)
        cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % tuple(
            self._intgr_refiner_cell)

        if len(self._fp_directory) <= 50:
            dirname = self._fp_directory
        else:
            dirname = '...%s' % self._fp_directory[-46:]

        Journal.block(
            'integrating', self._intgr_sweep_name, 'XDS', {
                'images': images_str,
                'cell': cell_str,
                'lattice': self._intgr_refiner.get_refiner_lattice(),
                'template': self._fp_template,
                'directory': dirname,
                'resolution': '%.2f' % self._intgr_reso_high
            })

        defpix = self.Defpix()

        # pass in the correct data

        for file in [
                'X-CORRECTIONS.cbf', 'Y-CORRECTIONS.cbf', 'BKGINIT.cbf',
                'XPARM.XDS'
        ]:
            defpix.set_input_data_file(file, self._xds_data_files[file])

        defpix.set_data_range(self._intgr_wedge[0] + self.get_frame_offset(),
                              self._intgr_wedge[1] + self.get_frame_offset())

        if self.get_integrater_high_resolution() > 0.0 and \
               self.get_integrater_user_resolution():
            Debug.write('Setting resolution limit in DEFPIX to %.2f' % \
                        self.get_integrater_high_resolution())
            defpix.set_resolution_high(self.get_integrater_high_resolution())
            defpix.set_resolution_low(self.get_integrater_low_resolution())

        elif self.get_integrater_low_resolution():
            Debug.write('Setting low resolution limit in DEFPIX to %.2f' % \
                        self.get_integrater_low_resolution())
            defpix.set_resolution_high(0.0)
            defpix.set_resolution_low(self.get_integrater_low_resolution())

        defpix.run()

        # and gather the result files
        for file in ['BKGPIX.cbf', 'ABS.cbf']:
            self._xds_data_files[file] = defpix.get_output_data_file(file)

        integrate = self.Integrate()

        if self._xds_integrate_parameters:
            integrate.set_updates(self._xds_integrate_parameters)

        # decide what images we are going to process, if not already
        # specified

        if not self._intgr_wedge:
            images = self.get_matching_images()
            self.set_integrater_wedge(min(images), max(images))

        integrate.set_data_range(
            self._intgr_wedge[0] + self.get_frame_offset(),
            self._intgr_wedge[1] + self.get_frame_offset())

        for file in [
                'X-CORRECTIONS.cbf', 'Y-CORRECTIONS.cbf', 'BLANK.cbf',
                'BKGPIX.cbf', 'GAIN.cbf'
        ]:
            integrate.set_input_data_file(file, self._xds_data_files[file])

        if 'GXPARM.XDS' in self._xds_data_files:
            Debug.write('Using globally refined parameters')
            integrate.set_input_data_file('XPARM.XDS',
                                          self._xds_data_files['GXPARM.XDS'])
            integrate.set_refined_xparm()
        else:
            integrate.set_input_data_file('XPARM.XDS',
                                          self._xds_data_files['XPARM.XDS'])

        integrate.run()

        self._intgr_per_image_statistics = integrate.get_per_image_statistics()
        Chatter.write(self.show_per_image_statistics())

        # record the log file -

        pname, xname, dname = self.get_integrater_project_info()
        sweep = self.get_integrater_sweep_name()
        FileHandler.record_log_file('%s %s %s %s INTEGRATE' % \
                                    (pname, xname, dname, sweep),
                                    os.path.join(self.get_working_directory(),
                                                 'INTEGRATE.LP'))

        # and copy the first pass INTEGRATE.HKL...

        lattice = self._intgr_refiner.get_refiner_lattice()
        if not os.path.exists(
                os.path.join(self.get_working_directory(),
                             'INTEGRATE-%s.HKL' % lattice)):
            here = self.get_working_directory()
            shutil.copyfile(os.path.join(here, 'INTEGRATE.HKL'),
                            os.path.join(here, 'INTEGRATE-%s.HKL' % lattice))

        # record INTEGRATE.HKL for e.g. BLEND.

        FileHandler.record_more_data_file(
            '%s %s %s %s INTEGRATE' % (pname, xname, dname, sweep),
            os.path.join(self.get_working_directory(), 'INTEGRATE.HKL'))

        # should the existence of these require that I rerun the
        # integration or can we assume that the application of a
        # sensible resolution limit will achieve this??

        self._xds_integrate_parameters = integrate.get_updates()

        # record the mosaic spread &c.

        m_min, m_mean, m_max = integrate.get_mosaic()
        self.set_integrater_mosaic_min_mean_max(m_min, m_mean, m_max)

        Chatter.write('Mosaic spread: %.3f < %.3f < %.3f' % \
                      self.get_integrater_mosaic_min_mean_max())

        return os.path.join(self.get_working_directory(), 'INTEGRATE.HKL')
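
The DEFPIX resolution handling above reduces to a small policy: an explicit user dmin wins (together with the recorded dmax), otherwise only the low-resolution limit is constrained. As a sketch (function name hypothetical):

    def defpix_resolution_limits(user_set, dmin, dmax):
        """(high, low) limits to pass to DEFPIX per the branching above;
        high == 0.0 means no high-resolution cutoff, None means defaults."""
        if user_set and dmin > 0.0:
            return dmin, dmax
        if dmax:
            return 0.0, dmax
        return None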
Example #16
  def _sort_together_data_xds_one_sweep(self):

    assert len(self._sweep_information) == 1

    epoch = list(self._sweep_information)[0]
    hklin = self._sweep_information[epoch]['scaled_reflections']

    if Flags.get_chef():
      self._sweep_information_to_chef()

    if self.get_scaler_reference_reflection_file():
      md = self._factory.Mtzdump()
      md.set_hklin(self.get_scaler_reference_reflection_file())
      md.dump()

      spacegroups = [md.get_spacegroup()]
      reindex_operator = 'h,k,l'

    elif self._scalr_input_spacegroup:
      Debug.write('Assigning user input spacegroup: %s' % \
                  self._scalr_input_spacegroup)
      spacegroups = [self._scalr_input_spacegroup]
      reindex_operator = 'h,k,l'

    else:
      pointless = self._factory.Pointless()
      pointless.set_hklin(hklin)
      pointless.decide_spacegroup()

      FileHandler.record_log_file('%s %s pointless' % \
                                  (self._scalr_pname,
                                   self._scalr_xname),
                                  pointless.get_log_file())

      spacegroups = pointless.get_likely_spacegroups()
      reindex_operator = pointless.get_spacegroup_reindex_operator()


    self._scalr_likely_spacegroups = spacegroups
    spacegroup = self._scalr_likely_spacegroups[0]

    self._scalr_reindex_operator = reindex_operator

    Chatter.write('Likely spacegroups:')
    for spag in self._scalr_likely_spacegroups:
      Chatter.write('%s' % spag)

    Chatter.write(
        'Reindexing to first spacegroup setting: %s (%s)' % \
        (spacegroup, clean_reindex_operator(reindex_operator)))

    hklout = os.path.join(self.get_working_directory(),
                          '%s_%s_reindex.mtz' % \
                          (self._scalr_pname, self._scalr_xname))

    FileHandler.record_temporary_file(hklout)

    if reindex_operator == '[h,k,l]':
      # just assign spacegroup

      from iotbx import mtz
      from cctbx import sgtbx

      s = sgtbx.space_group(sgtbx.space_group_symbols(
           str(spacegroup)).hall())

      m = mtz.object(hklin)
      m.set_space_group(s).write(hklout)
      self._scalr_cell = m.crystals()[-1].unit_cell().parameters()
      Debug.write(
          'Updating unit cell to %.2f %.2f %.2f %.2f %.2f %.2f' % \
          tuple(self._scalr_cell))
      del(m)
      del(s)

    else:
      ri = self._factory.Reindex()
      ri.set_hklin(hklin)
      ri.set_hklout(hklout)
      ri.set_spacegroup(spacegroup)
      ri.set_operator(reindex_operator)
      ri.reindex()

      Debug.write(
          'Updating unit cell to %.2f %.2f %.2f %.2f %.2f %.2f' % \
          tuple(ri.get_cell()))
      self._scalr_cell = tuple(ri.get_cell())

    hklin = hklout
    hklout = os.path.join(self.get_working_directory(),
                          '%s_%s_sorted.mtz' % \
                          (self._scalr_pname, self._scalr_xname))

    s = self._factory.Sortmtz()
    s.set_hklin(hklin)
    s.set_hklout(hklout)

    s.sort(vrset=-99999999.0)

    self._prepared_reflections = hklout

    return
Example #17
  def _sort_together_data_ccp4(self):
    '''Sort together in the right order (rebatching as we go) the sweeps
    we want to scale together.'''

    max_batches = 0

    for epoch in self._sweep_handler.get_epochs():

      si = self._sweep_handler.get_sweep_information(epoch)
      pname, xname, dname = si.get_project_info()
      sname = si.get_sweep_name()

      # limit the reflections - e.g. if we are re-running the scaling step
      # on just a subset of the integrated data

      hklin = si.get_reflections()
      limit_batch_range = None
      for sweep in PhilIndex.params.xia2.settings.sweep:
        if sweep.id == sname and sweep.range is not None:
          limit_batch_range = sweep.range
          break

      if limit_batch_range is not None:
        Debug.write('Limiting batch range for %s: %s' %(sname, limit_batch_range))
        start, end = limit_batch_range
        hklout = os.path.splitext(hklin)[0] + '_tmp.mtz'
        FileHandler.record_temporary_file(hklout)
        rb = self._factory.Pointless()
        rb.set_hklin(hklin)
        rb.set_hklout(hklout)
        rb.limit_batches(start, end)
        si.set_reflections(hklout)
        si.set_batches(limit_batch_range)

      # keep a count of the maximum number of batches in a block -
      # this will be used to make rebatch work below.

      hklin = si.get_reflections()
      md = self._factory.Mtzdump()
      md.set_hklin(hklin)
      md.dump()

      batches = md.get_batches()
      if 1 + max(batches) - min(batches) > max_batches:
        max_batches = max(batches) - min(batches) + 1

      datasets = md.get_datasets()

      Debug.write('In reflection file %s found:' % hklin)
      for d in datasets:
        Debug.write('... %s' % d)

      dataset_info = md.get_dataset_info(datasets[0])

    Debug.write('Biggest sweep has %d batches' % max_batches)
    max_batches = nifty_power_of_ten(max_batches)

    # then rebatch the files, to make sure that the batch numbers are
    # in the same order as the epochs of data collection.

    counter = 0

    for epoch in self._sweep_handler.get_epochs():

      si = self._sweep_handler.get_sweep_information(epoch)
      rb = self._factory.Rebatch()

      hklin = si.get_reflections()

      pname, xname, dname = si.get_project_info()
      sname = si.get_sweep_name()

      hklout = os.path.join(self.get_working_directory(),
                            '%s_%s_%s_%s_integrated.mtz' % \
                            (pname, xname, dname, sname))

      first_batch = min(si.get_batches())
      si.set_batch_offset(counter * max_batches - first_batch + 1)

      rb.set_hklin(hklin)
      rb.set_first_batch(counter * max_batches + 1)
      rb.set_project_info(pname, xname, dname)
      rb.set_hklout(hklout)

      new_batches = rb.rebatch()

      # update the "input information"

      si.set_reflections(hklout)
      si.set_batches(new_batches)

      # update the counter & recycle

      counter += 1

    s = self._factory.Sortmtz()

    hklout = os.path.join(self.get_working_directory(),
                          '%s_%s_sorted.mtz' % \
                          (self._scalr_pname, self._scalr_xname))

    s.set_hklout(hklout)

    for epoch in self._sweep_handler.get_epochs():
      s.add_hklin(self._sweep_handler.get_sweep_information(
          epoch).get_reflections())

    s.sort()

    # verify that the measurements are in the correct setting
    # choice for the spacegroup

    hklin = hklout
    hklout = hklin.replace('sorted.mtz', 'temp.mtz')

    if not self.get_scaler_reference_reflection_file():

      p = self._factory.Pointless()

      FileHandler.record_log_file('%s %s pointless' % \
                                  (self._scalr_pname,
                                   self._scalr_xname),
                                  p.get_log_file())

      if len(self._sweep_handler.get_epochs()) > 1:
        p.set_hklin(hklin)
      else:
        # permit the use of pointless preparation...
        epoch = self._sweep_handler.get_epochs()[0]
        p.set_hklin(self._prepare_pointless_hklin(
            hklin, self._sweep_handler.get_sweep_information(
            epoch).get_integrater().get_phi_width()))

      if self._scalr_input_spacegroup:
        Debug.write('Assigning user input spacegroup: %s' % \
                    self._scalr_input_spacegroup)

        p.decide_spacegroup()
        spacegroup = p.get_spacegroup()
        reindex_operator = p.get_spacegroup_reindex_operator()

        Debug.write('Pointless thought %s (reindex as %s)' % \
                    (spacegroup, reindex_operator))

        spacegroup = self._scalr_input_spacegroup
        reindex_operator = 'h,k,l'

      # NB: this branch is deliberately disabled by the trailing 'and False'
      elif Flags.get_small_molecule() and False:
        p.decide_pointgroup()
        spacegroup = p.get_pointgroup()
        reindex_operator = p.get_reindex_operator()

        Debug.write('Pointless thought %s (reindex as %s)' % \
                    (spacegroup, reindex_operator))
        self._scalr_likely_spacegroups = [spacegroup]

      else:
        p.decide_spacegroup()
        spacegroup = p.get_spacegroup()
        reindex_operator = p.get_spacegroup_reindex_operator()

        Debug.write('Pointless thought %s (reindex as %s)' % \
                    (spacegroup, reindex_operator))

      if self._scalr_input_spacegroup:
        self._scalr_likely_spacegroups = [self._scalr_input_spacegroup]
      else:
        self._scalr_likely_spacegroups = p.get_likely_spacegroups()

      Chatter.write('Likely spacegroups:')
      for spag in self._scalr_likely_spacegroups:
        Chatter.write('%s' % spag)

      Chatter.write(
          'Reindexing to first spacegroup setting: %s (%s)' % \
          (spacegroup, clean_reindex_operator(reindex_operator)))

    else:

      md = self._factory.Mtzdump()
      md.set_hklin(self.get_scaler_reference_reflection_file())
      md.dump()

      spacegroup = md.get_spacegroup()
      reindex_operator = 'h,k,l'

      self._scalr_likely_spacegroups = [spacegroup]

      Debug.write('Assigning spacegroup %s from reference' % \
                  spacegroup)

    # then run reindex to set the correct spacegroup

    ri = self._factory.Reindex()
    ri.set_hklin(hklin)
    ri.set_hklout(hklout)
    ri.set_spacegroup(spacegroup)
    ri.set_operator(reindex_operator)
    ri.reindex()

    FileHandler.record_temporary_file(hklout)

    # then resort the reflections (one last time!)

    s = self._factory.Sortmtz()

    temp = hklin
    hklin = hklout
    hklout = temp

    s.add_hklin(hklin)
    s.set_hklout(hklout)

    s.sort()

    # done preparing!

    self._prepared_reflections = s.get_hklout()

    return
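The rebatching above relies on `nifty_power_of_ten` to pick a round block size so that each sweep's batches land in their own non-overlapping range. A minimal sketch of the idea, assuming (this is an assumption, not the xia2 source) that the helper returns the smallest power of ten no less than its argument:

# A minimal sketch (assumed behaviour, not the xia2 implementation) of the
# batch renumbering used above: every sweep is shifted onto its own block of
# batch numbers, with the block width a power of ten at least as wide as the
# biggest sweep, so merged batch numbers never collide and the owning sweep
# is readable at a glance.
import math

def nifty_power_of_ten(n):
    # assumed behaviour: smallest power of ten >= n, e.g. 360 -> 1000
    return 10 ** int(math.ceil(math.log10(n)))

def batch_offsets(sweep_batches):
    """sweep_batches: [(first_batch, last_batch), ...] in epoch order;
    returns the offset added to each sweep's batch numbers."""
    max_batches = max(1 + last - first for first, last in sweep_batches)
    block = nifty_power_of_ten(max_batches)
    return [counter * block - first + 1
            for counter, (first, _) in enumerate(sweep_batches)]

# three 360-image sweeps starting at batch 1 land at 1-360, 1001-1360 and
# 2001-2360 respectively:
print(batch_offsets([(1, 360), (1, 360), (1, 360)]))  # [0, 1000, 2000]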
Example #18
  def _sort_together_data_xds(self):

    if len(self._sweep_information) == 1:
      return self._sort_together_data_xds_one_sweep()

    max_batches = 0

    for epoch in self._sweep_information.keys():

      hklin = self._sweep_information[epoch]['scaled_reflections']

      md = self._factory.Mtzdump()
      md.set_hklin(hklin)
      md.dump()

      if self._sweep_information[epoch]['batches'] == [0, 0]:

        Chatter.write('Getting batches from %s' % hklin)
        batches = md.get_batches()
        self._sweep_information[epoch]['batches'] = [min(batches),
                                                     max(batches)]
        Chatter.write('=> %d to %d' % (min(batches),
                                       max(batches)))

      batches = self._sweep_information[epoch]['batches']
      if 1 + max(batches) - min(batches) > max_batches:
        max_batches = max(batches) - min(batches) + 1

      datasets = md.get_datasets()

      Debug.write('In reflection file %s found:' % hklin)
      for d in datasets:
        Debug.write('... %s' % d)

      dataset_info = md.get_dataset_info(datasets[0])

    Debug.write('Biggest sweep has %d batches' % max_batches)
    max_batches = nifty_power_of_ten(max_batches)

    epochs = sorted(self._sweep_information.keys())

    counter = 0

    for epoch in epochs:
      rb = self._factory.Rebatch()

      hklin = self._sweep_information[epoch]['scaled_reflections']

      pname = self._sweep_information[epoch]['pname']
      xname = self._sweep_information[epoch]['xname']
      dname = self._sweep_information[epoch]['dname']

      sname = self._sweep_information[epoch]['sname']

      hklout = os.path.join(self.get_working_directory(),
                            '%s_%s_%s_%d.mtz' % \
                            (pname, xname, dname, counter))

      # we will want to delete this on exit
      FileHandler.record_temporary_file(hklout)

      # record this for future reference - will be needed in the
      # radiation damage analysis...

      # hack - reset this as it gets in a muddle...
      intgr = self._sweep_information[epoch]['integrater']
      self._sweep_information[epoch][
          'batches'] = intgr.get_integrater_batches()

      first_batch = min(self._sweep_information[epoch]['batches'])
      self._sweep_information[epoch][
          'batch_offset'] = counter * max_batches - first_batch + 1

      rb.set_hklin(hklin)
      rb.set_first_batch(counter * max_batches + 1)
      rb.set_hklout(hklout)

      new_batches = rb.rebatch()

      # update the "input information"

      self._sweep_information[epoch]['hklin'] = hklout
      self._sweep_information[epoch]['batches'] = new_batches

      # update the counter & recycle

      counter += 1

    if Flags.get_chef():
      self._sweep_information_to_chef()

    s = self._factory.Sortmtz()

    hklout = os.path.join(self.get_working_directory(),
                          '%s_%s_sorted.mtz' % \
                          (self._scalr_pname, self._scalr_xname))

    s.set_hklout(hklout)

    for epoch in epochs:
      s.add_hklin(self._sweep_information[epoch]['hklin'])

    s.sort(vrset=-99999999.0)

    self._prepared_reflections = hklout

    if self.get_scaler_reference_reflection_file():
      md = self._factory.Mtzdump()
      md.set_hklin(self.get_scaler_reference_reflection_file())
      md.dump()

      spacegroups = [md.get_spacegroup()]
      reindex_operator = 'h,k,l'

    else:

      pointless = self._factory.Pointless()
      pointless.set_hklin(hklout)
      pointless.decide_spacegroup()

      FileHandler.record_log_file('%s %s pointless' % \
                                  (self._scalr_pname,
                                   self._scalr_xname),
                                  pointless.get_log_file())

      spacegroups = pointless.get_likely_spacegroups()
      reindex_operator = pointless.get_spacegroup_reindex_operator()

      if self._scalr_input_spacegroup:
        Debug.write('Assigning user input spacegroup: %s' % \
                    self._scalr_input_spacegroup)
        spacegroups = [self._scalr_input_spacegroup]
        reindex_operator = 'h,k,l'

    self._scalr_likely_spacegroups = spacegroups
    spacegroup = self._scalr_likely_spacegroups[0]

    self._scalr_reindex_operator = reindex_operator

    Chatter.write('Likely spacegroups:')
    for spag in self._scalr_likely_spacegroups:
      Chatter.write('%s' % spag)

    Chatter.write(
        'Reindexing to first spacegroup setting: %s (%s)' % \
        (spacegroup, clean_reindex_operator(reindex_operator)))

    hklin = self._prepared_reflections
    hklout = os.path.join(self.get_working_directory(),
                          '%s_%s_reindex.mtz' % \
                          (self._scalr_pname, self._scalr_xname))

    FileHandler.record_temporary_file(hklout)

    ri = self._factory.Reindex()
    ri.set_hklin(hklin)
    ri.set_hklout(hklout)
    ri.set_spacegroup(spacegroup)
    ri.set_operator(reindex_operator)
    ri.reindex()

    hklin = hklout
    hklout = os.path.join(self.get_working_directory(),
                          '%s_%s_sorted.mtz' % \
                          (self._scalr_pname, self._scalr_xname))

    s = self._factory.Sortmtz()
    s.set_hklin(hklin)
    s.set_hklout(hklout)

    s.sort(vrset=-99999999.0)

    self._prepared_reflections = hklout

    Debug.write(
        'Updating unit cell to %.2f %.2f %.2f %.2f %.2f %.2f' % \
        tuple(ri.get_cell()))
    self._scalr_cell = tuple(ri.get_cell())

    return
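One detail worth noting in the loop above: `batches == [0, 0]` acts as an "unknown" sentinel, filled in from the MTZ header on first use and cached for later passes. A minimal sketch of that pattern (names hypothetical):

# a minimal sketch (hypothetical names) of the lazy batch-range fill above
def batch_range(sweep_info, read_batches_from_mtz):
    """read_batches_from_mtz() should yield the batch numbers recorded in
    the reflection file header; [0, 0] marks the range as not yet known."""
    if sweep_info['batches'] == [0, 0]:
        batches = list(read_batches_from_mtz())
        sweep_info['batches'] = [min(batches), max(batches)]
    return sweep_info['batches']

info = {'batches': [0, 0]}
print(batch_range(info, lambda: range(1, 361)))  # [1, 360]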
Example #19
    def _integrate(self):
        """Actually do the integration - in XDS terms this will mean running
        DEFPIX and INTEGRATE to measure all the reflections."""

        images_str = "%d to %d" % tuple(self._intgr_wedge)
        cell_str = "%.2f %.2f %.2f %.2f %.2f %.2f" % tuple(self._intgr_cell)

        if len(self._fp_directory) <= 50:
            dirname = self._fp_directory
        else:
            dirname = "...%s" % self._fp_directory[-46:]

        Journal.block(
            "integrating",
            self._intgr_sweep_name,
            "DIALS",
            {
                "images": images_str,
                "cell": cell_str,
                "lattice": self.get_integrater_refiner().get_refiner_lattice(),
                "template": self._fp_template,
                "directory": dirname,
                "resolution": "%.2f" % self._intgr_reso_high,
            },
        )

        integrate = self.Integrate()

        # decide what images we are going to process, if not already
        # specified

        if not self._intgr_wedge:
            images = self.get_matching_images()
            self.set_integrater_wedge(min(images), max(images))

        imageset = self.get_imageset()
        beam = imageset.get_beam()
        detector = imageset.get_detector()

        d_min_limit = detector.get_max_resolution(beam.get_s0())
        if (d_min_limit > self._intgr_reso_high or
                PhilIndex.params.xia2.settings.resolution.keep_all_reflections
            ):
            Debug.write("Overriding high resolution limit: %f => %f" %
                        (self._intgr_reso_high, d_min_limit))
            self._intgr_reso_high = d_min_limit

        integrate.set_experiments_filename(self._intgr_experiments_filename)
        integrate.set_reflections_filename(self._intgr_indexed_filename)
        if PhilIndex.params.dials.integrate.d_max:
            integrate.set_d_max(PhilIndex.params.dials.integrate.d_max)
        else:
            integrate.set_d_max(self._intgr_reso_low)
        if PhilIndex.params.dials.integrate.d_min:
            integrate.set_d_min(PhilIndex.params.dials.integrate.d_min)
        else:
            integrate.set_d_min(self._intgr_reso_high)
        pname, xname, dname = self.get_integrater_project_info()
        sweep = self.get_integrater_sweep_name()
        FileHandler.record_log_file(
            "%s %s %s %s INTEGRATE" % (pname, xname, dname, sweep),
            integrate.get_log_file(),
        )

        try:
            integrate.run()
        except xia2.Wrappers.Dials.Integrate.DIALSIntegrateError as e:
            s = str(e)
            if ("dials.integrate requires more memory than is available." in s
                    and not self._intgr_reso_high):
                # Try to estimate a more sensible resolution limit for integration
                # in case we were just integrating noise to the edge of the detector
                images = self._integrate_select_images_wedges()

                Debug.write(
                    "Integrating subset of images to estimate resolution limit.\n"
                    "Integrating images %s" % images)

                integrate = self.Integrate()
                integrate.set_experiments_filename(
                    self._intgr_experiments_filename)
                integrate.set_reflections_filename(
                    self._intgr_indexed_filename)
                integrate.set_d_max(self._intgr_reso_low)
                integrate.set_d_min(self._intgr_reso_high)
                for (start, stop) in images:
                    integrate.add_scan_range(
                        start - self.get_matching_images()[0],
                        stop - self.get_matching_images()[0],
                    )
                integrate.set_reflections_per_degree(1000)
                integrate.run()

                integrated_reflections = integrate.get_integrated_filename()

                from xia2.Wrappers.Dials.EstimateResolutionLimit import (
                    EstimateResolutionLimit, )

                d_min_estimater = EstimateResolutionLimit()
                d_min_estimater.set_working_directory(
                    self.get_working_directory())
                auto_logfiler(d_min_estimater)
                d_min_estimater.set_experiments_filename(
                    self._intgr_experiments_filename)
                d_min_estimater.set_reflections_filename(
                    integrated_reflections)
                d_min = d_min_estimater.run()

                Debug.write("Estimate for d_min: %.2f" % d_min)
                Debug.write("Re-running integration to this resolution limit")

                self._intgr_reso_high = d_min
                self.set_integrater_done(False)
                return
            raise Sorry(e)

        self._intgr_experiments_filename = integrate.get_integrated_experiments()

        # also record the batch range - needed for the analysis of the
        # radiation damage in chef...

        self._intgr_batches_out = (self._intgr_wedge[0], self._intgr_wedge[1])

        # FIXME (i) record the log file, (ii) get more information out from the
        # integration log on the quality of the data and (iii) the mosaic spread
        # range observed and R.M.S. deviations.

        self._intgr_integrated_reflections = integrate.get_integrated_reflections()
        if not os.path.isfile(self._intgr_integrated_reflections):
            raise RuntimeError("Integration failed: %s does not exist." %
                               self._intgr_integrated_reflections)

        self._intgr_per_image_statistics = integrate.get_per_image_statistics()
        Chatter.write(self.show_per_image_statistics())

        report = self.Report()
        html_filename = os.path.join(
            self.get_working_directory(),
            "%i_dials.integrate.report.html" % report.get_xpid(),
        )
        report.set_html_filename(html_filename)
        report.run(wait_for_completion=True)
        FileHandler.record_html_file(
            "%s %s %s %s INTEGRATE" % (pname, xname, dname, sweep),
            html_filename)

        from dxtbx.serialize import load

        experiments = load.experiment_list(self._intgr_experiments_filename)
        profile = experiments.profiles()[0]
        mosaic = profile.sigma_m()
        try:
            m_min, m_max, m_mean = mosaic.min_max_mean().as_tuple()
            self.set_integrater_mosaic_min_mean_max(m_min, m_mean, m_max)
        except AttributeError:
            self.set_integrater_mosaic_min_mean_max(mosaic, mosaic, mosaic)

        Chatter.write("Mosaic spread: %.3f < %.3f < %.3f" %
                      self.get_integrater_mosaic_min_mean_max())

        return self._intgr_integrated_reflections
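The exception handler above implements a retry contract: on a memory failure, estimate a saner d_min from a wedge subset, store it, and mark the integration not-done so the framework calls `_integrate` again. The same contract distilled into a minimal, self-contained sketch (the callables are hypothetical stand-ins, not xia2 API):

# a minimal sketch (hypothetical callables) of the retry contract above
def integrate_with_retry(run_integration, estimate_d_min, max_attempts=3):
    """run_integration(d_min) raises MemoryError when the full-resolution
    job is too large; estimate_d_min() returns a tighter limit."""
    d_min = None
    for _ in range(max_attempts):
        try:
            return run_integration(d_min)
        except MemoryError:
            # don't integrate noise out to the detector edge - retry with
            # an estimated high-resolution cutoff
            d_min = estimate_d_min()
    raise RuntimeError('integration failed after %d attempts' % max_attempts)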
Example #20
    def _sort_together_data_xds(self):

        if len(self._sweep_information) == 1:
            return self._sort_together_data_xds_one_sweep()

        max_batches = 0

        for epoch in self._sweep_information.keys():

            hklin = self._sweep_information[epoch]['scaled_reflections']

            if self._sweep_information[epoch]['batches'] == [0, 0]:

                Chatter.write('Getting batches from %s' % hklin)
                batches = MtzUtils.batches_from_mtz(hklin)
                self._sweep_information[epoch]['batches'] = [
                    min(batches), max(batches)
                ]
                Chatter.write('=> %d to %d' % (min(batches), max(batches)))

            batches = self._sweep_information[epoch]['batches']
            if 1 + max(batches) - min(batches) > max_batches:
                max_batches = max(batches) - min(batches) + 1

        Debug.write('Biggest sweep has %d batches' % max_batches)
        max_batches = nifty_power_of_ten(max_batches)

        epochs = sorted(self._sweep_information.keys())

        counter = 0

        for epoch in epochs:

            hklin = self._sweep_information[epoch]['scaled_reflections']

            pname = self._sweep_information[epoch]['pname']
            xname = self._sweep_information[epoch]['xname']
            dname = self._sweep_information[epoch]['dname']

            sname = self._sweep_information[epoch]['sname']

            hklout = os.path.join(self.get_working_directory(),
                                  '%s_%s_%s_%d.mtz' % \
                                  (pname, xname, dname, counter))

            # we will want to delete this on exit
            FileHandler.record_temporary_file(hklout)

            # record this for future reference - will be needed in the
            # radiation damage analysis...

            # hack - reset this as it gets in a muddle...
            intgr = self._sweep_information[epoch]['integrater']
            self._sweep_information[epoch][
                'batches'] = intgr.get_integrater_batches()

            first_batch = min(self._sweep_information[epoch]['batches'])
            offset = counter * max_batches - first_batch + 1
            self._sweep_information[epoch]['batch_offset'] = offset

            from xia2.Modules.Scaler.rebatch import rebatch
            new_batches = rebatch(hklin,
                                  hklout,
                                  add_batch=offset,
                                  pname=pname,
                                  xname=xname,
                                  dname=dname)

            # update the "input information"

            self._sweep_information[epoch]['hklin'] = hklout
            self._sweep_information[epoch]['batches'] = new_batches

            # update the counter & recycle

            counter += 1

        s = self._factory.Sortmtz()

        hklout = os.path.join(self.get_working_directory(),
                              '%s_%s_sorted.mtz' % \
                              (self._scalr_pname, self._scalr_xname))

        s.set_hklout(hklout)

        for epoch in epochs:
            s.add_hklin(self._sweep_information[epoch]['hklin'])

        s.sort(vrset=-99999999.0)

        self._prepared_reflections = hklout

        if self.get_scaler_reference_reflection_file():
            spacegroups = [
                MtzUtils.space_group_name_from_mtz(
                    self.get_scaler_reference_reflection_file())
            ]
            reindex_operator = 'h,k,l'

        else:
            pointless = self._factory.Pointless()
            pointless.set_hklin(hklout)
            pointless.decide_spacegroup()

            FileHandler.record_log_file('%s %s pointless' % \
                                        (self._scalr_pname,
                                         self._scalr_xname),
                                        pointless.get_log_file())

            spacegroups = pointless.get_likely_spacegroups()
            reindex_operator = pointless.get_spacegroup_reindex_operator()

            if self._scalr_input_spacegroup:
                Debug.write('Assigning user input spacegroup: %s' % \
                            self._scalr_input_spacegroup)
                spacegroups = [self._scalr_input_spacegroup]
                reindex_operator = 'h,k,l'

        self._scalr_likely_spacegroups = spacegroups
        spacegroup = self._scalr_likely_spacegroups[0]

        self._scalr_reindex_operator = reindex_operator

        Chatter.write('Likely spacegroups:')
        for spag in self._scalr_likely_spacegroups:
            Chatter.write('%s' % spag)

        Chatter.write(
            'Reindexing to first spacegroup setting: %s (%s)' % \
            (spacegroup, clean_reindex_operator(reindex_operator)))

        hklin = self._prepared_reflections
        hklout = os.path.join(self.get_working_directory(),
                              '%s_%s_reindex.mtz' % \
                              (self._scalr_pname, self._scalr_xname))

        FileHandler.record_temporary_file(hklout)

        ri = self._factory.Reindex()
        ri.set_hklin(hklin)
        ri.set_hklout(hklout)
        ri.set_spacegroup(spacegroup)
        ri.set_operator(reindex_operator)
        ri.reindex()

        hklin = hklout
        hklout = os.path.join(self.get_working_directory(),
                              '%s_%s_sorted.mtz' % \
                              (self._scalr_pname, self._scalr_xname))

        s = self._factory.Sortmtz()
        s.set_hklin(hklin)
        s.set_hklout(hklout)

        s.sort(vrset=-99999999.0)

        self._prepared_reflections = hklout

        Debug.write(
            'Updating unit cell to %.2f %.2f %.2f %.2f %.2f %.2f' % \
            tuple(ri.get_cell()))
        self._scalr_cell = tuple(ri.get_cell())

        return
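Example #20 swaps the Mtzdump call for `MtzUtils.space_group_name_from_mtz`. A plausible minimal implementation, assuming cctbx's `iotbx.mtz` API (a sketch, not the xia2 helper itself):

# a minimal sketch, assuming iotbx.mtz from cctbx is available
from iotbx import mtz

def space_group_name_from_mtz(hklin):
    # read the space group recorded in the reference MTZ header so that
    # every sweep can be reindexed into the same setting
    return mtz.object(hklin).space_group().type().lookup_symbol()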
Example #21
    def _refine(self):
        for idxr in set(self._refinr_indexers.values()):
            experiments = idxr.get_indexer_experiment_list()

            indexed_experiments = idxr.get_indexer_payload(
                "experiments_filename")
            indexed_reflections = idxr.get_indexer_payload("indexed_filename")

            # If multiple sweeps but not doing joint refinement, get only the
            # relevant reflections.
            multi_sweep = PhilIndex.params.xia2.settings.multi_sweep_refinement
            if len(experiments) > 1 and not multi_sweep:
                xsweeps = idxr._indxr_sweeps
                assert len(xsweeps) == len(experiments)
                # Don't do joint refinement
                assert len(self._refinr_sweeps) == 1
                xsweep = self._refinr_sweeps[0]
                i = xsweeps.index(xsweep)
                experiments = experiments[i:i + 1]

                # Extract and output experiment and reflections for current sweep
                indexed_experiments = os.path.join(
                    self.get_working_directory(),
                    "%s_indexed.expt" % xsweep.get_name())
                indexed_reflections = os.path.join(
                    self.get_working_directory(),
                    "%s_indexed.refl" % xsweep.get_name())

                experiments.as_file(indexed_experiments)

                reflections = flex.reflection_table.from_file(
                    idxr.get_indexer_payload("indexed_filename"))
                sel = reflections["id"] == i
                assert sel.count(True) > 0
                imageset_id = reflections["imageset_id"].select(sel)
                assert imageset_id.all_eq(imageset_id[0])
                sel = reflections["imageset_id"] == imageset_id[0]
                reflections = reflections.select(sel)
                # set indexed reflections to id == 0 and imageset_id == 0
                reflections["id"].set_selected(reflections["id"] == i, 0)
                reflections["imageset_id"] = flex.int(len(reflections), 0)
                reflections.as_file(indexed_reflections)

            # currently only handle one lattice/refiner
            assert len(experiments.crystals()) == 1

            scan_static = PhilIndex.params.dials.refine.scan_static

            # Avoid doing scan-varying refinement on narrow wedges.
            scan_oscillation_ranges = []
            for experiment in experiments:
                start, end = experiment.scan.get_oscillation_range()
                scan_oscillation_ranges.append(end - start)

            min_oscillation_range = min(scan_oscillation_ranges)

            if (PhilIndex.params.dials.refine.scan_varying
                    and min_oscillation_range > 5
                    and not PhilIndex.params.dials.fast_mode):
                scan_varying = PhilIndex.params.dials.refine.scan_varying
            else:
                scan_varying = False

            if scan_static:
                refiner = self.Refine()
                refiner.set_experiments_filename(indexed_experiments)
                refiner.set_indexed_filename(indexed_reflections)
                refiner.set_scan_varying(False)
                refiner.run()
                self._refinr_experiments_filename = (
                    refiner.get_refined_experiments_filename())
                self._refinr_indexed_filename = refiner.get_refined_filename()
            else:
                self._refinr_experiments_filename = indexed_experiments
                self._refinr_indexed_filename = indexed_reflections

            if scan_varying:
                refiner = self.Refine()
                refiner.set_experiments_filename(
                    self._refinr_experiments_filename)
                refiner.set_indexed_filename(self._refinr_indexed_filename)
                if min_oscillation_range < 36:
                    refiner.set_interval_width_degrees(min_oscillation_range /
                                                       2)
                refiner.run()
                self._refinr_experiments_filename = (
                    refiner.get_refined_experiments_filename())
                self._refinr_indexed_filename = refiner.get_refined_filename()

            if scan_static or scan_varying:
                FileHandler.record_log_file(
                    "%s REFINE" % idxr.get_indexer_full_name(),
                    refiner.get_log_file())
                report = self.Report()
                report.set_experiments_filename(
                    self._refinr_experiments_filename)
                report.set_reflections_filename(self._refinr_indexed_filename)
                html_filename = os.path.join(
                    self.get_working_directory(),
                    "%i_dials.refine.report.html" % report.get_xpid(),
                )
                report.set_html_filename(html_filename)
                report.run()
                FileHandler.record_html_file(
                    "%s REFINE" % idxr.get_indexer_full_name(), html_filename)

            experiments = ExperimentList.from_file(
                self._refinr_experiments_filename)
            self.set_refiner_payload("models.expt",
                                     self._refinr_experiments_filename)
            self.set_refiner_payload("observations.refl",
                                     self._refinr_indexed_filename)

            # this is the result of the cell refinement
            self._refinr_cell = (
                experiments.crystals()[0].get_unit_cell().parameters())
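The refinement strategy above boils down to a couple of thresholds: no scan-varying refinement when the narrowest wedge is 5 degrees or less (or in fast mode), and a halved interval width when it is under 36 degrees. The same decision as a minimal pure function:

# a minimal sketch of the scan-varying decision used in _refine above
def scan_varying_plan(oscillation_ranges, allow=True, fast_mode=False):
    """Return (use_scan_varying, interval_width_degrees or None)."""
    min_range = min(oscillation_ranges)
    if not allow or fast_mode or min_range <= 5:
        return False, None            # narrow wedges: static refinement only
    if min_range < 36:
        return True, min_range / 2.0  # shrink interval width for short scans
    return True, None                 # use the wrapper's default width

print(scan_varying_plan([20.0, 45.0]))  # (True, 10.0)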
Example #22
    def _sort_together_data_xds_one_sweep(self):

        assert len(self._sweep_information) == 1

        epoch = list(self._sweep_information)[0]
        hklin = self._sweep_information[epoch]['scaled_reflections']

        if self.get_scaler_reference_reflection_file():
            spacegroups = [
                MtzUtils.space_group_name_from_mtz(
                    self.get_scaler_reference_reflection_file())
            ]
            reindex_operator = 'h,k,l'

        elif self._scalr_input_spacegroup:
            Debug.write('Assigning user input spacegroup: %s' % \
                        self._scalr_input_spacegroup)
            spacegroups = [self._scalr_input_spacegroup]
            reindex_operator = 'h,k,l'

        else:
            pointless = self._factory.Pointless()
            pointless.set_hklin(hklin)
            pointless.decide_spacegroup()

            FileHandler.record_log_file('%s %s pointless' % \
                                        (self._scalr_pname,
                                         self._scalr_xname),
                                        pointless.get_log_file())

            spacegroups = pointless.get_likely_spacegroups()
            reindex_operator = pointless.get_spacegroup_reindex_operator()

        self._scalr_likely_spacegroups = spacegroups
        spacegroup = self._scalr_likely_spacegroups[0]

        self._scalr_reindex_operator = clean_reindex_operator(reindex_operator)

        Chatter.write('Likely spacegroups:')
        for spag in self._scalr_likely_spacegroups:
            Chatter.write('%s' % spag)

        Chatter.write(
            'Reindexing to first spacegroup setting: %s (%s)' % \
            (spacegroup, clean_reindex_operator(reindex_operator)))

        hklout = os.path.join(self.get_working_directory(),
                              '%s_%s_reindex.mtz' % \
                              (self._scalr_pname, self._scalr_xname))

        FileHandler.record_temporary_file(hklout)

        if reindex_operator == '[h,k,l]':
            # just assign spacegroup

            from cctbx import sgtbx

            s = sgtbx.space_group(
                sgtbx.space_group_symbols(str(spacegroup)).hall())

            m = mtz.object(hklin)
            m.set_space_group(s).write(hklout)
            self._scalr_cell = m.crystals()[-1].unit_cell().parameters()
            Debug.write(
                'Updating unit cell to %.2f %.2f %.2f %.2f %.2f %.2f' % \
                tuple(self._scalr_cell))
            del m
            del s

        else:
            ri = self._factory.Reindex()
            ri.set_hklin(hklin)
            ri.set_hklout(hklout)
            ri.set_spacegroup(spacegroup)
            ri.set_operator(reindex_operator)
            ri.reindex()

            Debug.write(
                'Updating unit cell to %.2f %.2f %.2f %.2f %.2f %.2f' % \
                tuple(ri.get_cell()))
            self._scalr_cell = tuple(ri.get_cell())

        hklin = hklout
        hklout = os.path.join(self.get_working_directory(),
                              '%s_%s_sorted.mtz' % \
                              (self._scalr_pname, self._scalr_xname))

        s = self._factory.Sortmtz()
        s.set_hklin(hklin)
        s.set_hklout(hklout)

        s.sort(vrset=-99999999.0)

        self._prepared_reflections = hklout
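Two small points above: the reindex operator Pointless reports comes wrapped in brackets (hence the `'[h,k,l]'` comparison), and when it is the identity only the MTZ header needs a new space group, so no reflections are rewritten. A minimal sketch of the bracket-stripping helper, assuming that is all `clean_reindex_operator` does (an assumption on my part):

# a minimal sketch - assumed behaviour of clean_reindex_operator
def clean_reindex_operator(reindex_operator):
    # pointless reports operators like '[h,k,l]'; strip the brackets for
    # display and for downstream parsing
    return str(reindex_operator).replace('[', '').replace(']', '')

print(clean_reindex_operator('[h,k,l]'))  # h,k,l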
Example #23
    def _integrate(self):
        """Actually do the integration - in XDS terms this will mean running
        DEFPIX and INTEGRATE to measure all the reflections."""

        integrate = self.Integrate()

        # decide what images we are going to process, if not already
        # specified

        if not self._intgr_wedge:
            images = self.get_matching_images()
            self.set_integrater_wedge(min(images), max(images))

        imageset = self.get_imageset()
        beam = imageset.get_beam()
        detector = imageset.get_detector()

        d_min_limit = detector.get_max_resolution(beam.get_s0())
        if (d_min_limit > self._intgr_reso_high or
                PhilIndex.params.xia2.settings.resolution.keep_all_reflections
            ):
            logger.debug("Overriding high resolution limit: %f => %f" %
                         (self._intgr_reso_high, d_min_limit))
            self._intgr_reso_high = d_min_limit

        integrate.set_experiments_filename(self._intgr_experiments_filename)
        integrate.set_reflections_filename(self._intgr_indexed_filename)
        if PhilIndex.params.dials.integrate.d_max:
            integrate.set_d_max(PhilIndex.params.dials.integrate.d_max)
        else:
            integrate.set_d_max(self._intgr_reso_low)
        if PhilIndex.params.dials.integrate.d_min:
            integrate.set_d_min(PhilIndex.params.dials.integrate.d_min)
        else:
            integrate.set_d_min(self._intgr_reso_high)

        pname, xname, dname = self.get_integrater_project_info()
        sweep = self.get_integrater_sweep_name()
        FileHandler.record_log_file(
            f"{pname} {xname} {dname} {sweep} INTEGRATE",
            integrate.get_log_file(),
        )

        integrate.run()

        self._intgr_experiments_filename = integrate.get_integrated_experiments()

        # also record the batch range - needed for the analysis of the
        # radiation damage in chef...

        self._intgr_batches_out = (self._intgr_wedge[0], self._intgr_wedge[1])

        # FIXME (i) record the log file, (ii) get more information out from the
        # integration log on the quality of the data and (iii) the mosaic spread
        # range observed and R.M.S. deviations.

        self._intgr_integrated_reflections = integrate.get_integrated_reflections()
        if not os.path.isfile(self._intgr_integrated_reflections):
            raise RuntimeError("Integration failed: %s does not exist." %
                               self._intgr_integrated_reflections)

        self._intgr_per_image_statistics = integrate.get_per_image_statistics()
        logger.info(self.show_per_image_statistics())

        report = self.Report()
        html_filename = os.path.join(
            self.get_working_directory(),
            "%i_dials.integrate.report.html" % report.get_xpid(),
        )
        report.set_html_filename(html_filename)
        report.run(wait_for_completion=True)
        FileHandler.record_html_file(
            f"{pname} {xname} {dname} {sweep} INTEGRATE", html_filename)

        experiments = load.experiment_list(self._intgr_experiments_filename)
        profile = experiments.profiles()[0]
        mosaic = profile.sigma_m()
        try:
            m_min, m_max, m_mean = mosaic.min_max_mean().as_tuple()
            self.set_integrater_mosaic_min_mean_max(m_min, m_mean, m_max)
        except AttributeError:
            self.set_integrater_mosaic_min_mean_max(mosaic, mosaic, mosaic)

        logger.info("Mosaic spread: %.3f < %.3f < %.3f" %
                    self.get_integrater_mosaic_min_mean_max())

        # If running in high-pressure mode, run dials.anvil_correction to
        # correct for the attenuation of the incident and diffracted beams by the
        # diamond anvils.
        if self.high_pressure:
            self._anvil_correction()

        return self._intgr_integrated_reflections
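The try/except around `sigma_m` handles the two shapes DIALS may hand back: a flex array supporting `min_max_mean()`, or a plain float. Collapsed into one helper (a sketch; note `min_max_mean().as_tuple()` yields min, max, mean in that order, exactly as the code above unpacks it):

# a minimal sketch of the mosaic normalisation above
def mosaic_min_mean_max(sigma_m):
    try:
        # flex-array case: min_max_mean().as_tuple() -> (min, max, mean)
        m_min, m_max, m_mean = sigma_m.min_max_mean().as_tuple()
        return m_min, m_mean, m_max
    except AttributeError:
        # scalar case: one value stands in for all three
        return sigma_m, sigma_m, sigma_m

print(mosaic_min_mean_max(0.123))  # (0.123, 0.123, 0.123)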
Example #24
    def _mosflm_integrate(self):
        '''Perform the actual integration, based on the results of the
        cell refinement or indexing (they have the equivalent form).'''

        refinr = self.get_integrater_refiner()

        if not refinr.get_refiner_payload('mosflm_orientation_matrix'):
            raise RuntimeError('unexpected situation in indexing')

        lattice = refinr.get_refiner_lattice()
        spacegroup_number = lattice_to_spacegroup(lattice)
        mosaic = refinr.get_refiner_payload('mosaic')
        beam = refinr.get_refiner_payload('beam')
        distance = refinr.get_refiner_payload('distance')
        matrix = refinr.get_refiner_payload('mosflm_orientation_matrix')

        integration_params = refinr.get_refiner_payload(
            'mosflm_integration_parameters')

        if integration_params:
            if 'separation' in integration_params:
                self.set_integrater_parameter(
                    'mosflm', 'separation',
                    '%s %s' % tuple(integration_params['separation']))
            if 'raster' in integration_params:
                self.set_integrater_parameter(
                    'mosflm', 'raster',
                    '%d %d %d %d %d' % tuple(integration_params['raster']))

        refinr.set_refiner_payload('mosflm_integration_parameters', None)

        with open(
                os.path.join(self.get_working_directory(), 'xiaintegrate.mat'),
                'w') as f:
            for m in matrix:
                f.write(m)

        # then start the integration
        integrater = MosflmIntegrate()
        integrater.set_working_directory(self.get_working_directory())
        auto_logfiler(integrater)

        integrater.set_refine_profiles(self._mosflm_refine_profiles)

        pname, xname, dname = self.get_integrater_project_info()

        if pname is not None and xname is not None and dname is not None:
            Debug.write('Harvesting: %s/%s/%s' % (pname, xname, dname))
            harvest_dir = self.get_working_directory()
            # harvest file name will be %s.mosflm_run_start_end % dname
            temp_dname = '%s_%s' % \
                         (dname, self.get_integrater_sweep_name())
            integrater.set_pname_xname_dname(pname, xname, temp_dname)

        integrater.set_template(os.path.basename(self.get_template()))
        integrater.set_directory(self.get_directory())

        # check for ice - and if so, exclude (ranges taken from
        # XDS documentation)
        if self.get_integrater_ice() != 0:
            Debug.write('Excluding ice rings')
            integrater.set_exclude_ice(True)

        # exclude specified resolution ranges
        if len(self.get_integrater_excluded_regions()) != 0:
            regions = self.get_integrater_excluded_regions()
            Debug.write('Excluding regions: %s' % repr(regions))
            integrater.set_exclude_regions(regions)

        mask = standard_mask(self.get_detector())
        for m in mask:
            integrater.add_instruction(m)

        integrater.set_input_mat_file('xiaintegrate.mat')

        integrater.set_beam_centre(beam)
        integrater.set_distance(distance)
        integrater.set_space_group_number(spacegroup_number)
        integrater.set_mosaic(mosaic)

        if self.get_wavelength_prov() == 'user':
            integrater.set_wavelength(self.get_wavelength())

        parameters = self.get_integrater_parameters('mosflm')
        integrater.update_parameters(parameters)

        if self._mosflm_gain:
            integrater.set_gain(self._mosflm_gain)

        # check for resolution limits
        if self._intgr_reso_high > 0.0:
            integrater.set_d_min(self._intgr_reso_high)
        if self._intgr_reso_low:
            integrater.set_d_max(self._intgr_reso_low)

        if PhilIndex.params.general.backstop_mask:
            from xia2.Toolkit.BackstopMask import BackstopMask
            mask = BackstopMask(PhilIndex.params.general.backstop_mask)
            mask = mask.calculate_mask_mosflm(self.get_header())
            integrater.set_mask(mask)

        detector = self.get_detector()
        detector_width, detector_height = detector[0].get_image_size_mm()

        lim_x = 0.5 * detector_width
        lim_y = 0.5 * detector_height

        Debug.write('Scanner limits: %.1f %.1f' % (lim_x, lim_y))
        integrater.set_limits(lim_x, lim_y)

        integrater.set_fix_mosaic(self._mosflm_postref_fix_mosaic)
        offset = self.get_frame_offset()

        integrater.set_image_range(
            (self._intgr_wedge[0] - offset, self._intgr_wedge[1] - offset))

        try:
            integrater.run()
        except RuntimeError as e:
            if 'integration failed: reason unknown' in str(e):
                Chatter.write('Mosflm has failed in integration')
                message = 'The input was:\n\n'
                for line in integrater.get_all_input():
                    message += '  %s' % line
                Chatter.write(message)
            raise

        FileHandler.record_log_file(
            '%s %s %s %s mosflm integrate' % \
            (self.get_integrater_sweep_name(),
             pname, xname, dname),
            integrater.get_log_file())

        self._intgr_per_image_statistics = integrater.get_per_image_statistics()

        self._mosflm_hklout = integrater.get_hklout()
        Debug.write('Integration output: %s' % self._mosflm_hklout)

        self._intgr_n_ref = integrater.get_nref()

        # if a BGSIG error happened try not refining the
        # profile and running again...

        if integrater.get_bgsig_too_large():
            if not self._mosflm_refine_profiles:
                raise RuntimeError('BGSIG error with profiles fixed')

            Debug.write('BGSIG error detected - try fixing profile...')

            self._mosflm_refine_profiles = False
            self.set_integrater_done(False)

            return

        if integrater.get_getprof_error():
            Debug.write('GETPROF error detected - try fixing profile...')
            self._mosflm_refine_profiles = False
            self.set_integrater_done(False)

            return

        if (integrater.get_detector_gain_error()
                and not (self.get_imageset().get_detector()[0].get_type()
                         == 'SENSOR_PAD')):
            gain = integrater.get_suggested_gain()
            if gain is not None:
                self.set_integrater_parameter('mosflm', 'gain', gain)
                self.set_integrater_export_parameter('mosflm', 'gain', gain)
                if self._mosflm_gain:
                    Debug.write('GAIN updated to %f' % gain)
                else:
                    Debug.write('GAIN found to be %f' % gain)

                self._mosflm_gain = gain
                self._mosflm_rerun_integration = True

        if not self._mosflm_hklout:
            raise RuntimeError('processing abandoned')

        self._intgr_batches_out = integrater.get_batches_out()

        mosaics = integrater.get_mosaic_spreads()
        if mosaics:
            self.set_integrater_mosaic_min_mean_max(
                min(mosaics),
                sum(mosaics) / len(mosaics), max(mosaics))
        else:
            # NB: the original called indxr.get_indexer_mosaic() here, but no
            # 'indxr' is in scope; fall back to the refiner's mosaic estimate
            m = mosaic
            self.set_integrater_mosaic_min_mean_max(m, m, m)

        # write the report for each image as .*-#$ to Chatter -
        # detailed report will be written automagically to science...

        Chatter.write(self.show_per_image_statistics())

        Chatter.write('Mosaic spread: %.3f < %.3f < %.3f' % \
                      self.get_integrater_mosaic_min_mean_max())

        # gather the statistics from the postrefinement
        postref_result = integrater.get_postref_result()

        # now write this to a postrefinement log
        postref_log = os.path.join(self.get_working_directory(),
                                   'postrefinement.log')

        fout = open(postref_log, 'w')

        fout.write('$TABLE: Postrefinement for %s:\n' % \
                   self._intgr_sweep_name)
        fout.write('$GRAPHS: Missetting angles:A:1, 2, 3, 4: $$\n')
        fout.write('Batch PhiX PhiY PhiZ $$ Batch PhiX PhiY PhiZ $$\n')

        for image in sorted(postref_result):
            phix = postref_result[image].get('phix', 0.0)
            phiy = postref_result[image].get('phiy', 0.0)
            phiz = postref_result[image].get('phiz', 0.0)

            fout.write('%d %5.2f %5.2f %5.2f\n' % \
                       (image, phix, phiy, phiz))

        fout.write('$$\n')
        fout.close()

        if self.get_integrater_sweep_name():
            pname, xname, dname = self.get_integrater_project_info()
            FileHandler.record_log_file('%s %s %s %s postrefinement' % \
                                        (self.get_integrater_sweep_name(),
                                         pname, xname, dname),
                                        postref_log)

        return self._mosflm_hklout
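The gain handling near the end of Example #24 is a feedback loop: adopt Mosflm's suggested gain and signal a rerun rather than pressing on with a suspect value. The control flow, distilled into a minimal sketch:

# a minimal sketch of the detector-gain feedback above
def update_gain(current_gain, gain_error, suggested_gain):
    """Return (gain, rerun_needed) given the program's verdict on this pass."""
    if gain_error and suggested_gain is not None:
        # adopt the suggested gain and integrate again from the top
        return suggested_gain, True
    return current_gain, False

print(update_gain(None, True, 1.8))   # (1.8, True)
print(update_gain(1.8, False, None))  # (1.8, False)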
Example #25
  def _scale(self):
    '''Perform all of the operations required to deliver the scaled
    data.'''

    epochs = self._sweep_handler.get_epochs()

    if self._scalr_corrections:
      Journal.block(
          'scaling', self.get_scaler_xcrystal().get_name(), 'CCP4',
          {'scaling model':'automatic',
           'absorption':self._scalr_correct_absorption,
           'decay':self._scalr_correct_decay
           })

    else:
      Journal.block(
          'scaling', self.get_scaler_xcrystal().get_name(), 'CCP4',
          {'scaling model':'default'})

    sc = self._updated_aimless()
    sc.set_hklin(self._prepared_reflections)
    sc.set_chef_unmerged(True)
    sc.set_new_scales_file('%s.scales' % self._scalr_xname)

    user_resolution_limits = {}

    for epoch in epochs:

      si = self._sweep_handler.get_sweep_information(epoch)
      pname, xname, dname = si.get_project_info()
      sname = si.get_sweep_name()
      intgr = si.get_integrater()

      if intgr.get_integrater_user_resolution():
        dmin = intgr.get_integrater_high_resolution()

        if (dname, sname) not in user_resolution_limits:
          user_resolution_limits[(dname, sname)] = dmin
        elif dmin < user_resolution_limits[(dname, sname)]:
          user_resolution_limits[(dname, sname)] = dmin

      start, end = si.get_batch_range()

      if (dname, sname) in self._scalr_resolution_limits:
        resolution, _ = self._scalr_resolution_limits[(dname, sname)]
        sc.add_run(start, end, exclude=False, resolution=resolution, name=sname)
      else:
        sc.add_run(start, end, name=sname)

    sc.set_hklout(os.path.join(self.get_working_directory(),
                               '%s_%s_scaled_test.mtz' % \
                               (self._scalr_pname, self._scalr_xname)))

    if self.get_scaler_anomalous():
      sc.set_anomalous()

    # what follows, sucks

    failover = PhilIndex.params.xia2.settings.failover
    if failover:

      try:
        sc.scale()
      except RuntimeError as e:

        es = str(e)

        if 'bad batch' in es or \
               'negative scales run' in es or \
               'no observations' in es:

          # first ID the sweep from the batch no

          batch = int(es.split()[-1])
          epoch = self._identify_sweep_epoch(batch)
          sweep = self._scalr_integraters[epoch].get_integrater_sweep()

          # then remove it from my parent xcrystal

          self.get_scaler_xcrystal().remove_sweep(sweep)

          # then remove it from the scaler list of integraters
          # - this should really be a scaler interface method

          del self._scalr_integraters[epoch]

          # then tell the user what is happening

          Chatter.write(
              'Sweep %s gave negative scales - removing' % \
              sweep.get_name())

          # then reset the prepare, do, finish flags

          self.set_scaler_prepare_done(False)
          self.set_scaler_done(False)
          self.set_scaler_finish_done(False)

          # and return

          return

        else:

          raise

    else:
      sc.scale()

    # then gather up all of the resulting reflection files
    # and convert them into the required formats (.sca, .mtz.)

    data = sc.get_summary()

    loggraph = sc.parse_ccp4_loggraph()

    resolution_info = {}

    reflection_files = sc.get_scaled_reflection_files()

    for dataset in reflection_files:
      FileHandler.record_temporary_file(reflection_files[dataset])

    for key in loggraph:
      if 'Analysis against resolution' in key:
        dataset = key.split(',')[-1].strip()
        resolution_info[dataset] = transpose_loggraph(loggraph[key])

    highest_resolution = 100.0
    highest_suggested_resolution = None

    # check in here that there is actually some data to scale..!

    if len(resolution_info) == 0:
      raise RuntimeError('no resolution info')

    for epoch in epochs:

      si = self._sweep_handler.get_sweep_information(epoch)
      pname, xname, dname = si.get_project_info()
      sname = si.get_sweep_name()
      intgr = si.get_integrater()
      start, end = si.get_batch_range()

      if (dname, sname) in self._scalr_resolution_limits:
        continue

      elif (dname, sname) in user_resolution_limits:
        limit = user_resolution_limits[(dname, sname)]
        self._scalr_resolution_limits[(dname, sname)] = (limit, None)
        if limit < highest_resolution:
          highest_resolution = limit
        Chatter.write('Resolution limit for %s: %5.2f (user provided)' % \
                      (dname, limit))
        continue

      hklin = sc.get_unmerged_reflection_file()
      limit, reasoning = self._estimate_resolution_limit(
        hklin, batch_range=(start, end))

      if PhilIndex.params.xia2.settings.resolution.keep_all_reflections:
        suggested = limit
        if highest_suggested_resolution is None or limit < highest_suggested_resolution:
          highest_suggested_resolution = limit
        limit = intgr.get_detector().get_max_resolution(intgr.get_beam_obj().get_s0())
        self._scalr_resolution_limits[(dname, sname)] = (limit, suggested)
        Debug.write('keep_all_reflections set, using detector limits')
      Debug.write('Resolution for sweep %s: %.2f' % \
                  (sname, limit))

      if (dname, sname) not in self._scalr_resolution_limits:
        self._scalr_resolution_limits[(dname, sname)] = (limit, None)
        self.set_scaler_done(False)

      if limit < highest_resolution:
        highest_resolution = limit

      limit, suggested = self._scalr_resolution_limits[(dname, sname)]
      if suggested is None or limit == suggested:
        reasoning_str = ''
        if reasoning:
          reasoning_str = ' (%s)' % reasoning
        Chatter.write('Resolution for sweep %s/%s: %.2f%s' % \
                      (dname, sname, limit, reasoning_str))
      else:
        Chatter.write('Resolution limit for %s/%s: %5.2f (%5.2f suggested)' % \
                      (dname, sname, limit, suggested))

    if highest_suggested_resolution is not None and \
        highest_resolution >= (highest_suggested_resolution - 0.004):
      Debug.write('Dropping resolution cut-off suggestion since it is'
                  ' essentially identical to the actual resolution limit.')
      highest_suggested_resolution = None
    self._scalr_highest_resolution = highest_resolution
    self._scalr_highest_suggested_resolution = highest_suggested_resolution
    if highest_suggested_resolution is not None:
      Debug.write('Suggested highest resolution is %5.2f (%5.2f suggested)' % \
                (highest_resolution, highest_suggested_resolution))
    else:
      Debug.write('Scaler highest resolution set to %5.2f' % \
                highest_resolution)

    if not self.get_scaler_done():
      Debug.write('Returning as scaling not finished...')
      return

    batch_info = {}

    for key in loggraph:
      if 'Analysis against Batch' in key:
        dataset = key.split(',')[-1].strip()
        batch_info[dataset] = transpose_loggraph(loggraph[key])

    sc = self._updated_aimless()

    FileHandler.record_log_file('%s %s aimless' % (self._scalr_pname,
                                                   self._scalr_xname),
                                sc.get_log_file())

    sc.set_hklin(self._prepared_reflections)
    sc.set_new_scales_file('%s_final.scales' % self._scalr_xname)

    for epoch in epochs:

      si = self._sweep_handler.get_sweep_information(epoch)
      pname, xname, dname = si.get_project_info()
      sname = si.get_sweep_name()
      start, end = si.get_batch_range()

      resolution_limit, _ = self._scalr_resolution_limits[(dname, sname)]

      sc.add_run(start, end, exclude=False, resolution=resolution_limit,
                 name=xname)

    sc.set_hklout(os.path.join(self.get_working_directory(),
                               '%s_%s_scaled.mtz' % \
                               (self._scalr_pname, self._scalr_xname)))

    if self.get_scaler_anomalous():
      sc.set_anomalous()

    sc.scale()

    FileHandler.record_xml_file('%s %s aimless xml' % (self._scalr_pname,
                                                       self._scalr_xname),
                                sc.get_xmlout())

    data = sc.get_summary()
    scales_file = sc.get_new_scales_file()
    loggraph = sc.parse_ccp4_loggraph()

    standard_deviation_info = {}

    for key in loggraph:
      if 'standard deviation v. Intensity' in key:
        dataset = key.split(',')[-1].strip()
        standard_deviation_info[dataset] = transpose_loggraph(loggraph[key])

    resolution_info = {}

    for key in loggraph:
      if 'Analysis against resolution' in key:
        dataset = key.split(',')[-1].strip()
        resolution_info[dataset] = transpose_loggraph(loggraph[key])

    batch_info = {}

    for key in loggraph:
      if 'Analysis against Batch' in key:
        dataset = key.split(',')[-1].strip()
        batch_info[dataset] = transpose_loggraph(loggraph[key])

    # finally put all of the results "somewhere useful"

    self._scalr_statistics = data

    self._scalr_scaled_refl_files = copy.deepcopy(
        sc.get_scaled_reflection_files())

    sc = self._updated_aimless()
    sc.set_hklin(self._prepared_reflections)
    sc.set_scales_file(scales_file)

    self._wavelengths_in_order = []

    for epoch in epochs:
      si = self._sweep_handler.get_sweep_information(epoch)
      pname, xname, dname = si.get_project_info()
      sname = si.get_sweep_name()
      start, end = si.get_batch_range()

      resolution_limit, _ = self._scalr_resolution_limits[(dname, sname)]

      sc.add_run(start, end, exclude=False, resolution=resolution_limit,
                 name=sname)

      if dname not in self._wavelengths_in_order:
        self._wavelengths_in_order.append(dname)

    sc.set_hklout(os.path.join(self.get_working_directory(),
                               '%s_%s_scaled.mtz' % \
                               (self._scalr_pname,
                                self._scalr_xname)))

    sc.set_scalepack()

    if self.get_scaler_anomalous():
      sc.set_anomalous()
    sc.scale()

    self._update_scaled_unit_cell()

    self._scalr_scaled_reflection_files = {}
    self._scalr_scaled_reflection_files['sca'] = {}
    self._scalr_scaled_reflection_files['sca_unmerged'] = {}
    self._scalr_scaled_reflection_files['mtz_unmerged'] = {}

    for key in self._scalr_scaled_refl_files:
      hklout = self._scalr_scaled_refl_files[key]

      scaout = '%s.sca' % hklout[:-4]
      self._scalr_scaled_reflection_files['sca'][key] = scaout
      FileHandler.record_data_file(scaout)
      scalepack = os.path.join(os.path.split(hklout)[0],
                               os.path.split(hklout)[1].replace(
          '_scaled', '_scaled_unmerged').replace('.mtz', '.sca'))
      self._scalr_scaled_reflection_files['sca_unmerged'][key] = scalepack
      FileHandler.record_data_file(scalepack)
      mtz_unmerged = os.path.splitext(scalepack)[0] + '.mtz'
      self._scalr_scaled_reflection_files['mtz_unmerged'][key] = mtz_unmerged
      FileHandler.record_data_file(mtz_unmerged)

      if self._scalr_cell_esd is not None:
        # patch .mtz and overwrite unit cell information
        import xia2.Modules.Scaler.tools as tools
        override_cell = self._scalr_cell_dict.get('%s_%s_%s' %
          (self._scalr_pname, self._scalr_xname, key))[0]
        tools.patch_mtz_unit_cell(mtz_unmerged, override_cell)
        tools.patch_mtz_unit_cell(hklout, override_cell)


    if PhilIndex.params.xia2.settings.merging_statistics.source == 'cctbx':
      for key in self._scalr_scaled_refl_files:
        stats = self._compute_scaler_statistics(
          self._scalr_scaled_reflection_files['mtz_unmerged'][key],
          selected_band=(highest_suggested_resolution, None), wave=key)
        self._scalr_statistics[
          (self._scalr_pname, self._scalr_xname, key)] = stats

    sc = self._updated_aimless()
    sc.set_hklin(self._prepared_reflections)
    sc.set_scales_file(scales_file)

    self._wavelengths_in_order = []

    for epoch in epochs:

      si = self._sweep_handler.get_sweep_information(epoch)
      pname, xname, dname = si.get_project_info()
      sname = si.get_sweep_name()
      start, end = si.get_batch_range()

      resolution_limit, _ = self._scalr_resolution_limits[(dname, sname)]

      sc.add_run(start, end, exclude=False, resolution=resolution_limit,
                 name=sname)

      if dname not in self._wavelengths_in_order:
        self._wavelengths_in_order.append(dname)

    sc.set_hklout(os.path.join(self.get_working_directory(),
                               '%s_%s_chef.mtz' % \
                               (self._scalr_pname,
                                self._scalr_xname)))

    sc.set_chef_unmerged(True)

    if self.get_scaler_anomalous():
      sc.set_anomalous()
    sc.scale()
    if not PhilIndex.params.dials.fast_mode:
      try:
        self._generate_absorption_map(sc)
      except Exception as e:
        # Map generation may fail for a number of reasons, e.g. matplotlib broken
        Debug.write("Could not generate absorption map (%s)" % e)
Example #26
    def _index(self):
        if PhilIndex.params.dials.index.method in (libtbx.Auto, None):
            if self._indxr_input_cell is not None:
                indexer = self._do_indexing("real_space_grid_search")
            else:
                try:
                    indexer_fft3d = self._do_indexing(method="fft3d")
                    nref_3d, rmsd_3d = indexer_fft3d.get_nref_rmsds()
                except Exception as e:
                    nref_3d = None
                    rmsd_3d = None
                    indexing_failure = e
                try:
                    indexer_fft1d = self._do_indexing(method="fft1d")
                    nref_1d, rmsd_1d = indexer_fft1d.get_nref_rmsds()
                except Exception as e:
                    nref_1d = None
                    rmsd_1d = None
                    indexing_failure = e

                # guard: only compare counts and RMSDs when both methods
                # actually produced a solution (avoids None comparisons)
                if nref_1d is not None and (nref_3d is None or (
                        nref_1d > nref_3d and rmsd_1d[0] < rmsd_3d[0]
                        and rmsd_1d[1] < rmsd_3d[1] and rmsd_1d[2] < rmsd_3d[2])):
                    indexer = indexer_fft1d
                elif nref_3d is not None:
                    indexer = indexer_fft3d
                else:
                    raise RuntimeError(indexing_failure)

        else:
            indexer = self._do_indexing(
                method=PhilIndex.params.dials.index.method)

        # not strictly the P1 cell, rather the cell that was used in indexing
        self._p1_cell = indexer._p1_cell
        self.set_indexer_payload("indexed_filename",
                                 indexer.get_indexed_filename())

        indexed_file = indexer.get_indexed_filename()
        indexed_experiments = indexer.get_experiments_filename()

        fast_mode = PhilIndex.params.dials.fast_mode
        trust_beam_centre = PhilIndex.params.xia2.settings.trust_beam_centre
        multi_sweep_indexing = PhilIndex.params.xia2.settings.multi_sweep_indexing
        check_indexing_symmetry = PhilIndex.params.dials.check_indexing_symmetry

        if check_indexing_symmetry and not (trust_beam_centre or fast_mode
                                            or multi_sweep_indexing):
            checksym = self.CheckIndexingSymmetry()
            checksym.set_experiments_filename(indexed_experiments)
            checksym.set_indexed_filename(indexed_file)
            checksym.set_grid_search_scope(1)
            checksym.run()
            hkl_offset = checksym.get_hkl_offset()
            logger.debug("hkl_offset: %s", str(hkl_offset))
            if hkl_offset is not None and hkl_offset != (0, 0, 0):
                reindex = self.Reindex()
                reindex.set_hkl_offset(hkl_offset)
                reindex.set_indexed_filename(indexed_file)
                reindex.run()
                indexed_file = reindex.get_reindexed_reflections_filename()

                # do some scan-static refinement - run twice, first without outlier
                # rejection as the model is too far from reality to do a sensible job of
                # outlier rejection
                refiner = self.Refine()
                refiner.set_experiments_filename(indexed_experiments)
                refiner.set_indexed_filename(
                    reindex.get_reindexed_reflections_filename())
                refiner.set_outlier_algorithm(None)
                refiner.run()
                indexed_experiments = refiner.get_refined_experiments_filename()

                # now again with outlier rejection (possibly)
                refiner = self.Refine()
                refiner.set_experiments_filename(indexed_experiments)
                refiner.set_indexed_filename(indexed_file)
                refiner.run()
                indexed_experiments = refiner.get_refined_experiments_filename()

        if self._indxr_input_lattice is None:

            # FIXME in here should respect the input unit cell and lattice if provided

            # FIXME from this (i) populate the helper table,
            # (ii) try to avoid re-running the indexing
            # step if we eliminate a solution as we have all of the refined results
            # already available.

            rbs = self.RefineBravaisSettings()
            rbs.set_experiments_filename(indexed_experiments)
            rbs.set_indexed_filename(indexed_file)
            if PhilIndex.params.dials.fix_geometry:
                rbs.set_detector_fix("all")
                rbs.set_beam_fix("all")
            elif PhilIndex.params.dials.fix_distance:
                rbs.set_detector_fix("distance")

            FileHandler.record_log_file(
                "%s LATTICE" % self.get_indexer_full_name(),
                rbs.get_log_file())
            rbs.run()

            for k in sorted(rbs.get_bravais_summary()):
                summary = rbs.get_bravais_summary()[k]

                # FIXME need to do this better - for the moment only accept lattices
                # where R.M.S. deviation is less than twice P1 R.M.S. deviation.

                if self._indxr_input_lattice is None:
                    if not summary["recommended"]:
                        continue

                experiments = load.experiment_list(summary["experiments_file"],
                                                   check_format=False)
                cryst = experiments.crystals()[0]
                cs = crystal.symmetry(unit_cell=cryst.get_unit_cell(),
                                      space_group=cryst.get_space_group())
                lattice = str(
                    bravais_types.bravais_lattice(group=cs.space_group()))
                cb_op = sgtbx.change_of_basis_op(str(summary["cb_op"]))

                self._solutions[k] = {
                    "number": k,
                    "mosaic": 0.0,
                    "metric": summary["max_angular_difference"],
                    "rmsd": summary["rmsd"],
                    "nspots": summary["nspots"],
                    "lattice": lattice,
                    "cell": cs.unit_cell().parameters(),
                    "experiments_file": summary["experiments_file"],
                    "cb_op": str(cb_op),
                }

            self._solution = self.get_solution()
            self._indxr_lattice = self._solution["lattice"]

            for solution in self._solutions:
                lattice = self._solutions[solution]["lattice"]
                if (self._indxr_input_lattice is not None
                        and self._indxr_input_lattice != lattice):
                    continue
                if lattice in self._indxr_other_lattice_cell:
                    if (self._indxr_other_lattice_cell[lattice]["metric"] <
                            self._solutions[solution]["metric"]):
                        continue

                self._indxr_other_lattice_cell[lattice] = {
                    "metric": self._solutions[solution]["metric"],
                    "cell": self._solutions[solution]["cell"],
                }

            self._indxr_mosaic = self._solution["mosaic"]

            experiments_file = self._solution["experiments_file"]
            experiment_list = load.experiment_list(experiments_file)
            self.set_indexer_experiment_list(experiment_list)

            self.set_indexer_payload("experiments_filename", experiments_file)

            # reindex the output reflection list to this solution
            reindex = self.Reindex()
            reindex.set_indexed_filename(indexed_file)
            reindex.set_cb_op(self._solution["cb_op"])
            reindex.set_space_group(
                str(lattice_to_spacegroup_number(self._solution["lattice"])))
            reindex.run()
            indexed_file = reindex.get_reindexed_reflections_filename()
            self.set_indexer_payload("indexed_filename", indexed_file)

        else:
            experiment_list = load.experiment_list(indexed_experiments)
            self.set_indexer_experiment_list(experiment_list)
            self.set_indexer_payload("experiments_filename",
                                     indexed_experiments)

            cryst = experiment_list.crystals()[0]
            lattice = str(
                bravais_types.bravais_lattice(group=cryst.get_space_group()))
            self._indxr_lattice = lattice
            self._solutions = {}
            self._solutions[0] = {
                "number": 0,
                "mosaic": 0.0,
                "metric": -1,
                "rmsd": -1,
                "nspots": -1,
                "lattice": lattice,
                "cell": cryst.get_unit_cell().parameters(),
                "experiments_file": indexed_experiments,
                "cb_op": "a,b,c",
            }

            self._indxr_other_lattice_cell[lattice] = {
                "metric": self._solutions[0]["metric"],
                "cell": self._solutions[0]["cell"],
            }
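
The rule used above for choosing between the fft1d and fft3d attempts can be restated as a small pure function (a sketch with illustrative names): prefer fft1d only when it indexed more reflections with uniformly lower RMSDs, or when fft3d failed outright.

def prefer_fft1d(nref_1d, rmsd_1d, nref_3d, rmsd_3d):
    """Return True if the fft1d indexing result should be kept."""
    if nref_1d is None:
        return False
    if nref_3d is None:
        return True
    return (nref_1d > nref_3d
            and all(r1 < r3 for r1, r3 in zip(rmsd_1d, rmsd_3d)))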
Example #27
    def _integrate(self):
        '''Actually do the integration - in XDS terms this will mean running
    DEFPIX and INTEGRATE to measure all the reflections.'''

        images_str = '%d to %d' % tuple(self._intgr_wedge)
        cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % tuple(self._intgr_cell)

        if len(self._fp_directory) <= 50:
            dirname = self._fp_directory
        else:
            dirname = '...%s' % self._fp_directory[-46:]

        Journal.block(
            'integrating', self._intgr_sweep_name, 'DIALS', {
                'images': images_str,
                'cell': cell_str,
                'lattice': self.get_integrater_refiner().get_refiner_lattice(),
                'template': self._fp_template,
                'directory': dirname,
                'resolution': '%.2f' % self._intgr_reso_high
            })

        integrate = self.Integrate()

        # decide what images we are going to process, if not already
        # specified

        if not self._intgr_wedge:
            images = self.get_matching_images()
            self.set_integrater_wedge(min(images), max(images))

        imageset = self.get_imageset()
        beam = imageset.get_beam()
        detector = imageset.get_detector()

        d_min_limit = detector.get_max_resolution(beam.get_s0())
        if d_min_limit > self._intgr_reso_high \
            or PhilIndex.params.xia2.settings.resolution.keep_all_reflections:
            Debug.write('Overriding high resolution limit: %f => %f' % \
                        (self._intgr_reso_high, d_min_limit))
            self._intgr_reso_high = d_min_limit

        integrate.set_experiments_filename(self._intgr_experiments_filename)
        integrate.set_reflections_filename(self._intgr_indexed_filename)
        integrate.set_d_max(self._intgr_reso_low)
        integrate.set_d_min(self._intgr_reso_high)
        pname, xname, dname = self.get_integrater_project_info()
        sweep = self.get_integrater_sweep_name()
        FileHandler.record_log_file('%s %s %s %s INTEGRATE' % \
                                    (pname, xname, dname, sweep),
                                    integrate.get_log_file())

        try:
            integrate.run()
        except xia2.Wrappers.Dials.Integrate.DIALSIntegrateError as e:
            s = str(e)
            if ('dials.integrate requires more memory than is available.' in s
                    and not self._intgr_reso_high):
                # Try to estimate a more sensible resolution limit for integration
                # in case we were just integrating noise to the edge of the detector
                images = self._integrate_select_images_wedges()

                Debug.write(
                    'Integrating subset of images to estimate resolution limit.\n'
                    'Integrating images %s' % images)

                integrate = self.Integrate()
                integrate.set_experiments_filename(
                    self._intgr_experiments_filename)
                integrate.set_reflections_filename(
                    self._intgr_indexed_filename)
                integrate.set_d_max(self._intgr_reso_low)
                integrate.set_d_min(self._intgr_reso_high)
                for (start, stop) in images:
                    integrate.add_scan_range(
                        start - self.get_matching_images()[0],
                        stop - self.get_matching_images()[0])
                integrate.set_reflections_per_degree(1000)
                integrate.run()

                integrated_pickle = integrate.get_integrated_filename()

                from xia2.Wrappers.Dials.EstimateResolutionLimit import EstimateResolutionLimit
                d_min_estimater = EstimateResolutionLimit()
                d_min_estimater.set_working_directory(
                    self.get_working_directory())
                auto_logfiler(d_min_estimater)
                d_min_estimater.set_experiments_filename(
                    self._intgr_experiments_filename)
                d_min_estimater.set_reflections_filename(integrated_pickle)
                d_min = d_min_estimater.run()

                Debug.write('Estimate for d_min: %.2f' % d_min)
                Debug.write('Re-running integration to this resolution limit')

                self._intgr_reso_high = d_min
                self.set_integrater_done(False)
                return
            raise Sorry(e)

        self._intgr_experiments_filename = integrate.get_integrated_experiments()

        # also record the batch range - needed for the analysis of the
        # radiation damage in chef...

        self._intgr_batches_out = (self._intgr_wedge[0], self._intgr_wedge[1])

        # FIXME (i) record the log file, (ii) get more information out from the
        # integration log on the quality of the data and (iii) the mosaic spread
        # range observed and R.M.S. deviations.

        self._intgr_integrated_pickle = integrate.get_integrated_reflections()
        if not os.path.isfile(self._intgr_integrated_pickle):
            raise RuntimeError("Integration failed: %s does not exist." %
                               self._intgr_integrated_pickle)

        self._intgr_per_image_statistics = integrate.get_per_image_statistics()
        Chatter.write(self.show_per_image_statistics())

        report = self.Report()
        html_filename = os.path.join(
            self.get_working_directory(),
            '%i_dials.integrate.report.html' % report.get_xpid())
        report.set_html_filename(html_filename)
        report.run()
        FileHandler.record_html_file('%s %s %s %s INTEGRATE' % \
                                     (pname, xname, dname, sweep),
                                     html_filename)

        import dials
        from dxtbx.serialize import load
        experiments = load.experiment_list(self._intgr_experiments_filename)
        profile = experiments.profiles()[0]
        mosaic = profile.sigma_m()
        self.set_integrater_mosaic_min_mean_max(mosaic, mosaic, mosaic)

        Chatter.write('Mosaic spread: %.3f < %.3f < %.3f' % \
                      self.get_integrater_mosaic_min_mean_max())

        return self._intgr_integrated_pickle
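
Example #27 recovers from a DIALS out-of-memory failure by integrating a small wedge, estimating a sensible d_min from it, and marking the integrater as not done so the caller repeats integration with the tighter limit. A schematic restatement of that control flow (all names hypothetical; the real code keys off the DIALS error message rather than MemoryError):

def integrate_with_memory_fallback(run_full, estimate_d_min_from_subset,
                                   set_resolution, mark_not_done):
    """Retry scaffold: on an out-of-memory failure, derive a resolution
    limit from a subset and ask the caller to repeat integration."""
    try:
        return run_full()
    except MemoryError:
        set_resolution(estimate_d_min_from_subset())
        mark_not_done()
        return None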
Example #28
    def _index(self):
        '''Actually do the autoindexing using the data prepared by the
    previous method.'''

        images_str = '%d to %d' % tuple(self._indxr_images[0])
        for i in self._indxr_images[1:]:
            images_str += ', %d to %d' % tuple(i)

        cell_str = None
        if self._indxr_input_cell:
            cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % \
                       self._indxr_input_cell

        # then this is a proper autoindexing run - describe this
        # to the journal entry

        #if len(self._fp_directory) <= 50:
        #dirname = self._fp_directory
        #else:
        #dirname = '...%s' % self._fp_directory[-46:]
        dirname = self.get_directory()

        Journal.block(
            'autoindexing', self._indxr_sweep_name, 'XDS', {
                'images': images_str,
                'target cell': cell_str,
                'target lattice': self._indxr_input_lattice,
                'template': self.get_template(),
                'directory': dirname
            })

        self._index_remove_masked_regions()

        if self._i_or_ii is None:
            self._i_or_ii = self.decide_i_or_ii()
            Debug.write('Selecting I or II, chose %s' % self._i_or_ii)

        idxref = self.Idxref()

        for file in ['SPOT.XDS']:
            idxref.set_input_data_file(file, self._indxr_payload[file])

        # set the phi start etc correctly

        idxref.set_data_range(self._indxr_images[0][0],
                              self._indxr_images[0][1])
        idxref.set_background_range(self._indxr_images[0][0],
                                    self._indxr_images[0][1])

        if self._i_or_ii == 'i':
            blocks = self._index_select_images_i()
            for block in blocks[:1]:
                starting_frame = block[0]
                starting_angle = self.get_scan().get_angle_from_image_index(
                    starting_frame)

                idxref.set_starting_frame(starting_frame)
                idxref.set_starting_angle(starting_angle)

                idxref.add_spot_range(block[0], block[1])

            for block in blocks[1:]:
                idxref.add_spot_range(block[0], block[1])
        else:
            for block in self._indxr_images[:1]:
                starting_frame = block[0]
                starting_angle = self.get_scan().get_angle_from_image_index(
                    starting_frame)

                idxref.set_starting_frame(starting_frame)
                idxref.set_starting_angle(starting_angle)

                idxref.add_spot_range(block[0], block[1])

            for block in self._indxr_images[1:]:
                idxref.add_spot_range(block[0], block[1])

        # FIXME need to also be able to pass in the known unit
        # cell and lattice if already available e.g. from
        # the helper... indirectly

        if self._indxr_user_input_lattice:
            idxref.set_indexer_user_input_lattice(True)

        if self._indxr_input_lattice and self._indxr_input_cell:
            idxref.set_indexer_input_lattice(self._indxr_input_lattice)
            idxref.set_indexer_input_cell(self._indxr_input_cell)

            Debug.write('Set lattice: %s' % self._indxr_input_lattice)
            Debug.write('Set cell: %f %f %f %f %f %f' % \
                        self._indxr_input_cell)

            original_cell = self._indxr_input_cell
        elif self._indxr_input_lattice:
            idxref.set_indexer_input_lattice(self._indxr_input_lattice)
            original_cell = None
        else:
            original_cell = None

        # FIXED need to set the beam centre here - this needs to come
        # from the input .xinfo object or header, and be converted
        # to the XDS frame... done.

        #mosflm_beam_centre = self.get_beam_centre()
        #xds_beam_centre = beam_centre_mosflm_to_xds(
        #mosflm_beam_centre[0], mosflm_beam_centre[1], self.get_header())
        from dxtbx.serialize.xds import to_xds
        converter = to_xds(self.get_imageset())
        xds_beam_centre = converter.detector_origin

        idxref.set_beam_centre(xds_beam_centre[0], xds_beam_centre[1])

        # fixme need to check if the lattice, cell have been set already,
        # and if they have, pass these in as input to the indexing job.

        done = False

        while not done:
            try:
                done = idxref.run()

                # N.B. in here if the IDXREF step was being run in the first
                # pass done is FALSE however there should be a refined
                # P1 orientation matrix etc. available - so keep it!

            except XDSException as e:
                # inspect this - if we have complaints about not
                # enough reflections indexed, and we have a target
                # unit cell, and they are the same, well ignore it

                if 'solution is inaccurate' in str(e):
                    Debug.write('XDS complains solution inaccurate - ignoring')
                    done = idxref.continue_from_error()
                elif ('insufficient percentage (< 70%)' in str(e) or
                      'insufficient percentage (< 50%)' in str(e)) and \
                         original_cell:
                    done = idxref.continue_from_error()
                    lattice, cell, mosaic = \
                             idxref.get_indexing_solution()
                    # compare solutions
                    check = PhilIndex.params.xia2.settings.xds_check_cell_deviation
                    for j in range(3):
                        # allow two percent variation in unit cell length
                        if math.fabs((cell[j] - original_cell[j]) / \
                                     original_cell[j]) > 0.02 and check:
                            Debug.write('XDS unhappy and solution wrong')
                            raise e
                        # and two degree difference in angle
                        if math.fabs(cell[j + 3] - original_cell[j + 3]) \
                               > 2.0 and check:
                            Debug.write('XDS unhappy and solution wrong')
                            raise e
                    Debug.write('XDS unhappy but solution ok')
                elif 'insufficient percentage (< 70%)' in str(e) or \
                         'insufficient percentage (< 50%)' in str(e):
                    Debug.write('XDS unhappy but solution probably ok')
                    done = idxref.continue_from_error()
                else:
                    raise e

        FileHandler.record_log_file(
            '%s INDEX' % self.get_indexer_full_name(),
            os.path.join(self.get_working_directory(), 'IDXREF.LP'))

        for file in ['SPOT.XDS', 'XPARM.XDS']:
            self._indxr_payload[file] = idxref.get_output_data_file(file)

        # need to get the indexing solutions out somehow...

        self._indxr_other_lattice_cell = idxref.get_indexing_solutions()

        self._indxr_lattice, self._indxr_cell, self._indxr_mosaic = \
                             idxref.get_indexing_solution()

        import dxtbx
        from dxtbx.serialize.xds import to_crystal
        xparm_file = os.path.join(self.get_working_directory(), 'XPARM.XDS')
        models = dxtbx.load(xparm_file)
        crystal_model = to_crystal(xparm_file)

        from dxtbx.model import Experiment, ExperimentList
        experiment = Experiment(
            beam=models.get_beam(),
            detector=models.get_detector(),
            goniometer=models.get_goniometer(),
            scan=models.get_scan(),
            crystal=crystal_model,
            #imageset=self.get_imageset(),
        )

        experiment_list = ExperimentList([experiment])
        self.set_indexer_experiment_list(experiment_list)

        # I will want this later on to check that the lattice was ok
        self._idxref_subtree_problem = idxref.get_index_tree_problem()

        return
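
The tolerance test buried in the IDXREF error handling above (two per cent on cell lengths, two degrees on angles) factors out cleanly; as a sketch:

def cells_agree(cell, reference, length_tol=0.02, angle_tol=2.0):
    """cell and reference are (a, b, c, alpha, beta, gamma) tuples."""
    for j in range(3):
        if abs((cell[j] - reference[j]) / reference[j]) > length_tol:
            return False
        if abs(cell[j + 3] - reference[j + 3]) > angle_tol:
            return False
    return True

For example cells_agree((78.9, 78.9, 38.1, 90, 90, 90), (78.0, 78.0, 37.5, 90, 90, 90)) is True: every length is within two per cent of the target and every angle within two degrees.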
Example #29
      (self._intgr_wedge[0] - offset, self._intgr_wedge[1] - offset))

    try:
      integrater.run()
    except RuntimeError as e:
      if 'integration failed: reason unknown' in str(e):
        Chatter.write('Mosflm has failed in integration')
        message = 'The input was:\n\n'
        for input in integrater.get_all_input():
          message += '  %s' % input
        Chatter.write(message)
      raise

    FileHandler.record_log_file(
        '%s %s %s %s mosflm integrate' % \
        (self.get_integrater_sweep_name(),
         pname, xname, dname),
        integrater.get_log_file())

    self._intgr_per_image_statistics = integrater.get_per_image_statistics()

    self._mosflm_hklout = integrater.get_hklout()
    Debug.write('Integration output: %s' %self._mosflm_hklout)

    self._intgr_n_ref = integrater.get_nref()

    # if a BGSIG error happened try not refining the
    # profile and running again...

    if integrater.get_bgsig_too_large():
      if not self._mosflm_refine_profiles:
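
The excerpt above is truncated just as it reaches the BGSIG recovery path; the policy it leads into (spelled out in full in example #31 below) is: if profiles were being refined, fix them and integrate again, otherwise give up. As a sketch:

def on_bgsig_error(refine_profiles):
    """Return the new refine-profiles setting, or raise if the profiles
    were already fixed and there is nothing left to try."""
    if not refine_profiles:
        raise RuntimeError('BGSIG error with profiles fixed')
    return False  # fix profiles; the caller marks the integrater not done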
Example #30
  def _integrate(self):
    '''Actually do the integration - in XDS terms this will mean running
    DEFPIX and INTEGRATE to measure all the reflections.'''

    experiment = self._intgr_refiner.get_refined_experiment_list(
      self.get_integrater_epoch())[0]
    crystal_model = experiment.crystal
    self._intgr_refiner_cell = crystal_model.get_unit_cell().parameters()

    images_str = '%d to %d' % tuple(self._intgr_wedge)
    cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' %tuple(self._intgr_refiner_cell)

    if len(self._fp_directory) <= 50:
      dirname = self._fp_directory
    else:
      dirname = '...%s' % self._fp_directory[-46:]

    Journal.block(
        'integrating', self._intgr_sweep_name, 'XDS',
        {'images':images_str,
         'cell':cell_str,
         'lattice':self._intgr_refiner.get_refiner_lattice(),
         'template':self._fp_template,
         'directory':dirname,
         'resolution':'%.2f' % self._intgr_reso_high})

    first_image_in_wedge = self.get_image_name(self._intgr_wedge[0])

    defpix = self.Defpix()

    # pass in the correct data

    for file in ['X-CORRECTIONS.cbf',
                 'Y-CORRECTIONS.cbf',
                 'BKGINIT.cbf',
                 'XPARM.XDS']:
      defpix.set_input_data_file(file, self._xds_data_files[file])

    defpix.set_data_range(self._intgr_wedge[0],
                          self._intgr_wedge[1])

    if self.get_integrater_high_resolution() > 0.0 and \
           self.get_integrater_user_resolution():
      Debug.write('Setting resolution limit in DEFPIX to %.2f' % \
                  self.get_integrater_high_resolution())
      defpix.set_resolution_high(self.get_integrater_high_resolution())
      defpix.set_resolution_low(self.get_integrater_low_resolution())

    elif self.get_integrater_low_resolution():
      Debug.write('Setting low resolution limit in DEFPIX to %.2f' % \
                  self.get_integrater_low_resolution())
      defpix.set_resolution_high(0.0)
      defpix.set_resolution_low(self.get_integrater_low_resolution())

    defpix.run()

    # and gather the result files
    for file in ['BKGPIX.cbf',
                 'ABS.cbf']:
      self._xds_data_files[file] = defpix.get_output_data_file(file)

    integrate = self.Integrate()

    if self._xds_integrate_parameters:
      integrate.set_updates(self._xds_integrate_parameters)

    # decide what images we are going to process, if not already
    # specified

    if not self._intgr_wedge:
      images = self.get_matching_images()
      self.set_integrater_wedge(min(images),
                                max(images))

    first_image_in_wedge = self.get_image_name(self._intgr_wedge[0])

    integrate.set_data_range(self._intgr_wedge[0],
                             self._intgr_wedge[1])

    for file in ['X-CORRECTIONS.cbf',
                 'Y-CORRECTIONS.cbf',
                 'BLANK.cbf',
                 'BKGPIX.cbf',
                 'GAIN.cbf']:
      integrate.set_input_data_file(file, self._xds_data_files[file])

    if 'GXPARM.XDS' in self._xds_data_files:
      Debug.write('Using globally refined parameters')
      integrate.set_input_data_file(
          'XPARM.XDS', self._xds_data_files['GXPARM.XDS'])
      integrate.set_refined_xparm()
    else:
      integrate.set_input_data_file(
          'XPARM.XDS', self._xds_data_files['XPARM.XDS'])

    integrate.run()

    self._intgr_per_image_statistics = integrate.get_per_image_statistics()
    Chatter.write(self.show_per_image_statistics())

    # record the log file -

    pname, xname, dname = self.get_integrater_project_info()
    sweep = self.get_integrater_sweep_name()
    FileHandler.record_log_file('%s %s %s %s INTEGRATE' % \
                                (pname, xname, dname, sweep),
                                os.path.join(self.get_working_directory(),
                                             'INTEGRATE.LP'))

    # and copy the first pass INTEGRATE.HKL...

    lattice = self._intgr_refiner.get_refiner_lattice()
    if not os.path.exists(os.path.join(
        self.get_working_directory(),
        'INTEGRATE-%s.HKL' % lattice)):
      here = self.get_working_directory()
      shutil.copyfile(os.path.join(here, 'INTEGRATE.HKL'),
                      os.path.join(here, 'INTEGRATE-%s.HKL' % lattice))

    # record INTEGRATE.HKL for e.g. BLEND.

    FileHandler.record_more_data_file(
        '%s %s %s %s INTEGRATE' % (pname, xname, dname, sweep),
        os.path.join(self.get_working_directory(), 'INTEGRATE.HKL'))

    # should the existence of these require that I rerun the
    # integration or can we assume that the application of a
    # sensible resolution limit will achieve this??

    self._xds_integrate_parameters = integrate.get_updates()

    # record the mosaic spread &c.

    m_min, m_mean, m_max = integrate.get_mosaic()
    self.set_integrater_mosaic_min_mean_max(m_min, m_mean, m_max)

    Chatter.write('Mosaic spread: %.3f < %.3f < %.3f' % \
                  self.get_integrater_mosaic_min_mean_max())

    return os.path.join(self.get_working_directory(), 'INTEGRATE.HKL')
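
A small sketch of the geometry-file selection above: INTEGRATE is fed the globally refined parameters (GXPARM.XDS) whenever a previous step produced them, falling back on the original XPARM.XDS otherwise. select_xparm is an illustrative helper, not xia2 API:

def select_xparm(xds_data_files):
    """Return (payload, refined) for the file to pass in as XPARM.XDS."""
    if 'GXPARM.XDS' in xds_data_files:
        return xds_data_files['GXPARM.XDS'], True
    return xds_data_files['XPARM.XDS'], False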
Example #31
  def _mosflm_parallel_integrate(self):
    '''Perform the integration as before, but this time as a
    number of parallel Mosflm jobs (hence, in separate directories)
    and including a step of pre-refinement of the mosaic spread and
    missets. This will all be kind of explicit and hence probably
    messy!'''

    refinr = self.get_integrater_refiner()

    lattice = refinr.get_refiner_lattice()
    spacegroup_number = lattice_to_spacegroup(lattice)
    mosaic = refinr.get_refiner_payload('mosaic')
    beam = refinr.get_refiner_payload('beam')
    distance = refinr.get_refiner_payload('distance')
    matrix = refinr.get_refiner_payload('mosflm_orientation_matrix')

    integration_params = refinr.get_refiner_payload(
      'mosflm_integration_parameters')

    if integration_params:
      if 'separation' in integration_params:
        self.set_integrater_parameter(
            'mosflm', 'separation',
            '%s %s' % tuple(integration_params['separation']))
      if 'raster' in integration_params:
        self.set_integrater_parameter(
            'mosflm', 'raster',
            '%d %d %d %d %d' % tuple(integration_params['raster']))

    refinr.set_refiner_payload('mosflm_integration_parameters', None)
    pname, xname, dname = self.get_integrater_project_info()

    # what follows below should (i) be run in separate directories
    # and (ii) be repeated N=parallel times.

    nproc = PhilIndex.params.xia2.settings.multiprocessing.nproc
    parallel = nproc

    # FIXME this is something of a kludge - if too few frames refinement
    # and integration does not work well... ideally want at least 15
    # frames / chunk (say)
    nframes = self._intgr_wedge[1] - self._intgr_wedge[0] + 1

    if parallel > nframes / 15:
      parallel = nframes // 15

    if not parallel:
      raise RuntimeError('parallel not set')
    if parallel < 2:
      raise RuntimeError('parallel not parallel: %s' % parallel)

    jobs = []
    hklouts = []
    nref = 0

    # calculate the chunks to use
    offset = self.get_frame_offset()
    start = self._intgr_wedge[0] - offset
    end = self._intgr_wedge[1] - offset

    left_images = 1 + end - start
    left_chunks = parallel
    chunks = []

    while left_images > 0:
      size = left_images // left_chunks
      chunks.append((start, start + size - 1))
      start += size
      left_images -= size
      left_chunks -= 1

    summary_files = []

    for j in range(parallel):

      # make some working directories, as necessary - chunk-(0:N-1)
      wd = os.path.join(self.get_working_directory(),
                        'chunk-%d' % j)
      if not os.path.exists(wd):
        os.makedirs(wd)

      job = MosflmIntegrate()
      job.set_working_directory(wd)

      auto_logfiler(job)

      l = refinr.get_refiner_lattice()

      # create the starting point
      f = open(os.path.join(wd, 'xiaintegrate-%s.mat' % l), 'w')
      for m in matrix:
        f.write(m)
      f.close()

      spacegroup_number = lattice_to_spacegroup(lattice)

      job.set_refine_profiles(self._mosflm_refine_profiles)

      # N.B. for harvesting need to append N to dname.

      if pname is not None and xname is not None and dname is not None:
        Debug.write('Harvesting: %s/%s/%s' %
                    (pname, xname, dname))
        harvest_dir = self.get_working_directory()
        temp_dname = '%s_%s' % \
                     (dname, self.get_integrater_sweep_name())
        job.set_pname_xname_dname(pname, xname, temp_dname)

      job.set_template(os.path.basename(self.get_template()))
      job.set_directory(self.get_directory())

      # check for ice - and if so, exclude (ranges taken from
      # XDS documentation)
      if self.get_integrater_ice() != 0:
        Debug.write('Excluding ice rings')
        job.set_exclude_ice(True)

      # exclude specified resolution ranges
      if len(self.get_integrater_excluded_regions()) != 0:
        regions = self.get_integrater_excluded_regions()
        Debug.write('Excluding regions: %s' % str(regions))
        job.set_exclude_regions(regions)

      mask = standard_mask(self.get_detector())
      for m in mask:
        job.add_instruction(m)

      job.set_input_mat_file('xiaintegrate-%s.mat' % l)

      job.set_beam_centre(beam)
      job.set_distance(distance)
      job.set_space_group_number(spacegroup_number)
      job.set_mosaic(mosaic)

      if self.get_wavelength_prov() == 'user':
        job.set_wavelength(self.get_wavelength())

      parameters = self.get_integrater_parameters('mosflm')
      job.update_parameters(parameters)

      if self._mosflm_gain:
        job.set_gain(self._mosflm_gain)

      # check for resolution limits
      if self._intgr_reso_high > 0.0:
        job.set_d_min(self._intgr_reso_high)
      if self._intgr_reso_low:
        job.set_d_max(self._intgr_reso_low)

      if PhilIndex.params.general.backstop_mask:
        from xia2.Toolkit.BackstopMask import BackstopMask
        mask = BackstopMask(PhilIndex.params.general.backstop_mask)
        mask = mask.calculate_mask_mosflm(self.get_header())
        job.set_mask(mask)

      detector = self.get_detector()
      detector_width, detector_height = detector[0].get_image_size_mm()

      lim_x = 0.5 * detector_width
      lim_y = 0.5 * detector_height

      Debug.write('Scanner limits: %.1f %.1f' % (lim_x, lim_y))
      job.set_limits(lim_x, lim_y)

      job.set_fix_mosaic(self._mosflm_postref_fix_mosaic)

      job.set_pre_refinement(True)
      job.set_image_range(chunks[j])


      # these are now running so ...

      jobs.append(job)

      continue

    # ok, at this stage I need to ...
    #
    # (i) accumulate the statistics as a function of batch
    # (ii) merge them into a single block
    #
    # This is likely to be a pain in the arse!

    first_integrated_batch = 1.0e6
    last_integrated_batch = -1.0e6

    all_residuals = []

    threads = []

    for j in range(parallel):
      job = jobs[j]

      # now wait for them to finish - first wait will really be the
      # first one, then all should be finished...

      thread = Background(job, 'run')
      thread.start()
      threads.append(thread)

    mosaics = []
    postref_result = { }

    integrated_images_first = 1.0e6
    integrated_images_last = -1.0e6
    self._intgr_per_image_statistics = {}

    for j in range(parallel):
      thread = threads[j]
      thread.stop()
      job = jobs[j]

      # get the log file
      output = job.get_all_output()

      # record a copy of it, perhaps - though not if parallel
      if self.get_integrater_sweep_name() and False:
        pname, xname, dname = self.get_integrater_project_info()
        FileHandler.record_log_file(
            '%s %s %s %s mosflm integrate' % \
            (self.get_integrater_sweep_name(),
             pname, xname, '%s_%d' % (dname, j)),
            job.get_log_file())

      # look for things that we want to know...
      # that is, the output reflection file name, the updated
      # value for the gain (if present,) any warnings, errors,
      # or just interesting facts.

      batches = job.get_batches_out()
      integrated_images_first = min(batches[0], integrated_images_first)
      integrated_images_last = max(batches[1], integrated_images_last)

      mosaics.extend(job.get_mosaic_spreads())

      if mosaics and min(mosaics) < 0:
        raise IntegrationError('negative mosaic spread: %s' % min(mosaics))

      if (job.get_detector_gain_error() and not
          (self.get_imageset().get_detector()[0].get_type() == 'SENSOR_PAD')):
        gain = job.get_suggested_gain()
        if gain is not None:
          self.set_integrater_parameter('mosflm', 'gain', gain)
          self.set_integrater_export_parameter('mosflm', 'gain', gain)
          if self._mosflm_gain:
            Debug.write('GAIN updated to %f' % gain)
          else:
            Debug.write('GAIN found to be %f' % gain)

          self._mosflm_gain = gain
          self._mosflm_rerun_integration = True

      hklout = job.get_hklout()
      Debug.write('Integration output: %s' % hklout)
      hklouts.append(hklout)

      nref += job.get_nref()

      # if a BGSIG error happened try not refining the
      # profile and running again...

      if job.get_bgsig_too_large():
        if not self._mosflm_refine_profiles:
          raise RuntimeError('BGSIG error with profiles fixed')

        Debug.write(
            'BGSIG error detected - try fixing profile...')

        self._mosflm_refine_profiles = False
        self.set_integrater_done(False)

        return

      if job.get_getprof_error():
        Debug.write(
            'GETPROF error detected - try fixing profile...')
        self._mosflm_refine_profiles = False
        self.set_integrater_done(False)

        return

      # here
      # write the report for each image as .*-#$ to Chatter -
      # detailed report will be written automagically to science...

      self._intgr_per_image_statistics.update(job.get_per_image_statistics())
      postref_result.update(job.get_postref_result())

      # inspect the output for e.g. very high weighted residuals

      all_residuals.extend(job.get_residuals())

    self._intgr_batches_out = (integrated_images_first,
                               integrated_images_last)

    if mosaics:
      self.set_integrater_mosaic_min_mean_max(
          min(mosaics), sum(mosaics) / len(mosaics), max(mosaics))
    else:
      # no per-chunk values recovered - fall back on the refiner's estimate
      self.set_integrater_mosaic_min_mean_max(mosaic, mosaic, mosaic)

    Chatter.write(self.show_per_image_statistics())

    Chatter.write('Mosaic spread: %.3f < %.3f < %.3f' % \
                  self.get_integrater_mosaic_min_mean_max())

    # gather the statistics from the postrefinement for all sweeps
    # now write this to a postrefinement log

    postref_log = os.path.join(self.get_working_directory(),
                               'postrefinement.log')

    fout = open(postref_log, 'w')

    fout.write('$TABLE: Postrefinement for %s:\n' % \
               self._intgr_sweep_name)
    fout.write('$GRAPHS: Missetting angles:A:1, 2, 3, 4: $$\n')
    fout.write('Batch PhiX PhiY PhiZ $$ Batch PhiX PhiY PhiZ $$\n')

    for image in sorted(postref_result):
      phix = postref_result[image].get('phix', 0.0)
      phiy = postref_result[image].get('phiy', 0.0)
      phiz = postref_result[image].get('phiz', 0.0)

      fout.write('%d %5.2f %5.2f %5.2f\n' % \
                 (image, phix, phiy, phiz))

    fout.write('$$\n')
    fout.close()

    if self.get_integrater_sweep_name():
      pname, xname, dname = self.get_integrater_project_info()
      FileHandler.record_log_file('%s %s %s %s postrefinement' % \
                                  (self.get_integrater_sweep_name(),
                                   pname, xname, dname),
                                  postref_log)

    hklouts.sort()

    hklout = os.path.join(self.get_working_directory(),
                          os.path.split(hklouts[0])[-1])

    Debug.write('Sorting data to %s' % hklout)
    for hklin in hklouts:
      Debug.write('<= %s' % hklin)

    sortmtz = Sortmtz()
    sortmtz.set_hklout(hklout)
    for hklin in hklouts:
      sortmtz.add_hklin(hklin)

    sortmtz.sort()

    self._mosflm_hklout = hklout

    return self._mosflm_hklout
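
The chunking arithmetic at the heart of the parallel run above divides the wedge into near-equal contiguous blocks without dropping or duplicating images. Extracted as a standalone helper (illustrative, not xia2 API):

def balanced_chunks(start, end, n):
    """Split the inclusive image range start..end into n contiguous,
    near-equal chunks, exactly as in the loop above."""
    chunks = []
    left_images = 1 + end - start
    left_chunks = n
    while left_images > 0:
        size = left_images // left_chunks
        chunks.append((start, start + size - 1))
        start += size
        left_images -= size
        left_chunks -= 1
    return chunks

For example balanced_chunks(1, 90, 4) gives [(1, 22), (23, 44), (45, 67), (68, 90)].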
Example #32
    def _index(self):
        if PhilIndex.params.dials.index.method in (libtbx.Auto, None):
            if self._indxr_input_cell is not None:
                indexer = self._do_indexing("real_space_grid_search")
            else:
                try:
                    indexer_fft3d = self._do_indexing(method="fft3d")
                    nref_3d, rmsd_3d = indexer_fft3d.get_nref_rmsds()
                except Exception as e:
                    nref_3d = None
                    rmsd_3d = None
                    indexing_failure = e
                try:
                    indexer_fft1d = self._do_indexing(method="fft1d")
                    nref_1d, rmsd_1d = indexer_fft1d.get_nref_rmsds()
                except Exception as e:
                    nref_1d = None
                    rmsd_1d = None
                    indexing_failure = e

                # prefer fft1d only if it indexed more reflections with
                # uniformly lower RMSDs, or if fft3d failed outright
                if nref_1d is not None and (
                        nref_3d is None or
                        (nref_1d > nref_3d and rmsd_1d[0] < rmsd_3d[0]
                         and rmsd_1d[1] < rmsd_3d[1]
                         and rmsd_1d[2] < rmsd_3d[2])):
                    indexer = indexer_fft1d
                elif nref_3d is not None:
                    indexer = indexer_fft3d
                else:
                    raise RuntimeError(indexing_failure)

        else:
            indexer = self._do_indexing(
                method=PhilIndex.params.dials.index.method)

        # not strictly the P1 cell, rather the cell that was used in indexing
        self._p1_cell = indexer._p1_cell
        self.set_indexer_payload("indexed_filename",
                                 indexer.get_indexed_filename())

        from cctbx.sgtbx import bravais_types
        from dxtbx.serialize import load

        indexed_file = indexer.get_indexed_filename()
        indexed_experiments = indexer.get_experiments_filename()

        fast_mode = PhilIndex.params.dials.fast_mode
        trust_beam_centre = PhilIndex.params.xia2.settings.trust_beam_centre
        multi_sweep_indexing = PhilIndex.params.xia2.settings.multi_sweep_indexing == True

        if not (trust_beam_centre or fast_mode or multi_sweep_indexing):
            checksym = self.CheckIndexingSymmetry()
            checksym.set_experiments_filename(indexed_experiments)
            checksym.set_indexed_filename(indexed_file)
            checksym.set_grid_search_scope(1)
            checksym.run()
            hkl_offset = checksym.get_hkl_offset()
            Debug.write("hkl_offset: %s" % str(hkl_offset))
            if hkl_offset is not None and hkl_offset != (0, 0, 0):
                reindex = self.Reindex()
                reindex.set_hkl_offset(hkl_offset)
                reindex.set_indexed_filename(indexed_file)
                reindex.run()
                indexed_file = reindex.get_reindexed_reflections_filename()

                # do some scan-static refinement - run twice, first without outlier
                # rejection as the model is too far from reality to do a sensible job of
                # outlier rejection
                refiner = self.Refine()
                refiner.set_experiments_filename(indexed_experiments)
                refiner.set_indexed_filename(
                    reindex.get_reindexed_reflections_filename())
                refiner.set_outlier_algorithm(None)
                refiner.run()
                indexed_experiments = refiner.get_refined_experiments_filename()

                # now again with outlier rejection (possibly)
                refiner = self.Refine()
                refiner.set_experiments_filename(indexed_experiments)
                refiner.set_indexed_filename(indexed_file)
                refiner.run()
                indexed_experiments = refiner.get_refined_experiments_filename()

        if self._indxr_input_lattice is None:

            # FIXME in here should respect the input unit cell and lattice if provided

            # FIXME from this (i) populate the helper table,
            # (ii) try to avoid re-running the indexing
            # step if we eliminate a solution as we have all of the refined results
            # already available.

            rbs = self.RefineBravaisSettings()
            rbs.set_experiments_filename(indexed_experiments)
            rbs.set_indexed_filename(indexed_file)
            if PhilIndex.params.dials.fix_geometry:
                rbs.set_detector_fix('all')
                rbs.set_beam_fix('all')

            FileHandler.record_log_file(
                '%s LATTICE' % self.get_indexer_full_name(),
                rbs.get_log_file())
            rbs.run()

            from cctbx import crystal, sgtbx

            for k in sorted(rbs.get_bravais_summary()):
                summary = rbs.get_bravais_summary()[k]

                # FIXME need to do this better - for the moment only accept lattices
                # where R.M.S. deviation is less than twice P1 R.M.S. deviation.

                if self._indxr_input_lattice is None:
                    if not summary['recommended']:
                        continue

                experiments = load.experiment_list(summary['experiments_file'],
                                                   check_format=False)
                cryst = experiments.crystals()[0]
                cs = crystal.symmetry(unit_cell=cryst.get_unit_cell(),
                                      space_group=cryst.get_space_group())
                cb_op_best_to_ref = cs.change_of_basis_op_to_reference_setting()
                cs_reference = cs.change_basis(cb_op_best_to_ref)
                lattice = str(
                    bravais_types.bravais_lattice(
                        group=cs_reference.space_group()))
                cb_op = cb_op_best_to_ref * sgtbx.change_of_basis_op(
                    str(summary['cb_op']))

                self._solutions[k] = {
                    'number': k,
                    'mosaic': 0.0,
                    'metric': summary['max_angular_difference'],
                    'rmsd': summary['rmsd'],
                    'nspots': summary['nspots'],
                    'lattice': lattice,
                    'cell': cs_reference.unit_cell().parameters(),
                    'experiments_file': summary['experiments_file'],
                    'cb_op': str(cb_op)
                }

            self._solution = self.get_solution()
            self._indxr_lattice = self._solution['lattice']

            for solution in self._solutions.keys():
                lattice = self._solutions[solution]['lattice']
                if (self._indxr_input_lattice is not None
                        and self._indxr_input_lattice != lattice):
                    continue
                if lattice in self._indxr_other_lattice_cell:
                    if self._indxr_other_lattice_cell[lattice]['metric'] < \
                      self._solutions[solution]['metric']:
                        continue

                self._indxr_other_lattice_cell[lattice] = {
                    'metric': self._solutions[solution]['metric'],
                    'cell': self._solutions[solution]['cell']
                }

            self._indxr_mosaic = self._solution['mosaic']

            experiment_list = load.experiment_list(
                self._solution['experiments_file'])
            self.set_indexer_experiment_list(experiment_list)

            # reindex the output experiments list to the reference setting
            # (from the best cell/conventional setting)
            cb_op_to_ref = experiment_list.crystals()[0].get_space_group().info()\
              .change_of_basis_op_to_reference_setting()
            reindex = self.Reindex()
            reindex.set_experiments_filename(
                self._solution['experiments_file'])
            reindex.set_cb_op(cb_op_to_ref)
            reindex.set_space_group(
                str(lattice_to_spacegroup_number(self._solution['lattice'])))
            reindex.run()
            experiments_file = reindex.get_reindexed_experiments_filename()
            experiment_list = load.experiment_list(experiments_file)
            self.set_indexer_experiment_list(experiment_list)
            self.set_indexer_payload("experiments_filename", experiments_file)

            # reindex the output reflection list to this solution
            reindex = self.Reindex()
            reindex.set_indexed_filename(indexed_file)
            reindex.set_cb_op(self._solution['cb_op'])
            reindex.set_space_group(
                str(lattice_to_spacegroup_number(self._solution['lattice'])))
            reindex.run()
            indexed_file = reindex.get_reindexed_reflections_filename()
            self.set_indexer_payload("indexed_filename", indexed_file)

        else:
            experiment_list = load.experiment_list(indexed_experiments)
            self.set_indexer_experiment_list(experiment_list)
            self.set_indexer_payload("experiments_filename",
                                     indexed_experiments)

            cryst = experiment_list.crystals()[0]
            lattice = str(
                bravais_types.bravais_lattice(group=cryst.get_space_group()))
            self._indxr_lattice = lattice
            self._solutions = {}
            self._solutions[0] = {
                'number': 0,
                'mosaic': 0.0,
                'metric': -1,
                'rmsd': -1,
                'nspots': -1,
                'lattice': lattice,
                'cell': cryst.get_unit_cell().parameters(),
                'experiments_file': indexed_experiments,
                'cb_op': 'a,b,c'
            }

            self._indxr_other_lattice_cell[lattice] = {
                'metric': self._solutions[0]['metric'],
                'cell': self._solutions[0]['cell']
            }

        return
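
Example #32 differs from example #26 mainly in mapping each Bravais solution to the reference setting before recording it: the total change-of-basis is the composition of the solution's own cb_op with the best-cell-to-reference-setting cb_op. A minimal cctbx sketch of that composition (the two operators here are illustrative values):

from cctbx import sgtbx

cb_op_solution = sgtbx.change_of_basis_op('b,c,a')
cb_op_best_to_ref = sgtbx.change_of_basis_op('a,b,c')
# composed as in the example: the reference-setting op is on the left
cb_op_total = cb_op_best_to_ref * cb_op_solution
print(cb_op_total.as_abc())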
Example #33
File: XDSScalerA.py  Project: xia2/xia2
  def _scale(self):
    '''Actually scale all of the data together.'''

    from xia2.Handlers.Environment import debug_memory_usage
    debug_memory_usage()

    Journal.block(
        'scaling', self.get_scaler_xcrystal().get_name(), 'XSCALE',
        {'scaling model':'default (all)'})

    epochs = sorted(self._sweep_information.keys())

    xscale = self.XScale()

    xscale.set_spacegroup_number(self._xds_spacegroup)
    xscale.set_cell(self._scalr_cell)

    Debug.write('Set CELL: %.2f %.2f %.2f %.2f %.2f %.2f' % \
                tuple(self._scalr_cell))
    Debug.write('Set SPACEGROUP_NUMBER: %d' % \
                self._xds_spacegroup)

    Debug.write('Gathering measurements for scaling')

    for epoch in epochs:

      # get the prepared reflections
      reflections = self._sweep_information[epoch][
          'prepared_reflections']

      # and the get wavelength that this belongs to
      dname = self._sweep_information[epoch]['dname']
      sname = self._sweep_information[epoch]['sname']

      # and the resolution range for the reflections
      intgr = self._sweep_information[epoch]['integrater']
      Debug.write('Epoch: %d' % epoch)
      Debug.write('HKL: %s (%s/%s)' % (reflections, dname, sname))

      resolution_low = intgr.get_integrater_low_resolution()
      resolution_high, _ = self._scalr_resolution_limits.get((dname, sname), (0.0, None))

      resolution = (resolution_high, resolution_low)

      xscale.add_reflection_file(reflections, dname, resolution)

    # set the global properties of the sample
    xscale.set_crystal(self._scalr_xname)
    xscale.set_anomalous(self._scalr_anomalous)

    debug_memory_usage()
    xscale.run()

    scale_factor = xscale.get_scale_factor()

    Debug.write('XSCALE scale factor found to be: %e' % scale_factor)

    # record the log file

    pname = self._scalr_pname
    xname = self._scalr_xname

    FileHandler.record_log_file('%s %s XSCALE' % \
                                (pname, xname),
                                os.path.join(self.get_working_directory(),
                                             'XSCALE.LP'))

    # check for outlier reflections and if a number are found
    # then iterate (that is, rerun XSCALE, rejecting these outliers)

    if not PhilIndex.params.dials.fast_mode and not PhilIndex.params.xds.keep_outliers:
      xscale_remove = xscale.get_remove()
      if xscale_remove:
        current_remove = []
        final_remove = []

        # first ensure that there are no duplicate entries...
        if os.path.exists(os.path.join(
            self.get_working_directory(),
            'REMOVE.HKL')):
          for line in open(os.path.join(
              self.get_working_directory(),
              'REMOVE.HKL'), 'r').readlines():
            h, k, l = map(int, line.split()[:3])
            z = float(line.split()[3])

            if not (h, k, l, z) in current_remove:
              current_remove.append((h, k, l, z))

          for c in xscale_remove:
            if c in current_remove:
              continue
            final_remove.append(c)

          Debug.write(
              '%d alien reflections are already removed' % \
              (len(xscale_remove) - len(final_remove)))

        else:
          # we want to remove all of the new dodgy reflections
          final_remove = xscale_remove

        remove_hkl = open(os.path.join(
            self.get_working_directory(),
            'REMOVE.HKL'), 'w')

        z_min = PhilIndex.params.xds.z_min
        rejected = 0

        # write in the old reflections
        for remove in current_remove:
          z = remove[3]
          if z >= z_min:
            remove_hkl.write('%d %d %d %f\n' % remove)
          else:
            rejected += 1
        Debug.write('Wrote %d old reflections to REMOVE.HKL' % \
                    (len(current_remove) - rejected))
        Debug.write('Rejected %d as z < %f' % \
                    (rejected, z_min))

        # and the new reflections
        rejected = 0
        used = 0
        for remove in final_remove:
          z = remove[3]
          if z >= z_min:
            used += 1
            remove_hkl.write('%d %d %d %f\n' % remove)
          else:
            rejected += 1
        Debug.write('Wrote %d new reflections to REMOVE.HKL' % \
                    (len(final_remove) - rejected))
        Debug.write('Rejected %d as z < %f' % \
                    (rejected, z_min))

        remove_hkl.close()

        # we want to rerun the finishing step so...
        # unless we have added no new reflections
        if used:
          self.set_scaler_done(False)

    if not self.get_scaler_done():
      Chatter.write('Excluding outlier reflections Z > %.2f' %
                    PhilIndex.params.xds.z_min)
      return

    debug_memory_usage()

    # now get the reflection files out and merge them with aimless

    output_files = xscale.get_output_reflection_files()
    wavelength_names = output_files.keys()

    # these are per wavelength - also allow for user defined resolution
    # limits a la bug # 3183. No longer...

    for epoch in self._sweep_information.keys():

      input = self._sweep_information[epoch]

      intgr = input['integrater']

      rkey = input['dname'], input['sname']

      if intgr.get_integrater_user_resolution():
        dmin = intgr.get_integrater_high_resolution()

        if rkey not in self._user_resolution_limits:
          self._scalr_resolution_limits[rkey] = (dmin, None)
          self._user_resolution_limits[rkey] = dmin
        elif dmin < self._user_resolution_limits[rkey]:
          self._scalr_resolution_limits[rkey] = (dmin, None)
          self._user_resolution_limits[rkey] = dmin

    self._scalr_scaled_refl_files = { }

    self._scalr_statistics = { }

    max_batches = 0
    mtz_dict = { }

    project_info = { }
    for epoch in self._sweep_information.keys():
      pname = self._scalr_pname
      xname = self._scalr_xname
      dname = self._sweep_information[epoch]['dname']
      reflections = os.path.split(
          self._sweep_information[epoch]['prepared_reflections'])[-1]
      project_info[reflections] = (pname, xname, dname)

    for epoch in self._sweep_information.keys():
      self._sweep_information[epoch]['scaled_reflections'] = None

    debug_memory_usage()

    for wavelength in wavelength_names:
      hklin = output_files[wavelength]

      xsh = XDSScalerHelper()
      xsh.set_working_directory(self.get_working_directory())

      ref = xsh.split_and_convert_xscale_output(
          hklin, 'SCALED_', project_info, 1.0 / scale_factor)

      for hklout in ref.keys():
        for epoch in self._sweep_information.keys():
          if os.path.split(self._sweep_information[epoch][
              'prepared_reflections'])[-1] == \
              os.path.split(hklout)[-1]:
            if self._sweep_information[epoch][
                'scaled_reflections'] is not None:
              raise RuntimeError('duplicate entries')
            self._sweep_information[epoch][
                'scaled_reflections'] = ref[hklout]

      del xsh

    debug_memory_usage()

    for epoch in self._sweep_information.keys():
      hklin = self._sweep_information[epoch]['scaled_reflections']
      dname = self._sweep_information[epoch]['dname']
      sname = self._sweep_information[epoch]['sname']
      intgr = self._sweep_information[epoch]['integrater']

      hkl_copy = os.path.join(self.get_working_directory(),
                              'R_%s' % os.path.split(hklin)[-1])

      if not os.path.exists(hkl_copy):
        shutil.copyfile(hklin, hkl_copy)

      # let's properly listen to the user's resolution limit needs...

      if self._user_resolution_limits.get((dname, sname), False):
        resolution = self._user_resolution_limits[(dname, sname)]

      else:
        # fetch this sweep's integrater explicitly - the loop above left
        # 'intgr' bound to whichever sweep it visited last
        intgr = self._sweep_information[epoch]['integrater']
        if PhilIndex.params.xia2.settings.resolution.keep_all_reflections:
          try:
            resolution = intgr.get_detector().get_max_resolution(
                intgr.get_beam_obj().get_s0())
            Debug.write('keep_all_reflections set, using detector limits')
          except Exception:
            resolution = self._estimate_resolution_limit(hklin)
        else:
          resolution = self._estimate_resolution_limit(hklin)

      Chatter.write('Resolution for sweep %s/%s: %.2f' % \
                    (dname, sname, resolution))

      # record (or tighten) the limit for this sweep; any change forces a
      # further round of scaling
      if ((dname, sname) not in self._scalr_resolution_limits
          or resolution < self._scalr_resolution_limits[(dname, sname)][0]):
        self._scalr_resolution_limits[(dname, sname)] = (resolution, None)
        self.set_scaler_done(False)

    debug_memory_usage()

    if not self.get_scaler_done():
      Debug.write('Returning as scaling not finished...')
      return

    self._sort_together_data_xds()

    highest_resolution = min(limit for limit, _ in self._scalr_resolution_limits.values())

    self._scalr_highest_resolution = highest_resolution

    Debug.write('Scaler highest resolution set to %5.2f' % \
                highest_resolution)

    sdadd_full = 0.0
    sdb_full = 0.0

    # ---------- FINAL MERGING ----------

    sc = self._factory.Aimless()

    FileHandler.record_log_file('%s %s aimless' % (self._scalr_pname,
                                                   self._scalr_xname),
                                sc.get_log_file())

    sc.set_resolution(highest_resolution)
    sc.set_hklin(self._prepared_reflections)
    sc.set_new_scales_file('%s_final.scales' % self._scalr_xname)

    if sdadd_full != 0.0 or sdb_full != 0.0:
      sc.add_sd_correction('both', 1.0, sdadd_full, sdb_full)

    epochs = sorted(self._sweep_information.keys())
    for epoch in epochs:
      input = self._sweep_information[epoch]
      start, end = (min(input['batches']), max(input['batches']))

      rkey = input['dname'], input['sname']
      run_resolution_limit, _ = self._scalr_resolution_limits[rkey]

      sc.add_run(start, end, exclude = False,
                 resolution = run_resolution_limit,
                 name = input['sname'])

    sc.set_hklout(os.path.join(self.get_working_directory(),
                               '%s_%s_scaled.mtz' % \
                               (self._scalr_pname, self._scalr_xname)))

    if self.get_scaler_anomalous():
      sc.set_anomalous()

    sc.multi_merge()

    FileHandler.record_xml_file('%s %s aimless xml' % (self._scalr_pname,
                                                       self._scalr_xname),
                                sc.get_xmlout())
    data = sc.get_summary()

    loggraph = sc.parse_ccp4_loggraph()

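    # each loggraph table key ends with its dataset name after a comma,
    # hence the key.split(',')[-1] lookups below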
    standard_deviation_info = { }

    for key in loggraph.keys():
      if 'standard deviation v. Intensity' in key:
        dataset = key.split(',')[-1].strip()
        standard_deviation_info[dataset] = transpose_loggraph(
            loggraph[key])

    resolution_info = { }

    for key in loggraph.keys():
      if 'Analysis against resolution' in key:
        dataset = key.split(',')[-1].strip()
        resolution_info[dataset] = transpose_loggraph(
            loggraph[key])

    # and also radiation damage stuff...

    batch_info = { }

    for key in loggraph.keys():
      if 'Analysis against Batch' in key:
        dataset = key.split(',')[-1].strip()
        batch_info[dataset] = transpose_loggraph(
            loggraph[key])

    # finally put all of the results "somewhere useful"

    self._scalr_statistics = data

    self._scalr_scaled_refl_files = copy.deepcopy(
        sc.get_scaled_reflection_files())

    self._scalr_scaled_reflection_files = { }

    # also output the unmerged scalepack format files...

    sc = self._factory.Aimless()
    sc.set_resolution(highest_resolution)
    sc.set_hklin(self._prepared_reflections)
    sc.set_scalepack()
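    # scalepack mode makes this second merge also emit unmerged .sca
    # files; the merged mtz it writes duplicates the run above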

    for epoch in epochs:
      input = self._sweep_information[epoch]
      start, end = (min(input['batches']), max(input['batches']))

      rkey = input['dname'], input['sname']
      run_resolution_limit, _ = self._scalr_resolution_limits[rkey]

      sc.add_run(start, end, exclude = False,
                 resolution = run_resolution_limit,
                 name = input['sname'])

    sc.set_hklout(os.path.join(self.get_working_directory(),
                               '%s_%s_scaled.mtz' % \
                               (self._scalr_pname,
                                self._scalr_xname)))

    if self.get_scaler_anomalous():
      sc.set_anomalous()

    sc.multi_merge()

    self._scalr_scaled_reflection_files['sca_unmerged'] = { }
    self._scalr_scaled_reflection_files['mtz_unmerged'] = { }

    for dataset in sc.get_scaled_reflection_files().keys():
      hklout = sc.get_scaled_reflection_files()[dataset]

      # then mark the scalepack files for copying...

      # derive the unmerged scalepack name from the merged mtz name,
      # e.g. foo_scaled.mtz -> foo_scaled_unmerged.sca
      dirname, basename = os.path.split(hklout)
      scalepack = os.path.join(
          dirname,
          basename.replace('_scaled', '_scaled_unmerged').replace(
              '.mtz', '.sca'))
      self._scalr_scaled_reflection_files['sca_unmerged'][
          dataset] = scalepack
      FileHandler.record_data_file(scalepack)
      mtz_unmerged = os.path.splitext(scalepack)[0] + '.mtz'
      self._scalr_scaled_reflection_files['mtz_unmerged'][dataset] = mtz_unmerged
      FileHandler.record_data_file(mtz_unmerged)

    if PhilIndex.params.xia2.settings.merging_statistics.source == 'cctbx':
      for key in self._scalr_scaled_refl_files:
        stats = self._compute_scaler_statistics(
          self._scalr_scaled_reflection_files['mtz_unmerged'][key], wave=key)
        self._scalr_statistics[
          (self._scalr_pname, self._scalr_xname, key)] = stats

    # convert reflection files to .sca format - use mtz2various for this

    self._scalr_scaled_reflection_files['sca'] = { }
    self._scalr_scaled_reflection_files['hkl'] = { }

    for key in self._scalr_scaled_refl_files:

      f = self._scalr_scaled_refl_files[key]
      scaout = '%s.sca' % f[:-4]

      m2v = self._factory.Mtz2various()
      m2v.set_hklin(f)
      m2v.set_hklout(scaout)
      m2v.convert()

      self._scalr_scaled_reflection_files['sca'][key] = scaout
      FileHandler.record_data_file(scaout)

      if PhilIndex.params.xia2.settings.small_molecule:
        hklout = '%s.hkl' % f[:-4]

        m2v = self._factory.Mtz2various()
        m2v.set_hklin(f)
        m2v.set_hklout(hklout)
        m2v.convert_shelx()

        self._scalr_scaled_reflection_files['hkl'][key] = hklout
        FileHandler.record_data_file(hklout)
Example #34
    def _refine(self):
        for epoch, idxr in self._refinr_indexers.items():
            experiments = idxr.get_indexer_experiment_list()

            indexed_experiments = idxr.get_indexer_payload(
                "experiments_filename")
            indexed_reflections = idxr.get_indexer_payload("indexed_filename")

            if len(experiments) > 1:
                xsweeps = idxr._indxr_sweeps
                assert len(xsweeps) == len(experiments)
                # don't currently support joint refinement
                assert len(self._refinr_sweeps) == 1
                xsweep = self._refinr_sweeps[0]
                i = xsweeps.index(xsweep)
                experiments = experiments[i:i + 1]

                # Extract and output experiment and reflections for current sweep
                indexed_experiments = os.path.join(
                    self.get_working_directory(),
                    "%s_indexed_experiments.json" % xsweep.get_name())
                indexed_reflections = os.path.join(
                    self.get_working_directory(),
                    "%s_indexed_reflections.pickle" % xsweep.get_name())

                from dxtbx.serialize import dump
                dump.experiment_list(experiments, indexed_experiments)

                from libtbx import easy_pickle
                from scitbx.array_family import flex
                reflections = easy_pickle.load(
                    idxr.get_indexer_payload("indexed_filename"))
                sel = reflections['id'] == i
                assert sel.count(True) > 0
                imageset_id = reflections['imageset_id'].select(sel)
                assert imageset_id.all_eq(imageset_id[0])
                sel = reflections['imageset_id'] == imageset_id[0]
                reflections = reflections.select(sel)
                # set indexed reflections to id == 0 and imageset_id == 0
                reflections['id'].set_selected(reflections['id'] == i, 0)
                reflections['imageset_id'] = flex.int(len(reflections), 0)
                easy_pickle.dump(indexed_reflections, reflections)

            # currently only handle one lattice/sweep
            assert len(experiments.crystals()) == 1
            crystal_model = experiments.crystals()[0]
            lattice = idxr.get_indexer_lattice()

            from dxtbx.serialize import load

            scan_static = PhilIndex.params.dials.refine.scan_static

            # XXX Temporary workaround for dials.refine error for scan_varying
            # refinement with smaller wedges
            start, end = experiments[0].scan.get_oscillation_range()
            total_phi_range = end - start

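            # scan-varying refinement only pays off with enough data:
            # narrow wedges (<= 5 degrees) and fast mode stay scan-static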
            if (PhilIndex.params.dials.refine.scan_varying
                    and total_phi_range > 5
                    and not PhilIndex.params.dials.fast_mode):
                scan_varying = PhilIndex.params.dials.refine.scan_varying
            else:
                scan_varying = False

            if scan_static:
                refiner = self.Refine()
                refiner.set_experiments_filename(indexed_experiments)
                refiner.set_indexed_filename(indexed_reflections)
                refiner.set_scan_varying(False)
                refiner.run()
                self._refinr_experiments_filename \
                  = refiner.get_refined_experiments_filename()
                self._refinr_indexed_filename = refiner.get_refined_filename()
            else:
                self._refinr_experiments_filename = indexed_experiments
                self._refinr_indexed_filename = indexed_reflections

            if scan_varying:
                refiner = self.Refine()
                refiner.set_experiments_filename(
                    self._refinr_experiments_filename)
                refiner.set_indexed_filename(self._refinr_indexed_filename)
                if total_phi_range < 36:
                    refiner.set_interval_width_degrees(total_phi_range / 2)
                refiner.run()
                self._refinr_experiments_filename \
                  = refiner.get_refined_experiments_filename()
                self._refinr_indexed_filename = refiner.get_refined_filename()

            if scan_static or scan_varying:
                FileHandler.record_log_file(
                    '%s REFINE' % idxr.get_indexer_full_name(),
                    refiner.get_log_file())
                report = self.Report()
                report.set_experiments_filename(
                    self._refinr_experiments_filename)
                report.set_reflections_filename(self._refinr_indexed_filename)
                html_filename = os.path.join(
                    self.get_working_directory(),
                    '%i_dials.refine.report.html' % report.get_xpid())
                report.set_html_filename(html_filename)
                report.run()
                FileHandler.record_html_file(
                    '%s REFINE' % idxr.get_indexer_full_name(), html_filename)

            experiments = load.experiment_list(
                self._refinr_experiments_filename)
            self.set_refiner_payload("experiments.json",
                                     self._refinr_experiments_filename)
            self.set_refiner_payload("reflections.pickle",
                                     self._refinr_indexed_filename)

            # this is the result of the cell refinement
            self._refinr_cell = \
                experiments.crystals()[0].get_unit_cell().parameters()
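
The wedge-width heuristic above is plain arithmetic on the oscillation range. As a minimal standalone sketch of the same rule (the function name and return convention are hypothetical, not part of xia2):

def scan_varying_interval(total_phi_range, fast_mode=False):
    # returns (use_scan_varying, interval_width_degrees or None),
    # mirroring the heuristic above: scan-varying refinement needs more
    # than 5 degrees of data; under 36 degrees, halve the range to get
    # the interval width; otherwise defer to the dials.refine default
    if total_phi_range <= 5 or fast_mode:
        return False, None
    if total_phi_range < 36:
        return True, total_phi_range / 2.0
    return True, None

For example, scan_varying_interval(10) gives (True, 5.0), while a 3 degree wedge stays scan-static.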
Example #35
    def _integrate(self):
        """Actually do the integration - in XDS terms this will mean running
        DEFPIX and INTEGRATE to measure all the reflections."""

        experiment = self._intgr_refiner.get_refined_experiment_list(
            self.get_integrater_epoch())[0]
        crystal_model = experiment.crystal
        self._intgr_refiner_cell = crystal_model.get_unit_cell().parameters()

        defpix = self.Defpix()

        # pass in the correct data

        for file in (
                "X-CORRECTIONS.cbf",
                "Y-CORRECTIONS.cbf",
                "BKGINIT.cbf",
                "XPARM.XDS",
        ):
            defpix.set_input_data_file(file, self._xds_data_files[file])

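        # the data range is given in XDS frame numbers, hence the frame
        # offset added to the wedge limits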
        defpix.set_data_range(
            self._intgr_wedge[0] + self.get_frame_offset(),
            self._intgr_wedge[1] + self.get_frame_offset(),
        )

        if (self.get_integrater_high_resolution() > 0.0
                and self.get_integrater_user_resolution()):
            logger.debug("Setting resolution limit in DEFPIX to %.2f" %
                         self.get_integrater_high_resolution())
            defpix.set_resolution_high(self.get_integrater_high_resolution())
            defpix.set_resolution_low(self.get_integrater_low_resolution())

        elif self.get_integrater_low_resolution():
            logger.debug("Setting low resolution limit in DEFPIX to %.2f" %
                         self.get_integrater_low_resolution())
            defpix.set_resolution_high(0.0)
            defpix.set_resolution_low(self.get_integrater_low_resolution())

        defpix.run()

        # and gather the result files
        for file in ("BKGPIX.cbf", "ABS.cbf"):
            self._xds_data_files[file] = defpix.get_output_data_file(file)

        integrate = self.Integrate()

        if self._xds_integrate_parameters:
            integrate.set_updates(self._xds_integrate_parameters)

        # decide what images we are going to process, if not already
        # specified

        if not self._intgr_wedge:
            images = self.get_matching_images()
            self.set_integrater_wedge(min(images), max(images))

        integrate.set_data_range(
            self._intgr_wedge[0] + self.get_frame_offset(),
            self._intgr_wedge[1] + self.get_frame_offset(),
        )

        for file in (
                "X-CORRECTIONS.cbf",
                "Y-CORRECTIONS.cbf",
                "BLANK.cbf",
                "BKGPIX.cbf",
                "GAIN.cbf",
        ):
            integrate.set_input_data_file(file, self._xds_data_files[file])

        if "GXPARM.XDS" in self._xds_data_files:
            logger.debug("Using globally refined parameters")
            integrate.set_input_data_file("XPARM.XDS",
                                          self._xds_data_files["GXPARM.XDS"])
            integrate.set_refined_xparm()
        else:
            integrate.set_input_data_file("XPARM.XDS",
                                          self._xds_data_files["XPARM.XDS"])

        integrate.run()

        self._intgr_per_image_statistics = integrate.get_per_image_statistics()
        logger.info(self.show_per_image_statistics())

        # record the log file -

        pname, xname, dname = self.get_integrater_project_info()
        sweep = self.get_integrater_sweep_name()
        FileHandler.record_log_file(
            f"{pname} {xname} {dname} {sweep} INTEGRATE",
            os.path.join(self.get_working_directory(), "INTEGRATE.LP"),
        )

        # and copy the first pass INTEGRATE.HKL...

        lattice = self._intgr_refiner.get_refiner_lattice()
        if not os.path.exists(
                os.path.join(self.get_working_directory(),
                             "INTEGRATE-%s.HKL" % lattice)):
            here = self.get_working_directory()
            shutil.copyfile(
                os.path.join(here, "INTEGRATE.HKL"),
                os.path.join(here, "INTEGRATE-%s.HKL" % lattice),
            )

        # record INTEGRATE.HKL
        FileHandler.record_more_data_file(
            f"{pname} {xname} {dname} {sweep} INTEGRATE",
            os.path.join(self.get_working_directory(), "INTEGRATE.HKL"),
        )

        # should the existence of these require that I rerun the
        # integration or can we assume that the application of a
        # sensible resolution limit will achieve this??

        self._xds_integrate_parameters = integrate.get_updates()

        # record the mosaic spread &c.

        m_min, m_mean, m_max = integrate.get_mosaic()
        self.set_integrater_mosaic_min_mean_max(m_min, m_mean, m_max)

        logger.info("Mosaic spread: %.3f < %.3f < %.3f" %
                    self.get_integrater_mosaic_min_mean_max())

        return os.path.join(self.get_working_directory(), "INTEGRATE.HKL")
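
The first-pass INTEGRATE.HKL backup above is plain file bookkeeping; the same idea as a standalone sketch (the helper name is hypothetical):

import os
import shutil

def backup_integrate_hkl(workdir, lattice):
    # keep a per-lattice copy of the first INTEGRATE.HKL so later
    # reprocessing under a different lattice assignment cannot clobber it
    src = os.path.join(workdir, 'INTEGRATE.HKL')
    dst = os.path.join(workdir, 'INTEGRATE-%s.HKL' % lattice)
    if not os.path.exists(dst):
        shutil.copyfile(src, dst)
    return dst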
Example #36
  def _integrate(self):
    '''Actually do the integration - in XDS terms this will mean running
    DEFPIX and INTEGRATE to measure all the reflections.'''

    images_str = '%d to %d' % tuple(self._intgr_wedge)
    cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % tuple(self._intgr_cell)

    if len(self._fp_directory) <= 50:
      dirname = self._fp_directory
    else:
      dirname = '...%s' % self._fp_directory[-46:]

    Journal.block(
        'integrating', self._intgr_sweep_name, 'DIALS',
        {'images':images_str,
         'cell':cell_str,
         'lattice':self.get_integrater_refiner().get_refiner_lattice(),
         'template':self._fp_template,
         'directory':dirname,
         'resolution':'%.2f' % self._intgr_reso_high})

    integrate = self.Integrate()

    # decide what images we are going to process, if not already
    # specified

    if not self._intgr_wedge:
      images = self.get_matching_images()
      self.set_integrater_wedge(min(images),
                                max(images))

    imageset = self.get_imageset()
    beam = imageset.get_beam()
    detector = imageset.get_detector()

    d_min_limit = detector.get_max_resolution(beam.get_s0())
    if d_min_limit > self._intgr_reso_high \
        or PhilIndex.params.xia2.settings.resolution.keep_all_reflections:
      Debug.write('Overriding high resolution limit: %f => %f' % \
                  (self._intgr_reso_high, d_min_limit))
      self._intgr_reso_high = d_min_limit

    integrate.set_experiments_filename(self._intgr_experiments_filename)
    integrate.set_reflections_filename(self._intgr_indexed_filename)
    integrate.set_d_max(self._intgr_reso_low)
    integrate.set_d_min(self._intgr_reso_high)
    pname, xname, dname = self.get_integrater_project_info()
    sweep = self.get_integrater_sweep_name()
    FileHandler.record_log_file('%s %s %s %s INTEGRATE' % \
                                (pname, xname, dname, sweep),
                                integrate.get_log_file())

    try:
      integrate.run()
    except RuntimeError as e:
      s = str(e)
      if ('dials.integrate requires more memory than is available.' in s
          and not self._intgr_reso_high):
        # Try to estimate a more sensible resolution limit for integration
        # in case we were just integrating noise to the edge of the detector
        images = self._integrate_select_images_wedges()

        Debug.write(
          'Integrating subset of images to estimate resolution limit.\n'
          'Integrating images %s' %images)

        integrate = self.Integrate()
        integrate.set_experiments_filename(self._intgr_experiments_filename)
        integrate.set_reflections_filename(self._intgr_indexed_filename)
        integrate.set_d_max(self._intgr_reso_low)
        integrate.set_d_min(self._intgr_reso_high)
        for (start, stop) in images:
          integrate.add_scan_range(
              start - self.get_matching_images()[0],
              stop - self.get_matching_images()[0])
        integrate.set_reflections_per_degree(1000)
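        # keep the trial integration cheap - only a d_min estimate is
        # needed from this pass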
        integrate.run()

        integrated_pickle = integrate.get_integrated_filename()

        from xia2.Wrappers.Dials.EstimateResolutionLimit import EstimateResolutionLimit
        d_min_estimater = EstimateResolutionLimit()
        d_min_estimater.set_working_directory(self.get_working_directory())
        auto_logfiler(d_min_estimater)
        d_min_estimater.set_experiments_filename(self._intgr_experiments_filename)
        d_min_estimater.set_reflections_filename(integrated_pickle)
        d_min = d_min_estimater.run()

        Debug.write('Estimate for d_min: %.2f' %d_min)
        Debug.write('Re-running integration to this resolution limit')

        self._intgr_reso_high = d_min
        self.set_integrater_done(False)
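        # marking the integrater not done makes the framework call
        # _integrate() again with the estimated limit applied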
        return
      raise
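
Schematically, the recovery path above is: attempt the full integration; on the specific out-of-memory failure with no resolution limit set, integrate a cheap subset, estimate d_min from it, and run again. A minimal sketch of that control flow (all names hypothetical):

def integrate_with_retry(run_full, run_subset, estimate_d_min):
    # run_full/run_subset/estimate_d_min are stand-ins for the wrapper
    # calls used above
    try:
        return run_full(d_min=None)
    except RuntimeError as e:
        if 'more memory than is available' not in str(e):
            raise  # only the memory failure triggers the retry
        d_min = estimate_d_min(run_subset())
        return run_full(d_min=d_min)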
Example #37
File: DialsIndexer.py Project: xia2/xia2
      # FIXME in here should respect the input unit cell and lattice if provided

      # FIXME from this (i) populate the helper table,
      # (ii) try to avoid re-running the indexing
      # step if we eliminate a solution as we have all of the refined results
      # already available.

      rbs = self.RefineBravaisSettings()
      rbs.set_experiments_filename(indexed_experiments)
      rbs.set_indexed_filename(indexed_file)
      if PhilIndex.params.dials.fix_geometry:
        rbs.set_detector_fix('all')
        rbs.set_beam_fix('all')

      FileHandler.record_log_file('%s LATTICE' % self.get_indexer_full_name(),
                                  rbs.get_log_file())
      rbs.run()

      from cctbx import crystal, sgtbx

      for k in sorted(rbs.get_bravais_summary()):
        summary = rbs.get_bravais_summary()[k]

        # FIXME need to do this better - for the moment only accept lattices
        # where R.M.S. deviation is less than twice P1 R.M.S. deviation.

        if self._indxr_input_lattice is None:
          if not summary['recommended']:
            continue

        experiments = load.experiment_list(