Code Example #1
File: CommonScaler.py Project: lizhen-dlut/xia2
    def _scale_finish_chunk_8_raddam(self):
        crd = CCP4InterRadiationDamageDetector()

        crd.set_working_directory(self.get_working_directory())

        crd.set_hklin(self._scalr_scaled_reflection_files['mtz'])

        if self.get_scaler_anomalous():
            crd.set_anomalous(True)

        hklout = os.path.join(self.get_working_directory(), 'temp.mtz')
        FileHandler.record_temporary_file(hklout)

        crd.set_hklout(hklout)

        status = crd.detect()

        if status:
            Chatter.write('')
            Chatter.banner('Local Scaling %s' % self._scalr_xname)
            for s in status:
                Chatter.write('%s %s' % s)
            Chatter.banner('')
        else:
            Debug.write('Local scaling failed')
Code Example #2
    def _do_indexing(self, method=None):
        indexer = self.Index()
        for spot_list in self._indxr_payload["spot_lists"]:
            indexer.add_spot_filename(spot_list)
        for datablock in self._indxr_payload["datablocks"]:
            indexer.add_sweep_filename(datablock)
        if PhilIndex.params.dials.index.phil_file is not None:
            indexer.set_phil_file(PhilIndex.params.dials.index.phil_file)
        indexer.set_max_cell(max_cell=PhilIndex.params.dials.index.max_cell,
                             max_height_fraction=PhilIndex.params.dials.index.
                             max_cell_estimation.max_height_fraction)
        if PhilIndex.params.xia2.settings.small_molecule:
            indexer.set_min_cell(3)
        if PhilIndex.params.dials.fix_geometry:
            indexer.set_detector_fix('all')
            indexer.set_beam_fix('all')
        indexer.set_close_to_spindle_cutoff(
            PhilIndex.params.dials.close_to_spindle_cutoff)

        if self._indxr_input_lattice:
            indexer.set_indexer_input_lattice(self._indxr_input_lattice)
            Debug.write('Set lattice: %s' % self._indxr_input_lattice)

        if self._indxr_input_cell:
            indexer.set_indexer_input_cell(self._indxr_input_cell)
            Debug.write('Set cell: %f %f %f %f %f %f' % \
                        self._indxr_input_cell)
            original_cell = self._indxr_input_cell

        if method is None:
            if PhilIndex.params.dials.index.method is None:
                method = 'fft3d'
                Debug.write('Choosing indexing method: %s' % method)
            else:
                method = PhilIndex.params.dials.index.method

        FileHandler.record_log_file('%s INDEX' % self.get_indexer_full_name(),
                                    indexer.get_log_file())
        indexer.run(method)

        if not os.path.exists(indexer.get_experiments_filename()):
            raise RuntimeError(
                "Indexing has failed: see %s for more details." %
                indexer.get_log_file())
        elif not os.path.exists(indexer.get_indexed_filename()):
            raise RuntimeError("Indexing has failed: %s does not exist." %
                               indexer.get_indexed_filename())

        report = self.Report()
        report.set_experiments_filename(indexer.get_experiments_filename())
        report.set_reflections_filename(indexer.get_indexed_filename())
        html_filename = os.path.join(
            self.get_working_directory(),
            '%i_dials.index.report.html' % report.get_xpid())
        report.set_html_filename(html_filename)
        report.run()
        FileHandler.record_html_file('%s INDEX' % self.get_indexer_full_name(),
                                     html_filename)

        return indexer
Code Example #3
File: DialsIndexer.py Project: xia2/xia2
  def _do_indexing(self, method=None):
    indexer = self.Index()
    for spot_list in self._indxr_payload["spot_lists"]:
      indexer.add_spot_filename(spot_list)
    for datablock in self._indxr_payload["datablocks"]:
      indexer.add_sweep_filename(datablock)
    if PhilIndex.params.dials.index.phil_file is not None:
      indexer.set_phil_file(PhilIndex.params.dials.index.phil_file)
    if PhilIndex.params.dials.index.max_cell:
      indexer.set_max_cell(PhilIndex.params.dials.index.max_cell)
    if PhilIndex.params.xia2.settings.small_molecule:
      indexer.set_min_cell(3)
    if PhilIndex.params.dials.fix_geometry:
      indexer.set_detector_fix('all')
      indexer.set_beam_fix('all')
    indexer.set_close_to_spindle_cutoff(
      PhilIndex.params.dials.close_to_spindle_cutoff)

    if self._indxr_input_lattice:
      indexer.set_indexer_input_lattice(self._indxr_input_lattice)
      Debug.write('Set lattice: %s' % self._indxr_input_lattice)

    if self._indxr_input_cell:
      indexer.set_indexer_input_cell(self._indxr_input_cell)
      Debug.write('Set cell: %f %f %f %f %f %f' % \
                  self._indxr_input_cell)
      original_cell = self._indxr_input_cell

    if method is None:
      if PhilIndex.params.dials.index.method is None:
        method = 'fft3d'
        Debug.write('Choosing indexing method: %s' % method)
      else:
        method = PhilIndex.params.dials.index.method

    FileHandler.record_log_file('%s INDEX' % self.get_indexer_full_name(),
                                indexer.get_log_file())
    indexer.run(method)

    if not os.path.exists(indexer.get_experiments_filename()):
      raise RuntimeError("Indexing has failed: see %s for more details."
                         %indexer.get_log_file())
    elif not os.path.exists(indexer.get_indexed_filename()):
      raise RuntimeError("Indexing has failed: %s does not exist."
                         %indexer.get_indexed_filename())

    report = self.Report()
    report.set_experiments_filename(indexer.get_experiments_filename())
    report.set_reflections_filename(indexer.get_indexed_filename())
    html_filename = os.path.join(
      self.get_working_directory(),
      '%i_dials.index.report.html' %report.get_xpid())
    report.set_html_filename(html_filename)
    report.run()
    assert os.path.exists(html_filename)
    FileHandler.record_html_file(
      '%s INDEX' %self.get_indexer_full_name(), html_filename)

    return indexer
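The method fallback in the two `_do_indexing` examples above reduces to a small rule: an explicit `method` argument wins, then the `dials.index.method` PHIL parameter, then the `'fft3d'` default. A minimal standalone sketch of that rule (illustrative, not xia2 API):

def choose_indexing_method(method=None, phil_method=None):
    # explicit argument first, then the PHIL setting, then the default
    if method is None:
        method = phil_method if phil_method is not None else 'fft3d'
    return method

assert choose_indexing_method() == 'fft3d'
assert choose_indexing_method(phil_method='fft1d') == 'fft1d'
assert choose_indexing_method('real_space_grid_search') == 'real_space_grid_search'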
Code Example #4
  def _scale_finish_chunk_5_finish_small_molecule(self):
      # keep 'mtz' and remove 'mtz_merged' from the dictionary for
      # consistency with non-small-molecule workflow
      self._scalr_scaled_reflection_files['mtz'] = \
        self._scalr_scaled_reflection_files['mtz_merged']
      del self._scalr_scaled_reflection_files['mtz_merged']

      FileHandler.record_data_file(self._scalr_scaled_reflection_files['mtz'])
Code Example #5
File: MosflmIntegrater.py Project: xia2/xia2
  def _integrate(self):
    '''Implement the integrater interface.'''

    # cite the program
    Citations.cite('mosflm')

    images_str = '%d to %d' % tuple(self._intgr_wedge)
    cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % tuple(self._intgr_cell)

    if len(self._fp_directory) <= 50:
      dirname = self._fp_directory
    else:
      dirname = '...%s' % self._fp_directory[-46:]

    Journal.block(
        'integrating', self._intgr_sweep_name, 'mosflm',
        {'images':images_str,
         'cell':cell_str,
         'lattice':self.get_integrater_refiner().get_refiner_lattice(),
         'template':self._fp_template,
         'directory':dirname,
         'resolution':'%.2f' % self._intgr_reso_high})

    self._mosflm_rerun_integration = False

    wd = self.get_working_directory()

    try:

      if self.get_integrater_sweep_name():
        pname, xname, dname = self.get_integrater_project_info()

      nproc = PhilIndex.params.xia2.settings.multiprocessing.nproc
      if nproc > 1:
        Debug.write('Parallel integration: %d jobs' %nproc)
        self._mosflm_hklout = self._mosflm_parallel_integrate()
      else:
        self._mosflm_hklout = self._mosflm_integrate()

      # record integration output for e.g. BLEND.

      sweep = self.get_integrater_sweep_name()
      if sweep:
        FileHandler.record_more_data_file(
            '%s %s %s %s INTEGRATE' % (pname, xname, dname, sweep),
            self._mosflm_hklout)

    except IntegrationError as e:
      if 'negative mosaic spread' in str(e):
        if self._mosflm_postref_fix_mosaic:
          Chatter.write(
              'Negative mosaic spread - stopping integration')
          raise BadLatticeError('negative mosaic spread')

        Chatter.write(
            'Negative mosaic spread - rerunning integration')
        self.set_integrater_done(False)
        self._mosflm_postref_fix_mosaic = True
Code Example #6
File: CCP4ScalerHelpers.py Project: hainm/xia2
def _prepare_pointless_hklin(working_directory,
                             hklin,
                             phi_width):
  '''Prepare some data for pointless - this will take only 180 degrees
  of data if there is more than this (through a "rebatch" command) else
  will simply return hklin.'''

  # also remove blank images?

  if not Flags.get_microcrystal() and not Flags.get_small_molecule():

    Debug.write('Excluding blank images')

    hklout = os.path.join(
        working_directory,
        '%s_noblank.mtz' % (os.path.split(hklin)[-1][:-4]))

    FileHandler.record_temporary_file(hklout)

    hklin = remove_blank(hklin, hklout)

  # find the number of batches

  md = Mtzdump()
  md.set_working_directory(working_directory)
  auto_logfiler(md)
  md.set_hklin(hklin)
  md.dump()

  batches = max(md.get_batches()) - min(md.get_batches())

  phi_limit = 180

  if batches * phi_width < phi_limit or Flags.get_small_molecule():
    return hklin

  hklout = os.path.join(
      working_directory,
      '%s_prepointless.mtz' % (os.path.split(hklin)[-1][:-4]))

  rb = Rebatch()
  rb.set_working_directory(working_directory)
  auto_logfiler(rb)
  rb.set_hklin(hklin)
  rb.set_hklout(hklout)

  first = min(md.get_batches())
  last = first + int(phi_limit / phi_width)

  Debug.write('Preparing data for pointless - %d batches (%d degrees)' % \
              ((last - first), phi_limit))

  rb.limit_batches(first, last)

  # we will want to delete this on exit
  FileHandler.record_temporary_file(hklout)

  return hklout
Code Example #7
def _prepare_pointless_hklin(working_directory, hklin, phi_width):
    """Prepare some data for pointless - this will take only 180 degrees
    of data if there is more than this (through a "pointless" command) else
    will simply return hklin."""

    # also remove blank images?

    if not PhilIndex.params.xia2.settings.small_molecule:
        Debug.write("Excluding blank images")

        hklout = os.path.join(
            working_directory, "%s_noblank.mtz" % (os.path.split(hklin)[-1][:-4])
        )

        FileHandler.record_temporary_file(hklout)

        hklin = remove_blank(hklin, hklout)

    # find the number of batches

    batches = MtzUtils.batches_from_mtz(hklin)
    n_batches = max(batches) - min(batches)

    phi_limit = 180

    if (
        n_batches * phi_width < phi_limit
        or PhilIndex.params.xia2.settings.small_molecule
    ):
        return hklin

    hklout = os.path.join(
        working_directory, "%s_prepointless.mtz" % (os.path.split(hklin)[-1][:-4])
    )

    pl = xia2.Wrappers.CCP4.Pointless.Pointless()
    pl.set_working_directory(working_directory)
    auto_logfiler(pl)
    pl.set_hklin(hklin)
    pl.set_hklout(hklout)

    first = min(batches)
    last = first + int(phi_limit / phi_width)

    Debug.write(
        "Preparing data for pointless - %d batches (%d degrees)"
        % ((last - first), phi_limit)
    )

    pl.limit_batches(first, last)

    # we will want to delete this on exit
    FileHandler.record_temporary_file(hklout)

    return hklout
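Both versions of `_prepare_pointless_hklin` above share the same batch-window arithmetic: if the sweep spans 180 degrees or more (and the small-molecule bypass does not apply), only the first `phi_limit / phi_width` batches are kept. A standalone sketch of that calculation (illustrative, not part of xia2):

def pointless_batch_window(batches, phi_width, phi_limit=180):
    # return (first, last) batch numbers covering <= phi_limit degrees,
    # or None if the whole sweep is already short enough
    n_batches = max(batches) - min(batches)
    if n_batches * phi_width < phi_limit:
        return None
    first = min(batches)
    last = first + int(phi_limit / phi_width)
    return first, last

# 720 images at 0.5 degrees/image: keep batches 1-361 (~180 degrees)
print(pointless_batch_window(range(1, 721), 0.5))  # (1, 361)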
Code Example #8
File: CommonScaler.py Project: lizhen-dlut/xia2
    def _scale_finish_chunk_3_truncate(self):
        for wavelength in self._scalr_scaled_refl_files.keys():

            hklin = self._scalr_scaled_refl_files[wavelength]

            truncate = self._factory.Truncate()
            truncate.set_hklin(hklin)

            if self.get_scaler_anomalous():
                truncate.set_anomalous(True)
            else:
                truncate.set_anomalous(False)

            FileHandler.record_log_file('%s %s %s truncate' % \
                                        (self._scalr_pname,
                                         self._scalr_xname,
                                         wavelength),
                                        truncate.get_log_file())

            hklout = os.path.join(self.get_working_directory(),
                                  '%s_truncated.mtz' % wavelength)

            truncate.set_hklout(hklout)
            truncate.truncate()

            xmlout = truncate.get_xmlout()
            if xmlout is not None:
                FileHandler.record_xml_file('%s %s %s truncate' % \
                                            (self._scalr_pname,
                                             self._scalr_xname,
                                             wavelength),
                                            xmlout)

            Debug.write('%d absent reflections in %s removed' % \
                        (truncate.get_nabsent(), wavelength))

            b_factor = truncate.get_b_factor()

            # record the b factor somewhere (hopefully) useful...

            self._scalr_statistics[(self._scalr_pname, self._scalr_xname,
                                    wavelength)]['Wilson B factor'] = [
                                        b_factor
                                    ]

            # and record the reflection file..
            self._scalr_scaled_refl_files[wavelength] = hklout
Code Example #9
    def dials_symmetry_decide_pointgroup(self, experiments, reflections):
        """Run the symmetry analyser and return it for later inspection."""
        symmetry_analyser = DialsSymmetry()
        symmetry_analyser.set_working_directory(self.get_working_directory())
        auto_logfiler(symmetry_analyser)

        FileHandler.record_log_file(
            "%s %s SYMMETRY" % (self._scalr_pname, self._scalr_xname),
            symmetry_analyser.get_log_file(),
        )

        for (exp, refl) in zip(experiments, reflections):
            symmetry_analyser.add_experiments(exp)
            symmetry_analyser.add_reflections(refl)
        symmetry_analyser.decide_pointgroup()

        return symmetry_analyser
Code Example #10
File: CommonScaler.py Project: lizhen-dlut/xia2
    def _scale_finish_chunk_2_report(self):
        from cctbx.array_family import flex
        from iotbx.reflection_file_reader import any_reflection_file
        from xia2.lib.bits import auto_logfiler
        from xia2.Wrappers.XIA.Report import Report

        for wavelength in self._scalr_scaled_refl_files.keys():
            mtz_unmerged = self._scalr_scaled_reflection_files['mtz_unmerged'][
                wavelength]
            reader = any_reflection_file(mtz_unmerged)
            mtz_object = reader.file_content()
            batches = mtz_object.as_miller_arrays_dict()['HKL_base',
                                                         'HKL_base', 'BATCH']
            dose = flex.double(batches.size(), -1)
            batch_to_dose = self.get_batch_to_dose()
            for i, b in enumerate(batches.data()):
                dose[i] = batch_to_dose[b]
            c = mtz_object.crystals()[0]
            d = c.datasets()[0]
            d.add_column('DOSE', 'R').set_values(dose.as_float())
            tmp_mtz = os.path.join(self.get_working_directory(),
                                   'dose_tmp.mtz')
            mtz_object.write(tmp_mtz)
            hklin = tmp_mtz
            FileHandler.record_temporary_file(hklin)

            report = Report()
            report.set_working_directory(self.get_working_directory())
            report.set_mtz_filename(hklin)
            htmlout = os.path.join(
                self.get_working_directory(), '%s_%s_%s_report.html' %
                (self._scalr_pname, self._scalr_xname, wavelength))
            report.set_html_filename(htmlout)
            report.set_chef_min_completeness(0.95)  # sensible?
            auto_logfiler(report)
            try:
                report.run()
                FileHandler.record_html_file(
                    '%s %s %s report' %
                    (self._scalr_pname, self._scalr_xname, wavelength),
                    htmlout)
            except Exception as e:
                Debug.write('xia2.report failed:')
                Debug.write(str(e))
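The dose bookkeeping above assigns every observation the dose of the batch (image) it was recorded on, via `get_batch_to_dose()`, before the DOSE column is written to the MTZ file. The same mapping in plain Python (a sketch, without the cctbx `flex` arrays used above):

def dose_per_observation(batch_column, batch_to_dose):
    # one dose value per reflection, looked up from its batch number
    return [batch_to_dose[b] for b in batch_column]

print(dose_per_observation([1, 1, 2, 3], {1: 0.0, 2: 0.5, 3: 1.0}))
# [0.0, 0.0, 0.5, 1.0]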
Code Example #11
    def _do_multisweep_symmetry_analysis(self):
        refiners = []
        experiments = []
        reflections = []

        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            integrater = si.get_integrater()
            experiments.append(integrater.get_integrated_experiments())
            reflections.append(integrater.get_integrated_reflections())
            refiners.append(integrater.get_integrater_refiner())

        Debug.write("Running multisweep dials.symmetry for %d sweeps" % len(refiners))
        pointgroup, reindex_op, ntr, pt, reind_refl, reind_exp, reindex_initial = self._dials_symmetry_indexer_jiffy(
            experiments, reflections, refiners, multisweep=True
        )

        FileHandler.record_temporary_file(reind_refl)
        FileHandler.record_temporary_file(reind_exp)
        return pointgroup, reindex_op, ntr, pt, reind_refl, reind_exp, reindex_initial
Code Example #12
ファイル: CommonScaler.py プロジェクト: lizhen-dlut/xia2
    def _scale_finish_chunk_4_mad_mangling(self):
        if len(self._scalr_scaled_refl_files.keys()) > 1:

            reflection_files = {}

            for wavelength in self._scalr_scaled_refl_files.keys():
                cad = self._factory.Cad()
                cad.add_hklin(self._scalr_scaled_refl_files[wavelength])
                cad.set_hklout(
                    os.path.join(self.get_working_directory(),
                                 'cad-tmp-%s.mtz' % wavelength))
                cad.set_new_suffix(wavelength)
                cad.update()

                reflection_files[wavelength] = cad.get_hklout()
                FileHandler.record_temporary_file(cad.get_hklout())

            # now merge the reflection files together...
            hklout = os.path.join(
                self.get_working_directory(),
                '%s_%s_merged.mtz' % (self._scalr_pname, self._scalr_xname))
            FileHandler.record_temporary_file(hklout)

            Debug.write('Merging all data sets to %s' % hklout)

            cad = self._factory.Cad()
            for wavelength in reflection_files.keys():
                cad.add_hklin(reflection_files[wavelength])
            cad.set_hklout(hklout)
            cad.merge()

            self._scalr_scaled_reflection_files['mtz_merged'] = hklout

        else:

            self._scalr_scaled_reflection_files['mtz_merged'] = \
                self._scalr_scaled_refl_files[
                    list(self._scalr_scaled_refl_files)[0]]
Code Example #13
File: CCP4ScalerA.py Project: hainm/xia2
  def _scale_prepare(self):
    '''Perform all of the preparation required to deliver the scaled
    data. This should sort together the reflection files, ensure that
    they are correctly indexed (via pointless) and generally tidy
    things up.'''

    # acknowledge all of the programs we are about to use...

    Citations.cite('pointless')
    Citations.cite('aimless')
    Citations.cite('ccp4')

    # ---------- GATHER ----------

    self._sweep_handler = SweepInformationHandler(self._scalr_integraters)

    Journal.block(
        'gathering', self.get_scaler_xcrystal().get_name(), 'CCP4',
        {'working directory':self.get_working_directory()})

    for epoch in self._sweep_handler.get_epochs():
      si = self._sweep_handler.get_sweep_information(epoch)
      pname, xname, dname = si.get_project_info()
      sname = si.get_sweep_name()

      exclude_sweep = False

      for sweep in PhilIndex.params.xia2.settings.sweep:
        if sweep.id == sname and sweep.exclude:
          exclude_sweep = True
          break

      if exclude_sweep:
        self._sweep_handler.remove_epoch(epoch)
        Debug.write('Excluding sweep %s' %sname)
      else:
        Journal.entry({'adding data from':'%s/%s/%s' % \
                       (xname, dname, sname)})

    # gather data for all images which belonged to the parent
    # crystal - allowing for the fact that things could go wrong
    # e.g. epoch information not available, exposure times not in
    # headers etc...

    for e in self._sweep_handler.get_epochs():
      si = self._sweep_handler.get_sweep_information(e)
      assert is_mtz_file(si.get_reflections())

    p, x = self._sweep_handler.get_project_info()
    self._scalr_pname = p
    self._scalr_xname = x

    # verify that the lattices are consistent, calling eliminate if
    # they are not N.B. there could be corner cases here

    need_to_return = False

    multi_sweep_indexing = \
      PhilIndex.params.xia2.settings.developmental.multi_sweep_indexing


    if len(self._sweep_handler.get_epochs()) > 1:

      if multi_sweep_indexing and not self._scalr_input_pointgroup:
        pointless_hklins = []

        max_batches = 0
        for epoch in self._sweep_handler.get_epochs():
          si = self._sweep_handler.get_sweep_information(epoch)
          hklin = si.get_reflections()

          md = self._factory.Mtzdump()
          md.set_hklin(hklin)
          md.dump()

          batches = md.get_batches()
          if 1 + max(batches) - min(batches) > max_batches:
            max_batches = max(batches) - min(batches) + 1

          datasets = md.get_datasets()

          Debug.write('In reflection file %s found:' % hklin)
          for d in datasets:
            Debug.write('... %s' % d)

          dataset_info = md.get_dataset_info(datasets[0])

        from xia2.lib.bits import nifty_power_of_ten
        Debug.write('Biggest sweep has %d batches' % max_batches)
        max_batches = nifty_power_of_ten(max_batches)

        counter = 0

        for epoch in self._sweep_handler.get_epochs():
          si = self._sweep_handler.get_sweep_information(epoch)
          hklin = si.get_reflections()
          integrater = si.get_integrater()
          refiner = integrater.get_integrater_refiner()

          hklin = self._prepare_pointless_hklin(
            hklin, si.get_integrater().get_phi_width())

          rb = self._factory.Rebatch()

          hklout = os.path.join(self.get_working_directory(),
                                '%s_%s_%s_%s_prepointless.mtz' % \
                                (pname, xname, dname, si.get_sweep_name()))

          # we will want to delete this on exit
          FileHandler.record_temporary_file(hklout)

          first_batch = min(si.get_batches())
          si.set_batch_offset(counter * max_batches - first_batch + 1)

          rb.set_hklin(hklin)
          rb.set_first_batch(counter * max_batches + 1)
          rb.set_project_info(pname, xname, dname)
          rb.set_hklout(hklout)

          new_batches = rb.rebatch()

          pointless_hklins.append(hklout)

          # update the counter & recycle
          counter += 1

        s = self._factory.Sortmtz()

        pointless_hklin = os.path.join(self.get_working_directory(),
                              '%s_%s_prepointless_sorted.mtz' % \
                              (self._scalr_pname, self._scalr_xname))

        s.set_hklout(pointless_hklin)

        for hklin in pointless_hklins:
          s.add_hklin(hklin)

        s.sort()

        pointgroup, reindex_op, ntr, pt = \
                    self._pointless_indexer_jiffy(
            pointless_hklin, refiner)

        Debug.write('X1698: %s: %s' % (pointgroup, reindex_op))

        lattices = [Syminfo.get_lattice(pointgroup)]

        for epoch in self._sweep_handler.get_epochs():
          si = self._sweep_handler.get_sweep_information(epoch)
          intgr = si.get_integrater()
          hklin = si.get_reflections()
          refiner = intgr.get_integrater_refiner()

          if ntr:
            intgr.integrater_reset_reindex_operator()
            need_to_return = True

      else:
        lattices = []

        for epoch in self._sweep_handler.get_epochs():

          si = self._sweep_handler.get_sweep_information(epoch)
          intgr = si.get_integrater()
          hklin = si.get_reflections()
          refiner = intgr.get_integrater_refiner()

          if self._scalr_input_pointgroup:
            pointgroup = self._scalr_input_pointgroup
            reindex_op = 'h,k,l'
            ntr = False

          else:
            pointless_hklin = self._prepare_pointless_hklin(
              hklin, si.get_integrater().get_phi_width())

            pointgroup, reindex_op, ntr, pt = \
                        self._pointless_indexer_jiffy(
                pointless_hklin, refiner)

            Debug.write('X1698: %s: %s' % (pointgroup, reindex_op))

          lattice = Syminfo.get_lattice(pointgroup)

          if not lattice in lattices:
            lattices.append(lattice)

          if ntr:

            intgr.integrater_reset_reindex_operator()
            need_to_return = True

      if len(lattices) > 1:

        # why not using pointless indexer jiffy??!

        correct_lattice = sort_lattices(lattices)[0]

        Chatter.write('Correct lattice asserted to be %s' % \
                      correct_lattice)

        # transfer this information back to the indexers
        for epoch in self._sweep_handler.get_epochs():

          si = self._sweep_handler.get_sweep_information(epoch)
          refiner = si.get_integrater().get_integrater_refiner()
          sname = si.get_sweep_name()

          state = refiner.set_refiner_asserted_lattice(
              correct_lattice)

          if state == refiner.LATTICE_CORRECT:
            Chatter.write('Lattice %s ok for sweep %s' % \
                          (correct_lattice, sname))
          elif state == refiner.LATTICE_IMPOSSIBLE:
            raise RuntimeError('Lattice %s impossible for %s'
                               % (correct_lattice, sname))
          elif state == refiner.LATTICE_POSSIBLE:
            Chatter.write('Lattice %s assigned for sweep %s' % \
                          (correct_lattice, sname))
            need_to_return = True

    # if one or more of them was not in the lowest lattice,
    # need to return here to allow reprocessing

    if need_to_return:
      self.set_scaler_done(False)
      self.set_scaler_prepare_done(False)
      return

    # ---------- REINDEX ALL DATA TO CORRECT POINTGROUP ----------

    # all should share the same pointgroup, unless twinned... in which
    # case force them to be...

    pointgroups = { }
    reindex_ops = { }
    probably_twinned = False

    need_to_return = False

    multi_sweep_indexing = \
      PhilIndex.params.xia2.settings.developmental.multi_sweep_indexing

    if multi_sweep_indexing and not self._scalr_input_pointgroup:
      pointless_hklins = []

      max_batches = 0
      for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)
        hklin = si.get_reflections()

        md = self._factory.Mtzdump()
        md.set_hklin(hklin)
        md.dump()

        batches = md.get_batches()
        if 1 + max(batches) - min(batches) > max_batches:
          max_batches = max(batches) - min(batches) + 1

        datasets = md.get_datasets()

        Debug.write('In reflection file %s found:' % hklin)
        for d in datasets:
          Debug.write('... %s' % d)

        dataset_info = md.get_dataset_info(datasets[0])

      from xia2.lib.bits import nifty_power_of_ten
      Debug.write('Biggest sweep has %d batches' % max_batches)
      max_batches = nifty_power_of_ten(max_batches)

      counter = 0

      for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)
        hklin = si.get_reflections()
        integrater = si.get_integrater()
        refiner = integrater.get_integrater_refiner()

        hklin = self._prepare_pointless_hklin(
            hklin, si.get_integrater().get_phi_width())

        rb = self._factory.Rebatch()

        hklout = os.path.join(self.get_working_directory(),
                              '%s_%s_%s_%s_prepointless.mtz' % \
                              (pname, xname, dname, si.get_sweep_name()))

        # we will want to delete this on exit
        FileHandler.record_temporary_file(hklout)

        first_batch = min(si.get_batches())
        si.set_batch_offset(counter * max_batches - first_batch + 1)

        rb.set_hklin(hklin)
        rb.set_first_batch(counter * max_batches + 1)
        rb.set_project_info(pname, xname, dname)
        rb.set_hklout(hklout)

        new_batches = rb.rebatch()

        pointless_hklins.append(hklout)

        # update the counter & recycle
        counter += 1

      s = self._factory.Sortmtz()

      pointless_hklin = os.path.join(self.get_working_directory(),
                            '%s_%s_prepointless_sorted.mtz' % \
                            (self._scalr_pname, self._scalr_xname))

      s.set_hklout(pointless_hklin)

      for hklin in pointless_hklins:
        s.add_hklin(hklin)

      s.sort()

      pointgroup, reindex_op, ntr, pt = \
                  self._pointless_indexer_jiffy(
          pointless_hklin, refiner)

      for epoch in self._sweep_handler.get_epochs():
        pointgroups[epoch] = pointgroup
        reindex_ops[epoch] = reindex_op

    else:
      for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)

        hklin = si.get_reflections()
        #hklout = os.path.join(
            #self.get_working_directory(),
            #os.path.split(hklin)[-1].replace('.mtz', '_rdx.mtz'))

        #FileHandler.record_temporary_file(hklout)

        integrater = si.get_integrater()
        refiner = integrater.get_integrater_refiner()

        if self._scalr_input_pointgroup:
          Debug.write('Using input pointgroup: %s' % \
                      self._scalr_input_pointgroup)
          pointgroup = self._scalr_input_pointgroup
          reindex_op = 'h,k,l'
          pt = False

        else:

          pointless_hklin = self._prepare_pointless_hklin(
              hklin, si.get_integrater().get_phi_width())

          pointgroup, reindex_op, ntr, pt = \
                      self._pointless_indexer_jiffy(
              pointless_hklin, refiner)

          Debug.write('X1698: %s: %s' % (pointgroup, reindex_op))

          if ntr:

            integrater.integrater_reset_reindex_operator()
            need_to_return = True

        if pt and not probably_twinned:
          probably_twinned = True

        Debug.write('Pointgroup: %s (%s)' % (pointgroup, reindex_op))

        pointgroups[epoch] = pointgroup
        reindex_ops[epoch] = reindex_op

    overall_pointgroup = None

    pointgroup_set = set([pointgroups[e] for e in pointgroups])

    if len(pointgroup_set) > 1 and \
       not probably_twinned:
      raise RuntimeError('non uniform pointgroups')

    if len(pointgroup_set) > 1:
      Debug.write('Probably twinned, pointgroups: %s' % \
                  ' '.join([p.replace(' ', '') for p in \
                            list(pointgroup_set)]))
      numbers = [Syminfo.spacegroup_name_to_number(s) for s in \
                 pointgroup_set]
      overall_pointgroup = Syminfo.spacegroup_number_to_name(
          min(numbers))
      self._scalr_input_pointgroup = overall_pointgroup

      Chatter.write('Twinning detected, assume pointgroup %s' % \
                    overall_pointgroup)

      need_to_return = True

    else:
      overall_pointgroup = pointgroup_set.pop()

    for epoch in self._sweep_handler.get_epochs():
      si = self._sweep_handler.get_sweep_information(epoch)

      integrater = si.get_integrater()

      integrater.set_integrater_spacegroup_number(
          Syminfo.spacegroup_name_to_number(overall_pointgroup))
      integrater.set_integrater_reindex_operator(
          reindex_ops[epoch], reason='setting point group')
      # This will give us the reflections in the correct point group
      si.set_reflections(integrater.get_integrater_intensities())

    if need_to_return:
      self.set_scaler_done(False)
      self.set_scaler_prepare_done(False)
      return

    # in here now optionally work through the data files which should be
    # indexed with a consistent point group, and transform the orientation
    # matrices by the lattice symmetry operations (if possible) to get a
    # consistent definition of U matrix modulo fixed rotations

    if PhilIndex.params.xia2.settings.unify_setting:

      from scitbx.matrix import sqr
      reference_U = None
      i3 = sqr((1, 0, 0, 0, 1, 0, 0, 0, 1))

      for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)
        intgr = si.get_integrater()
        fixed = sqr(intgr.get_goniometer().get_fixed_rotation())
        u, b, s = get_umat_bmat_lattice_symmetry_from_mtz(si.get_reflections())
        U = fixed.inverse() * sqr(u).transpose()
        B = sqr(b)

        if reference_U is None:
          reference_U = U
          continue

        results = []
        for op in s.all_ops():
          R = B * sqr(op.r().as_double()).transpose() * B.inverse()
          nearly_i3 = (U * R).inverse() * reference_U
          score = sum([abs(_n - _i) for (_n, _i) in zip(nearly_i3, i3)])
          results.append((score, op.r().as_hkl(), op))

        results.sort()
        best = results[0]
        Debug.write('Best reindex: %s %.3f' % (best[1], best[0]))
        intgr.set_integrater_reindex_operator(best[2].r().inverse().as_hkl(),
                                              reason='unifying [U] setting')
        si.set_reflections(intgr.get_integrater_intensities())

        # recalculate to verify
        u, b, s = get_umat_bmat_lattice_symmetry_from_mtz(si.get_reflections())
        U = fixed.inverse() * sqr(u).transpose()
        Debug.write('New reindex: %s' % (U.inverse() * reference_U))

        # FIXME I should probably raise an exception at this stage if this
        # is not about I3...

    if self.get_scaler_reference_reflection_file():
      self._reference = self.get_scaler_reference_reflection_file()
      Debug.write('Using HKLREF %s' % self._reference)

    elif Flags.get_reference_reflection_file():
      self._reference = Flags.get_reference_reflection_file()
      Debug.write('Using HKLREF %s' % self._reference)

    params = PhilIndex.params
    use_brehm_diederichs = params.xia2.settings.use_brehm_diederichs
    if len(self._sweep_handler.get_epochs()) > 1 and use_brehm_diederichs:

      brehm_diederichs_files_in = []
      for epoch in self._sweep_handler.get_epochs():

        si = self._sweep_handler.get_sweep_information(epoch)
        hklin = si.get_reflections()
        brehm_diederichs_files_in.append(hklin)

      # now run cctbx.brehm_diederichs to figure out the indexing hand for
      # each sweep
      from xia2.Wrappers.Cctbx.BrehmDiederichs import BrehmDiederichs
      from xia2.lib.bits import auto_logfiler
      brehm_diederichs = BrehmDiederichs()
      brehm_diederichs.set_working_directory(self.get_working_directory())
      auto_logfiler(brehm_diederichs)
      brehm_diederichs.set_input_filenames(brehm_diederichs_files_in)
      # 1 or 3? 1 seems to work better?
      brehm_diederichs.set_asymmetric(1)
      brehm_diederichs.run()
      reindexing_dict = brehm_diederichs.get_reindexing_dict()

      for epoch in self._sweep_handler.get_epochs():

        si = self._sweep_handler.get_sweep_information(epoch)
        intgr = si.get_integrater()
        hklin = si.get_reflections()

        reindex_op = reindexing_dict.get(os.path.abspath(hklin))
        assert reindex_op is not None

        if 1 or reindex_op != 'h,k,l':
          # apply the reindexing operator
          intgr.set_integrater_reindex_operator(
            reindex_op, reason='match reference')
          si.set_reflections(intgr.get_integrater_intensities())

    elif len(self._sweep_handler.get_epochs()) > 1 and \
           not self._reference:

      first = self._sweep_handler.get_epochs()[0]
      si = self._sweep_handler.get_sweep_information(first)
      self._reference = si.get_reflections()

    if self._reference:

      md = self._factory.Mtzdump()
      md.set_hklin(self._reference)
      md.dump()

      if md.get_batches() and False:
        raise RuntimeError('reference reflection file %s unmerged' %
                           self._reference)

      datasets = md.get_datasets()

      if len(datasets) > 1 and False:
        raise RuntimeError('more than one dataset in %s' %
                           self._reference)

      # then get the unit cell, lattice etc.

      reference_lattice = Syminfo.get_lattice(md.get_spacegroup())
      reference_cell = md.get_dataset_info(datasets[0])['cell']

      # then compute the pointgroup from this...

      # ---------- REINDEX TO CORRECT (REFERENCE) SETTING ----------

      for epoch in self._sweep_handler.get_epochs():
        pl = self._factory.Pointless()

        si = self._sweep_handler.get_sweep_information(epoch)
        hklin = si.get_reflections()

        pl.set_hklin(self._prepare_pointless_hklin(
            hklin, si.get_integrater().get_phi_width()))

        hklout = os.path.join(
            self.get_working_directory(),
            '%s_rdx2.mtz' % os.path.split(hklin)[-1][:-4])

        # we will want to delete this on exit
        FileHandler.record_temporary_file(hklout)

        # now set the initial reflection set as a reference...

        pl.set_hklref(self._reference)

        # write a pointless log file...
        pl.decide_pointgroup()

        Debug.write('Reindexing analysis of %s' % pl.get_hklin())

        pointgroup = pl.get_pointgroup()
        reindex_op = pl.get_reindex_operator()

        Debug.write('Operator: %s' % reindex_op)

        # apply this...

        integrater = si.get_integrater()

        integrater.set_integrater_reindex_operator(reindex_op,
                                                   reason='match reference')
        integrater.set_integrater_spacegroup_number(
            Syminfo.spacegroup_name_to_number(pointgroup))
        si.set_reflections(integrater.get_integrater_intensities())

        md = self._factory.Mtzdump()
        md.set_hklin(si.get_reflections())
        md.dump()

        datasets = md.get_datasets()

        if len(datasets) > 1:
          raise RuntimeError('more than one dataset in %s' %
                             si.get_reflections())

        # then get the unit cell, lattice etc.

        lattice = Syminfo.get_lattice(md.get_spacegroup())
        cell = md.get_dataset_info(datasets[0])['cell']

        if lattice != reference_lattice:
          raise RuntimeError('lattices differ in %s and %s' %
                             (self._reference, si.get_reflections()))

        for j in range(6):
          if math.fabs((cell[j] - reference_cell[j]) /
                       reference_cell[j]) > 0.1:
            raise RuntimeError(
                'unit cell parameters differ in %s and %s' %
                (self._reference, si.get_reflections()))

    # ---------- SORT TOGETHER DATA ----------

    self._sort_together_data_ccp4()

    self._scalr_resolution_limits = { }

    # store central resolution limit estimates

    batch_ranges = [self._sweep_handler.get_sweep_information(
        epoch).get_batch_range() for epoch in
                    self._sweep_handler.get_epochs()]

    self._resolution_limit_estimates = erzatz_resolution(
        self._prepared_reflections, batch_ranges)


    return
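The rebatching loops in `_scale_prepare` above shift each sweep onto its own block of batch numbers, sized by the next power of ten above the longest sweep (`nifty_power_of_ten`), so batch numbers from different sweeps cannot collide when the files are sorted together. A standalone sketch of that numbering; the power-of-ten helper here is an assumption about its behaviour, not the xia2 implementation:

def next_power_of_ten(n):
    # assumed behaviour of xia2's nifty_power_of_ten: smallest power of
    # ten large enough to hold n batches
    p = 10
    while p < n:
        p *= 10
    return p

def first_batch_per_sweep(sweep_lengths):
    block = next_power_of_ten(max(sweep_lengths))
    return [counter * block + 1 for counter in range(len(sweep_lengths))]

# three sweeps of up to 450 images: blocks of 1000, so 1, 1001, 2001
print(first_batch_per_sweep([450, 360, 90]))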
Code Example #14
File: XDSIntegrater.py Project: hainm/xia2
  def _integrate_finish(self):
    '''Finish off the integration by running correct.'''

    # first run the postrefinement etc with spacegroup P1
    # and the current unit cell - this will be used to
    # obtain a benchmark rmsd in pixels / phi and also
    # cell deviations (this is working towards spotting bad
    # indexing solutions) - only do this if we have no
    # reindex matrix... and no postrefined cell...

    p1_deviations = None

    # fix for bug # 3264 -
    # if we have not run integration with refined parameters, make it so...
    # erm? shouldn't this therefore return if this is the principle, or
    # set the flag after we have tested the lattice?

    if 'GXPARM.XDS' not in self._xds_data_files and \
      PhilIndex.params.xds.integrate.reintegrate:
      Debug.write(
          'Resetting integrater, to ensure refined orientation is used')
      self.set_integrater_done(False)

    if not self.get_integrater_reindex_matrix() and not self._intgr_cell \
           and not Flags.get_no_lattice_test() and \
           not self.get_integrater_sweep().get_user_lattice():
      correct = self.Correct()

      correct.set_data_range(self._intgr_wedge[0],
                             self._intgr_wedge[1])

      if self.get_polarization() > 0.0:
        correct.set_polarization(self.get_polarization())

      # FIXME should this be using the correctly transformed
      # cell or are the results ok without it?!

      correct.set_spacegroup_number(1)
      correct.set_cell(self._intgr_refiner_cell)

      correct.run()

      # record the log file -

      pname, xname, dname = self.get_integrater_project_info()
      sweep = self.get_integrater_sweep_name()
      FileHandler.record_log_file('%s %s %s %s CORRECT' % \
                                  (pname, xname, dname, sweep),
                                  os.path.join(
          self.get_working_directory(),
          'CORRECT.LP'))

      FileHandler.record_more_data_file(
          '%s %s %s %s CORRECT' % (pname, xname, dname, sweep),
          os.path.join(self.get_working_directory(), 'XDS_ASCII.HKL'))

      cell = correct.get_result('cell')
      cell_esd = correct.get_result('cell_esd')

      Debug.write('Postrefinement in P1 results:')
      Debug.write('%7.3f %7.3f %7.3f %7.3f %7.3f %7.3f' % \
                  tuple(cell))
      Debug.write('%7.3f %7.3f %7.3f %7.3f %7.3f %7.3f' % \
                  tuple(cell_esd))
      Debug.write('Deviations: %.2f pixels %.2f degrees' % \
                  (correct.get_result('rmsd_pixel'),
                   correct.get_result('rmsd_phi')))

      p1_deviations = (correct.get_result('rmsd_pixel'),
                       correct.get_result('rmsd_phi'))

    # next run the postrefinement etc with the given
    # cell / lattice - this will be the assumed result...

    correct = self.Correct()

    correct.set_data_range(self._intgr_wedge[0],
                           self._intgr_wedge[1])

    if self.get_polarization() > 0.0:
      correct.set_polarization(self.get_polarization())

    # BUG # 2695 probably comes from here - need to check...
    # if the pointless interface comes back with a different
    # crystal setting then the unit cell stored in self._intgr_cell
    # needs to be set to None...

    if self.get_integrater_spacegroup_number():
      correct.set_spacegroup_number(
          self.get_integrater_spacegroup_number())
      if not self._intgr_cell:
        raise RuntimeError('no unit cell to recycle')
      correct.set_cell(self._intgr_cell)

    # BUG # 3113 - new version of XDS will try and figure the
    # best spacegroup out from the intensities (and get it wrong!)
    # unless we set the spacegroup and cell explicitly

    if not self.get_integrater_spacegroup_number():
      cell = self._intgr_refiner_cell
      lattice = self._intgr_refiner.get_refiner_lattice()
      spacegroup_number = lattice_to_spacegroup_number(lattice)

      # this should not prevent the postrefinement from
      # working correctly, else what is above would not
      # work correctly (the postrefinement test)

      correct.set_spacegroup_number(spacegroup_number)
      correct.set_cell(cell)

      Debug.write('Setting spacegroup to: %d' % spacegroup_number)
      Debug.write(
        'Setting cell to: %.2f %.2f %.2f %.2f %.2f %.2f' % tuple(cell))

    if self.get_integrater_reindex_matrix():

      # bug! if the lattice is not primitive the values in this
      # reindex matrix need to be multiplied by a constant which
      # depends on the Bravais lattice centering.

      lattice = self._intgr_refiner.get_refiner_lattice()

      import scitbx.matrix
      matrix = self.get_integrater_reindex_matrix()
      matrix = scitbx.matrix.sqr(matrix).transpose().elems
      matrix = r_to_rt(matrix)

      if lattice[1] == 'P':
        mult = 1
      elif lattice[1] == 'C' or lattice[1] == 'I':
        mult = 2
      elif lattice[1] == 'R':
        mult = 3
      elif lattice[1] == 'F':
        mult = 4
      else:
        raise RuntimeError('unknown multiplier for lattice %s' %
                           lattice)

      Debug.write('REIDX multiplier for lattice %s: %d' % \
                  (lattice, mult))

      mult_matrix = [mult * m for m in matrix]

      Debug.write('REIDX set to %d %d %d %d %d %d %d %d %d %d %d %d' % \
                  tuple(mult_matrix))
      correct.set_reindex_matrix(mult_matrix)

    correct.run()

    # erm. just to be sure
    if self.get_integrater_reindex_matrix() and \
           correct.get_reindex_used():
      raise RuntimeError('Reindex panic!')

    # get the reindex operation used, which may be useful if none was
    # set but XDS decided to apply one, e.g. #419.

    if not self.get_integrater_reindex_matrix() and \
           correct.get_reindex_used():
      # convert this reindex operation to h, k, l form: n.b. this
      # will involve dividing through by the lattice centring multiplier

      matrix = rt_to_r(correct.get_reindex_used())
      import scitbx.matrix
      matrix = scitbx.matrix.sqr(matrix).transpose().elems

      lattice = self._intgr_refiner.get_refiner_lattice()

      if lattice[1] == 'P':
        mult = 1.0
      elif lattice[1] == 'C' or lattice[1] == 'I':
        mult = 2.0
      elif lattice[1] == 'R':
        mult = 3.0
      elif lattice[1] == 'F':
        mult = 4.0

      matrix = [m / mult for m in matrix]

      reindex_op = mat_to_symop(matrix)

      # assign this to self: will this reset?! make for a leaky
      # abstraction and just assign this...

      # self.set_integrater_reindex_operator(reindex)

      self._intgr_reindex_operator = reindex_op


    # record the log file -

    pname, xname, dname = self.get_integrater_project_info()
    sweep = self.get_integrater_sweep_name()
    FileHandler.record_log_file('%s %s %s %s CORRECT' % \
                                (pname, xname, dname, sweep),
                                os.path.join(self.get_working_directory(),
                                             'CORRECT.LP'))

    # should get some interesting stuff from the XDS correct file
    # here, for instance the resolution range to use in integration
    # (which should be fed back if not fast) and so on...

    self._intgr_corrected_hklout = os.path.join(self.get_working_directory(),
                                'XDS_ASCII.HKL')

    # also record the batch range - needed for the analysis of the
    # radiation damage in chef...

    self._intgr_batches_out = (self._intgr_wedge[0],
                               self._intgr_wedge[1])

    # FIXME perhaps I should also feedback the GXPARM file here??
    for file in ['GXPARM.XDS']:
      self._xds_data_files[file] = correct.get_output_data_file(file)

    # record the postrefined cell parameters
    self._intgr_cell = correct.get_result('cell')
    self._intgr_n_ref = correct.get_result('n_ref')

    Debug.write('Postrefinement in "correct" spacegroup results:')
    Debug.write('%7.3f %7.3f %7.3f %7.3f %7.3f %7.3f' % \
                tuple(correct.get_result('cell')))
    Debug.write('%7.3f %7.3f %7.3f %7.3f %7.3f %7.3f' % \
                tuple(correct.get_result('cell_esd')))
    Debug.write('Deviations: %.2f pixels %.2f degrees' % \
                (correct.get_result('rmsd_pixel'),
                 correct.get_result('rmsd_phi')))

    Debug.write('Error correction parameters: A=%.3f B=%.3f' % \
                correct.get_result('sdcorrection'))

    # compute misorientation of axes

    xparm_file = os.path.join(self.get_working_directory(), 'GXPARM.XDS')

    from iotbx.xds import xparm
    handle = xparm.reader()
    handle.read_file(xparm_file)

    rotn = handle.rotation_axis
    beam = handle.beam_vector

    dot = sum([rotn[j] * beam[j] for j in range(3)])
    r = math.sqrt(sum([rotn[j] * rotn[j] for j in range(3)]))
    b = math.sqrt(sum([beam[j] * beam[j] for j in range(3)]))

    rtod = 180.0 / math.pi

    angle = rtod * math.fabs(0.5 * math.pi - math.acos(dot / (r * b)))

    Debug.write('Axis misalignment %.2f degrees' % angle)

    correct_deviations = (correct.get_result('rmsd_pixel'),
                          correct.get_result('rmsd_phi'))

    if p1_deviations:
      # compare and reject if both > 50% higher - though adding a little
      # flexibility - 0.5 pixel / osc width slack.

      pixel = p1_deviations[0]
      phi = math.sqrt(0.05 * 0.05 + \
                      p1_deviations[1] * p1_deviations[1])

      threshold = Flags.get_rejection_threshold()

      Debug.write('RMSD ratio: %.2f' % (correct_deviations[0] / pixel))
      Debug.write('RMSPhi ratio: %.2f' % (correct_deviations[1] / phi))

      if correct_deviations[0] / pixel > threshold and \
             correct_deviations[1] / phi > threshold:

        Chatter.write(
        'Eliminating this indexing solution as postrefinement')
        Chatter.write(
        'deviations rather high relative to triclinic')
        raise BadLatticeError(
            'high relative deviations in postrefinement')

    if not Flags.get_quick() and Flags.get_remove():
      # check for alien reflections and perhaps recycle - removing them
      if len(correct.get_remove()) > 0:

        correct_remove = correct.get_remove()
        current_remove = []
        final_remove = []

        # first ensure that there are no duplicate entries...
        if os.path.exists(os.path.join(
            self.get_working_directory(),
            'REMOVE.HKL')):
          for line in open(os.path.join(
              self.get_working_directory(),
              'REMOVE.HKL'), 'r').readlines():
            h, k, l = map(int, line.split()[:3])
            z = float(line.split()[3])

            if not (h, k, l, z) in current_remove:
              current_remove.append((h, k, l, z))

          for c in correct_remove:
            if c in current_remove:
              continue
            final_remove.append(c)

          Debug.write(
              '%d alien reflections are already removed' % \
              (len(correct_remove) - len(final_remove)))
        else:
          # we want to remove all of the new dodgy reflections
          final_remove = correct_remove

        remove_hkl = open(os.path.join(
            self.get_working_directory(),
            'REMOVE.HKL'), 'w')

        z_min = Flags.get_z_min()
        rejected = 0

        # write in the old reflections
        for remove in current_remove:
          z = remove[3]
          if z >= z_min:
            remove_hkl.write('%d %d %d %f\n' % remove)
          else:
            rejected += 1
        Debug.write('Wrote %d old reflections to REMOVE.HKL' % \
                    (len(current_remove) - rejected))
        Debug.write('Rejected %d as z < %f' % \
                    (rejected, z_min))

        # and the new reflections
        rejected = 0
        used = 0
        for remove in final_remove:
          z = remove[3]
          if z >= z_min:
            used += 1
            remove_hkl.write('%d %d %d %f\n' % remove)
          else:
            rejected += 1
        Debug.write('Wrote %d new reflections to REMOVE.HKL' % \
                    (len(final_remove) - rejected))
        Debug.write('Rejected %d as z < %f' % \
                    (rejected, z_min))

        remove_hkl.close()

        # we want to rerun the finishing step so...
        # unless we have added no new reflections... or unless we
        # have not confirmed the point group (see SCI-398)

        if used and self.get_integrater_reindex_matrix():
          self.set_integrater_finish_done(False)

    else:
      Debug.write(
          'Going quickly so not removing %d outlier reflections...' % \
          len(correct.get_remove()))

    # Convert INTEGRATE.HKL to MTZ format and reapply any reindexing operations
    # spacegroup changes to allow use with CCP4 / Aimless for scaling

    integrate_hkl = os.path.join(
      self.get_working_directory(), 'INTEGRATE.HKL')

    hklout = os.path.splitext(integrate_hkl)[0] + ".mtz"
    self._factory.set_working_directory(self.get_working_directory())
    pointless = self._factory.Pointless()
    pointless.set_xdsin(integrate_hkl)
    pointless.set_hklout(hklout)
    pointless.xds_to_mtz()

    integrate_mtz = hklout

    if self.get_integrater_reindex_operator() or \
       self.get_integrater_spacegroup_number():

      Debug.write('Reindexing things to MTZ')

      reindex = Reindex()
      reindex.set_working_directory(self.get_working_directory())
      auto_logfiler(reindex)

      if self.get_integrater_reindex_operator():
        reindex.set_operator(self.get_integrater_reindex_operator())

      if self.get_integrater_spacegroup_number():
        reindex.set_spacegroup(self.get_integrater_spacegroup_number())

      hklout = '%s_reindex.mtz' % os.path.splitext(integrate_mtz)[0]

      reindex.set_hklin(integrate_mtz)
      reindex.set_hklout(hklout)
      reindex.reindex()
      integrate_mtz = hklout

    return integrate_mtz
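When `_integrate_finish` above hands a reindex matrix to XDS CORRECT via REIDX, the matrix elements are scaled by a multiplier that depends on the Bravais lattice centring (and divided back out when reading a matrix that XDS chose itself). That lookup, isolated as a sketch (illustrative, not xia2 API):

CENTRING_MULTIPLIER = {'P': 1, 'C': 2, 'I': 2, 'R': 3, 'F': 4}

def reidx_multiplier(lattice):
    # lattice is a two-character Bravais symbol, e.g. 'mC' or 'oF'
    try:
        return CENTRING_MULTIPLIER[lattice[1]]
    except KeyError:
        raise RuntimeError('unknown multiplier for lattice %s' % lattice)

assert reidx_multiplier('mC') == 2
assert reidx_multiplier('hR') == 3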
Code Example #15
    def _index(self):
        '''Actually do the autoindexing using the data prepared by the
        previous method.'''

        images_str = '%d to %d' % tuple(self._indxr_images[0])
        for i in self._indxr_images[1:]:
            images_str += ', %d to %d' % tuple(i)

        cell_str = None
        if self._indxr_input_cell:
            cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % \
                       self._indxr_input_cell

        # then this is a proper autoindexing run - describe this
        # to the journal entry

        #if len(self._fp_directory) <= 50:
        #dirname = self._fp_directory
        #else:
        #dirname = '...%s' % self._fp_directory[-46:]
        dirname = self.get_directory()

        Journal.block(
            'autoindexing', self._indxr_sweep_name, 'XDS', {
                'images': images_str,
                'target cell': cell_str,
                'target lattice': self._indxr_input_lattice,
                'template': self.get_template(),
                'directory': dirname
            })

        self._index_remove_masked_regions()

        if self._i_or_ii is None:
            self._i_or_ii = self.decide_i_or_ii()
            Debug.write('Selecting I or II, chose %s' % self._i_or_ii)

        idxref = self.Idxref()

        for file in ['SPOT.XDS']:
            idxref.set_input_data_file(file, self._indxr_payload[file])

        # set the phi start etc correctly

        idxref.set_data_range(self._indxr_images[0][0],
                              self._indxr_images[0][1])
        idxref.set_background_range(self._indxr_images[0][0],
                                    self._indxr_images[0][1])

        if self._i_or_ii == 'i':
            blocks = self._index_select_images_i()
            for block in blocks[:1]:
                starting_frame = block[0]
                starting_angle = self.get_scan().get_angle_from_image_index(
                    starting_frame)

                idxref.set_starting_frame(starting_frame)
                idxref.set_starting_angle(starting_angle)

                idxref.add_spot_range(block[0], block[1])

            for block in blocks[1:]:
                idxref.add_spot_range(block[0], block[1])
        else:
            for block in self._indxr_images[:1]:
                starting_frame = block[0]
                starting_angle = self.get_scan().get_angle_from_image_index(
                    starting_frame)

                idxref.set_starting_frame(starting_frame)
                idxref.set_starting_angle(starting_angle)

                idxref.add_spot_range(block[0], block[1])

            for block in self._indxr_images[1:]:
                idxref.add_spot_range(block[0], block[1])

        # FIXME need to also be able to pass in the known unit
        # cell and lattice if already available e.g. from
        # the helper... indirectly

        if self._indxr_user_input_lattice:
            idxref.set_indexer_user_input_lattice(True)

        if self._indxr_input_lattice and self._indxr_input_cell:
            idxref.set_indexer_input_lattice(self._indxr_input_lattice)
            idxref.set_indexer_input_cell(self._indxr_input_cell)

            Debug.write('Set lattice: %s' % self._indxr_input_lattice)
            Debug.write('Set cell: %f %f %f %f %f %f' % \
                        self._indxr_input_cell)

            original_cell = self._indxr_input_cell
        elif self._indxr_input_lattice:
            idxref.set_indexer_input_lattice(self._indxr_input_lattice)
            original_cell = None
        else:
            original_cell = None

        # FIXED need to set the beam centre here - this needs to come
        # from the input .xinfo object or header, and be converted
        # to the XDS frame... done.

        #mosflm_beam_centre = self.get_beam_centre()
        #xds_beam_centre = beam_centre_mosflm_to_xds(
        #mosflm_beam_centre[0], mosflm_beam_centre[1], self.get_header())
        from dxtbx.serialize.xds import to_xds
        converter = to_xds(self.get_imageset())
        xds_beam_centre = converter.detector_origin

        idxref.set_beam_centre(xds_beam_centre[0], xds_beam_centre[1])

        # fixme need to check if the lattice, cell have been set already,
        # and if they have, pass these in as input to the indexing job.

        done = False

        while not done:
            try:
                done = idxref.run()

                # N.B. in here if the IDXREF step was being run in the first
                # pass done is FALSE however there should be a refined
                # P1 orientation matrix etc. available - so keep it!

            except XDSException as e:
                # inspect this - if we have complaints about not
                # enough reflections indexed, and we have a target
                # unit cell, and they are the same, well ignore it

                if 'solution is inaccurate' in str(e):
                    Debug.write('XDS complains solution inaccurate - ignoring')
                    done = idxref.continue_from_error()
                elif ('insufficient percentage (< 70%)' in str(e) or
                      'insufficient percentage (< 50%)' in str(e)) and \
                         original_cell:
                    done = idxref.continue_from_error()
                    lattice, cell, mosaic = \
                             idxref.get_indexing_solution()
                    # compare solutions
                    check = PhilIndex.params.xia2.settings.xds_check_cell_deviation
                    for j in range(3):
                        # allow two percent variation in unit cell length
                        if math.fabs((cell[j] - original_cell[j]) / \
                                     original_cell[j]) > 0.02 and check:
                            Debug.write('XDS unhappy and solution wrong')
                            raise e
                        # and two degree difference in angle
                        if math.fabs(cell[j + 3] - original_cell[j + 3]) \
                               > 2.0 and check:
                            Debug.write('XDS unhappy and solution wrong')
                            raise e
                    Debug.write('XDS unhappy but solution ok')
                elif 'insufficient percentage (< 70%)' in str(e) or \
                         'insufficient percentage (< 50%)' in str(e):
                    Debug.write('XDS unhappy but solution probably ok')
                    done = idxref.continue_from_error()
                else:
                    raise e

        FileHandler.record_log_file(
            '%s INDEX' % self.get_indexer_full_name(),
            os.path.join(self.get_working_directory(), 'IDXREF.LP'))

        for file in ['SPOT.XDS', 'XPARM.XDS']:
            self._indxr_payload[file] = idxref.get_output_data_file(file)

        # need to get the indexing solutions out somehow...

        self._indxr_other_lattice_cell = idxref.get_indexing_solutions()

        self._indxr_lattice, self._indxr_cell, self._indxr_mosaic = \
                             idxref.get_indexing_solution()

        import dxtbx
        from dxtbx.serialize.xds import to_crystal
        xparm_file = os.path.join(self.get_working_directory(), 'XPARM.XDS')
        models = dxtbx.load(xparm_file)
        crystal_model = to_crystal(xparm_file)

        from dxtbx.model import Experiment, ExperimentList
        experiment = Experiment(
            beam=models.get_beam(),
            detector=models.get_detector(),
            goniometer=models.get_goniometer(),
            scan=models.get_scan(),
            crystal=crystal_model,
            #imageset=self.get_imageset(),
        )

        experiment_list = ExperimentList([experiment])
        self.set_indexer_experiment_list(experiment_list)

        # I will want this later on to check that the lattice was ok
        self._idxref_subtree_problem = idxref.get_index_tree_problem()

        return
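
The exception handler above encodes a simple tolerance test: accept the IDXREF solution if every cell length is within 2% of the target and every angle within 2 degrees. A standalone sketch of that test (the function itself is illustrative, not xia2 API):

import math

def cell_within_tolerance(cell, original_cell, length_tol=0.02, angle_tol=2.0):
    '''True if the indexed cell agrees with the target cell within tolerance.'''
    for j in range(3):
        # fractional deviation in the cell lengths a, b, c
        if math.fabs((cell[j] - original_cell[j]) / original_cell[j]) > length_tol:
            return False
        # absolute deviation (degrees) in the angles alpha, beta, gamma
        if math.fabs(cell[j + 3] - original_cell[j + 3]) > angle_tol:
            return False
    return True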
Code example #16
    def json_object(self, command_line=""):

        result = {}

        for crystal in sorted(self._crystals):
            xcrystal = self._crystals[crystal]

            cell = xcrystal.get_cell()
            spacegroup = xcrystal.get_likely_spacegroups()[0]

            result["AutoProc"] = {}
            tmp = result["AutoProc"]

            tmp["spaceGroup"] = spacegroup
            for name, value in zip(["a", "b", "c", "alpha", "beta", "gamma"],
                                   cell):
                tmp["refinedCell_%s" % name] = value

            result["AutoProcScalingContainer"] = {}
            tmp = result["AutoProcScalingContainer"]
            tmp["AutoProcScaling"] = {
                "recordTimeStamp":
                time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            }

            statistics_all = xcrystal.get_statistics()
            reflection_files = xcrystal.get_scaled_merged_reflections()

            for key in list(statistics_all.keys()):
                pname, xname, dname = key

                # FIXME should assert that the dname is a
                # valid wavelength name

                keys = [
                    "High resolution limit",
                    "Low resolution limit",
                    "Completeness",
                    "Multiplicity",
                    "I/sigma",
                    "Rmerge(I+/-)",
                    "CC half",
                    "Anomalous completeness",
                    "Anomalous correlation",
                    "Anomalous multiplicity",
                    "Total observations",
                    "Total unique",
                    "Rmeas(I)",
                    "Rmeas(I+/-)",
                    "Rpim(I)",
                    "Rpim(I+/-)",
                    "Partial Bias",
                ]
                stats = [k for k in keys if k in statistics_all[key]]

                xwavelength = xcrystal.get_xwavelength(dname)
                sweeps = xwavelength.get_sweeps()

                tmp["AutoProcScalingStatistics"] = []
                tmp2 = tmp["AutoProcScalingStatistics"]

                for j, name in enumerate(
                    ["overall", "innerShell", "outerShell"]):
                    statistics_cache = {"scalingStatisticsType": name}

                    for s in stats:

                        if s in self._name_map:
                            n = self._name_map[s]
                        else:
                            continue

                        # lists and tuples both carry per-shell values
                        if isinstance(statistics_all[key][s], (list, tuple)):
                            statistics_cache[n] = statistics_all[key][s][j]

                    tmp2.append(statistics_cache)

                tmp["AutoProcIntegrationContainer"] = []
                tmp2 = tmp["AutoProcIntegrationContainer"]
                for sweep in sweeps:
                    if "#" in sweep.get_template():
                        image_name = sweep.get_image_name(0)
                    else:
                        image_name = os.path.join(sweep.get_directory(),
                                                  sweep.get_template())
                    cell = sweep.get_integrater_cell()
                    intgr_tmp = {}
                    for name, value in zip(
                        ["a", "b", "c", "alpha", "beta", "gamma"], cell):
                        intgr_tmp["cell_%s" % name] = value

                    # FIXME this is naughty
                    indxr = sweep._get_indexer()
                    intgr = sweep._get_integrater()

                    start, end = intgr.get_integrater_wedge()

                    intgr_tmp["startImageNumber"] = start
                    intgr_tmp["endImageNumber"] = end

                    intgr_tmp["refinedDetectorDistance"] = indxr.get_indexer_distance()

                    beam = indxr.get_indexer_beam_centre_raw_image()

                    intgr_tmp["refinedXBeam"] = beam[0]
                    intgr_tmp["refinedYBeam"] = beam[1]

                    tmp2.append({
                        "Image": {
                            "fileName": os.path.split(image_name)[-1],
                            "fileLocation":
                            sanitize(os.path.split(image_name)[0]),
                        },
                        "AutoProcIntegration": intgr_tmp,
                    })

            # file unpacking nonsense
            result["AutoProcProgramContainer"] = {}
            tmp = result["AutoProcProgramContainer"]
            tmp2 = {}

            if not command_line:
                from xia2.Handlers.CommandLine import CommandLine

                command_line = CommandLine.get_command_line()

            tmp2["processingCommandLine"] = sanitize(command_line)
            tmp2["processingProgram"] = "xia2"

            tmp["AutoProcProgram"] = tmp2
            tmp["AutoProcProgramAttachment"] = []
            tmp2 = tmp["AutoProcProgramAttachment"]

            data_directory = self._project.path / "DataFiles"

            for k in reflection_files:
                reflection_file = reflection_files[k]

                if not isinstance(reflection_file, str):
                    continue

                reflection_file = FileHandler.get_data_file(
                    self._project.path, reflection_file)
                basename = os.path.basename(reflection_file)

                if data_directory.joinpath(basename).exists():
                    # Use file in DataFiles directory in preference (if it exists)
                    reflection_file = str(data_directory.joinpath(basename))

                tmp2.append({
                    "fileType":
                    "Result",
                    "fileName":
                    os.path.split(reflection_file)[-1],
                    "filePath":
                    sanitize(os.path.split(reflection_file)[0]),
                })

            tmp2.append({
                "fileType": "Log",
                "fileName": "xia2.txt",
                "filePath": sanitize(os.getcwd()),
            })

        return result
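
For orientation, the dictionary assembled by json_object() has the nesting sketched below; the key names come from the method above, while every value here is an invented placeholder:

import json

illustration = {
    'AutoProc': {'spaceGroup': 'P 21 21 21', 'refinedCell_a': 50.0},
    'AutoProcScalingContainer': {
        'AutoProcScaling': {'recordTimeStamp': '2024-01-01 00:00:00'},
        'AutoProcScalingStatistics': [{'scalingStatisticsType': 'overall'}],
        'AutoProcIntegrationContainer': [],
    },
    'AutoProcProgramContainer': {
        'AutoProcProgram': {'processingProgram': 'xia2'},
        'AutoProcProgramAttachment': [],
    },
}
print(json.dumps(illustration, indent=2))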
Code example #17
File: MosflmIntegrater.py Project: xia2/xia2
      (self._intgr_wedge[0] - offset, self._intgr_wedge[1] - offset))

    try:
      integrater.run()
    except RuntimeError as e:
      if 'integration failed: reason unknown' in str(e):
        Chatter.write('Mosflm has failed in integration')
        message = 'The input was:\n\n'
        for input in integrater.get_all_input():
          message += '  %s' % input
        Chatter.write(message)
      raise

    FileHandler.record_log_file(
        '%s %s %s %s mosflm integrate' % \
        (self.get_integrater_sweep_name(),
         pname, xname, dname),
        integrater.get_log_file())

    self._intgr_per_image_statistics = integrater.get_per_image_statistics()

    self._mosflm_hklout = integrater.get_hklout()
    Debug.write('Integration output: %s' %self._mosflm_hklout)

    self._intgr_n_ref = integrater.get_nref()

    # if a BGSIG error happened try not refining the
    # profile and running again...

    if integrater.get_bgsig_too_large():
      if not self._mosflm_refine_profiles:
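
The example breaks off just as it tests get_bgsig_too_large(); the surrounding comment explains the intent: if Mosflm raised a BGSIG error, retry the integration without profile refinement. A hedged sketch of that fallback (set_refine_profiles is a hypothetical setter standing in for however the wrapper disables profile refinement):

def integrate_with_bgsig_fallback(integrater):
    integrater.run()
    if integrater.get_bgsig_too_large():
        # profile refinement destabilised the background estimate; retry without it
        integrater.set_refine_profiles(False)  # hypothetical setter
        integrater.run()
    return integrater.get_hklout()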
Code example #18
    def _index(self):
        if PhilIndex.params.dials.index.method in (libtbx.Auto, None):
            if self._indxr_input_cell is not None:
                indexer = self._do_indexing("real_space_grid_search")
            else:
                try:
                    indexer_fft3d = self._do_indexing(method="fft3d")
                    nref_3d, rmsd_3d = indexer_fft3d.get_nref_rmsds()
                except Exception as e:
                    nref_3d = None
                    rmsd_3d = None
                    indexing_failure = e
                try:
                    indexer_fft1d = self._do_indexing(method="fft1d")
                    nref_1d, rmsd_1d = indexer_fft1d.get_nref_rmsds()
                except Exception as e:
                    nref_1d = None
                    rmsd_1d = None
                    indexing_failure = e

                # choose between the two attempts; comparing None with ">"
                # raises a TypeError, so handle the failure cases explicitly
                if nref_1d is None and nref_3d is None:
                    raise RuntimeError(indexing_failure)
                elif nref_3d is None:
                    indexer = indexer_fft1d
                elif nref_1d is None:
                    indexer = indexer_fft3d
                elif (nref_1d > nref_3d and rmsd_1d[0] < rmsd_3d[0]
                      and rmsd_1d[1] < rmsd_3d[1] and rmsd_1d[2] < rmsd_3d[2]):
                    indexer = indexer_fft1d
                else:
                    indexer = indexer_fft3d

        else:
            indexer = self._do_indexing(
                method=PhilIndex.params.dials.index.method)

        # not strictly the P1 cell, rather the cell that was used in indexing
        self._p1_cell = indexer._p1_cell
        self.set_indexer_payload("indexed_filename",
                                 indexer.get_indexed_filename())

        indexed_file = indexer.get_indexed_filename()
        indexed_experiments = indexer.get_experiments_filename()

        fast_mode = PhilIndex.params.dials.fast_mode
        trust_beam_centre = PhilIndex.params.xia2.settings.trust_beam_centre
        multi_sweep_indexing = PhilIndex.params.xia2.settings.multi_sweep_indexing
        check_indexing_symmetry = PhilIndex.params.dials.check_indexing_symmetry

        if check_indexing_symmetry and not (trust_beam_centre or fast_mode
                                            or multi_sweep_indexing):
            checksym = self.CheckIndexingSymmetry()
            checksym.set_experiments_filename(indexed_experiments)
            checksym.set_indexed_filename(indexed_file)
            checksym.set_grid_search_scope(1)
            checksym.run()
            hkl_offset = checksym.get_hkl_offset()
            logger.debug("hkl_offset: %s", str(hkl_offset))
            if hkl_offset is not None and hkl_offset != (0, 0, 0):
                reindex = self.Reindex()
                reindex.set_hkl_offset(hkl_offset)
                reindex.set_indexed_filename(indexed_file)
                reindex.run()
                indexed_file = reindex.get_reindexed_reflections_filename()

                # do some scan-static refinement - run twice, first without outlier
                # rejection as the model is too far from reality to do a sensible job of
                # outlier rejection
                refiner = self.Refine()
                refiner.set_experiments_filename(indexed_experiments)
                refiner.set_indexed_filename(
                    reindex.get_reindexed_reflections_filename())
                refiner.set_outlier_algorithm(None)
                refiner.run()
                indexed_experiments = refiner.get_refined_experiments_filename()

                # now again with outlier rejection (possibly)
                refiner = self.Refine()
                refiner.set_experiments_filename(indexed_experiments)
                refiner.set_indexed_filename(indexed_file)
                refiner.run()
                indexed_experiments = refiner.get_refined_experiments_filename()

        if self._indxr_input_lattice is None:

            # FIXME in here should respect the input unit cell and lattice if provided

            # FIXME from this (i) populate the helper table,
            # (ii) try to avoid re-running the indexing
            # step if we eliminate a solution as we have all of the refined results
            # already available.

            rbs = self.RefineBravaisSettings()
            rbs.set_experiments_filename(indexed_experiments)
            rbs.set_indexed_filename(indexed_file)
            if PhilIndex.params.dials.fix_geometry:
                rbs.set_detector_fix("all")
                rbs.set_beam_fix("all")
            elif PhilIndex.params.dials.fix_distance:
                rbs.set_detector_fix("distance")

            FileHandler.record_log_file(
                "%s LATTICE" % self.get_indexer_full_name(),
                rbs.get_log_file())
            rbs.run()

            for k in sorted(rbs.get_bravais_summary()):
                summary = rbs.get_bravais_summary()[k]

                # FIXME need to do this better - for the moment only accept lattices
                # where R.M.S. deviation is less than twice P1 R.M.S. deviation.

                if self._indxr_input_lattice is None:
                    if not summary["recommended"]:
                        continue

                experiments = load.experiment_list(summary["experiments_file"],
                                                   check_format=False)
                cryst = experiments.crystals()[0]
                cs = crystal.symmetry(unit_cell=cryst.get_unit_cell(),
                                      space_group=cryst.get_space_group())
                lattice = str(
                    bravais_types.bravais_lattice(group=cs.space_group()))
                cb_op = sgtbx.change_of_basis_op(str(summary["cb_op"]))

                self._solutions[k] = {
                    "number": k,
                    "mosaic": 0.0,
                    "metric": summary["max_angular_difference"],
                    "rmsd": summary["rmsd"],
                    "nspots": summary["nspots"],
                    "lattice": lattice,
                    "cell": cs.unit_cell().parameters(),
                    "experiments_file": summary["experiments_file"],
                    "cb_op": str(cb_op),
                }

            self._solution = self.get_solution()
            self._indxr_lattice = self._solution["lattice"]

            for solution in self._solutions:
                lattice = self._solutions[solution]["lattice"]
                if (self._indxr_input_lattice is not None
                        and self._indxr_input_lattice != lattice):
                    continue
                if lattice in self._indxr_other_lattice_cell:
                    if (self._indxr_other_lattice_cell[lattice]["metric"] <
                            self._solutions[solution]["metric"]):
                        continue

                self._indxr_other_lattice_cell[lattice] = {
                    "metric": self._solutions[solution]["metric"],
                    "cell": self._solutions[solution]["cell"],
                }

            self._indxr_mosaic = self._solution["mosaic"]

            experiments_file = self._solution["experiments_file"]
            experiment_list = load.experiment_list(experiments_file)
            self.set_indexer_experiment_list(experiment_list)

            self.set_indexer_payload("experiments_filename", experiments_file)

            # reindex the output reflection list to this solution
            reindex = self.Reindex()
            reindex.set_indexed_filename(indexed_file)
            reindex.set_cb_op(self._solution["cb_op"])
            reindex.set_space_group(
                str(lattice_to_spacegroup_number(self._solution["lattice"])))
            reindex.run()
            indexed_file = reindex.get_reindexed_reflections_filename()
            self.set_indexer_payload("indexed_filename", indexed_file)

        else:
            experiment_list = load.experiment_list(indexed_experiments)
            self.set_indexer_experiment_list(experiment_list)
            self.set_indexer_payload("experiments_filename",
                                     indexed_experiments)

            cryst = experiment_list.crystals()[0]
            lattice = str(
                bravais_types.bravais_lattice(group=cryst.get_space_group()))
            self._indxr_lattice = lattice
            self._solutions = {}
            self._solutions[0] = {
                "number": 0,
                "mosaic": 0.0,
                "metric": -1,
                "rmsd": -1,
                "nspots": -1,
                "lattice": lattice,
                "cell": cryst.get_unit_cell().parameters(),
                "experiments_file": indexed_experiments,
                "cb_op": "a,b,c",
            }

            self._indxr_other_lattice_cell[lattice] = {
                "metric": self._solutions[0]["metric"],
                "cell": self._solutions[0]["cell"],
            }
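
One detail worth isolating from the method above is the two-pass refinement after an hkl offset is applied: the first dials.refine run disables outlier rejection because the model is still too far from reality, the second run uses the default algorithm. A sketch under the assumption that make_refiner behaves like the self.Refine() factory in the snippet:

def refine_twice(make_refiner, experiments, reflections):
    first = make_refiner()
    first.set_experiments_filename(experiments)
    first.set_indexed_filename(reflections)
    first.set_outlier_algorithm(None)  # pass 1: no outlier rejection
    first.run()

    second = make_refiner()
    second.set_experiments_filename(first.get_refined_experiments_filename())
    second.set_indexed_filename(reflections)
    second.run()  # pass 2: default outlier rejection
    return second.get_refined_experiments_filename()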
Code example #19
File: XDSScalerA.py Project: xia2/xia2
      def run_one_sweep(args):
        sweep_information = args[0]
        pointless_indexer_jiffy = args[1]
        factory = args[2]
        job_type = args[3]

        if job_type:
          DriverFactory.set_driver_type(job_type)

        intgr = sweep_information['integrater']
        hklin = sweep_information['corrected_intensities']
        refiner = intgr.get_integrater_refiner()

        # in here need to consider what to do if the user has
        # assigned the pointgroup on the command line ...

        if not self._scalr_input_pointgroup:
          pointgroup, reindex_op, ntr = \
                      self._pointless_indexer_jiffy(hklin, refiner)

          if ntr:

            # Bug # 3373

            Debug.write('Reindex to standard (PIJ): %s' % \
                        reindex_op)

            intgr.set_integrater_reindex_operator(
                reindex_op, compose = False)
            reindex_op = 'h,k,l'
            need_to_return = True

        else:

          # 27/FEB/08 to support user assignment of pointgroups

          Debug.write('Using input pointgroup: %s' % \
                      self._scalr_input_pointgroup)
          pointgroup = self._scalr_input_pointgroup
          reindex_op = 'h,k,l'

        intgr.set_integrater_reindex_operator(reindex_op)
        intgr.set_integrater_spacegroup_number(
            Syminfo.spacegroup_name_to_number(pointgroup))
        sweep_information['corrected_intensities'] \
          = intgr.get_integrater_corrected_intensities()

        # convert the XDS_ASCII for this sweep to mtz - on the next
        # get this should be in the correct setting...

        hklin = sweep_information['corrected_intensities']

        # now use pointless to make this conversion

        # try with no conversion?!

        pointless = self._factory.Pointless()
        pointless.set_xdsin(hklin)
        hklout = os.path.join(
          self.get_working_directory(),
          '%d_xds-pointgroup-unsorted.mtz' %pointless.get_xpid())
        FileHandler.record_temporary_file(hklout)
        pointless.set_hklout(hklout)
        pointless.xds_to_mtz()

        pointless = self._factory.Pointless()
        pointless.set_hklin(hklout)
        pointless.set_hklref(self._reference)
        pointless.decide_pointgroup()

        pointgroup = pointless.get_pointgroup()
        reindex_op = pointless.get_reindex_operator()

        # for debugging print out the reindexing operations and
        # what have you...

        Debug.write('Reindex to standard: %s' % reindex_op)

        # this should send back enough information that this
        # is in the correct pointgroup (from the call above) and
        # also in the correct setting, from the interaction
        # with the reference set... - though I guess that the
        # spacegroup number should not have changed, right?

        # set the reindex operation afterwards... though if the
        # spacegroup number is the same this should make no
        # difference, right?!

        intgr.set_integrater_spacegroup_number(
            Syminfo.spacegroup_name_to_number(pointgroup))
        intgr.set_integrater_reindex_operator(reindex_op)
        sweep_information['corrected_intensities'] \
          = intgr.get_integrater_corrected_intensities()

        # and copy the reflection file to the local directory

        dname = sweep_information['dname']
        sname = intgr.get_integrater_sweep_name()
        hklin = sweep_information['corrected_intensities']
        hklout = os.path.join(self.get_working_directory(),
                              '%s_%s.HKL' % (dname, sname))

        Debug.write('Copying %s to %s' % (hklin, hklout))
        shutil.copyfile(hklin, hklout)

        # record just the local file name...
        sweep_information['prepared_reflections'] = os.path.split(hklout)[-1]
        return sweep_information
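
Stripped of the bookkeeping, run_one_sweep holds a two-stage Pointless conversation: convert the XDS_ASCII file to MTZ, then test that MTZ against the reference set to fix the pointgroup and setting. A compact sketch reusing only the wrapper calls that appear above:

import os

def settle_pointgroup(factory, xds_ascii, reference_mtz, work_dir):
    converter = factory.Pointless()
    converter.set_xdsin(xds_ascii)
    mtz = os.path.join(work_dir, 'unsorted.mtz')
    converter.set_hklout(mtz)
    converter.xds_to_mtz()  # stage 1: format conversion only

    decider = factory.Pointless()
    decider.set_hklin(mtz)
    decider.set_hklref(reference_mtz)
    decider.decide_pointgroup()  # stage 2: settle pointgroup against reference
    return decider.get_pointgroup(), decider.get_reindex_operator()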
Code example #20
    def get_output(self):

        result = "Crystal: %s\n" % self._name

        if self._aa_sequence:
            result += "Sequence: %s\n" % self._aa_sequence.get_sequence()
        for wavelength in self._wavelengths.keys():
            result += self._wavelengths[wavelength].get_output()

        scaler = self._get_scaler()
        if scaler.get_scaler_finish_done():
            for wname, xwav in self._wavelengths.items():
                for xsweep in xwav.get_sweeps():
                    idxr = xsweep._get_indexer()
                    if PhilIndex.params.xia2.settings.show_template:
                        result += "%s\n" % banner(
                            "Autoindexing %s (%s)" %
                            (idxr.get_indexer_sweep_name(),
                             idxr.get_template()))
                    else:
                        result += "%s\n" % banner(
                            "Autoindexing %s" % idxr.get_indexer_sweep_name())
                    result += "%s\n" % idxr.show_indexer_solutions()

                    intgr = xsweep._get_integrater()
                    if PhilIndex.params.xia2.settings.show_template:
                        result += "%s\n" % banner(
                            "Integrating %s (%s)" %
                            (intgr.get_integrater_sweep_name(),
                             intgr.get_template()))
                    else:
                        result += "%s\n" % banner(
                            "Integrating %s" %
                            intgr.get_integrater_sweep_name())
                    result += "%s\n" % intgr.show_per_image_statistics()

            result += "%s\n" % banner("Scaling %s" % self.get_name())

            for (
                (dname, sname),
                (limit, suggested),
            ) in scaler.get_scaler_resolution_limits().items():
                if suggested is None or limit == suggested:
                    result += "Resolution limit for %s/%s: %5.2f\n" % (
                        dname,
                        sname,
                        limit,
                    )
                else:
                    result += (
                        "Resolution limit for %s/%s: %5.2f (%5.2f suggested)\n"
                        % (dname, sname, limit, suggested))

        # this is now deprecated - be explicit in what you are
        # asking for...
        reflections_all = self.get_scaled_merged_reflections()
        statistics_all = self._get_scaler().get_scaler_statistics()

        # print some of these statistics, perhaps?

        for key in statistics_all.keys():
            result += format_statistics(statistics_all[key],
                                        caption="For %s/%s/%s" % key)

        # then print out some "derived" information based on the
        # scaling - this is presented through the Scaler interface
        # explicitly...

        cell = self._get_scaler().get_scaler_cell()
        cell_esd = self._get_scaler().get_scaler_cell_esd()
        spacegroups = self._get_scaler().get_scaler_likely_spacegroups()

        spacegroup = spacegroups[0]
        resolution = self._get_scaler().get_scaler_highest_resolution()

        from cctbx import sgtbx

        sg = sgtbx.space_group_type(str(spacegroup))
        spacegroup = sg.lookup_symbol()
        CIF.set_spacegroup(sg)
        mmCIF.set_spacegroup(sg)

        if len(self._wavelengths) == 1:
            CIF.set_wavelengths(
                [w.get_wavelength() for w in self._wavelengths.values()])
            mmCIF.set_wavelengths(
                [w.get_wavelength() for w in self._wavelengths.values()])
        else:
            for wavelength in self._wavelengths.keys():
                full_wave_name = "%s_%s_%s" % (
                    self._project._name,
                    self._name,
                    wavelength,
                )
                CIF.get_block(full_wave_name)[
                    "_diffrn_radiation_wavelength"] = self._wavelengths[
                        wavelength].get_wavelength()
                mmCIF.get_block(full_wave_name)[
                    "_diffrn_radiation_wavelength"] = self._wavelengths[
                        wavelength].get_wavelength()
            CIF.set_wavelengths({
                name: wave.get_wavelength()
                for name, wave in self._wavelengths.items()
            })
            mmCIF.set_wavelengths({
                name: wave.get_wavelength()
                for name, wave in self._wavelengths.items()
            })

        result += "Assuming spacegroup: %s\n" % spacegroup
        if len(spacegroups) > 1:
            result += "Other likely alternatives are:\n"
            for sg in spacegroups[1:]:
                result += "%s\n" % sg

        if cell_esd:
            from libtbx.utils import format_float_with_standard_uncertainty

            def match_formatting(dimA, dimB):
                def conditional_split(s):
                    return ((s[:s.index(".")],
                             s[s.index("."):]) if "." in s else (s, ""))

                A, B = conditional_split(dimA), conditional_split(dimB)
                maxlen = (max(len(A[0]), len(B[0])), max(len(A[1]), len(B[1])))
                return (
                    A[0].rjust(maxlen[0]) + A[1].ljust(maxlen[1]),
                    B[0].rjust(maxlen[0]) + B[1].ljust(maxlen[1]),
                )

            formatted_cell_esds = tuple(
                format_float_with_standard_uncertainty(v, sd)
                for v, sd in zip(cell, cell_esd))
            formatted_rows = (formatted_cell_esds[0:3],
                              formatted_cell_esds[3:6])
            formatted_rows = list(
                zip(*(match_formatting(l, a)
                      for l, a in zip(*formatted_rows))))
            result += "Unit cell (with estimated std devs):\n"
            result += "%s %s %s\n%s %s %s\n" % (formatted_rows[0] +
                                                formatted_rows[1])
        else:
            result += "Unit cell:\n"
            result += "%7.3f %7.3f %7.3f\n%7.3f %7.3f %7.3f\n" % tuple(cell)

        # now, use this information and the sequence (if provided)
        # and also matthews_coef (should I be using this directly, here?)
        # to compute a likely number of molecules in the ASU and also
        # the solvent content...

        if self._aa_sequence:
            residues = self._aa_sequence.get_sequence()
            if residues:
                nres = len(residues)

                # first compute the number of molecules using the K&R
                # method

                nmol = compute_nmol(
                    cell[0],
                    cell[1],
                    cell[2],
                    cell[3],
                    cell[4],
                    cell[5],
                    spacegroup,
                    resolution,
                    nres,
                )

                # then compute the solvent fraction

                solvent = compute_solvent(
                    cell[0],
                    cell[1],
                    cell[2],
                    cell[3],
                    cell[4],
                    cell[5],
                    spacegroup,
                    nmol,
                    nres,
                )

                result += "Likely number of molecules in ASU: %d\n" % nmol
                result += "Giving solvent fraction:        %4.2f\n" % solvent

                self._nmol = nmol

        if isinstance(reflections_all, dict):
            for format in reflections_all.keys():
                result += "%s format:\n" % format
                reflections = reflections_all[format]

                if isinstance(reflections, dict):
                    for wavelength in reflections.keys():
                        target = FileHandler.get_data_file(
                            reflections[wavelength])
                        result += "Scaled reflections (%s): %s\n" % (
                            wavelength, target)

                else:
                    target = FileHandler.get_data_file(reflections)
                    result += "Scaled reflections: %s\n" % target

        CIF.write_cif()
        mmCIF.write_cif()

        return result
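
A short usage note on the match_formatting helper above: it splits each value at the decimal point, right-justifies the integer parts and left-justifies the fractional parts, so cell parameters with uncertainties print in aligned columns. If the helper were lifted out to module level:

a, b = match_formatting('98.765(4)', '7.89(10)')
print(a)  # '98.765(4)'
print(b)  # ' 7.89(10)' - the decimal points now line up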
Code example #21
File: CommonScaler.py Project: hainm/xia2
  def _scale_finish(self):

    # compute anomalous signals if anomalous

    if self.get_scaler_anomalous():
      for key in self._scalr_scaled_refl_files:
        f = self._scalr_scaled_refl_files[key]
        from iotbx import mtz
        m = mtz.object(f)
        if m.space_group().is_centric():
          Debug.write('Spacegroup is centric: %s' % f)
          continue
        Debug.write('Running anomalous signal analysis on %s' % f)
        a_s = anomalous_signals(f)
        self._scalr_statistics[
            (self._scalr_pname, self._scalr_xname, key)
            ]['dF/F'] = [a_s[0]]
        self._scalr_statistics[
            (self._scalr_pname, self._scalr_xname, key)
            ]['dI/s(dI)'] = [a_s[1]]

    # next transform to F's from I's etc.

    if not self._scalr_scaled_refl_files:
      raise RuntimeError('no reflection files stored')

    # run xia2.report on each unmerged mtz file
    from iotbx.reflection_file_reader import any_reflection_file
    from iotbx import mtz
    from cctbx.array_family import flex

    for wavelength in self._scalr_scaled_refl_files.keys():
      mtz_unmerged = self._scalr_scaled_reflection_files['mtz_unmerged'][wavelength]
      reader = any_reflection_file(mtz_unmerged)
      mtz_object = reader.file_content()
      batches = mtz_object.as_miller_arrays_dict()['HKL_base', 'HKL_base', 'BATCH']
      dose = flex.double(batches.size(), -1)
      batch_to_dose = self.get_batch_to_dose()
      for i, b in enumerate(batches.data()):
        dose[i] = batch_to_dose[b]
      c = mtz_object.crystals()[0]
      d = c.datasets()[0]
      d.add_column('DOSE', 'R').set_values(dose.as_float())
      tmp_mtz = os.path.join(self.get_working_directory(), 'dose_tmp.mtz')
      mtz_object.write(tmp_mtz)
      hklin = tmp_mtz
      FileHandler.record_temporary_file(hklin)

      from xia2.Wrappers.XIA.Report import Report
      report = Report()
      report.set_working_directory(self.get_working_directory())
      report.set_mtz_filename(hklin)
      htmlout = os.path.join(
        self.get_working_directory(), '%s_%s_%s_report.html' %(
          self._scalr_pname, self._scalr_xname, wavelength))
      report.set_html_filename(htmlout)
      report.set_chef_min_completeness(0.95) # sensible?
      from xia2.lib.bits import auto_logfiler
      auto_logfiler(report)
      try:
        report.run()
      except Exception as e:
        Debug.write('xia2.report failed:')
        Debug.write(str(e))
        continue
      FileHandler.record_html_file(
        '%s %s %s report' %(
          self._scalr_pname, self._scalr_xname, wavelength), htmlout)
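
The DOSE column construction above is a plain lookup from BATCH number to accumulated dose. A minimal sketch using the same flex call as the snippet, where batch_to_dose stands for whatever mapping get_batch_to_dose() returns:

from cctbx.array_family import flex

def dose_column(batch_values, batch_to_dose):
    '''Per-reflection dose values built from the BATCH column data.'''
    dose = flex.double(len(batch_values), -1)
    for i, b in enumerate(batch_values):
        dose[i] = batch_to_dose[b]
    return dose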
Code example #22
File: XDSScalerA.py Project: xia2/xia2
  def _scale_prepare(self):
    '''Prepare the data for scaling - this will reindex the
    reflections to the correct pointgroup and setting, for instance,
    and move the reflection files to the scale directory.'''

    Citations.cite('xds')
    Citations.cite('ccp4')
    Citations.cite('pointless')

    # GATHER phase - get the reflection files together... note that
    # it is not necessary in here to keep the batch information as we
    # don't wish to rebatch the reflections prior to scaling.
    # FIXME need to think about what I will do about the radiation
    # damage analysis in here...

    self._sweep_information = { }

    # FIXME in here I want to record the batch number to
    # epoch mapping as per the CCP4 Scaler implementation.

    Journal.block(
        'gathering', self.get_scaler_xcrystal().get_name(), 'XDS',
        {'working directory':self.get_working_directory()})

    for epoch in self._scalr_integraters.keys():
      intgr = self._scalr_integraters[epoch]
      pname, xname, dname = intgr.get_integrater_project_info()
      sname = intgr.get_integrater_sweep_name()
      self._sweep_information[epoch] = {
          'pname':pname,
          'xname':xname,
          'dname':dname,
          'integrater':intgr,
          'corrected_intensities':intgr.get_integrater_corrected_intensities(),
          'prepared_reflections':None,
          'scaled_reflections':None,
          'header':intgr.get_header(),
          'batches':intgr.get_integrater_batches(),
          'image_to_epoch':intgr.get_integrater_sweep(
          ).get_image_to_epoch(),
          'image_to_dose':{},
          'batch_offset':0,
          'sname':sname
          }

      Journal.entry({'adding data from':'%s/%s/%s' % \
                     (xname, dname, sname)})

      # what are these used for?
      # pname / xname / dname - dataset identifiers
      # image to epoch / batch offset / batches - for RD analysis

      Debug.write('For EPOCH %s have:' % str(epoch))
      Debug.write('ID = %s/%s/%s' % (pname, xname, dname))
      Debug.write('SWEEP = %s' % intgr.get_integrater_sweep_name())

    # next work through all of the reflection files and make sure that
    # they are XDS_ASCII format...

    epochs = sorted(self._sweep_information.keys())

    self._first_epoch = min(epochs)

    self._scalr_pname = self._sweep_information[epochs[0]]['pname']
    self._scalr_xname = self._sweep_information[epochs[0]]['xname']

    for epoch in epochs:
      intgr = self._scalr_integraters[epoch]
      pname = self._sweep_information[epoch]['pname']
      xname = self._sweep_information[epoch]['xname']
      dname = self._sweep_information[epoch]['dname']
      sname = self._sweep_information[epoch]['sname']
      if self._scalr_pname != pname:
        raise RuntimeError('all data must have a common project name')
      if self._scalr_xname != xname:
        raise RuntimeError(
            'all data for scaling must come from one crystal')

      xsh = XDSScalerHelper()
      xsh.set_working_directory(self.get_working_directory())
      hklin = self._sweep_information[epoch]['corrected_intensities']
      hklout = os.path.join(self.get_working_directory(),
                            '%s_%s_%s_%s_CORRECTED.HKL' %(
                              pname, xname, dname, sname))
      sweep = intgr.get_integrater_sweep()
      if sweep.get_frames_to_process() is not None:
        offset = intgr.get_frame_offset()
        #print "offset: %d" %offset
        start, end = sweep.get_frames_to_process()
        start -= offset
        end -= offset
        #end += 1 ????
        #print "limiting batches: %d-%d" %(start, end)
        xsh.limit_batches(hklin, hklout, start, end)
        self._sweep_information[epoch]['corrected_intensities'] = hklout

    # if there is more than one sweep then compare the lattices
    # and eliminate all but the lowest symmetry examples if
    # there are more than one...

    # -------------------------------------------------
    # Ensure that the integration lattices are the same
    # -------------------------------------------------

    need_to_return = False

    if len(self._sweep_information.keys()) > 1:

      lattices = []

      # FIXME run this stuff in parallel as well...

      for epoch in self._sweep_information.keys():

        intgr = self._sweep_information[epoch]['integrater']
        hklin = self._sweep_information[epoch]['corrected_intensities']
        refiner = intgr.get_integrater_refiner()

        if self._scalr_input_pointgroup:
          pointgroup = self._scalr_input_pointgroup
          reindex_op = 'h,k,l'
          ntr = False

        else:

          pointgroup, reindex_op, ntr = \
                      self._pointless_indexer_jiffy(hklin, refiner)

          Debug.write('X1698: %s: %s' % (pointgroup, reindex_op))

        lattice = Syminfo.get_lattice(pointgroup)

        if not lattice in lattices:
          lattices.append(lattice)

        if ntr:

          # if we need to return, we should logically reset
          # any reindexing operator right? right here all
          # we are talking about is the correctness of
          # individual pointgroups?? Bug # 3373

          reindex_op = 'h,k,l'
          # actually, should this not be done "by magic"
          # when a new pointgroup is assigned in the
          # pointless indexer jiffy above?!

          intgr.set_integrater_reindex_operator(
              reindex_op, compose = False)

          need_to_return = True

      # bug # 2433 - need to ensure that all of the lattice
      # conclusions were the same...

      if len(lattices) > 1:
        ordered_lattices = []
        for l in lattices_in_order():
          if l in lattices:
            ordered_lattices.append(l)

        correct_lattice = ordered_lattices[0]
        Debug.write('Correct lattice asserted to be %s' % \
                    correct_lattice)

        # transfer this information back to the indexers
        for epoch in self._sweep_information.keys():
          integrater = self._sweep_information[
              epoch]['integrater']
          refiner = integrater.get_integrater_refiner()
          sname = integrater.get_integrater_sweep_name()

          if not refiner:
            continue

          state = refiner.set_refiner_asserted_lattice(
              correct_lattice)
          if state == refiner.LATTICE_CORRECT:
            Debug.write('Lattice %s ok for sweep %s' % \
                        (correct_lattice, sname))
          elif state == refiner.LATTICE_IMPOSSIBLE:
            raise RuntimeError('Lattice %s impossible for %s' %
                               (correct_lattice, sname))
          elif state == refiner.LATTICE_POSSIBLE:
            Debug.write('Lattice %s assigned for sweep %s' % \
                        (correct_lattice, sname))
            need_to_return = True

    # if one or more of them was not in the lowest lattice,
    # need to return here to allow reprocessing

    if need_to_return:
      self.set_scaler_done(False)
      self.set_scaler_prepare_done(False)
      return

    # next if there is more than one sweep then generate
    # a merged reference reflection file to check that the
    # setting for all reflection files is the same...

    # if we get to here then all data was processed with the same
    # lattice

    # ----------------------------------------------------------
    # next ensure that all sweeps are set in the correct setting
    # ----------------------------------------------------------

    if self.get_scaler_reference_reflection_file():
      self._reference = self.get_scaler_reference_reflection_file()
      Debug.write('Using HKLREF %s' % self._reference)

      md = self._factory.Mtzdump()
      md.set_hklin(self.get_scaler_reference_reflection_file())
      md.dump()

      self._xds_spacegroup = Syminfo.spacegroup_name_to_number(
          md.get_spacegroup())

      Debug.write('Spacegroup %d' % self._xds_spacegroup)

    elif PhilIndex.params.xia2.settings.scale.reference_reflection_file:
      self._reference = PhilIndex.params.xia2.settings.scale.reference_reflection_file

      Debug.write('Using HKLREF %s' % self._reference)

      md = self._factory.Mtzdump()
      md.set_hklin(PhilIndex.params.xia2.settings.scale.reference_reflection_file)
      md.dump()

      self._xds_spacegroup = Syminfo.spacegroup_name_to_number(
          md.get_spacegroup())

      Debug.write('Spacegroup %d' % self._xds_spacegroup)

    params = PhilIndex.params
    use_brehm_diederichs = params.xia2.settings.use_brehm_diederichs
    if len(self._sweep_information.keys()) > 1 and use_brehm_diederichs:
      brehm_diederichs_files_in = []
      for epoch in self._sweep_information.keys():

        intgr = self._sweep_information[epoch]['integrater']
        hklin = self._sweep_information[epoch]['corrected_intensities']
        refiner = intgr.get_integrater_refiner()

        # in here need to consider what to do if the user has
        # assigned the pointgroup on the command line ...

        if not self._scalr_input_pointgroup:
          pointgroup, reindex_op, ntr = \
                      self._pointless_indexer_jiffy(hklin, refiner)

          if ntr:

            # Bug # 3373

            Debug.write('Reindex to standard (PIJ): %s' % \
                        reindex_op)

            intgr.set_integrater_reindex_operator(
                reindex_op, compose = False)
            reindex_op = 'h,k,l'
            need_to_return = True

        else:

          # 27/FEB/08 to support user assignment of pointgroups

          Debug.write('Using input pointgroup: %s' % \
                      self._scalr_input_pointgroup)
          pointgroup = self._scalr_input_pointgroup
          reindex_op = 'h,k,l'

        intgr.set_integrater_reindex_operator(reindex_op)
        intgr.set_integrater_spacegroup_number(
            Syminfo.spacegroup_name_to_number(pointgroup))
        self._sweep_information[epoch]['corrected_intensities'] \
          = intgr.get_integrater_corrected_intensities()

        # convert the XDS_ASCII for this sweep to mtz - on the next
        # get this should be in the correct setting...

        dname = self._sweep_information[epoch]['dname']
        sname = intgr.get_integrater_sweep_name()
        hklin = self._sweep_information[epoch]['corrected_intensities']
        hklout = os.path.join(self.get_working_directory(),
                              '%s_%s.mtz' % (dname, sname))

        FileHandler.record_temporary_file(hklout)

        # now use pointless to make this conversion

        pointless = self._factory.Pointless()
        pointless.set_xdsin(hklin)
        pointless.set_hklout(hklout)
        pointless.xds_to_mtz()
        brehm_diederichs_files_in.append(hklout)

      # now run cctbx.brehm_diederichs to figure out the indexing hand for
      # each sweep
      from xia2.Wrappers.Cctbx.BrehmDiederichs import BrehmDiederichs
      brehm_diederichs = BrehmDiederichs()
      brehm_diederichs.set_working_directory(self.get_working_directory())
      auto_logfiler(brehm_diederichs)
      brehm_diederichs.set_input_filenames(brehm_diederichs_files_in)
      # 1 or 3? 1 seems to work better?
      brehm_diederichs.set_asymmetric(1)
      brehm_diederichs.run()
      reindexing_dict = brehm_diederichs.get_reindexing_dict()

      for epoch in self._sweep_information.keys():

        intgr = self._sweep_information[epoch]['integrater']

        dname = self._sweep_information[epoch]['dname']
        sname = intgr.get_integrater_sweep_name()
        hklin = self._sweep_information[epoch]['corrected_intensities']
        hklout = os.path.join(self.get_working_directory(),
                              '%s_%s.mtz' % (dname, sname))

        # apply the reindexing operator determined by cctbx.brehm_diederichs;
        # the dict maps each input mtz (by absolute path) to its operator
        reindex_op = reindexing_dict.get(os.path.abspath(hklout))
        assert reindex_op is not None
        intgr.set_integrater_reindex_operator(reindex_op)

        # and copy the reflection file to the local directory
        hklin = self._sweep_information[epoch]['corrected_intensities']
        hklout = os.path.join(self.get_working_directory(),
                              '%s_%s.HKL' % (dname, sname))

        Debug.write('Copying %s to %s' % (hklin, hklout))
        shutil.copyfile(hklin, hklout)

        # record just the local file name...
        self._sweep_information[epoch][
            'prepared_reflections'] = os.path.split(hklout)[-1]

    elif len(self._sweep_information.keys()) > 1 and \
           not self._reference:
      # need to generate a reference reflection file - generate this
      # from the reflections in self._first_epoch
      #
      # FIXME this should really use the Brehm and Diederichs method
      # if you have lots of little sweeps...

      # work consistently from the first epoch; the references below reuse it
      epoch = self._first_epoch
      intgr = self._sweep_information[epoch]['integrater']

      hklin = self._sweep_information[epoch]['corrected_intensities']
      refiner = intgr.get_integrater_refiner()

      if self._scalr_input_pointgroup:
        Debug.write('Using input pointgroup: %s' % \
                    self._scalr_input_pointgroup)
        pointgroup = self._scalr_input_pointgroup
        ntr = False
        reindex_op = 'h,k,l'

      else:
        pointgroup, reindex_op, ntr = self._pointless_indexer_jiffy(
            hklin, refiner)

        Debug.write('X1698: %s: %s' % (pointgroup, reindex_op))

      reference_reindex_op = intgr.get_integrater_reindex_operator()

      if ntr:

        # Bug # 3373

        intgr.set_integrater_reindex_operator(
            reindex_op, compose = False)
        reindex_op = 'h,k,l'
        need_to_return = True

      self._xds_spacegroup = Syminfo.spacegroup_name_to_number(pointgroup)

      # next pass this reindexing operator back to the source
      # of the reflections

      intgr.set_integrater_reindex_operator(reindex_op)
      intgr.set_integrater_spacegroup_number(
          Syminfo.spacegroup_name_to_number(pointgroup))
      self._sweep_information[epoch]['corrected_intensities'] \
        = intgr.get_integrater_corrected_intensities()

      hklin = self._sweep_information[epoch]['corrected_intensities']

      hklout = os.path.join(self.get_working_directory(),
                            'xds-pointgroup-reference-unsorted.mtz')
      FileHandler.record_temporary_file(hklout)

      # now use pointless to handle this conversion

      pointless = self._factory.Pointless()
      pointless.set_xdsin(hklin)
      pointless.set_hklout(hklout)
      pointless.xds_to_mtz()

      self._reference = hklout

    if self._reference:

      from xia2.Driver.DriverFactory import DriverFactory

      def run_one_sweep(args):
        sweep_information = args[0]
        pointless_indexer_jiffy = args[1]
        factory = args[2]
        job_type = args[3]

        if job_type:
          DriverFactory.set_driver_type(job_type)

        intgr = sweep_information['integrater']
        hklin = sweep_information['corrected_intensities']
        refiner = intgr.get_integrater_refiner()

        # in here need to consider what to do if the user has
        # assigned the pointgroup on the command line ...

        if not self._scalr_input_pointgroup:
          pointgroup, reindex_op, ntr = \
                      self._pointless_indexer_jiffy(hklin, refiner)

          if ntr:

            # Bug # 3373

            Debug.write('Reindex to standard (PIJ): %s' % \
                        reindex_op)

            intgr.set_integrater_reindex_operator(
                reindex_op, compose = False)
            reindex_op = 'h,k,l'
            need_to_return = True

        else:

          # 27/FEB/08 to support user assignment of pointgroups

          Debug.write('Using input pointgroup: %s' % \
                      self._scalr_input_pointgroup)
          pointgroup = self._scalr_input_pointgroup
          reindex_op = 'h,k,l'

        intgr.set_integrater_reindex_operator(reindex_op)
        intgr.set_integrater_spacegroup_number(
            Syminfo.spacegroup_name_to_number(pointgroup))
        sweep_information['corrected_intensities'] \
          = intgr.get_integrater_corrected_intensities()

        # convert the XDS_ASCII for this sweep to mtz - on the next
        # get this should be in the correct setting...

        hklin = sweep_information['corrected_intensities']

        # now use pointless to make this conversion

        # try with no conversion?!

        pointless = self._factory.Pointless()
        pointless.set_xdsin(hklin)
        hklout = os.path.join(
          self.get_working_directory(),
          '%d_xds-pointgroup-unsorted.mtz' %pointless.get_xpid())
        FileHandler.record_temporary_file(hklout)
        pointless.set_hklout(hklout)
        pointless.xds_to_mtz()

        pointless = self._factory.Pointless()
        pointless.set_hklin(hklout)
        pointless.set_hklref(self._reference)
        pointless.decide_pointgroup()

        pointgroup = pointless.get_pointgroup()
        reindex_op = pointless.get_reindex_operator()

        # for debugging print out the reindexing operations and
        # what have you...

        Debug.write('Reindex to standard: %s' % reindex_op)

        # this should send back enough information that this
        # is in the correct pointgroup (from the call above) and
        # also in the correct setting, from the interaction
        # with the reference set... - though I guess that the
        # spacegroup number should not have changed, right?

        # set the reindex operation afterwards... though if the
        # spacegroup number is the same this should make no
        # difference, right?!

        intgr.set_integrater_spacegroup_number(
            Syminfo.spacegroup_name_to_number(pointgroup))
        intgr.set_integrater_reindex_operator(reindex_op)
        sweep_information['corrected_intensities'] \
          = intgr.get_integrater_corrected_intensities()

        # and copy the reflection file to the local directory

        dname = sweep_information['dname']
        sname = intgr.get_integrater_sweep_name()
        hklin = sweep_information['corrected_intensities']
        hklout = os.path.join(self.get_working_directory(),
                              '%s_%s.HKL' % (dname, sname))

        Debug.write('Copying %s to %s' % (hklin, hklout))
        shutil.copyfile(hklin, hklout)

        # record just the local file name...
        sweep_information['prepared_reflections'] = os.path.split(hklout)[-1]
        return sweep_information

      from libtbx import easy_mp
      params = PhilIndex.get_python_object()
      mp_params = params.xia2.settings.multiprocessing
      njob = mp_params.njob

      if njob > 1:
        # cache drivertype
        drivertype = DriverFactory.get_driver_type()

        args = [
          (self._sweep_information[epoch], self._pointless_indexer_jiffy,
           self._factory, mp_params.type)
                for epoch in self._sweep_information.keys()]
        results_list = easy_mp.parallel_map(
          run_one_sweep, args, params=None,
          processes=njob,
          method="threading",
          asynchronous=True,
          callback=None,
          preserve_order=True,
          preserve_exception_message=True)

        # restore drivertype
        DriverFactory.set_driver_type(drivertype)

        # results should be given back in the same order
        for i, epoch in enumerate(self._sweep_information.keys()):
          self._sweep_information[epoch] = results_list[i]

      else:
        for epoch in self._sweep_information.keys():
          self._sweep_information[epoch] = run_one_sweep(
            (self._sweep_information[epoch], self._pointless_indexer_jiffy,
             self._factory, None))

    else:
      # convert the XDS_ASCII for this sweep to mtz

      epoch = self._first_epoch
      intgr = self._sweep_information[epoch]['integrater']
      refiner = intgr.get_integrater_refiner()
      sname = intgr.get_integrater_sweep_name()

      hklout = os.path.join(self.get_working_directory(),
                            '%s-pointless.mtz' % sname)
      FileHandler.record_temporary_file(hklout)

      pointless = self._factory.Pointless()
      pointless.set_xdsin(self._sweep_information[epoch]['corrected_intensities'])
      pointless.set_hklout(hklout)
      pointless.xds_to_mtz()

      # run it through pointless interacting with the
      # Indexer which belongs to this sweep

      hklin = hklout

      if self._scalr_input_pointgroup:
        Debug.write('Using input pointgroup: %s' % \
                    self._scalr_input_pointgroup)
        pointgroup = self._scalr_input_pointgroup
        ntr = False
        reindex_op = 'h,k,l'

      else:
        pointgroup, reindex_op, ntr = self._pointless_indexer_jiffy(
            hklin, refiner)

      if ntr:

        # if we need to return, reset the reindexing operator: at
        # this stage we are only concerned with the correctness of
        # individual pointgroups - see bug # 3373

        reindex_op = 'h,k,l'
        intgr.set_integrater_reindex_operator(
            reindex_op, compose = False)

        need_to_return = True

      self._xds_spacegroup = Syminfo.spacegroup_name_to_number(pointgroup)

      # next pass this reindexing operator back to the source
      # of the reflections

      intgr.set_integrater_reindex_operator(reindex_op)
      intgr.set_integrater_spacegroup_number(
          Syminfo.spacegroup_name_to_number(pointgroup))
      self._sweep_information[epoch]['corrected_intensities'] \
        = intgr.get_integrater_corrected_intensities()

      hklin = self._sweep_information[epoch]['corrected_intensities']
      dname = self._sweep_information[epoch]['dname']
      hklout = os.path.join(self.get_working_directory(),
                            '%s_%s.HKL' % (dname, sname))

      # and copy the reflection file to the local
      # directory

      Debug.write('Copying %s to %s' % (hklin, hklout))
      shutil.copyfile(hklin, hklout)

      # record just the local file name...
      self._sweep_information[epoch][
          'prepared_reflections'] = os.path.split(hklout)[-1]

    if need_to_return:
      self.set_scaler_done(False)
      self.set_scaler_prepare_done(False)
      return

    unit_cell_list = []

    for epoch in self._sweep_information.keys():
      integrater = self._sweep_information[epoch]['integrater']
      cell = integrater.get_integrater_cell()
      n_ref = integrater.get_integrater_n_ref()

      Debug.write('Cell for %s: %.2f %.2f %.2f %.2f %.2f %.2f' % \
                  (integrater.get_integrater_sweep_name(),
                   cell[0], cell[1], cell[2],
                   cell[3], cell[4], cell[5]))
      Debug.write('=> %d reflections' % n_ref)

      unit_cell_list.append((cell, n_ref))

    self._scalr_cell = compute_average_unit_cell(unit_cell_list)

    self._scalr_resolution_limits = { }

    Debug.write('Determined unit cell: %.2f %.2f %.2f %.2f %.2f %.2f' % \
                tuple(self._scalr_cell))

    if os.path.exists(os.path.join(
        self.get_working_directory(),
        'REMOVE.HKL')):
      os.remove(os.path.join(
          self.get_working_directory(),
          'REMOVE.HKL'))

      Debug.write('Deleting REMOVE.HKL at end of scale prepare.')

    return
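
# Illustrative sketch (requires a cctbx/libtbx environment; the worker and
# payload below are made up, not xia2 API) of the fan-out pattern used above:
# map one function over per-sweep argument tuples with easy_mp.parallel_map,
# relying on preserve_order=True so results line up with the input order.
from libtbx import easy_mp

def process_one(args):
    # stand-in for run_one_sweep: unpack the single tuple argument
    epoch, payload = args
    return epoch, payload.upper()

work = [(1, 'sweep_a'), (2, 'sweep_b'), (3, 'sweep_c')]
results = easy_mp.parallel_map(
    process_one, work,
    processes=2,
    method="threading",              # threads share the cached driver type
    preserve_order=True,             # results[i] corresponds to work[i]
    preserve_exception_message=True)
assert [epoch for epoch, _ in results] == [epoch for epoch, _ in work]
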
Code example #23
File: XCrystal.py  Project: xia2/xia2
  def get_output(self):

    result = 'Crystal: %s\n' % self._name

    if self._aa_sequence:
      result += 'Sequence: %s\n' % self._aa_sequence.get_sequence()
    for wavelength in self._wavelengths.keys():
      result += self._wavelengths[wavelength].get_output()

    scaler = self._get_scaler()
    if scaler.get_scaler_finish_done():
      for wname, xwav in self._wavelengths.iteritems():
        for xsweep in xwav.get_sweeps():
          idxr = xsweep._get_indexer()
          if PhilIndex.params.xia2.settings.show_template:
            result += '%s\n' %banner('Autoindexing %s (%s)' %(
              idxr.get_indexer_sweep_name(), idxr.get_template()))
          else:
            result += '%s\n' %banner(
              'Autoindexing %s' %idxr.get_indexer_sweep_name())
          result += '%s\n' %idxr.show_indexer_solutions()

          intgr = xsweep._get_integrater()
          if PhilIndex.params.xia2.settings.show_template:
            result += '%s\n' %banner('Integrating %s (%s)' %(
              intgr.get_integrater_sweep_name(), intgr.get_template()))
          else:
            result += '%s\n' %banner(
              'Integrating %s' %intgr.get_integrater_sweep_name())
          result += '%s\n' % intgr.show_per_image_statistics()

      result += '%s\n' %banner('Scaling %s' %self.get_name())

      for (dname, sname), (limit, suggested) in scaler.get_scaler_resolution_limits().iteritems():
        if suggested is None or limit == suggested:
          result += 'Resolution limit for %s/%s: %5.2f\n' %(dname, sname, limit)
        else:
          result += 'Resolution limit for %s/%s: %5.2f (%5.2f suggested)\n' %(dname, sname, limit, suggested)

    # this is now deprecated - be explicit in what you are
    # asking for...
    reflections_all = self.get_scaled_merged_reflections()
    statistics_all = self._get_scaler().get_scaler_statistics()

    # print some of these statistics, perhaps?

    for key in statistics_all.keys():
      result += format_statistics(statistics_all[key], caption='For %s/%s/%s' % key)

    # then print out some "derived" information based on the
    # scaling - this is presented through the Scaler interface
    # explicitly...

    cell = self._get_scaler().get_scaler_cell()
    cell_esd = self._get_scaler().get_scaler_cell_esd()
    spacegroups = self._get_scaler().get_scaler_likely_spacegroups()

    spacegroup = spacegroups[0]
    resolution = self._get_scaler().get_scaler_highest_resolution()

    from cctbx import sgtbx
    sg = sgtbx.space_group_type(str(spacegroup))
    spacegroup = sg.lookup_symbol()
    CIF.set_spacegroup(sg)
    mmCIF.set_spacegroup(sg)

    if len(self._wavelengths) == 1:
      CIF.set_wavelengths([w.get_wavelength() for w in self._wavelengths.itervalues()])
      mmCIF.set_wavelengths([w.get_wavelength() for w in self._wavelengths.itervalues()])
    else:
      for wavelength in self._wavelengths.keys():
        full_wave_name = '%s_%s_%s' % (self._project._name, self._name, wavelength)
        CIF.get_block(full_wave_name)['_diffrn_radiation_wavelength'] = \
          self._wavelengths[wavelength].get_wavelength()
        mmCIF.get_block(full_wave_name)['_diffrn_radiation_wavelength'] = \
          self._wavelengths[wavelength].get_wavelength()
      CIF.set_wavelengths({name: wave.get_wavelength() for name, wave in self._wavelengths.iteritems()})
      mmCIF.set_wavelengths({name: wave.get_wavelength() for name, wave in self._wavelengths.iteritems()})

    result += 'Assuming spacegroup: %s\n' % spacegroup
    if len(spacegroups) > 1:
      result += 'Other likely alternatives are:\n'
      for sg in spacegroups[1:]:
        result += '%s\n' % sg

    if cell_esd:
      from libtbx.utils import format_float_with_standard_uncertainty
      def match_formatting(dimA, dimB):
        def conditional_split(s):
          return (s[:s.index('.')],s[s.index('.'):]) if '.' in s else (s, '')
        A, B = conditional_split(dimA), conditional_split(dimB)
        maxlen = (max(len(A[0]), len(B[0])), max(len(A[1]), len(B[1])))
        return (
          A[0].rjust(maxlen[0])+A[1].ljust(maxlen[1]),
          B[0].rjust(maxlen[0])+B[1].ljust(maxlen[1])
        )
      formatted_cell_esds = tuple(format_float_with_standard_uncertainty(v, sd) for v, sd in zip(cell, cell_esd))
      formatted_rows = (formatted_cell_esds[0:3], formatted_cell_esds[3:6])
      formatted_rows = zip(*(match_formatting(l, a) for l, a in zip(*formatted_rows)))
      result += 'Unit cell (with estimated std devs):\n'
      result += '%s %s %s\n%s %s %s\n' % (formatted_rows[0] + formatted_rows[1])
    else:
      result += 'Unit cell:\n'
      result += '%7.3f %7.3f %7.3f\n%7.3f %7.3f %7.3f\n' % tuple(cell)

    # now, use this information and the sequence (if provided)
    # and also matthews_coef (should I be using this directly, here?)
    # to compute a likely number of molecules in the ASU and also
    # the solvent content...

    if self._aa_sequence:
      residues = self._aa_sequence.get_sequence()
      if residues:
        nres = len(residues)

        # first compute the number of molecules using the K&R
        # method

        nmol = compute_nmol(cell[0], cell[1], cell[2],
                            cell[3], cell[4], cell[5],
                            spacegroup, resolution, nres)

        # then compute the solvent fraction

        solvent = compute_solvent(cell[0], cell[1], cell[2],
                                  cell[3], cell[4], cell[5],
                                  spacegroup, nmol, nres)

        result += 'Likely number of molecules in ASU: %d\n' % nmol
        result += 'Giving solvent fraction:        %4.2f\n' % solvent

        self._nmol = nmol

    if isinstance(reflections_all, dict):
      for format in reflections_all.keys():
        result += '%s format:\n' % format
        reflections = reflections_all[format]

        if isinstance(reflections, dict):
          for wavelength in reflections.keys():
            target = FileHandler.get_data_file(
                reflections[wavelength])
            result += 'Scaled reflections (%s): %s\n' % \
                      (wavelength, target)

        else:
          target = FileHandler.get_data_file(
              reflections)
          result += 'Scaled reflections: %s\n' % target

    CIF.write_cif()
    mmCIF.write_cif()

    return result
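
# Standalone sketch of the decimal-alignment trick used above for the
# "unit cell (with estimated std devs)" block: split each formatted value
# at the decimal point and pad both halves to common widths, so the two
# rows of cell parameters line up column by column.
def conditional_split(s):
    return (s[:s.index('.')], s[s.index('.'):]) if '.' in s else (s, '')

def match_formatting(dim_a, dim_b):
    A, B = conditional_split(dim_a), conditional_split(dim_b)
    width = (max(len(A[0]), len(B[0])), max(len(A[1]), len(B[1])))
    return (A[0].rjust(width[0]) + A[1].ljust(width[1]),
            B[0].rjust(width[0]) + B[1].ljust(width[1]))

print(match_formatting('78.045(2)', '109.23(11)'))
# (' 78.045(2)', '109.23(11)') - decimal points now share a column
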
Code example #24
File: MosflmIntegrater.py  Project: xia2/xia2
  def _mosflm_parallel_integrate(self):
    '''Perform the integration as before, but this time as a
    number of parallel Mosflm jobs (hence, in separate directories)
    and including a step of pre-refinement of the mosaic spread and
    missets. This will all be kind of explicit and hence probably
    messy!'''

    refinr = self.get_integrater_refiner()

    lattice = refinr.get_refiner_lattice()
    spacegroup_number = lattice_to_spacegroup(lattice)
    mosaic = refinr.get_refiner_payload('mosaic')
    beam = refinr.get_refiner_payload('beam')
    distance = refinr.get_refiner_payload('distance')
    matrix = refinr.get_refiner_payload('mosflm_orientation_matrix')

    integration_params = refinr.get_refiner_payload(
      'mosflm_integration_parameters')

    if integration_params:
      if 'separation' in integration_params:
        self.set_integrater_parameter(
            'mosflm', 'separation',
            '%s %s' % tuple(integration_params['separation']))
      if 'raster' in integration_params:
        self.set_integrater_parameter(
            'mosflm', 'raster',
            '%d %d %d %d %d' % tuple(integration_params['raster']))

    refinr.set_refiner_payload('mosflm_integration_parameters', None)
    pname, xname, dname = self.get_integrater_project_info()

    # what follows below should (i) be run in separate directories
    # and (ii) be repeated N=parallel times.

    nproc = PhilIndex.params.xia2.settings.multiprocessing.nproc
    parallel = nproc

    # FIXME this is something of a kludge - if too few frames refinement
    # and integration does not work well... ideally want at least 15
    # frames / chunk (say)
    nframes = self._intgr_wedge[1] - self._intgr_wedge[0] + 1

    if parallel > nframes // 15:
      parallel = nframes // 15

    if not parallel:
      raise RuntimeError, 'parallel not set'
    if parallel < 2:
      raise RuntimeError, 'parallel not parallel: %s' % parallel

    jobs = []
    hklouts = []
    nref = 0

    # calculate the chunks to use
    offset = self.get_frame_offset()
    start = self._intgr_wedge[0] - offset
    end = self._intgr_wedge[1] - offset

    left_images = 1 + end - start
    left_chunks = parallel
    chunks = []

    while left_images > 0:
      size = left_images // left_chunks
      chunks.append((start, start + size - 1))
      start += size
      left_images -= size
      left_chunks -= 1

    summary_files = []

    for j in range(parallel):

      # make some working directories, as necessary - chunk-(0:N-1)
      wd = os.path.join(self.get_working_directory(),
                        'chunk-%d' % j)
      if not os.path.exists(wd):
        os.makedirs(wd)

      job = MosflmIntegrate()
      job.set_working_directory(wd)

      auto_logfiler(job)

      l = refinr.get_refiner_lattice()

      # create the starting point
      f = open(os.path.join(wd, 'xiaintegrate-%s.mat' % l), 'w')
      for m in matrix:
        f.write(m)
      f.close()

      job.set_refine_profiles(self._mosflm_refine_profiles)

      # N.B. for harvesting need to append N to dname.

      if pname is not None and xname is not None and dname is not None:
        Debug.write('Harvesting: %s/%s/%s' %
                    (pname, xname, dname))
        harvest_dir = self.get_working_directory()
        temp_dname = '%s_%s' % \
                     (dname, self.get_integrater_sweep_name())
        job.set_pname_xname_dname(pname, xname, temp_dname)

      job.set_template(os.path.basename(self.get_template()))
      job.set_directory(self.get_directory())

      # check for ice - and if so, exclude (ranges taken from
      # XDS documentation)
      if self.get_integrater_ice() != 0:
        Debug.write('Excluding ice rings')
        job.set_exclude_ice(True)

      # exclude specified resolution ranges
      if len(self.get_integrater_excluded_regions()) != 0:
        regions = self.get_integrater_excluded_regions()
        Debug.write('Excluding regions: %r' % regions)
        job.set_exclude_regions(regions)

      mask = standard_mask(self.get_detector())
      for m in mask:
        job.add_instruction(m)

      job.set_input_mat_file('xiaintegrate-%s.mat' % l)

      job.set_beam_centre(beam)
      job.set_distance(distance)
      job.set_space_group_number(spacegroup_number)
      job.set_mosaic(mosaic)

      if self.get_wavelength_prov() == 'user':
        job.set_wavelength(self.get_wavelength())

      parameters = self.get_integrater_parameters('mosflm')
      job.update_parameters(parameters)

      if self._mosflm_gain:
        job.set_gain(self._mosflm_gain)

      # check for resolution limits
      if self._intgr_reso_high > 0.0:
        job.set_d_min(self._intgr_reso_high)
      if self._intgr_reso_low:
        job.set_d_max(self._intgr_reso_low)

      if PhilIndex.params.general.backstop_mask:
        from xia2.Toolkit.BackstopMask import BackstopMask
        mask = BackstopMask(PhilIndex.params.general.backstop_mask)
        mask = mask.calculate_mask_mosflm(self.get_header())
        job.set_mask(mask)

      detector = self.get_detector()
      detector_width, detector_height = detector[0].get_image_size_mm()

      lim_x = 0.5 * detector_width
      lim_y = 0.5 * detector_height

      Debug.write('Scanner limits: %.1f %.1f' % (lim_x, lim_y))
      job.set_limits(lim_x, lim_y)

      job.set_fix_mosaic(self._mosflm_postref_fix_mosaic)

      job.set_pre_refinement(True)
      job.set_image_range(chunks[j])

      # queue the job; all chunks are started together below

      jobs.append(job)

    # ok, at this stage I need to ...
    #
    # (i) accumulate the statistics as a function of batch
    # (ii) munge them into a single block
    #
    # This is likely to be a pain in the arse!

    all_residuals = []

    threads = []

    for j in range(parallel):
      job = jobs[j]

      # start each job in a background thread; we wait for the
      # results in the collection loop below

      thread = Background(job, 'run')
      thread.start()
      threads.append(thread)

    mosaics = []
    postref_result = { }

    integrated_images_first = 1.0e6
    integrated_images_last = -1.0e6
    self._intgr_per_image_statistics = {}

    for j in range(parallel):
      thread = threads[j]
      thread.stop()
      job = jobs[j]

      # get the log file
      output = job.get_all_output()

      # record a copy of it, perhaps - though not if parallel
      if self.get_integrater_sweep_name() and False:
        pname, xname, dname = self.get_integrater_project_info()
        FileHandler.record_log_file(
            '%s %s %s %s mosflm integrate' % \
            (self.get_integrater_sweep_name(),
             pname, xname, '%s_%d' % (dname, j)),
            job.get_log_file())

      # look for things that we want to know...
      # that is, the output reflection file name, the updated
      # value for the gain (if present,) any warnings, errors,
      # or just interesting facts.

      batches = job.get_batches_out()
      integrated_images_first = min(batches[0], integrated_images_first)
      integrated_images_last = max(batches[1], integrated_images_last)

      mosaics.extend(job.get_mosaic_spreads())

      if min(mosaics) < 0:
        raise IntegrationError, 'negative mosaic spread: %s' % min(mosaics)

      if (job.get_detector_gain_error() and not
          (self.get_imageset().get_detector()[0].get_type() == 'SENSOR_PAD')):
        gain = job.get_suggested_gain()
        if gain is not None:
          self.set_integrater_parameter('mosflm', 'gain', gain)
          self.set_integrater_export_parameter('mosflm', 'gain', gain)
          if self._mosflm_gain:
            Debug.write('GAIN updated to %f' % gain)
          else:
            Debug.write('GAIN found to be %f' % gain)

          self._mosflm_gain = gain
          self._mosflm_rerun_integration = True

      hklout = job.get_hklout()
      Debug.write('Integration output: %s' % hklout)
      hklouts.append(hklout)

      nref += job.get_nref()

      # if a BGSIG error happened try not refining the
      # profile and running again...

      if job.get_bgsig_too_large():
        if not self._mosflm_refine_profiles:
          raise RuntimeError, 'BGSIG error with profiles fixed'

        Debug.write(
            'BGSIG error detected - try fixing profile...')

        self._mosflm_refine_profiles = False
        self.set_integrater_done(False)

        return

      if job.get_getprof_error():
        Debug.write(
            'GETPROF error detected - try fixing profile...')
        self._mosflm_refine_profiles = False
        self.set_integrater_done(False)

        return

      # here
      # write the report for each image as .*-#$ to Chatter -
      # detailed report will be written automagically to science...

      self._intgr_per_image_statistics.update(job.get_per_image_statistics())
      postref_result.update(job.get_postref_result())

      # inspect the output for e.g. very high weighted residuals

      all_residuals.extend(job.get_residuals())

    self._intgr_batches_out = (integrated_images_first,
                               integrated_images_last)

    if mosaics:
      self.set_integrater_mosaic_min_mean_max(
          min(mosaics), sum(mosaics) / len(mosaics), max(mosaics))
    else:
      # fall back to the single mosaic estimate from the refiner payload
      m = mosaic
      self.set_integrater_mosaic_min_mean_max(m, m, m)

    Chatter.write(self.show_per_image_statistics())

    Chatter.write('Mosaic spread: %.3f < %.3f < %.3f' % \
                  self.get_integrater_mosaic_min_mean_max())

    # gather the statistics from the postrefinement for all sweeps
    # now write this to a postrefinement log

    postref_log = os.path.join(self.get_working_directory(),
                               'postrefinement.log')

    fout = open(postref_log, 'w')

    fout.write('$TABLE: Postrefinement for %s:\n' % \
               self._intgr_sweep_name)
    fout.write('$GRAPHS: Missetting angles:A:1, 2, 3, 4: $$\n')
    fout.write('Batch PhiX PhiY PhiZ $$ Batch PhiX PhiY PhiZ $$\n')

    for image in sorted(postref_result):
      phix = postref_result[image].get('phix', 0.0)
      phiy = postref_result[image].get('phiy', 0.0)
      phiz = postref_result[image].get('phiz', 0.0)

      fout.write('%d %5.2f %5.2f %5.2f\n' % \
                 (image, phix, phiy, phiz))

    fout.write('$$\n')
    fout.close()

    if self.get_integrater_sweep_name():
      pname, xname, dname = self.get_integrater_project_info()
      FileHandler.record_log_file('%s %s %s %s postrefinement' % \
                                  (self.get_integrater_sweep_name(),
                                   pname, xname, dname),
                                  postref_log)

    hklouts.sort()

    hklout = os.path.join(self.get_working_directory(),
                          os.path.split(hklouts[0])[-1])

    Debug.write('Sorting data to %s' % hklout)
    for hklin in hklouts:
      Debug.write('<= %s' % hklin)

    sortmtz = Sortmtz()
    sortmtz.set_hklout(hklout)
    for hklin in hklouts:
      sortmtz.add_hklin(hklin)

    sortmtz.sort()

    self._mosflm_hklout = hklout

    return self._mosflm_hklout
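
# Standalone sketch of the chunking rule above: split an inclusive image
# range into `parallel` contiguous, near-equal chunks; integer division
# makes the earlier chunks the smaller ones when the range does not
# divide evenly.
def split_chunks(start, end, parallel):
    left_images = 1 + end - start
    left_chunks = parallel
    chunks = []
    while left_images > 0:
        size = left_images // left_chunks
        chunks.append((start, start + size - 1))
        start += size
        left_images -= size
        left_chunks -= 1
    return chunks

print(split_chunks(1, 90, 4))  # [(1, 22), (23, 44), (45, 67), (68, 90)]
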
Code example #25
File: XSweep.py  Project: hainm/xia2
  def __init__(self, name,
               wavelength,
               sample,
               directory = None,
               image = None,
               beam = None,
               reversephi = False,
               distance = None,
               gain = 0.0,
               dmin = 0.0,
               dmax = 0.0,
               polarization = 0.0,
               frames_to_process = None,
               user_lattice = None,
               user_cell = None,
               epoch = 0,
               ice = False,
               excluded_regions = []):
    '''Create a new sweep named name, belonging to XWavelength object
    wavelength, representing the images in directory starting with image,
    with beam centre optionally defined.'''

    # + check the wavelength is an XWavelength object
    #   raise an exception if not... or not...

    if not wavelength.__class__.__name__ == 'XWavelength':
      pass

    # FIXME bug 2221 if DIRECTORY starts with ~/ or ~graeme (say) need to
    # interpret this properly - e.g. map it to a full PATH.

    directory = expand_path(directory)

    # bug # 2274 - maybe migrate the data to a local disk (this
    # will depend if the user has added -migrate_data to the cl)

    directory = FileHandler.migrate(directory)

    self._name = name
    self._wavelength = wavelength
    self._sample = sample
    self._directory = directory
    self._image = image
    self._reversephi = reversephi
    self._epoch = epoch
    self._user_lattice = user_lattice
    self._user_cell = user_cell
    self._header = { }
    self._resolution_high = dmin
    self._resolution_low = dmax
    self._ice = ice
    self._excluded_regions = excluded_regions
    self._imageset = None

    # FIXME in here also need to be able to accumulate the total
    # dose from all experimental measurements (complex) and provide
    # a _epoch_to_dose dictionary or some such... may be fiddly as
    # this will need to parse across multiple templates. c/f Bug # 2798

    self._epoch_to_image = { }
    self._image_to_epoch = { }

    # to allow first, last image for processing to be
    # set... c/f integrater interface
    self._frames_to_process = frames_to_process

    # + derive template, list of images

    if directory and image:
      self._template, self._directory = \
                      image2template_directory(os.path.join(directory,
                                                            image))

      from xia2.Schema import load_imagesets
      imagesets = load_imagesets(
        self._template, self._directory, image_range=self._frames_to_process,
        reversephi=(Flags.get_reversephi() or self._reversephi))

      assert len(imagesets) == 1, "one imageset expected, %d found" % \
          len(imagesets)
      self._imageset = copy.deepcopy(imagesets[0])
      start, end = self._imageset.get_array_range()
      self._images = list(range(start+1, end+1))

      # FIXME in here check that (1) the list of images is continuous
      # and (2) that all of the images are readable. This should also
      # take into account frames_to_process if set.

      if self._frames_to_process is None:
        self._frames_to_process = min(self._images), max(self._images)

      start, end = self._frames_to_process

      error = False

      from xia2.Handlers.Phil import PhilIndex
      params = PhilIndex.get_python_object()
      if params.general.check_image_files_readable:
        for j in range(start, end + 1):
          if not j in self._images:
            Debug.write('image %s missing' % \
                        self.get_image_name(j))
            error = True
            continue
          if not os.access(self.get_image_name(j), os.R_OK):
            Debug.write('image %s unreadable' % \
                        self.get_image_name(j))
            error = True
            continue

        if error:
          raise RuntimeError, 'problem with sweep %s' % self._name

      # + read the image header information into here?
      #   or don't I need it? it would be useful for checking
      #   against wavelength.getWavelength() I guess to make
      #   sure that the plumbing is all sound.

      # check that they match by closer than 0.0001A, if wavelength
      # is not None

      beam_ = self._imageset.get_beam()
      scan = self._imageset.get_scan()
      if wavelength is not None:

        # FIXME 29/NOV/06 if the wavelength wavelength value
        # is 0.0 then first set it to the header value - note
        # that this assumes that the header value is correct
        # (a reasonable assumption)

        if wavelength.get_wavelength() == 0.0:
          wavelength.set_wavelength(beam_.get_wavelength())

        # FIXME 08/DEC/06 in here need to allow for the fact
        # that the wavelength in the image header could be wrong and
        # in fact it should be replaced with the input value -
        # through the user will need to be warned of this and
        # also everything using the FrameProcessor interface
        # will also have to respect this!

        if math.fabs(beam_.get_wavelength() -
                     wavelength.get_wavelength()) > 0.0001:
          # format = 'wavelength for sweep %s does not ' + \
          # 'match wavelength %s'
          # raise RuntimeError, format  % \
          # (name, wavelength.get_name())

          format = 'Header wavelength for sweep %s different' + \
                   ' to assigned value (%4.2f vs. %4.2f)'

          Chatter.write(format % (name, beam_.get_wavelength(),
                                  wavelength.get_wavelength()))


      # also in here look at the image headers to see if we can
      # construct a mapping between exposure epoch and image ...

      images = []

      if self._frames_to_process:
        start, end = self._frames_to_process
        for j in self._images:
          if j >= start and j <= end:
            images.append(j)
      else:
        images = self._images

      for j in images:
        epoch = scan.get_image_epoch(j)
        if epoch == 0.0:
          epoch = float(os.stat(self._imageset.get_path(j-images[0])).st_mtime)
        self._epoch_to_image[epoch] = j
        self._image_to_epoch[j] = epoch

      epochs = self._epoch_to_image.keys()

      Debug.write('Exposure epoch for sweep %s: %d %d' % \
                  (self._template, min(epochs), max(epochs)))

    self._input_imageset = copy.deepcopy(self._imageset)

    # + get the lattice - can this be a pointer, so that when
    #   this object updates lattice it is globally-for-this-crystal
    #   updated? The lattice included directly in here includes an
    #   exact unit cell for data reduction, the crystal lattice
    #   contains an approximate unit cell which should be
    #   from the unit cells from all sweeps contained in the
    #   XCrystal. FIXME should I be using a LatticeInfo object
    #   in here? See what the Indexer interface produces. ALT:
    #   just provide an Indexer implementation "hook".
    #   See Headnote 001 above. See also _get_indexer,
    #   _get_integrater below.

    self._indexer = None
    self._refiner = None
    self._integrater = None

    # I don't need this - it is equivalent to self.getWavelength(
    # ).getCrystal().getLattice()
    # self._crystal_lattice = None

    # this means that this module will have to present largely the
    # same interface as Indexer and Integrater so that the calls
    # can be appropriately forwarded.

    # finally configure the beam if set

    if beam is not None:
      from dxtbx.model.detector_helpers import set_mosflm_beam_centre
      try:
        set_mosflm_beam_centre(self.get_imageset().get_detector(),
                               self.get_imageset().get_beam(),
                               beam)
      except AssertionError, e:
        Debug.write('Error setting mosflm beam centre: %s' % e)
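
# Hedged sketch (the helper name is made up) of the wavelength checks
# above: an assigned value of 0.0 adopts the image-header value, while a
# disagreement of more than 0.0001 A is reported but not treated as fatal.
import math

def reconcile_wavelength(assigned, header, name='SWEEP1'):
    if assigned == 0.0:
        return header
    if math.fabs(header - assigned) > 0.0001:
        print('Header wavelength for sweep %s different'
              ' to assigned value (%4.2f vs. %4.2f)'
              % (name, header, assigned))
    return assigned

print(reconcile_wavelength(0.0, 0.9794))     # adopts the header: 0.9794
print(reconcile_wavelength(0.9793, 0.9763))  # warns, keeps 0.9793
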
Code example #26
File: DialsRefiner.py  Project: hainm/xia2
  def _refine(self):

    for epoch, idxr in self._refinr_indexers.iteritems():
      # decide what images we are going to process, if not already
      # specified
      #if not self._intgr_wedge:
        #images = self.get_matching_images()
        #self.set_integrater_wedge(min(images),
                    #max(images))

      #Debug.write('DIALS INTEGRATE PREPARE:')
      #Debug.write('Wavelength: %.6f' % self.get_wavelength())
      #Debug.write('Distance: %.2f' % self.get_distance())

      #if not self._intgr_indexer:
        #self.set_integrater_indexer(DialsIndexer())
        #self.get_integrater_indexer().set_indexer_sweep(
        #self.get_integrater_sweep())

        #self._intgr_indexer.set_working_directory(
        #self.get_working_directory())

        #self._intgr_indexer.setup_from_imageset(self.get_imageset())

        #if self.get_frame_wedge():
        #wedge = self.get_frame_wedge()
        #Debug.write('Propogating wedge limit: %d %d' % wedge)
        #self._intgr_indexer.set_frame_wedge(wedge[0], wedge[1],
                          #apply_offset = False)

        ## this needs to be set up from the contents of the
        ## Integrater frame processer - wavelength &c.

        #if self.get_beam_centre():
        #self._intgr_indexer.set_beam_centre(self.get_beam_centre())

        #if self.get_distance():
        #self._intgr_indexer.set_distance(self.get_distance())

        #if self.get_wavelength():
        #self._intgr_indexer.set_wavelength(
          #self.get_wavelength())

      # get the unit cell from this indexer to initiate processing
      # if it is new... and also copy out all of the information for
      # the Dials indexer if not...

      experiments = idxr.get_indexer_experiment_list()

      indexed_experiments = idxr.get_indexer_payload("experiments_filename")
      indexed_reflections = idxr.get_indexer_payload("indexed_filename")

      if len(experiments) > 1:
        xsweeps = idxr._indxr_sweeps
        assert len(xsweeps) == len(experiments)
        assert len(self._refinr_sweeps) == 1 # don't currently support joint refinement
        xsweep = self._refinr_sweeps[0]
        i = xsweeps.index(xsweep)
        experiments = experiments[i:i+1]

        # Extract and output experiment and reflections for current sweep
        indexed_experiments = os.path.join(
          self.get_working_directory(),
          "%s_indexed_experiments.json" %xsweep.get_name())
        indexed_reflections = os.path.join(
          self.get_working_directory(),
          "%s_indexed_reflections.pickle" %xsweep.get_name())

        from dxtbx.serialize import dump
        dump.experiment_list(experiments, indexed_experiments)

        from libtbx import easy_pickle
        from scitbx.array_family import flex
        reflections = easy_pickle.load(
          idxr.get_indexer_payload("indexed_filename"))
        sel = reflections['id'] == i
        assert sel.count(True) > 0
        imageset_id = reflections['imageset_id'].select(sel)
        assert imageset_id.all_eq(imageset_id[0])
        sel = reflections['imageset_id'] == imageset_id[0]
        reflections = reflections.select(sel)
        # set indexed reflections to id == 0 and imageset_id == 0
        reflections['id'].set_selected(reflections['id'] == i, 0)
        reflections['imageset_id'] = flex.int(len(reflections), 0)
        easy_pickle.dump(indexed_reflections, reflections)

      assert len(experiments.crystals()) == 1 # currently only handle one lattice/sweep
      crystal_model = experiments.crystals()[0]
      lattice = idxr.get_indexer_lattice()

      # check if the lattice was user assigned...
      user_assigned = idxr.get_indexer_user_input_lattice()

      # XXX check that the indexer is an Dials indexer - if not then
      # create one...

      # set a low resolution limit (which isn't really used...)
      # this should perhaps be done more intelligently from an
      # analysis of the spot list or something...?

      #if not self.get_integrater_low_resolution():

        #dmax = idxr.get_indexer_low_resolution()
        #self.set_integrater_low_resolution(dmax)

        #Debug.write('Low resolution set to: %s' % \
              #self.get_integrater_low_resolution())

      ## copy the data across
      from dxtbx.serialize import load, dump

      refiner = self.Refine()
      refiner.set_experiments_filename(indexed_experiments)
      refiner.set_indexed_filename(indexed_reflections)

      # XXX Temporary workaround for dials.refine error for scan_varying
      # refinement with smaller wedges
      total_phi_range = idxr._indxr_imagesets[0].get_scan().get_oscillation_range()[1]
      if total_phi_range < 5: # arbitrary value
        refiner.set_scan_varying(False)
      elif total_phi_range < 36:
        refiner.set_interval_width_degrees(total_phi_range/2)

      FileHandler.record_log_file('%s REFINE' % idxr.get_indexer_full_name(),
                                  refiner.get_log_file())
      refiner.run()
      self._refinr_experiments_filename \
        = refiner.get_refined_experiments_filename()
      experiments = load.experiment_list(self._refinr_experiments_filename)
      self._refinr_indexed_filename = refiner.get_refined_filename()
      self.set_refiner_payload("experiments.json", self._refinr_experiments_filename)
      self.set_refiner_payload("reflections.pickle", self._refinr_indexed_filename)

      # this is the result of the cell refinement
      self._refinr_cell = experiments.crystals()[0].get_unit_cell().parameters()
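
# Sketch (the function name is made up) of the scan-varying workaround
# above: below 5 degrees of total rotation, scan-varying refinement is
# disabled outright; below 36 degrees, the smoother interval width is set
# to half the wedge size. Both thresholds are the arbitrary values used
# in the code.
def scan_varying_settings(total_phi_range):
    if total_phi_range < 5:
        return {'scan_varying': False}
    if total_phi_range < 36:
        return {'scan_varying': True,
                'interval_width_degrees': total_phi_range / 2}
    return {'scan_varying': True}

print(scan_varying_settings(3))   # {'scan_varying': False}
print(scan_varying_settings(20))  # interval_width_degrees: 10.0
print(scan_varying_settings(90))  # scan-varying with default intervals
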
Code example #27
File: XDSScalerA.py  Project: xia2/xia2
  def _scale(self):
    '''Actually scale all of the data together.'''

    from xia2.Handlers.Environment import debug_memory_usage
    debug_memory_usage()

    Journal.block(
        'scaling', self.get_scaler_xcrystal().get_name(), 'XSCALE',
        {'scaling model':'default (all)'})

    epochs = self._sweep_information.keys()
    epochs.sort()

    xscale = self.XScale()

    xscale.set_spacegroup_number(self._xds_spacegroup)
    xscale.set_cell(self._scalr_cell)

    Debug.write('Set CELL: %.2f %.2f %.2f %.2f %.2f %.2f' % \
                tuple(self._scalr_cell))
    Debug.write('Set SPACEGROUP_NUMBER: %d' % \
                self._xds_spacegroup)

    Debug.write('Gathering measurements for scaling')

    for epoch in epochs:

      # get the prepared reflections
      reflections = self._sweep_information[epoch][
          'prepared_reflections']

      # and the get wavelength that this belongs to
      dname = self._sweep_information[epoch]['dname']
      sname = self._sweep_information[epoch]['sname']

      # and the resolution range for the reflections
      intgr = self._sweep_information[epoch]['integrater']
      Debug.write('Epoch: %d' % epoch)
      Debug.write('HKL: %s (%s/%s)' % (reflections, dname, sname))

      resolution_low = intgr.get_integrater_low_resolution()
      resolution_high, _ = self._scalr_resolution_limits.get((dname, sname), (0.0, None))

      resolution = (resolution_high, resolution_low)

      xscale.add_reflection_file(reflections, dname, resolution)

    # set the global properties of the sample
    xscale.set_crystal(self._scalr_xname)
    xscale.set_anomalous(self._scalr_anomalous)

    debug_memory_usage()
    xscale.run()

    scale_factor = xscale.get_scale_factor()

    Debug.write('XSCALE scale factor found to be: %e' % scale_factor)

    # record the log file

    pname = self._scalr_pname
    xname = self._scalr_xname

    FileHandler.record_log_file('%s %s XSCALE' % \
                                (pname, xname),
                                os.path.join(self.get_working_directory(),
                                             'XSCALE.LP'))

    # check for outlier reflections and if a number are found
    # then iterate (that is, rerun XSCALE, rejecting these outliers)

    if not PhilIndex.params.dials.fast_mode and not PhilIndex.params.xds.keep_outliers:
      xscale_remove = xscale.get_remove()
      if xscale_remove:
        current_remove = []
        final_remove = []

        # first ensure that there are no duplicate entries...
        if os.path.exists(os.path.join(
            self.get_working_directory(),
            'REMOVE.HKL')):
          for line in open(os.path.join(
              self.get_working_directory(),
              'REMOVE.HKL'), 'r').readlines():
            h, k, l = map(int, line.split()[:3])
            z = float(line.split()[3])

            if not (h, k, l, z) in current_remove:
              current_remove.append((h, k, l, z))

          for c in xscale_remove:
            if c in current_remove:
              continue
            final_remove.append(c)

          Debug.write(
              '%d alien reflections are already removed' % \
              (len(xscale_remove) - len(final_remove)))

        else:
          # we want to remove all of the new dodgy reflections
          final_remove = xscale_remove

        remove_hkl = open(os.path.join(
            self.get_working_directory(),
            'REMOVE.HKL'), 'w')

        z_min = PhilIndex.params.xds.z_min
        rejected = 0

        # write in the old reflections
        for remove in current_remove:
          z = remove[3]
          if z >= z_min:
            remove_hkl.write('%d %d %d %f\n' % remove)
          else:
            rejected += 1
        Debug.write('Wrote %d old reflections to REMOVE.HKL' % \
                    (len(current_remove) - rejected))
        Debug.write('Rejected %d as z < %f' % \
                    (rejected, z_min))

        # and the new reflections
        rejected = 0
        used = 0
        for remove in final_remove:
          z = remove[3]
          if z >= z_min:
            used += 1
            remove_hkl.write('%d %d %d %f\n' % remove)
          else:
            rejected += 1
        Debug.write('Wrote %d new reflections to REMOVE.HKL' % \
                    (len(final_remove) - rejected))
        Debug.write('Rejected %d as z < %f' % \
                    (rejected, z_min))

        remove_hkl.close()

        # we want to rerun the finishing step so...
        # unless we have added no new reflections
        if used:
          self.set_scaler_done(False)

    if not self.get_scaler_done():
      Chatter.write('Excluding outlier reflections Z > %.2f' %
                    PhilIndex.params.xds.z_min)
      return

    debug_memory_usage()

    # now get the reflection files out and merge them with aimless

    output_files = xscale.get_output_reflection_files()
    wavelength_names = output_files.keys()

    # these are per wavelength - also allow for user defined resolution
    # limits a la bug # 3183. No longer...

    for epoch in self._sweep_information.keys():

      input = self._sweep_information[epoch]

      intgr = input['integrater']

      rkey = input['dname'], input['sname']

      if intgr.get_integrater_user_resolution():
        dmin = intgr.get_integrater_high_resolution()

        if (rkey not in self._user_resolution_limits
            or dmin < self._user_resolution_limits[rkey]):
          self._scalr_resolution_limits[rkey] = (dmin, None)
          self._user_resolution_limits[rkey] = dmin

    self._scalr_scaled_refl_files = { }

    self._scalr_statistics = { }

    max_batches = 0
    mtz_dict = { }

    project_info = { }
    for epoch in self._sweep_information.keys():
      pname = self._scalr_pname
      xname = self._scalr_xname
      dname = self._sweep_information[epoch]['dname']
      reflections = os.path.split(
          self._sweep_information[epoch]['prepared_reflections'])[-1]
      project_info[reflections] = (pname, xname, dname)

    for epoch in self._sweep_information.keys():
      self._sweep_information[epoch]['scaled_reflections'] = None

    debug_memory_usage()

    for wavelength in wavelength_names:
      hklin = output_files[wavelength]

      xsh = XDSScalerHelper()
      xsh.set_working_directory(self.get_working_directory())

      ref = xsh.split_and_convert_xscale_output(
          hklin, 'SCALED_', project_info, 1.0 / scale_factor)

      for hklout in ref.keys():
        for epoch in self._sweep_information.keys():
          if os.path.split(self._sweep_information[epoch][
              'prepared_reflections'])[-1] == \
              os.path.split(hklout)[-1]:
            if self._sweep_information[epoch][
                'scaled_reflections'] is not None:
              raise RuntimeError, 'duplicate entries'
            self._sweep_information[epoch][
                'scaled_reflections'] = ref[hklout]

      del(xsh)

    debug_memory_usage()

    for epoch in self._sweep_information.keys():
      hklin = self._sweep_information[epoch]['scaled_reflections']
      dname = self._sweep_information[epoch]['dname']
      sname = self._sweep_information[epoch]['sname']
      # needed below for the detector-based resolution limit
      intgr = self._sweep_information[epoch]['integrater']

      hkl_copy = os.path.join(self.get_working_directory(),
                              'R_%s' % os.path.split(hklin)[-1])

      if not os.path.exists(hkl_copy):
        shutil.copyfile(hklin, hkl_copy)

      # let's properly listen to the user's resolution limit needs...

      if self._user_resolution_limits.get((dname, sname), False):
        resolution = self._user_resolution_limits[(dname, sname)]

      else:
        if PhilIndex.params.xia2.settings.resolution.keep_all_reflections == True:
          try:
            resolution = intgr.get_detector().get_max_resolution(intgr.get_beam_obj().get_s0())
            Debug.write('keep_all_reflections set, using detector limits')
          except Exception:
            resolution = self._estimate_resolution_limit(hklin)
        else:
          resolution = self._estimate_resolution_limit(hklin)

      Chatter.write('Resolution for sweep %s/%s: %.2f' % \
                    (dname, sname, resolution))

      if (dname, sname) not in self._scalr_resolution_limits:
        self._scalr_resolution_limits[(dname, sname)] = (resolution, None)
        self.set_scaler_done(False)
      else:
        if resolution < self._scalr_resolution_limits[(dname, sname)][0]:
          self._scalr_resolution_limits[(dname, sname)] = (resolution, None)
          self.set_scaler_done(False)

    debug_memory_usage()

    if not self.get_scaler_done():
      Debug.write('Returning as scaling not finished...')
      return

    self._sort_together_data_xds()

    highest_resolution = min(limit for limit, _ in self._scalr_resolution_limits.values())

    self._scalr_highest_resolution = highest_resolution

    Debug.write('Scaler highest resolution set to %5.2f' % \
                highest_resolution)

    if not self.get_scaler_done():
      Debug.write('Returning as scaling not finished...')
      return

    sdadd_full = 0.0
    sdb_full = 0.0

    # ---------- FINAL MERGING ----------

    sc = self._factory.Aimless()

    FileHandler.record_log_file('%s %s aimless' % (self._scalr_pname,
                                                   self._scalr_xname),
                                sc.get_log_file())

    sc.set_resolution(highest_resolution)
    sc.set_hklin(self._prepared_reflections)
    sc.set_new_scales_file('%s_final.scales' % self._scalr_xname)

    if sdadd_full != 0.0 or sdb_full != 0.0:
      sc.add_sd_correction('both', 1.0, sdadd_full, sdb_full)

    for epoch in epochs:
      input = self._sweep_information[epoch]
      start, end = (min(input['batches']), max(input['batches']))

      rkey = input['dname'], input['sname']
      run_resolution_limit, _ = self._scalr_resolution_limits[rkey]

      sc.add_run(start, end, exclude = False,
                 resolution = run_resolution_limit,
                 name = input['sname'])

    sc.set_hklout(os.path.join(self.get_working_directory(),
                               '%s_%s_scaled.mtz' % \
                               (self._scalr_pname, self._scalr_xname)))

    if self.get_scaler_anomalous():
      sc.set_anomalous()

    sc.multi_merge()

    FileHandler.record_xml_file('%s %s aimless xml' % (self._scalr_pname,
                                                       self._scalr_xname),
                                sc.get_xmlout())
    data = sc.get_summary()

    loggraph = sc.parse_ccp4_loggraph()

    standard_deviation_info = { }

    for key in loggraph.keys():
      if 'standard deviation v. Intensity' in key:
        dataset = key.split(',')[-1].strip()
        standard_deviation_info[dataset] = transpose_loggraph(
            loggraph[key])

    resolution_info = { }

    for key in loggraph.keys():
      if 'Analysis against resolution' in key:
        dataset = key.split(',')[-1].strip()
        resolution_info[dataset] = transpose_loggraph(
            loggraph[key])

    # and also radiation damage stuff...

    batch_info = { }

    for key in loggraph.keys():
      if 'Analysis against Batch' in key:
        dataset = key.split(',')[-1].strip()
        batch_info[dataset] = transpose_loggraph(
            loggraph[key])


    # finally put all of the results "somewhere useful"

    self._scalr_statistics = data

    self._scalr_scaled_refl_files = copy.deepcopy(
        sc.get_scaled_reflection_files())

    self._scalr_scaled_reflection_files = { }

    # also output the unmerged scalepack format files...

    sc = self._factory.Aimless()
    sc.set_resolution(highest_resolution)
    sc.set_hklin(self._prepared_reflections)
    sc.set_scalepack()

    for epoch in epochs:
      input = self._sweep_information[epoch]
      start, end = (min(input['batches']), max(input['batches']))

      rkey = input['dname'], input['sname']
      run_resolution_limit, _ = self._scalr_resolution_limits[rkey]

      sc.add_run(start, end, exclude = False,
                 resolution = run_resolution_limit,
                 name = input['sname'])

    sc.set_hklout(os.path.join(self.get_working_directory(),
                               '%s_%s_scaled.mtz' % \
                               (self._scalr_pname,
                                self._scalr_xname)))

    if self.get_scaler_anomalous():
      sc.set_anomalous()

    sc.multi_merge()

    self._scalr_scaled_reflection_files['sca_unmerged'] = { }
    self._scalr_scaled_reflection_files['mtz_unmerged'] = { }

    for dataset in sc.get_scaled_reflection_files().keys():
      hklout = sc.get_scaled_reflection_files()[dataset]

      # then mark the scalepack files for copying...

      scalepack = os.path.join(os.path.split(hklout)[0],
                               os.path.split(hklout)[1].replace(
          '_scaled', '_scaled_unmerged').replace('.mtz', '.sca'))
      self._scalr_scaled_reflection_files['sca_unmerged'][
          dataset] = scalepack
      FileHandler.record_data_file(scalepack)
      mtz_unmerged = os.path.splitext(scalepack)[0] + '.mtz'
      self._scalr_scaled_reflection_files['mtz_unmerged'][dataset] = mtz_unmerged
      FileHandler.record_data_file(mtz_unmerged)

    if PhilIndex.params.xia2.settings.merging_statistics.source == 'cctbx':
      for key in self._scalr_scaled_refl_files:
        stats = self._compute_scaler_statistics(
          self._scalr_scaled_reflection_files['mtz_unmerged'][key], wave=key)
        self._scalr_statistics[
          (self._scalr_pname, self._scalr_xname, key)] = stats

    # convert reflection files to .sca format - use mtz2various for this

    self._scalr_scaled_reflection_files['sca'] = { }
    self._scalr_scaled_reflection_files['hkl'] = { }

    for key in self._scalr_scaled_refl_files:

      f = self._scalr_scaled_refl_files[key]
      scaout = '%s.sca' % f[:-4]

      m2v = self._factory.Mtz2various()
      m2v.set_hklin(f)
      m2v.set_hklout(scaout)
      m2v.convert()

      self._scalr_scaled_reflection_files['sca'][key] = scaout
      FileHandler.record_data_file(scaout)

      if PhilIndex.params.xia2.settings.small_molecule == True:
        hklout = '%s.hkl' % f[:-4]

        m2v = self._factory.Mtz2various()
        m2v.set_hklin(f)
        m2v.set_hklout(hklout)
        m2v.convert_shelx()

        self._scalr_scaled_reflection_files['hkl'][key] = hklout
        FileHandler.record_data_file(hklout)
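
# Standalone sketch of the REMOVE.HKL bookkeeping above: merge previously
# removed (h, k, l, z) outliers with newly flagged ones, dropping
# duplicates and anything with z below z_min; a non-zero count of new
# survivors means scaling should be rerun.
def merge_outliers(current_remove, xscale_remove, z_min):
    final_remove = [c for c in xscale_remove if c not in current_remove]
    kept = [r for r in current_remove + final_remove if r[3] >= z_min]
    used_new = sum(1 for r in final_remove if r[3] >= z_min)
    return kept, used_new

old = [(1, 2, 3, 8.5)]
new = [(1, 2, 3, 8.5), (0, 0, 4, 12.0), (2, 1, 1, 3.0)]
print(merge_outliers(old, new, z_min=5.0))
# ([(1, 2, 3, 8.5), (0, 0, 4, 12.0)], 1)
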
Code example #28
    def _scale(self):
        "Perform all of the operations required to deliver the scaled data."

        epochs = self._sweep_handler.get_epochs()

        sc = self._updated_aimless()
        sc.set_hklin(self._prepared_reflections)
        sc.set_chef_unmerged(True)
        sc.set_new_scales_file("%s.scales" % self._scalr_xname)

        user_resolution_limits = {}

        for epoch in epochs:
            si = self._sweep_handler.get_sweep_information(epoch)
            pname, xname, dname = si.get_project_info()
            sname = si.get_sweep_name()
            intgr = si.get_integrater()

            if intgr.get_integrater_user_resolution():
                dmin = intgr.get_integrater_high_resolution()

                if (dname, sname) not in user_resolution_limits:
                    user_resolution_limits[(dname, sname)] = dmin
                elif dmin < user_resolution_limits[(dname, sname)]:
                    user_resolution_limits[(dname, sname)] = dmin

            start, end = si.get_batch_range()

            if (dname, sname) in self._scalr_resolution_limits:
                resolution, _ = self._scalr_resolution_limits[(dname, sname)]
                sc.add_run(start, end, exclude=False, resolution=resolution, name=sname)
            else:
                sc.add_run(start, end, name=sname)

        sc.set_hklout(
            os.path.join(
                self.get_working_directory(),
                f"{self._scalr_pname}_{self._scalr_xname}_scaled_test.mtz",
            )
        )

        if self.get_scaler_anomalous():
            sc.set_anomalous()

        # what follows, sucks

        failover = PhilIndex.params.xia2.settings.failover
        if failover:

            try:
                sc.scale()
            except RuntimeError as e:

                es = str(e)

                if (
                    "bad batch" in es
                    or "negative scales run" in es
                    or "no observations" in es
                ):

                    # first ID the sweep from the batch no

                    batch = int(es.split()[-1])
                    epoch = self._identify_sweep_epoch(batch)
                    sweep = self._scalr_integraters[epoch].get_integrater_sweep()

                    # then remove it from my parent xcrystal

                    self.get_scaler_xcrystal().remove_sweep(sweep)

                    # then remove it from the scaler list of intergraters
                    # - this should really be a scaler interface method

                    del self._scalr_integraters[epoch]

                    # then tell the user what is happening

                    logger.info(
                        "Sweep %s gave negative scales - removing", sweep.get_name()
                    )

                    # then reset the prepare, do, finish flags

                    self.set_scaler_prepare_done(False)
                    self.set_scaler_done(False)
                    self.set_scaler_finish_done(False)

                    # and return
                    return

                else:

                    raise e

        else:
            sc.scale()

        # then gather up all of the resulting reflection files
        # and convert them into the required formats (.sca, .mtz.)

        loggraph = sc.parse_ccp4_loggraph()

        resolution_info = {}

        reflection_files = sc.get_scaled_reflection_files()

        for dataset in reflection_files:
            FileHandler.record_temporary_file(reflection_files[dataset])

        for key in loggraph:
            if "Analysis against resolution" in key:
                dataset = key.split(",")[-1].strip()
                resolution_info[dataset] = transpose_loggraph(loggraph[key])

        # check in here that there is actually some data to scale..!

        if not resolution_info:
            raise RuntimeError("no resolution info")

        highest_suggested_resolution = self.assess_resolution_limits(
            sc.get_unmerged_reflection_file(), user_resolution_limits
        )

        if not self.get_scaler_done():
            logger.debug("Returning as scaling not finished...")
            return

        batch_info = {}

        for key in loggraph:
            if "Analysis against Batch" in key:
                dataset = key.split(",")[-1].strip()
                batch_info[dataset] = transpose_loggraph(loggraph[key])

        sc = self._updated_aimless()

        FileHandler.record_log_file(
            f"{self._scalr_pname} {self._scalr_xname} aimless", sc.get_log_file()
        )

        sc.set_hklin(self._prepared_reflections)
        sc.set_new_scales_file("%s_final.scales" % self._scalr_xname)

        for epoch in epochs:

            si = self._sweep_handler.get_sweep_information(epoch)
            pname, xname, dname = si.get_project_info()
            sname = si.get_sweep_name()
            start, end = si.get_batch_range()

            resolution_limit, _ = self._scalr_resolution_limits[(dname, sname)]

            sc.add_run(
                start, end, exclude=False, resolution=resolution_limit, name=xname
            )

        sc.set_hklout(
            os.path.join(
                self.get_working_directory(),
                f"{self._scalr_pname}_{self._scalr_xname}_scaled.mtz",
            )
        )

        if self.get_scaler_anomalous():
            sc.set_anomalous()

        sc.scale()

        FileHandler.record_xml_file(
            f"{self._scalr_pname} {self._scalr_xname} aimless", sc.get_xmlout()
        )

        data = sc.get_summary()
        scales_file = sc.get_new_scales_file()
        loggraph = sc.parse_ccp4_loggraph()

        standard_deviation_info = {}

        for key in loggraph:
            if "standard deviation v. Intensity" in key:
                dataset = key.split(",")[-1].strip()
                standard_deviation_info[dataset] = transpose_loggraph(loggraph[key])

        resolution_info = {}

        for key in loggraph:
            if "Analysis against resolution" in key:
                dataset = key.split(",")[-1].strip()
                resolution_info[dataset] = transpose_loggraph(loggraph[key])

        batch_info = {}

        for key in loggraph:
            if "Analysis against Batch" in key:
                dataset = key.split(",")[-1].strip()
                batch_info[dataset] = transpose_loggraph(loggraph[key])

        # finally put all of the results "somewhere useful"

        self._scalr_statistics = data

        self._scalr_scaled_refl_files = copy.deepcopy(sc.get_scaled_reflection_files())

        sc = self._updated_aimless()
        sc.set_hklin(self._prepared_reflections)
        sc.set_scales_file(scales_file)

        self._wavelengths_in_order = []

        for epoch in epochs:
            si = self._sweep_handler.get_sweep_information(epoch)
            pname, xname, dname = si.get_project_info()
            sname = si.get_sweep_name()
            start, end = si.get_batch_range()

            resolution_limit, _ = self._scalr_resolution_limits[(dname, sname)]

            sc.add_run(
                start, end, exclude=False, resolution=resolution_limit, name=sname
            )

            if dname not in self._wavelengths_in_order:
                self._wavelengths_in_order.append(dname)

        sc.set_hklout(
            os.path.join(
                self.get_working_directory(),
                f"{self._scalr_pname}_{self._scalr_xname}_scaled.mtz",
            )
        )

        sc.set_scalepack()

        if self.get_scaler_anomalous():
            sc.set_anomalous()
        sc.scale()

        self._update_scaled_unit_cell()

        self._scalr_scaled_reflection_files = {}
        self._scalr_scaled_reflection_files["sca"] = {}
        self._scalr_scaled_reflection_files["sca_unmerged"] = {}
        self._scalr_scaled_reflection_files["mtz_unmerged"] = {}

        for key in self._scalr_scaled_refl_files:
            hklout = self._scalr_scaled_refl_files[key]

            scaout = "%s.sca" % hklout[:-4]
            self._scalr_scaled_reflection_files["sca"][key] = scaout
            FileHandler.record_data_file(scaout)
            scalepack = os.path.join(
                os.path.split(hklout)[0],
                os.path.split(hklout)[1]
                .replace("_scaled", "_scaled_unmerged")
                .replace(".mtz", ".sca"),
            )
            self._scalr_scaled_reflection_files["sca_unmerged"][key] = scalepack
            FileHandler.record_data_file(scalepack)
            mtz_unmerged = os.path.splitext(scalepack)[0] + ".mtz"
            self._scalr_scaled_reflection_files["mtz_unmerged"][key] = mtz_unmerged
            FileHandler.record_data_file(mtz_unmerged)

            if self._scalr_cell_esd is not None:
                # patch .mtz and overwrite unit cell information
                import xia2.Modules.Scaler.tools as tools

                override_cell = self._scalr_cell_dict.get(
                    f"{self._scalr_pname}_{self._scalr_xname}_{key}"
                )[0]
                tools.patch_mtz_unit_cell(mtz_unmerged, override_cell)
                tools.patch_mtz_unit_cell(hklout, override_cell)

            self._scalr_scaled_reflection_files["mtz_unmerged"][key] = mtz_unmerged
            FileHandler.record_data_file(mtz_unmerged)

        if PhilIndex.params.xia2.settings.merging_statistics.source == "cctbx":
            for key in self._scalr_scaled_refl_files:
                stats = self._compute_scaler_statistics(
                    self._scalr_scaled_reflection_files["mtz_unmerged"][key],
                    selected_band=(highest_suggested_resolution, None),
                    wave=key,
                )
                self._scalr_statistics[
                    (self._scalr_pname, self._scalr_xname, key)
                ] = stats

        sc = self._updated_aimless()
        sc.set_hklin(self._prepared_reflections)
        sc.set_scales_file(scales_file)

        self._wavelengths_in_order = []

        for epoch in epochs:

            si = self._sweep_handler.get_sweep_information(epoch)
            pname, xname, dname = si.get_project_info()
            sname = si.get_sweep_name()
            start, end = si.get_batch_range()

            resolution_limit, _ = self._scalr_resolution_limits[(dname, sname)]

            sc.add_run(
                start, end, exclude=False, resolution=resolution_limit, name=sname
            )

            if dname not in self._wavelengths_in_order:
                self._wavelengths_in_order.append(dname)

        sc.set_hklout(
            os.path.join(
                self.get_working_directory(),
                f"{self._scalr_pname}_{self._scalr_xname}_chef.mtz",
            )
        )

        sc.set_chef_unmerged(True)

        if self.get_scaler_anomalous():
            sc.set_anomalous()
        sc.scale()
        if not PhilIndex.params.dials.fast_mode:
            try:
                self._generate_absorption_map(sc)
            except Exception as e:
                # Map generation may fail for a number of reasons, e.g. matplotlib broken
                logger.debug("Could not generate absorption map (%s)", e)
Code example #29
File: CommonScaler.py  Project: lizhen-dlut/xia2
    def _sort_together_data_xds(self):

        if len(self._sweep_information) == 1:
            return self._sort_together_data_xds_one_sweep()

        max_batches = 0

        for epoch in self._sweep_information.keys():

            hklin = self._sweep_information[epoch]['scaled_reflections']

            if self._sweep_information[epoch]['batches'] == [0, 0]:

                Chatter.write('Getting batches from %s' % hklin)
                batches = MtzUtils.batches_from_mtz(hklin)
                self._sweep_information[epoch]['batches'] = [
                    min(batches), max(batches)
                ]
                Chatter.write('=> %d to %d' % (min(batches), max(batches)))

            batches = self._sweep_information[epoch]['batches']
            if 1 + max(batches) - min(batches) > max_batches:
                max_batches = max(batches) - min(batches) + 1

        Debug.write('Biggest sweep has %d batches' % max_batches)
        max_batches = nifty_power_of_ten(max_batches)

        epochs = sorted(self._sweep_information.keys())

        counter = 0

        for epoch in epochs:

            hklin = self._sweep_information[epoch]['scaled_reflections']

            pname = self._sweep_information[epoch]['pname']
            xname = self._sweep_information[epoch]['xname']
            dname = self._sweep_information[epoch]['dname']

            sname = self._sweep_information[epoch]['sname']

            hklout = os.path.join(self.get_working_directory(),
                                  '%s_%s_%s_%d.mtz' % \
                                  (pname, xname, dname, counter))

            # we will want to delete this on exit
            FileHandler.record_temporary_file(hklout)

            # record this for future reference - will be needed in the
            # radiation damage analysis...

            # hack - reset this as it gets in a muddle...
            intgr = self._sweep_information[epoch]['integrater']
            self._sweep_information[epoch][
                'batches'] = intgr.get_integrater_batches()

            first_batch = min(self._sweep_information[epoch]['batches'])
            offset = counter * max_batches - first_batch + 1
            self._sweep_information[epoch]['batch_offset'] = offset

            from xia2.Modules.Scaler.rebatch import rebatch
            new_batches = rebatch(hklin,
                                  hklout,
                                  add_batch=offset,
                                  pname=pname,
                                  xname=xname,
                                  dname=dname)

            # update the "input information"

            self._sweep_information[epoch]['hklin'] = hklout
            self._sweep_information[epoch]['batches'] = new_batches

            # update the counter & recycle

            counter += 1

        s = self._factory.Sortmtz()

        hklout = os.path.join(self.get_working_directory(),
                              '%s_%s_sorted.mtz' % \
                              (self._scalr_pname, self._scalr_xname))

        s.set_hklout(hklout)

        for epoch in epochs:
            s.add_hklin(self._sweep_information[epoch]['hklin'])

        s.sort(vrset=-99999999.0)

        self._prepared_reflections = hklout

        if self.get_scaler_reference_reflection_file():
            spacegroups = [
                MtzUtils.space_group_name_from_mtz(
                    self.get_scaler_reference_reflection_file())
            ]
            reindex_operator = 'h,k,l'

        else:
            pointless = self._factory.Pointless()
            pointless.set_hklin(hklout)
            pointless.decide_spacegroup()

            FileHandler.record_log_file('%s %s pointless' % \
                                        (self._scalr_pname,
                                         self._scalr_xname),
                                        pointless.get_log_file())

            spacegroups = pointless.get_likely_spacegroups()
            reindex_operator = pointless.get_spacegroup_reindex_operator()

            if self._scalr_input_spacegroup:
                Debug.write('Assigning user input spacegroup: %s' % \
                            self._scalr_input_spacegroup)
                spacegroups = [self._scalr_input_spacegroup]
                reindex_operator = 'h,k,l'

        self._scalr_likely_spacegroups = spacegroups
        spacegroup = self._scalr_likely_spacegroups[0]

        self._scalr_reindex_operator = reindex_operator

        Chatter.write('Likely spacegroups:')
        for spag in self._scalr_likely_spacegroups:
            Chatter.write('%s' % spag)

        Chatter.write(
            'Reindexing to first spacegroup setting: %s (%s)' % \
            (spacegroup, clean_reindex_operator(reindex_operator)))

        hklin = self._prepared_reflections
        hklout = os.path.join(self.get_working_directory(),
                              '%s_%s_reindex.mtz' % \
                              (self._scalr_pname, self._scalr_xname))

        FileHandler.record_temporary_file(hklout)

        ri = self._factory.Reindex()
        ri.set_hklin(hklin)
        ri.set_hklout(hklout)
        ri.set_spacegroup(spacegroup)
        ri.set_operator(reindex_operator)
        ri.reindex()

        hklin = hklout
        hklout = os.path.join(self.get_working_directory(),
                              '%s_%s_sorted.mtz' % \
                              (self._scalr_pname, self._scalr_xname))

        s = self._factory.Sortmtz()
        s.set_hklin(hklin)
        s.set_hklout(hklout)

        s.sort(vrset=-99999999.0)

        self._prepared_reflections = hklout

        Debug.write(
            'Updating unit cell to %.2f %.2f %.2f %.2f %.2f %.2f' % \
            tuple(ri.get_cell()))
        self._scalr_cell = tuple(ri.get_cell())

        return
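
The rebatching above relies on two pieces of arithmetic: the largest sweep
size rounded up by nifty_power_of_ten, and a per-sweep offset of
counter * max_batches - first_batch + 1. A self-contained sketch, assuming
nifty_power_of_ten returns the smallest power of ten strictly greater than
its argument (the real helper may round differently):

import math

def nifty_power_of_ten(n):
    return 10 ** (int(math.log10(n)) + 1)

def batch_offsets(sweeps):
    """sweeps: list of (first_batch, last_batch) per sweep, in epoch order."""
    max_batches = nifty_power_of_ten(
        max(last - first + 1 for first, last in sweeps))
    return [counter * max_batches - first + 1
            for counter, (first, _) in enumerate(sweeps)]

# Three sweeps of ~90 images land in disjoint blocks 1-, 101- and 201-:
print(batch_offsets([(1, 90), (1, 88), (5, 95)]))  # [0, 100, 196]
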
Code example #30
File: ISPyBXmlHandler.py  Project: lizhen-dlut/xia2
  def write_xml(self, file, command_line=''):

    fout = open(file, 'w')

    fout.write('<?xml version="1.0"?>')
    fout.write('<AutoProcContainer>\n')

    for crystal in sorted(self._crystals):
      xcrystal = self._crystals[crystal]

      cell = xcrystal.get_cell()
      spacegroup = xcrystal.get_likely_spacegroups()[0]

      fout.write('<AutoProc><spaceGroup>%s</spaceGroup>' % spacegroup)
      self.write_refined_cell(fout, cell)
      fout.write('</AutoProc>')

      fout.write('<AutoProcScalingContainer>')
      fout.write('<AutoProcScaling>')
      self.write_date(fout)
      fout.write('</AutoProcScaling>')

      statistics_all = xcrystal.get_statistics()
      reflection_files = xcrystal.get_scaled_merged_reflections()

      wavelength_names = xcrystal.get_wavelength_names()

      for key in statistics_all.keys():
        pname, xname, dname = key

        # FIXME should assert that the dname is a
        # valid wavelength name

        available = statistics_all[key].keys()

        stats = []
        keys = [
            'High resolution limit',
            'Low resolution limit',
            'Completeness',
            'Multiplicity',
            'I/sigma',
            'Rmerge(I+/-)',
            'CC half',
            'Anomalous completeness',
            'Anomalous correlation',
            'Anomalous multiplicity',
            'Total observations',
            'Total unique',
            'Rmeas(I)',
            'Rmeas(I+/-)',
            'Rpim(I)',
            'Rpim(I+/-)',
            'Partial Bias'
            ]

        for k in keys:
          if k in available:
            stats.append(k)

        xwavelength = xcrystal.get_xwavelength(dname)
        sweeps = xwavelength.get_sweeps()

        for j, name in enumerate(['overall', 'innerShell', 'outerShell']):
          statistics_cache = {}

          for s in stats:
            if isinstance(statistics_all[key][s], (list, tuple)):
              statistics_cache[s] = statistics_all[key][s][j]

          # send these to be written out
          self.write_scaling_statistics(fout, name, statistics_cache)

        for sweep in sweeps:
          fout.write('<AutoProcIntegrationContainer>\n')
          if '#' in sweep.get_template():
            image_name = sweep.get_image_name(0)
          else:
            image_name = os.path.join(sweep.get_directory(),
                                      sweep.get_template())
          fout.write('<Image><fileName>%s</fileName>' % \
                     os.path.split(image_name)[-1])
          fout.write('<fileLocation>%s</fileLocation></Image>' %
                     sanitize(os.path.split(image_name)[0]))
          fout.write('<AutoProcIntegration>\n')
          cell = sweep.get_integrater_cell()
          self.write_cell(fout, cell)

          # FIXME this is naughty
          intgr = sweep._get_integrater()

          start, end = intgr.get_integrater_wedge()

          fout.write('<startImageNumber>%d</startImageNumber>' % \
                     start)

          fout.write('<endImageNumber>%d</endImageNumber>' % \
                     end)

          # FIXME this is naughty
          indxr = sweep._get_indexer()

          fout.write(
              '<refinedDetectorDistance>%f</refinedDetectorDistance>' % \
              indxr.get_indexer_distance())

          beam = indxr.get_indexer_beam_centre()

          fout.write('<refinedXBeam>%f</refinedXBeam>' % beam[0])
          fout.write('<refinedYBeam>%f</refinedYBeam>' % beam[1])

          fout.write('</AutoProcIntegration>\n')
          fout.write('</AutoProcIntegrationContainer>\n')

      fout.write('</AutoProcScalingContainer>')

      # file unpacking nonsense

      if not command_line:
        from xia2.Handlers.CommandLine import CommandLine
        command_line = CommandLine.get_command_line()

      fout.write('<AutoProcProgramContainer><AutoProcProgram>')
      fout.write('<processingCommandLine>%s</processingCommandLine>' \
                 % sanitize(command_line))
      fout.write('<processingPrograms>xia2</processingPrograms>')
      fout.write('</AutoProcProgram>')

      from xia2.Handlers.Environment import Environment
      data_directory = Environment.generate_directory('DataFiles')
      log_directory = Environment.generate_directory('LogFiles')

      for k in reflection_files:

        reflection_file = reflection_files[k]

        if not isinstance(reflection_file, str):
          continue

        reflection_file = FileHandler.get_data_file(reflection_file)

        basename = os.path.basename(reflection_file)
        if os.path.isfile(os.path.join(data_directory, basename)):
          # Use file in DataFiles directory in preference (if it exists)
          reflection_file = os.path.join(data_directory, basename)

        fout.write('<AutoProcProgramAttachment><fileType>Result')
        fout.write('</fileType><fileName>%s</fileName>' % \
                   os.path.split(reflection_file)[-1])
        fout.write('<filePath>%s</filePath>' % \
                   sanitize(os.path.split(reflection_file)[0]))
        fout.write('</AutoProcProgramAttachment>\n')

      import glob
      g = glob.glob(os.path.join(log_directory, '*merging-statistics.json'))
      for merging_stats_json in g:
        fout.write('<AutoProcProgramAttachment><fileType>Graph')
        fout.write('</fileType><fileName>%s</fileName>' %
                   os.path.split(merging_stats_json)[-1])
        fout.write('<filePath>%s</filePath>' % sanitize(log_directory))
        fout.write('</AutoProcProgramAttachment>\n')

      # add the xia2.txt file...

      fout.write('<AutoProcProgramAttachment><fileType>Log')
      fout.write('</fileType><fileName>xia2.txt</fileName>')
      fout.write('<filePath>%s</filePath>' % sanitize(os.getcwd()))
      fout.write('</AutoProcProgramAttachment>\n')

      fout.write('</AutoProcProgramContainer>')

    fout.write('</AutoProcContainer>\n')
    fout.close()
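
The writer above builds the ISPyB XML by string concatenation. For
comparison, a sketch of the same AutoProcContainer skeleton built with the
standard library's ElementTree, which handles escaping and nesting
automatically; the element names come from the code above, the values are
illustrative:

import xml.etree.ElementTree as ET

def autoproc_skeleton(spacegroup, command_line):
    root = ET.Element("AutoProcContainer")
    autoproc = ET.SubElement(root, "AutoProc")
    ET.SubElement(autoproc, "spaceGroup").text = spacegroup
    container = ET.SubElement(root, "AutoProcProgramContainer")
    program = ET.SubElement(container, "AutoProcProgram")
    ET.SubElement(program, "processingCommandLine").text = command_line
    ET.SubElement(program, "processingPrograms").text = "xia2"
    return ET.tostring(root, encoding="unicode")

print(autoproc_skeleton("P 21 21 21", "xia2 image=/data/example_0001.cbf"))
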
Code example #31
File: CommonScaler.py  Project: lizhen-dlut/xia2
    def _sort_together_data_ccp4(self):
        '''Sort together in the right order (rebatching as we go) the sweeps
    we want to scale together.'''

        max_batches = 0

        for epoch in self._sweep_handler.get_epochs():

            si = self._sweep_handler.get_sweep_information(epoch)
            pname, xname, dname = si.get_project_info()
            sname = si.get_sweep_name()
            hklin = si.get_reflections()

            # limit the reflections - e.g. if we are re-running the scaling step
            # on just a subset of the integrated data

            hklin = si.get_reflections()
            limit_batch_range = None
            for sweep in PhilIndex.params.xia2.settings.sweep:
                if sweep.id == sname and sweep.range is not None:
                    limit_batch_range = sweep.range
                    break

            if limit_batch_range is not None:
                Debug.write('Limiting batch range for %s: %s' %
                            (sname, limit_batch_range))
                start, end = limit_batch_range
                hklout = os.path.splitext(hklin)[0] + '_tmp.mtz'
                FileHandler.record_temporary_file(hklout)
                rb = self._factory.Pointless()
                rb.set_hklin(hklin)
                rb.set_hklout(hklout)
                rb.limit_batches(start, end)
                si.set_reflections(hklout)
                si.set_batches(limit_batch_range)

            # keep a count of the maximum number of batches in a block -
            # this will be used to make rebatch work below.

            hklin = si.get_reflections()

            batches = MtzUtils.batches_from_mtz(hklin)
            if 1 + max(batches) - min(batches) > max_batches:
                max_batches = max(batches) - min(batches) + 1

        Debug.write('Biggest sweep has %d batches' % max_batches)
        max_batches = nifty_power_of_ten(max_batches)

        # then rebatch the files, to make sure that the batch numbers are
        # in the same order as the epochs of data collection.

        counter = 0

        for epoch in self._sweep_handler.get_epochs():

            si = self._sweep_handler.get_sweep_information(epoch)

            hklin = si.get_reflections()

            pname, xname, dname = si.get_project_info()
            sname = si.get_sweep_name()

            hklout = os.path.join(self.get_working_directory(),
                                  '%s_%s_%s_%s_integrated.mtz' % \
                                  (pname, xname, dname, sname))

            first_batch = min(si.get_batches())
            si.set_batch_offset(counter * max_batches - first_batch + 1)

            from xia2.Modules.Scaler.rebatch import rebatch
            new_batches = rebatch(hklin,
                                  hklout,
                                  first_batch=counter * max_batches + 1,
                                  pname=pname,
                                  xname=xname,
                                  dname=dname)

            # update the "input information"

            si.set_reflections(hklout)
            si.set_batches(new_batches)

            # update the counter & recycle

            counter += 1

        s = self._factory.Sortmtz()

        hklout = os.path.join(self.get_working_directory(),
                              '%s_%s_sorted.mtz' % \
                              (self._scalr_pname, self._scalr_xname))

        s.set_hklout(hklout)

        for epoch in self._sweep_handler.get_epochs():
            s.add_hklin(
                self._sweep_handler.get_sweep_information(
                    epoch).get_reflections())

        s.sort()

        # verify that the measurements are in the correct setting
        # choice for the spacegroup

        hklin = hklout
        hklout = hklin.replace('sorted.mtz', 'temp.mtz')

        if not self.get_scaler_reference_reflection_file():

            if PhilIndex.params.xia2.settings.symmetry.program == 'dials':
                p = self._factory.dials_symmetry()
            else:
                p = self._factory.Pointless()

            FileHandler.record_log_file('%s %s pointless' % \
                                        (self._scalr_pname,
                                         self._scalr_xname),
                                        p.get_log_file())

            if len(self._sweep_handler.get_epochs()) > 1:
                p.set_hklin(hklin)
            else:
                # permit the use of pointless preparation...
                epoch = self._sweep_handler.get_epochs()[0]
                p.set_hklin(
                    self._prepare_pointless_hklin(
                        hklin,
                        self._sweep_handler.get_sweep_information(
                            epoch).get_integrater().get_phi_width()))

            if self._scalr_input_spacegroup:
                Debug.write('Assigning user input spacegroup: %s' % \
                            self._scalr_input_spacegroup)

                p.decide_spacegroup()
                spacegroup = p.get_spacegroup()
                reindex_operator = p.get_spacegroup_reindex_operator()

                Debug.write('Pointless thought %s (reindex as %s)' % \
                            (spacegroup, reindex_operator))

                spacegroup = self._scalr_input_spacegroup
                reindex_operator = 'h,k,l'
                self._spacegroup_reindex_operator = reindex_operator

            else:
                p.decide_spacegroup()
                spacegroup = p.get_spacegroup()
                reindex_operator = p.get_spacegroup_reindex_operator()
                self._spacegroup_reindex_operator = clean_reindex_operator(
                    reindex_operator)
                Debug.write('Pointless thought %s (reindex as %s)' % \
                            (spacegroup, reindex_operator))

            if self._scalr_input_spacegroup:
                self._scalr_likely_spacegroups = [self._scalr_input_spacegroup]
            else:
                self._scalr_likely_spacegroups = p.get_likely_spacegroups()

            Chatter.write('Likely spacegroups:')
            for spag in self._scalr_likely_spacegroups:
                Chatter.write('%s' % spag)

            Chatter.write(
                'Reindexing to first spacegroup setting: %s (%s)' % \
                (spacegroup, clean_reindex_operator(reindex_operator)))

        else:
            spacegroup = MtzUtils.space_group_name_from_mtz(
                self.get_scaler_reference_reflection_file())
            reindex_operator = 'h,k,l'

            self._scalr_likely_spacegroups = [spacegroup]

            Debug.write('Assigning spacegroup %s from reference' % \
                        spacegroup)

        # then run reindex to set the correct spacegroup

        ri = self._factory.Reindex()
        ri.set_hklin(hklin)
        ri.set_hklout(hklout)
        ri.set_spacegroup(spacegroup)
        ri.set_operator(reindex_operator)
        ri.reindex()

        FileHandler.record_temporary_file(hklout)

        # then resort the reflections (one last time!)

        s = self._factory.Sortmtz()

        temp = hklin
        hklin = hklout
        hklout = temp

        s.add_hklin(hklin)
        s.set_hklout(hklout)

        s.sort()

        # done preparing!

        self._prepared_reflections = s.get_hklout()
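
MtzUtils.batches_from_mtz is used above to find each sweep's batch range
without shelling out to Mtzdump. A plausible minimal implementation with
cctbx's iotbx.mtz is sketched below (an assumption - the real helper may
differ); it simply collects the batch numbers stored in the MTZ header:

from iotbx import mtz

def batches_from_mtz(hklin):
    return [batch.num() for batch in mtz.object(hklin).batches()]

# batches = batches_from_mtz("example_sorted.mtz")  # hypothetical path
# print(min(batches), max(batches))
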
Code example #32
    def _mosflm_parallel_integrate(self):
        '''Perform the integration as before, but this time as a
    number of parallel Mosflm jobs (hence, in separate directories)
    and including a step of pre-refinement of the mosaic spread and
    missets. This will all be kind of explicit and hence probably
    messy!'''

        refinr = self.get_integrater_refiner()

        lattice = refinr.get_refiner_lattice()
        spacegroup_number = lattice_to_spacegroup(lattice)
        mosaic = refinr.get_refiner_payload('mosaic')
        beam = refinr.get_refiner_payload('beam')
        distance = refinr.get_refiner_payload('distance')
        matrix = refinr.get_refiner_payload('mosflm_orientation_matrix')

        integration_params = refinr.get_refiner_payload(
            'mosflm_integration_parameters')

        if integration_params:
            if 'separation' in integration_params:
                self.set_integrater_parameter(
                    'mosflm', 'separation',
                    '%s %s' % tuple(integration_params['separation']))
            if 'raster' in integration_params:
                self.set_integrater_parameter(
                    'mosflm', 'raster',
                    '%d %d %d %d %d' % tuple(integration_params['raster']))

        refinr.set_refiner_payload('mosflm_integration_parameters', None)
        pname, xname, dname = self.get_integrater_project_info()

        # what follows below should (i) be run in separate directories
        # and (ii) be repeated N=parallel times.

        nproc = PhilIndex.params.xia2.settings.multiprocessing.nproc
        parallel = nproc

        # FIXME this is something of a kludge - if too few frames refinement
        # and integration does not work well... ideally want at least 15
        # frames / chunk (say)
        nframes = self._intgr_wedge[1] - self._intgr_wedge[0] + 1

        if parallel > nframes / 15:
            parallel = nframes // 15

        if not parallel:
            raise RuntimeError('parallel not set')
        if parallel < 2:
            raise RuntimeError('parallel not parallel: %s' % parallel)

        jobs = []
        hklouts = []
        nref = 0

        # calculate the chunks to use
        offset = self.get_frame_offset()
        start = self._intgr_wedge[0] - offset
        end = self._intgr_wedge[1] - offset

        left_images = 1 + end - start
        left_chunks = parallel
        chunks = []

        while left_images > 0:
            size = left_images // left_chunks
            chunks.append((start, start + size - 1))
            start += size
            left_images -= size
            left_chunks -= 1

        summary_files = []

        for j in range(parallel):

            # make some working directories, as necessary - chunk-(0:N-1)
            wd = os.path.join(self.get_working_directory(), 'chunk-%d' % j)
            if not os.path.exists(wd):
                os.makedirs(wd)

            job = MosflmIntegrate()
            job.set_working_directory(wd)

            auto_logfiler(job)

            l = refinr.get_refiner_lattice()

            # create the starting point
            f = open(os.path.join(wd, 'xiaintegrate-%s.mat' % l), 'w')
            for m in matrix:
                f.write(m)
            f.close()

            spacegroup_number = lattice_to_spacegroup(lattice)

            job.set_refine_profiles(self._mosflm_refine_profiles)

            # N.B. for harvesting need to append N to dname.

            if pname is not None and xname is not None and dname is not None:
                Debug.write('Harvesting: %s/%s/%s' % (pname, xname, dname))
                harvest_dir = self.get_working_directory()
                temp_dname = '%s_%s' % \
                             (dname, self.get_integrater_sweep_name())
                job.set_pname_xname_dname(pname, xname, temp_dname)

            job.set_template(os.path.basename(self.get_template()))
            job.set_directory(self.get_directory())

            # check for ice - and if so, exclude (ranges taken from
            # XDS documentation)
            if self.get_integrater_ice() != 0:
                Debug.write('Excluding ice rings')
                job.set_exclude_ice(True)

            # exclude specified resolution ranges
            if len(self.get_integrater_excluded_regions()) != 0:
                regions = self.get_integrater_excluded_regions()
                Debug.write('Excluding regions: %s' % repr(regions))
                job.set_exclude_regions(regions)

            mask = standard_mask(self.get_detector())
            for m in mask:
                job.add_instruction(m)

            job.set_input_mat_file('xiaintegrate-%s.mat' % l)

            job.set_beam_centre(beam)
            job.set_distance(distance)
            job.set_space_group_number(spacegroup_number)
            job.set_mosaic(mosaic)

            if self.get_wavelength_prov() == 'user':
                job.set_wavelength(self.get_wavelength())

            parameters = self.get_integrater_parameters('mosflm')
            job.update_parameters(parameters)

            if self._mosflm_gain:
                job.set_gain(self._mosflm_gain)

            # check for resolution limits
            if self._intgr_reso_high > 0.0:
                job.set_d_min(self._intgr_reso_high)
            if self._intgr_reso_low:
                job.set_d_max(self._intgr_reso_low)

            if PhilIndex.params.general.backstop_mask:
                from xia2.Toolkit.BackstopMask import BackstopMask
                mask = BackstopMask(PhilIndex.params.general.backstop_mask)
                mask = mask.calculate_mask_mosflm(self.get_header())
                job.set_mask(mask)

            detector = self.get_detector()
            detector_width, detector_height = detector[0].get_image_size_mm()

            lim_x = 0.5 * detector_width
            lim_y = 0.5 * detector_height

            Debug.write('Scanner limits: %.1f %.1f' % (lim_x, lim_y))
            job.set_limits(lim_x, lim_y)

            job.set_fix_mosaic(self._mosflm_postref_fix_mosaic)

            job.set_pre_refinement(True)
            job.set_image_range(chunks[j])

            # these are now running so ...

            jobs.append(job)

            continue

        # ok, at this stage I need to ...
        #
        # (i) accumulate the statistics as a function of batch
        # (ii) munge them into a single block
        #
        # This is likely to be a pain in the arse!

        first_integrated_batch = 1.0e6
        last_integrated_batch = -1.0e6

        all_residuals = []

        threads = []

        for j in range(parallel):
            job = jobs[j]

            # now wait for them to finish - first wait will really be the
            # first one, then all should be finished...

            thread = Background(job, 'run')
            thread.start()
            threads.append(thread)

        mosaics = []
        postref_result = {}

        integrated_images_first = 1.0e6
        integrated_images_last = -1.0e6
        self._intgr_per_image_statistics = {}

        for j in range(parallel):
            thread = threads[j]
            thread.stop()
            job = jobs[j]

            # get the log file
            output = job.get_all_output()

            # record a copy of it, perhaps - though not if parallel
            if self.get_integrater_sweep_name() and False:
                pname, xname, dname = self.get_integrater_project_info()
                FileHandler.record_log_file(
                    '%s %s %s %s mosflm integrate' % \
                    (self.get_integrater_sweep_name(),
                     pname, xname, '%s_%d' % (dname, j)),
                    job.get_log_file())

            # look for things that we want to know...
            # that is, the output reflection file name, the updated
            # value for the gain (if present,) any warnings, errors,
            # or just interesting facts.

            batches = job.get_batches_out()
            integrated_images_first = min(batches[0], integrated_images_first)
            integrated_images_last = max(batches[1], integrated_images_last)

            mosaics.extend(job.get_mosaic_spreads())

            if min(mosaics) < 0:
                raise IntegrationError('negative mosaic spread: %s' %
                                       min(mosaics))

            if (job.get_detector_gain_error()
                    and not (self.get_imageset().get_detector()[0].get_type()
                             == 'SENSOR_PAD')):
                gain = job.get_suggested_gain()
                if gain is not None:
                    self.set_integrater_parameter('mosflm', 'gain', gain)
                    self.set_integrater_export_parameter(
                        'mosflm', 'gain', gain)
                    if self._mosflm_gain:
                        Debug.write('GAIN updated to %f' % gain)
                    else:
                        Debug.write('GAIN found to be %f' % gain)

                    self._mosflm_gain = gain
                    self._mosflm_rerun_integration = True

            hklout = job.get_hklout()
            Debug.write('Integration output: %s' % hklout)
            hklouts.append(hklout)

            nref += job.get_nref()

            # if a BGSIG error happened try not refining the
            # profile and running again...

            if job.get_bgsig_too_large():
                if not self._mosflm_refine_profiles:
                    raise RuntimeError('BGSIG error with profiles fixed')

                Debug.write('BGSIG error detected - try fixing profile...')

                self._mosflm_refine_profiles = False
                self.set_integrater_done(False)

                return

            if job.get_getprof_error():
                Debug.write('GETPROF error detected - try fixing profile...')
                self._mosflm_refine_profiles = False
                self.set_integrater_done(False)

                return

            # here
            # write the report for each image as .*-#$ to Chatter -
            # detailed report will be written automagically to science...

            self._intgr_per_image_statistics.update(
                job.get_per_image_statistics())
            postref_result.update(job.get_postref_result())

            # inspect the output for e.g. very high weighted residuals

            all_residuals.extend(job.get_residuals())

        self._intgr_batches_out = (integrated_images_first,
                                   integrated_images_last)

        if mosaics:
            self.set_integrater_mosaic_min_mean_max(
                min(mosaics),
                sum(mosaics) / len(mosaics), max(mosaics))
        else:
            # no refined values - fall back on the mosaic estimate taken
            # from the refiner payload above
            m = mosaic
            self.set_integrater_mosaic_min_mean_max(m, m, m)

        Chatter.write(self.show_per_image_statistics())

        Chatter.write('Mosaic spread: %.3f < %.3f < %.3f' % \
                      self.get_integrater_mosaic_min_mean_max())

        # gather the statistics from the postrefinement for all sweeps
        # now write this to a postrefinement log

        postref_log = os.path.join(self.get_working_directory(),
                                   'postrefinement.log')

        fout = open(postref_log, 'w')

        fout.write('$TABLE: Postrefinement for %s:\n' % \
                   self._intgr_sweep_name)
        fout.write('$GRAPHS: Missetting angles:A:1, 2, 3, 4: $$\n')
        fout.write('Batch PhiX PhiY PhiZ $$ Batch PhiX PhiY PhiZ $$\n')

        for image in sorted(postref_result):
            phix = postref_result[image].get('phix', 0.0)
            phiy = postref_result[image].get('phiy', 0.0)
            phiz = postref_result[image].get('phiz', 0.0)

            fout.write('%d %5.2f %5.2f %5.2f\n' % \
                       (image, phix, phiy, phiz))

        fout.write('$$\n')
        fout.close()

        if self.get_integrater_sweep_name():
            pname, xname, dname = self.get_integrater_project_info()
            FileHandler.record_log_file('%s %s %s %s postrefinement' % \
                                        (self.get_integrater_sweep_name(),
                                         pname, xname, dname),
                                        postref_log)

        hklouts.sort()

        hklout = os.path.join(self.get_working_directory(),
                              os.path.split(hklouts[0])[-1])

        Debug.write('Sorting data to %s' % hklout)
        for hklin in hklouts:
            Debug.write('<= %s' % hklin)

        sortmtz = Sortmtz()
        sortmtz.set_hklout(hklout)
        for hklin in hklouts:
            sortmtz.add_hklin(hklin)

        sortmtz.sort()

        self._mosflm_hklout = hklout

        return self._mosflm_hklout
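
The wedge calculation above splits the image range into `parallel`
contiguous, near-equal chunks. The same arithmetic as a standalone sketch:

def make_chunks(start, end, parallel):
    chunks = []
    left_images = 1 + end - start
    left_chunks = parallel
    while left_images > 0:
        size = left_images // left_chunks
        chunks.append((start, start + size - 1))
        start += size
        left_images -= size
        left_chunks -= 1
    return chunks

print(make_chunks(1, 100, 4))  # [(1, 25), (26, 50), (51, 75), (76, 100)]
print(make_chunks(1, 103, 4))  # sizes 25/26/26/26 - remainders spread evenly
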
Code example #33
File: CommonScaler.py  Project: lizhen-dlut/xia2
    def _sort_together_data_xds_one_sweep(self):

        assert len(self._sweep_information) == 1

        epoch = list(self._sweep_information)[0]
        hklin = self._sweep_information[epoch]['scaled_reflections']

        if self.get_scaler_reference_reflection_file():
            spacegroups = [
                MtzUtils.space_group_name_from_mtz(
                    self.get_scaler_reference_reflection_file())
            ]
            reindex_operator = 'h,k,l'

        elif self._scalr_input_spacegroup:
            Debug.write('Assigning user input spacegroup: %s' % \
                        self._scalr_input_spacegroup)
            spacegroups = [self._scalr_input_spacegroup]
            reindex_operator = 'h,k,l'

        else:
            pointless = self._factory.Pointless()
            pointless.set_hklin(hklin)
            pointless.decide_spacegroup()

            FileHandler.record_log_file('%s %s pointless' % \
                                        (self._scalr_pname,
                                         self._scalr_xname),
                                        pointless.get_log_file())

            spacegroups = pointless.get_likely_spacegroups()
            reindex_operator = pointless.get_spacegroup_reindex_operator()

        self._scalr_likely_spacegroups = spacegroups
        spacegroup = self._scalr_likely_spacegroups[0]

        self._scalr_reindex_operator = clean_reindex_operator(reindex_operator)

        Chatter.write('Likely spacegroups:')
        for spag in self._scalr_likely_spacegroups:
            Chatter.write('%s' % spag)

        Chatter.write(
            'Reindexing to first spacegroup setting: %s (%s)' % \
            (spacegroup, clean_reindex_operator(reindex_operator)))

        hklout = os.path.join(self.get_working_directory(),
                              '%s_%s_reindex.mtz' % \
                              (self._scalr_pname, self._scalr_xname))

        FileHandler.record_temporary_file(hklout)

        if reindex_operator == '[h,k,l]':
            # just assign spacegroup

            from cctbx import sgtbx

            s = sgtbx.space_group(
                sgtbx.space_group_symbols(str(spacegroup)).hall())

            m = mtz.object(hklin)
            m.set_space_group(s).write(hklout)
            self._scalr_cell = m.crystals()[-1].unit_cell().parameters()
            Debug.write(
                'Updating unit cell to %.2f %.2f %.2f %.2f %.2f %.2f' % \
                tuple(self._scalr_cell))
            del m
            del s

        else:
            ri = self._factory.Reindex()
            ri.set_hklin(hklin)
            ri.set_hklout(hklout)
            ri.set_spacegroup(spacegroup)
            ri.set_operator(reindex_operator)
            ri.reindex()

            Debug.write(
                'Updating unit cell to %.2f %.2f %.2f %.2f %.2f %.2f' % \
                tuple(ri.get_cell()))
            self._scalr_cell = tuple(ri.get_cell())

        hklin = hklout
        hklout = os.path.join(self.get_working_directory(),
                              '%s_%s_sorted.mtz' % \
                              (self._scalr_pname, self._scalr_xname))

        s = self._factory.Sortmtz()
        s.set_hklin(hklin)
        s.set_hklout(hklout)

        s.sort(vrset=-99999999.0)

        self._prepared_reflections = hklout
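
The "[h,k,l]" branch above only rewrites the symmetry record, which is why
it is valid solely for the identity reindex operator. A minimal sketch of
the sgtbx part, with an illustrative symbol:

from cctbx import sgtbx

symbol = "P 21 21 21"  # illustrative; in the code above it comes from pointless
space_group = sgtbx.space_group(sgtbx.space_group_symbols(symbol).hall())
print(space_group.type().lookup_symbol())
# mtz.object(hklin).set_space_group(space_group).write(hklout)
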
Code example #34
    def _do_indexing(self, method=None):
        indexer = self.Index()
        for spot_list in self._indxr_payload["spot_lists"]:
            indexer.add_spot_filename(spot_list)
        for filename in self._indxr_payload["experiments"]:
            indexer.add_sweep_filename(filename)
        if PhilIndex.params.dials.index.phil_file is not None:
            indexer.set_phil_file(PhilIndex.params.dials.index.phil_file)
        indexer.set_max_cell(
            max_cell=PhilIndex.params.dials.index.max_cell,
            max_height_fraction=PhilIndex.params.dials.index.
            max_cell_estimation.max_height_fraction,
        )
        if PhilIndex.params.xia2.settings.small_molecule:
            indexer.set_min_cell(3)
        if PhilIndex.params.dials.fix_geometry:
            indexer.set_detector_fix("all")
            indexer.set_beam_fix("all")
        elif PhilIndex.params.dials.fix_distance:
            indexer.set_detector_fix("distance")
        indexer.set_close_to_spindle_cutoff(
            PhilIndex.params.dials.close_to_spindle_cutoff)

        if self._indxr_input_lattice:
            indexer.set_indexer_input_lattice(self._indxr_input_lattice)
            logger.debug("Set lattice: %s", self._indxr_input_lattice)

        if self._indxr_input_cell:
            indexer.set_indexer_input_cell(self._indxr_input_cell)
            logger.debug("Set cell: %f %f %f %f %f %f" %
                         self._indxr_input_cell)

        if method is None:
            if PhilIndex.params.dials.index.method is None:
                method = "fft3d"
                logger.debug("Choosing indexing method: %s", method)
            else:
                method = PhilIndex.params.dials.index.method

        FileHandler.record_log_file("%s INDEX" % self.get_indexer_full_name(),
                                    indexer.get_log_file())
        indexer.run(method)

        if not os.path.exists(indexer.get_experiments_filename()):
            raise RuntimeError(
                "Indexing has failed: see %s for more details." %
                indexer.get_log_file())
        elif not os.path.exists(indexer.get_indexed_filename()):
            raise RuntimeError("Indexing has failed: %s does not exist." %
                               indexer.get_indexed_filename())

        report = self.Report()
        report.set_experiments_filename(indexer.get_experiments_filename())
        report.set_reflections_filename(indexer.get_indexed_filename())
        html_filename = os.path.join(
            self.get_working_directory(),
            "%i_dials.index.report.html" % report.get_xpid(),
        )
        report.set_html_filename(html_filename)
        report.run()
        FileHandler.record_html_file("%s INDEX" % self.get_indexer_full_name(),
                                     html_filename)

        return indexer
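
The post-run checks above treat missing output files as a hard failure. The
same pattern as a small, generic helper (a sketch, not xia2 API):

import os

def require_outputs(log_file, *filenames):
    """Raise if an external job did not produce its expected output files."""
    for filename in filenames:
        if not os.path.exists(filename):
            raise RuntimeError(
                "job failed: %s does not exist (see %s for details)"
                % (filename, log_file))

# require_outputs("dials.index.log", "indexed.expt", "indexed.refl")  # hypothetical names
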
Code example #35
File: CommonScaler.py  Project: hainm/xia2
  def _sort_together_data_xds_one_sweep(self):

    assert(len(self._sweep_information) == 1)

    epoch = list(self._sweep_information)[0]
    hklin = self._sweep_information[epoch]['scaled_reflections']

    if Flags.get_chef():
      self._sweep_information_to_chef()

    if self.get_scaler_reference_reflection_file():
      md = self._factory.Mtzdump()
      md.set_hklin(self.get_scaler_reference_reflection_file())
      md.dump()

      spacegroups = [md.get_spacegroup()]
      reindex_operator = 'h,k,l'

    elif self._scalr_input_spacegroup:
      Debug.write('Assigning user input spacegroup: %s' % \
                  self._scalr_input_spacegroup)
      spacegroups = [self._scalr_input_spacegroup]
      reindex_operator = 'h,k,l'

    else:
      pointless = self._factory.Pointless()
      pointless.set_hklin(hklin)
      pointless.decide_spacegroup()

      FileHandler.record_log_file('%s %s pointless' % \
                                  (self._scalr_pname,
                                   self._scalr_xname),
                                  pointless.get_log_file())

      spacegroups = pointless.get_likely_spacegroups()
      reindex_operator = pointless.get_spacegroup_reindex_operator()


    self._scalr_likely_spacegroups = spacegroups
    spacegroup = self._scalr_likely_spacegroups[0]

    self._scalr_reindex_operator = reindex_operator

    Chatter.write('Likely spacegroups:')
    for spag in self._scalr_likely_spacegroups:
      Chatter.write('%s' % spag)

    Chatter.write(
        'Reindexing to first spacegroup setting: %s (%s)' % \
        (spacegroup, clean_reindex_operator(reindex_operator)))

    hklout = os.path.join(self.get_working_directory(),
                          '%s_%s_reindex.mtz' % \
                          (self._scalr_pname, self._scalr_xname))

    FileHandler.record_temporary_file(hklout)

    if reindex_operator == '[h,k,l]':
      # just assign spacegroup

      from iotbx import mtz
      from cctbx import sgtbx

      s = sgtbx.space_group(sgtbx.space_group_symbols(
           str(spacegroup)).hall())

      m = mtz.object(hklin)
      m.set_space_group(s).write(hklout)
      self._scalr_cell = m.crystals()[-1].unit_cell().parameters()
      Debug.write(
          'Updating unit cell to %.2f %.2f %.2f %.2f %.2f %.2f' % \
          tuple(self._scalr_cell))
      del m
      del s

    else:
      ri = self._factory.Reindex()
      ri.set_hklin(hklin)
      ri.set_hklout(hklout)
      ri.set_spacegroup(spacegroup)
      ri.set_operator(reindex_operator)
      ri.reindex()

      Debug.write(
          'Updating unit cell to %.2f %.2f %.2f %.2f %.2f %.2f' % \
          tuple(ri.get_cell()))
      self._scalr_cell = tuple(ri.get_cell())

    hklin = hklout
    hklout = os.path.join(self.get_working_directory(),
                          '%s_%s_sorted.mtz' % \
                          (self._scalr_pname, self._scalr_xname))

    s = self._factory.Sortmtz()
    s.set_hklin(hklin)
    s.set_hklout(hklout)

    s.sort(vrset = -99999999.0)

    self._prepared_reflections = hklout

    return
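
This older fork shells out to Mtzdump to read the reference file's space
group, where example #33 above uses MtzUtils. An equivalent one-liner with
iotbx, sketched on the assumption that the reference file is a standard MTZ:

from iotbx import mtz

def space_group_name_from_mtz(hklin):
    return mtz.object(hklin).space_group().type().lookup_symbol()

# print(space_group_name_from_mtz("reference.mtz"))  # hypothetical path
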
Code example #36
    def write_xml(self, file, command_line="", working_phil=None):
        if working_phil is not None:
            PhilIndex.merge_phil(working_phil)
        params = PhilIndex.get_python_object()

        fout = open(file, "w")

        fout.write('<?xml version="1.0"?>')
        fout.write("<AutoProcContainer>\n")

        for crystal in sorted(self._crystals):
            xcrystal = self._crystals[crystal]

            cell = xcrystal.get_cell()
            spacegroup = xcrystal.get_likely_spacegroups()[0]

            fout.write("<AutoProc><spaceGroup>%s</spaceGroup>" % spacegroup)
            self.write_refined_cell(fout, cell)
            fout.write("</AutoProc>")

            fout.write("<AutoProcScalingContainer>")
            fout.write("<AutoProcScaling>")
            self.write_date(fout)
            fout.write("</AutoProcScaling>")

            statistics_all = xcrystal.get_statistics()
            reflection_files = xcrystal.get_scaled_merged_reflections()

            for key in statistics_all:
                pname, xname, dname = key

                # FIXME should assert that the dname is a
                # valid wavelength name

                keys = [
                    "High resolution limit",
                    "Low resolution limit",
                    "Completeness",
                    "Multiplicity",
                    "I/sigma",
                    "Rmerge(I+/-)",
                    "CC half",
                    "Anomalous completeness",
                    "Anomalous correlation",
                    "Anomalous multiplicity",
                    "Total observations",
                    "Total unique",
                    "Rmeas(I)",
                    "Rmeas(I+/-)",
                    "Rpim(I)",
                    "Rpim(I+/-)",
                    "Partial Bias",
                ]

                stats = [k for k in keys if k in statistics_all[key]]

                xwavelength = xcrystal.get_xwavelength(dname)
                sweeps = xwavelength.get_sweeps()

                for j, name in enumerate(
                    ["overall", "innerShell", "outerShell"]):
                    statistics_cache = {}

                    for s in stats:
                        if isinstance(statistics_all[key][s], (list, tuple)):
                            statistics_cache[s] = statistics_all[key][s][j]

                    # send these to be written out
                    self.write_scaling_statistics(fout, name, statistics_cache)

                for sweep in sweeps:
                    fout.write("<AutoProcIntegrationContainer>\n")
                    if "#" in sweep.get_template():
                        image_name = sweep.get_image_name(0)
                    else:
                        image_name = os.path.join(sweep.get_directory(),
                                                  sweep.get_template())
                    fout.write("<Image><fileName>%s</fileName>" %
                               os.path.split(image_name)[-1])
                    fout.write("<fileLocation>%s</fileLocation></Image>" %
                               sanitize(os.path.split(image_name)[0]))
                    fout.write("<AutoProcIntegration>\n")
                    cell = sweep.get_integrater_cell()
                    self.write_cell(fout, cell)

                    # FIXME this is naughty
                    intgr = sweep._get_integrater()

                    start, end = intgr.get_integrater_wedge()

                    fout.write("<startImageNumber>%d</startImageNumber>" %
                               start)

                    fout.write("<endImageNumber>%d</endImageNumber>" % end)

                    # FIXME this is naughty
                    indxr = sweep._get_indexer()

                    fout.write(
                        "<refinedDetectorDistance>%f</refinedDetectorDistance>"
                        % indxr.get_indexer_distance())

                    beam = indxr.get_indexer_beam_centre_raw_image()

                    fout.write("<refinedXBeam>%f</refinedXBeam>" % beam[0])
                    fout.write("<refinedYBeam>%f</refinedYBeam>" % beam[1])

                    fout.write("</AutoProcIntegration>\n")
                    fout.write("</AutoProcIntegrationContainer>\n")

            fout.write("</AutoProcScalingContainer>")

            # file unpacking nonsense

            if not command_line:
                from xia2.Handlers.CommandLine import CommandLine

                command_line = CommandLine.get_command_line()

            pipeline = params.xia2.settings.pipeline
            fout.write("<AutoProcProgramContainer><AutoProcProgram>")
            fout.write("<processingCommandLine>%s</processingCommandLine>" %
                       sanitize(command_line))
            fout.write("<processingPrograms>xia2 %s</processingPrograms>" %
                       pipeline)
            fout.write("</AutoProcProgram>")

            data_directory = self._project.path / "DataFiles"
            log_directory = self._project.path / "LogFiles"

            for k in reflection_files:
                reflection_file = reflection_files[k]

                if not isinstance(reflection_file, type("")):
                    continue

                reflection_file = FileHandler.get_data_file(
                    self._project.path, reflection_file)

                basename = os.path.basename(reflection_file)
                if data_directory.joinpath(basename).exists():
                    # Use file in DataFiles directory in preference (if it exists)
                    reflection_file = str(data_directory.joinpath(basename))

                fout.write("<AutoProcProgramAttachment><fileType>Result")
                fout.write("</fileType><fileName>%s</fileName>" %
                           os.path.split(reflection_file)[-1])
                fout.write("<filePath>%s</filePath>" %
                           sanitize(os.path.split(reflection_file)[0]))
                fout.write("</AutoProcProgramAttachment>\n")

            g = log_directory.glob("*merging-statistics.json")
            for merging_stats_json in g:
                fout.write("<AutoProcProgramAttachment><fileType>Graph")
                fout.write("</fileType><fileName>%s</fileName>" %
                           os.path.split(str(merging_stats_json))[-1])
                fout.write("<filePath>%s</filePath>" %
                           sanitize(str(log_directory)))
                fout.write("</AutoProcProgramAttachment>\n")

            # add the xia2.txt file...

            fout.write("<AutoProcProgramAttachment><fileType>Log")
            fout.write("</fileType><fileName>xia2.txt</fileName>")
            fout.write("<filePath>%s</filePath>" % sanitize(os.getcwd()))
            fout.write("</AutoProcProgramAttachment>\n")

            fout.write("</AutoProcProgramContainer>")

        fout.write("</AutoProcContainer>\n")
        fout.close()
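
sanitize above is a xia2 helper whose definition is not shown here. For
string-concatenated XML like this, the core job - making paths and command
lines safe as element text - is what the standard library's escape does; a
sketch:

from xml.sax.saxutils import escape

def sanitize_for_xml(text):
    # escape &, < and > so the value is safe as XML element text
    return escape(str(text))

print(sanitize_for_xml("xia2 image=/data/a&b_0001.cbf"))
# -> xia2 image=/data/a&amp;b_0001.cbf
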
Code example #37
File: DialsIntegrater.py  Project: xia2/xia2
  def _integrate(self):
    '''Actually do the integration - in XDS terms this will mean running
    DEFPIX and INTEGRATE to measure all the reflections.'''

    images_str = '%d to %d' % tuple(self._intgr_wedge)
    cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % tuple(self._intgr_cell)

    if len(self._fp_directory) <= 50:
      dirname = self._fp_directory
    else:
      dirname = '...%s' % self._fp_directory[-46:]

    Journal.block(
        'integrating', self._intgr_sweep_name, 'DIALS',
        {'images':images_str,
         'cell':cell_str,
         'lattice':self.get_integrater_refiner().get_refiner_lattice(),
         'template':self._fp_template,
         'directory':dirname,
         'resolution':'%.2f' % self._intgr_reso_high})

    integrate = self.Integrate()

    # decide what images we are going to process, if not already
    # specified

    if not self._intgr_wedge:
      images = self.get_matching_images()
      self.set_integrater_wedge(min(images),
                                max(images))

    imageset = self.get_imageset()
    beam = imageset.get_beam()
    detector = imageset.get_detector()

    d_min_limit = detector.get_max_resolution(beam.get_s0())
    if d_min_limit > self._intgr_reso_high \
        or PhilIndex.params.xia2.settings.resolution.keep_all_reflections:
      Debug.write('Overriding high resolution limit: %f => %f' % \
                  (self._intgr_reso_high, d_min_limit))
      self._intgr_reso_high = d_min_limit

    integrate.set_experiments_filename(self._intgr_experiments_filename)
    integrate.set_reflections_filename(self._intgr_indexed_filename)
    integrate.set_d_max(self._intgr_reso_low)
    integrate.set_d_min(self._intgr_reso_high)
    pname, xname, dname = self.get_integrater_project_info()
    sweep = self.get_integrater_sweep_name()
    FileHandler.record_log_file('%s %s %s %s INTEGRATE' % \
                                (pname, xname, dname, sweep),
                                integrate.get_log_file())

    try:
      integrate.run()
    except RuntimeError as e:
      s = str(e)
      if ('dials.integrate requires more memory than is available.' in s
          and not self._intgr_reso_high):
        # Try to estimate a more sensible resolution limit for integration
        # in case we were just integrating noise to the edge of the detector
        images = self._integrate_select_images_wedges()

        Debug.write(
          'Integrating subset of images to estimate resolution limit.\n'
          'Integrating images %s' %images)

        integrate = self.Integrate()
        integrate.set_experiments_filename(self._intgr_experiments_filename)
        integrate.set_reflections_filename(self._intgr_indexed_filename)
        integrate.set_d_max(self._intgr_reso_low)
        integrate.set_d_min(self._intgr_reso_high)
        for (start, stop) in images:
          integrate.add_scan_range(
              start - self.get_matching_images()[0],
              stop - self.get_matching_images()[0])
        integrate.set_reflections_per_degree(1000)
        integrate.run()

        integrated_pickle = integrate.get_integrated_filename()

        from xia2.Wrappers.Dials.EstimateResolutionLimit import EstimateResolutionLimit
        d_min_estimater = EstimateResolutionLimit()
        d_min_estimater.set_working_directory(self.get_working_directory())
        auto_logfiler(d_min_estimater)
        d_min_estimater.set_experiments_filename(self._intgr_experiments_filename)
        d_min_estimater.set_reflections_filename(integrated_pickle)
        d_min = d_min_estimater.run()

        Debug.write('Estimate for d_min: %.2f' %d_min)
        Debug.write('Re-running integration to this resolution limit')

        self._intgr_reso_high = d_min
        self.set_integrater_done(False)
        return
      raise
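
The recovery branch above follows a recurring xia2 pattern: on a recognised
failure, adjust a parameter, mark the step not-done and return so the
framework re-enters it. A generic, hedged sketch of that control flow (not
xia2 API):

def run_with_recovery(step, recover, is_recoverable, max_attempts=2):
    for attempt in range(max_attempts):
        try:
            return step()
        except RuntimeError as e:
            if attempt + 1 == max_attempts or not is_recoverable(e):
                raise
            recover(e)  # e.g. raise the resolution limit before retrying

# run_with_recovery(integrate.run,
#                   recover=lambda e: integrate.set_d_min(2.0),
#                   is_recoverable=lambda e: "more memory" in str(e))
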
Code example #38
File: CCP4ScalerA.py  Project: hainm/xia2
    else:
      sc.scale()

    # then gather up all of the resulting reflection files
    # and convert them into the required formats (.sca, .mtz.)

    data = sc.get_summary()

    loggraph = sc.parse_ccp4_loggraph()

    resolution_info = { }

    reflection_files = sc.get_scaled_reflection_files()

    for dataset in reflection_files:
      FileHandler.record_temporary_file(reflection_files[dataset])

    for key in loggraph:
      if 'Analysis against resolution' in key:
        dataset = key.split(',')[-1].strip()
        resolution_info[dataset] = transpose_loggraph(
            loggraph[key])

    highest_resolution = 100.0

    # check in here that there is actually some data to scale..!

    if len(resolution_info) == 0:
      raise RuntimeError('no resolution info')

    for epoch in epochs:
Code example #39
File: CommonScaler.py  Project: hainm/xia2
  def _sort_together_data_ccp4(self):
    '''Sort together in the right order (rebatching as we go) the sweeps
    we want to scale together.'''

    max_batches = 0

    for epoch in self._sweep_handler.get_epochs():

      si = self._sweep_handler.get_sweep_information(epoch)
      pname, xname, dname = si.get_project_info()
      sname = si.get_sweep_name()
      hklin = si.get_reflections()

      # limit the reflections - e.g. if we are re-running the scaling step
      # on just a subset of the integrated data
      limit_batch_range = None
      for sweep in PhilIndex.params.xia2.settings.sweep:
        if sweep.id == sname and sweep.range is not None:
          limit_batch_range = sweep.range
          break

      if limit_batch_range is not None:
        Debug.write('Limiting batch range for %s: %s' %(sname, limit_batch_range))
        start, end = limit_batch_range
        hklout = os.path.splitext(hklin)[0] + '_tmp.mtz'
        FileHandler.record_temporary_file(hklout)
        rb = self._factory.Pointless()
        rb.set_hklin(hklin)
        rb.set_hklout(hklout)
        rb.limit_batches(start, end)
        si.set_reflections(hklout)
        si.set_batches(limit_batch_range)

      # keep a count of the maximum number of batches in a block -
      # this will be used to make rebatch work below.

      hklin = si.get_reflections()
      md = self._factory.Mtzdump()
      md.set_hklin(hklin)
      md.dump()

      batches = md.get_batches()
      if 1 + max(batches) - min(batches) > max_batches:
        max_batches = max(batches) - min(batches) + 1

      datasets = md.get_datasets()

      Debug.write('In reflection file %s found:' % hklin)
      for d in datasets:
        Debug.write('... %s' % d)

      dataset_info = md.get_dataset_info(datasets[0])

    Debug.write('Biggest sweep has %d batches' % max_batches)
    max_batches = nifty_power_of_ten(max_batches)

    # then rebatch the files, to make sure that the batch numbers are
    # in the same order as the epochs of data collection.

    counter = 0

    for epoch in self._sweep_handler.get_epochs():

      si = self._sweep_handler.get_sweep_information(epoch)
      rb = self._factory.Rebatch()

      hklin = si.get_reflections()

      pname, xname, dname = si.get_project_info()
      sname = si.get_sweep_name()

      hklout = os.path.join(self.get_working_directory(),
                            '%s_%s_%s_%s_integrated.mtz' % \
                            (pname, xname, dname, sname))

      first_batch = min(si.get_batches())
      si.set_batch_offset(counter * max_batches - first_batch + 1)

      rb.set_hklin(hklin)
      rb.set_first_batch(counter * max_batches + 1)
      rb.set_project_info(pname, xname, dname)
      rb.set_hklout(hklout)

      new_batches = rb.rebatch()

      # update the "input information"

      si.set_reflections(hklout)
      si.set_batches(new_batches)

      # update the counter & recycle

      counter += 1

    s = self._factory.Sortmtz()

    hklout = os.path.join(self.get_working_directory(),
                          '%s_%s_sorted.mtz' % \
                          (self._scalr_pname, self._scalr_xname))

    s.set_hklout(hklout)

    for epoch in self._sweep_handler.get_epochs():
      s.add_hklin(self._sweep_handler.get_sweep_information(
          epoch).get_reflections())

    s.sort()

    # verify that the measurements are in the correct setting
    # choice for the spacegroup

    hklin = hklout
    hklout = hklin.replace('sorted.mtz', 'temp.mtz')

    if not self.get_scaler_reference_reflection_file():

      p = self._factory.Pointless()

      FileHandler.record_log_file('%s %s pointless' % \
                                  (self._scalr_pname,
                                   self._scalr_xname),
                                  p.get_log_file())

      if len(self._sweep_handler.get_epochs()) > 1:
        p.set_hklin(hklin)
      else:
        # permit the use of pointless preparation...
        epoch = self._sweep_handler.get_epochs()[0]
        p.set_hklin(self._prepare_pointless_hklin(
            hklin, self._sweep_handler.get_sweep_information(
            epoch).get_integrater().get_phi_width()))

      if self._scalr_input_spacegroup:
        Debug.write('Assigning user input spacegroup: %s' % \
                    self._scalr_input_spacegroup)

        p.decide_spacegroup()
        spacegroup = p.get_spacegroup()
        reindex_operator = p.get_spacegroup_reindex_operator()

        Debug.write('Pointless thought %s (reindex as %s)' % \
                    (spacegroup, reindex_operator))

        spacegroup = self._scalr_input_spacegroup
        reindex_operator = 'h,k,l'

      elif Flags.get_small_molecule() and False:
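        # NB the 'and False' above makes this branch unreachable as written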
        p.decide_pointgroup()
        spacegroup = p.get_pointgroup()
        reindex_operator = p.get_reindex_operator()

        Debug.write('Pointless thought %s (reindex as %s)' % \
                    (spacegroup, reindex_operator))
        self._scalr_likely_spacegroups = [spacegroup]

      else:
        p.decide_spacegroup()
        spacegroup = p.get_spacegroup()
        reindex_operator = p.get_spacegroup_reindex_operator()

        Debug.write('Pointless thought %s (reindex as %s)' % \
                    (spacegroup, reindex_operator))

      if self._scalr_input_spacegroup:
        self._scalr_likely_spacegroups = [self._scalr_input_spacegroup]
      else:
        self._scalr_likely_spacegroups = p.get_likely_spacegroups()

      Chatter.write('Likely spacegroups:')
      for spag in self._scalr_likely_spacegroups:
        Chatter.write('%s' % spag)

      Chatter.write(
          'Reindexing to first spacegroup setting: %s (%s)' % \
          (spacegroup, clean_reindex_operator(reindex_operator)))

    else:

      md = self._factory.Mtzdump()
      md.set_hklin(self.get_scaler_reference_reflection_file())
      md.dump()

      spacegroup = md.get_spacegroup()
      reindex_operator = 'h,k,l'

      self._scalr_likely_spacegroups = [spacegroup]

      Debug.write('Assigning spacegroup %s from reference' % \
                  spacegroup)

    # then run reindex to set the correct spacegroup

    ri = self._factory.Reindex()
    ri.set_hklin(hklin)
    ri.set_hklout(hklout)
    ri.set_spacegroup(spacegroup)
    ri.set_operator(reindex_operator)
    ri.reindex()

    FileHandler.record_temporary_file(hklout)

    # then resort the reflections (one last time!)

    s = self._factory.Sortmtz()

    temp = hklin
    hklin = hklout
    hklout = temp

    s.add_hklin(hklin)
    s.set_hklout(hklout)

    s.sort()

    # done preparing!

    self._prepared_reflections = s.get_hklout()

    return
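
Before rebatching, the code above finds the biggest sweep and rounds its batch count up with nifty_power_of_ten, so that each sweep lands in its own round-numbered block and batch numbers follow collection order. A small sketch of that arithmetic (the rounding behaviour shown is an assumption about the helper, and batch_offsets is illustrative, not a xia2 function):

def nifty_power_of_ten(n):
    # smallest power of ten strictly greater than n (assumed behaviour,
    # e.g. 360 -> 1000)
    p = 10
    while p <= n:
        p *= 10
    return p

def batch_offsets(sweep_batch_ranges):
    """sweep_batch_ranges: list of (first_batch, last_batch) per sweep.
    Returns the new first batch number assigned to each sweep."""
    max_batches = max(last - first + 1 for first, last in sweep_batch_ranges)
    block = nifty_power_of_ten(max_batches)
    return [counter * block + 1 for counter in range(len(sweep_batch_ranges))]

# e.g. three 360-image sweeps -> new first batches [1, 1001, 2001]
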
Code example #40
0
File: XDSIntegrater.py Project: hainm/xia2
  def _integrate(self):
    '''Actually do the integration - in XDS terms this will mean running
    DEFPIX and INTEGRATE to measure all the reflections.'''

    experiment = self._intgr_refiner.get_refined_experiment_list(
      self.get_integrater_epoch())[0]
    crystal_model = experiment.crystal
    self._intgr_refiner_cell = crystal_model.get_unit_cell().parameters()

    images_str = '%d to %d' % tuple(self._intgr_wedge)
    cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' %tuple(self._intgr_refiner_cell)

    if len(self._fp_directory) <= 50:
      dirname = self._fp_directory
    else:
      dirname = '...%s' % self._fp_directory[-46:]

    Journal.block(
        'integrating', self._intgr_sweep_name, 'XDS',
        {'images':images_str,
         'cell':cell_str,
         'lattice':self._intgr_refiner.get_refiner_lattice(),
         'template':self._fp_template,
         'directory':dirname,
         'resolution':'%.2f' % self._intgr_reso_high})

    first_image_in_wedge = self.get_image_name(self._intgr_wedge[0])

    defpix = self.Defpix()

    # pass in the correct data

    for fname in ['X-CORRECTIONS.cbf',
                  'Y-CORRECTIONS.cbf',
                  'BKGINIT.cbf',
                  'XPARM.XDS']:
      defpix.set_input_data_file(fname, self._xds_data_files[fname])

    defpix.set_data_range(self._intgr_wedge[0],
                          self._intgr_wedge[1])

    if self.get_integrater_high_resolution() > 0.0 and \
           self.get_integrater_user_resolution():
      Debug.write('Setting resolution limit in DEFPIX to %.2f' % \
                  self.get_integrater_high_resolution())
      defpix.set_resolution_high(self.get_integrater_high_resolution())
      defpix.set_resolution_low(self.get_integrater_low_resolution())

    elif self.get_integrater_low_resolution():
      Debug.write('Setting low resolution limit in DEFPIX to %.2f' % \
                  self.get_integrater_low_resolution())
      defpix.set_resolution_high(0.0)
      defpix.set_resolution_low(self.get_integrater_low_resolution())

    defpix.run()

    # and gather the result files
    for fname in ['BKGPIX.cbf',
                  'ABS.cbf']:
      self._xds_data_files[fname] = defpix.get_output_data_file(fname)

    integrate = self.Integrate()

    if self._xds_integrate_parameters:
      integrate.set_updates(self._xds_integrate_parameters)

    # decide what images we are going to process, if not already
    # specified

    if not self._intgr_wedge:
      images = self.get_matching_images()
      self.set_integrater_wedge(min(images),
                                max(images))

    first_image_in_wedge = self.get_image_name(self._intgr_wedge[0])

    integrate.set_data_range(self._intgr_wedge[0],
                             self._intgr_wedge[1])

    for fname in ['X-CORRECTIONS.cbf',
                  'Y-CORRECTIONS.cbf',
                  'BLANK.cbf',
                  'BKGPIX.cbf',
                  'GAIN.cbf']:
      integrate.set_input_data_file(fname, self._xds_data_files[fname])

    if 'GXPARM.XDS' in self._xds_data_files:
      Debug.write('Using globally refined parameters')
      integrate.set_input_data_file(
          'XPARM.XDS', self._xds_data_files['GXPARM.XDS'])
      integrate.set_refined_xparm()
    else:
      integrate.set_input_data_file(
          'XPARM.XDS', self._xds_data_files['XPARM.XDS'])

    integrate.run()

    self._intgr_per_image_statistics = integrate.get_per_image_statistics()
    Chatter.write(self.show_per_image_statistics())

    # record the log file -

    pname, xname, dname = self.get_integrater_project_info()
    sweep = self.get_integrater_sweep_name()
    FileHandler.record_log_file('%s %s %s %s INTEGRATE' % \
                                (pname, xname, dname, sweep),
                                os.path.join(self.get_working_directory(),
                                             'INTEGRATE.LP'))

    # and copy the first pass INTEGRATE.HKL...

    lattice = self._intgr_refiner.get_refiner_lattice()
    if not os.path.exists(os.path.join(
        self.get_working_directory(),
        'INTEGRATE-%s.HKL' % lattice)):
      here = self.get_working_directory()
      shutil.copyfile(os.path.join(here, 'INTEGRATE.HKL'),
                      os.path.join(here, 'INTEGRATE-%s.HKL' % lattice))

    # record INTEGRATE.HKL for e.g. BLEND.

    FileHandler.record_more_data_file(
        '%s %s %s %s INTEGRATE' % (pname, xname, dname, sweep),
        os.path.join(self.get_working_directory(), 'INTEGRATE.HKL'))

    # should the existence of these require that I rerun the
    # integration or can we assume that the application of a
    # sensible resolution limit will achieve this??

    self._xds_integrate_parameters = integrate.get_updates()

    # record the mosaic spread &c.

    m_min, m_mean, m_max = integrate.get_mosaic()
    self.set_integrater_mosaic_min_mean_max(m_min, m_mean, m_max)

    Chatter.write('Mosaic spread: %.3f < %.3f < %.3f' % \
                  self.get_integrater_mosaic_min_mean_max())

    return os.path.join(self.get_working_directory(), 'INTEGRATE.HKL')
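
The DEFPIX resolution handling above follows a simple rule: a user-supplied high-resolution limit is forwarded together with the low limit; otherwise only a low limit is set, with 0.0 standing in for "no high-resolution cutoff" (a convention of how the wrapper is driven here, not a documented XDS constant). A hedged sketch of that decision:

def defpix_resolution(high, low, user_set_high):
    """Return the (high, low) resolution pair to pass to DEFPIX, or None
    to leave DEFPIX at its defaults."""
    if high > 0.0 and user_set_high:
        # user asked for an explicit high-resolution cutoff
        return (high, low)
    if low:
        # only a low-resolution limit known; 0.0 means 'no high cutoff'
        return (0.0, low)
    return None
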
Code example #41
0
File: CommonScaler.py Project: lizhen-dlut/xia2
    def _scale_finish_export_shelxt(self):
        '''Read hklin (unmerged reflection file) and generate SHELXT input
        file and HKL file.'''

        from iotbx.reflection_file_reader import any_reflection_file
        from iotbx.shelx import writer
        from iotbx.shelx.hklf import miller_array_export_as_shelx_hklf
        from cctbx.xray.structure import structure
        from cctbx.xray import scatterer

        for wavelength_name in self._scalr_scaled_refl_files.keys():
            prefix = wavelength_name
            if len(self._scalr_scaled_refl_files.keys()) == 1:
                prefix = 'shelxt'
            prefixpath = os.path.join(self.get_working_directory(), prefix)

            mtz_unmerged = self._scalr_scaled_reflection_files['mtz_unmerged'][
                wavelength_name]
            reader = any_reflection_file(mtz_unmerged)
            intensities = [
                ma for ma in reader.as_miller_arrays(merge_equivalents=False)
                if ma.info().labels == ['I', 'SIGI']
            ][0]

            # FIXME do I need to reindex to a conventional setting here

            indices = reader.file_content().extract_original_index_miller_indices()
            intensities = intensities.customized_copy(indices=indices,
                                                      info=intensities.info())

            with open('%s.hkl' % prefixpath, 'wb') as hkl_file_handle:
                # limit values to 4 digits (before decimal point), as this is what shelxt
                # writes in its output files, and shelxl seems to read. ShelXL apparently
                # does not read values >9999 properly
                miller_array_export_as_shelx_hklf(
                    intensities,
                    hkl_file_handle,
                    scale_range=(-9999., 9999.),
                    normalise_if_format_overflow=True)

            crystal_symm = intensities.crystal_symmetry()

            unit_cell_dims = self._scalr_cell
            unit_cell_esds = self._scalr_cell_esd

            cb_op = crystal_symm.change_of_basis_op_to_reference_setting()

            if cb_op.c().r().as_hkl() == 'h,k,l':
                print('Change of basis to reference setting: %s' % cb_op)
                crystal_symm = crystal_symm.change_basis(cb_op)
                if str(cb_op) != "a,b,c":
                    unit_cell_dims = None
                    unit_cell_esds = None
                    # Would need to apply operation to cell errors, too. Need a test case for this

            # crystal_symm.show_summary()
            xray_structure = structure(crystal_symmetry=crystal_symm)

            compound = 'CNOH'
            if compound:
                from xia2.command_line.to_shelx import parse_compound
                result = parse_compound(compound)
                for element in result:
                    xray_structure.add_scatterer(
                        scatterer(label=element, occupancy=result[element]))

            wavelength = self._scalr_xcrystal.get_xwavelength(
                wavelength_name).get_wavelength()

            with open('%s.ins' % prefixpath, 'w') as insfile:
                insfile.write(''.join(
                    writer.generator(xray_structure,
                                     wavelength=wavelength,
                                     full_matrix_least_squares_cycles=0,
                                     title=prefix,
                                     unit_cell_dims=unit_cell_dims,
                                     unit_cell_esds=unit_cell_esds)))

            FileHandler.record_data_file('%s.ins' % prefixpath)
            FileHandler.record_data_file('%s.hkl' % prefixpath)
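
The scale_range=(-9999., 9999.) argument above reflects a fixed-width file format: SHELX HKLF records hold at most four digits before the decimal point, so intensities must be rescaled to fit. A hedged sketch of the kind of guard normalise_if_format_overflow is being asked to apply (this mirrors the intent, not the iotbx internals):

def shelx_hklf_scale(intensities, limit=9999.0):
    """Return a multiplicative scale putting all values within +/-limit."""
    biggest = max(abs(i) for i in intensities)
    if biggest <= limit:
        return 1.0  # already representable as written
    return limit / biggest

# e.g. intensities ranging up to 250000 get scaled by 9999/250000
# before being written out
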
Code example #42
0
    def _index(self):
        if PhilIndex.params.dials.index.method in (libtbx.Auto, None):
            if self._indxr_input_cell is not None:
                indexer = self._do_indexing("real_space_grid_search")
            else:
                try:
                    indexer_fft3d = self._do_indexing(method="fft3d")
                    nref_3d, rmsd_3d = indexer_fft3d.get_nref_rmsds()
                except Exception as e:
                    nref_3d = None
                    rmsd_3d = None
                try:
                    indexer_fft1d = self._do_indexing(method="fft1d")
                    nref_1d, rmsd_1d = indexer_fft1d.get_nref_rmsds()
                except Exception as e:
                    nref_1d = None
                    rmsd_1d = None

                if ((nref_1d is not None and nref_3d is None)
                        or (nref_1d is not None and nref_3d is not None
                            and nref_1d > nref_3d
                            and rmsd_1d[0] < rmsd_3d[0]
                            and rmsd_1d[1] < rmsd_3d[1]
                            and rmsd_1d[2] < rmsd_3d[2])):
                    indexer = indexer_fft1d
                elif nref_3d is not None:
                    indexer = indexer_fft3d
                else:
                    raise RuntimeError(e)

        else:
            indexer = self._do_indexing(
                method=PhilIndex.params.dials.index.method)

        # not strictly the P1 cell, rather the cell that was used in indexing
        self._p1_cell = indexer._p1_cell
        self.set_indexer_payload("indexed_filename",
                                 indexer.get_indexed_filename())

        from cctbx.sgtbx import bravais_types
        from dxtbx.serialize import load

        indexed_file = indexer.get_indexed_filename()
        indexed_experiments = indexer.get_experiments_filename()

        fast_mode = PhilIndex.params.dials.fast_mode
        trust_beam_centre = PhilIndex.params.xia2.settings.trust_beam_centre
        multi_sweep_indexing = PhilIndex.params.xia2.settings.multi_sweep_indexing

        if not (trust_beam_centre or fast_mode or multi_sweep_indexing):
            checksym = self.CheckIndexingSymmetry()
            checksym.set_experiments_filename(indexed_experiments)
            checksym.set_indexed_filename(indexed_file)
            checksym.set_grid_search_scope(1)
            checksym.run()
            hkl_offset = checksym.get_hkl_offset()
            Debug.write("hkl_offset: %s" % str(hkl_offset))
            if hkl_offset is not None and hkl_offset != (0, 0, 0):
                reindex = self.Reindex()
                reindex.set_hkl_offset(hkl_offset)
                reindex.set_indexed_filename(indexed_file)
                reindex.run()
                indexed_file = reindex.get_reindexed_reflections_filename()

                # do some scan-static refinement - run twice, first without outlier
                # rejection as the model is too far from reality to do a sensible job of
                # outlier rejection
                refiner = self.Refine()
                refiner.set_experiments_filename(indexed_experiments)
                refiner.set_indexed_filename(
                    reindex.get_reindexed_reflections_filename())
                refiner.set_outlier_algorithm(None)
                refiner.run()
                indexed_experiments = refiner.get_refined_experiments_filename()

                # now again with outlier rejection (possibly)
                refiner = self.Refine()
                refiner.set_experiments_filename(indexed_experiments)
                refiner.set_indexed_filename(indexed_file)
                refiner.run()
                indexed_experiments = refiner.get_refined_experiments_filename()

        if self._indxr_input_lattice is None:

            # FIXME in here should respect the input unit cell and lattice if provided

            # FIXME from this (i) populate the helper table,
            # (ii) try to avoid re-running the indexing
            # step if we eliminate a solution as we have all of the refined results
            # already available.

            rbs = self.RefineBravaisSettings()
            rbs.set_experiments_filename(indexed_experiments)
            rbs.set_indexed_filename(indexed_file)
            if PhilIndex.params.dials.fix_geometry:
                rbs.set_detector_fix('all')
                rbs.set_beam_fix('all')

            FileHandler.record_log_file(
                '%s LATTICE' % self.get_indexer_full_name(),
                rbs.get_log_file())
            rbs.run()

            from cctbx import crystal, sgtbx

            for k in sorted(rbs.get_bravais_summary()):
                summary = rbs.get_bravais_summary()[k]

                # FIXME need to do this better - for the moment only accept lattices
                # where R.M.S. deviation is less than twice P1 R.M.S. deviation.

                if self._indxr_input_lattice is None:
                    if not summary['recommended']:
                        continue

                experiments = load.experiment_list(summary['experiments_file'],
                                                   check_format=False)
                cryst = experiments.crystals()[0]
                cs = crystal.symmetry(unit_cell=cryst.get_unit_cell(),
                                      space_group=cryst.get_space_group())
                cb_op_best_to_ref = cs.change_of_basis_op_to_reference_setting()
                cs_reference = cs.change_basis(cb_op_best_to_ref)
                lattice = str(
                    bravais_types.bravais_lattice(
                        group=cs_reference.space_group()))
                cb_op = cb_op_best_to_ref * sgtbx.change_of_basis_op(
                    str(summary['cb_op']))

                self._solutions[k] = {
                    'number': k,
                    'mosaic': 0.0,
                    'metric': summary['max_angular_difference'],
                    'rmsd': summary['rmsd'],
                    'nspots': summary['nspots'],
                    'lattice': lattice,
                    'cell': cs_reference.unit_cell().parameters(),
                    'experiments_file': summary['experiments_file'],
                    'cb_op': str(cb_op)
                }

            self._solution = self.get_solution()
            self._indxr_lattice = self._solution['lattice']

            for solution in self._solutions.keys():
                lattice = self._solutions[solution]['lattice']
                if (self._indxr_input_lattice is not None
                        and self._indxr_input_lattice != lattice):
                    continue
                if lattice in self._indxr_other_lattice_cell:
                    if self._indxr_other_lattice_cell[lattice]['metric'] < \
                      self._solutions[solution]['metric']:
                        continue

                self._indxr_other_lattice_cell[lattice] = {
                    'metric': self._solutions[solution]['metric'],
                    'cell': self._solutions[solution]['cell']
                }

            self._indxr_mosaic = self._solution['mosaic']

            experiment_list = load.experiment_list(
                self._solution['experiments_file'])
            self.set_indexer_experiment_list(experiment_list)

            # reindex the output experiments list to the reference setting
            # (from the best cell/conventional setting)
            cb_op_to_ref = experiment_list.crystals()[0].get_space_group().info()\
              .change_of_basis_op_to_reference_setting()
            reindex = self.Reindex()
            reindex.set_experiments_filename(
                self._solution['experiments_file'])
            reindex.set_cb_op(cb_op_to_ref)
            reindex.set_space_group(
                str(lattice_to_spacegroup_number(self._solution['lattice'])))
            reindex.run()
            experiments_file = reindex.get_reindexed_experiments_filename()
            experiment_list = load.experiment_list(experiments_file)
            self.set_indexer_experiment_list(experiment_list)
            self.set_indexer_payload("experiments_filename", experiments_file)

            # reindex the output reflection list to this solution
            reindex = self.Reindex()
            reindex.set_indexed_filename(indexed_file)
            reindex.set_cb_op(self._solution['cb_op'])
            reindex.set_space_group(
                str(lattice_to_spacegroup_number(self._solution['lattice'])))
            reindex.run()
            indexed_file = reindex.get_reindexed_reflections_filename()
            self.set_indexer_payload("indexed_filename", indexed_file)

        else:
            experiment_list = load.experiment_list(indexed_experiments)
            self.set_indexer_experiment_list(experiment_list)
            self.set_indexer_payload("experiments_filename",
                                     indexed_experiments)

            cryst = experiment_list.crystals()[0]
            lattice = str(
                bravais_types.bravais_lattice(group=cryst.get_space_group()))
            self._indxr_lattice = lattice
            self._solutions = {}
            self._solutions[0] = {
                'number': 0,
                'mosaic': 0.0,
                'metric': -1,
                'rmsd': -1,
                'nspots': -1,
                'lattice': lattice,
                'cell': cryst.get_unit_cell().parameters(),
                'experiments_file': indexed_experiments,
                'cb_op': 'a,b,c'
            }

            self._indxr_other_lattice_cell[lattice] = {
                'metric': self._solutions[0]['metric'],
                'cell': self._solutions[0]['cell']
            }

        return
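
The arbitration at the top of _index() reduces to a small rule: fft3d is preferred, and fft1d wins only if fft3d failed outright, or if fft1d indexed more reflections with lower RMSDs on all three axes. A standalone sketch of that rule (illustrative helper, not a xia2 function):

def pick_indexing_result(nref_3d, rmsd_3d, nref_1d, rmsd_1d):
    """Return 'fft1d' or 'fft3d'; nref_* is None when that indexing
    attempt raised an exception."""
    if nref_1d is not None and nref_3d is None:
        return 'fft1d'  # fft3d failed, fft1d succeeded
    if (nref_1d is not None and nref_3d is not None
            and nref_1d > nref_3d
            and all(a < b for a, b in zip(rmsd_1d, rmsd_3d))):
        return 'fft1d'  # fft1d strictly better on count and all RMSDs
    if nref_3d is not None:
        return 'fft3d'
    raise RuntimeError('both indexing attempts failed')
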
Code example #43
0
File: CommonScaler.py Project: lizhen-dlut/xia2
    def _scale_finish_chunk_6_add_free_r(self):
        hklout = os.path.join(
            self.get_working_directory(),
            '%s_%s_free_temp.mtz' % (self._scalr_pname, self._scalr_xname))

        FileHandler.record_temporary_file(hklout)

        scale_params = PhilIndex.params.xia2.settings.scale
        if self.get_scaler_freer_file():
            # e.g. via .xinfo file

            freein = self.get_scaler_freer_file()

            Debug.write('Copying FreeR_flag from %s' % freein)

            c = self._factory.Cad()
            c.set_freein(freein)
            c.add_hklin(self._scalr_scaled_reflection_files['mtz_merged'])
            c.set_hklout(hklout)
            c.copyfree()

        elif scale_params.freer_file is not None:
            # e.g. via -freer_file command line argument

            freein = scale_params.freer_file

            Debug.write('Copying FreeR_flag from %s' % freein)

            c = self._factory.Cad()
            c.set_freein(freein)
            c.add_hklin(self._scalr_scaled_reflection_files['mtz_merged'])
            c.set_hklout(hklout)
            c.copyfree()

        else:

            if scale_params.free_total:
                ntot = scale_params.free_total

                # need to get a fraction, so...
                nref = MtzUtils.nref_from_mtz(
                    self._scalr_scaled_reflection_files['mtz_merged'])
                free_fraction = float(ntot) / float(nref)
            else:
                free_fraction = scale_params.free_fraction

            f = self._factory.Freerflag()
            f.set_free_fraction(free_fraction)
            f.set_hklin(self._scalr_scaled_reflection_files['mtz_merged'])
            f.set_hklout(hklout)
            f.add_free_flag()

        # then check that this FreeR set is complete

        hklin = hklout
        hklout = os.path.join(
            self.get_working_directory(),
            '%s_%s_free.mtz' % (self._scalr_pname, self._scalr_xname))

        # default fraction of 0.05
        free_fraction = 0.05

        if scale_params.free_fraction:
            free_fraction = scale_params.free_fraction
        elif scale_params.free_total:
            ntot = scale_params.free_total

            # need to get a fraction, so...
            nref = MtzUtils.nref_from_mtz(hklin)
            free_fraction = float(ntot) / float(nref)

        f = self._factory.Freerflag()
        f.set_free_fraction(free_fraction)
        f.set_hklin(hklin)
        f.set_hklout(hklout)
        f.complete_free_flag()

        # remove 'mtz_merged' from the dictionary - this is made
        # redundant by the merged free...
        del self._scalr_scaled_reflection_files['mtz_merged']

        # changed from mtz_merged_free to plain ol' mtz
        self._scalr_scaled_reflection_files['mtz'] = hklout

        # record this for future reference
        FileHandler.record_data_file(hklout)
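
The FreeR sizing rule used twice above is simple: a requested absolute number of free reflections is converted to a fraction of the reflections present in the file, otherwise an explicit fraction is used, falling back to 0.05. A hedged sketch (resolve_free_fraction is illustrative, not a xia2 helper):

def resolve_free_fraction(nref_in_file, free_total=None, free_fraction=None,
                          default=0.05):
    """Return the fraction of reflections to flag as the free set."""
    if free_total:
        # absolute count requested - convert to a fraction of what we have
        return float(free_total) / float(nref_in_file)
    if free_fraction:
        return free_fraction
    return default

# e.g. resolve_free_fraction(40000, free_total=2000) -> 0.05
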
Code example #44
0
    def _scale_prepare(self):
        """Perform all of the preparation required to deliver the scaled
        data. This should sort together the reflection files, ensure that
        they are correctly indexed (via pointless) and generally tidy
        things up."""

        # acknowledge all of the programs we are about to use...

        Citations.cite("pointless")
        Citations.cite("aimless")
        Citations.cite("ccp4")

        # ---------- GATHER ----------

        self._sweep_handler = SweepInformationHandler(self._scalr_integraters)

        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            pname, xname, dname = si.get_project_info()
            sname = si.get_sweep_name()

            exclude_sweep = False

            for sweep in PhilIndex.params.xia2.settings.sweep:
                if sweep.id == sname and sweep.exclude:
                    exclude_sweep = True
                    break

            if exclude_sweep:
                self._sweep_handler.remove_epoch(epoch)
                logger.debug("Excluding sweep %s", sname)
            else:
                logger.debug("%-30s %s/%s/%s", "adding data from:", xname, dname, sname)

        # gather data for all images which belonged to the parent
        # crystal - allowing for the fact that things could go wrong
        # e.g. epoch information not available, exposure times not in
        # headers etc...

        for e in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(e)
            assert is_mtz_file(si.get_reflections()), repr(si.get_reflections())

        p, x = self._sweep_handler.get_project_info()
        self._scalr_pname = p
        self._scalr_xname = x

        # verify that the lattices are consistent, calling eliminate if
        # they are not N.B. there could be corner cases here

        need_to_return = False

        multi_sweep_indexing = PhilIndex.params.xia2.settings.multi_sweep_indexing

        # START OF if more than one epoch
        if len(self._sweep_handler.get_epochs()) > 1:

            # if we have multi-sweep-indexing going on then logic says all should
            # share common lattice & UB definition => this is not used here?

            # START OF if multi_sweep indexing and not input pg
            if multi_sweep_indexing and not self._scalr_input_pointgroup:
                pointless_hklins = []

                max_batches = 0
                for epoch in self._sweep_handler.get_epochs():
                    si = self._sweep_handler.get_sweep_information(epoch)
                    hklin = si.get_reflections()

                    batches = MtzUtils.batches_from_mtz(hklin)
                    if 1 + max(batches) - min(batches) > max_batches:
                        max_batches = max(batches) - min(batches) + 1

                logger.debug("Biggest sweep has %d batches", max_batches)
                max_batches = nifty_power_of_ten(max_batches)

                counter = 0

                refiners = []

                for epoch in self._sweep_handler.get_epochs():
                    si = self._sweep_handler.get_sweep_information(epoch)
                    hklin = si.get_reflections()
                    integrater = si.get_integrater()
                    refiner = integrater.get_integrater_refiner()
                    refiners.append(refiner)

                    hklin = self._prepare_pointless_hklin(
                        hklin, si.get_integrater().get_phi_width()
                    )

                    hklout = os.path.join(
                        self.get_working_directory(),
                        "%s_%s_%s_%s_prepointless.mtz"
                        % (pname, xname, dname, si.get_sweep_name()),
                    )

                    # we will want to delete this on exit
                    FileHandler.record_temporary_file(hklout)

                    first_batch = min(si.get_batches())
                    si.set_batch_offset(counter * max_batches - first_batch + 1)

                    rebatch(
                        hklin,
                        hklout,
                        first_batch=counter * max_batches + 1,
                        pname=pname,
                        xname=xname,
                        dname=dname,
                    )

                    pointless_hklins.append(hklout)

                    # update the counter & recycle
                    counter += 1

                    # SUMMARY - have added all sweeps to pointless_hklins

                s = self._factory.Sortmtz()

                pointless_hklin = os.path.join(
                    self.get_working_directory(),
                    "%s_%s_prepointless_sorted.mtz"
                    % (self._scalr_pname, self._scalr_xname),
                )

                s.set_hklout(pointless_hklin)

                for hklin in pointless_hklins:
                    s.add_hklin(hklin)

                s.sort()

                # FIXME xia2-51 in here look at running constant scaling on the
                # pointless hklin to put the runs on the same scale. Ref=[A]

                pointless_const = os.path.join(
                    self.get_working_directory(),
                    "%s_%s_prepointless_const.mtz"
                    % (self._scalr_pname, self._scalr_xname),
                )
                FileHandler.record_temporary_file(pointless_const)

                aimless_const = self._factory.Aimless()
                aimless_const.set_hklin(pointless_hklin)
                aimless_const.set_hklout(pointless_const)
                aimless_const.const()

                pointless_const = os.path.join(
                    self.get_working_directory(),
                    "%s_%s_prepointless_const_unmerged.mtz"
                    % (self._scalr_pname, self._scalr_xname),
                )
                FileHandler.record_temporary_file(pointless_const)
                pointless_hklin = pointless_const

                # FIXME xia2-51 in here need to pass all refiners to ensure that the
                # information is passed back to all of them not just the last one...
                logger.debug(
                    "Running multisweep pointless for %d sweeps", len(refiners)
                )
                pointgroup, reindex_op, ntr, pt = self._pointless_indexer_multisweep(
                    pointless_hklin, refiners
                )

                logger.debug("X1698: %s: %s", pointgroup, reindex_op)

                lattices = [Syminfo.get_lattice(pointgroup)]

                for epoch in self._sweep_handler.get_epochs():
                    si = self._sweep_handler.get_sweep_information(epoch)
                    intgr = si.get_integrater()
                    hklin = si.get_reflections()
                    refiner = intgr.get_integrater_refiner()

                    if ntr:
                        intgr.integrater_reset_reindex_operator()
                        need_to_return = True

                # SUMMARY - added all sweeps together into an mtz, ran
                # _pointless_indexer_multisweep on this, made a list of one lattice
                # and potentially reset reindex op?
            # END OF if multi_sweep indexing and not input pg

            # START OF if not multi_sweep, or input pg given
            else:
                lattices = []

                for epoch in self._sweep_handler.get_epochs():

                    si = self._sweep_handler.get_sweep_information(epoch)
                    intgr = si.get_integrater()
                    hklin = si.get_reflections()
                    refiner = intgr.get_integrater_refiner()

                    if self._scalr_input_pointgroup:
                        pointgroup = self._scalr_input_pointgroup
                        reindex_op = "h,k,l"
                        ntr = False

                    else:
                        pointless_hklin = self._prepare_pointless_hklin(
                            hklin, si.get_integrater().get_phi_width()
                        )

                        pointgroup, reindex_op, ntr, pt = self._pointless_indexer_jiffy(
                            pointless_hklin, refiner
                        )

                        logger.debug("X1698: %s: %s", pointgroup, reindex_op)

                    lattice = Syminfo.get_lattice(pointgroup)

                    if lattice not in lattices:
                        lattices.append(lattice)

                    if ntr:
                        intgr.integrater_reset_reindex_operator()
                        need_to_return = True
                # SUMMARY do pointless_indexer on each sweep, get lattices and make a list
                # of unique lattices, potentially reset reindex op.
            # END OF if not multi_sweep, or input pg given

            # SUMMARY - still within if more than one epoch, now have a list of number
            # of lattices

            # START OF if multiple-lattices
            if len(lattices) > 1:

                # why not use the pointless indexer jiffy here??!

                correct_lattice = sort_lattices(lattices)[0]

                logger.info("Correct lattice asserted to be %s", correct_lattice)

                # transfer this information back to the indexers
                for epoch in self._sweep_handler.get_epochs():

                    si = self._sweep_handler.get_sweep_information(epoch)
                    refiner = si.get_integrater().get_integrater_refiner()
                    sname = si.get_sweep_name()

                    state = refiner.set_refiner_asserted_lattice(correct_lattice)

                    if state == refiner.LATTICE_CORRECT:
                        logger.info(
                            "Lattice %s ok for sweep %s", correct_lattice, sname
                        )
                    elif state == refiner.LATTICE_IMPOSSIBLE:
                        raise RuntimeError(
                            f"Lattice {correct_lattice} impossible for {sname}"
                        )
                    elif state == refiner.LATTICE_POSSIBLE:
                        logger.info(
                            "Lattice %s assigned for sweep %s", correct_lattice, sname
                        )
                        need_to_return = True
            # END OF if multiple-lattices
            # SUMMARY - forced all lattices to be the same and hope it's okay.
        # END OF if more than one epoch

        # if one or more of them was not in the lowest lattice,
        # need to return here to allow reprocessing

        if need_to_return:
            self.set_scaler_done(False)
            self.set_scaler_prepare_done(False)
            return

        # ---------- REINDEX ALL DATA TO CORRECT POINTGROUP ----------

        # all should share the same pointgroup, unless twinned... in which
        # case force them to be...

        pointgroups = {}
        reindex_ops = {}
        probably_twinned = False

        need_to_return = False

        multi_sweep_indexing = PhilIndex.params.xia2.settings.multi_sweep_indexing

        # START OF if multi-sweep and not input pg
        if multi_sweep_indexing and not self._scalr_input_pointgroup:
            pointless_hklins = []

            max_batches = 0
            for epoch in self._sweep_handler.get_epochs():
                si = self._sweep_handler.get_sweep_information(epoch)
                hklin = si.get_reflections()

                batches = MtzUtils.batches_from_mtz(hklin)
                if 1 + max(batches) - min(batches) > max_batches:
                    max_batches = max(batches) - min(batches) + 1

            logger.debug("Biggest sweep has %d batches", max_batches)
            max_batches = nifty_power_of_ten(max_batches)

            counter = 0

            refiners = []

            for epoch in self._sweep_handler.get_epochs():
                si = self._sweep_handler.get_sweep_information(epoch)
                hklin = si.get_reflections()
                integrater = si.get_integrater()
                refiner = integrater.get_integrater_refiner()
                refiners.append(refiner)

                hklin = self._prepare_pointless_hklin(
                    hklin, si.get_integrater().get_phi_width()
                )

                hklout = os.path.join(
                    self.get_working_directory(),
                    "%s_%s_%s_%s_prepointless.mtz"
                    % (pname, xname, dname, si.get_sweep_name()),
                )

                # we will want to delete this on exit
                FileHandler.record_temporary_file(hklout)

                first_batch = min(si.get_batches())
                si.set_batch_offset(counter * max_batches - first_batch + 1)

                rebatch(
                    hklin,
                    hklout,
                    first_batch=counter * max_batches + 1,
                    pname=pname,
                    xname=xname,
                    dname=dname,
                )

                pointless_hklins.append(hklout)

                # update the counter & recycle
                counter += 1

            # FIXME related to xia2-51 - this looks very very similar to the logic
            # in [A] above - is this duplicated logic?
            s = self._factory.Sortmtz()

            pointless_hklin = os.path.join(
                self.get_working_directory(),
                "%s_%s_prepointless_sorted.mtz"
                % (self._scalr_pname, self._scalr_xname),
            )

            s.set_hklout(pointless_hklin)

            for hklin in pointless_hklins:
                s.add_hklin(hklin)

            s.sort()

            pointless_const = os.path.join(
                self.get_working_directory(),
                f"{self._scalr_pname}_{self._scalr_xname}_prepointless_const.mtz",
            )
            FileHandler.record_temporary_file(pointless_const)

            aimless_const = self._factory.Aimless()
            aimless_const.set_hklin(pointless_hklin)
            aimless_const.set_hklout(pointless_const)
            aimless_const.const()

            pointless_const = os.path.join(
                self.get_working_directory(),
                "%s_%s_prepointless_const_unmerged.mtz"
                % (self._scalr_pname, self._scalr_xname),
            )
            FileHandler.record_temporary_file(pointless_const)
            pointless_hklin = pointless_const

            pointgroup, reindex_op, ntr, pt = self._pointless_indexer_multisweep(
                pointless_hklin, refiners
            )

            for epoch in self._sweep_handler.get_epochs():
                pointgroups[epoch] = pointgroup
                reindex_ops[epoch] = reindex_op
            # SUMMARY ran pointless multisweep on combined mtz and made a dict
            # of  pointgroups and reindex_ops (all same)
        # END OF if multi-sweep and not input pg

        # START OF if not multi-sweep or pg given
        else:
            for epoch in self._sweep_handler.get_epochs():
                si = self._sweep_handler.get_sweep_information(epoch)

                hklin = si.get_reflections()

                integrater = si.get_integrater()
                refiner = integrater.get_integrater_refiner()

                if self._scalr_input_pointgroup:
                    logger.debug(
                        "Using input pointgroup: %s", self._scalr_input_pointgroup
                    )
                    pointgroup = self._scalr_input_pointgroup
                    reindex_op = "h,k,l"
                    pt = False

                else:

                    pointless_hklin = self._prepare_pointless_hklin(
                        hklin, si.get_integrater().get_phi_width()
                    )

                    pointgroup, reindex_op, ntr, pt = self._pointless_indexer_jiffy(
                        pointless_hklin, refiner
                    )

                    logger.debug("X1698: %s: %s", pointgroup, reindex_op)

                    if ntr:

                        integrater.integrater_reset_reindex_operator()
                        need_to_return = True

                if pt and not probably_twinned:
                    probably_twinned = True

                logger.debug("Pointgroup: %s (%s)", pointgroup, reindex_op)

                pointgroups[epoch] = pointgroup
                reindex_ops[epoch] = reindex_op
            # SUMMARY - for each sweep, run indexer jiffy and get reindex operators
            # and pointgroups dictionaries (could be different between sweeps)

        # END OF if not multi-sweep or pg given

        overall_pointgroup = None

        pointgroup_set = {pointgroups[e] for e in pointgroups}

        if len(pointgroup_set) > 1 and not probably_twinned:
            raise RuntimeError(
                "non uniform pointgroups: %s" % str(list(pointgroup_set))
            )

        if len(pointgroup_set) > 1:
            logger.debug(
                "Probably twinned, pointgroups: %s",
                " ".join(p.replace(" ", "") for p in pointgroup_set),
            )
            numbers = (Syminfo.spacegroup_name_to_number(ps) for ps in pointgroup_set)
            overall_pointgroup = Syminfo.spacegroup_number_to_name(min(numbers))
            self._scalr_input_pointgroup = overall_pointgroup

            logger.info("Twinning detected, assume pointgroup %s", overall_pointgroup)

            need_to_return = True

        else:
            overall_pointgroup = pointgroup_set.pop()
        # SUMMARY - Have handled if different pointgroups & chosen an overall_pointgroup
        # which is the lowest symmetry

        # Now go through sweeps and do reindexing
        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)

            integrater = si.get_integrater()

            integrater.set_integrater_spacegroup_number(
                Syminfo.spacegroup_name_to_number(overall_pointgroup)
            )
            integrater.set_integrater_reindex_operator(
                reindex_ops[epoch], reason="setting point group"
            )
            # This will give us the reflections in the correct point group
            si.set_reflections(integrater.get_integrater_intensities())

        if need_to_return:
            self.set_scaler_done(False)
            self.set_scaler_prepare_done(False)
            return

        # in here now optionally work through the data files which should be
        # indexed with a consistent point group, and transform the orientation
        # matrices by the lattice symmetry operations (if possible) to get a
        # consistent definition of U matrix modulo fixed rotations

        if PhilIndex.params.xia2.settings.unify_setting:
            self.unify_setting()

        if self.get_scaler_reference_reflection_file():
            self._reference = self.get_scaler_reference_reflection_file()
            logger.debug("Using HKLREF %s", self._reference)

        elif PhilIndex.params.xia2.settings.scale.reference_reflection_file:
            self._reference = (
                PhilIndex.params.xia2.settings.scale.reference_reflection_file
            )
            logger.debug("Using HKLREF %s", self._reference)

        params = PhilIndex.params
        use_brehm_diederichs = params.xia2.settings.use_brehm_diederichs
        if len(self._sweep_handler.get_epochs()) > 1 and use_brehm_diederichs:
            self.brehm_diederichs_reindexing()
        # If not Brehm-Diederichs, set reference as first sweep
        elif len(self._sweep_handler.get_epochs()) > 1 and not self._reference:

            first = self._sweep_handler.get_epochs()[0]
            si = self._sweep_handler.get_sweep_information(first)
            self._reference = si.get_reflections()

        # Now reindex to be consistent with first dataset - run pointless on each
        # dataset with reference
        if self._reference:

            md = self._factory.Mtzdump()
            md.set_hklin(self._reference)
            md.dump()

            datasets = md.get_datasets()

            # then get the unit cell, lattice etc.

            reference_lattice = Syminfo.get_lattice(md.get_spacegroup())
            reference_cell = md.get_dataset_info(datasets[0])["cell"]

            # then compute the pointgroup from this...

            # ---------- REINDEX TO CORRECT (REFERENCE) SETTING ----------

            for epoch in self._sweep_handler.get_epochs():

                # if we are working with unified UB matrix then this should not
                # be a problem here (note, *if*; *should*)

                # what about e.g. alternative P1 settings?
                # see JIRA MXSW-904
                if PhilIndex.params.xia2.settings.unify_setting:
                    continue

                pl = self._factory.Pointless()

                si = self._sweep_handler.get_sweep_information(epoch)
                hklin = si.get_reflections()

                pl.set_hklin(
                    self._prepare_pointless_hklin(
                        hklin, si.get_integrater().get_phi_width()
                    )
                )

                hklout = os.path.join(
                    self.get_working_directory(),
                    "%s_rdx2.mtz" % os.path.split(hklin)[-1][:-4],
                )

                # we will want to delete this on exit
                FileHandler.record_temporary_file(hklout)

                # now set the initial reflection set as a reference...

                pl.set_hklref(self._reference)

                # https://github.com/xia2/xia2/issues/115 - should ideally iteratively
                # construct a reference or a tree of correlations to ensure correct
                # reference setting - however if small molecule assume has been
                # multi-sweep-indexed so can ignore "fatal errors" - temporary hack
                pl.decide_pointgroup(
                    ignore_errors=PhilIndex.params.xia2.settings.small_molecule
                )

                logger.debug("Reindexing analysis of %s", pl.get_hklin())

                pointgroup = pl.get_pointgroup()
                reindex_op = pl.get_reindex_operator()

                logger.debug("Operator: %s", reindex_op)

                # apply this...

                integrater = si.get_integrater()

                integrater.set_integrater_reindex_operator(
                    reindex_op, reason="match reference"
                )
                integrater.set_integrater_spacegroup_number(
                    Syminfo.spacegroup_name_to_number(pointgroup)
                )
                si.set_reflections(integrater.get_integrater_intensities())

                md = self._factory.Mtzdump()
                md.set_hklin(si.get_reflections())
                md.dump()

                datasets = md.get_datasets()

                if len(datasets) > 1:
                    raise RuntimeError(
                        "more than one dataset in %s" % si.get_reflections()
                    )

                # then get the unit cell, lattice etc.

                lattice = Syminfo.get_lattice(md.get_spacegroup())
                cell = md.get_dataset_info(datasets[0])["cell"]

                if lattice != reference_lattice:
                    raise RuntimeError(
                        "lattices differ in %s and %s"
                        % (self._reference, si.get_reflections())
                    )

                logger.debug("Cell: %.2f %.2f %.2f %.2f %.2f %.2f" % cell)
                logger.debug("Ref:  %.2f %.2f %.2f %.2f %.2f %.2f" % reference_cell)

                for j in range(6):
                    if (
                        math.fabs((cell[j] - reference_cell[j]) / reference_cell[j])
                        > 0.1
                    ):
                        raise RuntimeError(
                            "unit cell parameters differ in %s and %s"
                            % (self._reference, si.get_reflections())
                        )

        # ---------- SORT TOGETHER DATA ----------

        self._sort_together_data_ccp4()

        self._scalr_resolution_limits = {}

        # store central resolution limit estimates

        batch_ranges = [
            self._sweep_handler.get_sweep_information(epoch).get_batch_range()
            for epoch in self._sweep_handler.get_epochs()
        ]

        self._resolution_limit_estimates = ersatz_resolution(
            self._prepared_reflections, batch_ranges
        )
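
When the per-sweep pointgroups disagree and twinning is suspected, the code above falls back to the lowest-symmetry pointgroup, chosen via the smallest space-group number. A hedged sketch of that selection, with the Syminfo look-up stubbed out as a supplied mapping:

def overall_pointgroup(pointgroups, name_to_number):
    """pointgroups: iterable of pointgroup names; name_to_number: mapping
    from pointgroup name to space-group number. Returns the agreed
    pointgroup, or the lowest-symmetry candidate when they disagree."""
    unique = set(pointgroups)
    if len(unique) == 1:
        return unique.pop()
    # lowest space-group number ~ lowest symmetry
    return min(unique, key=lambda pg: name_to_number[pg])

# e.g. overall_pointgroup(['P 4 2 2', 'P 4'],
#                         {'P 4 2 2': 89, 'P 4': 75}) -> 'P 4'
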
Code example #45
0
File: CommonScaler.py Project: hainm/xia2
  def _sort_together_data_xds(self):

    if len(self._sweep_information) == 1:
      return self._sort_together_data_xds_one_sweep()

    max_batches = 0

    for epoch in self._sweep_information.keys():

      hklin = self._sweep_information[epoch]['scaled_reflections']

      md = self._factory.Mtzdump()
      md.set_hklin(hklin)
      md.dump()

      if self._sweep_information[epoch]['batches'] == [0, 0]:

        Chatter.write('Getting batches from %s' % hklin)
        batches = md.get_batches()
        self._sweep_information[epoch]['batches'] = [min(batches),
                                                     max(batches)]
        Chatter.write('=> %d to %d' % (min(batches),
                                       max(batches)))

      batches = self._sweep_information[epoch]['batches']
      if 1 + max(batches) - min(batches) > max_batches:
        max_batches = max(batches) - min(batches) + 1

      datasets = md.get_datasets()

      Debug.write('In reflection file %s found:' % hklin)
      for d in datasets:
        Debug.write('... %s' % d)

      dataset_info = md.get_dataset_info(datasets[0])

    Debug.write('Biggest sweep has %d batches' % max_batches)
    max_batches = nifty_power_of_ten(max_batches)

    epochs = sorted(self._sweep_information.keys())

    counter = 0

    for epoch in epochs:
      rb = self._factory.Rebatch()

      hklin = self._sweep_information[epoch]['scaled_reflections']

      pname = self._sweep_information[epoch]['pname']
      xname = self._sweep_information[epoch]['xname']
      dname = self._sweep_information[epoch]['dname']

      sname = self._sweep_information[epoch]['sname']

      hklout = os.path.join(self.get_working_directory(),
                            '%s_%s_%s_%d.mtz' % \
                            (pname, xname, dname, counter))

      # we will want to delete this on exit
      FileHandler.record_temporary_file(hklout)

      # record this for future reference - will be needed in the
      # radiation damage analysis...

      # hack - reset this as it gets in a muddle...
      intgr = self._sweep_information[epoch]['integrater']
      self._sweep_information[epoch][
          'batches'] = intgr.get_integrater_batches()

      first_batch = min(self._sweep_information[epoch]['batches'])
      self._sweep_information[epoch][
          'batch_offset'] = counter * max_batches - first_batch + 1

      rb.set_hklin(hklin)
      rb.set_first_batch(counter * max_batches + 1)
      rb.set_hklout(hklout)

      new_batches = rb.rebatch()

      # update the "input information"

      self._sweep_information[epoch]['hklin'] = hklout
      self._sweep_information[epoch]['batches'] = new_batches

      # update the counter & recycle

      counter += 1

    if Flags.get_chef():
      self._sweep_information_to_chef()

    s = self._factory.Sortmtz()

    hklout = os.path.join(self.get_working_directory(),
                          '%s_%s_sorted.mtz' % \
                          (self._scalr_pname, self._scalr_xname))

    s.set_hklout(hklout)

    for epoch in epochs:
      s.add_hklin(self._sweep_information[epoch]['hklin'])

    s.sort(vrset = -99999999.0)

    self._prepared_reflections = hklout

    if self.get_scaler_reference_reflection_file():
      md = self._factory.Mtzdump()
      md.set_hklin(self.get_scaler_reference_reflection_file())
      md.dump()

      spacegroups = [md.get_spacegroup()]
      reindex_operator = 'h,k,l'

    else:

      pointless = self._factory.Pointless()
      pointless.set_hklin(hklout)
      pointless.decide_spacegroup()

      FileHandler.record_log_file('%s %s pointless' % \
                                  (self._scalr_pname,
                                   self._scalr_xname),
                                  pointless.get_log_file())

      spacegroups = pointless.get_likely_spacegroups()
      reindex_operator = pointless.get_spacegroup_reindex_operator()

      if self._scalr_input_spacegroup:
        Debug.write('Assigning user input spacegroup: %s' % \
                    self._scalr_input_spacegroup)
        spacegroups = [self._scalr_input_spacegroup]
        reindex_operator = 'h,k,l'

    self._scalr_likely_spacegroups = spacegroups
    spacegroup = self._scalr_likely_spacegroups[0]

    self._scalr_reindex_operator = reindex_operator

    Chatter.write('Likely spacegroups:')
    for spag in self._scalr_likely_spacegroups:
      Chatter.write('%s' % spag)

    Chatter.write(
        'Reindexing to first spacegroup setting: %s (%s)' % \
        (spacegroup, clean_reindex_operator(reindex_operator)))

    hklin = self._prepared_reflections
    hklout = os.path.join(self.get_working_directory(),
                          '%s_%s_reindex.mtz' % \
                          (self._scalr_pname, self._scalr_xname))

    FileHandler.record_temporary_file(hklout)

    ri = self._factory.Reindex()
    ri.set_hklin(hklin)
    ri.set_hklout(hklout)
    ri.set_spacegroup(spacegroup)
    ri.set_operator(reindex_operator)
    ri.reindex()

    hklin = hklout
    hklout = os.path.join(self.get_working_directory(),
                          '%s_%s_sorted.mtz' % \
                          (self._scalr_pname, self._scalr_xname))

    s = self._factory.Sortmtz()
    s.set_hklin(hklin)
    s.set_hklout(hklout)

    s.sort(vrset = -99999999.0)

    self._prepared_reflections = hklout

    Debug.write(
        'Updating unit cell to %.2f %.2f %.2f %.2f %.2f %.2f' % \
        tuple(ri.get_cell()))
    self._scalr_cell = tuple(ri.get_cell())

    return
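
A note on the batch bookkeeping above: each sweep is rebatched into its own
block of batch numbers (counter * max_batches + 1 onwards) so that sweeps
cannot collide in the sorted MTZ file. The sketch below reproduces that
arithmetic in isolation; nifty_power_of_ten is reimplemented here on the
assumption that it rounds the largest sweep size up to the next power of
ten, which keeps each sweep's block on a readable boundary.

import math

def nifty_power_of_ten(n):
    # Assumed behaviour: the smallest power of ten >= n, e.g. 360 -> 1000.
    return int(10 ** math.ceil(math.log10(n)))

def assign_batch_offsets(sweep_batches):
    '''Given [(first_batch, last_batch), ...] per sweep, return the
    non-overlapping batch range each sweep is rebatched into.'''
    max_batches = max(last - first + 1 for first, last in sweep_batches)
    block = nifty_power_of_ten(max_batches)
    # Sweep j starts at j * block + 1, mirroring rb.set_first_batch(...).
    return [(j * block + 1, j * block + (last - first + 1))
            for j, (first, last) in enumerate(sweep_batches)]

print(assign_batch_offsets([(1, 360), (1, 90)]))
# -> [(1, 360), (1001, 1090)]
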
Code example #46
File: DialsRefiner.py  Project: ndevenish/xia2
    def _refine(self):
        for epoch, idxr in self._refinr_indexers.iteritems():
            experiments = idxr.get_indexer_experiment_list()

            indexed_experiments = idxr.get_indexer_payload(
                "experiments_filename")
            indexed_reflections = idxr.get_indexer_payload("indexed_filename")

            if len(experiments) > 1:
                xsweeps = idxr._indxr_sweeps
                assert len(xsweeps) == len(experiments)
                # don't currently support joint refinement
                assert len(self._refinr_sweeps) == 1
                xsweep = self._refinr_sweeps[0]
                i = xsweeps.index(xsweep)
                experiments = experiments[i:i + 1]

                # Extract and output experiment and reflections for current sweep
                indexed_experiments = os.path.join(
                    self.get_working_directory(),
                    "%s_indexed_experiments.json" % xsweep.get_name())
                indexed_reflections = os.path.join(
                    self.get_working_directory(),
                    "%s_indexed_reflections.pickle" % xsweep.get_name())

                from dxtbx.serialize import dump
                dump.experiment_list(experiments, indexed_experiments)

                from libtbx import easy_pickle
                from scitbx.array_family import flex
                reflections = easy_pickle.load(
                    idxr.get_indexer_payload("indexed_filename"))
                sel = reflections['id'] == i
                assert sel.count(True) > 0
                imageset_id = reflections['imageset_id'].select(sel)
                assert imageset_id.all_eq(imageset_id[0])
                sel = reflections['imageset_id'] == imageset_id[0]
                reflections = reflections.select(sel)
                # set indexed reflections to id == 0 and imageset_id == 0
                reflections['id'].set_selected(reflections['id'] == i, 0)
                reflections['imageset_id'] = flex.int(len(reflections), 0)
                easy_pickle.dump(indexed_reflections, reflections)

            # currently only handle one lattice/sweep
            assert len(experiments.crystals()) == 1
            crystal_model = experiments.crystals()[0]
            lattice = idxr.get_indexer_lattice()

            from dxtbx.serialize import load

            scan_static = PhilIndex.params.dials.refine.scan_static

            # XXX Temporary workaround for dials.refine error for scan_varying
            # refinement with smaller wedges
            start, end = experiments[0].scan.get_oscillation_range()
            total_phi_range = end - start

            if (PhilIndex.params.dials.refine.scan_varying
                    and total_phi_range > 5
                    and not PhilIndex.params.dials.fast_mode):
                scan_varying = PhilIndex.params.dials.refine.scan_varying
            else:
                scan_varying = False

            if scan_static:
                refiner = self.Refine()
                refiner.set_experiments_filename(indexed_experiments)
                refiner.set_indexed_filename(indexed_reflections)
                refiner.set_scan_varying(False)
                refiner.run()
                self._refinr_experiments_filename \
                  = refiner.get_refined_experiments_filename()
                self._refinr_indexed_filename = refiner.get_refined_filename()
            else:
                self._refinr_experiments_filename = indexed_experiments
                self._refinr_indexed_filename = indexed_reflections

            if scan_varying:
                refiner = self.Refine()
                refiner.set_experiments_filename(
                    self._refinr_experiments_filename)
                refiner.set_indexed_filename(self._refinr_indexed_filename)
                if total_phi_range < 36:
                    refiner.set_interval_width_degrees(total_phi_range / 2)
                refiner.run()
                self._refinr_experiments_filename \
                  = refiner.get_refined_experiments_filename()
                self._refinr_indexed_filename = refiner.get_refined_filename()

            if scan_static or scan_varying:
                FileHandler.record_log_file(
                    '%s REFINE' % idxr.get_indexer_full_name(),
                    refiner.get_log_file())
                report = self.Report()
                report.set_experiments_filename(
                    self._refinr_experiments_filename)
                report.set_reflections_filename(self._refinr_indexed_filename)
                html_filename = os.path.join(
                    self.get_working_directory(),
                    '%i_dials.refine.report.html' % report.get_xpid())
                report.set_html_filename(html_filename)
                report.run()
                FileHandler.record_html_file(
                    '%s REFINE' % idxr.get_indexer_full_name(), html_filename)

            experiments = load.experiment_list(
                self._refinr_experiments_filename)
            self.set_refiner_payload("experiments.json",
                                     self._refinr_experiments_filename)
            self.set_refiner_payload("reflections.pickle",
                                     self._refinr_indexed_filename)

            # this is the result of the cell refinement
            self._refinr_cell = experiments.crystals()[0].get_unit_cell().parameters()
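
The refinement above runs in up to two passes: an optional scan-static pass
followed by an optional scan-varying pass whose smoother interval is halved
for narrow wedges. A minimal sketch of that decision logic, using the 5 and
36 degree thresholds from the code above (the fast_mode argument stands in
for PhilIndex.params.dials.fast_mode):

def choose_refinement_strategy(total_phi_range, scan_varying_requested,
                               fast_mode=False):
    '''Return (run_scan_varying, interval_width_degrees or None).'''
    # Scan-varying refinement is only attempted for wedges wider than
    # 5 degrees, and is skipped entirely in fast mode.
    scan_varying = (scan_varying_requested and total_phi_range > 5
                    and not fast_mode)
    if not scan_varying:
        return False, None
    # For wedges narrower than 36 degrees, halve the interval width so
    # the smoother still has enough intervals across the scan.
    if total_phi_range < 36:
        return True, total_phi_range / 2.0
    return True, None

print(choose_refinement_strategy(20, True))  # (True, 10.0)
print(choose_refinement_strategy(4, True))   # (False, None)
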
Code example #47
    def _integrate(self):
        '''Implement the integrater interface.'''

        # cite the program
        Citations.cite('mosflm')

        images_str = '%d to %d' % tuple(self._intgr_wedge)
        cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % tuple(self._intgr_cell)

        if len(self._fp_directory) <= 50:
            dirname = self._fp_directory
        else:
            dirname = '...%s' % self._fp_directory[-46:]

        Journal.block(
            'integrating', self._intgr_sweep_name, 'mosflm', {
                'images': images_str,
                'cell': cell_str,
                'lattice': self.get_integrater_refiner().get_refiner_lattice(),
                'template': self._fp_template,
                'directory': dirname,
                'resolution': '%.2f' % self._intgr_reso_high
            })

        self._mosflm_rerun_integration = False

        wd = self.get_working_directory()

        try:

            if self.get_integrater_sweep_name():
                pname, xname, dname = self.get_integrater_project_info()

            nproc = PhilIndex.params.xia2.settings.multiprocessing.nproc
            if nproc > 1:
                Debug.write('Parallel integration: %d jobs' % nproc)
                self._mosflm_hklout = self._mosflm_parallel_integrate()
            else:
                self._mosflm_hklout = self._mosflm_integrate()

            # record integration output for e.g. BLEND.

            sweep = self.get_integrater_sweep_name()
            if sweep:
                FileHandler.record_more_data_file(
                    '%s %s %s %s INTEGRATE' % (pname, xname, dname, sweep),
                    self._mosflm_hklout)

        except IntegrationError as e:
            if 'negative mosaic spread' in str(e):
                if self._mosflm_postref_fix_mosaic:
                    Chatter.write(
                        'Negative mosaic spread - stopping integration')
                    raise BadLatticeError('negative mosaic spread')

                Chatter.write('Negative mosaic spread - rerunning integration')
                self.set_integrater_done(False)
                self._mosflm_postref_fix_mosaic = True

        if self._mosflm_rerun_integration and not PhilIndex.params.dials.fast_mode:
            # make sure that this is run again...
            Chatter.write('Need to rerun the integration...')
            self.set_integrater_done(False)

        return self._mosflm_hklout
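
Both failure paths above recover by calling set_integrater_done(False),
which marks the integration step stale so that the surrounding framework
calls _integrate again with the adjusted parameters. A toy sketch of that
contract - the driver loop here is an assumption about the framework, not
xia2 code:

class RetryingIntegrater(object):
    '''Illustrates the done-flag retry contract in miniature.'''

    def __init__(self):
        self._done = False
        self._fix_mosaic = False

    def set_integrater_done(self, done):
        self._done = done

    def _integrate(self):
        self.set_integrater_done(True)
        if not self._fix_mosaic:
            # Recoverable failure: adjust a parameter, request a rerun.
            self._fix_mosaic = True
            self.set_integrater_done(False)
        return 'integrated.mtz'

    def integrate(self):
        result = self._integrate()
        while not self._done:
            result = self._integrate()
        return result

it = RetryingIntegrater()
print(it.integrate(), it._fix_mosaic)  # integrated.mtz True
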
Code example #48
    def _index(self):
        """Actually do the autoindexing using the data prepared by the
        previous method."""

        self._index_remove_masked_regions()

        if self._i_or_ii is None:
            self._i_or_ii = self.decide_i_or_ii()
            logger.debug("Selecting I or II, chose %s", self._i_or_ii)

        idxref = self.Idxref()

        for file in ["SPOT.XDS"]:
            idxref.set_input_data_file(file, self._indxr_payload[file])

        # set the phi start etc correctly

        idxref.set_data_range(self._indxr_images[0][0],
                              self._indxr_images[0][1])
        idxref.set_background_range(self._indxr_images[0][0],
                                    self._indxr_images[0][1])

        if self._i_or_ii == "i":
            blocks = self._index_select_images_i()
            for block in blocks[:1]:
                starting_frame = block[0]
                starting_angle = self.get_scan().get_angle_from_image_index(
                    starting_frame)

                idxref.set_starting_frame(starting_frame)
                idxref.set_starting_angle(starting_angle)

                idxref.add_spot_range(block[0], block[1])

            for block in blocks[1:]:
                idxref.add_spot_range(block[0], block[1])
        else:
            for block in self._indxr_images[:1]:
                starting_frame = block[0]
                starting_angle = self.get_scan().get_angle_from_image_index(
                    starting_frame)

                idxref.set_starting_frame(starting_frame)
                idxref.set_starting_angle(starting_angle)

                idxref.add_spot_range(block[0], block[1])

            for block in self._indxr_images[1:]:
                idxref.add_spot_range(block[0], block[1])

        # FIXME need to also be able to pass in the known unit
        # cell and lattice if already available e.g. from
        # the helper... indirectly

        if self._indxr_user_input_lattice:
            idxref.set_indexer_user_input_lattice(True)

        if self._indxr_input_lattice and self._indxr_input_cell:
            idxref.set_indexer_input_lattice(self._indxr_input_lattice)
            idxref.set_indexer_input_cell(self._indxr_input_cell)

            logger.debug("Set lattice: %s", self._indxr_input_lattice)
            logger.debug("Set cell: %f %f %f %f %f %f" %
                         self._indxr_input_cell)

            original_cell = self._indxr_input_cell
        elif self._indxr_input_lattice:
            idxref.set_indexer_input_lattice(self._indxr_input_lattice)
            original_cell = None
        else:
            original_cell = None

        # FIXED need to set the beam centre here - this needs to come
        # from the input .xinfo object or header, and be converted
        # to the XDS frame... done.

        from dxtbx.serialize.xds import to_xds

        converter = to_xds(self.get_imageset())
        xds_beam_centre = converter.detector_origin

        idxref.set_beam_centre(xds_beam_centre[0], xds_beam_centre[1])

        # fixme need to check if the lattice, cell have been set already,
        # and if they have, pass these in as input to the indexing job.

        done = False

        while not done:
            try:
                done = idxref.run()

                # N.B. in here if the IDXREF step was being run in the first
                # pass done is FALSE however there should be a refined
                # P1 orientation matrix etc. available - so keep it!

            except XDSException as e:
                # inspect this - if we have complaints about not
                # enough reflections indexed, and we have a target
                # unit cell, and they are the same, well ignore it

                if "solution is inaccurate" in str(e):
                    logger.debug(
                        "XDS complains solution inaccurate - ignoring")
                    done = idxref.continue_from_error()
                elif ("insufficient percentage (< 70%)" in str(e)
                      or "insufficient percentage (< 50%)"
                      in str(e)) and original_cell:
                    done = idxref.continue_from_error()
                    lattice, cell, mosaic = idxref.get_indexing_solution()
                    # compare solutions
                    check = PhilIndex.params.xia2.settings.xds_check_cell_deviation
                    for j in range(3):
                        # allow two percent variation in unit cell length
                        if (math.fabs(
                            (cell[j] - original_cell[j]) / original_cell[j]) >
                                0.02 and check):
                            logger.debug("XDS unhappy and solution wrong")
                            raise e
                        # and two degree difference in angle
                        if (math.fabs(cell[j + 3] - original_cell[j + 3]) > 2.0
                                and check):
                            logger.debug("XDS unhappy and solution wrong")
                            raise e
                    logger.debug("XDS unhappy but solution ok")
                elif "insufficient percentage (< 70%)" in str(
                        e) or "insufficient percentage (< 50%)" in str(e):
                    logger.debug("XDS unhappy but solution probably ok")
                    done = idxref.continue_from_error()
                else:
                    raise e

        FileHandler.record_log_file(
            "%s INDEX" % self.get_indexer_full_name(),
            os.path.join(self.get_working_directory(), "IDXREF.LP"),
        )

        for file in ["SPOT.XDS", "XPARM.XDS"]:
            self._indxr_payload[file] = idxref.get_output_data_file(file)

        # need to get the indexing solutions out somehow...

        self._indxr_other_lattice_cell = idxref.get_indexing_solutions()

        (
            self._indxr_lattice,
            self._indxr_cell,
            self._indxr_mosaic,
        ) = idxref.get_indexing_solution()

        xparm_file = os.path.join(self.get_working_directory(), "XPARM.XDS")
        models = dxtbx.load(xparm_file)
        crystal_model = to_crystal(xparm_file)

        # this information gets lost when re-creating the models from the
        # XDS results - however is not refined so can simply copy from the
        # input - https://github.com/xia2/xia2/issues/372
        models.get_detector()[0].set_thickness(
            converter.get_detector()[0].get_thickness())

        experiment = Experiment(
            beam=models.get_beam(),
            detector=models.get_detector(),
            goniometer=models.get_goniometer(),
            scan=models.get_scan(),
            crystal=crystal_model,
            # imageset=self.get_imageset(),
        )

        experiment_list = ExperimentList([experiment])
        self.set_indexer_experiment_list(experiment_list)

        # I will want this later on to check that the lattice was ok
        self._idxref_subtree_problem = idxref.get_index_tree_problem()
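
The XDSException handling above tolerates an "insufficient percentage"
failure from IDXREF so long as the indexed cell stays close to the target
cell: within two percent on each cell length and two degrees on each angle.
That check factors out to something like the following, where cells are
(a, b, c, alpha, beta, gamma) tuples:

import math

def cell_close_enough(cell, original_cell, length_tol=0.02, angle_tol=2.0):
    '''True if every axis length is within length_tol (fractional) and
    every angle within angle_tol degrees of the target cell.'''
    for j in range(3):
        if math.fabs((cell[j] - original_cell[j]) / original_cell[j]) > length_tol:
            return False
        if math.fabs(cell[j + 3] - original_cell[j + 3]) > angle_tol:
            return False
    return True

print(cell_close_enough((78.1, 78.3, 37.1, 90.0, 90.0, 90.0),
                        (78.0, 78.0, 37.0, 90.0, 90.0, 90.0)))  # True
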
Code example #49
    def _mosflm_integrate(self):
        '''Perform the actual integration, based on the results of the
        cell refinement or indexing (the two have equivalent form).'''

        refinr = self.get_integrater_refiner()

        if not refinr.get_refiner_payload('mosflm_orientation_matrix'):
            raise RuntimeError('unexpected situation in indexing')

        lattice = refinr.get_refiner_lattice()
        spacegroup_number = lattice_to_spacegroup(lattice)
        mosaic = refinr.get_refiner_payload('mosaic')
        beam = refinr.get_refiner_payload('beam')
        distance = refinr.get_refiner_payload('distance')
        matrix = refinr.get_refiner_payload('mosflm_orientation_matrix')

        integration_params = refinr.get_refiner_payload(
            'mosflm_integration_parameters')

        if integration_params:
            if 'separation' in integration_params:
                self.set_integrater_parameter(
                    'mosflm', 'separation',
                    '%s %s' % tuple(integration_params['separation']))
            if 'raster' in integration_params:
                self.set_integrater_parameter(
                    'mosflm', 'raster',
                    '%d %d %d %d %d' % tuple(integration_params['raster']))

        refinr.set_refiner_payload('mosflm_integration_parameters', None)

        f = open(
            os.path.join(self.get_working_directory(), 'xiaintegrate.mat'),
            'w')
        for m in matrix:
            f.write(m)
        f.close()

        # then start the integration
        integrater = MosflmIntegrate()
        integrater.set_working_directory(self.get_working_directory())
        auto_logfiler(integrater)

        integrater.set_refine_profiles(self._mosflm_refine_profiles)

        pname, xname, dname = self.get_integrater_project_info()

        if pname is not None and xname is not None and dname is not None:
            Debug.write('Harvesting: %s/%s/%s' % (pname, xname, dname))
            harvest_dir = self.get_working_directory()
            # harvest file name will be %s.mosflm_run_start_end % dname
            temp_dname = '%s_%s' % \
                         (dname, self.get_integrater_sweep_name())
            integrater.set_pname_xname_dname(pname, xname, temp_dname)

        integrater.set_template(os.path.basename(self.get_template()))
        integrater.set_directory(self.get_directory())

        # check for ice - and if so, exclude (ranges taken from
        # XDS documentation)
        if self.get_integrater_ice() != 0:
            Debug.write('Excluding ice rings')
            integrater.set_exclude_ice(True)

        # exclude specified resolution ranges
        if len(self.get_integrater_excluded_regions()) != 0:
            regions = self.get_integrater_excluded_regions()
            Debug.write('Excluding regions: %s' % repr(regions))
            integrater.set_exclude_regions(regions)

        mask = standard_mask(self.get_detector())
        for m in mask:
            integrater.add_instruction(m)

        integrater.set_input_mat_file('xiaintegrate.mat')

        integrater.set_beam_centre(beam)
        integrater.set_distance(distance)
        integrater.set_space_group_number(spacegroup_number)
        integrater.set_mosaic(mosaic)

        if self.get_wavelength_prov() == 'user':
            integrater.set_wavelength(self.get_wavelength())

        parameters = self.get_integrater_parameters('mosflm')
        integrater.update_parameters(parameters)

        if self._mosflm_gain:
            integrater.set_gain(self._mosflm_gain)

        # check for resolution limits
        if self._intgr_reso_high > 0.0:
            integrater.set_d_min(self._intgr_reso_high)
        if self._intgr_reso_low:
            integrater.set_d_max(self._intgr_reso_low)

        if PhilIndex.params.general.backstop_mask:
            from xia2.Toolkit.BackstopMask import BackstopMask
            mask = BackstopMask(PhilIndex.params.general.backstop_mask)
            mask = mask.calculate_mask_mosflm(self.get_header())
            integrater.set_mask(mask)

        detector = self.get_detector()
        detector_width, detector_height = detector[0].get_image_size_mm()

        lim_x = 0.5 * detector_width
        lim_y = 0.5 * detector_height

        Debug.write('Scanner limits: %.1f %.1f' % (lim_x, lim_y))
        integrater.set_limits(lim_x, lim_y)

        integrater.set_fix_mosaic(self._mosflm_postref_fix_mosaic)
        offset = self.get_frame_offset()

        integrater.set_image_range(
            (self._intgr_wedge[0] - offset, self._intgr_wedge[1] - offset))

        try:
            integrater.run()
        except RuntimeError as e:
            if 'integration failed: reason unknown' in str(e):
                Chatter.write('Mosflm has failed in integration')
                message = 'The input was:\n\n'
                for input in integrater.get_all_input():
                    message += '  %s' % input
                Chatter.write(message)
            raise

        FileHandler.record_log_file(
            '%s %s %s %s mosflm integrate' % \
            (self.get_integrater_sweep_name(),
             pname, xname, dname),
            integrater.get_log_file())

        self._intgr_per_image_statistics = integrater.get_per_image_statistics()

        self._mosflm_hklout = integrater.get_hklout()
        Debug.write('Integration output: %s' % self._mosflm_hklout)

        self._intgr_n_ref = integrater.get_nref()

        # if a BGSIG error happened try not refining the
        # profile and running again...

        if integrater.get_bgsig_too_large():
            if not self._mosflm_refine_profiles:
                raise RuntimeError('BGSIG error with profiles fixed')

            Debug.write('BGSIG error detected - try fixing profile...')

            self._mosflm_refine_profiles = False
            self.set_integrater_done(False)

            return

        if integrater.get_getprof_error():
            Debug.write('GETPROF error detected - try fixing profile...')
            self._mosflm_refine_profiles = False
            self.set_integrater_done(False)

            return

        if (integrater.get_detector_gain_error()
                and not (self.get_imageset().get_detector()[0].get_type()
                         == 'SENSOR_PAD')):
            gain = integrater.get_suggested_gain()
            if gain is not None:
                self.set_integrater_parameter('mosflm', 'gain', gain)
                self.set_integrater_export_parameter('mosflm', 'gain', gain)
                if self._mosflm_gain:
                    Debug.write('GAIN updated to %f' % gain)
                else:
                    Debug.write('GAIN found to be %f' % gain)

                self._mosflm_gain = gain
                self._mosflm_rerun_integration = True

        if not self._mosflm_hklout:
            raise RuntimeError('processing abandoned')

        self._intgr_batches_out = integrater.get_batches_out()

        mosaics = integrater.get_mosaic_spreads()
        if mosaics and len(mosaics) > 0:
            self.set_integrater_mosaic_min_mean_max(
                min(mosaics),
                sum(mosaics) / len(mosaics), max(mosaics))
        else:
            # fall back to the refined mosaic estimate fetched above
            m = mosaic
            self.set_integrater_mosaic_min_mean_max(m, m, m)

        # write the summary report for each image to Chatter - the
        # detailed report will be written automagically to science...

        Chatter.write(self.show_per_image_statistics())

        Chatter.write('Mosaic spread: %.3f < %.3f < %.3f' % \
                      self.get_integrater_mosaic_min_mean_max())

        # gather the statistics from the postrefinement
        postref_result = integrater.get_postref_result()

        # now write this to a postrefinement log
        postref_log = os.path.join(self.get_working_directory(),
                                   'postrefinement.log')

        fout = open(postref_log, 'w')

        fout.write('$TABLE: Postrefinement for %s:\n' % \
                   self._intgr_sweep_name)
        fout.write('$GRAPHS: Missetting angles:A:1, 2, 3, 4: $$\n')
        fout.write('Batch PhiX PhiY PhiZ $$ Batch PhiX PhiY PhiZ $$\n')

        for image in sorted(postref_result):
            phix = postref_result[image].get('phix', 0.0)
            phiy = postref_result[image].get('phiy', 0.0)
            phiz = postref_result[image].get('phiz', 0.0)

            fout.write('%d %5.2f %5.2f %5.2f\n' % \
                       (image, phix, phiy, phiz))

        fout.write('$$\n')
        fout.close()

        if self.get_integrater_sweep_name():
            pname, xname, dname = self.get_integrater_project_info()
            FileHandler.record_log_file('%s %s %s %s postrefinement' % \
                                        (self.get_integrater_sweep_name(),
                                         pname, xname, dname),
                                        postref_log)

        return self._mosflm_hklout
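
The postrefinement log written above follows the CCP4 loggraph $TABLE
convention, so the missetting angles can be plotted per batch by any
loggraph-aware viewer. A standalone writer for the same table, assuming
postref_result maps image numbers to dicts with optional phix/phiy/phiz
keys as in the code above:

def write_missetting_table(path, sweep_name, postref_result):
    '''Write a loggraph $TABLE of missetting angles, one row per image.'''
    with open(path, 'w') as fout:
        fout.write('$TABLE: Postrefinement for %s:\n' % sweep_name)
        fout.write('$GRAPHS: Missetting angles:A:1, 2, 3, 4: $$\n')
        fout.write('Batch PhiX PhiY PhiZ $$ Batch PhiX PhiY PhiZ $$\n')
        for image in sorted(postref_result):
            row = postref_result[image]
            fout.write('%d %5.2f %5.2f %5.2f\n' % (
                image, row.get('phix', 0.0), row.get('phiy', 0.0),
                row.get('phiz', 0.0)))
        fout.write('$$\n')

write_missetting_table('postrefinement.log', 'SWEEP1',
                       {1: {'phix': 0.01, 'phiy': -0.02, 'phiz': 0.00}})
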
Code example #50
File: ISPyBXmlHandler.py  Project: lizhen-dlut/xia2
  def json_object(self, command_line=''):

    result = {}

    for crystal in sorted(self._crystals):
      xcrystal = self._crystals[crystal]

      cell = xcrystal.get_cell()
      spacegroup = xcrystal.get_likely_spacegroups()[0]

      result['AutoProc'] = {}
      tmp = result['AutoProc']

      tmp['spaceGroup'] = spacegroup
      for name, value in zip(['a', 'b', 'c', 'alpha', 'beta', 'gamma'], cell):
        tmp['refinedCell_%s' % name] = value

      result['AutoProcScalingContainer'] = {}
      tmp = result['AutoProcScalingContainer']
      tmp['AutoProcScaling'] = {
          'recordTimeStamp': time.strftime('%Y-%m-%d %H:%M:%S',
                                           time.localtime())
      }

      statistics_all = xcrystal.get_statistics()
      reflection_files = xcrystal.get_scaled_merged_reflections()

      wavelength_names = xcrystal.get_wavelength_names()

      for key in statistics_all.keys():
        pname, xname, dname = key

        # FIXME should assert that the dname is a
        # valid wavelength name

        available = statistics_all[key].keys()

        stats = []
        keys = [
            'High resolution limit',
            'Low resolution limit',
            'Completeness',
            'Multiplicity',
            'I/sigma',
            'Rmerge(I+/-)',
            'CC half',
            'Anomalous completeness',
            'Anomalous correlation',
            'Anomalous multiplicity',
            'Total observations',
            'Total unique',
            'Rmeas(I)',
            'Rmeas(I+/-)',
            'Rpim(I)',
            'Rpim(I+/-)',
            'Partial Bias'
            ]

        for k in keys:
          if k in available:
            stats.append(k)

        xwavelength = xcrystal.get_xwavelength(dname)
        sweeps = xwavelength.get_sweeps()

        tmp['AutoProcScalingStatistics'] = []
        tmp2 = tmp['AutoProcScalingStatistics']

        for j, name in enumerate(
            ['overall', 'innerShell', 'outerShell']):
          statistics_cache = {'scalingStatisticsType':name}

          for s in stats:

            if s in self._name_map:
              n = self._name_map[s]
            else:
              continue

            if isinstance(statistics_all[key][s], (list, tuple)):
              statistics_cache[n] = statistics_all[key][s][j]

          tmp2.append(statistics_cache)

        tmp['AutoProcIntegrationContainer'] = []
        tmp2 = tmp['AutoProcIntegrationContainer']
        for sweep in sweeps:
          if '#' in sweep.get_template():
            image_name = sweep.get_image_name(0)
          else:
            image_name = os.path.join(sweep.get_directory(),
                                      sweep.get_template())
          cell = sweep.get_integrater_cell()
          intgr_tmp = {}
          for name, value in zip(['a', 'b', 'c', 'alpha', 'beta', 'gamma'],
                                 cell):
            intgr_tmp['cell_%s' % name] = value

          # FIXME this is naughty
          indxr = sweep._get_indexer()
          intgr = sweep._get_integrater()

          start, end = intgr.get_integrater_wedge()

          intgr_tmp['startImageNumber'] = start
          intgr_tmp['endImageNumber'] = end

          intgr_tmp['refinedDetectorDistance'] = indxr.get_indexer_distance()

          beam = indxr.get_indexer_beam_centre()

          intgr_tmp['refinedXBeam'] = beam[0]
          intgr_tmp['refinedYBeam'] = beam[1]

          tmp2.append(
            {'Image':{'fileName':os.path.split(image_name)[-1],
                      'fileLocation':sanitize(os.path.split(image_name)[0])},
             'AutoProcIntegration': intgr_tmp})

      # file unpacking nonsense
      result['AutoProcProgramContainer'] = {}
      tmp = result['AutoProcProgramContainer']
      tmp2 = {}

      if not command_line:
        from xia2.Handlers.CommandLine import CommandLine
        command_line = CommandLine.get_command_line()

      tmp2['processingCommandLine'] = sanitize(command_line)
      tmp2['processingProgram'] = 'xia2'

      tmp['AutoProcProgram'] = tmp2
      tmp['AutoProcProgramAttachment'] = []
      tmp2 = tmp['AutoProcProgramAttachment']

      from xia2.Handlers.Environment import Environment
      data_directory = Environment.generate_directory('DataFiles')

      for k in reflection_files:
        reflection_file = reflection_files[k]

        if not isinstance(reflection_file, type('')):
          continue

        reflection_file = FileHandler.get_data_file(reflection_file)
        basename = os.path.basename(reflection_file)

        if os.path.isfile(os.path.join(data_directory, basename)):
          # Use file in DataFiles directory in preference (if it exists)
          reflection_file = os.path.join(data_directory, basename)

        tmp2.append({
          'fileType': 'Result',
          'fileName': os.path.split(reflection_file)[-1],
          'filePath': sanitize(os.path.split(reflection_file)[0]),
        })

      tmp2.append({'fileType':'Log',
                   'fileName':'xia2.txt',
                   'filePath':sanitize(os.getcwd())})

    return result
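
For orientation, the dictionary json_object assembles has roughly the shape
sketched below, trimmed to one statistics shell, one sweep and one
attachment. All values here are illustrative placeholders, not real output:

import json

example = {
    'AutoProc': {'spaceGroup': 'P 21 21 21', 'refinedCell_a': 78.0},
    'AutoProcScalingContainer': {
        'AutoProcScaling': {'recordTimeStamp': '2024-01-01 00:00:00'},
        'AutoProcScalingStatistics': [
            {'scalingStatisticsType': 'overall', 'completeness': 99.5},
        ],
        'AutoProcIntegrationContainer': [
            {'Image': {'fileName': 'example_0001.cbf',
                       'fileLocation': '/path/to/images'},
             'AutoProcIntegration': {'startImageNumber': 1,
                                     'endImageNumber': 360}},
        ],
    },
    'AutoProcProgramContainer': {
        'AutoProcProgram': {'processingProgram': 'xia2'},
        'AutoProcProgramAttachment': [
            {'fileType': 'Log', 'fileName': 'xia2.txt',
             'filePath': '/path/to/processing'},
        ],
    },
}
print(json.dumps(example, indent=2))
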
Code example #51
File: ISPyBXmlHandler.py  Project: hainm/xia2
  def write_xml(self, file):

    fout = open(file, 'w')

    fout.write('<?xml version="1.0"?>')
    fout.write('<AutoProcContainer>\n')

    for crystal in sorted(self._crystals):
      xcrystal = self._crystals[crystal]

      cell = xcrystal.get_cell()
      spacegroup = xcrystal.get_likely_spacegroups()[0]

      fout.write('<AutoProc><spaceGroup>%s</spaceGroup>' % spacegroup)
      self.write_refined_cell(fout, cell)
      fout.write('</AutoProc>')

      fout.write('<AutoProcScalingContainer>')
      fout.write('<AutoProcScaling>')
      self.write_date(fout)
      fout.write('</AutoProcScaling>')

      statistics_all = xcrystal.get_statistics()
      reflection_files = xcrystal.get_scaled_merged_reflections()

      wavelength_names = xcrystal.get_wavelength_names()

      for key in statistics_all.keys():
        pname, xname, dname = key

        # FIXME should assert that the dname is a
        # valid wavelength name

        available = statistics_all[key].keys()

        stats = []
        keys = [
            'High resolution limit',
            'Low resolution limit',
            'Completeness',
            'Multiplicity',
            'I/sigma',
            'Rmerge(I+/I-)',
            'CC half',
            'Anomalous completeness',
            'Anomalous correlation',
            'Anomalous multiplicity',
            'Total observations',
            'Total unique',
            'Rmeas(I)',
            'Rmeas(I+,-)',
            'Rpim(I)',
            'Rpim(I+/-)',
            'Partial Bias'
            ]

        for k in keys:
          if k in available:
            stats.append(k)

        xwavelength = xcrystal.get_xwavelength(dname)
        sweeps = xwavelength.get_sweeps()

        for j, name in enumerate(
            ['overall', 'innerShell', 'outerShell']):
          statistics_cache = {}

          for s in stats:
            if isinstance(statistics_all[key][s], (list, tuple)):
              statistics_cache[s] = statistics_all[key][s][j]

          # send these to be written out
          self.write_scaling_statistics(fout, name,
                                        statistics_cache)

        for sweep in sweeps:
          fout.write('<AutoProcIntegrationContainer>\n')
          image_name = sweep.get_all_image_names()[0]
          fout.write('<Image><fileName>%s</fileName>' % \
                     os.path.split(image_name)[-1])
          fout.write('<fileLocation>%s</fileLocation></Image>' %
                     sanitize(os.path.split(image_name)[0]))
          fout.write('<AutoProcIntegration>\n')
          cell = sweep.get_integrater_cell()
          self.write_cell(fout, cell)

          # FIXME this is naughty
          intgr = sweep._get_integrater()

          start, end = intgr.get_integrater_wedge()

          fout.write('<startImageNumber>%d</startImageNumber>' % \
                     start)

          fout.write('<endImageNumber>%d</endImageNumber>' % \
                     end)

          # FIXME this is naughty
          indxr = sweep._get_indexer()

          fout.write(
              '<refinedDetectorDistance>%f</refinedDetectorDistance>' % \
              indxr.get_indexer_distance())

          beam = indxr.get_indexer_beam_centre()

          fout.write('<refinedXBeam>%f</refinedXBeam>' % beam[0])
          fout.write('<refinedYBeam>%f</refinedYBeam>' % beam[1])

          fout.write('</AutoProcIntegration>\n')
          fout.write('</AutoProcIntegrationContainer>\n')

      fout.write('</AutoProcScalingContainer>')

      # file unpacking nonsense

      from xia2.Handlers.CommandLine import CommandLine

      fout.write('<AutoProcProgramContainer><AutoProcProgram>')
      fout.write('<processingCommandLine>%s</processingCommandLine>' \
                 % sanitize(CommandLine.get_command_line()))
      fout.write('<processingPrograms>xia2</processingPrograms>')
      fout.write('</AutoProcProgram>')

      for k in reflection_files:

        reflection_file = reflection_files[k]

        if not isinstance(reflection_file, str):
          continue

        reflection_file = FileHandler.get_data_file(reflection_file)

        fout.write(
            '<AutoProcProgramAttachment><fileType>Result')
        fout.write('</fileType><fileName>%s</fileName>' % \
                   os.path.split(reflection_file)[-1])
        fout.write('<filePath>%s</filePath>' % \
                   sanitize(os.path.split(reflection_file)[0]))
        fout.write('</AutoProcProgramAttachment>\n')


      # add the xia2.txt file...

      fout.write('<AutoProcProgramAttachment><fileType>Log')
      fout.write('</fileType><fileName>xia2.txt</fileName>')
      fout.write('<filePath>%s</filePath>' % sanitize(os.getcwd()))
      fout.write('</AutoProcProgramAttachment>\n')

      fout.write('</AutoProcProgramContainer>')

    fout.write('</AutoProcContainer>\n')
    fout.close()

    return
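
write_xml interpolates command lines and file paths directly into XML
elements, relying on sanitize to keep them well-formed. The helper itself
is not shown in this excerpt; a plausible stand-in with the behaviour the
calls above appear to assume is plain XML escaping:

from xml.sax.saxutils import escape

def sanitize(text):
    # Assumed stand-in: escape &, < and > so that paths and command
    # lines cannot break the surrounding XML elements.
    return escape(text)

print(sanitize('/path/to/<run>/xia2 --opt "a&b"'))
# -> /path/to/&lt;run&gt;/xia2 --opt "a&amp;b"
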
Code example #52
File: DialsIntegrater.py  Project: xia2/xia2
  def _integrate_finish(self):
    '''Finish off the integration by running dials.export.'''

    # FIXME - do we want to export every time we call this method
    # (the file will not have changed) and also (more important) do
    # we want a different exported MTZ file every time (I do not think
    # that we do; these can be very large) - was exporter.get_xpid() ->
    # now dials

    exporter = self.ExportMtz()
    exporter.set_reflections_filename(self._intgr_integrated_pickle)
    mtz_filename = os.path.join(
      self.get_working_directory(), '%s_integrated.mtz' % 'dials')
    exporter.set_mtz_filename(mtz_filename)
    exporter.run()
    self._intgr_integrated_filename = mtz_filename

    # record integrated MTZ file for e.g. BLEND.

    pname, xname, dname = self.get_integrater_project_info()
    sweep = self.get_integrater_sweep_name()
    FileHandler.record_more_data_file(
        '%s %s %s %s INTEGRATE' % (pname, xname, dname, sweep), mtz_filename)

    if not os.path.isfile(self._intgr_integrated_filename):
      raise RuntimeError("dials.export failed: %s does not exist."
                         % self._intgr_integrated_filename)

    if self._intgr_reindex_operator is None and \
       self._intgr_spacegroup_number in (
           0, lattice_to_spacegroup(
               self.get_integrater_refiner().get_refiner_lattice())):
      Debug.write('Not reindexing to spacegroup %d (%s)' % \
                    (self._intgr_spacegroup_number,
                     self._intgr_reindex_operator))
      return mtz_filename

    Debug.write('Reindexing to spacegroup %d (%s)' % \
                (self._intgr_spacegroup_number,
                 self._intgr_reindex_operator))

    hklin = mtz_filename
    reindex = Reindex()
    reindex.set_working_directory(self.get_working_directory())
    auto_logfiler(reindex)

    reindex.set_operator(self._intgr_reindex_operator)

    if self._intgr_spacegroup_number:
      reindex.set_spacegroup(self._intgr_spacegroup_number)
    else:
      reindex.set_spacegroup(lattice_to_spacegroup(
        self.get_integrater_refiner().get_refiner_lattice()))

    hklout = '%s_reindex.mtz' % hklin[:-4]
    reindex.set_hklin(hklin)
    reindex.set_hklout(hklout)
    reindex.reindex()
    self._intgr_integrated_filename = hklout
    self._intgr_cell = reindex.get_cell()

    pname, xname, dname = self.get_integrater_project_info()
    sweep = self.get_integrater_sweep_name()
    FileHandler.record_more_data_file(
      '%s %s %s %s experiments' % (pname, xname, dname, sweep),
      self.get_integrated_experiments())

    from iotbx.reflection_file_reader import any_reflection_file
    miller_arrays = any_reflection_file(hklout).as_miller_arrays()
    # look for profile-fitted intensities
    intensities = [ma for ma in miller_arrays
                   if ma.info().labels == ['IPR', 'SIGIPR']]
    if len(intensities) == 0:
      # look instead for summation-integrated intensities
      intensities = [ma for ma in miller_arrays
                     if ma.info().labels == ['I', 'SIGI']]
      assert len(intensities)
    self._intgr_n_ref = intensities[0].size()

    return hklout
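
The column selection at the end of _integrate_finish is a common iotbx
pattern: prefer profile-fitted intensities (IPR/SIGIPR) and fall back to
summation intensities (I/SIGI). Factored into a helper, and assuming a
cctbx/iotbx installation, it might look like:

from iotbx.reflection_file_reader import any_reflection_file

def count_integrated_reflections(mtz_filename):
    '''Count reflections, preferring profile-fitted (IPR/SIGIPR) over
    summation (I/SIGI) intensity columns.'''
    miller_arrays = any_reflection_file(mtz_filename).as_miller_arrays()
    for labels in (['IPR', 'SIGIPR'], ['I', 'SIGI']):
        intensities = [ma for ma in miller_arrays
                       if ma.info().labels == labels]
        if intensities:
            return intensities[0].size()
    raise RuntimeError('no intensity columns found in %s' % mtz_filename)

# e.g. n_ref = count_integrated_reflections('dials_integrated.mtz')
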
Code example #53
File: DialsIndexer.py  Project: xia2/xia2
      # FIXME in here should respect the input unit cell and lattice if provided

      # FIXME from this (i) populate the helper table,
      # (ii) try to avoid re-running the indexing
      # step if we eliminate a solution as we have all of the refined results
      # already available.

      rbs = self.RefineBravaisSettings()
      rbs.set_experiments_filename(indexed_experiments)
      rbs.set_indexed_filename(indexed_file)
      if PhilIndex.params.dials.fix_geometry:
        rbs.set_detector_fix('all')
        rbs.set_beam_fix('all')

      FileHandler.record_log_file('%s LATTICE' % self.get_indexer_full_name(),
                                  rbs.get_log_file())
      rbs.run()

      from cctbx import crystal, sgtbx

      for k in sorted(rbs.get_bravais_summary()):
        summary = rbs.get_bravais_summary()[k]

        # FIXME need to do this better - for the moment only accept lattices
        # where R.M.S. deviation is less than twice P1 R.M.S. deviation.

        if self._indxr_input_lattice is None:
          if not summary['recommended']:
            continue

        experiments = load.experiment_list(