Example #1
  def _integrate(self):
    '''Implement the integrater interface.'''

    # cite the program
    Citations.cite('mosflm')

    images_str = '%d to %d' % tuple(self._intgr_wedge)
    cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % tuple(self._intgr_cell)

    if len(self._fp_directory) <= 50:
      dirname = self._fp_directory
    else:
      dirname = '...%s' % self._fp_directory[-46:]

    Journal.block(
        'integrating', self._intgr_sweep_name, 'mosflm',
        {'images':images_str,
         'cell':cell_str,
         'lattice':self.get_integrater_refiner().get_refiner_lattice(),
         'template':self._fp_template,
         'directory':dirname,
         'resolution':'%.2f' % self._intgr_reso_high})

    self._mosflm_rerun_integration = False

    wd = self.get_working_directory()

    try:

      if self.get_integrater_sweep_name():
        pname, xname, dname = self.get_integrater_project_info()

      nproc = PhilIndex.params.xia2.settings.multiprocessing.nproc
      if nproc > 1:
        Debug.write('Parallel integration: %d jobs' % nproc)
        self._mosflm_hklout = self._mosflm_parallel_integrate()
      else:
        self._mosflm_hklout = self._mosflm_integrate()

      # record integration output for e.g. BLEND.

      sweep = self.get_integrater_sweep_name()
      if sweep:
        FileHandler.record_more_data_file(
            '%s %s %s %s INTEGRATE' % (pname, xname, dname, sweep),
            self._mosflm_hklout)

    except IntegrationError as e:
      if 'negative mosaic spread' in str(e):
        if self._mosflm_postref_fix_mosaic:
          Chatter.write(
              'Negative mosaic spread - stopping integration')
          raise BadLatticeError('negative mosaic spread')

        Chatter.write(
            'Negative mosaic spread - rerunning integration')
        self.set_integrater_done(False)
        self._mosflm_postref_fix_mosaic = True
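
A note on the control flow shared by these examples: xia2's Integrater interface re-invokes _integrate() whenever an implementation clears the done flag and returns, which is how the negative-mosaic workaround above gets a second pass. A minimal standalone sketch of that contract (the class and driver loop here are illustrative, not xia2's real scheduler):

class ToyIntegrater:
  def __init__(self):
    self._done = False
    self._fix_mosaic = False  # stands in for _mosflm_postref_fix_mosaic

  def set_integrater_done(self, done):
    self._done = done

  def _integrate(self):
    if not self._fix_mosaic:
      # first pass "fails": enable the workaround and ask for a rerun
      self._fix_mosaic = True
      self.set_integrater_done(False)
      return None
    return 'INTEGRATE.HKL'

  def integrate(self):
    # driver: repeat until _integrate() leaves the done flag set
    while True:
      self._done = True
      result = self._integrate()
      if self._done:
        return result

print(ToyIntegrater().integrate())  # second pass succeeds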
Example #2
    def _integrate(self):
        """Actually do the integration - in XDS terms this will mean running
        DEFPIX and INTEGRATE to measure all the reflections."""

        images_str = "%d to %d" % tuple(self._intgr_wedge)
        cell_str = "%.2f %.2f %.2f %.2f %.2f %.2f" % tuple(self._intgr_cell)

        if len(self._fp_directory) <= 50:
            dirname = self._fp_directory
        else:
            dirname = "...%s" % self._fp_directory[-46:]

        Journal.block(
            "integrating",
            self._intgr_sweep_name,
            "DIALS",
            {
                "images": images_str,
                "cell": cell_str,
                "lattice": self.get_integrater_refiner().get_refiner_lattice(),
                "template": self._fp_template,
                "directory": dirname,
                "resolution": "%.2f" % self._intgr_reso_high,
            },
        )

        integrate = self.Integrate()

        # decide what images we are going to process, if not already
        # specified

        if not self._intgr_wedge:
            images = self.get_matching_images()
            self.set_integrater_wedge(min(images), max(images))

        imageset = self.get_imageset()
        beam = imageset.get_beam()
        detector = imageset.get_detector()

        d_min_limit = detector.get_max_resolution(beam.get_s0())
        if (d_min_limit > self._intgr_reso_high
                or PhilIndex.params.xia2.settings.resolution.keep_all_reflections):
            Debug.write("Overriding high resolution limit: %f => %f" %
                        (self._intgr_reso_high, d_min_limit))
            self._intgr_reso_high = d_min_limit

        integrate.set_experiments_filename(self._intgr_experiments_filename)
        integrate.set_reflections_filename(self._intgr_indexed_filename)
        if PhilIndex.params.dials.integrate.d_max:
            integrate.set_d_max(PhilIndex.params.dials.integrate.d_max)
        else:
            integrate.set_d_max(self._intgr_reso_low)
        if PhilIndex.params.dials.integrate.d_min:
            integrate.set_d_min(PhilIndex.params.dials.integrate.d_min)
        else:
            integrate.set_d_min(self._intgr_reso_high)
        pname, xname, dname = self.get_integrater_project_info()
        sweep = self.get_integrater_sweep_name()
        FileHandler.record_log_file(
            "%s %s %s %s INTEGRATE" % (pname, xname, dname, sweep),
            integrate.get_log_file(),
        )

        try:
            integrate.run()
        except xia2.Wrappers.Dials.Integrate.DIALSIntegrateError as e:
            s = str(e)
            if ("dials.integrate requires more memory than is available." in s
                    and not self._intgr_reso_high):
                # Try to estimate a more sensible resolution limit for integration
                # in case we were just integrating noise to the edge of the detector
                images = self._integrate_select_images_wedges()

                Debug.write(
                    "Integrating subset of images to estimate resolution limit.\n"
                    "Integrating images %s" % images)

                integrate = self.Integrate()
                integrate.set_experiments_filename(
                    self._intgr_experiments_filename)
                integrate.set_reflections_filename(
                    self._intgr_indexed_filename)
                integrate.set_d_max(self._intgr_reso_low)
                integrate.set_d_min(self._intgr_reso_high)
                for (start, stop) in images:
                    integrate.add_scan_range(
                        start - self.get_matching_images()[0],
                        stop - self.get_matching_images()[0],
                    )
                integrate.set_reflections_per_degree(1000)
                integrate.run()

                integrated_reflections = integrate.get_integrated_filename()

                from xia2.Wrappers.Dials.EstimateResolutionLimit import (
                    EstimateResolutionLimit,
                )

                d_min_estimater = EstimateResolutionLimit()
                d_min_estimater.set_working_directory(
                    self.get_working_directory())
                auto_logfiler(d_min_estimater)
                d_min_estimater.set_experiments_filename(
                    self._intgr_experiments_filename)
                d_min_estimater.set_reflections_filename(
                    integrated_reflections)
                d_min = d_min_estimater.run()

                Debug.write("Estimate for d_min: %.2f" % d_min)
                Debug.write("Re-running integration to this resolution limit")

                self._intgr_reso_high = d_min
                self.set_integrater_done(False)
                return
            raise Sorry(e)

        self._intgr_experiments_filename = integrate.get_integrated_experiments()

        # also record the batch range - needed for the analysis of the
        # radiation damage in chef...

        self._intgr_batches_out = (self._intgr_wedge[0], self._intgr_wedge[1])

        # FIXME (i) record the log file, (ii) get more information out from the
        # integration log on the quality of the data and (iii) the mosaic spread
        # range observed and R.M.S. deviations.

        self._intgr_integrated_reflections = integrate.get_integrated_reflections()
        if not os.path.isfile(self._intgr_integrated_reflections):
            raise RuntimeError("Integration failed: %s does not exist." %
                               self._intgr_integrated_reflections)

        self._intgr_per_image_statistics = integrate.get_per_image_statistics()
        Chatter.write(self.show_per_image_statistics())

        report = self.Report()
        html_filename = os.path.join(
            self.get_working_directory(),
            "%i_dials.integrate.report.html" % report.get_xpid(),
        )
        report.set_html_filename(html_filename)
        report.run(wait_for_completion=True)
        FileHandler.record_html_file(
            "%s %s %s %s INTEGRATE" % (pname, xname, dname, sweep),
            html_filename)

        from dxtbx.serialize import load

        experiments = load.experiment_list(self._intgr_experiments_filename)
        profile = experiments.profiles()[0]
        mosaic = profile.sigma_m()
        try:
            m_min, m_max, m_mean = mosaic.min_max_mean().as_tuple()
            self.set_integrater_mosaic_min_mean_max(m_min, m_mean, m_max)
        except AttributeError:
            self.set_integrater_mosaic_min_mean_max(mosaic, mosaic, mosaic)

        Chatter.write("Mosaic spread: %.3f < %.3f < %.3f" %
                      self.get_integrater_mosaic_min_mean_max())

        return self._intgr_integrated_reflections
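
The d_min_limit check near the top of this example relaxes the requested high-resolution cutoff when the detector geometry cannot reach it, or when the user asked to keep all reflections. A hedged sketch of just that decision, with invented numbers; in the real code detector.get_max_resolution(beam.get_s0()) supplies the edge limit:

def effective_d_min(requested, detector_edge, keep_all=False):
    """Return the high-resolution cutoff actually used for integration."""
    # a larger d-spacing means lower resolution: if the detector edge is
    # the limiting factor, or everything should be kept, use the edge
    if detector_edge > requested or keep_all:
        return detector_edge
    return requested

assert effective_d_min(1.2, 1.5) == 1.5  # detector cannot reach 1.2 A
assert effective_d_min(1.8, 1.5) == 1.8  # request within detector reach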
Example #3
  def _integrate(self):
    '''Actually do the integration - in XDS terms this will mean running
    DEFPIX and INTEGRATE to measure all the reflections.'''

    images_str = '%d to %d' % tuple(self._intgr_wedge)
    cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % tuple(self._intgr_cell)

    if len(self._fp_directory) <= 50:
      dirname = self._fp_directory
    else:
      dirname = '...%s' % self._fp_directory[-46:]

    Journal.block(
        'integrating', self._intgr_sweep_name, 'DIALS',
        {'images':images_str,
         'cell':cell_str,
         'lattice':self.get_integrater_refiner().get_refiner_lattice(),
         'template':self._fp_template,
         'directory':dirname,
         'resolution':'%.2f' % self._intgr_reso_high})

    integrate = self.Integrate()

    # decide what images we are going to process, if not already
    # specified

    if not self._intgr_wedge:
      images = self.get_matching_images()
      self.set_integrater_wedge(min(images),
                                max(images))

    imageset = self.get_imageset()
    beam = imageset.get_beam()
    detector = imageset.get_detector()

    d_min_limit = detector.get_max_resolution(beam.get_s0())
    if d_min_limit > self._intgr_reso_high \
        or PhilIndex.params.xia2.settings.resolution.keep_all_reflections:
      Debug.write('Overriding high resolution limit: %f => %f' % \
                  (self._intgr_reso_high, d_min_limit))
      self._intgr_reso_high = d_min_limit

    integrate.set_experiments_filename(self._intgr_experiments_filename)
    integrate.set_reflections_filename(self._intgr_indexed_filename)
    integrate.set_d_max(self._intgr_reso_low)
    integrate.set_d_min(self._intgr_reso_high)
    pname, xname, dname = self.get_integrater_project_info()
    sweep = self.get_integrater_sweep_name()
    FileHandler.record_log_file('%s %s %s %s INTEGRATE' % \
                                (pname, xname, dname, sweep),
                                integrate.get_log_file())

    try:
      integrate.run()
    except RuntimeError as e:
      s = str(e)
      if ('dials.integrate requires more memory than is available.' in s
          and not self._intgr_reso_high):
        # Try to estimate a more sensible resolution limit for integration
        # in case we were just integrating noise to the edge of the detector
        images = self._integrate_select_images_wedges()

        Debug.write(
          'Integrating subset of images to estimate resolution limit.\n'
          'Integrating images %s' % images)

        integrate = self.Integrate()
        integrate.set_experiments_filename(self._intgr_experiments_filename)
        integrate.set_reflections_filename(self._intgr_indexed_filename)
        integrate.set_d_max(self._intgr_reso_low)
        integrate.set_d_min(self._intgr_reso_high)
        for (start, stop) in images:
          integrate.add_scan_range(start - self.get_matching_images()[0],
                                   stop - self.get_matching_images()[0])
        integrate.set_reflections_per_degree(1000)
        integrate.run()

        integrated_pickle = integrate.get_integrated_filename()

        from xia2.Wrappers.Dials.EstimateResolutionLimit import EstimateResolutionLimit
        d_min_estimater = EstimateResolutionLimit()
        d_min_estimater.set_working_directory(self.get_working_directory())
        auto_logfiler(d_min_estimater)
        d_min_estimater.set_experiments_filename(self._intgr_experiments_filename)
        d_min_estimater.set_reflections_filename(integrated_pickle)
        d_min = d_min_estimater.run()

        Debug.write('Estimate for d_min: %.2f' % d_min)
        Debug.write('Re-running integration to this resolution limit')

        self._intgr_reso_high = d_min
        self.set_integrater_done(False)
        return
      raise
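
When this fallback re-integrates selected wedges, the (start, stop) image numbers are converted into zero-based offsets from the first matching image before being passed to add_scan_range. A small illustrative sketch of that arithmetic (values invented):

matching_images = list(range(101, 201))  # image numbers 101..200
wedges = [(101, 110), (191, 200)]        # wedges chosen for the estimate

scan_ranges = [(start - matching_images[0], stop - matching_images[0])
               for start, stop in wedges]
print(scan_ranges)  # [(0, 9), (90, 99)]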
Example #4
  def _integrate(self):
    '''Actually do the integration - in XDS terms this will mean running
    DEFPIX and INTEGRATE to measure all the reflections.'''

    experiment = self._intgr_refiner.get_refined_experiment_list(
      self.get_integrater_epoch())[0]
    crystal_model = experiment.crystal
    self._intgr_refiner_cell = crystal_model.get_unit_cell().parameters()

    images_str = '%d to %d' % tuple(self._intgr_wedge)
    cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % tuple(self._intgr_refiner_cell)

    if len(self._fp_directory) <= 50:
      dirname = self._fp_directory
    else:
      dirname = '...%s' % self._fp_directory[-46:]

    Journal.block(
        'integrating', self._intgr_sweep_name, 'XDS',
        {'images':images_str,
         'cell':cell_str,
         'lattice':self._intgr_refiner.get_refiner_lattice(),
         'template':self._fp_template,
         'directory':dirname,
         'resolution':'%.2f' % self._intgr_reso_high})

    first_image_in_wedge = self.get_image_name(self._intgr_wedge[0])

    defpix = self.Defpix()

    # pass in the correct data

    for file in ['X-CORRECTIONS.cbf',
                 'Y-CORRECTIONS.cbf',
                 'BKGINIT.cbf',
                 'XPARM.XDS']:
      defpix.set_input_data_file(file, self._xds_data_files[file])

    defpix.set_data_range(self._intgr_wedge[0],
                          self._intgr_wedge[1])

    if self.get_integrater_high_resolution() > 0.0 and \
           self.get_integrater_user_resolution():
      Debug.write('Setting resolution limit in DEFPIX to %.2f' % \
                  self.get_integrater_high_resolution())
      defpix.set_resolution_high(self.get_integrater_high_resolution())
      defpix.set_resolution_low(self.get_integrater_low_resolution())

    elif self.get_integrater_low_resolution():
      Debug.write('Setting low resolution limit in DEFPIX to %.2f' % \
                  self.get_integrater_low_resolution())
      defpix.set_resolution_high(0.0)
      defpix.set_resolution_low(self.get_integrater_low_resolution())

    defpix.run()

    # and gather the result files
    for file in ['BKGPIX.cbf',
                 'ABS.cbf']:
      self._xds_data_files[file] = defpix.get_output_data_file(file)

    integrate = self.Integrate()

    if self._xds_integrate_parameters:
      integrate.set_updates(self._xds_integrate_parameters)

    # decide what images we are going to process, if not already
    # specified

    if not self._intgr_wedge:
      images = self.get_matching_images()
      self.set_integrater_wedge(min(images),
                                max(images))

    first_image_in_wedge = self.get_image_name(self._intgr_wedge[0])

    integrate.set_data_range(self._intgr_wedge[0],
                             self._intgr_wedge[1])

    for file in ['X-CORRECTIONS.cbf',
                 'Y-CORRECTIONS.cbf',
                 'BLANK.cbf',
                 'BKGPIX.cbf',
                 'GAIN.cbf']:
      integrate.set_input_data_file(file, self._xds_data_files[file])

    if 'GXPARM.XDS' in self._xds_data_files:
      Debug.write('Using globally refined parameters')
      integrate.set_input_data_file(
          'XPARM.XDS', self._xds_data_files['GXPARM.XDS'])
      integrate.set_refined_xparm()
    else:
      integrate.set_input_data_file(
          'XPARM.XDS', self._xds_data_files['XPARM.XDS'])

    integrate.run()

    self._intgr_per_image_statistics = integrate.get_per_image_statistics()
    Chatter.write(self.show_per_image_statistics())

    # record the log file -

    pname, xname, dname = self.get_integrater_project_info()
    sweep = self.get_integrater_sweep_name()
    FileHandler.record_log_file('%s %s %s %s INTEGRATE' % \
                                (pname, xname, dname, sweep),
                                os.path.join(self.get_working_directory(),
                                             'INTEGRATE.LP'))

    # and copy the first pass INTEGRATE.HKL...

    lattice = self._intgr_refiner.get_refiner_lattice()
    if not os.path.exists(os.path.join(
        self.get_working_directory(),
        'INTEGRATE-%s.HKL' % lattice)):
      here = self.get_working_directory()
      shutil.copyfile(os.path.join(here, 'INTEGRATE.HKL'),
                      os.path.join(here, 'INTEGRATE-%s.HKL' % lattice))

    # record INTEGRATE.HKL for e.g. BLEND.

    FileHandler.record_more_data_file(
        '%s %s %s %s INTEGRATE' % (pname, xname, dname, sweep),
        os.path.join(self.get_working_directory(), 'INTEGRATE.HKL'))

    # should the existence of these require that I rerun the
    # integration or can we assume that the application of a
    # sensible resolution limit will achieve this??

    self._xds_integrate_parameters = integrate.get_updates()

    # record the mosaic spread &c.

    m_min, m_mean, m_max = integrate.get_mosaic()
    self.set_integrater_mosaic_min_mean_max(m_min, m_mean, m_max)

    Chatter.write('Mosaic spread: %.3f < %.3f < %.3f' % \
                  self.get_integrater_mosaic_min_mean_max())

    return os.path.join(self.get_working_directory(), 'INTEGRATE.HKL')
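
Worth noting is how this XDS pipeline threads state between steps: each wrapper reads named input files from the shared _xds_data_files dict and publishes its outputs back into it, with GXPARM.XDS (globally refined geometry) preferred over XPARM.XDS when present. A toy sketch of the pattern, with stand-in wrappers and file contents:

xds_data_files = {'XPARM.XDS': b'...', 'BKGINIT.cbf': b'...'}

def run_defpix(inputs):
  # stand-in for the Defpix wrapper: consume inputs, publish outputs
  assert 'XPARM.XDS' in inputs
  return {'BKGPIX.cbf': b'...', 'ABS.cbf': b'...'}

xds_data_files.update(run_defpix(xds_data_files))

# INTEGRATE prefers globally refined geometry when it is available
xparm = xds_data_files.get('GXPARM.XDS', xds_data_files['XPARM.XDS'])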
Example #5
  def _scale(self):
    '''Perform all of the operations required to deliver the scaled
    data.'''

    epochs = self._sweep_handler.get_epochs()

    if PhilIndex.params.xia2.settings.optimize_scaling:
      self._determine_best_scale_model_8way()
    else:
      self._scalr_corrections = True
      self._scalr_correct_absorption = True
      self._scalr_correct_partiality = False
      self._scalr_correct_decay = True

    if self._scalr_corrections:
      Journal.block(
          'scaling', self.get_scaler_xcrystal().get_name(), 'CCP4',
          {'scaling model':'automatic',
           'absorption':self._scalr_correct_absorption,
           'tails':self._scalr_correct_partiality,
           'decay':self._scalr_correct_decay
           })

    else:
      Journal.block(
          'scaling', self.get_scaler_xcrystal().get_name(), 'CCP4',
          {'scaling model':'default'})

    sc = self._updated_aimless()
    sc.set_hklin(self._prepared_reflections)
    sc.set_intensities(PhilIndex.params.ccp4.aimless.intensities)

    sc.set_chef_unmerged(True)

    sc.set_new_scales_file('%s.scales' % self._scalr_xname)

    user_resolution_limits = { }

    for epoch in epochs:

      si = self._sweep_handler.get_sweep_information(epoch)
      pname, xname, dname = si.get_project_info()
      sname = si.get_sweep_name()
      intgr = si.get_integrater()

      if intgr.get_integrater_user_resolution():
        dmin = intgr.get_integrater_high_resolution()

        if (dname, sname) not in user_resolution_limits:
          user_resolution_limits[(dname, sname)] = dmin
        elif dmin < user_resolution_limits[(dname, sname)]:
          user_resolution_limits[(dname, sname)] = dmin

      start, end = si.get_batch_range()

      if (dname, sname) in self._scalr_resolution_limits:
        resolution = self._scalr_resolution_limits[(dname, sname)]
        sc.add_run(start, end, exclude = False,
                   resolution = resolution, name = sname)
      else:
        sc.add_run(start, end, name = sname)

    sc.set_hklout(os.path.join(self.get_working_directory(),
                               '%s_%s_scaled_test.mtz' % \
                               (self._scalr_pname, self._scalr_xname)))

    if self.get_scaler_anomalous():
      sc.set_anomalous()

    # what follows, sucks

    if Flags.get_failover():

      try:
        sc.scale()
      except RuntimeError as e:

        es = str(e)

        if 'bad batch' in es or \
               'negative scales run' in es or \
               'no observations' in es:

          # first ID the sweep from the batch no

          batch = int(es.split()[-1])
          epoch = self._identify_sweep_epoch(batch)
          sweep = self._scalr_integraters[
              epoch].get_integrater_sweep()

          # then remove it from my parent xcrystal

          self.get_scaler_xcrystal().remove_sweep(sweep)

          # then remove it from the scaler list of integraters
          # - this should really be a scaler interface method

          del self._scalr_integraters[epoch]

          # then tell the user what is happening

          Chatter.write(
              'Sweep %s gave negative scales - removing' % \
              sweep.get_name())

          # then reset the prepare, do, finish flags

          self.set_scaler_prepare_done(False)
          self.set_scaler_done(False)
          self.set_scaler_finish_done(False)

          # and return

          return

        else:

          raise e
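
The failover branch above identifies the offending sweep by taking the last token of the Aimless error text as a batch number, then drops that sweep from the crystal and resets the prepare/do/finish flags so scaling restarts without it. A sketch of the parsing step alone; the message format below is an assumption for illustration:

def offending_batch(error_text):
  # the code above treats the final whitespace-separated token as the batch
  return int(error_text.split()[-1])

assert offending_batch('negative scales run beginning at batch 1205') == 1205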
Example #6
    def _index(self):
        """Actually index the diffraction pattern. Note well that
        this is not going to compute the matrix..."""

        # acknowledge this program

        if not self._indxr_images:
            raise RuntimeError("No good spots found on any images")

        Citations.cite("labelit")
        Citations.cite("distl")

        _images = []
        for i in self._indxr_images:
            for j in i:
                if j not in _images:
                    _images.append(j)

        _images.sort()

        images_str = "%d" % _images[0]
        for i in _images[1:]:
            images_str += ", %d" % i

        cell_str = None
        if self._indxr_input_cell:
            cell_str = "%.2f %.2f %.2f %.2f %.2f %.2f" % self._indxr_input_cell

        if self._indxr_sweep_name:

            # then this is a proper autoindexing run - describe this
            # to the journal entry

            if len(self._fp_directory) <= 50:
                dirname = self._fp_directory
            else:
                dirname = "...%s" % self._fp_directory[-46:]

            Journal.block(
                "autoindexing",
                self._indxr_sweep_name,
                "labelit",
                {
                    "images": images_str,
                    "target cell": cell_str,
                    "target lattice": self._indxr_input_lattice,
                    "template": self._fp_template,
                    "directory": dirname,
                },
            )

        # auto_logfiler(self)

        from xia2.Wrappers.Labelit.LabelitIndex import LabelitIndex

        index = LabelitIndex()
        index.set_working_directory(self.get_working_directory())
        auto_logfiler(index)

        # task = 'Autoindex from images:'

        # for i in _images:
        # task += ' %s' % self.get_image_name(i)

        # self.set_task(task)

        # self.add_command_line('--index_only')

        Debug.write("Indexing from images:")
        for i in _images:
            index.add_image(self.get_image_name(i))
            Debug.write("%s" % self.get_image_name(i))

        if self._primitive_unit_cell:
            index.set_primitive_unit_cell(self._primitive_unit_cell)

        if self._indxr_input_cell:
            index.set_max_cell(1.25 * max(self._indxr_input_cell[:3]))

        xsweep = self.get_indexer_sweep()
        if xsweep is not None:
            if xsweep.get_distance() is not None:
                index.set_distance(xsweep.get_distance())
            # if self.get_wavelength_prov() == 'user':
            # index.set_wavelength(self.get_wavelength())
            if xsweep.get_beam_centre() is not None:
                index.set_beam_centre(xsweep.get_beam_centre())

        if self._refine_beam is False:
            index.set_refine_beam(False)
        else:
            index.set_refine_beam(True)
            index.set_beam_search_scope(self._beam_search_scope)

        if (math.fabs(self.get_wavelength() - 1.54) < 0.01
                or math.fabs(self.get_wavelength() - 2.29) < 0.01):
            index.set_Cu_KA_or_Cr_KA(True)

        try:
            index.run()
        except RuntimeError as e:

            if self._refine_beam is False:
                raise e

            # can we improve the situation?

            if self._beam_search_scope < 4.0:
                self._beam_search_scope += 4.0

                # try repeating the indexing!

                self.set_indexer_done(False)
                return "failed"

            # otherwise this is beyond redemption

            raise e

        self._solutions = index.get_solutions()

        # FIXME this needs to check the smilie status e.g.
        # ":)" or ";(" or "  ".

        # FIXME need to check the value of the RMSD and raise an
        # exception if the P1 solution has an RMSD > 1.0...

        # Change 27/FEB/08 to support user assigned spacegroups
        # (euugh!) have to "ignore" solutions with higher symmetry
        # otherwise the rest of xia will override us. Bummer.

        # iterate over a copy so solutions can be deleted during the loop
        for i, solution in list(self._solutions.items()):
            if self._indxr_user_input_lattice:
                if (lattice_to_spacegroup(solution["lattice"]) >
                        lattice_to_spacegroup(self._indxr_input_lattice)):
                    Debug.write("Ignoring solution: %s" % solution["lattice"])
                    del self._solutions[i]

        # configure the "right" solution
        self._solution = self.get_solution()

        # now store also all of the other solutions... keyed by the
        # lattice - however these should only be added if they
        # have a smiley in the appropriate record, perhaps?

        for solution in self._solutions.keys():
            lattice = self._solutions[solution]["lattice"]
            if lattice in self._indxr_other_lattice_cell:
                if (self._indxr_other_lattice_cell[lattice]["goodness"] <
                        self._solutions[solution]["metric"]):
                    continue

            self._indxr_other_lattice_cell[lattice] = {
                "goodness": self._solutions[solution]["metric"],
                "cell": self._solutions[solution]["cell"],
            }

        self._indxr_lattice = self._solution["lattice"]
        self._indxr_cell = tuple(self._solution["cell"])
        self._indxr_mosaic = self._solution["mosaic"]

        lms = LabelitMosflmMatrix()
        lms.set_working_directory(self.get_working_directory())
        lms.set_solution(self._solution["number"])
        self._indxr_payload["mosflm_orientation_matrix"] = lms.calculate()

        # get the beam centre from the mosflm script - mosflm
        # may have inverted the beam centre and labelit will know
        # this!

        mosflm_beam_centre = lms.get_mosflm_beam()

        if mosflm_beam_centre:
            self._indxr_payload["mosflm_beam_centre"] = tuple(
                mosflm_beam_centre)

        detector = copy.deepcopy(self.get_detector())
        beam = copy.deepcopy(self.get_beam())
        from dxtbx.model.detector_helpers import set_mosflm_beam_centre

        set_mosflm_beam_centre(detector, beam, mosflm_beam_centre)

        from xia2.Experts.SymmetryExpert import lattice_to_spacegroup_number
        from scitbx import matrix
        from cctbx import sgtbx, uctbx
        from dxtbx.model import CrystalFactory

        mosflm_matrix = matrix.sqr([
            float(i) for line in lms.calculate()
            for i in line.replace("-", " -").split()
        ][:9])

        space_group = sgtbx.space_group_info(
            lattice_to_spacegroup_number(self._solution["lattice"])).group()
        crystal_model = CrystalFactory.from_mosflm_matrix(
            mosflm_matrix,
            unit_cell=uctbx.unit_cell(tuple(self._solution["cell"])),
            space_group=space_group,
        )

        from dxtbx.model import Experiment, ExperimentList

        experiment = Experiment(
            beam=beam,
            detector=detector,
            goniometer=self.get_goniometer(),
            scan=self.get_scan(),
            crystal=crystal_model,
        )

        experiment_list = ExperimentList([experiment])
        self.set_indexer_experiment_list(experiment_list)

        # also get an estimate of the resolution limit from the
        # labelit.stats_distl output... FIXME the name is wrong!

        lsd = LabelitStats_distl()
        lsd.set_working_directory(self.get_working_directory())
        lsd.stats_distl()

        resolution = 1.0e6
        for i in _images:
            stats = lsd.get_statistics(self.get_image_name(i))

            resol = 0.5 * (stats["resol_one"] + stats["resol_two"])

            if resol < resolution:
                resolution = resol

        self._indxr_resolution_estimate = resolution

        return "ok"
Example #7
    def _refine(self):

        for epoch, idxr in self._refinr_indexers.items():
            #self.digest_template()

            gain = idxr._indxr_sweeps[0].get_gain()
            if not self._mosflm_gain and gain:
                self._mosflm_gain = gain

            # if pilatus override GAIN to 1.0

            if idxr.get_imageset().get_detector()[0].get_type(
            ) == 'SENSOR_PAD':
                self._mosflm_gain = 1.0

            #indxr = self.get_refiner_indexer()
            indxr = idxr

            if not self._mosflm_cell_ref_images:
                mosaic = indxr.get_indexer_mosaic()
                self._mosflm_cell_ref_images = self._refine_select_images(
                    idxr, mosaic)

            # generate human readable output

            images_str = '%d to %d' % tuple(self._mosflm_cell_ref_images[0])
            for i in self._mosflm_cell_ref_images[1:]:
                images_str += ', %d to %d' % tuple(i)

            cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % \
              indxr.get_indexer_cell()

            #if len(idxr._fp_directory) <= 50:
            #dirname = idxr._fp_directory
            #else:
            #dirname = '...%s' % idxr._fp_directory[-46:]
            dirname = idxr.get_directory()

            Journal.block(
                'cell refining', idxr._indxr_sweep_name, 'mosflm', {
                    'images': images_str,
                    'start cell': cell_str,
                    'target lattice': indxr.get_indexer_lattice(),
                    'template': idxr.get_template(),
                    'directory': dirname
                })

            # end generate human readable output

            # in here, check to see if we have the raster parameters and
            # separation from indexing - if we used a different indexer
            # we may not, so if this is the case call a function to generate
            # them...

            if not indxr.get_indexer_payload('mosflm_integration_parameters'):

                # generate a list of first images

                images = []
                for cri in self._mosflm_cell_ref_images:
                    images.append(cri[0])

                images.sort()

                integration_params = self._mosflm_generate_raster(
                    images, indxr)
                indxr.set_indexer_payload('mosflm_integration_params',
                                          integration_params)

                # copy them over to where they are needed

                if 'separation' in integration_params:
                    self.set_refiner_parameter(
                        'mosflm', 'separation',
                        '%f %f' % tuple(integration_params['separation']))
                if 'raster' in integration_params:
                    self.set_refiner_parameter(
                        'mosflm', 'raster',
                        '%d %d %d %d %d' % tuple(integration_params['raster']))

            # next test the cell refinement with the correct lattice
            # and P1 and see how the numbers stack up...

            # copy the cell refinement resolution in...

            self._mosflm_cell_ref_resolution = indxr.get_indexer_resolution()

            Debug.write(
              'Using resolution limit of %.2f for cell refinement' % \
              self._mosflm_cell_ref_resolution)

            # now trap NegativeMosaicError exception - once!

            try:

                # now reading the background residual values as well - if these
                # are > 10 it would indicate that the images are blank (assert)
                # so ignore from the analysis / comparison

                if not PhilIndex.params.xia2.settings.lattice_rejection or \
                   idxr.get_indexer_sweep().get_user_lattice():
                    rms_deviations_p1 = []
                    br_p1 = []
                else:
                    rms_deviations_p1, br_p1 = self._mosflm_test_refine_cell(
                        idxr, 'aP')

                rms_deviations, br = self._mosflm_refine_cell(idxr)

            except NegativeMosaicError:

                if self._mosflm_cell_ref_double_mosaic:

                    # reset flag; half mosaic; raise BadLatticeError
                    Debug.write('Mosaic negative even x2 -> BadLattice')
                    self._mosflm_cell_ref_double_mosaic = False
                    raise BadLatticeError('negative mosaic spread')

                else:

                    # set flag, double mosaic, return to try again
                    Debug.write('Mosaic negative -> try x2')
                    self._mosflm_cell_ref_double_mosaic = True
                    self.set_integrater_prepare_done(False)

                    return

            if not self.get_refiner_done():
                return

            # compare cell refinement with lattice and in P1

            images = []
            for cri in self._mosflm_cell_ref_images:
                for j in range(cri[0], cri[1] + 1):
                    images.append(j)

            if rms_deviations and rms_deviations_p1:
                cycles = []
                j = 1
                while j in rms_deviations and \
                      j in rms_deviations_p1:
                    cycles.append(j)
                    j += 1
                Debug.write('Cell refinement comparison:')
                Debug.write('Image   correct   triclinic')
                ratio = 0.0

                ratios = []

                for c in cycles:
                    Debug.write('Cycle %d' % c)
                    for j, image in enumerate(images):

                        background_residual = max(br_p1[c][image],
                                                  br[c][image])

                        if background_residual > 10:
                            Debug.write('. %4d   %.2f     %.2f (ignored)' % \
                                        (images[j], rms_deviations[c][j],
                                         rms_deviations_p1[c][j]))
                            continue

                        Debug.write('. %4d   %.2f     %.2f' % \
                                    (images[j], rms_deviations[c][j],
                                     rms_deviations_p1[c][j]))

                        ratio += rms_deviations[c][j] / rms_deviations_p1[c][j]
                        ratios.append(
                            (rms_deviations[c][j] / rms_deviations_p1[c][j]))

                Debug.write('Average ratio: %.2f' % \
                            (ratio / len(ratios)))

                if (ratio / (max(cycles) * len(images))) > \
                   PhilIndex.params.xia2.settings.lattice_rejection_threshold and \
                   not self.get_integrater_sweep().get_user_lattice():
                    raise BadLatticeError('incorrect lattice constraints')

            else:
                Debug.write('Cell refinement in P1 failed... or was not run')

            cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % \
              self._refinr_cell

            Journal.entry({'refined cell': cell_str})
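
The heart of this refiner is the comparison of per-image RMS deviations from cell refinement under the assigned lattice against the same refinement in triclinic P1: if the constrained refinement is much worse on average, the lattice constraints are rejected. A hedged sketch of that test in isolation (threshold and numbers are invented; the real cutoff comes from lattice_rejection_threshold):

def lattice_ok(rmsd_lattice, rmsd_p1, threshold=1.5):
    # average ratio of constrained to unconstrained positional RMSDs
    ratios = [a / b for a, b in zip(rmsd_lattice, rmsd_p1) if b > 0]
    return sum(ratios) / len(ratios) <= threshold

assert lattice_ok([0.030, 0.032], [0.029, 0.031])      # constraints fine
assert not lattice_ok([0.080, 0.090], [0.030, 0.031])  # lattice suspect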
Example #8
    def _integrate(self):
        """Implement the integrater interface."""

        # cite the program
        Citations.cite("mosflm")

        images_str = "%d to %d" % tuple(self._intgr_wedge)
        cell_str = "%.2f %.2f %.2f %.2f %.2f %.2f" % tuple(self._intgr_cell)

        if len(self._fp_directory) <= 50:
            dirname = self._fp_directory
        else:
            dirname = "...%s" % self._fp_directory[-46:]

        Journal.block(
            "integrating",
            self._intgr_sweep_name,
            "mosflm",
            {
                "images": images_str,
                "cell": cell_str,
                "lattice": self.get_integrater_refiner().get_refiner_lattice(),
                "template": self._fp_template,
                "directory": dirname,
                "resolution": "%.2f" % self._intgr_reso_high,
            },
        )

        self._mosflm_rerun_integration = False

        wd = self.get_working_directory()

        try:

            if self.get_integrater_sweep_name():
                pname, xname, dname = self.get_integrater_project_info()

            nproc = PhilIndex.params.xia2.settings.multiprocessing.nproc
            if nproc > 1:
                Debug.write("Parallel integration: %d jobs" % nproc)
                self._mosflm_hklout = self._mosflm_parallel_integrate()
            else:
                self._mosflm_hklout = self._mosflm_integrate()

            # record integration output for e.g. BLEND.

            sweep = self.get_integrater_sweep_name()
            if sweep:
                FileHandler.record_more_data_file(
                    "%s %s %s %s INTEGRATE" % (pname, xname, dname, sweep),
                    self._mosflm_hklout,
                )

        except IntegrationError as e:
            if "negative mosaic spread" in str(e):
                if self._mosflm_postref_fix_mosaic:
                    Chatter.write(
                        "Negative mosaic spread - stopping integration")
                    raise BadLatticeError("negative mosaic spread")

                Chatter.write("Negative mosaic spread - rerunning integration")
                self.set_integrater_done(False)
                self._mosflm_postref_fix_mosaic = True

        if self._mosflm_rerun_integration and not PhilIndex.params.dials.fast_mode:
            # make sure that this is run again...
            Chatter.write("Need to rerun the integration...")
            self.set_integrater_done(False)

        return self._mosflm_hklout
Example #9
  def _index(self):
    '''Implement the indexer interface.'''

    Citations.cite('mosflm')

    indexer = MosflmIndex()
    indexer.set_working_directory(self.get_working_directory())
    auto_logfiler(indexer)

    from xia2.lib.bits import unique_elements
    _images = unique_elements(self._indxr_images)
    indexer.set_images(_images)
    images_str = ', '.join(map(str, _images))

    cell_str = None
    if self._indxr_input_cell:
      cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % \
                  self._indxr_input_cell

    if self._indxr_sweep_name:

      #if len(self._fp_directory) <= 50:
        #dirname = self._fp_directory
      #else:
        #dirname = '...%s' % self._fp_directory[-46:]
      dirname = os.path.dirname(self.get_imageset().get_template())

      Journal.block(
          'autoindexing', self._indxr_sweep_name, 'mosflm',
          {'images':images_str,
           'target cell':cell_str,
           'target lattice':self._indxr_input_lattice,
           'template':self.get_imageset().get_template(),
           'directory':dirname})

    #task = 'Autoindex from images:'

    #for i in _images:
      #task += ' %s' % self.get_image_name(i)

    #self.set_task(task)

    indexer.set_template(os.path.basename(self.get_template()))
    indexer.set_directory(self.get_directory())

    xsweep = self.get_indexer_sweep()
    if xsweep is not None:
      if xsweep.get_distance() is not None:
        indexer.set_distance(xsweep.get_distance())
      #if self.get_wavelength_prov() == 'user':
        #index.set_wavelength(self.get_wavelength())
      if xsweep.get_beam_centre() is not None:
        indexer.set_beam_centre(xsweep.get_beam_centre())

    if self._indxr_input_cell:
      indexer.set_unit_cell(self._indxr_input_cell)

    if self._indxr_input_lattice is not None:
      spacegroup_number = lattice_to_spacegroup(
          self._indxr_input_lattice)
      indexer.set_space_group_number(spacegroup_number)

    if not self._mosflm_autoindex_thresh:

      try:

        min_peaks = 200

        Debug.write('Aiming for at least %d spots...' % min_peaks)

        thresholds = []

        for i in _images:

          p = Printpeaks()
          p.set_working_directory(self.get_working_directory())
          auto_logfiler(p)
          p.set_image(self.get_image_name(i))
          thresh = p.threshold(min_peaks)

          Debug.write('Autoindex threshold for image %d: %d' % \
                      (i, thresh))

          thresholds.append(thresh)

        thresh = min(thresholds)
        self._mosflm_autoindex_thresh = thresh

      except Exception as e:
        print(str(e))  # XXX this should disappear!
        Debug.write('Error computing threshold: %s' % str(e))
        Debug.write('Using default of 20.0')
        thresh = 20.0

    else:
      thresh = self._mosflm_autoindex_thresh

    Debug.write('Using autoindex threshold: %d' % thresh)

    if self._mosflm_autoindex_sol:
      indexer.set_solution_number(self._mosflm_autoindex_sol)
    indexer.set_threshold(thresh)

    # now forget this to prevent weird things happening later on
    if self._mosflm_autoindex_sol:
      self._mosflm_autoindex_sol = 0

    indexer.run()

    #sweep = self.get_indexer_sweep_name()
    #FileHandler.record_log_file(
        #'%s INDEX' % (sweep), self.get_log_file())

    indxr_cell = indexer.get_refined_unit_cell()
    self._indxr_lattice = indexer.get_lattice()
    space_group_number = indexer.get_indexed_space_group_number()
    detector_distance = indexer.get_refined_distance()
    beam_centre = indexer.get_refined_beam_centre()
    mosaic_spreads = indexer.get_mosaic_spreads()

    if min(list(indxr_cell)) < 10.0 and \
       indxr_cell[2] / indxr_cell[0] > 6:

      Debug.write(
          'Unrealistic autoindexing solution: ' +
          '%.2f %.2f %.2f %.2f %.2f %.2f' % indxr_cell)

      # tweak some parameters and try again...
      self._mosflm_autoindex_thresh *= 1.5
      self.set_indexer_done(False)

      return

    intgr_params = { }

    # look up other possible indexing solutions (not well - in
    # standard settings only!) This is moved earlier as it could
    # result in returning if Mosflm has selected the wrong
    # solution!

    try:
      self._indxr_other_lattice_cell = indexer.get_solutions()

      # Change 27/FEB/08 to support user assigned spacegroups
      if self._indxr_user_input_lattice:
        lattice_to_spacegroup_dict = {
            'aP':1, 'mP':3, 'mC':5, 'oP':16, 'oC':20, 'oF':22,
            'oI':23, 'tP':75, 'tI':79, 'hP':143, 'hR':146,
            'cP':195, 'cF':196, 'cI':197}
        # iterate over a copy of the keys so entries can be deleted
        for k in list(self._indxr_other_lattice_cell.keys()):
          if lattice_to_spacegroup_dict[k] > \
             lattice_to_spacegroup_dict[self._indxr_input_lattice]:
            del self._indxr_other_lattice_cell[k]

      # check that the selected unit cell matches - and if
      # not raise a "horrible" exception

      if self._indxr_input_cell:
        assert indxr_cell is not None
        for j in range(6):
          if math.fabs(self._indxr_input_cell[j] - indxr_cell[j]) > 2.0:
            Chatter.write(
                'Mosflm autoindexing did not select ' +
                'correct (target) unit cell')
            raise RuntimeError(
                'something horrible happened in indexing')

    except RuntimeError as e:
      # check if mosflm rejected a solution we have it
      if 'horribl' in str(e):
        # ok it did - time to break out the big guns...
        if not self._indxr_input_cell:
          raise RuntimeError(
              'error in solution selection when not preset')

        # XXX FIXME
        self._mosflm_autoindex_sol = _get_indexing_solution_number(
          indexer.get_all_output(),
          self._indxr_input_cell,
          self._indxr_input_lattice)

        # set the fact that we are not done...
        self.set_indexer_done(False)

        # and return - hopefully this will restart everything
        return
      else:
        raise e
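
The sanity check in the middle of this example rejects autoindexing solutions with a very short axis combined with an extreme axial ratio, raises the spot-search threshold by half, and clears the done flag so the framework reruns indexing. A sketch of just the plausibility test, with invented cells:

def plausible_cell(cell):
  # cell is (a, b, c, alpha, beta, gamma); mirrors the check above
  return not (min(cell) < 10.0 and cell[2] / cell[0] > 6)

assert plausible_cell((40.0, 50.0, 60.0, 90.0, 90.0, 90.0))
assert not plausible_cell((8.0, 8.0, 60.0, 90.0, 90.0, 90.0))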
Example #10
    def _index(self):
        """Actually do the autoindexing using the data prepared by the
        previous method."""

        images_str = "%d to %d" % tuple(self._indxr_images[0])
        for i in self._indxr_images[1:]:
            images_str += ", %d to %d" % tuple(i)

        cell_str = None
        if self._indxr_input_cell:
            cell_str = "%.2f %.2f %.2f %.2f %.2f %.2f" % self._indxr_input_cell

        # then this is a proper autoindexing run - describe this
        # to the journal entry

        dirname = self.get_directory()

        Journal.block(
            "autoindexing",
            self._indxr_sweep_name,
            "XDS",
            {
                "images": images_str,
                "target cell": cell_str,
                "target lattice": self._indxr_input_lattice,
                "template": self.get_template(),
                "directory": dirname,
            },
        )

        idxref = self.Idxref()

        self._index_remove_masked_regions()
        for file in ["SPOT.XDS"]:
            idxref.set_input_data_file(file, self._indxr_payload[file])

        # edit SPOT.XDS to remove reflections in untrusted regions of the detector

        idxref.set_data_range(self._indxr_images[0][0],
                              self._indxr_images[0][1])
        idxref.set_background_range(self._indxr_images[0][0],
                                    self._indxr_images[0][1])

        # set the phi start etc correctly

        for block in self._indxr_images[:1]:
            starting_frame = block[0]
            starting_angle = self.get_scan().get_angle_from_image_index(
                starting_frame)

            idxref.set_starting_frame(starting_frame)
            idxref.set_starting_angle(starting_angle)

            idxref.add_spot_range(block[0], block[1])

        for block in self._indxr_images[1:]:
            idxref.add_spot_range(block[0], block[1])

        if self._indxr_user_input_lattice:
            idxref.set_indexer_user_input_lattice(True)

        if self._indxr_input_lattice and self._indxr_input_cell:
            idxref.set_indexer_input_lattice(self._indxr_input_lattice)
            idxref.set_indexer_input_cell(self._indxr_input_cell)

            Debug.write("Set lattice: %s" % self._indxr_input_lattice)
            Debug.write("Set cell: %f %f %f %f %f %f" % self._indxr_input_cell)

            original_cell = self._indxr_input_cell
        elif self._indxr_input_lattice:
            idxref.set_indexer_input_lattice(self._indxr_input_lattice)
            original_cell = None
        else:
            original_cell = None

        from dxtbx.serialize.xds import to_xds

        converter = to_xds(self.get_imageset())
        xds_beam_centre = converter.detector_origin

        idxref.set_beam_centre(xds_beam_centre[0], xds_beam_centre[1])

        # fixme need to check if the lattice, cell have been set already,
        # and if they have, pass these in as input to the indexing job.

        done = False

        while not done:
            try:
                done = idxref.run()

                # N.B. in here if the IDXREF step was being run in the first
                # pass done is FALSE however there should be a refined
                # P1 orientation matrix etc. available - so keep it!

            except XDSException as e:
                # inspect this - if we have complaints about not
                # enough reflections indexed, and we have a target
                # unit cell, and they are the same, well ignore it

                if "solution is inaccurate" in str(e):
                    Debug.write("XDS complains solution inaccurate - ignoring")
                    done = idxref.continue_from_error()
                elif ("insufficient percentage (< 70%)" in str(e)
                      or "insufficient percentage (< 50%)"
                      in str(e)) and original_cell:
                    done = idxref.continue_from_error()
                    lattice, cell, mosaic = idxref.get_indexing_solution()
                    # compare solutions FIXME should use xds_cell_deviation
                    check = PhilIndex.params.xia2.settings.xds_check_cell_deviation
                    for j in range(3):
                        # allow two percent variation in unit cell length
                        if (math.fabs(
                            (cell[j] - original_cell[j]) / original_cell[j]) >
                                0.02 and check):
                            Debug.write("XDS unhappy and solution wrong")
                            raise e
                        # and two degree difference in angle
                        if (math.fabs(cell[j + 3] - original_cell[j + 3]) > 2.0
                                and check):
                            Debug.write("XDS unhappy and solution wrong")
                            raise e
                    Debug.write("XDS unhappy but solution ok")
                elif "insufficient percentage (< 70%)" in str(
                        e) or "insufficient percentage (< 50%)" in str(e):
                    Debug.write("XDS unhappy but solution probably ok")
                    done = idxref.continue_from_error()
                else:
                    raise e

        FileHandler.record_log_file(
            "%s INDEX" % self.get_indexer_full_name(),
            os.path.join(self.get_working_directory(), "IDXREF.LP"),
        )

        for file in ["SPOT.XDS", "XPARM.XDS"]:
            self._indxr_payload[file] = idxref.get_output_data_file(file)

        # need to get the indexing solutions out somehow...

        self._indxr_other_lattice_cell = idxref.get_indexing_solutions()

        self._indxr_lattice, self._indxr_cell, self._indxr_mosaic = (
            idxref.get_indexing_solution())

        import dxtbx
        from dxtbx.serialize.xds import to_crystal

        xparm_file = os.path.join(self.get_working_directory(), "XPARM.XDS")
        models = dxtbx.load(xparm_file)
        crystal_model = to_crystal(xparm_file)

        from dxtbx.model import Experiment, ExperimentList

        experiment = Experiment(
            beam=models.get_beam(),
            detector=models.get_detector(),
            goniometer=models.get_goniometer(),
            scan=models.get_scan(),
            crystal=crystal_model,
            # imageset=self.get_imageset(),
        )

        experiment_list = ExperimentList([experiment])
        self.set_indexer_experiment_list(experiment_list)

        # I will want this later on to check that the lattice was ok
        self._idxref_subtree_problem = idxref.get_index_tree_problem()

        return
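
When IDXREF complains about an insufficient indexing percentage but a target cell is known, the loop above accepts the solution only if the refined cell matches the target to within two percent on lengths and two degrees on angles. A standalone sketch of that comparison with illustrative cells:

def cells_agree(cell, target, length_tol=0.02, angle_tol=2.0):
    # lengths within a fractional tolerance, angles within degrees
    lengths_ok = all(
        abs((cell[j] - target[j]) / target[j]) <= length_tol for j in range(3)
    )
    angles_ok = all(abs(cell[j] - target[j]) <= angle_tol for j in range(3, 6))
    return lengths_ok and angles_ok

target = (78.0, 78.0, 37.0, 90.0, 90.0, 90.0)
assert cells_agree((78.5, 77.6, 37.1, 90.0, 90.1, 89.8), target)
assert not cells_agree((70.0, 78.0, 37.0, 90.0, 90.0, 90.0), target)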
Example #11
    def _scale(self):
        """Perform all of the operations required to deliver the scaled
        data."""
        sweep_infos = [
            self._sweep_handler.get_sweep_information(e)
            for e in self._sweep_handler.get_epochs()
        ]

        if self._scalr_corrections:
            Journal.block(
                "scaling",
                self.get_scaler_xcrystal().get_name(),
                "Dials",
                {
                    "scaling model": "automatic",
                    "absorption": self._scalr_correct_absorption,
                    "decay": self._scalr_correct_decay,
                },
            )

        else:
            Journal.block(
                "scaling",
                self.get_scaler_xcrystal().get_name(),
                "Dials",
                {"scaling model": "default"},
            )

        ### Set the parameters and datafiles for dials.scale

        self._scaler = DialsScale()
        self._scaler = self._updated_dials_scaler()

        if self._scaled_experiments and self._scaled_reflections:
            # going to continue-where-left-off
            self._scaler.add_experiments_json(self._scaled_experiments)
            self._scaler.add_reflections_file(self._scaled_reflections)
        else:
            for si in sweep_infos:
                self._scaler.add_experiments_json(si.get_experiments())
                self._scaler.add_reflections_file(si.get_reflections())

        ### Set the unmerged mtz filepath

        self._scalr_scaled_reflection_files = {}
        self._scalr_scaled_reflection_files["mtz_unmerged"] = {}

        # First set the unmerged mtz output filename. Note that this is the
        # same for MAD datasets too, as need a single unmerged for merging
        # stats calc. For the merged mtz this is different.
        scaled_unmerged_mtz_path = os.path.join(
            self.get_working_directory(),
            "%s_%s_scaled_unmerged.mtz" % (self._scalr_pname, self._scalr_xname),
        )
        self._scaler.set_scaled_unmerged_mtz([scaled_unmerged_mtz_path])
        self._scaler.set_crystal_name(self._scalr_xname)  # Name goes in mtz

        ### Set the merged mtz filepath(s), making into account MAD case.

        # Find number of dnames (i.e. number of wavelengths)
        dnames_set = OrderedSet()
        for si in sweep_infos:
            dnames_set.add(si.get_project_info()[2])

        scaled_mtz_path = os.path.join(
            self.get_working_directory(),
            "%s_%s_scaled.mtz" % (self._scalr_pname, self._scalr_xname),
        )
        if len(dnames_set) == 1:
            self._scaler.set_scaled_mtz([scaled_mtz_path])
            self._scalr_scaled_reflection_files["mtz"] = {
                dnames_set[0]: scaled_mtz_path
            }
            self._scalr_scaled_reflection_files["mtz_unmerged"] = {
                dnames_set[0]: scaled_unmerged_mtz_path
            }
        else:
            merged_mtz_files = []
            self._scalr_scaled_reflection_files["mtz"] = {}
            for dname in dnames_set:
                # str.rstrip strips a character set, not a suffix - slice it off
                this_mtz_path = scaled_mtz_path[:-len(".mtz")] + ("_%s.mtz" % dname)
                merged_mtz_files.append(this_mtz_path)
                self._scalr_scaled_reflection_files["mtz"][dname] = scaled_mtz_path
                # Note - we aren't logging individual unmerged here as not
                # generating until later.
            self._scaler.set_scaled_mtz(merged_mtz_files)

        ### Set the resolution limit if applicable

        user_resolution_limits = {}
        highest_resolution = 100.0
        for si in sweep_infos:
            dname = si.get_project_info()[2]
            sname = si.get_sweep_name()
            intgr = si.get_integrater()

            if intgr.get_integrater_user_resolution():
                # record user resolution here but don't use it until later - why?
                dmin = intgr.get_integrater_high_resolution()

                if (dname, sname) not in user_resolution_limits:
                    user_resolution_limits[(dname, sname)] = dmin
                elif dmin < user_resolution_limits[(dname, sname)]:
                    user_resolution_limits[(dname, sname)] = dmin

            if (dname, sname) in self._scalr_resolution_limits:
                d_min, _ = self._scalr_resolution_limits[(dname, sname)]
                if d_min < highest_resolution:
                    highest_resolution = d_min
        if highest_resolution < 99.9:
            self._scaler.set_resolution(d_min=highest_resolution)

        ### Setup final job details and run scale

        self._scaler.set_working_directory(self.get_working_directory())
        auto_logfiler(self._scaler)
        FileHandler.record_log_file(
            "%s %s SCALE" % (self._scalr_pname, self._scalr_xname),
            self._scaler.get_log_file(),
        )
        self._scaler.scale()
        self._scaled_experiments = self._scaler.get_scaled_experiments()
        self._scaled_reflections = self._scaler.get_scaled_reflections()

        FileHandler.record_data_file(scaled_unmerged_mtz_path)

        # make it so that only scaled.expt and scaled.refl are
        # the files that dials.scale knows about, so that if scale is called again,
        # scaling resumes from where it left off.
        self._scaler.clear_datafiles()

        # log datafiles here, picked up from here in commonscaler methods.
        if len(dnames_set) == 1:
            hklout = copy.deepcopy(self._scaler.get_scaled_mtz()[0])
            self._scalr_scaled_refl_files = {dnames_set[0]: hklout}
            FileHandler.record_data_file(hklout)
        else:
            self._scalr_scaled_refl_files = {}
            for i, dname in enumerate(dnames_set):
                hklout = copy.deepcopy(self._scaler.get_scaled_mtz()[i])
                self._scalr_scaled_refl_files[dname] = hklout
                FileHandler.record_data_file(hklout)

        ### Calculate the resolution limit and set done False if applicable

        highest_suggested_resolution = self.assess_resolution_limits(
            self._scaler.get_unmerged_reflection_file(),
            user_resolution_limits,
            use_misigma=False,
        )

        if not self.get_scaler_done():
            # reset for when resolution limit applied
            Debug.write("Returning as scaling not finished...")
            return

        ### For MAD case, generate individual unmerged mtz for stats.

        if len(dnames_set) > 1:
            unmerged_mtz_files = []
            scaler = DialsScale()
            scaler.set_working_directory(self.get_working_directory())
            scaler.set_export_mtz_only()
            scaler.add_experiments_json(self._scaled_experiments)
            scaler.add_reflections_file(self._scaled_reflections)
            for dname in dnames_set:
                # NB str.rstrip strips a character set, not a suffix
                this_mtz_path = scaled_unmerged_mtz_path[: -len(".mtz")] + (
                    "_%s.mtz" % dname
                )
                unmerged_mtz_files.append(this_mtz_path)
                self._scalr_scaled_reflection_files["mtz_unmerged"][
                    dname
                ] = this_mtz_path
            scaler.set_scaled_unmerged_mtz(unmerged_mtz_files)
            scaler.scale()
            for f in scaler.get_scaled_unmerged_mtz():  # a list
                FileHandler.record_data_file(f)
            # set reflections, experiments & unmerged mtz names

        if PhilIndex.params.xia2.settings.merging_statistics.source == "cctbx":
            for key in self._scalr_scaled_refl_files:
                stats = self._compute_scaler_statistics(
                    self._scalr_scaled_reflection_files["mtz_unmerged"][key],
                    selected_band=(highest_suggested_resolution, None),
                    wave=key,
                )
                self._scalr_statistics[
                    (self._scalr_pname, self._scalr_xname, key)
                ] = stats

        # Run twotheta refine
        self._update_scaled_unit_cell()
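
A note on the resolution bookkeeping in the example above: it starts from a sentinel of 100.0 Å (worse than any real d_min) and keeps the smallest value seen across sweeps. A minimal standalone sketch of that pattern, with a hypothetical limits dict standing in for self._scalr_resolution_limits:

# Hypothetical (dname, sname) -> (d_min, unused) mapping, standing in
# for self._scalr_resolution_limits from the example above.
resolution_limits = {
    ("WAVE1", "SWEEP1"): (1.40, None),
    ("WAVE1", "SWEEP2"): (1.55, None),
}

highest_resolution = 100.0  # sentinel, in Angstroms
for (d_min, _) in resolution_limits.values():
    if d_min < highest_resolution:
        highest_resolution = d_min

if highest_resolution < 99.9:  # i.e. at least one limit was actually set
    print("would scale with d_min=%.2f" % highest_resolution)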
Example #12
0
    def _scale_prepare(self):
        """Perform all of the preparation required to deliver the scaled
        data. This should sort together the reflection files, ensure that
        they are correctly indexed (via dials.symmetry) and generally tidy
        things up."""

        # AIM discover symmetry and reindex with dials.symmetry, and set the correct
        # reflections in si.reflections, si.experiments

        self._helper.set_working_directory(self.get_working_directory())
        self._factory.set_working_directory(self.get_working_directory())

        need_to_return = False

        self._sweep_handler = SweepInformationHandler(self._scalr_integraters)

        p, x = self._sweep_handler.get_project_info()
        self._scalr_pname = p
        self._scalr_xname = x

        self._helper.set_pname_xname(p, x)

        Journal.block(
            "gathering",
            self.get_scaler_xcrystal().get_name(),
            "Dials",
            {"working directory": self.get_working_directory()},
        )

        # First do stuff to work out if excluding any data
        # Note - does this actually work? I couldn't seem to get it to work
        # in either this pipeline or the standard dials pipeline
        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            intgr = si.get_integrater()
            _, xname, dname = si.get_project_info()
            sname = si.get_sweep_name()

            exclude_sweep = False

            for sweep in PhilIndex.params.xia2.settings.sweep:
                if sweep.id == sname and sweep.exclude:
                    exclude_sweep = True
                    break

            if exclude_sweep:
                self._sweep_handler.remove_epoch(epoch)
                Debug.write("Excluding sweep %s" % sname)
            else:
                Journal.entry({"adding data from": "%s/%s/%s" % (xname, dname, sname)})

        # If multiple files, want to run symmetry to check for consistent indexing
        # also

        # try to reproduce what CCP4ScalerA is doing

        # first assign identifiers to avoid dataset-id collisions
        # Idea is that this should be called anytime you get data anew from the
        # integrater, to intercept and assign unique ids, then set in the
        # sweep_information (si) and always use si.set_reflections/
        # si.get_reflections as we process.

        # self._sweep_handler = self._helper.assign_and_return_datasets(
        #    self._sweep_handler
        # )  # no longer needed - dials.symmetry now sorts out identifiers

        need_to_return = False

        if self._scalr_input_pointgroup:
            self._input_pointgroup_scale_prepare()
        elif (
            len(self._sweep_handler.get_epochs()) > 1
            and PhilIndex.params.xia2.settings.multi_sweep_indexing
        ):
            need_to_return = self._multi_sweep_scale_prepare()
        else:
            need_to_return = self._standard_scale_prepare()

        if need_to_return:
            self.set_scaler_done(False)
            self.set_scaler_prepare_done(False)
            return

        ### After this point, the point group is good and we only need to
        ### reindex to a consistent setting. No need to call back to the
        ### integrater, just use the data in the sweep info.

        # First work out if we're going to reindex against external reference
        param = PhilIndex.params.xia2.settings.scale
        using_external_references = False
        reference_refl = None
        reference_expt = None
        if param.reference_reflection_file:
            if not param.reference_experiment_file:
                Chatter.write(
                    """
No DIALS reference experiments file provided, reference reflection file will
not be used. Reference mtz files for reindexing not currently supported for
pipeline=dials (supported for pipeline=dials-aimless).
"""
                )
            else:
                reference_refl = param.reference_reflection_file
                reference_expt = param.reference_experiment_file
                using_external_references = True
                Debug.write("Using reference reflections %s" % reference_refl)
                Debug.write("Using reference experiments %s" % reference_expt)

        if len(self._sweep_handler.get_epochs()) > 1:
            if PhilIndex.params.xia2.settings.unify_setting:
                self.unify_setting()

            if PhilIndex.params.xia2.settings.use_brehm_diederichs:
                self.brehm_diederichs_reindexing()
            # If not using Brehm-Diederichs reindexing, set the reference as the
            # first sweep, unless using an external reference.
            elif not using_external_references:
                Debug.write("First sweep will be used as reference for reindexing")
                first = self._sweep_handler.get_epochs()[0]
                si = self._sweep_handler.get_sweep_information(first)
                reference_expt = si.get_experiments()
                reference_refl = si.get_reflections()

        # Now reindex to be consistent with first dataset - run reindex on each
        # dataset with reference (unless did brehm diederichs and didn't supply
        # a reference file)

        if reference_refl and reference_expt:
            exp = load.experiment_list(reference_expt)
            reference_cell = exp[0].crystal.get_unit_cell().parameters()

            # ---------- REINDEX TO CORRECT (REFERENCE) SETTING ----------
            Chatter.write("Reindexing all datasets to common reference")

            if using_external_references:
                epochs = self._sweep_handler.get_epochs()
            else:
                epochs = self._sweep_handler.get_epochs()[1:]
            for epoch in epochs:
                # if we are working with unified UB matrix then this should not
                # be a problem here (note, *if*; *should*)

                # what about e.g. alternative P1 settings?
                # see JIRA MXSW-904
                if PhilIndex.params.xia2.settings.unify_setting:
                    continue

                reindexer = DialsReindex()
                reindexer.set_working_directory(self.get_working_directory())
                auto_logfiler(reindexer)

                si = self._sweep_handler.get_sweep_information(epoch)
                reindexer.set_reference_filename(reference_expt)
                reindexer.set_reference_reflections(reference_refl)
                reindexer.set_indexed_filename(si.get_reflections())
                reindexer.set_experiments_filename(si.get_experiments())
                reindexer.run()

                # At this point, CCP4ScalerA would reset in integrator so that
                # the integrater calls reindex, no need to do that here as
                # have access to the files and will never need to reintegrate.

                si.set_reflections(reindexer.get_reindexed_reflections_filename())
                si.set_experiments(reindexer.get_reindexed_experiments_filename())

                # FIXME how to get some indication of the reindexing used?

                exp = load.experiment_list(
                    reindexer.get_reindexed_experiments_filename()
                )
                cell = exp[0].crystal.get_unit_cell().parameters()

                # Note - no lattice check as this will already be caught by reindex
                Debug.write("Cell: %.2f %.2f %.2f %.2f %.2f %.2f" % cell)
                Debug.write("Ref:  %.2f %.2f %.2f %.2f %.2f %.2f" % reference_cell)

                for j in range(6):
                    if (
                        math.fabs((cell[j] - reference_cell[j]) / reference_cell[j])
                        > 0.1
                    ):
                        raise RuntimeError(
                            "unit cell parameters differ in %s and %s"
                            % (reference_expt, si.get_reflections())
                        )

        # Now make sure all batches are ok before we finish preparing.
        # This should be made safer: currently after dials.scale there is no
        # concept of a 'batch'; dials.export uses calculate_batch_offsets
        # to assign batches, giving the same result as below.

        experiments_to_rebatch = []
        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            experiment = si.get_experiments()
            experiments_to_rebatch.append(load.experiment_list(experiment)[0])
        offsets = calculate_batch_offsets(experiments_to_rebatch)

        for i, epoch in enumerate(self._sweep_handler.get_epochs()):
            si = self._sweep_handler.get_sweep_information(epoch)
            r = si.get_batch_range()
            si.set_batch_offset(offsets[i])
            si.set_batches([r[0] + offsets[i], r[1] + offsets[i]])
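
The rebatching at the end of this example boils down to shifting each sweep's batch range by a per-experiment offset so that ranges cannot collide. A toy sketch of the shift itself, with hard-coded offsets standing in for the output of calculate_batch_offsets:

# Hypothetical per-sweep batch ranges and offsets.
batch_ranges = [(1, 100), (1, 90), (1, 120)]
offsets = [0, 100, 200]

rebatched = [
    (start + offset, end + offset)
    for (start, end), offset in zip(batch_ranges, offsets)
]
print(rebatched)  # [(1, 100), (101, 190), (201, 320)] - no overlaps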
Example #13
0
  def _index(self):
    '''Actually do the autoindexing using the data prepared by the
    previous method.'''

    images_str = '%d to %d' % tuple(self._indxr_images[0])
    for i in self._indxr_images[1:]:
      images_str += ', %d to %d' % tuple(i)

    cell_str = None
    if self._indxr_input_cell:
      cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % \
                 self._indxr_input_cell

    # then this is a proper autoindexing run - describe this
    # to the journal entry

    dirname = self.get_directory()

    Journal.block('autoindexing', self._indxr_sweep_name, 'XDS',
                  {'images':images_str,
                   'target cell':cell_str,
                   'target lattice':self._indxr_input_lattice,
                   'template':self.get_template(),
                   'directory':dirname})

    idxref = self.Idxref()

    # edit SPOT.XDS to remove reflections in untrusted regions of the detector
    self._index_remove_masked_regions()
    for file in ['SPOT.XDS']:
      idxref.set_input_data_file(file, self._indxr_payload[file])

    idxref.set_data_range(self._indxr_images[0][0],
                          self._indxr_images[0][1])
    idxref.set_background_range(self._indxr_images[0][0],
                                self._indxr_images[0][1])

    # set the phi start etc correctly

    for block in self._indxr_images[:1]:
      starting_frame = block[0]
      starting_angle = self.get_scan().get_angle_from_image_index(starting_frame)

      idxref.set_starting_frame(starting_frame)
      idxref.set_starting_angle(starting_angle)

      idxref.add_spot_range(block[0], block[1])

    for block in self._indxr_images[1:]:
      idxref.add_spot_range(block[0], block[1])

    if self._indxr_user_input_lattice:
      idxref.set_indexer_user_input_lattice(True)

    if self._indxr_input_lattice and self._indxr_input_cell:
      idxref.set_indexer_input_lattice(self._indxr_input_lattice)
      idxref.set_indexer_input_cell(self._indxr_input_cell)

      Debug.write('Set lattice: %s' % self._indxr_input_lattice)
      Debug.write('Set cell: %f %f %f %f %f %f' % \
                  self._indxr_input_cell)

      original_cell = self._indxr_input_cell
    elif self._indxr_input_lattice:
      idxref.set_indexer_input_lattice(self._indxr_input_lattice)
      original_cell = None
    else:
      original_cell = None

    from dxtbx.serialize.xds import to_xds
    converter = to_xds(self.get_imageset())
    xds_beam_centre = converter.detector_origin

    idxref.set_beam_centre(xds_beam_centre[0],
                           xds_beam_centre[1])

    # fixme need to check if the lattice, cell have been set already,
    # and if they have, pass these in as input to the indexing job.

    done = False

    while not done:
      try:
        done = idxref.run()

        # N.B. in here if the IDXREF step was being run in the first
        # pass done is FALSE however there should be a refined
        # P1 orientation matrix etc. available - so keep it!

      except XDSException as e:
        # inspect this - if we have complaints about not
        # enough reflections indexed, and we have a target
        # unit cell, and they are the same, well ignore it

        if 'solution is inaccurate' in str(e):
          Debug.write(
              'XDS complains solution inaccurate - ignoring')
          done = idxref.continue_from_error()
        elif ('insufficient percentage (< 70%)' in str(e) or
              'insufficient percentage (< 50%)' in str(e)) and \
                 original_cell:
          done = idxref.continue_from_error()
          lattice, cell, mosaic = \
                   idxref.get_indexing_solution()
          # compare solutions FIXME should use xds_cell_deviation
          check = PhilIndex.params.xia2.settings.xds_check_cell_deviation
          for j in range(3):
            # allow two percent variation in unit cell length
            if math.fabs((cell[j] - original_cell[j]) / \
                         original_cell[j]) > 0.02 and check:
              Debug.write('XDS unhappy and solution wrong')
              raise e
            # and two degree difference in angle
            if math.fabs(cell[j + 3] - original_cell[j + 3]) \
                   > 2.0 and check:
              Debug.write('XDS unhappy and solution wrong')
              raise e
          Debug.write('XDS unhappy but solution ok')
        elif 'insufficient percentage (< 70%)' in str(e) or \
                 'insufficient percentage (< 50%)' in str(e):
          Debug.write('XDS unhappy but solution probably ok')
          done = idxref.continue_from_error()
        else:
          raise e
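
The error handling above accepts a "wrong" IDXREF solution only if the refined cell stays close to the target: roughly 2% on lengths and 2 degrees on angles. A self-contained sketch of that check (the helper name is ours, not an xia2 API):

import math

def cell_close_enough(cell, reference, length_tol=0.02, angle_tol=2.0):
    # cell/reference are (a, b, c, alpha, beta, gamma) tuples
    for j in range(3):
        if math.fabs((cell[j] - reference[j]) / reference[j]) > length_tol:
            return False
        if math.fabs(cell[j + 3] - reference[j + 3]) > angle_tol:
            return False
    return True

print(cell_close_enough((78.1, 78.3, 37.0, 90, 90, 90),
                        (78.0, 78.0, 37.1, 90, 90, 90)))  # True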
Example #14
0
    def _integrate(self):
        '''Actually do the integration - in XDS terms this will mean running
        DEFPIX and INTEGRATE to measure all the reflections.'''

        experiment = self._intgr_refiner.get_refined_experiment_list(
            self.get_integrater_epoch())[0]
        crystal_model = experiment.crystal
        self._intgr_refiner_cell = crystal_model.get_unit_cell().parameters()

        images_str = '%d to %d' % tuple(self._intgr_wedge)
        cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % tuple(
            self._intgr_refiner_cell)

        if len(self._fp_directory) <= 50:
            dirname = self._fp_directory
        else:
            dirname = '...%s' % self._fp_directory[-46:]

        Journal.block(
            'integrating', self._intgr_sweep_name, 'XDS', {
                'images': images_str,
                'cell': cell_str,
                'lattice': self._intgr_refiner.get_refiner_lattice(),
                'template': self._fp_template,
                'directory': dirname,
                'resolution': '%.2f' % self._intgr_reso_high
            })

        defpix = self.Defpix()

        # pass in the correct data

        for file in [
                'X-CORRECTIONS.cbf', 'Y-CORRECTIONS.cbf', 'BKGINIT.cbf',
                'XPARM.XDS'
        ]:
            defpix.set_input_data_file(file, self._xds_data_files[file])

        defpix.set_data_range(self._intgr_wedge[0] + self.get_frame_offset(),
                              self._intgr_wedge[1] + self.get_frame_offset())

        if self.get_integrater_high_resolution() > 0.0 and \
               self.get_integrater_user_resolution():
            Debug.write('Setting resolution limit in DEFPIX to %.2f' % \
                        self.get_integrater_high_resolution())
            defpix.set_resolution_high(self.get_integrater_high_resolution())
            defpix.set_resolution_low(self.get_integrater_low_resolution())

        elif self.get_integrater_low_resolution():
            Debug.write('Setting low resolution limit in DEFPIX to %.2f' % \
                        self.get_integrater_low_resolution())
            defpix.set_resolution_high(0.0)
            defpix.set_resolution_low(self.get_integrater_low_resolution())

        defpix.run()

        # and gather the result files
        for file in ['BKGPIX.cbf', 'ABS.cbf']:
            self._xds_data_files[file] = defpix.get_output_data_file(file)

        integrate = self.Integrate()

        if self._xds_integrate_parameters:
            integrate.set_updates(self._xds_integrate_parameters)

        # decide what images we are going to process, if not already
        # specified

        if not self._intgr_wedge:
            images = self.get_matching_images()
            self.set_integrater_wedge(min(images), max(images))

        integrate.set_data_range(
            self._intgr_wedge[0] + self.get_frame_offset(),
            self._intgr_wedge[1] + self.get_frame_offset())

        for file in [
                'X-CORRECTIONS.cbf', 'Y-CORRECTIONS.cbf', 'BLANK.cbf',
                'BKGPIX.cbf', 'GAIN.cbf'
        ]:
            integrate.set_input_data_file(file, self._xds_data_files[file])

        if 'GXPARM.XDS' in self._xds_data_files:
            Debug.write('Using globally refined parameters')
            integrate.set_input_data_file('XPARM.XDS',
                                          self._xds_data_files['GXPARM.XDS'])
            integrate.set_refined_xparm()
        else:
            integrate.set_input_data_file('XPARM.XDS',
                                          self._xds_data_files['XPARM.XDS'])

        integrate.run()

        self._intgr_per_image_statistics = integrate.get_per_image_statistics()
        Chatter.write(self.show_per_image_statistics())

        # record the log file -

        pname, xname, dname = self.get_integrater_project_info()
        sweep = self.get_integrater_sweep_name()
        FileHandler.record_log_file('%s %s %s %s INTEGRATE' % \
                                    (pname, xname, dname, sweep),
                                    os.path.join(self.get_working_directory(),
                                                 'INTEGRATE.LP'))

        # and copy the first pass INTEGRATE.HKL...

        lattice = self._intgr_refiner.get_refiner_lattice()
        if not os.path.exists(
                os.path.join(self.get_working_directory(),
                             'INTEGRATE-%s.HKL' % lattice)):
            here = self.get_working_directory()
            shutil.copyfile(os.path.join(here, 'INTEGRATE.HKL'),
                            os.path.join(here, 'INTEGRATE-%s.HKL' % lattice))

        # record INTEGRATE.HKL for e.g. BLEND.

        FileHandler.record_more_data_file(
            '%s %s %s %s INTEGRATE' % (pname, xname, dname, sweep),
            os.path.join(self.get_working_directory(), 'INTEGRATE.HKL'))

        # should the existence of these require that I rerun the
        # integration or can we assume that the application of a
        # sensible resolution limit will achieve this??

        self._xds_integrate_parameters = integrate.get_updates()

        # record the mosaic spread &c.

        m_min, m_mean, m_max = integrate.get_mosaic()
        self.set_integrater_mosaic_min_mean_max(m_min, m_mean, m_max)

        Chatter.write('Mosaic spread: %.3f < %.3f < %.3f' % \
                      self.get_integrater_mosaic_min_mean_max())

        return os.path.join(self.get_working_directory(), 'INTEGRATE.HKL')
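
The copy near the end of this example preserves the first-pass INTEGRATE.HKL for each lattice by copying it only if the lattice-tagged file does not already exist. A minimal sketch of that copy-once pattern (the helper name and paths are illustrative):

import os
import shutil

def archive_first_pass(workdir, lattice):
    # keep the first INTEGRATE.HKL produced for each lattice
    src = os.path.join(workdir, "INTEGRATE.HKL")
    dst = os.path.join(workdir, "INTEGRATE-%s.HKL" % lattice)
    if not os.path.exists(dst):
        shutil.copyfile(src, dst)
    return dst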
Example #15
0
  def _index(self):
    '''Actually do the autoindexing using the data prepared by the
    previous method.'''

    images_str = '%d to %d' % tuple(self._indxr_images[0])
    for i in self._indxr_images[1:]:
      images_str += ', %d to %d' % tuple(i)

    cell_str = None
    if self._indxr_input_cell:
      cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % \
                 self._indxr_input_cell

    # then this is a proper autoindexing run - describe this
    # to the journal entry

    #if len(self._fp_directory) <= 50:
      #dirname = self._fp_directory
    #else:
      #dirname = '...%s' % self._fp_directory[-46:]
    dirname = self.get_directory()

    Journal.block('autoindexing', self._indxr_sweep_name, 'XDS',
                  {'images':images_str,
                   'target cell':cell_str,
                   'target lattice':self._indxr_input_lattice,
                   'template':self.get_template(),
                   'directory':dirname})

    idxref = self.Idxref()

    self._index_remove_masked_regions()
    for file in ['SPOT.XDS']:
      idxref.set_input_data_file(file, self._indxr_payload[file])

    idxref.set_data_range(self._indxr_images[0][0],
                          self._indxr_images[0][1])
    idxref.set_background_range(self._indxr_images[0][0],
                                self._indxr_images[0][1])

    # set the phi start etc correctly

    if self._i_or_ii is None:
      self._i_or_ii = self.decide_i_or_ii()
      Debug.write('Selecting I or II, chose %s' % self._i_or_ii)

    if self._i_or_ii == 'i':
      blocks = self._index_select_images_i()
      for block in blocks[:1]:
        starting_frame = block[0]

        dd = Diffdump()
        dd.set_image(self.get_image_name(starting_frame))
        starting_angle = dd.readheader()['phi_start']

        idxref.set_starting_frame(starting_frame)
        idxref.set_starting_angle(starting_angle)

        idxref.add_spot_range(block[0], block[1])

      for block in blocks[1:]:
        idxref.add_spot_range(block[0], block[1])
    else:
      for block in self._indxr_images[:1]:
        starting_frame = block[0]

        dd = Diffdump()
        dd.set_image(self.get_image_name(starting_frame))
        starting_angle = dd.readheader()['phi_start']

        idxref.set_starting_frame(starting_frame)
        idxref.set_starting_angle(starting_angle)

        idxref.add_spot_range(block[0], block[1])

      for block in self._indxr_images[1:]:
        idxref.add_spot_range(block[0], block[1])

    # FIXME need to also be able to pass in the known unit
    # cell and lattice if already available e.g. from
    # the helper... indirectly

    if self._indxr_user_input_lattice:
      idxref.set_indexer_user_input_lattice(True)

    if self._indxr_input_lattice and self._indxr_input_cell:
      idxref.set_indexer_input_lattice(self._indxr_input_lattice)
      idxref.set_indexer_input_cell(self._indxr_input_cell)

      Debug.write('Set lattice: %s' % self._indxr_input_lattice)
      Debug.write('Set cell: %f %f %f %f %f %f' % \
                  self._indxr_input_cell)

      original_cell = self._indxr_input_cell
    elif self._indxr_input_lattice:
      idxref.set_indexer_input_lattice(self._indxr_input_lattice)
      original_cell = None
    else:
      original_cell = None

    # FIXED need to set the beam centre here - this needs to come
    # from the input .xinfo object or header, and be converted
    # to the XDS frame... done.

    #mosflm_beam_centre = self.get_beam_centre()
    #xds_beam_centre = beam_centre_mosflm_to_xds(
        #mosflm_beam_centre[0], mosflm_beam_centre[1], self.get_header())
    from dxtbx.serialize.xds import to_xds
    converter = to_xds(self.get_imageset())
    xds_beam_centre = converter.detector_origin

    idxref.set_beam_centre(xds_beam_centre[0],
                           xds_beam_centre[1])

    # fixme need to check if the lattice, cell have been set already,
    # and if they have, pass these in as input to the indexing job.

    done = False

    while not done:
      try:
        done = idxref.run()

        # N.B. in here if the IDXREF step was being run in the first
        # pass done is FALSE however there should be a refined
        # P1 orientation matrix etc. available - so keep it!

      except XDSException as e:
        # inspect this - if we have complaints about not
        # enough reflections indexed, and we have a target
        # unit cell, and they are the same, well ignore it

        if 'solution is inaccurate' in str(e):
          Debug.write(
              'XDS complains solution inaccurate - ignoring')
          done = idxref.continue_from_error()
        elif ('insufficient percentage (< 70%)' in str(e) or
              'insufficient percentage (< 50%)' in str(e)) and \
                 original_cell:
          done = idxref.continue_from_error()
          lattice, cell, mosaic = \
                   idxref.get_indexing_solution()
          # compare solutions
          for j in range(3):
            # allow two percent variation in unit cell length
            if math.fabs((cell[j] - original_cell[j]) / \
                         original_cell[j]) > 0.02 and \
                         not Flags.get_relax():
              Debug.write('XDS unhappy and solution wrong')
              raise e
            # and two degree difference in angle
            if math.fabs(cell[j + 3] - original_cell[j + 3]) \
                   > 2.0 and not Flags.get_relax():
              Debug.write('XDS unhappy and solution wrong')
              raise e
          Debug.write('XDS unhappy but solution ok')
        elif 'insufficient percentage (< 70%)' in str(e) or \
                 'insufficient percentage (< 50%)' in str(e):
          Debug.write('XDS unhappy but solution probably ok')
          done = idxref.continue_from_error()
        else:
          raise e
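
Both XDS indexing examples share the same control flow: run IDXREF, inspect the exception text, and either continue from a known-recoverable error or re-raise. A stripped-down sketch of that loop; `job` is a stand-in object with run()/continue_from_error() methods, not a real xia2 wrapper:

def run_with_recovery(job, recoverable=("solution is inaccurate",)):
    done = False
    while not done:
        try:
            done = job.run()
        except Exception as e:
            if any(msg in str(e) for msg in recoverable):
                done = job.continue_from_error()
            else:
                raise
    return done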
Example #16
0
    def _scale_prepare(self):
        """Perform all of the preparation required to deliver the scaled
        data. This should sort together the reflection files, ensure that
        they are correctly indexed (via pointless) and generally tidy
        things up."""

        # acknowledge all of the programs we are about to use...

        Citations.cite("pointless")
        Citations.cite("aimless")
        Citations.cite("ccp4")

        # ---------- GATHER ----------

        self._sweep_handler = SweepInformationHandler(self._scalr_integraters)

        Journal.block(
            "gathering",
            self.get_scaler_xcrystal().get_name(),
            "CCP4",
            {"working directory": self.get_working_directory()},
        )

        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            pname, xname, dname = si.get_project_info()
            sname = si.get_sweep_name()

            exclude_sweep = False

            for sweep in PhilIndex.params.xia2.settings.sweep:
                if sweep.id == sname and sweep.exclude:
                    exclude_sweep = True
                    break

            if exclude_sweep:
                self._sweep_handler.remove_epoch(epoch)
                Debug.write("Excluding sweep %s" % sname)
            else:
                Journal.entry({"adding data from": "%s/%s/%s" % (xname, dname, sname)})

        # gather data for all images which belonged to the parent
        # crystal - allowing for the fact that things could go wrong
        # e.g. epoch information not available, exposure times not in
        # headers etc...

        for e in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(e)
            assert is_mtz_file(si.get_reflections())

        p, x = self._sweep_handler.get_project_info()
        self._scalr_pname = p
        self._scalr_xname = x

        # verify that the lattices are consistent, calling eliminate if
        # they are not N.B. there could be corner cases here

        need_to_return = False

        multi_sweep_indexing = PhilIndex.params.xia2.settings.multi_sweep_indexing

        # START OF if more than one epoch
        if len(self._sweep_handler.get_epochs()) > 1:

            # if we have multi-sweep-indexing going on then logic says all should
            # share common lattice & UB definition => this is not used here?

            # START OF if multi_sweep indexing and not input pg
            if multi_sweep_indexing and not self._scalr_input_pointgroup:
                pointless_hklins = []

                max_batches = 0
                for epoch in self._sweep_handler.get_epochs():
                    si = self._sweep_handler.get_sweep_information(epoch)
                    hklin = si.get_reflections()

                    batches = MtzUtils.batches_from_mtz(hklin)
                    if 1 + max(batches) - min(batches) > max_batches:
                        max_batches = max(batches) - min(batches) + 1

                from xia2.lib.bits import nifty_power_of_ten

                Debug.write("Biggest sweep has %d batches" % max_batches)
                max_batches = nifty_power_of_ten(max_batches)

                counter = 0

                refiners = []

                for epoch in self._sweep_handler.get_epochs():
                    si = self._sweep_handler.get_sweep_information(epoch)
                    hklin = si.get_reflections()
                    integrater = si.get_integrater()
                    refiner = integrater.get_integrater_refiner()
                    refiners.append(refiner)

                    hklin = self._prepare_pointless_hklin(
                        hklin, si.get_integrater().get_phi_width()
                    )

                    hklout = os.path.join(
                        self.get_working_directory(),
                        "%s_%s_%s_%s_prepointless.mtz"
                        % (pname, xname, dname, si.get_sweep_name()),
                    )

                    # we will want to delete this on exit
                    FileHandler.record_temporary_file(hklout)

                    first_batch = min(si.get_batches())
                    si.set_batch_offset(counter * max_batches - first_batch + 1)

                    from xia2.Modules.Scaler.rebatch import rebatch

                    new_batches = rebatch(
                        hklin,
                        hklout,
                        first_batch=counter * max_batches + 1,
                        pname=pname,
                        xname=xname,
                        dname=dname,
                    )

                    pointless_hklins.append(hklout)

                    # update the counter & recycle
                    counter += 1

                    # SUMMARY - have added all sweeps to pointless_hklins

                s = self._factory.Sortmtz()

                pointless_hklin = os.path.join(
                    self.get_working_directory(),
                    "%s_%s_prepointless_sorted.mtz"
                    % (self._scalr_pname, self._scalr_xname),
                )

                s.set_hklout(pointless_hklin)

                for hklin in pointless_hklins:
                    s.add_hklin(hklin)

                s.sort()

                # FIXME xia2-51 in here look at running constant scaling on the
                # pointless hklin to put the runs on the same scale. Ref=[A]

                pointless_const = os.path.join(
                    self.get_working_directory(),
                    "%s_%s_prepointless_const.mtz"
                    % (self._scalr_pname, self._scalr_xname),
                )
                FileHandler.record_temporary_file(pointless_const)

                aimless_const = self._factory.Aimless()
                aimless_const.set_hklin(pointless_hklin)
                aimless_const.set_hklout(pointless_const)
                aimless_const.const()

                pointless_const = os.path.join(
                    self.get_working_directory(),
                    "%s_%s_prepointless_const_unmerged.mtz"
                    % (self._scalr_pname, self._scalr_xname),
                )
                FileHandler.record_temporary_file(pointless_const)
                pointless_hklin = pointless_const

                # FIXME xia2-51 in here need to pass all refiners to ensure that the
                # information is passed back to all of them not just the last one...
                Debug.write(
                    "Running multisweep pointless for %d sweeps" % len(refiners)
                )
                pointgroup, reindex_op, ntr, pt = self._pointless_indexer_multisweep(
                    pointless_hklin, refiners
                )

                Debug.write("X1698: %s: %s" % (pointgroup, reindex_op))

                lattices = [Syminfo.get_lattice(pointgroup)]

                for epoch in self._sweep_handler.get_epochs():
                    si = self._sweep_handler.get_sweep_information(epoch)
                    intgr = si.get_integrater()
                    hklin = si.get_reflections()
                    refiner = intgr.get_integrater_refiner()

                    if ntr:
                        intgr.integrater_reset_reindex_operator()
                        need_to_return = True

                # SUMMARY - added all sweeps together into an mtz, ran
                # _pointless_indexer_multisweep on this, made a list of one lattice
                # and potentially reset reindex op?
            # END OF if multi_sweep indexing and not input pg

            # START OF if not multi_sweep, or input pg given
            else:
                lattices = []

                for epoch in self._sweep_handler.get_epochs():

                    si = self._sweep_handler.get_sweep_information(epoch)
                    intgr = si.get_integrater()
                    hklin = si.get_reflections()
                    refiner = intgr.get_integrater_refiner()

                    if self._scalr_input_pointgroup:
                        pointgroup = self._scalr_input_pointgroup
                        reindex_op = "h,k,l"
                        ntr = False

                    else:
                        pointless_hklin = self._prepare_pointless_hklin(
                            hklin, si.get_integrater().get_phi_width()
                        )

                        pointgroup, reindex_op, ntr, pt = self._pointless_indexer_jiffy(
                            pointless_hklin, refiner
                        )

                        Debug.write("X1698: %s: %s" % (pointgroup, reindex_op))

                    lattice = Syminfo.get_lattice(pointgroup)

                    if lattice not in lattices:
                        lattices.append(lattice)

                    if ntr:

                        intgr.integrater_reset_reindex_operator()
                        need_to_return = True
                # SUMMARY do pointless_indexer on each sweep, get lattices and make a list
                # of unique lattices, potentially reset reindex op.
            # END OF if not multi_sweep, or input pg given

            # SUMMARY - still within if more than one epoch, now have a list of number
            # of lattices

            # START OF if multiple-lattices
            if len(lattices) > 1:

                # why not using pointless indexer jiffy??!

                correct_lattice = sort_lattices(lattices)[0]

                Chatter.write("Correct lattice asserted to be %s" % correct_lattice)

                # transfer this information back to the indexers
                for epoch in self._sweep_handler.get_epochs():

                    si = self._sweep_handler.get_sweep_information(epoch)
                    refiner = si.get_integrater().get_integrater_refiner()
                    sname = si.get_sweep_name()

                    state = refiner.set_refiner_asserted_lattice(correct_lattice)

                    if state == refiner.LATTICE_CORRECT:
                        Chatter.write(
                            "Lattice %s ok for sweep %s" % (correct_lattice, sname)
                        )
                    elif state == refiner.LATTICE_IMPOSSIBLE:
                        raise RuntimeError(
                            "Lattice %s impossible for %s" % (correct_lattice, sname)
                        )
                    elif state == refiner.LATTICE_POSSIBLE:
                        Chatter.write(
                            "Lattice %s assigned for sweep %s"
                            % (correct_lattice, sname)
                        )
                        need_to_return = True
            # END OF if multiple-lattices
            # SUMMARY - forced all lattices to be the same and hope it's okay.
        # END OF if more than one epoch

        # if one or more of them was not in the lowest lattice,
        # need to return here to allow reprocessing

        if need_to_return:
            self.set_scaler_done(False)
            self.set_scaler_prepare_done(False)
            return

        # ---------- REINDEX ALL DATA TO CORRECT POINTGROUP ----------

        # all should share the same pointgroup, unless twinned... in which
        # case force them to be...

        pointgroups = {}
        reindex_ops = {}
        probably_twinned = False

        need_to_return = False

        multi_sweep_indexing = PhilIndex.params.xia2.settings.multi_sweep_indexing

        # START OF if multi-sweep and not input pg
        if multi_sweep_indexing and not self._scalr_input_pointgroup:
            pointless_hklins = []

            max_batches = 0
            for epoch in self._sweep_handler.get_epochs():
                si = self._sweep_handler.get_sweep_information(epoch)
                hklin = si.get_reflections()

                batches = MtzUtils.batches_from_mtz(hklin)
                if 1 + max(batches) - min(batches) > max_batches:
                    max_batches = max(batches) - min(batches) + 1

            from xia2.lib.bits import nifty_power_of_ten

            Debug.write("Biggest sweep has %d batches" % max_batches)
            max_batches = nifty_power_of_ten(max_batches)

            counter = 0

            refiners = []

            for epoch in self._sweep_handler.get_epochs():
                si = self._sweep_handler.get_sweep_information(epoch)
                hklin = si.get_reflections()
                integrater = si.get_integrater()
                refiner = integrater.get_integrater_refiner()
                refiners.append(refiner)

                hklin = self._prepare_pointless_hklin(
                    hklin, si.get_integrater().get_phi_width()
                )

                hklout = os.path.join(
                    self.get_working_directory(),
                    "%s_%s_%s_%s_prepointless.mtz"
                    % (pname, xname, dname, si.get_sweep_name()),
                )

                # we will want to delete this on exit
                FileHandler.record_temporary_file(hklout)

                first_batch = min(si.get_batches())
                si.set_batch_offset(counter * max_batches - first_batch + 1)

                from xia2.Modules.Scaler.rebatch import rebatch

                new_batches = rebatch(
                    hklin,
                    hklout,
                    first_batch=counter * max_batches + 1,
                    pname=pname,
                    xname=xname,
                    dname=dname,
                )

                pointless_hklins.append(hklout)

                # update the counter & recycle
                counter += 1

            # FIXME related to xia2-51 - this looks very very similar to the logic
            # in [A] above - is this duplicated logic?
            s = self._factory.Sortmtz()

            pointless_hklin = os.path.join(
                self.get_working_directory(),
                "%s_%s_prepointless_sorted.mtz"
                % (self._scalr_pname, self._scalr_xname),
            )

            s.set_hklout(pointless_hklin)

            for hklin in pointless_hklins:
                s.add_hklin(hklin)

            s.sort()

            pointless_const = os.path.join(
                self.get_working_directory(),
                "%s_%s_prepointless_const.mtz" % (self._scalr_pname, self._scalr_xname),
            )
            FileHandler.record_temporary_file(pointless_const)

            aimless_const = self._factory.Aimless()
            aimless_const.set_hklin(pointless_hklin)
            aimless_const.set_hklout(pointless_const)
            aimless_const.const()

            pointless_const = os.path.join(
                self.get_working_directory(),
                "%s_%s_prepointless_const_unmerged.mtz"
                % (self._scalr_pname, self._scalr_xname),
            )
            FileHandler.record_temporary_file(pointless_const)
            pointless_hklin = pointless_const

            pointgroup, reindex_op, ntr, pt = self._pointless_indexer_multisweep(
                pointless_hklin, refiners
            )

            for epoch in self._sweep_handler.get_epochs():
                pointgroups[epoch] = pointgroup
                reindex_ops[epoch] = reindex_op
            # SUMMARY - ran pointless multisweep on combined mtz and made a dict
            # of pointgroups and reindex_ops (all the same)
        # END OF if multi-sweep and not input pg

        # START OF if not multi-sweep or pg given
        else:
            for epoch in self._sweep_handler.get_epochs():
                si = self._sweep_handler.get_sweep_information(epoch)

                hklin = si.get_reflections()

                integrater = si.get_integrater()
                refiner = integrater.get_integrater_refiner()

                if self._scalr_input_pointgroup:
                    Debug.write(
                        "Using input pointgroup: %s" % self._scalr_input_pointgroup
                    )
                    pointgroup = self._scalr_input_pointgroup
                    reindex_op = "h,k,l"
                    pt = False

                else:

                    pointless_hklin = self._prepare_pointless_hklin(
                        hklin, si.get_integrater().get_phi_width()
                    )

                    pointgroup, reindex_op, ntr, pt = self._pointless_indexer_jiffy(
                        pointless_hklin, refiner
                    )

                    Debug.write("X1698: %s: %s" % (pointgroup, reindex_op))

                    if ntr:

                        integrater.integrater_reset_reindex_operator()
                        need_to_return = True

                if pt and not probably_twinned:
                    probably_twinned = True

                Debug.write("Pointgroup: %s (%s)" % (pointgroup, reindex_op))

                pointgroups[epoch] = pointgroup
                reindex_ops[epoch] = reindex_op
            # SUMMARY - for each sweep, run indexer jiffy and get reindex operators
            # and pointgroups dictionaries (could be different between sweeps)

        # END OF if not multi-sweep or pg given

        overall_pointgroup = None

        pointgroup_set = {pointgroups[e] for e in pointgroups}

        if len(pointgroup_set) > 1 and not probably_twinned:
            raise RuntimeError(
                "non uniform pointgroups: %s" % str(list(pointgroup_set))
            )

        if len(pointgroup_set) > 1:
            Debug.write(
                "Probably twinned, pointgroups: %s"
                % " ".join([p.replace(" ", "") for p in list(pointgroup_set)])
            )
            numbers = [Syminfo.spacegroup_name_to_number(s) for s in pointgroup_set]
            overall_pointgroup = Syminfo.spacegroup_number_to_name(min(numbers))
            self._scalr_input_pointgroup = overall_pointgroup

            Chatter.write(
                "Twinning detected, assume pointgroup %s" % overall_pointgroup
            )

            need_to_return = True

        else:
            overall_pointgroup = pointgroup_set.pop()
        # SUMMARY - Have handled if different pointgroups & chosen an overall_pointgroup
        # which is the lowest symmetry

        # Now go through sweeps and do reindexing
        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)

            integrater = si.get_integrater()

            integrater.set_integrater_spacegroup_number(
                Syminfo.spacegroup_name_to_number(overall_pointgroup)
            )
            integrater.set_integrater_reindex_operator(
                reindex_ops[epoch], reason="setting point group"
            )
            # This will give us the reflections in the correct point group
            si.set_reflections(integrater.get_integrater_intensities())

        if need_to_return:
            self.set_scaler_done(False)
            self.set_scaler_prepare_done(False)
            return

        # in here now optionally work through the data files which should be
        # indexed with a consistent point group, and transform the orientation
        # matrices by the lattice symmetry operations (if possible) to get a
        # consistent definition of U matrix modulo fixed rotations

        if PhilIndex.params.xia2.settings.unify_setting:
            self.unify_setting()

        if self.get_scaler_reference_reflection_file():
            self._reference = self.get_scaler_reference_reflection_file()
            Debug.write("Using HKLREF %s" % self._reference)

        elif PhilIndex.params.xia2.settings.scale.reference_reflection_file:
            self._reference = (
                PhilIndex.params.xia2.settings.scale.reference_reflection_file
            )
            Debug.write("Using HKLREF %s" % self._reference)

        params = PhilIndex.params
        use_brehm_diederichs = params.xia2.settings.use_brehm_diederichs
        if len(self._sweep_handler.get_epochs()) > 1 and use_brehm_diederichs:
            self.brehm_diederichs_reindexing()
        # If not Brehm-Diederichs, set reference as first sweep
        elif len(self._sweep_handler.get_epochs()) > 1 and not self._reference:

            first = self._sweep_handler.get_epochs()[0]
            si = self._sweep_handler.get_sweep_information(first)
            self._reference = si.get_reflections()

        # Now reindex to be consistent with first dataset - run pointless on each
        # dataset with reference
        if self._reference:

            md = self._factory.Mtzdump()
            md.set_hklin(self._reference)
            md.dump()

            if md.get_batches() and False:  # NB check deliberately disabled
                raise RuntimeError(
                    "reference reflection file %s unmerged" % self._reference
                )

            datasets = md.get_datasets()

            if len(datasets) > 1 and False:  # NB check deliberately disabled
                raise RuntimeError("more than one dataset in %s" % self._reference)

            # then get the unit cell, lattice etc.

            reference_lattice = Syminfo.get_lattice(md.get_spacegroup())
            reference_cell = md.get_dataset_info(datasets[0])["cell"]

            # then compute the pointgroup from this...

            # ---------- REINDEX TO CORRECT (REFERENCE) SETTING ----------

            for epoch in self._sweep_handler.get_epochs():

                # if we are working with unified UB matrix then this should not
                # be a problem here (note, *if*; *should*)

                # what about e.g. alternative P1 settings?
                # see JIRA MXSW-904
                if PhilIndex.params.xia2.settings.unify_setting:
                    continue

                pl = self._factory.Pointless()

                si = self._sweep_handler.get_sweep_information(epoch)
                hklin = si.get_reflections()

                pl.set_hklin(
                    self._prepare_pointless_hklin(
                        hklin, si.get_integrater().get_phi_width()
                    )
                )

                hklout = os.path.join(
                    self.get_working_directory(),
                    "%s_rdx2.mtz" % os.path.split(hklin)[-1][:-4],
                )

                # we will want to delete this on exit
                FileHandler.record_temporary_file(hklout)

                # now set the initial reflection set as a reference...

                pl.set_hklref(self._reference)

                # https://github.com/xia2/xia2/issues/115 - should ideally iteratively
                # construct a reference or a tree of correlations to ensure correct
                # reference setting - however if small molecule assume has been
                # multi-sweep-indexed so can ignore "fatal errors" - temporary hack
                pl.decide_pointgroup(
                    ignore_errors=PhilIndex.params.xia2.settings.small_molecule
                )

                Debug.write("Reindexing analysis of %s" % pl.get_hklin())

                pointgroup = pl.get_pointgroup()
                reindex_op = pl.get_reindex_operator()

                Debug.write("Operator: %s" % reindex_op)

                # apply this...

                integrater = si.get_integrater()

                integrater.set_integrater_reindex_operator(
                    reindex_op, reason="match reference"
                )
                integrater.set_integrater_spacegroup_number(
                    Syminfo.spacegroup_name_to_number(pointgroup)
                )
                si.set_reflections(integrater.get_integrater_intensities())

                md = self._factory.Mtzdump()
                md.set_hklin(si.get_reflections())
                md.dump()

                datasets = md.get_datasets()

                if len(datasets) > 1:
                    raise RuntimeError(
                        "more than one dataset in %s" % si.get_reflections()
                    )

                # then get the unit cell, lattice etc.

                lattice = Syminfo.get_lattice(md.get_spacegroup())
                cell = md.get_dataset_info(datasets[0])["cell"]

                if lattice != reference_lattice:
                    raise RuntimeError(
                        "lattices differ in %s and %s"
                        % (self._reference, si.get_reflections())
                    )

                Debug.write("Cell: %.2f %.2f %.2f %.2f %.2f %.2f" % cell)
                Debug.write("Ref:  %.2f %.2f %.2f %.2f %.2f %.2f" % reference_cell)

                for j in range(6):
                    if (
                        math.fabs((cell[j] - reference_cell[j]) / reference_cell[j])
                        > 0.1
                    ):
                        raise RuntimeError(
                            "unit cell parameters differ in %s and %s"
                            % (self._reference, si.get_reflections())
                        )

        # ---------- SORT TOGETHER DATA ----------

        self._sort_together_data_ccp4()

        self._scalr_resolution_limits = {}

        # store central resolution limit estimates

        batch_ranges = [
            self._sweep_handler.get_sweep_information(epoch).get_batch_range()
            for epoch in self._sweep_handler.get_epochs()
        ]

        self._resolution_limit_estimates = ersatz_resolution(
            self._prepared_reflections, batch_ranges
        )
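
The prepointless rebatching above spaces sweeps at a round power of ten so that each sweep's batches land in a distinct block. A self-contained sketch of the idea; our next_power_of_ten is an assumption standing in for xia2.lib.bits.nifty_power_of_ten, whose exact rounding we do not rely on:

import math

def next_power_of_ten(n):
    # smallest power of ten greater than n (assumed behaviour)
    return 10 ** (int(math.log10(n)) + 1)

max_batches = 87                        # biggest sweep spans 87 batches
block = next_power_of_ten(max_batches)  # -> 100
# sweep k is rebatched to start at k * block + 1
print([k * block + 1 for k in range(3)])  # [1, 101, 201]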
Example #17
0
    def _scale(self):
        """Perform all of the operations required to deliver the scaled
        data."""

        epochs = self._sweep_handler.get_epochs()

        if self._scalr_corrections:
            Journal.block(
                "scaling",
                self.get_scaler_xcrystal().get_name(),
                "CCP4",
                {
                    "scaling model": "automatic",
                    "absorption": self._scalr_correct_absorption,
                    "decay": self._scalr_correct_decay,
                },
            )

        else:
            Journal.block(
                "scaling",
                self.get_scaler_xcrystal().get_name(),
                "CCP4",
                {"scaling model": "default"},
            )

        sc = self._updated_aimless()
        sc.set_hklin(self._prepared_reflections)
        sc.set_chef_unmerged(True)
        sc.set_new_scales_file("%s.scales" % self._scalr_xname)

        user_resolution_limits = {}

        for epoch in epochs:

            si = self._sweep_handler.get_sweep_information(epoch)
            pname, xname, dname = si.get_project_info()
            sname = si.get_sweep_name()
            intgr = si.get_integrater()

            if intgr.get_integrater_user_resolution():
                dmin = intgr.get_integrater_high_resolution()

                if (dname, sname) not in user_resolution_limits:
                    user_resolution_limits[(dname, sname)] = dmin
                elif dmin < user_resolution_limits[(dname, sname)]:
                    user_resolution_limits[(dname, sname)] = dmin

            start, end = si.get_batch_range()

            if (dname, sname) in self._scalr_resolution_limits:
                resolution, _ = self._scalr_resolution_limits[(dname, sname)]
                sc.add_run(start, end, exclude=False, resolution=resolution, name=sname)
            else:
                sc.add_run(start, end, name=sname)

        sc.set_hklout(
            os.path.join(
                self.get_working_directory(),
                "%s_%s_scaled_test.mtz" % (self._scalr_pname, self._scalr_xname),
            )
        )

        if self.get_scaler_anomalous():
            sc.set_anomalous()

        # what follows, sucks

        failover = PhilIndex.params.xia2.settings.failover
        if failover:

            try:
                sc.scale()
            except RuntimeError as e:

                es = str(e)

                if (
                    "bad batch" in es
                    or "negative scales run" in es
                    or "no observations" in es
                ):

                    # first ID the sweep from the batch no

                    batch = int(es.split()[-1])
                    epoch = self._identify_sweep_epoch(batch)
                    sweep = self._scalr_integraters[epoch].get_integrater_sweep()

                    # then remove it from my parent xcrystal

                    self.get_scaler_xcrystal().remove_sweep(sweep)

                    # then remove it from the scaler list of integraters
                    # - this should really be a scaler interface method

                    del self._scalr_integraters[epoch]

                    # then tell the user what is happening

                    Chatter.write(
                        "Sweep %s gave negative scales - removing" % sweep.get_name()
                    )

                    # then reset the prepare, do, finish flags

                    self.set_scaler_prepare_done(False)
                    self.set_scaler_done(False)
                    self.set_scaler_finish_done(False)

                    # and return

                    return

                else:

                    raise

        else:
            sc.scale()

        # then gather up all of the resulting reflection files
        # and convert them into the required formats (.sca, .mtz.)

        data = sc.get_summary()

        loggraph = sc.parse_ccp4_loggraph()

        resolution_info = {}

        reflection_files = sc.get_scaled_reflection_files()

        for dataset in reflection_files:
            FileHandler.record_temporary_file(reflection_files[dataset])

        for key in loggraph:
            if "Analysis against resolution" in key:
                dataset = key.split(",")[-1].strip()
                resolution_info[dataset] = transpose_loggraph(loggraph[key])

        # check in here that there is actually some data to scale..!

        if not resolution_info:
            raise RuntimeError("no resolution info")

        highest_suggested_resolution = self.assess_resolution_limits(
            sc.get_unmerged_reflection_file(), user_resolution_limits
        )

        if not self.get_scaler_done():
            Debug.write("Returning as scaling not finished...")
            return

        batch_info = {}

        for key in loggraph:
            if "Analysis against Batch" in key:
                dataset = key.split(",")[-1].strip()
                batch_info[dataset] = transpose_loggraph(loggraph[key])

        sc = self._updated_aimless()

        FileHandler.record_log_file(
            "%s %s aimless" % (self._scalr_pname, self._scalr_xname), sc.get_log_file()
        )

        sc.set_hklin(self._prepared_reflections)
        sc.set_new_scales_file("%s_final.scales" % self._scalr_xname)

        for epoch in epochs:

            si = self._sweep_handler.get_sweep_information(epoch)
            pname, xname, dname = si.get_project_info()
            sname = si.get_sweep_name()
            start, end = si.get_batch_range()

            resolution_limit, _ = self._scalr_resolution_limits[(dname, sname)]

            sc.add_run(
                start, end, exclude=False, resolution=resolution_limit, name=xname
            )

        sc.set_hklout(
            os.path.join(
                self.get_working_directory(),
                "%s_%s_scaled.mtz" % (self._scalr_pname, self._scalr_xname),
            )
        )

        if self.get_scaler_anomalous():
            sc.set_anomalous()

        sc.scale()

        FileHandler.record_xml_file(
            "%s %s aimless" % (self._scalr_pname, self._scalr_xname), sc.get_xmlout()
        )

        data = sc.get_summary()
        scales_file = sc.get_new_scales_file()
        loggraph = sc.parse_ccp4_loggraph()

        standard_deviation_info = {}

        for key in loggraph:
            if "standard deviation v. Intensity" in key:
                dataset = key.split(",")[-1].strip()
                standard_deviation_info[dataset] = transpose_loggraph(loggraph[key])

        resolution_info = {}

        for key in loggraph:
            if "Analysis against resolution" in key:
                dataset = key.split(",")[-1].strip()
                resolution_info[dataset] = transpose_loggraph(loggraph[key])

        batch_info = {}

        for key in loggraph:
            if "Analysis against Batch" in key:
                dataset = key.split(",")[-1].strip()
                batch_info[dataset] = transpose_loggraph(loggraph[key])

        # finally put all of the results "somewhere useful"

        self._scalr_statistics = data

        self._scalr_scaled_refl_files = copy.deepcopy(sc.get_scaled_reflection_files())

        sc = self._updated_aimless()
        sc.set_hklin(self._prepared_reflections)
        sc.set_scales_file(scales_file)

        self._wavelengths_in_order = []

        for epoch in epochs:
            si = self._sweep_handler.get_sweep_information(epoch)
            pname, xname, dname = si.get_project_info()
            sname = si.get_sweep_name()
            start, end = si.get_batch_range()

            resolution_limit, _ = self._scalr_resolution_limits[(dname, sname)]

            sc.add_run(
                start, end, exclude=False, resolution=resolution_limit, name=sname
            )

            if dname not in self._wavelengths_in_order:
                self._wavelengths_in_order.append(dname)

        sc.set_hklout(
            os.path.join(
                self.get_working_directory(),
                "%s_%s_scaled.mtz" % (self._scalr_pname, self._scalr_xname),
            )
        )

        sc.set_scalepack()

        if self.get_scaler_anomalous():
            sc.set_anomalous()
        sc.scale()

        self._update_scaled_unit_cell()

        self._scalr_scaled_reflection_files = {}
        self._scalr_scaled_reflection_files["sca"] = {}
        self._scalr_scaled_reflection_files["sca_unmerged"] = {}
        self._scalr_scaled_reflection_files["mtz_unmerged"] = {}

        for key in self._scalr_scaled_refl_files:
            hklout = self._scalr_scaled_refl_files[key]

            scaout = "%s.sca" % hklout[:-4]
            self._scalr_scaled_reflection_files["sca"][key] = scaout
            FileHandler.record_data_file(scaout)
            scalepack = os.path.join(
                os.path.split(hklout)[0],
                os.path.split(hklout)[1]
                .replace("_scaled", "_scaled_unmerged")
                .replace(".mtz", ".sca"),
            )
            self._scalr_scaled_reflection_files["sca_unmerged"][key] = scalepack
            FileHandler.record_data_file(scalepack)
            mtz_unmerged = os.path.splitext(scalepack)[0] + ".mtz"
            self._scalr_scaled_reflection_files["mtz_unmerged"][key] = mtz_unmerged
            FileHandler.record_data_file(mtz_unmerged)

            if self._scalr_cell_esd is not None:
                # patch .mtz and overwrite unit cell information
                import xia2.Modules.Scaler.tools as tools

                override_cell = self._scalr_cell_dict.get(
                    "%s_%s_%s" % (self._scalr_pname, self._scalr_xname, key)
                )[0]
                tools.patch_mtz_unit_cell(mtz_unmerged, override_cell)
                tools.patch_mtz_unit_cell(hklout, override_cell)

        if PhilIndex.params.xia2.settings.merging_statistics.source == "cctbx":
            for key in self._scalr_scaled_refl_files:
                stats = self._compute_scaler_statistics(
                    self._scalr_scaled_reflection_files["mtz_unmerged"][key],
                    selected_band=(highest_suggested_resolution, None),
                    wave=key,
                )
                self._scalr_statistics[
                    (self._scalr_pname, self._scalr_xname, key)
                ] = stats

        sc = self._updated_aimless()
        sc.set_hklin(self._prepared_reflections)
        sc.set_scales_file(scales_file)

        self._wavelengths_in_order = []

        for epoch in epochs:

            si = self._sweep_handler.get_sweep_information(epoch)
            pname, xname, dname = si.get_project_info()
            sname = si.get_sweep_name()
            start, end = si.get_batch_range()

            resolution_limit, _ = self._scalr_resolution_limits[(dname, sname)]

            sc.add_run(
                start, end, exclude=False, resolution=resolution_limit, name=sname
            )

            if dname not in self._wavelengths_in_order:
                self._wavelengths_in_order.append(dname)

        sc.set_hklout(
            os.path.join(
                self.get_working_directory(),
                "%s_%s_chef.mtz" % (self._scalr_pname, self._scalr_xname),
            )
        )

        sc.set_chef_unmerged(True)

        if self.get_scaler_anomalous():
            sc.set_anomalous()
        sc.scale()
        if not PhilIndex.params.dials.fast_mode:
            try:
                self._generate_absorption_map(sc)
            except Exception as e:
                # Map generation may fail for a number of reasons, e.g. matplotlib broken
                Debug.write("Could not generate absorption map (%s)" % e)
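The "Analysis against ..." tables above are pulled out of the Aimless log via parse_ccp4_loggraph() and then column-transposed with transpose_loggraph(). A minimal sketch of what such a transpose does, assuming the parsed table arrives as a column-name list plus row-major data (an assumption: the real helper's input format may differ):

    def transpose_loggraph_sketch(table):
        # table: {'columns': [names...], 'data': [[row values...], ...]}
        # returns {column name: list of values down that column}
        return {
            name: [row[j] for row in table['data']]
            for j, name in enumerate(table['columns'])
        }

    info = transpose_loggraph_sketch(
        {'columns': ['N', 'Dmin', 'Rmerge'],
         'data': [['1', '4.32', '0.041'], ['2', '3.44', '0.052']]})
    assert info['Dmin'] == ['4.32', '3.44']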
Exemple #18
0
  def _scale_prepare(self):
    '''Perform all of the preparation required to deliver the scaled
    data. This should sort together the reflection files, ensure that
    they are correctly indexed (via pointless) and generally tidy
    things up.'''

    # acknowledge all of the programs we are about to use...

    Citations.cite('pointless')
    Citations.cite('aimless')
    Citations.cite('ccp4')

    # ---------- GATHER ----------

    self._sweep_handler = SweepInformationHandler(self._scalr_integraters)

    Journal.block(
        'gathering', self.get_scaler_xcrystal().get_name(), 'CCP4',
        {'working directory':self.get_working_directory()})

    for epoch in self._sweep_handler.get_epochs():
      si = self._sweep_handler.get_sweep_information(epoch)
      pname, xname, dname = si.get_project_info()
      sname = si.get_sweep_name()

      exclude_sweep = False

      for sweep in PhilIndex.params.xia2.settings.sweep:
        if sweep.id == sname and sweep.exclude:
          exclude_sweep = True
          break

      if exclude_sweep:
        self._sweep_handler.remove_epoch(epoch)
        Debug.write('Excluding sweep %s' % sname)
      else:
        Journal.entry({'adding data from':'%s/%s/%s' % \
                       (xname, dname, sname)})

    # gather data for all images which belonged to the parent
    # crystal - allowing for the fact that things could go wrong
    # e.g. epoch information not available, exposure times not in
    # headers etc...

    for e in self._sweep_handler.get_epochs():
      si = self._sweep_handler.get_sweep_information(e)
      assert is_mtz_file(si.get_reflections())

    p, x = self._sweep_handler.get_project_info()
    self._scalr_pname = p
    self._scalr_xname = x

    # verify that the lattices are consistent, calling eliminate if
    # they are not N.B. there could be corner cases here

    need_to_return = False

    multi_sweep_indexing = \
      PhilIndex.params.xia2.settings.multi_sweep_indexing

    if len(self._sweep_handler.get_epochs()) > 1:

      # if we have multi-sweep-indexing going on then logic says all should
      # share common lattice & UB definition => this is not used here?
      if multi_sweep_indexing and not self._scalr_input_pointgroup:
        pointless_hklins = []

        max_batches = 0
        for epoch in self._sweep_handler.get_epochs():
          si = self._sweep_handler.get_sweep_information(epoch)
          hklin = si.get_reflections()

          batches = MtzUtils.batches_from_mtz(hklin)
          if 1 + max(batches) - min(batches) > max_batches:
            max_batches = max(batches) - min(batches) + 1

        from xia2.lib.bits import nifty_power_of_ten
        Debug.write('Biggest sweep has %d batches' % max_batches)
        max_batches = nifty_power_of_ten(max_batches)
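        # assumption: nifty_power_of_ten rounds max_batches up to the next
        # power of ten, so each sweep gets a round, non-overlapping batch stride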

        counter = 0

        refiners = []

        for epoch in self._sweep_handler.get_epochs():
          si = self._sweep_handler.get_sweep_information(epoch)
          hklin = si.get_reflections()
          integrater = si.get_integrater()
          refiner = integrater.get_integrater_refiner()
          refiners.append(refiner)

          hklin = self._prepare_pointless_hklin(
            hklin, si.get_integrater().get_phi_width())

          hklout = os.path.join(self.get_working_directory(),
                                '%s_%s_%s_%s_prepointless.mtz' % \
                                (pname, xname, dname, si.get_sweep_name()))

          # we will want to delete this on exit
          FileHandler.record_temporary_file(hklout)

          first_batch = min(si.get_batches())
          si.set_batch_offset(counter * max_batches - first_batch + 1)

          from xia2.Modules.Scaler.rebatch import rebatch
          new_batches = rebatch(
            hklin, hklout, first_batch=counter * max_batches + 1,
            pname=pname, xname=xname, dname=dname)

          pointless_hklins.append(hklout)

          # update the counter & recycle
          counter += 1

        s = self._factory.Sortmtz()

        pointless_hklin = os.path.join(self.get_working_directory(),
                              '%s_%s_prepointless_sorted.mtz' % \
                              (self._scalr_pname, self._scalr_xname))

        s.set_hklout(pointless_hklin)

        for hklin in pointless_hklins:
          s.add_hklin(hklin)

        s.sort()

        # FIXME xia2-51 in here look at running constant scaling on the
        # pointless hklin to put the runs on the same scale. Ref=[A]

        pointless_const = os.path.join(self.get_working_directory(),
                              '%s_%s_prepointless_const.mtz' % \
                              (self._scalr_pname, self._scalr_xname))
        FileHandler.record_temporary_file(pointless_const)

        aimless_const = self._factory.Aimless()
        aimless_const.set_hklin(pointless_hklin)
        aimless_const.set_hklout(pointless_const)
        aimless_const.const()

        pointless_const = os.path.join(self.get_working_directory(),
                              '%s_%s_prepointless_const_unmerged.mtz' % \
                              (self._scalr_pname, self._scalr_xname))
        FileHandler.record_temporary_file(pointless_const)
        pointless_hklin = pointless_const

        # FIXME xia2-51 in here need to pass all refiners to ensure that the
        # information is passed back to all of them not just the last one...
        Debug.write('Running multisweep pointless for %d sweeps' %
                    len(refiners))
        pointgroup, reindex_op, ntr, pt = \
                    self._pointless_indexer_multisweep(pointless_hklin,
                                                       refiners)

        Debug.write('X1698: %s: %s' % (pointgroup, reindex_op))

        lattices = [Syminfo.get_lattice(pointgroup)]

        for epoch in self._sweep_handler.get_epochs():
          si = self._sweep_handler.get_sweep_information(epoch)
          intgr = si.get_integrater()
          hklin = si.get_reflections()
          refiner = intgr.get_integrater_refiner()

          if ntr:
            intgr.integrater_reset_reindex_operator()
            need_to_return = True

      else:
        lattices = []

        for epoch in self._sweep_handler.get_epochs():

          si = self._sweep_handler.get_sweep_information(epoch)
          intgr = si.get_integrater()
          hklin = si.get_reflections()
          refiner = intgr.get_integrater_refiner()

          if self._scalr_input_pointgroup:
            pointgroup = self._scalr_input_pointgroup
            reindex_op = 'h,k,l'
            ntr = False

          else:
            pointless_hklin = self._prepare_pointless_hklin(
              hklin, si.get_integrater().get_phi_width())

            pointgroup, reindex_op, ntr, pt = \
                        self._pointless_indexer_jiffy(
                pointless_hklin, refiner)

            Debug.write('X1698: %s: %s' % (pointgroup, reindex_op))

          lattice = Syminfo.get_lattice(pointgroup)

          if lattice not in lattices:
            lattices.append(lattice)

          if ntr:

            intgr.integrater_reset_reindex_operator()
            need_to_return = True

      if len(lattices) > 1:

        # why not using pointless indexer jiffy??!

        correct_lattice = sort_lattices(lattices)[0]

        Chatter.write('Correct lattice asserted to be %s' % \
                      correct_lattice)

        # transfer this information back to the indexers
        for epoch in self._sweep_handler.get_epochs():

          si = self._sweep_handler.get_sweep_information(epoch)
          refiner = si.get_integrater().get_integrater_refiner()
          sname = si.get_sweep_name()

          state = refiner.set_refiner_asserted_lattice(
              correct_lattice)

          if state == refiner.LATTICE_CORRECT:
            Chatter.write('Lattice %s ok for sweep %s' % \
                          (correct_lattice, sname))
          elif state == refiner.LATTICE_IMPOSSIBLE:
            raise RuntimeError('Lattice %s impossible for %s' \
                  % (correct_lattice, sname))
          elif state == refiner.LATTICE_POSSIBLE:
            Chatter.write('Lattice %s assigned for sweep %s' % \
                          (correct_lattice, sname))
            need_to_return = True

    # if one or more of them was not in the lowest lattice,
    # need to return here to allow reprocessing

    if need_to_return:
      self.set_scaler_done(False)
      self.set_scaler_prepare_done(False)
      return

    # ---------- REINDEX ALL DATA TO CORRECT POINTGROUP ----------

    # all should share the same pointgroup, unless twinned... in which
    # case force them to be...

    pointgroups = {}
    reindex_ops = {}
    probably_twinned = False

    need_to_return = False

    multi_sweep_indexing = \
      PhilIndex.params.xia2.settings.multi_sweep_indexing

    if multi_sweep_indexing and not self._scalr_input_pointgroup:
      pointless_hklins = []

      max_batches = 0
      for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)
        hklin = si.get_reflections()

        batches = MtzUtils.batches_from_mtz(hklin)
        if 1 + max(batches) - min(batches) > max_batches:
          max_batches = max(batches) - min(batches) + 1

      from xia2.lib.bits import nifty_power_of_ten
      Debug.write('Biggest sweep has %d batches' % max_batches)
      max_batches = nifty_power_of_ten(max_batches)

      counter = 0

      refiners = []

      for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)
        hklin = si.get_reflections()
        integrater = si.get_integrater()
        refiner = integrater.get_integrater_refiner()
        refiners.append(refiner)

        hklin = self._prepare_pointless_hklin(
            hklin, si.get_integrater().get_phi_width())

        hklout = os.path.join(self.get_working_directory(),
                              '%s_%s_%s_%s_prepointless.mtz' % \
                              (pname, xname, dname, si.get_sweep_name()))

        # we will want to delete this on exit
        FileHandler.record_temporary_file(hklout)

        first_batch = min(si.get_batches())
        si.set_batch_offset(counter * max_batches - first_batch + 1)

        from xia2.Modules.Scaler.rebatch import rebatch
        new_batches = rebatch(
          hklin, hklout, first_batch=counter * max_batches + 1,
          pname=pname, xname=xname, dname=dname)

        pointless_hklins.append(hklout)

        # update the counter & recycle
        counter += 1

      # FIXME related to xia2-51 - this looks very very similar to the logic
      # in [A] above - is this duplicated logic?
      s = self._factory.Sortmtz()

      pointless_hklin = os.path.join(self.get_working_directory(),
                            '%s_%s_prepointless_sorted.mtz' % \
                            (self._scalr_pname, self._scalr_xname))

      s.set_hklout(pointless_hklin)

      for hklin in pointless_hklins:
        s.add_hklin(hklin)

      s.sort()

      pointless_const = os.path.join(self.get_working_directory(),
                            '%s_%s_prepointless_const.mtz' % \
                            (self._scalr_pname, self._scalr_xname))
      FileHandler.record_temporary_file(pointless_const)

      aimless_const = self._factory.Aimless()
      aimless_const.set_hklin(pointless_hklin)
      aimless_const.set_hklout(pointless_const)
      aimless_const.const()

      pointless_const = os.path.join(self.get_working_directory(),
                            '%s_%s_prepointless_const_unmerged.mtz' % \
                            (self._scalr_pname, self._scalr_xname))
      FileHandler.record_temporary_file(pointless_const)
      pointless_hklin = pointless_const

      pointgroup, reindex_op, ntr, pt = \
                  self._pointless_indexer_multisweep(
          pointless_hklin, refiners)

      for epoch in self._sweep_handler.get_epochs():
        pointgroups[epoch] = pointgroup
        reindex_ops[epoch] = reindex_op

    else:
      for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)

        hklin = si.get_reflections()

        integrater = si.get_integrater()
        refiner = integrater.get_integrater_refiner()

        if self._scalr_input_pointgroup:
          Debug.write('Using input pointgroup: %s' % \
                      self._scalr_input_pointgroup)
          pointgroup = self._scalr_input_pointgroup
          reindex_op = 'h,k,l'
          pt = False

        else:

          pointless_hklin = self._prepare_pointless_hklin(
              hklin, si.get_integrater().get_phi_width())

          pointgroup, reindex_op, ntr, pt = \
                      self._pointless_indexer_jiffy(
              pointless_hklin, refiner)

          Debug.write('X1698: %s: %s' % (pointgroup, reindex_op))

          if ntr:

            integrater.integrater_reset_reindex_operator()
            need_to_return = True

        if pt and not probably_twinned:
          probably_twinned = True

        Debug.write('Pointgroup: %s (%s)' % (pointgroup, reindex_op))

        pointgroups[epoch] = pointgroup
        reindex_ops[epoch] = reindex_op

    overall_pointgroup = None

    pointgroup_set = {pointgroups[e] for e in pointgroups}

    if len(pointgroup_set) > 1 and \
       not probably_twinned:
      raise RuntimeError('non uniform pointgroups')

    if len(pointgroup_set) > 1:
      Debug.write('Probably twinned, pointgroups: %s' % \
                  ' '.join([p.replace(' ', '') for p in \
                            list(pointgroup_set)]))
      numbers = [Syminfo.spacegroup_name_to_number(s) for s in \
                 pointgroup_set]
      overall_pointgroup = Syminfo.spacegroup_number_to_name(min(numbers))
      self._scalr_input_pointgroup = overall_pointgroup

      Chatter.write('Twinning detected, assume pointgroup %s' % \
                    overall_pointgroup)

      need_to_return = True

    else:
      overall_pointgroup = pointgroup_set.pop()

    for epoch in self._sweep_handler.get_epochs():
      si = self._sweep_handler.get_sweep_information(epoch)

      integrater = si.get_integrater()

      integrater.set_integrater_spacegroup_number(
          Syminfo.spacegroup_name_to_number(overall_pointgroup))
      integrater.set_integrater_reindex_operator(
          reindex_ops[epoch], reason='setting point group')
      # This will give us the reflections in the correct point group
      si.set_reflections(integrater.get_integrater_intensities())

    if need_to_return:
      self.set_scaler_done(False)
      self.set_scaler_prepare_done(False)
      return

    # in here now optionally work through the data files which should be
    # indexed with a consistent point group, and transform the orientation
    # matrices by the lattice symmetry operations (if possible) to get a
    # consistent definition of U matrix modulo fixed rotations

    if PhilIndex.params.xia2.settings.unify_setting:

      from scitbx.matrix import sqr
      reference_U = None
      i3 = sqr((1, 0, 0, 0, 1, 0, 0, 0, 1))
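      # i3 is the 3x3 identity; each candidate symmetry op R is scored below
      # by how far (U * R).inverse() * reference_U departs from it, elementwise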

      for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)
        intgr = si.get_integrater()
        fixed = sqr(intgr.get_goniometer().get_fixed_rotation())
        u, b, s = get_umat_bmat_lattice_symmetry_from_mtz(si.get_reflections())
        U = fixed.inverse() * sqr(u).transpose()
        B = sqr(b)

        if reference_U is None:
          reference_U = U
          continue

        results = []
        for op in s.all_ops():
          R = B * sqr(op.r().as_double()).transpose() * B.inverse()
          nearly_i3 = (U * R).inverse() * reference_U
          score = sum([abs(_n - _i) for (_n, _i) in zip(nearly_i3, i3)])
          results.append((score, op.r().as_hkl(), op))

        results.sort()
        best = results[0]
        Debug.write('Best reindex: %s %.3f' % (best[1], best[0]))
        intgr.set_integrater_reindex_operator(best[2].r().inverse().as_hkl(),
                                              reason='unifying [U] setting')
        si.set_reflections(intgr.get_integrater_intensities())

        # recalculate to verify
        u, b, s = get_umat_bmat_lattice_symmetry_from_mtz(si.get_reflections())
        U = fixed.inverse() * sqr(u).transpose()
        Debug.write('New reindex: %s' % (U.inverse() * reference_U))

        # FIXME I should probably raise an exception at this stage if this
        # is not about I3...

    if self.get_scaler_reference_reflection_file():
      self._reference = self.get_scaler_reference_reflection_file()
      Debug.write('Using HKLREF %s' % self._reference)

    elif PhilIndex.params.xia2.settings.scale.reference_reflection_file:
      self._reference = PhilIndex.params.xia2.settings.scale.reference_reflection_file
      Debug.write('Using HKLREF %s' % self._reference)

    params = PhilIndex.params
    use_brehm_diederichs = params.xia2.settings.use_brehm_diederichs
    if len(self._sweep_handler.get_epochs()) > 1 and use_brehm_diederichs:

      brehm_diederichs_files_in = []
      for epoch in self._sweep_handler.get_epochs():

        si = self._sweep_handler.get_sweep_information(epoch)
        hklin = si.get_reflections()
        brehm_diederichs_files_in.append(hklin)

      # now run cctbx.brehm_diederichs to figure out the indexing hand for
      # each sweep
      from xia2.Wrappers.Cctbx.BrehmDiederichs import BrehmDiederichs
      from xia2.lib.bits import auto_logfiler
      brehm_diederichs = BrehmDiederichs()
      brehm_diederichs.set_working_directory(self.get_working_directory())
      auto_logfiler(brehm_diederichs)
      brehm_diederichs.set_input_filenames(brehm_diederichs_files_in)
      # 1 or 3? 1 seems to work better?
      brehm_diederichs.set_asymmetric(1)
      brehm_diederichs.run()
      reindexing_dict = brehm_diederichs.get_reindexing_dict()

      for epoch in self._sweep_handler.get_epochs():

        si = self._sweep_handler.get_sweep_information(epoch)
        intgr = si.get_integrater()
        hklin = si.get_reflections()

        reindex_op = reindexing_dict.get(os.path.abspath(hklin))
        assert reindex_op is not None

        if 1 or reindex_op != 'h,k,l':
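          # NB '1 or ...' makes this condition always true, so the Brehm-
          # Diederichs operator is applied even when it is the identity h,k,l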
          # apply the reindexing operator
          intgr.set_integrater_reindex_operator(
            reindex_op, reason='match reference')
          si.set_reflections(intgr.get_integrater_intensities())

    elif len(self._sweep_handler.get_epochs()) > 1 and \
           not self._reference:

      first = self._sweep_handler.get_epochs()[0]
      si = self._sweep_handler.get_sweep_information(first)
      self._reference = si.get_reflections()

    if self._reference:

      md = self._factory.Mtzdump()
      md.set_hklin(self._reference)
      md.dump()

      if md.get_batches() and False:
        raise RuntimeError('reference reflection file %s unmerged' % \
              self._reference)

      datasets = md.get_datasets()

      if len(datasets) > 1 and False:
        raise RuntimeError('more than one dataset in %s' % \
              self._reference)

      # then get the unit cell, lattice etc.

      reference_lattice = Syminfo.get_lattice(md.get_spacegroup())
      reference_cell = md.get_dataset_info(datasets[0])['cell']

      # then compute the pointgroup from this...

      # ---------- REINDEX TO CORRECT (REFERENCE) SETTING ----------

      for epoch in self._sweep_handler.get_epochs():

        # if we are working with unified UB matrix then this should not
        # be a problem here (note, *if*; *should*)

        # what about e.g. alternative P1 settings?
        # see JIRA MXSW-904
        if PhilIndex.params.xia2.settings.unify_setting:
          continue

        pl = self._factory.Pointless()

        si = self._sweep_handler.get_sweep_information(epoch)
        hklin = si.get_reflections()

        pl.set_hklin(self._prepare_pointless_hklin(
            hklin, si.get_integrater().get_phi_width()))

        hklout = os.path.join(
            self.get_working_directory(),
            '%s_rdx2.mtz' % os.path.split(hklin)[-1][:-4])

        # we will want to delete this on exit
        FileHandler.record_temporary_file(hklout)

        # now set the initial reflection set as a reference...

        pl.set_hklref(self._reference)

        # https://github.com/xia2/xia2/issues/115 - should ideally iteratively
        # construct a reference or a tree of correlations to ensure correct
        # reference setting - however if small molecule assume has been
        # multi-sweep-indexed so can ignore "fatal errors" - temporary hack
        pl.decide_pointgroup(
          ignore_errors=PhilIndex.params.xia2.settings.small_molecule)

        Debug.write('Reindexing analysis of %s' % pl.get_hklin())

        pointgroup = pl.get_pointgroup()
        reindex_op = pl.get_reindex_operator()

        Debug.write('Operator: %s' % reindex_op)

        # apply this...

        integrater = si.get_integrater()

        integrater.set_integrater_reindex_operator(reindex_op,
                                                   reason='match reference')
        integrater.set_integrater_spacegroup_number(
            Syminfo.spacegroup_name_to_number(pointgroup))
        si.set_reflections(integrater.get_integrater_intensities())

        md = self._factory.Mtzdump()
        md.set_hklin(si.get_reflections())
        md.dump()

        datasets = md.get_datasets()

        if len(datasets) > 1:
          raise RuntimeError('more than one dataset in %s' % \
                si.get_reflections())

        # then get the unit cell, lattice etc.

        lattice = Syminfo.get_lattice(md.get_spacegroup())
        cell = md.get_dataset_info(datasets[0])['cell']

        if lattice != reference_lattice:
          raise RuntimeError('lattices differ in %s and %s' % \
                (self._reference, si.get_reflections()))

        Debug.write('Cell: %.2f %.2f %.2f %.2f %.2f %.2f' % cell)
        Debug.write('Ref:  %.2f %.2f %.2f %.2f %.2f %.2f' % reference_cell)

        for j in range(6):
          if math.fabs((cell[j] - reference_cell[j]) /
                       reference_cell[j]) > 0.1:
            raise RuntimeError( \
                  'unit cell parameters differ in %s and %s' % \
                  (self._reference, si.get_reflections()))

    # ---------- SORT TOGETHER DATA ----------

    self._sort_together_data_ccp4()

    self._scalr_resolution_limits = {}

    # store central resolution limit estimates

    batch_ranges = [
        self._sweep_handler.get_sweep_information(epoch).get_batch_range()
        for epoch in self._sweep_handler.get_epochs()
    ]

    self._resolution_limit_estimates = ersatz_resolution(
        self._prepared_reflections, batch_ranges)
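In the multi-sweep branch above, each sweep's batches are shifted onto a distinct, round-numbered range before sorting (si.set_batch_offset(...) plus rebatch(...)). A minimal sketch of that arithmetic, using a hypothetical stand-in for xia2.lib.bits.nifty_power_of_ten:

    def next_power_of_ten(n):
        # hypothetical stand-in for nifty_power_of_ten (assumption about its behaviour)
        power = 10
        while power <= n:
            power *= 10
        return power

    sweep_ranges = [(1, 137), (1, 90), (501, 560)]   # hypothetical (first, last) batches
    stride = next_power_of_ten(max(last - first + 1 for first, last in sweep_ranges))
    for counter, (first, last) in enumerate(sweep_ranges):
        offset = counter * stride - first + 1        # mirrors set_batch_offset(...)
        print('sweep %d -> batches %d to %d' % (counter, first + offset, last + offset))
    # sweep 0 -> batches 1 to 137, sweep 1 -> 1001 to 1090, sweep 2 -> 2001 to 2060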
Exemple #19
0
    def _integrate(self):
        '''Implement the integrater interface.'''

        # cite the program
        Citations.cite('mosflm')

        images_str = '%d to %d' % tuple(self._intgr_wedge)
        cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % tuple(self._intgr_cell)

        if len(self._fp_directory) <= 50:
            dirname = self._fp_directory
        else:
            dirname = '...%s' % self._fp_directory[-46:]

        Journal.block(
            'integrating', self._intgr_sweep_name, 'mosflm', {
                'images': images_str,
                'cell': cell_str,
                'lattice': self.get_integrater_refiner().get_refiner_lattice(),
                'template': self._fp_template,
                'directory': dirname,
                'resolution': '%.2f' % self._intgr_reso_high
            })

        self._mosflm_rerun_integration = False

        wd = self.get_working_directory()

        try:

            if self.get_integrater_sweep_name():
                pname, xname, dname = self.get_integrater_project_info()

            nproc = PhilIndex.params.xia2.settings.multiprocessing.nproc
            if nproc > 1:
                Debug.write('Parallel integration: %d jobs' % nproc)
                self._mosflm_hklout = self._mosflm_parallel_integrate()
            else:
                self._mosflm_hklout = self._mosflm_integrate()

            # record integration output for e.g. BLEND.

            sweep = self.get_integrater_sweep_name()
            if sweep:
                FileHandler.record_more_data_file(
                    '%s %s %s %s INTEGRATE' % (pname, xname, dname, sweep),
                    self._mosflm_hklout)

        except IntegrationError as e:
            if 'negative mosaic spread' in str(e):
                if self._mosflm_postref_fix_mosaic:
                    Chatter.write(
                        'Negative mosaic spread - stopping integration')
                    raise BadLatticeError('negative mosaic spread')

                Chatter.write('Negative mosaic spread - rerunning integration')
                self.set_integrater_done(False)
                self._mosflm_postref_fix_mosaic = True

        if self._mosflm_rerun_integration and not PhilIndex.params.dials.fast_mode:
            # make sure that this is run again...
            Chatter.write('Need to rerun the integration...')
            self.set_integrater_done(False)

        return self._mosflm_hklout
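The negative mosaic spread handler above retries by side effect rather than recursion: it records the fix (_mosflm_postref_fix_mosaic) and calls set_integrater_done(False), and the integrater framework then calls _integrate() again. A minimal sketch of that flag-driven loop (hypothetical class and names, not the xia2 interface itself):

    class FlagDrivenIntegrater(object):
        # hypothetical skeleton illustrating the retry pattern
        def __init__(self):
            self._done = False
            self._fix_mosaic = False

        def set_integrater_done(self, done):
            self._done = done

        def integrate(self):
            while not self._done:
                self._done = True      # assume success unless _integrate() resets it
                self._integrate()

        def _integrate(self):
            if not self._fix_mosaic:   # first pass "fails"
                self._fix_mosaic = True
                self.set_integrater_done(False)  # request another pass

    FlagDrivenIntegrater().integrate()  # runs _integrate() twice, then stops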
Exemple #20
0
  def _scale(self):
    '''Perform all of the operations required to deliver the scaled
    data.'''

    epochs = self._sweep_handler.get_epochs()

    if self._scalr_corrections:
      Journal.block(
          'scaling', self.get_scaler_xcrystal().get_name(), 'CCP4',
          {'scaling model':'automatic',
           'absorption':self._scalr_correct_absorption,
           'decay':self._scalr_correct_decay
           })

    else:
      Journal.block(
          'scaling', self.get_scaler_xcrystal().get_name(), 'CCP4',
          {'scaling model':'default'})

    sc = self._updated_aimless()
    sc.set_hklin(self._prepared_reflections)
    sc.set_chef_unmerged(True)
    sc.set_new_scales_file('%s.scales' % self._scalr_xname)

    user_resolution_limits = {}

    for epoch in epochs:

      si = self._sweep_handler.get_sweep_information(epoch)
      pname, xname, dname = si.get_project_info()
      sname = si.get_sweep_name()
      intgr = si.get_integrater()

      if intgr.get_integrater_user_resolution():
        dmin = intgr.get_integrater_high_resolution()

        if (dname, sname) not in user_resolution_limits:
          user_resolution_limits[(dname, sname)] = dmin
        elif dmin < user_resolution_limits[(dname, sname)]:
          user_resolution_limits[(dname, sname)] = dmin

      start, end = si.get_batch_range()

      if (dname, sname) in self._scalr_resolution_limits:
        resolution, _ = self._scalr_resolution_limits[(dname, sname)]
        sc.add_run(start, end, exclude=False, resolution=resolution, name=sname)
      else:
        sc.add_run(start, end, name=sname)

    sc.set_hklout(os.path.join(self.get_working_directory(),
                               '%s_%s_scaled_test.mtz' % \
                               (self._scalr_pname, self._scalr_xname)))

    if self.get_scaler_anomalous():
      sc.set_anomalous()

    # what follows, sucks

    failover = PhilIndex.params.xia2.settings.failover
    if failover:

      try:
        sc.scale()
      except RuntimeError as e:

        es = str(e)

        if 'bad batch' in es or \
               'negative scales run' in es or \
               'no observations' in es:

          # first ID the sweep from the batch no

          batch = int(es.split()[-1])
          epoch = self._identify_sweep_epoch(batch)
          sweep = self._scalr_integraters[epoch].get_integrater_sweep()

          # then remove it from my parent xcrystal

          self.get_scaler_xcrystal().remove_sweep(sweep)

          # then remove it from the scaler list of integraters
          # - this should really be a scaler interface method

          del self._scalr_integraters[epoch]

          # then tell the user what is happening

          Chatter.write(
              'Sweep %s gave negative scales - removing' % \
              sweep.get_name())

          # then reset the prepare, do, finish flags

          self.set_scaler_prepare_done(False)
          self.set_scaler_done(False)
          self.set_scaler_finish_done(False)

          # and return

          return

        else:

          raise

    else:
      sc.scale()

    # then gather up all of the resulting reflection files
    # and convert them into the required formats (.sca, .mtz.)

    data = sc.get_summary()

    loggraph = sc.parse_ccp4_loggraph()

    resolution_info = {}

    reflection_files = sc.get_scaled_reflection_files()

    for dataset in reflection_files:
      FileHandler.record_temporary_file(reflection_files[dataset])

    for key in loggraph:
      if 'Analysis against resolution' in key:
        dataset = key.split(',')[-1].strip()
        resolution_info[dataset] = transpose_loggraph(loggraph[key])

    highest_resolution = 100.0
    highest_suggested_resolution = None

    # check in here that there is actually some data to scale..!

    if not resolution_info:
      raise RuntimeError('no resolution info')

    for epoch in epochs:

      si = self._sweep_handler.get_sweep_information(epoch)
      pname, xname, dname = si.get_project_info()
      sname = si.get_sweep_name()
      intgr = si.get_integrater()
      start, end = si.get_batch_range()

      if (dname, sname) in self._scalr_resolution_limits:
        continue

      elif (dname, sname) in user_resolution_limits:
        limit = user_resolution_limits[(dname, sname)]
        self._scalr_resolution_limits[(dname, sname)] = (limit, None)
        if limit < highest_resolution:
          highest_resolution = limit
        Chatter.write('Resolution limit for %s: %5.2f (user provided)' % \
                      (dname, limit))
        continue

      hklin = sc.get_unmerged_reflection_file()
      limit, reasoning = self._estimate_resolution_limit(
        hklin, batch_range=(start, end))

      if PhilIndex.params.xia2.settings.resolution.keep_all_reflections:
        suggested = limit
        if highest_suggested_resolution is None or limit < highest_suggested_resolution:
          highest_suggested_resolution = limit
        limit = intgr.get_detector().get_max_resolution(intgr.get_beam_obj().get_s0())
        self._scalr_resolution_limits[(dname, sname)] = (limit, suggested)
        Debug.write('keep_all_reflections set, using detector limits')
      Debug.write('Resolution for sweep %s: %.2f' % \
                  (sname, limit))

      if not (dname, sname) in self._scalr_resolution_limits:
        self._scalr_resolution_limits[(dname, sname)] = (limit, None)
        self.set_scaler_done(False)

      if limit < highest_resolution:
        highest_resolution = limit

      limit, suggested = self._scalr_resolution_limits[(dname, sname)]
      if suggested is None or limit == suggested:
        reasoning_str = ''
        if reasoning:
          reasoning_str = ' (%s)' % reasoning
        Chatter.write('Resolution for sweep %s/%s: %.2f%s' % \
                      (dname, sname, limit, reasoning_str))
      else:
        Chatter.write('Resolution limit for %s/%s: %5.2f (%5.2f suggested)' % \
                      (dname, sname, limit, suggested))

    if highest_suggested_resolution is not None and \
        highest_resolution >= (highest_suggested_resolution - 0.004):
      Debug.write('Dropping resolution cut-off suggestion since it is'
                  ' essentially identical to the actual resolution limit.')
      highest_suggested_resolution = None
    self._scalr_highest_resolution = highest_resolution
    self._scalr_highest_suggested_resolution = highest_suggested_resolution
    if highest_suggested_resolution is not None:
      Debug.write('Suggested highest resolution is %5.2f (%5.2f suggested)' % \
                (highest_resolution, highest_suggested_resolution))
    else:
      Debug.write('Scaler highest resolution set to %5.2f' % \
                highest_resolution)

    if not self.get_scaler_done():
      Debug.write('Returning as scaling not finished...')
      return

    batch_info = {}

    for key in loggraph:
      if 'Analysis against Batch' in key:
        dataset = key.split(',')[-1].strip()
        batch_info[dataset] = transpose_loggraph(loggraph[key])

    sc = self._updated_aimless()

    FileHandler.record_log_file('%s %s aimless' % (self._scalr_pname,
                                                   self._scalr_xname),
                                sc.get_log_file())

    sc.set_hklin(self._prepared_reflections)
    sc.set_new_scales_file('%s_final.scales' % self._scalr_xname)

    for epoch in epochs:

      si = self._sweep_handler.get_sweep_information(epoch)
      pname, xname, dname = si.get_project_info()
      sname = si.get_sweep_name()
      start, end = si.get_batch_range()

      resolution_limit, _ = self._scalr_resolution_limits[(dname, sname)]

      sc.add_run(start, end, exclude=False, resolution=resolution_limit,
                 name=xname)

    sc.set_hklout(os.path.join(self.get_working_directory(),
                               '%s_%s_scaled.mtz' % \
                               (self._scalr_pname, self._scalr_xname)))

    if self.get_scaler_anomalous():
      sc.set_anomalous()

    sc.scale()

    FileHandler.record_xml_file('%s %s aimless xml' % (self._scalr_pname,
                                                       self._scalr_xname),
                                sc.get_xmlout())

    data = sc.get_summary()
    scales_file = sc.get_new_scales_file()
    loggraph = sc.parse_ccp4_loggraph()

    standard_deviation_info = {}

    for key in loggraph:
      if 'standard deviation v. Intensity' in key:
        dataset = key.split(',')[-1].strip()
        standard_deviation_info[dataset] = transpose_loggraph(loggraph[key])

    resolution_info = {}

    for key in loggraph:
      if 'Analysis against resolution' in key:
        dataset = key.split(',')[-1].strip()
        resolution_info[dataset] = transpose_loggraph(loggraph[key])

    batch_info = {}

    for key in loggraph:
      if 'Analysis against Batch' in key:
        dataset = key.split(',')[-1].strip()
        batch_info[dataset] = transpose_loggraph(loggraph[key])

    # finally put all of the results "somewhere useful"

    self._scalr_statistics = data

    self._scalr_scaled_refl_files = copy.deepcopy(
        sc.get_scaled_reflection_files())

    sc = self._updated_aimless()
    sc.set_hklin(self._prepared_reflections)
    sc.set_scales_file(scales_file)

    self._wavelengths_in_order = []

    for epoch in epochs:
      si = self._sweep_handler.get_sweep_information(epoch)
      pname, xname, dname = si.get_project_info()
      sname = si.get_sweep_name()
      start, end = si.get_batch_range()

      resolution_limit, _ = self._scalr_resolution_limits[(dname, sname)]

      sc.add_run(start, end, exclude=False, resolution=resolution_limit,
                 name=sname)

      if dname not in self._wavelengths_in_order:
        self._wavelengths_in_order.append(dname)

    sc.set_hklout(os.path.join(self.get_working_directory(),
                               '%s_%s_scaled.mtz' % \
                               (self._scalr_pname,
                                self._scalr_xname)))

    sc.set_scalepack()

    if self.get_scaler_anomalous():
      sc.set_anomalous()
    sc.scale()

    self._update_scaled_unit_cell()

    self._scalr_scaled_reflection_files = {}
    self._scalr_scaled_reflection_files['sca'] = {}
    self._scalr_scaled_reflection_files['sca_unmerged'] = {}
    self._scalr_scaled_reflection_files['mtz_unmerged'] = {}

    for key in self._scalr_scaled_refl_files:
      hklout = self._scalr_scaled_refl_files[key]

      scaout = '%s.sca' % hklout[:-4]
      self._scalr_scaled_reflection_files['sca'][key] = scaout
      FileHandler.record_data_file(scaout)
      scalepack = os.path.join(os.path.split(hklout)[0],
                               os.path.split(hklout)[1].replace(
          '_scaled', '_scaled_unmerged').replace('.mtz', '.sca'))
      self._scalr_scaled_reflection_files['sca_unmerged'][key] = scalepack
      FileHandler.record_data_file(scalepack)
      mtz_unmerged = os.path.splitext(scalepack)[0] + '.mtz'
      self._scalr_scaled_reflection_files['mtz_unmerged'][key] = mtz_unmerged
      FileHandler.record_data_file(mtz_unmerged)

      if self._scalr_cell_esd is not None:
        # patch .mtz and overwrite unit cell information
        import xia2.Modules.Scaler.tools as tools
        override_cell = self._scalr_cell_dict.get('%s_%s_%s' %
          (self._scalr_pname, self._scalr_xname, key))[0]
        tools.patch_mtz_unit_cell(mtz_unmerged, override_cell)
        tools.patch_mtz_unit_cell(hklout, override_cell)

    if PhilIndex.params.xia2.settings.merging_statistics.source == 'cctbx':
      for key in self._scalr_scaled_refl_files:
        stats = self._compute_scaler_statistics(
          self._scalr_scaled_reflection_files['mtz_unmerged'][key],
          selected_band=(highest_suggested_resolution, None), wave=key)
        self._scalr_statistics[
          (self._scalr_pname, self._scalr_xname, key)] = stats

    sc = self._updated_aimless()
    sc.set_hklin(self._prepared_reflections)
    sc.set_scales_file(scales_file)

    self._wavelengths_in_order = []

    for epoch in epochs:

      si = self._sweep_handler.get_sweep_information(epoch)
      pname, xname, dname = si.get_project_info()
      sname = si.get_sweep_name()
      start, end = si.get_batch_range()

      resolution_limit, _ = self._scalr_resolution_limits[(dname, sname)]

      sc.add_run(start, end, exclude=False, resolution=resolution_limit,
                 name=sname)

      if dname not in self._wavelengths_in_order:
        self._wavelengths_in_order.append(dname)

    sc.set_hklout(os.path.join(self.get_working_directory(),
                               '%s_%s_chef.mtz' % \
                               (self._scalr_pname,
                                self._scalr_xname)))

    sc.set_chef_unmerged(True)

    if self.get_scaler_anomalous():
      sc.set_anomalous()
    sc.scale()
    if not PhilIndex.params.dials.fast_mode:
      try:
        self._generate_absorption_map(sc)
      except Exception as e:
        # Map generation may fail for a number of reasons, e.g. matplotlib broken
        Debug.write("Could not generate absorption map (%s)" % e)
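The per-sweep limits above are keyed by (dname, sname) and stored as (applied, suggested) pairs; the crystal-wide limit is simply the numerically smallest applied value. A toy illustration with hypothetical numbers:

    limits = {
        ('WAVE1', 'SWEEP1'): (1.45, None),  # estimated limit, no separate suggestion
        ('WAVE1', 'SWEEP2'): (1.20, 1.38),  # keep_all_reflections: detector edge applied,
    }                                       # the estimate kept only as a suggestion
    highest_resolution = min(applied for applied, _ in limits.values())
    assert highest_resolution == 1.20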
Exemple #21
0
    def _index(self):
        '''Actually do the autoindexing using the data prepared by the
        previous method.'''

        images_str = '%d to %d' % tuple(self._indxr_images[0])
        for i in self._indxr_images[1:]:
            images_str += ', %d to %d' % tuple(i)

        cell_str = None
        if self._indxr_input_cell:
            cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % \
                       self._indxr_input_cell

        # then this is a proper autoindexing run - describe this
        # to the journal entry

        #if len(self._fp_directory) <= 50:
        #dirname = self._fp_directory
        #else:
        #dirname = '...%s' % self._fp_directory[-46:]
        dirname = self.get_directory()

        Journal.block(
            'autoindexing', self._indxr_sweep_name, 'XDS', {
                'images': images_str,
                'target cell': cell_str,
                'target lattice': self._indxr_input_lattice,
                'template': self.get_template(),
                'directory': dirname
            })

        self._index_remove_masked_regions()

        if self._i_or_ii is None:
            self._i_or_ii = self.decide_i_or_ii()
            Debug.write('Selecting I or II, chose %s' % self._i_or_ii)

        idxref = self.Idxref()

        for file in ['SPOT.XDS']:
            idxref.set_input_data_file(file, self._indxr_payload[file])

        # set the phi start etc correctly

        idxref.set_data_range(self._indxr_images[0][0],
                              self._indxr_images[0][1])
        idxref.set_background_range(self._indxr_images[0][0],
                                    self._indxr_images[0][1])

        if self._i_or_ii == 'i':
            blocks = self._index_select_images_i()
            for block in blocks[:1]:
                starting_frame = block[0]
                starting_angle = self.get_scan().get_angle_from_image_index(
                    starting_frame)

                idxref.set_starting_frame(starting_frame)
                idxref.set_starting_angle(starting_angle)

                idxref.add_spot_range(block[0], block[1])

            for block in blocks[1:]:
                idxref.add_spot_range(block[0], block[1])
        else:
            for block in self._indxr_images[:1]:
                starting_frame = block[0]
                starting_angle = self.get_scan().get_angle_from_image_index(
                    starting_frame)

                idxref.set_starting_frame(starting_frame)
                idxref.set_starting_angle(starting_angle)

                idxref.add_spot_range(block[0], block[1])

            for block in self._indxr_images[1:]:
                idxref.add_spot_range(block[0], block[1])

        # FIXME need to also be able to pass in the known unit
        # cell and lattice if already available e.g. from
        # the helper... indirectly

        if self._indxr_user_input_lattice:
            idxref.set_indexer_user_input_lattice(True)

        if self._indxr_input_lattice and self._indxr_input_cell:
            idxref.set_indexer_input_lattice(self._indxr_input_lattice)
            idxref.set_indexer_input_cell(self._indxr_input_cell)

            Debug.write('Set lattice: %s' % self._indxr_input_lattice)
            Debug.write('Set cell: %f %f %f %f %f %f' % \
                        self._indxr_input_cell)

            original_cell = self._indxr_input_cell
        elif self._indxr_input_lattice:
            idxref.set_indexer_input_lattice(self._indxr_input_lattice)
            original_cell = None
        else:
            original_cell = None

        # FIXED need to set the beam centre here - this needs to come
        # from the input .xinfo object or header, and be converted
        # to the XDS frame... done.

        #mosflm_beam_centre = self.get_beam_centre()
        #xds_beam_centre = beam_centre_mosflm_to_xds(
        #mosflm_beam_centre[0], mosflm_beam_centre[1], self.get_header())
        from dxtbx.serialize.xds import to_xds
        converter = to_xds(self.get_imageset())
        xds_beam_centre = converter.detector_origin

        idxref.set_beam_centre(xds_beam_centre[0], xds_beam_centre[1])

        # fixme need to check if the lattice, cell have been set already,
        # and if they have, pass these in as input to the indexing job.

        done = False

        while not done:
            try:
                done = idxref.run()

                # N.B. in here if the IDXREF step was being run in the first
                # pass done is FALSE however there should be a refined
                # P1 orientation matrix etc. available - so keep it!

            except XDSException as e:
                # inspect this - if we have complaints about not
                # enough reflections indexed, and we have a target
                # unit cell, and they are the same, well ignore it

                if 'solution is inaccurate' in str(e):
                    Debug.write('XDS complains solution inaccurate - ignoring')
                    done = idxref.continue_from_error()
                elif ('insufficient percentage (< 70%)' in str(e) or
                      'insufficient percentage (< 50%)' in str(e)) and \
                         original_cell:
                    done = idxref.continue_from_error()
                    lattice, cell, mosaic = \
                             idxref.get_indexing_solution()
                    # compare solutions
                    check = PhilIndex.params.xia2.settings.xds_check_cell_deviation
                    for j in range(3):
                        # allow two percent variation in unit cell length
                        if math.fabs((cell[j] - original_cell[j]) / \
                                     original_cell[j]) > 0.02 and check:
                            Debug.write('XDS unhappy and solution wrong')
                            raise e
                        # and two degree difference in angle
                        if math.fabs(cell[j + 3] - original_cell[j + 3]) \
                               > 2.0 and check:
                            Debug.write('XDS unhappy and solution wrong')
                            raise e
                    Debug.write('XDS unhappy but solution ok')
                elif 'insufficient percentage (< 70%)' in str(e) or \
                         'insufficient percentage (< 50%)' in str(e):
                    Debug.write('XDS unhappy but solution probably ok')
                    done = idxref.continue_from_error()
                else:
                    raise e

        FileHandler.record_log_file(
            '%s INDEX' % self.get_indexer_full_name(),
            os.path.join(self.get_working_directory(), 'IDXREF.LP'))

        for file in ['SPOT.XDS', 'XPARM.XDS']:
            self._indxr_payload[file] = idxref.get_output_data_file(file)

        # need to get the indexing solutions out somehow...

        self._indxr_other_lattice_cell = idxref.get_indexing_solutions()

        self._indxr_lattice, self._indxr_cell, self._indxr_mosaic = \
                             idxref.get_indexing_solution()

        import dxtbx
        from dxtbx.serialize.xds import to_crystal
        xparm_file = os.path.join(self.get_working_directory(), 'XPARM.XDS')
        models = dxtbx.load(xparm_file)
        crystal_model = to_crystal(xparm_file)

        from dxtbx.model import Experiment, ExperimentList
        experiment = Experiment(
            beam=models.get_beam(),
            detector=models.get_detector(),
            goniometer=models.get_goniometer(),
            scan=models.get_scan(),
            crystal=crystal_model,
            #imageset=self.get_imageset(),
        )

        experiment_list = ExperimentList([experiment])
        self.set_indexer_experiment_list(experiment_list)

        # I will want this later on to check that the lattice was ok
        self._idxref_subtree_problem = idxref.get_index_tree_problem()

        return
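The XDSException fallback above accepts an "insufficient percentage" solution only if it still matches the target cell (when xds_check_cell_deviation is set): each length within 2% and each angle within 2 degrees. The same check pulled out as a stand-alone sketch (hypothetical helper name):

    import math

    def cell_close_enough(cell, reference, length_tol=0.02, angle_tol=2.0):
        # hypothetical helper mirroring the inline check in _index()
        for j in range(3):
            if math.fabs((cell[j] - reference[j]) / reference[j]) > length_tol:
                return False               # length differs by more than 2%
            if math.fabs(cell[j + 3] - reference[j + 3]) > angle_tol:
                return False               # angle differs by more than 2 degrees
        return True

    assert cell_close_enough((78.1, 78.2, 37.0, 90, 90, 90),
                             (78.0, 78.0, 37.2, 90, 90, 90))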
Exemple #22
0
    def _integrate(self):
        '''Actually do the integration - in XDS terms this will mean running
        DEFPIX and INTEGRATE to measure all the reflections.'''

        images_str = '%d to %d' % tuple(self._intgr_wedge)
        cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % tuple(self._intgr_cell)

        if len(self._fp_directory) <= 50:
            dirname = self._fp_directory
        else:
            dirname = '...%s' % self._fp_directory[-46:]

        Journal.block(
            'integrating', self._intgr_sweep_name, 'DIALS', {
                'images': images_str,
                'cell': cell_str,
                'lattice': self.get_integrater_refiner().get_refiner_lattice(),
                'template': self._fp_template,
                'directory': dirname,
                'resolution': '%.2f' % self._intgr_reso_high
            })

        integrate = self.Integrate()

        # decide what images we are going to process, if not already
        # specified

        if not self._intgr_wedge:
            images = self.get_matching_images()
            self.set_integrater_wedge(min(images), max(images))

        imageset = self.get_imageset()
        beam = imageset.get_beam()
        detector = imageset.get_detector()

        d_min_limit = detector.get_max_resolution(beam.get_s0())
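        # d-spacing at the detector edge for this beam; the limit is opened up
        # to the edge when the requested value is unreachable or when
        # keep_all_reflections is set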
        if d_min_limit > self._intgr_reso_high \
            or PhilIndex.params.xia2.settings.resolution.keep_all_reflections:
            Debug.write('Overriding high resolution limit: %f => %f' % \
                        (self._intgr_reso_high, d_min_limit))
            self._intgr_reso_high = d_min_limit

        integrate.set_experiments_filename(self._intgr_experiments_filename)
        integrate.set_reflections_filename(self._intgr_indexed_filename)
        integrate.set_d_max(self._intgr_reso_low)
        integrate.set_d_min(self._intgr_reso_high)
        pname, xname, dname = self.get_integrater_project_info()
        sweep = self.get_integrater_sweep_name()
        FileHandler.record_log_file('%s %s %s %s INTEGRATE' % \
                                    (pname, xname, dname, sweep),
                                    integrate.get_log_file())

        try:
            integrate.run()
        except xia2.Wrappers.Dials.Integrate.DIALSIntegrateError as e:
            s = str(e)
            if ('dials.integrate requires more memory than is available.' in s
                    and not self._intgr_reso_high):
                # Try to estimate a more sensible resolution limit for integration
                # in case we were just integrating noise to the edge of the detector
                images = self._integrate_select_images_wedges()

                Debug.write(
                    'Integrating subset of images to estimate resolution limit.\n'
                    'Integrating images %s' % images)

                integrate = self.Integrate()
                integrate.set_experiments_filename(
                    self._intgr_experiments_filename)
                integrate.set_reflections_filename(
                    self._intgr_indexed_filename)
                integrate.set_d_max(self._intgr_reso_low)
                integrate.set_d_min(self._intgr_reso_high)
                for (start, stop) in images:
                    integrate.add_scan_range(
                        start - self.get_matching_images()[0],
                        stop - self.get_matching_images()[0])
                integrate.set_reflections_per_degree(1000)
                integrate.run()

                integrated_pickle = integrate.get_integrated_filename()

                from xia2.Wrappers.Dials.EstimateResolutionLimit import EstimateResolutionLimit
                d_min_estimater = EstimateResolutionLimit()
                d_min_estimater.set_working_directory(
                    self.get_working_directory())
                auto_logfiler(d_min_estimater)
                d_min_estimater.set_experiments_filename(
                    self._intgr_experiments_filename)
                d_min_estimater.set_reflections_filename(integrated_pickle)
                d_min = d_min_estimater.run()

                Debug.write('Estimate for d_min: %.2f' % d_min)
                Debug.write('Re-running integration to this resolution limit')

                self._intgr_reso_high = d_min
                self.set_integrater_done(False)
                return
            raise Sorry(e)

        self._intgr_experiments_filename = integrate.get_integrated_experiments()

        # also record the batch range - needed for the analysis of the
        # radiation damage in chef...

        self._intgr_batches_out = (self._intgr_wedge[0], self._intgr_wedge[1])

        # FIXME (i) record the log file, (ii) get more information out from the
        # integration log on the quality of the data and (iii) the mosaic spread
        # range observed and R.M.S. deviations.

        self._intgr_integrated_pickle = integrate.get_integrated_reflections()
        if not os.path.isfile(self._intgr_integrated_pickle):
            raise RuntimeError("Integration failed: %s does not exist." %
                               self._intgr_integrated_pickle)

        self._intgr_per_image_statistics = integrate.get_per_image_statistics()
        Chatter.write(self.show_per_image_statistics())

        report = self.Report()
        html_filename = os.path.join(
            self.get_working_directory(),
            '%i_dials.integrate.report.html' % report.get_xpid())
        report.set_html_filename(html_filename)
        report.run()
        FileHandler.record_html_file('%s %s %s %s INTEGRATE' % \
                                     (pname, xname, dname, sweep),
                                     html_filename)

        import dials
        from dxtbx.serialize import load
        experiments = load.experiment_list(self._intgr_experiments_filename)
        profile = experiments.profiles()[0]
        mosaic = profile.sigma_m()
        self.set_integrater_mosaic_min_mean_max(mosaic, mosaic, mosaic)

        Chatter.write('Mosaic spread: %.3f < %.3f < %.3f' % \
                      self.get_integrater_mosaic_min_mean_max())

        return self._intgr_integrated_pickle
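
One easily missed detail in the fallback branch above: the scan ranges passed to dials.integrate are expressed relative to the first matching image, which is why get_matching_images()[0] is subtracted from each wedge boundary. A minimal sketch of just that arithmetic, on plain data with no wrappers:

def to_scan_ranges(wedges, matching_images):
    # wedges are (start, stop) pairs in absolute image numbers; the
    # integration wrapper wants them offset so the first image is 0
    first = matching_images[0]
    return [(start - first, stop - first) for start, stop in wedges]

# e.g. images numbered 1..90: wedge (1, 30) becomes scan range (0, 29)
assert to_scan_ranges([(1, 30)], list(range(1, 91))) == [(0, 29)]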
Example #23
  def _scale_prepare(self):
    '''Perform all of the preparation required to deliver the scaled
    data. This should sort together the reflection files, ensure that
    they are correctly indexed (via pointless) and generally tidy
    things up.'''

    # acknowledge all of the programs we are about to use...

    Citations.cite('pointless')
    Citations.cite('aimless')
    Citations.cite('ccp4')

    # ---------- GATHER ----------

    self._sweep_handler = SweepInformationHandler(self._scalr_integraters)

    Journal.block(
        'gathering', self.get_scaler_xcrystal().get_name(), 'CCP4',
        {'working directory':self.get_working_directory()})

    for epoch in self._sweep_handler.get_epochs():
      si = self._sweep_handler.get_sweep_information(epoch)
      pname, xname, dname = si.get_project_info()
      sname = si.get_sweep_name()

      exclude_sweep = False

      for sweep in PhilIndex.params.xia2.settings.sweep:
        if sweep.id == sname and sweep.exclude:
          exclude_sweep = True
          break

      if exclude_sweep:
        self._sweep_handler.remove_epoch(epoch)
        Debug.write('Excluding sweep %s' %sname)
      else:
        Journal.entry({'adding data from':'%s/%s/%s' % \
                       (xname, dname, sname)})

    # gather data for all images which belonged to the parent
    # crystal - allowing for the fact that things could go wrong
    # e.g. epoch information not available, exposure times not in
    # headers etc...

    for e in self._sweep_handler.get_epochs():
      si = self._sweep_handler.get_sweep_information(e)
      assert is_mtz_file(si.get_reflections())

    p, x = self._sweep_handler.get_project_info()
    self._scalr_pname = p
    self._scalr_xname = x

    # verify that the lattices are consistent, calling eliminate if
    # they are not N.B. there could be corner cases here

    need_to_return = False

    multi_sweep_indexing = \
      PhilIndex.params.xia2.settings.developmental.multi_sweep_indexing


    if len(self._sweep_handler.get_epochs()) > 1:

      if multi_sweep_indexing and not self._scalr_input_pointgroup:
        pointless_hklins = []

        max_batches = 0
        for epoch in self._sweep_handler.get_epochs():
          si = self._sweep_handler.get_sweep_information(epoch)
          hklin = si.get_reflections()

          md = self._factory.Mtzdump()
          md.set_hklin(hklin)
          md.dump()

          batches = md.get_batches()
          if 1 + max(batches) - min(batches) > max_batches:
            max_batches = max(batches) - min(batches) + 1

          datasets = md.get_datasets()

          Debug.write('In reflection file %s found:' % hklin)
          for d in datasets:
            Debug.write('... %s' % d)

          dataset_info = md.get_dataset_info(datasets[0])

        from xia2.lib.bits import nifty_power_of_ten
        Debug.write('Biggest sweep has %d batches' % max_batches)
        max_batches = nifty_power_of_ten(max_batches)

        counter = 0

        for epoch in self._sweep_handler.get_epochs():
          si = self._sweep_handler.get_sweep_information(epoch)
          hklin = si.get_reflections()
          integrater = si.get_integrater()
          refiner = integrater.get_integrater_refiner()

          hklin = self._prepare_pointless_hklin(
            hklin, si.get_integrater().get_phi_width())

          rb = self._factory.Rebatch()

          hklout = os.path.join(self.get_working_directory(),
                                '%s_%s_%s_%s_prepointless.mtz' % \
                                (pname, xname, dname, si.get_sweep_name()))

          # we will want to delete this on exit
          FileHandler.record_temporary_file(hklout)

          first_batch = min(si.get_batches())
          si.set_batch_offset(counter * max_batches - first_batch + 1)

          rb.set_hklin(hklin)
          rb.set_first_batch(counter * max_batches + 1)
          rb.set_project_info(pname, xname, dname)
          rb.set_hklout(hklout)

          new_batches = rb.rebatch()

          pointless_hklins.append(hklout)

          # update the counter & recycle
          counter += 1

        s = self._factory.Sortmtz()

        pointless_hklin = os.path.join(self.get_working_directory(),
                              '%s_%s_prepointless_sorted.mtz' % \
                              (self._scalr_pname, self._scalr_xname))

        s.set_hklout(pointless_hklin)

        for hklin in pointless_hklins:
          s.add_hklin(hklin)

        s.sort()

        pointgroup, reindex_op, ntr, pt = self._pointless_indexer_jiffy(
            pointless_hklin, refiner)

        Debug.write('X1698: %s: %s' % (pointgroup, reindex_op))

        lattices = [Syminfo.get_lattice(pointgroup)]

        for epoch in self._sweep_handler.get_epochs():
          si = self._sweep_handler.get_sweep_information(epoch)
          intgr = si.get_integrater()
          hklin = si.get_reflections()
          refiner = intgr.get_integrater_refiner()

          if ntr:
            intgr.integrater_reset_reindex_operator()
            need_to_return = True

      else:
        lattices = []

        for epoch in self._sweep_handler.get_epochs():

          si = self._sweep_handler.get_sweep_information(epoch)
          intgr = si.get_integrater()
          hklin = si.get_reflections()
          refiner = intgr.get_integrater_refiner()

          if self._scalr_input_pointgroup:
            pointgroup = self._scalr_input_pointgroup
            reindex_op = 'h,k,l'
            ntr = False

          else:
            pointless_hklin = self._prepare_pointless_hklin(
              hklin, si.get_integrater().get_phi_width())

            pointgroup, reindex_op, ntr, pt = self._pointless_indexer_jiffy(
                pointless_hklin, refiner)

            Debug.write('X1698: %s: %s' % (pointgroup, reindex_op))

          lattice = Syminfo.get_lattice(pointgroup)

          if not lattice in lattices:
            lattices.append(lattice)

          if ntr:

            intgr.integrater_reset_reindex_operator()
            need_to_return = True

      if len(lattices) > 1:

        # why not using pointless indexer jiffy??!

        correct_lattice = sort_lattices(lattices)[0]

        Chatter.write('Correct lattice asserted to be %s' % \
                      correct_lattice)

        # transfer this information back to the indexers
        for epoch in self._sweep_handler.get_epochs():

          si = self._sweep_handler.get_sweep_information(epoch)
          refiner = si.get_integrater().get_integrater_refiner()
          sname = si.get_sweep_name()

          state = refiner.set_refiner_asserted_lattice(
              correct_lattice)

          if state == refiner.LATTICE_CORRECT:
            Chatter.write('Lattice %s ok for sweep %s' % \
                          (correct_lattice, sname))
          elif state == refiner.LATTICE_IMPOSSIBLE:
            raise RuntimeError('Lattice %s impossible for %s' %
                               (correct_lattice, sname))
          elif state == refiner.LATTICE_POSSIBLE:
            Chatter.write('Lattice %s assigned for sweep %s' % \
                          (correct_lattice, sname))
            need_to_return = True

    # if one or more of them was not in the lowest lattice,
    # need to return here to allow reprocessing

    if need_to_return:
      self.set_scaler_done(False)
      self.set_scaler_prepare_done(False)
      return

    # ---------- REINDEX ALL DATA TO CORRECT POINTGROUP ----------

    # all should share the same pointgroup, unless twinned... in which
    # case force them to be...

    pointgroups = { }
    reindex_ops = { }
    probably_twinned = False

    need_to_return = False

    multi_sweep_indexing = \
      PhilIndex.params.xia2.settings.developmental.multi_sweep_indexing

    if multi_sweep_indexing and not self._scalr_input_pointgroup:
      pointless_hklins = []

      max_batches = 0
      for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)
        hklin = si.get_reflections()

        md = self._factory.Mtzdump()
        md.set_hklin(hklin)
        md.dump()

        batches = md.get_batches()
        if 1 + max(batches) - min(batches) > max_batches:
          max_batches = max(batches) - min(batches) + 1

        datasets = md.get_datasets()

        Debug.write('In reflection file %s found:' % hklin)
        for d in datasets:
          Debug.write('... %s' % d)

        dataset_info = md.get_dataset_info(datasets[0])

      from xia2.lib.bits import nifty_power_of_ten
      Debug.write('Biggest sweep has %d batches' % max_batches)
      max_batches = nifty_power_of_ten(max_batches)

      counter = 0

      for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)
        hklin = si.get_reflections()
        integrater = si.get_integrater()
        refiner = integrater.get_integrater_refiner()

        hklin = self._prepare_pointless_hklin(
            hklin, si.get_integrater().get_phi_width())

        rb = self._factory.Rebatch()

        hklout = os.path.join(self.get_working_directory(),
                              '%s_%s_%s_%s_prepointless.mtz' % \
                              (pname, xname, dname, si.get_sweep_name()))

        # we will want to delete this on exit
        FileHandler.record_temporary_file(hklout)

        first_batch = min(si.get_batches())
        si.set_batch_offset(counter * max_batches - first_batch + 1)

        rb.set_hklin(hklin)
        rb.set_first_batch(counter * max_batches + 1)
        rb.set_project_info(pname, xname, dname)
        rb.set_hklout(hklout)

        new_batches = rb.rebatch()

        pointless_hklins.append(hklout)

        # update the counter & recycle
        counter += 1

      s = self._factory.Sortmtz()

      pointless_hklin = os.path.join(self.get_working_directory(),
                            '%s_%s_prepointless_sorted.mtz' % \
                            (self._scalr_pname, self._scalr_xname))

      s.set_hklout(pointless_hklin)

      for hklin in pointless_hklins:
        s.add_hklin(hklin)

      s.sort()

      pointgroup, reindex_op, ntr, pt = self._pointless_indexer_jiffy(
          pointless_hklin, refiner)

      for epoch in self._sweep_handler.get_epochs():
        pointgroups[epoch] = pointgroup
        reindex_ops[epoch] = reindex_op

    else:
      for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)

        hklin = si.get_reflections()
        #hklout = os.path.join(
            #self.get_working_directory(),
            #os.path.split(hklin)[-1].replace('.mtz', '_rdx.mtz'))

        #FileHandler.record_temporary_file(hklout)

        integrater = si.get_integrater()
        refiner = integrater.get_integrater_refiner()

        if self._scalr_input_pointgroup:
          Debug.write('Using input pointgroup: %s' % \
                      self._scalr_input_pointgroup)
          pointgroup = self._scalr_input_pointgroup
          reindex_op = 'h,k,l'
          pt = False

        else:

          pointless_hklin = self._prepare_pointless_hklin(
              hklin, si.get_integrater().get_phi_width())

          pointgroup, reindex_op, ntr, pt = self._pointless_indexer_jiffy(
              pointless_hklin, refiner)

          Debug.write('X1698: %s: %s' % (pointgroup, reindex_op))

          if ntr:

            integrater.integrater_reset_reindex_operator()
            need_to_return = True

        if pt and not probably_twinned:
          probably_twinned = True

        Debug.write('Pointgroup: %s (%s)' % (pointgroup, reindex_op))

        pointgroups[epoch] = pointgroup
        reindex_ops[epoch] = reindex_op

    overall_pointgroup = None

    pointgroup_set = set([pointgroups[e] for e in pointgroups])

    if len(pointgroup_set) > 1 and not probably_twinned:
      raise RuntimeError('non uniform pointgroups')

    if len(pointgroup_set) > 1:
      Debug.write('Probably twinned, pointgroups: %s' % \
                  ' '.join([p.replace(' ', '') for p in \
                            list(pointgroup_set)]))
      numbers = [Syminfo.spacegroup_name_to_number(s) for s in \
                 pointgroup_set]
      overall_pointgroup = Syminfo.spacegroup_number_to_name(
          min(numbers))
      self._scalr_input_pointgroup = overall_pointgroup

      Chatter.write('Twinning detected, assume pointgroup %s' % \
                    overall_pointgroup)

      need_to_return = True

    else:
      overall_pointgroup = pointgroup_set.pop()

    for epoch in self._sweep_handler.get_epochs():
      si = self._sweep_handler.get_sweep_information(epoch)

      integrater = si.get_integrater()

      integrater.set_integrater_spacegroup_number(
          Syminfo.spacegroup_name_to_number(overall_pointgroup))
      integrater.set_integrater_reindex_operator(
          reindex_ops[epoch], reason='setting point group')
      # This will give us the reflections in the correct point group
      si.set_reflections(integrater.get_integrater_intensities())

    if need_to_return:
      self.set_scaler_done(False)
      self.set_scaler_prepare_done(False)
      return

    # in here now optionally work through the data files which should be
    # indexed with a consistent point group, and transform the orientation
    # matrices by the lattice symmetry operations (if possible) to get a
    # consistent definition of U matrix modulo fixed rotations

    if PhilIndex.params.xia2.settings.unify_setting:

      from scitbx.matrix import sqr
      reference_U = None
      i3 = sqr((1, 0, 0, 0, 1, 0, 0, 0, 1))

      for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)
        intgr = si.get_integrater()
        fixed = sqr(intgr.get_goniometer().get_fixed_rotation())
        u, b, s = get_umat_bmat_lattice_symmetry_from_mtz(si.get_reflections())
        U = fixed.inverse() * sqr(u).transpose()
        B = sqr(b)

        if reference_U is None:
          reference_U = U
          continue

        results = []
        for op in s.all_ops():
          R = B * sqr(op.r().as_double()).transpose() * B.inverse()
          nearly_i3 = (U * R).inverse() * reference_U
          score = sum([abs(_n - _i) for (_n, _i) in zip(nearly_i3, i3)])
          results.append((score, op.r().as_hkl(), op))

        results.sort()
        best = results[0]
        Debug.write('Best reindex: %s %.3f' % (best[1], best[0]))
        intgr.set_integrater_reindex_operator(best[2].r().inverse().as_hkl(),
                                              reason='unifying [U] setting')
        si.set_reflections(intgr.get_integrater_intensities())

        # recalculate to verify
        u, b, s = get_umat_bmat_lattice_symmetry_from_mtz(si.get_reflections())
        U = fixed.inverse() * sqr(u).transpose()
        Debug.write('New reindex: %s' % (U.inverse() * reference_U))

        # FIXME I should probably raise an exception at this stage if this
        # is not about I3...

    if self.get_scaler_reference_reflection_file():
      self._reference = self.get_scaler_reference_reflection_file()
      Debug.write('Using HKLREF %s' % self._reference)

    elif Flags.get_reference_reflection_file():
      self._reference = Flags.get_reference_reflection_file()
      Debug.write('Using HKLREF %s' % self._reference)

    params = PhilIndex.params
    use_brehm_diederichs = params.xia2.settings.use_brehm_diederichs
    if len(self._sweep_handler.get_epochs()) > 1 and use_brehm_diederichs:

      brehm_diederichs_files_in = []
      for epoch in self._sweep_handler.get_epochs():

        si = self._sweep_handler.get_sweep_information(epoch)
        hklin = si.get_reflections()
        brehm_diederichs_files_in.append(hklin)

      # now run cctbx.brehm_diederichs to figure out the indexing hand for
      # each sweep
      from xia2.Wrappers.Cctbx.BrehmDiederichs import BrehmDiederichs
      from xia2.lib.bits import auto_logfiler
      brehm_diederichs = BrehmDiederichs()
      brehm_diederichs.set_working_directory(self.get_working_directory())
      auto_logfiler(brehm_diederichs)
      brehm_diederichs.set_input_filenames(brehm_diederichs_files_in)
      # 1 or 3? 1 seems to work better?
      brehm_diederichs.set_asymmetric(1)
      brehm_diederichs.run()
      reindexing_dict = brehm_diederichs.get_reindexing_dict()

      for epoch in self._sweep_handler.get_epochs():

        si = self._sweep_handler.get_sweep_information(epoch)
        intgr = si.get_integrater()
        hklin = si.get_reflections()

        reindex_op = reindexing_dict.get(os.path.abspath(hklin))
        assert reindex_op is not None

        if 1 or reindex_op != 'h,k,l':
          # apply the reindexing operator
          intgr.set_integrater_reindex_operator(
            reindex_op, reason='match reference')
          si.set_reflections(intgr.get_integrater_intensities())

    elif len(self._sweep_handler.get_epochs()) > 1 and \
           not self._reference:

      first = self._sweep_handler.get_epochs()[0]
      si = self._sweep_handler.get_sweep_information(first)
      self._reference = si.get_reflections()

    if self._reference:

      md = self._factory.Mtzdump()
      md.set_hklin(self._reference)
      md.dump()

      if md.get_batches() and False:
        raise RuntimeError('reference reflection file %s unmerged' %
                           self._reference)

      datasets = md.get_datasets()

      if len(datasets) > 1 and False:
        raise RuntimeError('more than one dataset in %s' %
                           self._reference)

      # then get the unit cell, lattice etc.

      reference_lattice = Syminfo.get_lattice(md.get_spacegroup())
      reference_cell = md.get_dataset_info(datasets[0])['cell']

      # then compute the pointgroup from this...

      # ---------- REINDEX TO CORRECT (REFERENCE) SETTING ----------

      for epoch in self._sweep_handler.get_epochs():
        pl = self._factory.Pointless()

        si = self._sweep_handler.get_sweep_information(epoch)
        hklin = si.get_reflections()

        pl.set_hklin(self._prepare_pointless_hklin(
            hklin, si.get_integrater().get_phi_width()))

        hklout = os.path.join(
            self.get_working_directory(),
            '%s_rdx2.mtz' % os.path.split(hklin)[-1][:-4])

        # we will want to delete this on exit
        FileHandler.record_temporary_file(hklout)

        # now set the initial reflection set as a reference...

        pl.set_hklref(self._reference)

        # write a pointless log file...
        pl.decide_pointgroup()

        Debug.write('Reindexing analysis of %s' % pl.get_hklin())

        pointgroup = pl.get_pointgroup()
        reindex_op = pl.get_reindex_operator()

        Debug.write('Operator: %s' % reindex_op)

        # apply this...

        integrater = si.get_integrater()

        integrater.set_integrater_reindex_operator(reindex_op,
                                                   reason='match reference')
        integrater.set_integrater_spacegroup_number(
            Syminfo.spacegroup_name_to_number(pointgroup))
        si.set_reflections(integrater.get_integrater_intensities())

        md = self._factory.Mtzdump()
        md.set_hklin(si.get_reflections())
        md.dump()

        datasets = md.get_datasets()

        if len(datasets) > 1:
          raise RuntimeError('more than one dataset in %s' %
                             si.get_reflections())

        # then get the unit cell, lattice etc.

        lattice = Syminfo.get_lattice(md.get_spacegroup())
        cell = md.get_dataset_info(datasets[0])['cell']

        if lattice != reference_lattice:
          raise RuntimeError('lattices differ in %s and %s' %
                             (self._reference, si.get_reflections()))

        for j in range(6):
          if math.fabs((cell[j] - reference_cell[j]) /
                       reference_cell[j]) > 0.1:
            raise RuntimeError(
                'unit cell parameters differ in %s and %s' %
                (self._reference, si.get_reflections()))

    # ---------- SORT TOGETHER DATA ----------

    self._sort_together_data_ccp4()

    self._scalr_resolution_limits = { }

    # store central resolution limit estimates

    batch_ranges = [
        self._sweep_handler.get_sweep_information(epoch).get_batch_range()
        for epoch in self._sweep_handler.get_epochs()]

    self._resolution_limit_estimates = erzatz_resolution(
        self._prepared_reflections, batch_ranges)


    return
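
The batch-offsetting scheme that appears twice in this example is worth spelling out: the largest per-sweep batch count is rounded up to a power of ten (nifty_power_of_ten), and each sweep is then rebatched into its own block so that batch numbers cannot collide when sortmtz merges the files. A sketch of the arithmetic alone, with batches_per_sweep as a made-up input:

def batch_offsets(batches_per_sweep, max_batches):
    # max_batches is assumed to already be a power of ten at least as
    # large as the biggest sweep, cf. nifty_power_of_ten above
    offsets = []
    for counter, batches in enumerate(batches_per_sweep):
        first_batch = min(batches)
        # sweep 0 starts at batch 1, sweep 1 at max_batches + 1, ...
        offsets.append(counter * max_batches - first_batch + 1)
    return offsets

# e.g. two 360-image sweeps with max_batches = 1000:
assert batch_offsets([range(1, 361), range(1, 361)], 1000) == [0, 1000]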
Example #24
  def _index(self):
    '''Actually index the diffraction pattern. Note well that
    this is not going to compute the matrix...'''

    # acknowledge this program

    if not self._indxr_images:
      raise RuntimeError('No good spots found on any images')

    Citations.cite('labelit')
    Citations.cite('distl')

    _images = []
    for i in self._indxr_images:
      for j in i:
        if not j in _images:
          _images.append(j)

    _images.sort()

    images_str = '%d' % _images[0]
    for i in _images[1:]:
      images_str += ', %d' % i

    cell_str = None
    if self._indxr_input_cell:
      cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % \
                  self._indxr_input_cell

    if self._indxr_sweep_name:

      # then this is a proper autoindexing run - describe this
      # to the journal entry

      if len(self._fp_directory) <= 50:
        dirname = self._fp_directory
      else:
        dirname = '...%s' % self._fp_directory[-46:]

      Journal.block(
          'autoindexing', self._indxr_sweep_name, 'labelit',
          {'images':images_str,
           'target cell':cell_str,
           'target lattice':self._indxr_input_lattice,
           'template':self._fp_template,
           'directory':dirname})

    #auto_logfiler(self)

    from xia2.Wrappers.Labelit.LabelitIndex import LabelitIndex
    index = LabelitIndex()
    index.set_working_directory(self.get_working_directory())
    auto_logfiler(index)

    #task = 'Autoindex from images:'

    #for i in _images:
      #task += ' %s' % self.get_image_name(i)

    #self.set_task(task)

    #self.add_command_line('--index_only')

    Debug.write('Indexing from images:')
    for i in _images:
      index.add_image(self.get_image_name(i))
      Debug.write('%s' % self.get_image_name(i))

    if self._indxr_input_lattice and False:
      index.set_space_group_number(
        lattice_to_spacegroup(self._indxr_input_lattice))

    if self._primitive_unit_cell:
      index.set_primitive_unit_cell(self._primitive_unit_cell)

    if self._indxr_input_cell:
      index.set_max_cell(1.25 * max(self._indxr_input_cell[:3]))

    xsweep = self.get_indexer_sweep()
    if xsweep is not None:
      if xsweep.get_distance() is not None:
        index.set_distance(xsweep.get_distance())
      #if self.get_wavelength_prov() == 'user':
        #index.set_wavelength(self.get_wavelength())
      if xsweep.get_beam_centre() is not None:
        index.set_beam_centre(xsweep.get_beam_centre())

    if self._refine_beam is False:
      index.set_refine_beam(False)
    else:
      index.set_refine_beam(True)
      index.set_beam_search_scope(self._beam_search_scope)

    if ((math.fabs(self.get_wavelength() - 1.54) < 0.01) or
        (math.fabs(self.get_wavelength() - 2.29) < 0.01)):
      index.set_Cu_KA_or_Cr_KA(True)

    try:
      index.run()
    except RuntimeError as e:

      if self._refine_beam is False:
        raise e

      # can we improve the situation?

      if self._beam_search_scope < 4.0:
        self._beam_search_scope += 4.0

        # try repeating the indexing!

        self.set_indexer_done(False)
        return 'failed'

      # otherwise this is beyond redemption

      raise e
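
The error handling here follows a retry pattern seen throughout these examples: on failure the beam search scope is widened, the indexer's "done" flag is cleared, and the method returns so that the surrounding framework calls _index() again. Condensed into a plain loop, with index_once standing in for the labelit run and the initial scope value assumed:

def index_with_widening_search(index_once, max_scope=4.0):
    # index_once(scope) is assumed to raise RuntimeError on failure;
    # scope is the beam-centre search radius
    scope = 0.0
    while True:
        try:
            return index_once(scope)
        except RuntimeError:
            if scope >= max_scope:
                raise  # beyond redemption, as the comment above puts it
            scope += 4.0  # widen the search and try again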
Example #25
    def _index(self):
        '''Actually index the diffraction pattern. Note well that
        this is not going to compute the matrix...'''

        # acknowledge this program

        Citations.cite('labelit')
        Citations.cite('distl')

        #self.reset()

        _images = []
        for i in self._indxr_images:
            for j in i:
                if not j in _images:
                    _images.append(j)

        _images.sort()

        images_str = '%d' % _images[0]
        for i in _images[1:]:
            images_str += ', %d' % i

        cell_str = None
        if self._indxr_input_cell:
            cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % \
                        self._indxr_input_cell

        if self._indxr_sweep_name:

            # then this is a proper autoindexing run - describe this
            # to the journal entry

            #if len(self._fp_directory) <= 50:
            #dirname = self._fp_directory
            #else:
            #dirname = '...%s' % self._fp_directory[-46:]
            dirname = os.path.dirname(self.get_imageset().get_template())

            Journal.block(
                'autoindexing', self._indxr_sweep_name, 'labelit', {
                    'images': images_str,
                    'target cell': cell_str,
                    'target lattice': self._indxr_input_lattice,
                    'template': self.get_imageset().get_template(),
                    'directory': dirname
                })

        if len(_images) > 4:
            raise RuntimeError('cannot use more than 4 images')

        from xia2.Wrappers.Labelit.LabelitIndex import LabelitIndex
        index = LabelitIndex()
        index.set_working_directory(self.get_working_directory())
        auto_logfiler(index)

        #task = 'Autoindex from images:'

        #for i in _images:
        #task += ' %s' % self.get_image_name(i)

        #self.set_task(task)

        Debug.write('Indexing from images:')
        for i in _images:
            index.add_image(self.get_image_name(i))
            Debug.write('%s' % self.get_image_name(i))

        xsweep = self.get_indexer_sweep()
        if xsweep is not None:
            if xsweep.get_distance() is not None:
                index.set_distance(xsweep.get_distance())
            #if self.get_wavelength_prov() == 'user':
            #index.set_wavelength(self.get_wavelength())
            if xsweep.get_beam_centre() is not None:
                index.set_beam_centre(xsweep.get_beam_centre())

        if self._refine_beam is False:
            index.set_refine_beam(False)
        else:
            index.set_refine_beam(True)
            index.set_beam_search_scope(self._beam_search_scope)

        if ((math.fabs(self.get_wavelength() - 1.54) < 0.01)
                or (math.fabs(self.get_wavelength() - 2.29) < 0.01)):
            index.set_Cu_KA_or_Cr_KA(True)

        #sweep = self.get_indexer_sweep_name()
        #FileHandler.record_log_file(
        #'%s INDEX' % (sweep), self.get_log_file())

        try:
            index.run()
        except RuntimeError as e:

            if self._refine_beam is False:
                raise e

            # can we improve the situation?

            if self._beam_search_scope < 4.0:
                self._beam_search_scope += 4.0

                # try repeating the indexing!

                self.set_indexer_done(False)
                return 'failed'

            # otherwise this is beyond redemption

            raise e

        self._solutions = index.get_solutions()

        # FIXME this needs to check the smilie status e.g.
        # ":)" or ";(" or "  ".

        # FIXME need to check the value of the RMSD and raise an
        # exception if the P1 solution has an RMSD > 1.0...

        # Change 27/FEB/08 to support user assigned spacegroups
        # (euugh!) have to "ignore" solutions with higher symmetry
        # otherwise the rest of xia will override us. Bummer.

        # iterate over a copy, since entries may be deleted below
        for i, solution in list(self._solutions.items()):
            if self._indxr_user_input_lattice:
                if (lattice_to_spacegroup(solution['lattice']) >
                        lattice_to_spacegroup(self._indxr_input_lattice)):
                    Debug.write('Ignoring solution: %s' % solution['lattice'])
                    del self._solutions[i]

        # check the RMSD from the triclinic unit cell
        if self._solutions[1]['rmsd'] > 1.0 and False:
            # don't know when this is useful - but I know when it is not!
            raise RuntimeError('high RMSD for triclinic solution')

        # configure the "right" solution
        self._solution = self.get_solution()

        # now store also all of the other solutions... keyed by the
        # lattice - however these should only be added if they
        # have a smiley in the appropriate record, perhaps?

        for solution in self._solutions.keys():
            lattice = self._solutions[solution]['lattice']
            if lattice in self._indxr_other_lattice_cell:
                if self._indxr_other_lattice_cell[lattice]['goodness'] < \
                   self._solutions[solution]['metric']:
                    continue

            self._indxr_other_lattice_cell[lattice] = {
                'goodness': self._solutions[solution]['metric'],
                'cell': self._solutions[solution]['cell']
            }

        self._indxr_lattice = self._solution['lattice']
        self._indxr_cell = tuple(self._solution['cell'])
        self._indxr_mosaic = self._solution['mosaic']

        lms = LabelitMosflmScript()
        lms.set_working_directory(self.get_working_directory())
        lms.set_solution(self._solution['number'])
        self._indxr_payload['mosflm_orientation_matrix'] = lms.calculate()

        # get the beam centre from the mosflm script - mosflm
        # may have inverted the beam centre and labelit will know
        # this!

        mosflm_beam_centre = lms.get_mosflm_beam()

        if mosflm_beam_centre:
            self._indxr_payload['mosflm_beam_centre'] = tuple(
                mosflm_beam_centre)

        import copy
        detector = copy.deepcopy(self.get_detector())
        beam = copy.deepcopy(self.get_beam())
        from dxtbx.model.detector_helpers import set_mosflm_beam_centre
        set_mosflm_beam_centre(detector, beam, mosflm_beam_centre)

        from xia2.Experts.SymmetryExpert import lattice_to_spacegroup_number
        from scitbx import matrix
        from cctbx import sgtbx, uctbx
        from dxtbx.model import CrystalFactory
        mosflm_matrix = matrix.sqr([
            float(i) for line in lms.calculate()
            for i in line.replace("-", " -").split()
        ][:9])

        space_group = sgtbx.space_group_info(
            lattice_to_spacegroup_number(self._solution['lattice'])).group()
        crystal_model = CrystalFactory.from_mosflm_matrix(
            mosflm_matrix,
            unit_cell=uctbx.unit_cell(tuple(self._solution['cell'])),
            space_group=space_group)

        from dxtbx.model import Experiment, ExperimentList
        experiment = Experiment(
            beam=beam,
            detector=detector,
            goniometer=self.get_goniometer(),
            scan=self.get_scan(),
            crystal=crystal_model,
        )

        experiment_list = ExperimentList([experiment])
        self.set_indexer_experiment_list(experiment_list)

        # also get an estimate of the resolution limit from the
        # labelit.stats_distl output... FIXME the name is wrong!

        lsd = LabelitStats_distl()
        lsd.set_working_directory(self.get_working_directory())
        lsd.stats_distl()

        resolution = 1.0e6
        for i in _images:
            stats = lsd.get_statistics(self.get_image_name(i))

            resol = 0.5 * (stats['resol_one'] + stats['resol_two'])

            if resol < resolution:
                resolution = resol

        self._indxr_resolution_estimate = resolution

        return 'ok'
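
A detail worth isolating from this example is the parsing of the mosflm orientation matrix: mosflm writes fixed-width columns in which a minus sign can butt directly against the preceding number, so '-' is padded with a space before splitting, and only the first nine values form the 3x3 matrix. As a standalone sketch (assuming fixed-point values, as mosflm writes them; scientific notation would break the padding trick):

from scitbx import matrix


def parse_mosflm_matrix(lines):
    # pad '-' so '0.123-0.456' splits into two tokens, then keep the
    # first nine numbers as the orientation matrix
    values = [float(v) for line in lines
              for v in line.replace('-', ' -').split()]
    return matrix.sqr(values[:9])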
Example #26
  def _scale_prepare(self):
    '''Prepare the data for scaling - this will reindex the
    reflections to the correct pointgroup and setting, for instance,
    and move the reflection files to the scale directory.'''

    Citations.cite('xds')
    Citations.cite('ccp4')
    Citations.cite('pointless')

    # GATHER phase - get the reflection files together... note that
    # it is not necessary in here to keep the batch information as we
    # don't wish to rebatch the reflections prior to scaling.
    # FIXME need to think about what I will do about the radiation
    # damage analysis in here...

    self._sweep_information = { }

    # FIXME in here I want to record the batch number to
    # epoch mapping as per the CCP4 Scaler implementation.

    Journal.block(
        'gathering', self.get_scaler_xcrystal().get_name(), 'XDS',
        {'working directory':self.get_working_directory()})

    for epoch in self._scalr_integraters.keys():
      intgr = self._scalr_integraters[epoch]
      pname, xname, dname = intgr.get_integrater_project_info()
      sname = intgr.get_integrater_sweep_name()
      self._sweep_information[epoch] = {
          'pname':pname,
          'xname':xname,
          'dname':dname,
          'integrater':intgr,
          'corrected_intensities':intgr.get_integrater_corrected_intensities(),
          'prepared_reflections':None,
          'scaled_reflections':None,
          'header':intgr.get_header(),
          'batches':intgr.get_integrater_batches(),
          'image_to_epoch':intgr.get_integrater_sweep(
          ).get_image_to_epoch(),
          'image_to_dose':{},
          'batch_offset':0,
          'sname':sname
          }

      Journal.entry({'adding data from':'%s/%s/%s' % \
                     (xname, dname, sname)})

      # what are these used for?
      # pname / xname / dname - dataset identifiers
      # image to epoch / batch offset / batches - for RD analysis

      Debug.write('For EPOCH %s have:' % str(epoch))
      Debug.write('ID = %s/%s/%s' % (pname, xname, dname))
      Debug.write('SWEEP = %s' % intgr.get_integrater_sweep_name())

    # next work through all of the reflection files and make sure that
    # they are XDS_ASCII format...

    epochs = sorted(self._sweep_information)

    self._first_epoch = min(epochs)

    self._scalr_pname = self._sweep_information[epochs[0]]['pname']
    self._scalr_xname = self._sweep_information[epochs[0]]['xname']

    for epoch in epochs:
      intgr = self._scalr_integraters[epoch]
      pname = self._sweep_information[epoch]['pname']
      xname = self._sweep_information[epoch]['xname']
      dname = self._sweep_information[epoch]['dname']
      sname = self._sweep_information[epoch]['sname']
      if self._scalr_pname != pname:
        raise RuntimeError('all data must have a common project name')
      xname = self._sweep_information[epoch]['xname']
      if self._scalr_xname != xname:
        raise RuntimeError('all data for scaling must come from one crystal')

      xsh = XDSScalerHelper()
      xsh.set_working_directory(self.get_working_directory())
      hklin = self._sweep_information[epoch]['corrected_intensities']
      hklout = os.path.join(self.get_working_directory(),
                            '%s_%s_%s_%s_CORRECTED.HKL' %(
                              pname, xname, dname, sname))
      sweep = intgr.get_integrater_sweep()
      if sweep.get_frames_to_process() is not None:
        offset = intgr.get_frame_offset()
        #print "offset: %d" %offset
        start, end = sweep.get_frames_to_process()
        start -= offset
        end -= offset
        #end += 1 ????
        #print "limiting batches: %d-%d" %(start, end)
        xsh.limit_batches(hklin, hklout, start, end)
        self._sweep_information[epoch]['corrected_intensities'] = hklout

    # if there is more than one sweep then compare the lattices
    # and eliminate all but the lowest symmetry examples if
    # there are more than one...

    # -------------------------------------------------
    # Ensure that the integration lattices are the same
    # -------------------------------------------------

    need_to_return = False

    if len(self._sweep_information.keys()) > 1:

      lattices = []

      # FIXME run this stuff in parallel as well...

      for epoch in self._sweep_information.keys():

        intgr = self._sweep_information[epoch]['integrater']
        hklin = self._sweep_information[epoch]['corrected_intensities']
        refiner = intgr.get_integrater_refiner()

        if self._scalr_input_pointgroup:
          pointgroup = self._scalr_input_pointgroup
          reindex_op = 'h,k,l'
          ntr = False

        else:

          pointgroup, reindex_op, ntr = \
                      self._pointless_indexer_jiffy(hklin, refiner)

          Debug.write('X1698: %s: %s' % (pointgroup, reindex_op))

        lattice = Syminfo.get_lattice(pointgroup)

        if not lattice in lattices:
          lattices.append(lattice)

        if ntr:

          # if we need to return, we should logically reset
          # any reindexing operator right? right here all
          # we are talking about is the correctness of
          # individual pointgroups?? Bug # 3373

          reindex_op = 'h,k,l'
          # actually, should this not be done "by magic"
          # when a new pointgroup is assigned in the
          # pointless indexer jiffy above?!

          intgr.set_integrater_reindex_operator(
              reindex_op, compose = False)

          need_to_return = True

      # bug # 2433 - need to ensure that all of the lattice
      # conclusions were the same...

      if len(lattices) > 1:
        ordered_lattices = []
        for l in lattices_in_order():
          if l in lattices:
            ordered_lattices.append(l)

        correct_lattice = ordered_lattices[0]
        Debug.write('Correct lattice asserted to be %s' % \
                    correct_lattice)

        # transfer this information back to the indexers
        for epoch in self._sweep_information.keys():
          integrater = self._sweep_information[
              epoch]['integrater']
          refiner = integrater.get_integrater_refiner()
          sname = integrater.get_integrater_sweep_name()

          if not refiner:
            continue

          state = refiner.set_refiner_asserted_lattice(
              correct_lattice)
          if state == refiner.LATTICE_CORRECT:
            Debug.write('Lattice %s ok for sweep %s' % \
                        (correct_lattice, sname))
          elif state == refiner.LATTICE_IMPOSSIBLE:
            raise RuntimeError('Lattice %s impossible for %s' %
                               (correct_lattice, sname))
          elif state == refiner.LATTICE_POSSIBLE:
            Debug.write('Lattice %s assigned for sweep %s' % \
                        (correct_lattice, sname))
            need_to_return = True

    # if one or more of them was not in the lowest lattice,
    # need to return here to allow reprocessing

    if need_to_return:
      self.set_scaler_done(False)
      self.set_scaler_prepare_done(False)
      return

    # next if there is more than one sweep then generate
    # a merged reference reflection file to check that the
    # setting for all reflection files is the same...

    # if we get to here then all data was processed with the same
    # lattice

    # ----------------------------------------------------------
    # next ensure that all sweeps are set in the correct setting
    # ----------------------------------------------------------

    if self.get_scaler_reference_reflection_file():
      self._reference = self.get_scaler_reference_reflection_file()
      Debug.write('Using HKLREF %s' % self._reference)

      md = self._factory.Mtzdump()
      md.set_hklin(self.get_scaler_reference_reflection_file())
      md.dump()

      self._xds_spacegroup = Syminfo.spacegroup_name_to_number(
          md.get_spacegroup())

      Debug.write('Spacegroup %d' % self._xds_spacegroup)

    elif PhilIndex.params.xia2.settings.scale.reference_reflection_file:
      self._reference = PhilIndex.params.xia2.settings.scale.reference_reflection_file

      Debug.write('Using HKLREF %s' % self._reference)

      md = self._factory.Mtzdump()
      md.set_hklin(PhilIndex.params.xia2.settings.scale.reference_reflection_file)
      md.dump()

      self._xds_spacegroup = Syminfo.spacegroup_name_to_number(
          md.get_spacegroup())

      Debug.write('Spacegroup %d' % self._xds_spacegroup)

    params = PhilIndex.params
    use_brehm_diederichs = params.xia2.settings.use_brehm_diederichs
    if len(self._sweep_information.keys()) > 1 and use_brehm_diederichs:
      brehm_diederichs_files_in = []
      for epoch in self._sweep_information.keys():

        intgr = self._sweep_information[epoch]['integrater']
        hklin = self._sweep_information[epoch]['corrected_intensities']
        refiner = intgr.get_integrater_refiner()

        # in here need to consider what to do if the user has
        # assigned the pointgroup on the command line ...

        if not self._scalr_input_pointgroup:
          pointgroup, reindex_op, ntr = \
                      self._pointless_indexer_jiffy(hklin, refiner)

          if ntr:

            # Bug # 3373

            Debug.write('Reindex to standard (PIJ): %s' % \
                        reindex_op)

            intgr.set_integrater_reindex_operator(
                reindex_op, compose = False)
            reindex_op = 'h,k,l'
            need_to_return = True

        else:

          # 27/FEB/08 to support user assignment of pointgroups

          Debug.write('Using input pointgroup: %s' % \
                      self._scalr_input_pointgroup)
          pointgroup = self._scalr_input_pointgroup
          reindex_op = 'h,k,l'

        intgr.set_integrater_reindex_operator(reindex_op)
        intgr.set_integrater_spacegroup_number(
            Syminfo.spacegroup_name_to_number(pointgroup))
        self._sweep_information[epoch]['corrected_intensities'] \
          = intgr.get_integrater_corrected_intensities()

        # convert the XDS_ASCII for this sweep to mtz - on the next
        # get this should be in the correct setting...

        dname = self._sweep_information[epoch]['dname']
        sname = intgr.get_integrater_sweep_name()
        hklin = self._sweep_information[epoch]['corrected_intensities']
        hklout = os.path.join(self.get_working_directory(),
                              '%s_%s.mtz' % (dname, sname))

        FileHandler.record_temporary_file(hklout)

        # now use pointless to make this conversion

        pointless = self._factory.Pointless()
        pointless.set_xdsin(hklin)
        pointless.set_hklout(hklout)
        pointless.xds_to_mtz()
        brehm_diederichs_files_in.append(hklout)

      # now run cctbx.brehm_diederichs to figure out the indexing hand for
      # each sweep
      from xia2.Wrappers.Cctbx.BrehmDiederichs import BrehmDiederichs
      brehm_diederichs = BrehmDiederichs()
      brehm_diederichs.set_working_directory(self.get_working_directory())
      auto_logfiler(brehm_diederichs)
      brehm_diederichs.set_input_filenames(brehm_diederichs_files_in)
      # 1 or 3? 1 seems to work better?
      brehm_diederichs.set_asymmetric(1)
      brehm_diederichs.run()
      reindexing_dict = brehm_diederichs.get_reindexing_dict()

      for epoch in self._sweep_information.keys():

        intgr = self._sweep_information[epoch]['integrater']

        dname = self._sweep_information[epoch]['dname']
        sname = intgr.get_integrater_sweep_name()
        hklin = self._sweep_information[epoch]['corrected_intensities']
        hklout = os.path.join(self.get_working_directory(),
                              '%s_%s.mtz' % (dname, sname))

        # look up and apply the reindexing operator determined for this
        # sweep's mtz file (previously this reused a stale value from the
        # loop above)
        reindex_op = reindexing_dict.get(os.path.abspath(hklout))
        assert reindex_op is not None
        intgr.set_integrater_reindex_operator(reindex_op)

        # and copy the reflection file to the local directory
        hklin = self._sweep_information[epoch]['corrected_intensities']
        hklout = os.path.join(self.get_working_directory(),
                              '%s_%s.HKL' % (dname, sname))

        Debug.write('Copying %s to %s' % (hklin, hklout))
        shutil.copyfile(hklin, hklout)

        # record just the local file name...
        self._sweep_information[epoch][
            'prepared_reflections'] = os.path.split(hklout)[-1]

    elif len(self._sweep_information.keys()) > 1 and \
           not self._reference:
      # need to generate a reference reflection file - generate this
      # from the reflections in self._first_epoch
      #
      # FIXME this should really use the Brehm and Diederichs method
      # if you have lots of little sweeps...

      # use the first epoch consistently here (hklin previously read a
      # stale loop variable)
      epoch = self._first_epoch
      intgr = self._sweep_information[epoch]['integrater']

      hklin = self._sweep_information[epoch]['corrected_intensities']
      refiner = intgr.get_integrater_refiner()

      if self._scalr_input_pointgroup:
        Debug.write('Using input pointgroup: %s' % \
                    self._scalr_input_pointgroup)
        pointgroup = self._scalr_input_pointgroup
        ntr = False
        reindex_op = 'h,k,l'

      else:
        pointgroup, reindex_op, ntr = self._pointless_indexer_jiffy(
            hklin, refiner)

        Debug.write('X1698: %s: %s' % (pointgroup, reindex_op))

      reference_reindex_op = intgr.get_integrater_reindex_operator()

      if ntr:

        # Bug # 3373

        intgr.set_integrater_reindex_operator(
            reindex_op, compose = False)
        reindex_op = 'h,k,l'
        need_to_return = True

      self._xds_spacegroup = Syminfo.spacegroup_name_to_number(pointgroup)

      # next pass this reindexing operator back to the source
      # of the reflections

      intgr.set_integrater_reindex_operator(reindex_op)
      intgr.set_integrater_spacegroup_number(
          Syminfo.spacegroup_name_to_number(pointgroup))
      self._sweep_information[epoch]['corrected_intensities'] \
        = intgr.get_integrater_corrected_intensities()

      hklin = self._sweep_information[epoch]['corrected_intensities']

      hklout = os.path.join(self.get_working_directory(),
                            'xds-pointgroup-reference-unsorted.mtz')
      FileHandler.record_temporary_file(hklout)

      # now use pointless to handle this conversion

      pointless = self._factory.Pointless()
      pointless.set_xdsin(hklin)
      pointless.set_hklout(hklout)
      pointless.xds_to_mtz()

      self._reference = hklout

    if self._reference:

      from xia2.Driver.DriverFactory import DriverFactory

      def run_one_sweep(args):
        sweep_information = args[0]
        pointless_indexer_jiffy = args[1]
        factory = args[2]
        job_type = args[3]

        if job_type:
          DriverFactory.set_driver_type(job_type)

        intgr = sweep_information['integrater']
        hklin = sweep_information['corrected_intensities']
        refiner = intgr.get_integrater_refiner()

        # in here need to consider what to do if the user has
        # assigned the pointgroup on the command line ...

        if not self._scalr_input_pointgroup:
          pointgroup, reindex_op, ntr = \
                      self._pointless_indexer_jiffy(hklin, refiner)

          if ntr:

            # Bug # 3373

            Debug.write('Reindex to standard (PIJ): %s' % \
                        reindex_op)

            intgr.set_integrater_reindex_operator(
                reindex_op, compose = False)
            reindex_op = 'h,k,l'
            need_to_return = True

        else:

          # 27/FEB/08 to support user assignment of pointgroups

          Debug.write('Using input pointgroup: %s' % \
                      self._scalr_input_pointgroup)
          pointgroup = self._scalr_input_pointgroup
          reindex_op = 'h,k,l'

        intgr.set_integrater_reindex_operator(reindex_op)
        intgr.set_integrater_spacegroup_number(
            Syminfo.spacegroup_name_to_number(pointgroup))
        sweep_information['corrected_intensities'] \
          = intgr.get_integrater_corrected_intensities()

        # convert the XDS_ASCII for this sweep to mtz - on the next
        # get this should be in the correct setting...

        hklin = sweep_information['corrected_intensities']

        # now use pointless to make this conversion

        # try with no conversion?!

        pointless = self._factory.Pointless()
        pointless.set_xdsin(hklin)
        hklout = os.path.join(
          self.get_working_directory(),
          '%d_xds-pointgroup-unsorted.mtz' %pointless.get_xpid())
        FileHandler.record_temporary_file(hklout)
        pointless.set_hklout(hklout)
        pointless.xds_to_mtz()

        pointless = self._factory.Pointless()
        pointless.set_hklin(hklout)
        pointless.set_hklref(self._reference)
        pointless.decide_pointgroup()

        pointgroup = pointless.get_pointgroup()
        reindex_op = pointless.get_reindex_operator()

        # for debugging print out the reindexing operations and
        # what have you...

        Debug.write('Reindex to standard: %s' % reindex_op)

        # this should send back enough information that this
        # is in the correct pointgroup (from the call above) and
        # also in the correct setting, from the interaction
        # with the reference set... - though I guess that the
        # spacegroup number should not have changed, right?

        # set the reindex operation afterwards... though if the
        # spacegroup number is the same this should make no
        # difference, right?!

        intgr.set_integrater_spacegroup_number(
            Syminfo.spacegroup_name_to_number(pointgroup))
        intgr.set_integrater_reindex_operator(reindex_op)
        sweep_information['corrected_intensities'] \
          = intgr.get_integrater_corrected_intensities()

        # and copy the reflection file to the local directory

        dname = sweep_information['dname']
        sname = intgr.get_integrater_sweep_name()
        hklin = sweep_information['corrected_intensities']
        hklout = os.path.join(self.get_working_directory(),
                              '%s_%s.HKL' % (dname, sname))

        Debug.write('Copying %s to %s' % (hklin, hklout))
        shutil.copyfile(hklin, hklout)

        # record just the local file name...
        sweep_information['prepared_reflections'] = os.path.split(hklout)[-1]
        return sweep_information

      from libtbx import easy_mp
      params = PhilIndex.get_python_object()
      mp_params = params.xia2.settings.multiprocessing
      njob = mp_params.njob

      if njob > 1:
        # cache drivertype
        drivertype = DriverFactory.get_driver_type()

        args = [
          (self._sweep_information[epoch], self._pointless_indexer_jiffy,
           self._factory, mp_params.type)
                for epoch in self._sweep_information.keys()]
        results_list = easy_mp.parallel_map(
          run_one_sweep, args, params=None,
          processes=njob,
          method="threading",
          asynchronous=True,
          callback=None,
          preserve_order=True,
          preserve_exception_message=True)

        # restore drivertype
        DriverFactory.set_driver_type(drivertype)

        # results should be given back in the same order
        for i, epoch in enumerate(self._sweep_information.keys()):
          self._sweep_information[epoch] = results_list[i]

      else:
        for epoch in self._sweep_information.keys():
          self._sweep_information[epoch] = run_one_sweep(
            (self._sweep_information[epoch], self._pointless_indexer_jiffy,
             self._factory, None))

    else:
      # convert the XDS_ASCII for this sweep to mtz

      epoch = self._first_epoch
      intgr = self._sweep_information[epoch]['integrater']
      refiner = intgr.get_integrater_refiner()
      sname = intgr.get_integrater_sweep_name()

      hklout = os.path.join(self.get_working_directory(),
                            '%s-pointless.mtz' % sname)
      FileHandler.record_temporary_file(hklout)

      pointless = self._factory.Pointless()
      pointless.set_xdsin(self._sweep_information[epoch]['corrected_intensities'])
      pointless.set_hklout(hklout)
      pointless.xds_to_mtz()

      # run it through pointless interacting with the
      # Indexer which belongs to this sweep

      hklin = hklout

      if self._scalr_input_pointgroup:
        Debug.write('Using input pointgroup: %s' % \
                    self._scalr_input_pointgroup)
        pointgroup = self._scalr_input_pointgroup
        ntr = False
        reindex_op = 'h,k,l'

      else:
        pointgroup, reindex_op, ntr = self._pointless_indexer_jiffy(
            hklin, refiner)

      if ntr:

        # if we need to return at this point we should logically reset
        # any reindexing operator - all that matters here is the
        # correctness of the individual pointgroups (bug # 3373)

        reindex_op = 'h,k,l'
        intgr.set_integrater_reindex_operator(
            reindex_op, compose = False)

        need_to_return = True

      self._xds_spacegroup = Syminfo.spacegroup_name_to_number(pointgroup)

      # next pass this reindexing operator back to the source
      # of the reflections

      intgr.set_integrater_reindex_operator(reindex_op)
      intgr.set_integrater_spacegroup_number(
          Syminfo.spacegroup_name_to_number(pointgroup))
      self._sweep_information[epoch]['corrected_intensities'] \
        = intgr.get_integrater_corrected_intensities()

      hklin = self._sweep_information[epoch]['corrected_intensities']
      dname = self._sweep_information[epoch]['dname']
      hklout = os.path.join(self.get_working_directory(),
                            '%s_%s.HKL' % (dname, sname))

      # and copy the reflection file to the local
      # directory

      Debug.write('Copying %s to %s' % (hklin, hklout))
      shutil.copyfile(hklin, hklout)

      # record just the local file name...
      self._sweep_information[epoch][
          'prepared_reflections'] = os.path.split(hklout)[-1]

    if need_to_return:
      self.set_scaler_done(False)
      self.set_scaler_prepare_done(False)
      return

    unit_cell_list = []

    for epoch in self._sweep_information.keys():
      integrater = self._sweep_information[epoch]['integrater']
      cell = integrater.get_integrater_cell()
      n_ref = integrater.get_integrater_n_ref()

      Debug.write('Cell for %s: %.2f %.2f %.2f %.2f %.2f %.2f' % \
                  (integrater.get_integrater_sweep_name(),
                   cell[0], cell[1], cell[2],
                   cell[3], cell[4], cell[5]))
      Debug.write('=> %d reflections' % n_ref)

      unit_cell_list.append((cell, n_ref))

    self._scalr_cell = compute_average_unit_cell(unit_cell_list)

    self._scalr_resolution_limits = { }

    Debug.write('Determined unit cell: %.2f %.2f %.2f %.2f %.2f %.2f' % \
                tuple(self._scalr_cell))

    if os.path.exists(os.path.join(
        self.get_working_directory(),
        'REMOVE.HKL')):
      os.remove(os.path.join(
          self.get_working_directory(),
          'REMOVE.HKL'))

      Debug.write('Deleting REMOVE.HKL at end of scale prepare.')

    return
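A minimal, self-contained sketch of the fan-out pattern used above, assuming a cctbx/libtbx installation: one plain function is applied per sweep and easy_mp.parallel_map hands the results back in input order. process_sweep and its argument tuples are hypothetical stand-ins for run_one_sweep and the per-sweep state; the parallel_map keyword arguments mirror the call in the example.

from libtbx import easy_mp

def process_sweep(args):
    # unpack the per-sweep state (stand-in for the sweep_information,
    # indexer jiffy, factory and multiprocessing type passed above)
    sweep_name, = args
    # ... pointless / reindex / copy steps would go here ...
    return {'sweep': sweep_name,
            'prepared_reflections': '%s.HKL' % sweep_name}

args = [('SWEEP1',), ('SWEEP2',)]
results = easy_mp.parallel_map(
    process_sweep, args, params=None,
    processes=2,
    method='threading',
    asynchronous=True,
    callback=None,
    preserve_order=True,             # results[i] corresponds to args[i]
    preserve_exception_message=True)
assert [r['sweep'] for r in results] == ['SWEEP1', 'SWEEP2']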
Example #27
  def _index(self):
    '''Actually index the diffraction pattern. Note well that
    this is not going to compute the matrix...'''

    # acknowledge this program

    Citations.cite('labelit')
    Citations.cite('distl')

    _images = []
    for i in self._indxr_images:
      for j in i:
        if not j in _images:
          _images.append(j)

    _images.sort()

    images_str = ', '.join(map(str, _images))

    cell_str = None
    if self._indxr_input_cell:
      cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % \
                  self._indxr_input_cell

    if self._indxr_sweep_name:

      # then this is a proper autoindexing run - describe this
      # to the journal entry

      dirname = os.path.dirname(self.get_imageset().get_template())

      Journal.block(
          'autoindexing', self._indxr_sweep_name, 'labelit',
          {'images':images_str,
           'target cell':cell_str,
           'target lattice':self._indxr_input_lattice,
           'template':self.get_imageset().get_template(),
           'directory':dirname})

    if len(_images) > 4:
      raise RuntimeError('cannot use more than 4 images')

    from xia2.Wrappers.Labelit.LabelitIndex import LabelitIndex
    index = LabelitIndex()
    index.set_working_directory(self.get_working_directory())
    auto_logfiler(index)

    Debug.write('Indexing from images:')
    for i in _images:
      index.add_image(self.get_image_name(i))
      Debug.write('%s' % self.get_image_name(i))

    xsweep = self.get_indexer_sweep()
    if xsweep is not None:
      if xsweep.get_distance() is not None:
        index.set_distance(xsweep.get_distance())
      if xsweep.get_beam_centre() is not None:
        index.set_beam_centre(xsweep.get_beam_centre())

    if self._refine_beam is False:
      index.set_refine_beam(False)
    else:
      index.set_refine_beam(True)
      index.set_beam_search_scope(self._beam_search_scope)

    # wavelengths close to Cu Kα (1.54 Å) or Cr Kα (2.29 Å) suggest a
    # home source - labelit has a dedicated flag for that case
    if ((math.fabs(self.get_wavelength() - 1.54) < 0.01) or
        (math.fabs(self.get_wavelength() - 2.29) < 0.01)):
      index.set_Cu_KA_or_Cr_KA(True)

    try:
      index.run()
    except RuntimeError:

      if self._refine_beam is False:
        raise

      # can we improve the situation?

      if self._beam_search_scope < 4.0:
        self._beam_search_scope += 4.0

        # try repeating the indexing!

        self.set_indexer_done(False)
        return 'failed'

      # otherwise this is beyond redemption

      raise
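The failure handling above leans on xia2's indexer framework: returning after set_indexer_done(False) causes _index() to be called again with the widened beam search scope. Below is a minimal sketch of that retry loop, with a hypothetical driver class standing in for the framework; only the scope-widening logic is taken from the example.

class RetryingIndexer(object):
    '''Hypothetical stand-in for the xia2 indexer state machine.'''

    def __init__(self):
        self._beam_search_scope = 0.0
        self._indexer_done = False

    def set_indexer_done(self, done):
        self._indexer_done = done

    def _run_labelit(self):
        # pretend indexing only succeeds once the search scope is wide
        if self._beam_search_scope < 4.0:
            raise RuntimeError('indexing failed')

    def _index(self):
        try:
            self._run_labelit()
            self.set_indexer_done(True)
        except RuntimeError:
            if self._beam_search_scope < 4.0:
                # widen the beam search and ask the framework to retry
                self._beam_search_scope += 4.0
                self.set_indexer_done(False)
                return 'failed'
            raise

    def index(self):
        # the framework keeps calling _index() until it reports done
        while not self._indexer_done:
            self._index()

RetryingIndexer().index()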
Example #28
  def _scale(self):
    '''Actually scale all of the data together.'''

    from xia2.Handlers.Environment import debug_memory_usage
    debug_memory_usage()

    Journal.block(
        'scaling', self.get_scaler_xcrystal().get_name(), 'XSCALE',
        {'scaling model':'default (all)'})

    epochs = sorted(self._sweep_information)

    xscale = self.XScale()

    xscale.set_spacegroup_number(self._xds_spacegroup)
    xscale.set_cell(self._scalr_cell)

    Debug.write('Set CELL: %.2f %.2f %.2f %.2f %.2f %.2f' % \
                tuple(self._scalr_cell))
    Debug.write('Set SPACEGROUP_NUMBER: %d' % \
                self._xds_spacegroup)

    Debug.write('Gathering measurements for scaling')

    for epoch in epochs:

      # get the prepared reflections
      reflections = self._sweep_information[epoch][
          'prepared_reflections']

      # and get the wavelength that this belongs to
      dname = self._sweep_information[epoch]['dname']
      sname = self._sweep_information[epoch]['sname']

      # and the resolution range for the reflections
      intgr = self._sweep_information[epoch]['integrater']
      Debug.write('Epoch: %d' % epoch)
      Debug.write('HKL: %s (%s/%s)' % (reflections, dname, sname))

      resolution_low = intgr.get_integrater_low_resolution()
      resolution_high, _ = self._scalr_resolution_limits.get((dname, sname), (0.0, None))

      resolution = (resolution_high, resolution_low)

      xscale.add_reflection_file(reflections, dname, resolution)

    # set the global properties of the sample
    xscale.set_crystal(self._scalr_xname)
    xscale.set_anomalous(self._scalr_anomalous)

    debug_memory_usage()
    xscale.run()

    scale_factor = xscale.get_scale_factor()

    Debug.write('XSCALE scale factor found to be: %e' % scale_factor)

    # record the log file

    pname = self._scalr_pname
    xname = self._scalr_xname

    FileHandler.record_log_file('%s %s XSCALE' % \
                                (pname, xname),
                                os.path.join(self.get_working_directory(),
                                             'XSCALE.LP'))

    # check for outlier reflections and if a number are found
    # then iterate (that is, rerun XSCALE, rejecting these outliers)

    if not PhilIndex.params.dials.fast_mode and not PhilIndex.params.xds.keep_outliers:
      xscale_remove = xscale.get_remove()
      if xscale_remove:
        current_remove = []
        final_remove = []

        # first ensure that there are no duplicate entries...
        if os.path.exists(os.path.join(
            self.get_working_directory(),
            'REMOVE.HKL')):
          with open(os.path.join(
              self.get_working_directory(),
              'REMOVE.HKL'), 'r') as remove_file:
            for line in remove_file:
              tokens = line.split()
              h, k, l = map(int, tokens[:3])
              z = float(tokens[3])

              if (h, k, l, z) not in current_remove:
                current_remove.append((h, k, l, z))

          for c in xscale_remove:
            if c in current_remove:
              continue
            final_remove.append(c)

          Debug.write(
              '%d alien reflections are already removed' % \
              (len(xscale_remove) - len(final_remove)))

        else:
          # we want to remove all of the new dodgy reflections
          final_remove = xscale_remove

        remove_hkl = open(os.path.join(
            self.get_working_directory(),
            'REMOVE.HKL'), 'w')

        z_min = PhilIndex.params.xds.z_min
        rejected = 0

        # write in the old reflections
        for remove in current_remove:
          z = remove[3]
          if z >= z_min:
            remove_hkl.write('%d %d %d %f\n' % remove)
          else:
            rejected += 1
        Debug.write('Wrote %d old reflections to REMOVE.HKL' % \
                    (len(current_remove) - rejected))
        Debug.write('Rejected %d as z < %f' % \
                    (rejected, z_min))

        # and the new reflections
        rejected = 0
        used = 0
        for remove in final_remove:
          z = remove[3]
          if z >= z_min:
            used += 1
            remove_hkl.write('%d %d %d %f\n' % remove)
          else:
            rejected += 1
        Debug.write('Wrote %d new reflections to REMOVE.HKL' % \
                    (len(final_remove) - rejected))
        Debug.write('Rejected %d as z < %f' % \
                    (rejected, z_min))

        remove_hkl.close()

        # we want to rerun the finishing step so...
        # unless we have added no new reflections
        if used:
          self.set_scaler_done(False)

    if not self.get_scaler_done():
      Chatter.write('Excluding outlier reflections Z > %.2f' %
                    PhilIndex.params.xds.z_min)
      return

    debug_memory_usage()

    # now get the reflection files out and merge them with aimless

    output_files = xscale.get_output_reflection_files()
    wavelength_names = output_files.keys()

    # these are per wavelength - also allow for user defined resolution
    # limits a la bug # 3183. No longer...

    for epoch in self._sweep_information.keys():

      input = self._sweep_information[epoch]

      intgr = input['integrater']

      rkey = input['dname'], input['sname']

      if intgr.get_integrater_user_resolution():
        dmin = intgr.get_integrater_high_resolution()

        if (rkey not in self._user_resolution_limits or
            dmin < self._user_resolution_limits[rkey]):
          self._scalr_resolution_limits[rkey] = (dmin, None)
          self._user_resolution_limits[rkey] = dmin

    self._scalr_scaled_refl_files = { }

    self._scalr_statistics = { }

    max_batches = 0
    mtz_dict = { }

    project_info = { }
    for epoch in self._sweep_information.keys():
      pname = self._scalr_pname
      xname = self._scalr_xname
      dname = self._sweep_information[epoch]['dname']
      reflections = os.path.split(
          self._sweep_information[epoch]['prepared_reflections'])[-1]
      project_info[reflections] = (pname, xname, dname)

    for epoch in self._sweep_information.keys():
      self._sweep_information[epoch]['scaled_reflections'] = None

    debug_memory_usage()

    for wavelength in wavelength_names:
      hklin = output_files[wavelength]

      xsh = XDSScalerHelper()
      xsh.set_working_directory(self.get_working_directory())

      ref = xsh.split_and_convert_xscale_output(
          hklin, 'SCALED_', project_info, 1.0 / scale_factor)

      for hklout in ref.keys():
        for epoch in self._sweep_information.keys():
          if os.path.split(self._sweep_information[epoch][
              'prepared_reflections'])[-1] == \
              os.path.split(hklout)[-1]:
            if self._sweep_information[epoch][
                'scaled_reflections'] is not None:
              raise RuntimeError('duplicate entries')
            self._sweep_information[epoch][
                'scaled_reflections'] = ref[hklout]

      del xsh

    debug_memory_usage()

    for epoch in self._sweep_information.keys():
      hklin = self._sweep_information[epoch]['scaled_reflections']
      dname = self._sweep_information[epoch]['dname']
      sname = self._sweep_information[epoch]['sname']
      # fetch the integrater for this sweep explicitly - the
      # keep_all_reflections branch below needs it, and relying on a
      # stale variable left over from the earlier loop was a latent bug
      intgr = self._sweep_information[epoch]['integrater']

      hkl_copy = os.path.join(self.get_working_directory(),
                              'R_%s' % os.path.split(hklin)[-1])

      if not os.path.exists(hkl_copy):
        shutil.copyfile(hklin, hkl_copy)

      # let's properly listen to the user's resolution limit needs...

      if self._user_resolution_limits.get((dname, sname), False):
        resolution = self._user_resolution_limits[(dname, sname)]

      else:
        if PhilIndex.params.xia2.settings.resolution.keep_all_reflections:
          try:
            resolution = intgr.get_detector().get_max_resolution(intgr.get_beam_obj().get_s0())
            Debug.write('keep_all_reflections set, using detector limits')
          except Exception:
            resolution = self._estimate_resolution_limit(hklin)
        else:
          resolution = self._estimate_resolution_limit(hklin)

      Chatter.write('Resolution for sweep %s/%s: %.2f' % \
                    (dname, sname, resolution))

      if (dname, sname) not in self._scalr_resolution_limits:
        self._scalr_resolution_limits[(dname, sname)] = (resolution, None)
        self.set_scaler_done(False)
      else:
        if resolution < self._scalr_resolution_limits[(dname, sname)][0]:
          self._scalr_resolution_limits[(dname, sname)] = (resolution, None)
          self.set_scaler_done(False)

    debug_memory_usage()

    if not self.get_scaler_done():
      Debug.write('Returning as scaling not finished...')
      return

    self._sort_together_data_xds()

    highest_resolution = min(limit for limit, _ in self._scalr_resolution_limits.values())

    self._scalr_highest_resolution = highest_resolution

    Debug.write('Scaler highest resolution set to %5.2f' % \
                highest_resolution)

    if not self.get_scaler_done():
      Debug.write('Returning as scaling not finished...')
      return

    sdadd_full = 0.0
    sdb_full = 0.0

    # ---------- FINAL MERGING ----------

    sc = self._factory.Aimless()

    FileHandler.record_log_file('%s %s aimless' % (self._scalr_pname,
                                                   self._scalr_xname),
                                sc.get_log_file())

    sc.set_resolution(highest_resolution)
    sc.set_hklin(self._prepared_reflections)
    sc.set_new_scales_file('%s_final.scales' % self._scalr_xname)

    if sdadd_full == 0.0 and sdb_full == 0.0:
      pass
    else:
      sc.add_sd_correction('both', 1.0, sdadd_full, sdb_full)

    for epoch in epochs:
      input = self._sweep_information[epoch]
      start, end = (min(input['batches']), max(input['batches']))

      rkey = input['dname'], input['sname']
      run_resolution_limit, _ = self._scalr_resolution_limits[rkey]

      sc.add_run(start, end, exclude = False,
                 resolution = run_resolution_limit,
                 name = input['sname'])

    sc.set_hklout(os.path.join(self.get_working_directory(),
                               '%s_%s_scaled.mtz' % \
                               (self._scalr_pname, self._scalr_xname)))

    if self.get_scaler_anomalous():
      sc.set_anomalous()

    sc.multi_merge()

    FileHandler.record_xml_file('%s %s aimless xml' % (self._scalr_pname,
                                                       self._scalr_xname),
                                sc.get_xmlout())
    data = sc.get_summary()

    loggraph = sc.parse_ccp4_loggraph()

    standard_deviation_info = { }

    for key in loggraph.keys():
      if 'standard deviation v. Intensity' in key:
        dataset = key.split(',')[-1].strip()
        standard_deviation_info[dataset] = transpose_loggraph(
            loggraph[key])

    resolution_info = { }

    for key in loggraph.keys():
      if 'Analysis against resolution' in key:
        dataset = key.split(',')[-1].strip()
        resolution_info[dataset] = transpose_loggraph(
            loggraph[key])

    # and also radiation damage stuff...

    batch_info = { }

    for key in loggraph.keys():
      if 'Analysis against Batch' in key:
        dataset = key.split(',')[-1].strip()
        batch_info[dataset] = transpose_loggraph(
            loggraph[key])


    # finally put all of the results "somewhere useful"

    self._scalr_statistics = data

    self._scalr_scaled_refl_files = copy.deepcopy(
        sc.get_scaled_reflection_files())

    self._scalr_scaled_reflection_files = { }

    # also output the unmerged scalepack format files...

    sc = self._factory.Aimless()
    sc.set_resolution(highest_resolution)
    sc.set_hklin(self._prepared_reflections)
    sc.set_scalepack()

    for epoch in epochs:
      input = self._sweep_information[epoch]
      start, end = (min(input['batches']), max(input['batches']))

      rkey = input['dname'], input['sname']
      run_resolution_limit, _ = self._scalr_resolution_limits[rkey]

      sc.add_run(start, end, exclude = False,
                 resolution = run_resolution_limit,
                 name = input['sname'])

    sc.set_hklout(os.path.join(self.get_working_directory(),
                               '%s_%s_scaled.mtz' % \
                               (self._scalr_pname,
                                self._scalr_xname)))

    if self.get_scaler_anomalous():
      sc.set_anomalous()

    sc.multi_merge()

    self._scalr_scaled_reflection_files['sca_unmerged'] = { }
    self._scalr_scaled_reflection_files['mtz_unmerged'] = { }

    for dataset in sc.get_scaled_reflection_files().keys():
      hklout = sc.get_scaled_reflection_files()[dataset]

      # then mark the scalepack files for copying...

      scalepack = os.path.join(os.path.split(hklout)[0],
                               os.path.split(hklout)[1].replace(
          '_scaled', '_scaled_unmerged').replace('.mtz', '.sca'))
      self._scalr_scaled_reflection_files['sca_unmerged'][
          dataset] = scalepack
      FileHandler.record_data_file(scalepack)
      mtz_unmerged = os.path.splitext(scalepack)[0] + '.mtz'
      self._scalr_scaled_reflection_files['mtz_unmerged'][dataset] = mtz_unmerged
      FileHandler.record_data_file(mtz_unmerged)

    if PhilIndex.params.xia2.settings.merging_statistics.source == 'cctbx':
      for key in self._scalr_scaled_refl_files:
        stats = self._compute_scaler_statistics(
          self._scalr_scaled_reflection_files['mtz_unmerged'][key], wave=key)
        self._scalr_statistics[
          (self._scalr_pname, self._scalr_xname, key)] = stats

    # convert reflection files to .sca format - use mtz2various for this

    self._scalr_scaled_reflection_files['sca'] = { }
    self._scalr_scaled_reflection_files['hkl'] = { }

    for key in self._scalr_scaled_refl_files:

      f = self._scalr_scaled_refl_files[key]
      scaout = '%s.sca' % f[:-4]

      m2v = self._factory.Mtz2various()
      m2v.set_hklin(f)
      m2v.set_hklout(scaout)
      m2v.convert()

      self._scalr_scaled_reflection_files['sca'][key] = scaout
      FileHandler.record_data_file(scaout)

      if PhilIndex.params.xia2.settings.small_molecule:
        hklout = '%s.hkl' % f[:-4]

        m2v = self._factory.Mtz2various()
        m2v.set_hklin(f)
        m2v.set_hklout(hklout)
        m2v.convert_shelx()

        self._scalr_scaled_reflection_files['hkl'][key] = hklout
        FileHandler.record_data_file(hklout)
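A minimal sketch of the REMOVE.HKL bookkeeping in the example above: each line carries h, k, l and a z-score, previously rejected reflections are deduplicated against the new rejections, and only entries with z >= z_min are written back. The helper name and the z_min default are illustrative, not part of xia2.

import os

def merge_remove_hkl(path, new_rejections, z_min=5.0):
    '''Merge new (h, k, l, z) rejections into REMOVE.HKL, dropping
    duplicates and anything with z below z_min. Returns the number of
    genuinely new reflections written.'''

    current = []
    if os.path.exists(path):
        with open(path, 'r') as fh:
            for line in fh:
                tokens = line.split()
                h, k, l = map(int, tokens[:3])
                z = float(tokens[3])
                if (h, k, l, z) not in current:
                    current.append((h, k, l, z))

    fresh = [c for c in new_rejections if c not in current]

    with open(path, 'w') as fh:
        for h, k, l, z in current + fresh:
            if z >= z_min:
                fh.write('%d %d %d %f\n' % (h, k, l, z))

    return len(fresh)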
Example #29
    def _index(self):
        '''Implement the indexer interface.'''

        Citations.cite('mosflm')

        indexer = MosflmIndex()
        indexer.set_working_directory(self.get_working_directory())
        auto_logfiler(indexer)

        from xia2.lib.bits import unique_elements
        _images = unique_elements(self._indxr_images)
        indexer.set_images(_images)
        images_str = ', '.join(map(str, _images))

        cell_str = None
        if self._indxr_input_cell:
            cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % \
                        self._indxr_input_cell

        if self._indxr_sweep_name:

            dirname = os.path.dirname(self.get_imageset().get_template())

            Journal.block(
                'autoindexing', self._indxr_sweep_name, 'mosflm', {
                    'images': images_str,
                    'target cell': cell_str,
                    'target lattice': self._indxr_input_lattice,
                    'template': self.get_imageset().get_template(),
                    'directory': dirname
                })

        indexer.set_template(os.path.basename(self.get_template()))
        indexer.set_directory(self.get_directory())

        xsweep = self.get_indexer_sweep()
        if xsweep is not None:
            if xsweep.get_distance() is not None:
                indexer.set_distance(xsweep.get_distance())
            if xsweep.get_beam_centre() is not None:
                indexer.set_beam_centre(xsweep.get_beam_centre())

        if self._indxr_input_cell:
            indexer.set_unit_cell(self._indxr_input_cell)

        if self._indxr_input_lattice is not None:
            spacegroup_number = lattice_to_spacegroup(
                self._indxr_input_lattice)
            indexer.set_space_group_number(spacegroup_number)

        if not self._mosflm_autoindex_thresh:

            try:

                min_peaks = 200

                Debug.write('Aiming for at least %d spots...' % min_peaks)

                thresholds = []

                for i in _images:

                    p = Printpeaks()
                    p.set_working_directory(self.get_working_directory())
                    auto_logfiler(p)
                    p.set_image(self.get_image_name(i))
                    thresh = p.threshold(min_peaks)

                    Debug.write('Autoindex threshold for image %d: %d' % \
                                (i, thresh))

                    thresholds.append(thresh)

                thresh = min(thresholds)
                self._mosflm_autoindex_thresh = thresh

            except Exception as e:
                Debug.write('Error computing threshold: %s' % str(e))
                Debug.write('Using default of 20.0')
                thresh = 20.0
                # remember the fallback too, so the threshold rescale on
                # an unrealistic solution below has a number to work on
                self._mosflm_autoindex_thresh = thresh

        else:
            thresh = self._mosflm_autoindex_thresh

        Debug.write('Using autoindex threshold: %d' % thresh)

        if self._mosflm_autoindex_sol:
            indexer.set_solution_number(self._mosflm_autoindex_sol)
        indexer.set_threshold(thresh)

        # now forget this to prevent weird things happening later on
        if self._mosflm_autoindex_sol:
            self._mosflm_autoindex_sol = 0

        indexer.run()

        indxr_cell = indexer.get_refined_unit_cell()
        self._indxr_lattice = indexer.get_lattice()
        space_group_number = indexer.get_indexed_space_group_number()
        detector_distance = indexer.get_refined_distance()
        beam_centre = indexer.get_refined_beam_centre()
        mosaic_spreads = indexer.get_mosaic_spreads()

        if min(list(indxr_cell)) < 10.0 and \
           indxr_cell[2] / indxr_cell[0] > 6:

            Debug.write('Unrealistic autoindexing solution: ' +
                        '%.2f %.2f %.2f %.2f %.2f %.2f' % indxr_cell)

            # tweak some parameters and try again...
            self._mosflm_autoindex_thresh *= 1.5
            self.set_indexer_done(False)

            return

        intgr_params = {}

        # look up other possible indexing solutions (only in standard
        # settings, so not exhaustively). This is done early because it
        # could mean returning if Mosflm has selected the wrong
        # solution!

        try:
            self._indxr_other_lattice_cell = indexer.get_solutions()

            # Change 27/FEB/08 to support user assigned spacegroups
            if self._indxr_user_input_lattice:
                lattice_to_spacegroup_dict = {
                    'aP': 1,
                    'mP': 3,
                    'mC': 5,
                    'oP': 16,
                    'oC': 20,
                    'oF': 22,
                    'oI': 23,
                    'tP': 75,
                    'tI': 79,
                    'hP': 143,
                    'hR': 146,
                    'cP': 195,
                    'cF': 196,
                    'cI': 197
                }
                for k in list(self._indxr_other_lattice_cell):
                    if (lattice_to_spacegroup_dict[k] >
                            lattice_to_spacegroup_dict[
                                self._indxr_input_lattice]):
                        del self._indxr_other_lattice_cell[k]

            # check that the selected unit cell matches - and if
            # not raise a "horrible" exception

            if self._indxr_input_cell:
                assert indxr_cell is not None
                for j in range(6):
                    if math.fabs(self._indxr_input_cell[j] -
                                 indxr_cell[j]) > 2.0:
                        Chatter.write('Mosflm autoindexing did not select ' +
                                      'correct (target) unit cell')
                        raise RuntimeError(
                            'something horrible happened in indexing')

        except RuntimeError as e:
            # check whether Mosflm rejected the (preset) solution we gave it
            if 'horribl' in str(e):
                # ok it did - time to break out the big guns...
                if not self._indxr_input_cell:
                    raise RuntimeError(
                        'error in solution selection when not preset')

                # XXX FIXME
                self._mosflm_autoindex_sol = _get_indexing_solution_number(
                    indexer.get_all_output(), self._indxr_input_cell,
                    self._indxr_input_lattice)

                # set the fact that we are not done...
                self.set_indexer_done(False)

                # and return - hopefully this will restart everything
                return
            else:
                raise

        if len(mosaic_spreads) == 0:
            # then consider setting it to a default value...
            # equal to the oscillation width (a good guess)
            phi_width = self.get_phi_width()
            Chatter.write(
                'Mosaic estimation failed, so guessing at %4.2f' % \
                phi_width)
            # only consider this if we have thus far no idea on the
            # mosaic spread...
            mosaic_spreads.append(phi_width)

        intgr_params['raster'] = indexer.get_raster()

        intgr_params['separation'] = indexer.get_separation()

        self._indxr_resolution_estimate = indexer.get_resolution_estimate()

        # compute mosaic as mean(mosaic_spreads)

        self._indxr_mosaic = sum(mosaic_spreads) / len(mosaic_spreads)

        self._indxr_payload['mosflm_integration_parameters'] = intgr_params

        with open(os.path.join(self.get_working_directory(),
                               'xiaindex.mat'), 'r') as mat_file:
            self._indxr_payload['mosflm_orientation_matrix'] = \
                mat_file.readlines()

        import copy
        from dxtbx.model.detector_helpers import set_mosflm_beam_centre
        from xia2.Wrappers.Mosflm.AutoindexHelpers import set_distance
        from xia2.Wrappers.Mosflm.AutoindexHelpers import crystal_model_from_mosflm_mat
        from cctbx import sgtbx, uctbx

        # update the beam centre (i.e. shift the origin of the detector)
        detector = copy.deepcopy(self.get_detector())
        beam = copy.deepcopy(self.get_beam())
        set_mosflm_beam_centre(detector, beam, beam_centre)
        if detector_distance is not None:
            set_distance(detector, detector_distance)

        # make a dxtbx crystal_model object from the mosflm matrix
        space_group = sgtbx.space_group_info(number=space_group_number).group()
        crystal_model = crystal_model_from_mosflm_mat(
            self._indxr_payload['mosflm_orientation_matrix'],
            unit_cell=uctbx.unit_cell(tuple(indxr_cell)),
            space_group=space_group)

        # construct an experiment_list
        from dxtbx.model import Experiment, ExperimentList
        experiment = Experiment(beam=beam,
                                detector=detector,
                                goniometer=self.get_goniometer(),
                                scan=self.get_scan(),
                                crystal=crystal_model)

        experiment_list = ExperimentList([experiment])
        self.set_indexer_experiment_list(experiment_list)
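A minimal, self-contained sketch of the solution-filtering step in the example above: any candidate lattice whose standard spacegroup number exceeds that of the user-supplied lattice is discarded. The lookup table mirrors the one in the example; the candidate dictionary is hypothetical.

lattice_to_spacegroup = {
    'aP': 1, 'mP': 3, 'mC': 5, 'oP': 16, 'oC': 20, 'oF': 22, 'oI': 23,
    'tP': 75, 'tI': 79, 'hP': 143, 'hR': 146, 'cP': 195, 'cF': 196,
    'cI': 197}

def filter_solutions(solutions, input_lattice):
    # keep only lattices of equal or lower symmetry than the input
    cutoff = lattice_to_spacegroup[input_lattice]
    return {lattice: cell for lattice, cell in solutions.items()
            if lattice_to_spacegroup[lattice] <= cutoff}

candidates = {'aP': (10, 20, 30, 90, 95, 90),
              'mC': (10, 20, 30, 90, 95, 90),
              'tP': (10, 10, 30, 90, 90, 90)}
print(filter_solutions(candidates, 'mC'))   # keeps 'aP' and 'mC' only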
Example #30
    def _integrate(self):
        """Actually do the integration - in XDS terms this will mean running
        DEFPIX and INTEGRATE to measure all the reflections."""

        experiment = self._intgr_refiner.get_refined_experiment_list(
            self.get_integrater_epoch())[0]
        crystal_model = experiment.crystal
        self._intgr_refiner_cell = crystal_model.get_unit_cell().parameters()

        images_str = "%d to %d" % tuple(self._intgr_wedge)
        cell_str = "%.2f %.2f %.2f %.2f %.2f %.2f" % tuple(
            self._intgr_refiner_cell)

        if len(self._fp_directory) <= 50:
            dirname = self._fp_directory
        else:
            dirname = "...%s" % self._fp_directory[-46:]

        Journal.block(
            "integrating",
            self._intgr_sweep_name,
            "XDS",
            {
                "images": images_str,
                "cell": cell_str,
                "lattice": self._intgr_refiner.get_refiner_lattice(),
                "template": self._fp_template,
                "directory": dirname,
                "resolution": "%.2f" % self._intgr_reso_high,
            },
        )

        defpix = self.Defpix()

        # pass in the correct data

        for file in [
                "X-CORRECTIONS.cbf",
                "Y-CORRECTIONS.cbf",
                "BKGINIT.cbf",
                "XPARM.XDS",
        ]:
            defpix.set_input_data_file(file, self._xds_data_files[file])

        defpix.set_data_range(
            self._intgr_wedge[0] + self.get_frame_offset(),
            self._intgr_wedge[1] + self.get_frame_offset(),
        )

        if (self.get_integrater_high_resolution() > 0.0
                and self.get_integrater_user_resolution()):
            Debug.write("Setting resolution limit in DEFPIX to %.2f" %
                        self.get_integrater_high_resolution())
            defpix.set_resolution_high(self.get_integrater_high_resolution())
            defpix.set_resolution_low(self.get_integrater_low_resolution())

        elif self.get_integrater_low_resolution():
            Debug.write("Setting low resolution limit in DEFPIX to %.2f" %
                        self.get_integrater_low_resolution())
            defpix.set_resolution_high(0.0)
            defpix.set_resolution_low(self.get_integrater_low_resolution())

        defpix.run()

        # and gather the result files
        for file in ["BKGPIX.cbf", "ABS.cbf"]:
            self._xds_data_files[file] = defpix.get_output_data_file(file)

        integrate = self.Integrate()

        if self._xds_integrate_parameters:
            integrate.set_updates(self._xds_integrate_parameters)

        # decide what images we are going to process, if not already
        # specified

        if not self._intgr_wedge:
            images = self.get_matching_images()
            self.set_integrater_wedge(min(images), max(images))

        integrate.set_data_range(
            self._intgr_wedge[0] + self.get_frame_offset(),
            self._intgr_wedge[1] + self.get_frame_offset(),
        )

        for file in [
                "X-CORRECTIONS.cbf",
                "Y-CORRECTIONS.cbf",
                "BLANK.cbf",
                "BKGPIX.cbf",
                "GAIN.cbf",
        ]:
            integrate.set_input_data_file(file, self._xds_data_files[file])

        if "GXPARM.XDS" in self._xds_data_files:
            Debug.write("Using globally refined parameters")
            integrate.set_input_data_file("XPARM.XDS",
                                          self._xds_data_files["GXPARM.XDS"])
            integrate.set_refined_xparm()
        else:
            integrate.set_input_data_file("XPARM.XDS",
                                          self._xds_data_files["XPARM.XDS"])

        integrate.run()

        self._intgr_per_image_statistics = integrate.get_per_image_statistics()
        Chatter.write(self.show_per_image_statistics())

        # record the log file -

        pname, xname, dname = self.get_integrater_project_info()
        sweep = self.get_integrater_sweep_name()
        FileHandler.record_log_file(
            "%s %s %s %s INTEGRATE" % (pname, xname, dname, sweep),
            os.path.join(self.get_working_directory(), "INTEGRATE.LP"),
        )

        # and copy the first pass INTEGRATE.HKL...

        lattice = self._intgr_refiner.get_refiner_lattice()
        if not os.path.exists(
                os.path.join(self.get_working_directory(),
                             "INTEGRATE-%s.HKL" % lattice)):
            here = self.get_working_directory()
            shutil.copyfile(
                os.path.join(here, "INTEGRATE.HKL"),
                os.path.join(here, "INTEGRATE-%s.HKL" % lattice),
            )

        # record INTEGRATE.HKL for e.g. BLEND.

        FileHandler.record_more_data_file(
            "%s %s %s %s INTEGRATE" % (pname, xname, dname, sweep),
            os.path.join(self.get_working_directory(), "INTEGRATE.HKL"),
        )

        # should the existence of these updated parameters require a
        # rerun of the integration, or can we assume that applying a
        # sensible resolution limit will achieve the same thing?

        self._xds_integrate_parameters = integrate.get_updates()

        # record the mosaic spread &c.

        m_min, m_mean, m_max = integrate.get_mosaic()
        self.set_integrater_mosaic_min_mean_max(m_min, m_mean, m_max)

        Chatter.write("Mosaic spread: %.3f < %.3f < %.3f" %
                      self.get_integrater_mosaic_min_mean_max())

        return os.path.join(self.get_working_directory(), "INTEGRATE.HKL")
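A minimal sketch of the XPARM/GXPARM choice made above: when globally refined geometry (GXPARM.XDS) is available it is supplied to INTEGRATE under the XPARM.XDS name and the wrapper is told the parameters are refined; otherwise the original XPARM.XDS is used. The dictionary and the stub class here are hypothetical stand-ins for the xia2 data-file cache and the Integrate wrapper.

def select_xparm(xds_data_files):
    '''Return (payload, refined), preferring GXPARM.XDS when present.'''
    if 'GXPARM.XDS' in xds_data_files:
        return xds_data_files['GXPARM.XDS'], True
    return xds_data_files['XPARM.XDS'], False

class IntegrateStub(object):
    '''Hypothetical stand-in recording what the wrapper would be fed.'''

    def __init__(self):
        self.input_files = {}
        self.refined = False

    def set_input_data_file(self, name, payload):
        self.input_files[name] = payload

    def set_refined_xparm(self):
        self.refined = True

integrate = IntegrateStub()
payload, refined = select_xparm({'XPARM.XDS': b'original',
                                 'GXPARM.XDS': b'refined'})
# the refined geometry is passed in under the XPARM.XDS name
integrate.set_input_data_file('XPARM.XDS', payload)
if refined:
    integrate.set_refined_xparm()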