Example #1
  def close_wait(self):
    '''Close the standard input channel and wait for the standard
    output to stop. Note that the results can still be obtained through
    self.get_all_output()...'''

    self.close()

    while True:
      line = self.output()

      if not line:
        break

    if self._log_file:
      # close the existing log file: also add a comment at the end containing the
      # command-line (replacing working directory & executable path for brevity)
      command_line = '%s ' % os.path.split(self._executable)[-1]
      for c in self._command_line:
        command_line += ' \'%s\'' % c.replace(self._working_directory + os.sep, '')
      self._log_file.write('# command line:\n')
      self._log_file.write('# %s\n' % command_line)
      self._log_file.close()
      self._log_file = None
      from xia2.Handlers.Streams import Debug
      with open(self._log_file_name, 'r') as f:
        lines = f.readlines()
        n = min(50, len(lines))
        Debug.write('Last %i lines of %s:' %(n, self._log_file_name))
        for line in lines[-n:]:
          Debug.write(line.rstrip('\n'), strip=False)

    self.cleanup()
Example #2
  def _integrate_select_images_wedges(self):
    '''Select correct images based on image headers.'''

    phi_width = self.get_phi_width()

    images = self.get_matching_images()

    # characterise the images - are there just two (e.g. dna-style
    # reference images) or is there a full block?

    wedges = []

    if len(images) < 3:
      # work on the assumption that this is a reference pair

      wedges.append(images[0])

      if len(images) > 1:
        wedges.append(images[1])

    else:
      block_size = min(len(images), int(math.ceil(5.0 / phi_width)))

      Debug.write('Adding images for indexer: %d -> %d' % \
                  (images[0], images[block_size - 1]))

      wedges.append((images[0], images[block_size - 1]))

      if int(90.0 / phi_width) + block_size in images:
        # assume we can add a wedge around 45 degrees as well...
        Debug.write('Adding images for indexer: %d -> %d' % \
                    (int(45.0 / phi_width) + images[0],
                     int(45.0 / phi_width) + images[0] +
                     block_size - 1))
        Debug.write('Adding images for indexer: %d -> %d' % \
                    (int(90.0 / phi_width) + images[0],
                     int(90.0 / phi_width) + images[0] +
                     block_size - 1))
        wedges.append(
            (int(45.0 / phi_width) + images[0],
             int(45.0 / phi_width) + images[0] + block_size - 1))
        wedges.append(
            (int(90.0 / phi_width) + images[0],
             int(90.0 / phi_width) + images[0] + block_size - 1))

      else:

        # add some half-way anyway
        first = (len(images) // 2) - (block_size // 2) + images[0] - 1
        if first > wedges[0][1]:
          last = first + block_size - 1
          Debug.write('Adding images for indexer: %d -> %d' % \
                      (first, last))
          wedges.append((first, last))
        if len(images) > block_size:
          Debug.write('Adding images for indexer: %d -> %d' % \
                      (images[- block_size], images[-1]))
          wedges.append((images[- block_size], images[-1]))

    return wedges
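
To make the wedge arithmetic above concrete, here is a minimal standalone sketch of the full-block branch (assuming a contiguous 1-based image range and phi_width in degrees; the two-image reference-pair branch is omitted):

  import math

  def select_wedges(images, phi_width):
    # block covering the first ~5 degrees of data
    block_size = min(len(images), int(math.ceil(5.0 / phi_width)))
    wedges = [(images[0], images[block_size - 1])]
    # if the sweep reaches past 90 degrees, add blocks at 45 and 90
    if int(90.0 / phi_width) + block_size in images:
      for angle in (45.0, 90.0):
        start = int(angle / phi_width) + images[0]
        wedges.append((start, start + block_size - 1))
    return wedges

  # 360 images of 0.5 degrees: 10-image blocks at 0, 45 and 90 degrees
  print(select_wedges(list(range(1, 361)), 0.5))
  # [(1, 10), (91, 100), (181, 190)]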
Example #3
  def _setup(self):
    if self._is_setup:
      return

    self._is_setup = True
    harvest_directory = self.generate_directory('Harvest')
    self.setenv('HARVESTHOME', harvest_directory)

    # create a USER environment variable, to allow harvesting
    # in Mosflm to work (hacky, I know, but it really doesn't
    # matter too much...)

    if 'USER' not in os.environ:
      if 'USERNAME' in os.environ:
        os.environ['USER'] = os.environ['USERNAME']
      else:
        os.environ['USER'] = '******'

    # define a local CCP4_SCR

    ccp4_scr = tempfile.mkdtemp()
    os.environ['CCP4_SCR'] = ccp4_scr
    Debug.write('Created CCP4_SCR: %s' % ccp4_scr)

    return
Example #4
File: Merger.py Project: xia2/xia2
    def run(self):
      assert self._hklin
      cl = [self._hklin]
      cl.append('nbins=%s' % self._nbins)
      cl.append('rmerge=%s' % self._limit_rmerge)
      cl.append('completeness=%s' % self._limit_completeness)
      cl.append('cc_half=%s' % self._limit_cc_half)
      cl.append('cc_half_significance_level=%s' % self._cc_half_significance_level)
      cl.append('isigma=%s' % self._limit_isigma)
      cl.append('misigma=%s' % self._limit_misigma)
      if self._batch_range is not None:
        cl.append('batch_range=%i,%i' % self._batch_range)
      for c in cl:
        self.add_command_line(c)
      Debug.write('Resolution analysis: %s' % (' '.join(cl)))
      self.start()
      self.close_wait()
      for record in self.get_all_output():
        if 'Resolution rmerge' in record:
          self._resolution_rmerge = float(record.split()[-1])
        if 'Resolution completeness' in record:
          self._resolution_completeness = float(record.split()[-1])
        if 'Resolution cc_half' in record:
          self._resolution_cc_half = float(record.split()[-1])
        if 'Resolution I/sig' in record:
          self._resolution_isigma = float(record.split()[-1])
        if 'Resolution Mn(I/sig)' in record:
          self._resolution_misigma = float(record.split()[-1])

      return
Example #5
    def set_xdsin(self, xdsin):

      # copy this file for debugging purposes - may take up a lot
      # of disk space so remove before release!

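      # NB: the unconditional early return below keeps the raw xdsin and
      # makes the copy/misfit-removal code that follows unreachable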
      if True:
        self._xdsin = xdsin
        return

      # now use this step to remove the misfit reflections
      # from the XDS_ASCII file.

      copyto = os.path.join(self.get_working_directory(), '%s_%s' % \
                            (self.get_xpid(), os.path.split(xdsin)[-1]))

      # shutil.copyfile(xdsin, copyto)

      ignored = remove_misfits(xdsin, copyto)

      Debug.write('Copied XDSIN to %s' % copyto)
      Debug.write('Removed %d misfits' % ignored)

      self._xdsin = copyto

      return
Example #6
    def xds_to_mtz(self):
      '''Use pointless to convert XDS file to MTZ.'''

      if not self._xdsin:
        raise RuntimeError('XDSIN not set')

      self.check_hklout()

      # -c for copy - just convert the file to MTZ multirecord
      self.add_command_line('-c')


      self.start()

      if self._pname and self._xname and self._dname:
        self.input('name project %s crystal %s dataset %s' % \
                   (self._pname, self._xname, self._dname))

      self.input('xdsin %s' % self._xdsin)

      if self._scale_factor:
        Debug.write('Scaling intensities by factor %e' % \
                    self._scale_factor)

        self.input('multiply %e' % self._scale_factor)

      self.close_wait()

      # FIXME need to check the status and so on here

      return
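
For orientation, the standard-input records this wrapper sends to pointless (project/crystal/dataset names hypothetical; the multiply record appears only when a scale factor is set) look like:

  name project AUTOMATIC crystal DEFAULT dataset NATIVE
  xdsin XDS_ASCII.HKL
  multiply 1.000000e+00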
Example #7
    def run(self):
      from xia2.Handlers.Streams import Debug
      Debug.write('Running %s' %self.get_executable())

      self.clear_command_line()
      self.add_command_line(self._sweep_filename)
      self.add_command_line(self._spot_filename)
      nproc = Flags.get_parallel()
      self.set_cpu_threads(nproc)
      self.add_command_line('nproc=%i' % nproc)
      for scan_range in self._scan_ranges:
        self.add_command_line('scan_range=%d,%d' % scan_range)

      if self._phil_file is not None:
        self.add_command_line("%s" %self._phil_file)

      self._optimized_filename = os.path.join(
        self.get_working_directory(), '%d_optimized_datablock.json' %self.get_xpid())
      self.add_command_line("output.datablock=%s" %self._optimized_filename)

      self.start()
      self.close_wait()
      self.check_for_errors()

      records = self.get_all_output()

      assert os.path.exists(self._optimized_filename), self._optimized_filename

      return
Example #8
    def __call__(self, indxr, images):
      from xia2.Handlers.Streams import Debug
      Debug.write('Running mosflm to generate RASTER, SEPARATION')

      self.start()
      self.input('template "%s"' % indxr.get_template())
      self.input('directory "%s"' % indxr.get_directory())
      self.input('beam %f %f' % indxr.get_indexer_beam_centre())
      self.input('distance %f' % indxr.get_indexer_distance())
      self.input('wavelength %f' % indxr.get_wavelength())
      self.input('findspots file spots.dat')
      for i in images:
        self.input('findspots find %d' % i)
      self.input('go')

      self.close_wait()

      p = { }

      # scrape from the output the values we want...

      for o in self.get_all_output():
        if 'parameters have been set to' in o:
          p['raster'] = list(map(int, o.split()[-5:]))
        if '(currently SEPARATION' in o:
          p['separation'] = list(map(float, o.replace(')', '').split()[-2:]))

      return p
Example #9
def memory_usage():
  try:
    import resource
    return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
  except Exception as e:
    Debug.write('Error getting RAM usage: %s' % str(e))
    return 0
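
One portability caveat for the above: ru_maxrss is reported in kilobytes on Linux but in bytes on macOS, so a normalising variant (a sketch, not xia2 code) might read:

  import resource
  import sys

  def memory_usage_bytes():
    # ru_maxrss units differ by platform: bytes on macOS, kilobytes on Linux
    rss = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    return rss if sys.platform == 'darwin' else rss * 1024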
Example #10
  def _integrate_finish(self):
    '''Finish the integration - if necessary performing reindexing
    based on the pointgroup and the reindexing operator.'''

    if self._intgr_reindex_operator is None and \
       self._intgr_spacegroup_number == lattice_to_spacegroup(
        self.get_integrater_refiner().get_refiner_lattice()):
      return self._mosflm_hklout

    if self._intgr_reindex_operator is None and \
       self._intgr_spacegroup_number == 0:
      return self._mosflm_hklout

    Debug.write('Reindexing to spacegroup %d (%s)' % \
                (self._intgr_spacegroup_number,
                 self._intgr_reindex_operator))

    hklin = self._mosflm_hklout
    reindex = Reindex()
    reindex.set_working_directory(self.get_working_directory())
    auto_logfiler(reindex)

    reindex.set_operator(self._intgr_reindex_operator)

    if self._intgr_spacegroup_number:
      reindex.set_spacegroup(self._intgr_spacegroup_number)

    hklout = '%s_reindex.mtz' % hklin[:-4]

    reindex.set_hklin(hklin)
    reindex.set_hklout(hklout)
    reindex.reindex()

    return hklout
Example #11
  def _index_select_images(self):
    '''Select correct images based on image headers. This will in
    general use up to 20 frames - N.B. only if they have good
    spots on them!'''

    phi_width = self.get_phi_width()
    images = self.get_matching_images()

    # N.B. now bodging this to use up to 20 frames which have decent
    # spots on, spaced from throughout the data set.

    spacing = max(1, int(len(images) // 20))

    selected = []

    for j in range(0, len(images), spacing):
      selected.append(images[j])

    for image in selected[:20]:
      ld = LabelitDistl()
      ld.set_working_directory(self.get_working_directory())
      auto_logfiler(ld)
      ld.add_image(self.get_image_name(image))
      ld.distl()
      spots = ld.get_statistics(
          self.get_image_name(image))['spots_good']
      Debug.write('Image %d good spots %d' % (image, spots))
      if spots > 10:
        self.add_indexer_image_wedge(image)

    return
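
The stride arithmetic picks roughly 20 evenly spaced frames; for example, with a hypothetical sweep of 180 matching images:

  images = list(range(1, 181))         # 180 matching images
  spacing = max(1, len(images) // 20)  # = 9
  selected = [images[j] for j in range(0, len(images), spacing)]
  print(selected[:20])                 # [1, 10, 19, ..., 172]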
Example #12
File: Flags.py Project: hainm/xia2
  def set_spacegroup(self, spacegroup):
    '''A handler for the command-line option -spacegroup - this will
    set the spacegroup and derive from it the corresponding
    pointgroup and lattice.'''

    from xia2.Handlers.Syminfo import Syminfo

    spacegroup = spacegroup.upper()

    # validate by deriving the pointgroup and lattice...

    pointgroup = Syminfo.get_pointgroup(spacegroup)
    lattice = Syminfo.get_lattice(spacegroup)

    # assign

    self._spacegroup = spacegroup
    self._pointgroup = pointgroup
    self._lattice = lattice

    # debug print

    from xia2.Handlers.Streams import Debug

    Debug.write('Derived information from spacegroup flag: %s' % \
                spacegroup)
    Debug.write('Pointgroup: %s  Lattice: %s' % (pointgroup, lattice))

    # indicate that since this has been assigned, we do not wish to
    # test it!

    self.set_no_lattice_test(True)

    return
Example #13
    def run(self):
      from xia2.Handlers.Streams import Debug
      Debug.write('Running dials.reindex')

      wd = self.get_working_directory()

      self.clear_command_line()
      if self._experiments_filename is not None:
        self.add_command_line(self._experiments_filename)
        self._reindexed_experiments_filename = os.path.join(
          wd, "%d_experiments_reindexed.json" %self.get_xpid())
        self.add_command_line(
          "output.experiments=%s" %self._reindexed_experiments_filename)
      if self._indexed_filename is not None:
        self.add_command_line(self._indexed_filename)
        self._reindexed_reflections_filename = os.path.join(
          wd, "%d_reflections_reindexed.pickle" %self.get_xpid())
        self.add_command_line(
          "output.reflections=%s" %self._reindexed_reflections_filename)
      if self._reference_filename is not None:
        self.add_command_line("reference=%s" % self._reference_filename)
      if self._cb_op:
        self.add_command_line("change_of_basis_op=%s" % self._cb_op)
      if self._space_group:
        self.add_command_line("space_group=%s" % self._space_group)
      if self._hkl_offset is not None:
        self.add_command_line("hkl_offset=%i,%i,%i" %self._hkl_offset)

      self.start()
      self.close_wait()
      self.check_for_errors()
Example #14
File: Scaler.py Project: hainm/xia2
  def add_scaler_integrater(self, integrater):
    '''Add an integrater to this scaler, to provide the input.'''

    # epoch values are trusted as long as they are unique.
    # if a collision is detected, all epoch values are replaced by an
    # integer series, starting with 0

    if 0 in self._scalr_integraters.keys():
      epoch = len(self._scalr_integraters)

    else:
      epoch = integrater.get_integrater_epoch()

      # FIXME This is now probably superfluous?
      if epoch == 0 and self._scalr_integraters:
        raise RuntimeError('multi-sweep integrater has epoch 0')

      if epoch in self._scalr_integraters.keys():
        Debug.write('integrater with epoch %d already exists: '
                    'will not trust epoch values' % epoch)

        # collision. Throw away all epoch keys, and replace with integer series
        self._scalr_integraters = dict(zip(
            range(len(self._scalr_integraters)),
            self._scalr_integraters.values()))
        epoch = len(self._scalr_integraters)

    self._scalr_integraters[epoch] = integrater

    self.scaler_reset()

    return
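
The collision handling above simply renumbers the existing integraters 0..n-1 and appends the newcomer at the end; a toy illustration with strings standing in for integrater objects:

  integraters = {1234: 'sweep1', 5678: 'sweep2'}

  # a third integrater arrives claiming an epoch already in use:
  integraters = dict(zip(range(len(integraters)), integraters.values()))
  integraters[len(integraters)] = 'sweep3'

  print(integraters)  # {0: 'sweep1', 1: 'sweep2', 2: 'sweep3'} (CPython 3.7+ ordering)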
Example #15
File: bits.py Project: hainm/xia2
def auto_logfiler(DriverInstance, extra = None):
  '''Create a "sensible" log file for this program wrapper & connect it.'''

  working_directory = DriverInstance.get_working_directory()

  if not working_directory:
    return

  executable = os.path.split(DriverInstance.get_executable())[-1]
  number = _get_number()

  if executable[-4:] == '.bat':
    executable = executable[:-4]

  if executable[-4:] == '.exe':
    executable = executable[:-4]

  if extra:
    logfile = os.path.join(working_directory,
                           '%d_%s_%s.log' % (number, executable, extra))
  else:
    logfile = os.path.join(working_directory,
                           '%d_%s.log' % (number, executable))

  DriverInstance.set_xpid(number)

  Debug.write('Logfile: %s -> %s' % (executable, logfile))

  DriverInstance.write_log_file(logfile)

  return logfile
Example #16
    def run(self):
      from xia2.Handlers.Streams import Debug
      Debug.write('Running dials.refine_bravais_settings')

      self.clear_command_line()
      self.add_command_line(self._experiments_filename)
      self.add_command_line(self._indexed_filename)

      nproc = PhilIndex.params.xia2.settings.multiprocessing.nproc
      self.set_cpu_threads(nproc)
      self.add_command_line('nproc=%i' % nproc)
      #self.add_command_line('reflections_per_degree=10')
      if self._detector_fix:
        self.add_command_line('detector.fix=%s' % self._detector_fix)
      if self._beam_fix:
        self.add_command_line('beam.fix=%s' % self._beam_fix)
      #self.add_command_line('engine=GaussNewton')
      if self._close_to_spindle_cutoff is not None:
        self.add_command_line(
          'close_to_spindle_cutoff=%f' %self._close_to_spindle_cutoff)

      self.start()
      self.close_wait()
      self.check_for_errors()

      import json
      import os
      with open(os.path.join(self.get_working_directory(),
                             'bravais_summary.json')) as fh:
        self._bravais_summary = json.load(fh)

      return
Example #17
  def Correct(self):
    correct = _Correct(params=PhilIndex.params.xds.correct)
    correct.set_working_directory(self.get_working_directory())

    correct.setup_from_imageset(self.get_imageset())

    if self.get_distance():
      correct.set_distance(self.get_distance())

    if self.get_wavelength():
      correct.set_wavelength(self.get_wavelength())

    if self.get_integrater_ice():
      correct.set_ice(self.get_integrater_ice())

    if self.get_integrater_excluded_regions():
      correct.set_excluded_regions(self.get_integrater_excluded_regions())

    if self.get_integrater_anomalous():
      correct.set_anomalous(True)

    if self.get_integrater_low_resolution() > 0.0:
      Debug.write('Using low resolution limit: %.2f' % \
                  self.get_integrater_low_resolution())
      correct.set_resolution_high(0.0)
      correct.set_resolution_low(
          self.get_integrater_low_resolution())

    auto_logfiler(correct, 'CORRECT')

    return correct
Example #18
        def run(self):
            from xia2.Handlers.Streams import Debug

            Debug.write("Running cctbx.brehm_diederichs")

            self.clear_command_line()
            if self._asymmetric is not None:
                assert isinstance(self._asymmetric, int)
                self.add_command_line("asymmetric=%i" % self._asymmetric)
            self.add_command_line("show_plot=False")
            self.add_command_line("save_plot=True")
            for filename in self._input_filenames:
                self.add_command_line(filename)

            self.start()
            self.close_wait()
            self.check_for_errors()

            import os

            results_filename = os.path.join(self.get_working_directory(), "reindex.txt")
            assert os.path.exists(results_filename)
            with open(results_filename, "rb") as f:
                for line in f.readlines():
                    filename, reindex_op = line.strip().rsplit(" ", 1)
                    self._reindexing_dict[os.path.abspath(filename)] = reindex_op

            return
Example #19
File: Indexer.py Project: xia2/xia2
  def get_indexer_done(self):

    if not self.get_indexer_prepare_done():
      Debug.write('Resetting indexer done as prepare not done')
      self.set_indexer_done(False)

    return self._indxr_done
Example #20
def digest_template(template, images):
  '''Digest the template and image numbers to copy as much of the
  common characters in the numbers as possible to the template to
  give smaller image numbers.'''

  length = template.count('#')

  format = '%%0%dd' % length

  strings = [format % i for i in images]

  offset = 0
  if len(strings) > 1:
    prefix = common_prefix(strings)
    if prefix:
      offset = int(prefix + '0' * (length - len(prefix)))
      template = template.replace(len(prefix) * '#', prefix, 1)
      images = [int(s.replace(prefix, '', 1)) for s in strings]

  try:
    template, images, offset = ensure_no_batches_numbered_zero(
        template, images, offset)
  except RuntimeError:
    Debug.write('Throwing away image 0 from template %s' % template)
    template, images, offset = ensure_no_batches_numbered_zero(
        template, images[1:], offset)

  return template, images, offset
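
A worked example of the digestion, assuming common_prefix returns the longest shared leading string (os.path.commonprefix behaves this way): a six-'#' template with images 90001-90090 zero-pads to strings sharing the prefix '0900', so the template digests to '0900##' with offset 90000 and images 1-90:

  import os

  template, images = '######', [90001, 90090]
  length = template.count('#')
  strings = [('%%0%dd' % length) % i for i in images]        # ['090001', '090090']
  prefix = os.path.commonprefix(strings)                     # '0900'
  offset = int(prefix + '0' * (length - len(prefix)))        # 90000
  template = template.replace(len(prefix) * '#', prefix, 1)  # '0900##'
  images = [int(s.replace(prefix, '', 1)) for s in strings]  # [1, 90]
  print(template, offset, images)                            # 0900## 90000 [1, 90]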
Example #21
    def run(self):
      from xia2.Handlers.Streams import Debug
      Debug.write('Running xia2.integrate')

      self.clear_command_line()

      if self._phil_file is not None:
        self.add_command_line('%s' % self._phil_file)

      for arg in self._argv:
        self.add_command_line(arg)
      if self._nproc is not None:
        self.set_cpu_threads(self._nproc)
        self.add_command_line('nproc=%i' %self._nproc)

      if self._njob is not None:
        self.add_command_line('njob=%i' %self._njob)

      if self._mp_mode is not None:
        self.add_command_line('multiprocessing.mode=%s' %self._mp_mode)

      self.start()
      self.close_wait()
      self.check_for_errors()
      for line in self.get_all_output():
        if 'Status: error' in line:
          raise RuntimeError(line.split('error')[-1].strip())

      return
Example #22
  def _index_finish(self):
    '''Check that the autoindexing gave a convincing result, and
    if not (i.e. it gave a centred lattice where a primitive one
    would be correct) pick up the correct solution.'''

    if self._indxr_input_lattice:
      return

    if self.get_indexer_sweep():
      if self.get_indexer_sweep().get_user_lattice():
        return

    try:
      status, lattice, matrix, cell = mosflm_check_indexer_solution(
          self)
    except Exception:
      return

    if status is False or status is None:
      return

    # ok need to update internals...

    self._indxr_lattice = lattice
    self._indxr_cell = cell

    Debug.write('Inserting solution: %s ' % lattice +
                '%6.2f %6.2f %6.2f %6.2f %6.2f %6.2f' % cell)

    self._indxr_replace(lattice, cell)

    self._indxr_payload['mosflm_orientation_matrix'] = matrix

    return
Example #23
  def _index_select_images_small_molecule(self):
    '''Select correct images based on image headers. This one is for
    when you have small molecule data and so want more images.'''

    phi_width = self.get_phi_width()
    images = self.get_matching_images()

    Debug.write('Selected image %s' % images[0])

    self.add_indexer_image_wedge(images[0])

    offset = images[0] - 1

    # add an image every 15 degrees up to 90 degrees

    for j in range(6):

      image_number = offset + int(15 * (j + 1) / phi_width)

      if image_number not in images:
        break

      Debug.write('Selected image %s' % image_number)
      self.add_indexer_image_wedge(image_number)

    return
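
With 1.0 degree frames and a sweep starting at image 1 (so offset 0), the loop above requests an image every 15 degrees out to 90 degrees:

  phi_width, offset = 1.0, 0
  print([offset + int(15 * (j + 1) / phi_width) for j in range(6)])
  # [15, 30, 45, 60, 75, 90]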
Example #24
  def _index_remove_masked_regions(self):
    if not PhilIndex.params.xia2.settings.untrusted_rectangle_indexing:
      return

    untrusted_rectangle_indexing \
      = PhilIndex.params.xia2.settings.untrusted_rectangle_indexing
    spot_xds = []
    removed = 0
    with open(self._indxr_payload['SPOT.XDS']) as f:
      lines = f.readlines()
    for record in lines:
      if not record.strip():
        continue
      remove = False
      x, y, phi, i = map(float, record.split()[:4])
      for limits in untrusted_rectangle_indexing:
        if x > limits[0] and x < limits[1] and \
            y > limits[2] and y < limits[3]:
          removed += 1
          remove = True
          break

      if not remove:
        spot_xds.append('%s' % record)

    Debug.write('Removed %d peaks from SPOT.XDS' % removed)
    masked_spot_xds = os.path.splitext(self._indxr_payload['SPOT.XDS'])[0] + '_masked.XDS'
    with open(masked_spot_xds, 'w') as f:
      f.writelines(spot_xds)
    self._indxr_payload['SPOT.XDS'] = masked_spot_xds
    return
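
The limits tuples are ordered (x1, x2, y1, y2), with strict inequalities on both axes; a quick check of the containment test (rectangle values hypothetical):

  limits = (100.0, 200.0, 50.0, 150.0)  # x1, x2, y1, y2

  def in_rectangle(x, y, limits):
    return limits[0] < x < limits[1] and limits[2] < y < limits[3]

  print(in_rectangle(150.0, 100.0, limits))  # True
  print(in_rectangle(250.0, 100.0, limits))  # False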
Example #25
    def __call__(self, fp, images = None):
      from xia2.Handlers.Streams import Debug

      if images is None:
        images = self.select_images(fp)

      images_str = ' '.join(map(str, images))

      if self._spot_file:
        Debug.write('Running mosflm to autoindex from %s' %
                    self._spot_file)
      else:
        Debug.write('Running mosflm to autoindex from images %s' %
                    images_str)

      self.start()
      self.input('template "%s"' % fp.get_template())
      self.input('directory "%s"' % fp.get_directory())
      self.input('beam %f %f' % fp.get_beam_centre())
      self.input('distance %f' % fp.get_distance())
      self.input('wavelength %f' % fp.get_wavelength())

      if self._spot_file:
        self.input('autoindex dps refine image %s file %s' %
                   (images_str, self._spot_file))
      else:
        self.input('autoindex dps refine image %s' % images_str)

      self.input('go')
      self.close_wait()

      from AutoindexHelpers import parse_index_log
      return parse_index_log(self.get_all_output())
Example #26
File: Refiner.py Project: xia2/xia2
  def set_refiner_done(self, done = True):

    frm = inspect.stack()[1]
    mod = inspect.getmodule(frm[0])
    Debug.write('Called refiner done from %s %d (%s)' %
                (mod.__name__, frm[0].f_lineno, done))

    self._refinr_done = done
Example #27
File: Refiner.py Project: xia2/xia2
  def refiner_reset(self):

    Debug.write('Refiner reset')

    self._refinr_done = False
    self._refinr_prepare_done = False
    self._refinr_finish_done = False
    self._refinr_result = None
Example #28
  def get_integrater_finish_done(self):

    if not self.get_integrater_done():
      Debug.write(
          'Resetting integrater finish done as integrate not done')
      self.set_integrater_finish_done(False)

    return self._intgr_finish_done
Example #29
File: Scaler.py Project: xia2/xia2
  def scaler_reset(self):

    Debug.write('Scaler reset')

    self._scalr_done = False
    self._scalr_prepare_done = False
    self._scalr_finish_done = False
    self._scalr_result = None
Example #30
File: Scaler.py Project: xia2/xia2
  def set_scaler_finish_done(self, done = True):

    frm = inspect.stack()[1]
    mod = inspect.getmodule(frm[0])
    Debug.write('Called scaler finish done from %s %d (%s)' %
                (mod.__name__, frm[0].f_lineno, done))

    self._scalr_finish_done = done
Example #31
    def setup_from_xinfo_file(self, xinfo_file):
        '''Set up this object & all subobjects based on the .xinfo
        file contents.'''

        settings = PhilIndex.params.xia2.settings

        sweep_ids = [sweep.id for sweep in settings.sweep]
        sweep_ranges = [sweep.range for sweep in settings.sweep]

        if not sweep_ids:
            sweep_ids = None
            sweep_ranges = None

        xinfo = XInfo(xinfo_file,
                      sweep_ids=sweep_ids,
                      sweep_ranges=sweep_ranges)

        self._name = xinfo.get_project()
        crystals = xinfo.get_crystals()

        for crystal in crystals.keys():
            xc = XCrystal(crystal, self)
            if 'sequence' in crystals[crystal]:
                xc.set_aa_sequence(crystals[crystal]['sequence'])
            if 'ha_info' in crystals[crystal]:
                if crystals[crystal]['ha_info'] != {}:
                    xc.set_ha_info(crystals[crystal]['ha_info'])

            if 'scaled_merged_reflection_file' in crystals[crystal]:
                xc.set_scaled_merged_reflections(
                    crystals[crystal]['scaled_merged_reflection_file'])

            if 'reference_reflection_file' in crystals[crystal]:
                xc.set_reference_reflection_file(
                    crystals[crystal]['reference_reflection_file'])
            if 'freer_file' in crystals[crystal]:
                xc.set_freer_file(crystals[crystal]['freer_file'])

            # user assigned spacegroup
            if 'user_spacegroup' in crystals[crystal]:
                xc.set_user_spacegroup(crystals[crystal]['user_spacegroup'])
            elif settings.space_group is not None:
                # XXX do we ever actually get here?
                xc.set_user_spacegroup(
                    settings.space_group.type().lookup_symbol())

            # add a default sample if none present in xinfo file
            if not crystals[crystal]['samples']:
                crystals[crystal]['samples']['X1'] = {}

            for sample in crystals[crystal]['samples'].keys():
                sample_info = crystals[crystal]['samples'][sample]

                xsample = XSample(sample, xc)
                xc.add_sample(xsample)

            if not crystals[crystal]['wavelengths']:
                raise RuntimeError('No wavelengths specified in xinfo file')

            for wavelength in crystals[crystal]['wavelengths'].keys():
                # FIXME 29/NOV/06 in here need to be able to cope with
                # no wavelength information - this should default to the
                # information in the image header (John Cowan pointed
                # out that this was untidy - requiring that it agrees
                # with the value in the header makes this almost
                # useless.)

                wave_info = crystals[crystal]['wavelengths'][wavelength]

                if 'wavelength' not in wave_info:
                    Debug.write('No wavelength value given for wavelength %s' %
                                wavelength)
                else:
                    Debug.write(
                      'Overriding value for wavelength %s to %8.6f' % \
                        (wavelength, float(wave_info['wavelength'])))

                # handle case where user writes f" in place of f''

                if 'f"' in wave_info and not \
                      'f\'\'' in wave_info:
                    wave_info['f\'\''] = wave_info['f"']

                xw = XWavelength(wavelength,
                                 xc,
                                 wavelength=wave_info.get('wavelength', 0.0),
                                 f_pr=wave_info.get('f\'', 0.0),
                                 f_prpr=wave_info.get('f\'\'', 0.0),
                                 dmin=wave_info.get('dmin', 0.0),
                                 dmax=wave_info.get('dmax', 0.0))

                # in here I also need to look and see if we have
                # been given any scaled reflection files...

                # check to see if we have a user supplied lattice...
                if 'user_spacegroup' in crystals[crystal]:
                    lattice = Syminfo.get_lattice(
                        crystals[crystal]['user_spacegroup'])
                elif settings.space_group is not None:
                    # XXX do we ever actually get here?
                    lattice = Syminfo.get_lattice(
                        settings.space_group.type().lookup_symbol())
                else:
                    lattice = None

                # and also user supplied cell constants - from either
                # the xinfo file (the first port of call) or the
                # command-line.

                if 'user_cell' in crystals[crystal]:
                    cell = crystals[crystal]['user_cell']
                elif settings.unit_cell is not None:
                    # XXX do we ever actually get here?
                    cell = settings.unit_cell.parameters()
                else:
                    cell = None

                dmin = wave_info.get('dmin', 0.0)
                dmax = wave_info.get('dmax', 0.0)

                if dmin == 0.0 and dmax == 0.0:
                    dmin = PhilIndex.params.xia2.settings.resolution.d_min
                    dmax = PhilIndex.params.xia2.settings.resolution.d_max

                # want to be able to locally override the resolution limits
                # for this sweep while leaving the rest for the data set
                # intact...

                for sweep_name in crystals[crystal]['sweeps'].keys():
                    sweep_info = crystals[crystal]['sweeps'][sweep_name]

                    sample_name = sweep_info.get('sample')
                    if sample_name is None:
                        if len(crystals[crystal]['samples']) == 1:
                            sample_name = list(
                                crystals[crystal]['samples'])[0]
                        else:
                            raise RuntimeError('No sample given for sweep %s' %
                                               sweep_name)

                    xsample = xc.get_xsample(sample_name)
                    assert xsample is not None

                    dmin_old = dmin
                    dmax_old = dmax
                    replace = False

                    if 'RESOLUTION' in sweep_info:

                        values = [float(v) for v in sweep_info['RESOLUTION'].split()]
                        if len(values) == 1:
                            dmin = values[0]
                        elif len(values) == 2:
                            dmin = min(values)
                            dmax = max(values)
                        else:
                            raise RuntimeError('bad resolution for sweep %s' %
                                               sweep_name)

                        replace = True

                    if sweep_info['wavelength'] == wavelength:

                        frames_to_process = sweep_info.get('start_end')

                        xsweep = xw.add_sweep(
                            sweep_name,
                            sample=xsample,
                            directory=sweep_info.get('DIRECTORY'),
                            image=sweep_info.get('IMAGE'),
                            beam=sweep_info.get('beam'),
                            reversephi=sweep_info.get('reversephi', False),
                            distance=sweep_info.get('distance'),
                            gain=float(sweep_info.get('GAIN', 0.0)),
                            dmin=dmin,
                            dmax=dmax,
                            polarization=float(
                                sweep_info.get('POLARIZATION', 0.0)),
                            frames_to_process=frames_to_process,
                            user_lattice=lattice,
                            user_cell=cell,
                            epoch=sweep_info.get('epoch', 0),
                            ice=sweep_info.get('ice', False),
                            excluded_regions=sweep_info.get(
                                'excluded_regions', []),
                        )

                        xsample.add_sweep(xsweep)

                    dmin = dmin_old
                    dmax = dmax_old

                xc.add_wavelength(xw)

            self.add_crystal(xc)
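
For reference, a minimal fragment of the kind of .xinfo file this method parses might look like the following (block keywords as in the xia2 documentation; directory, image and resolution values are placeholders):

  BEGIN PROJECT AUTOMATIC
  BEGIN CRYSTAL DEFAULT

  BEGIN WAVELENGTH NATIVE
  WAVELENGTH 0.97950
  END WAVELENGTH NATIVE

  BEGIN SWEEP SWEEP1
  WAVELENGTH NATIVE
  DIRECTORY /path/to/images
  IMAGE frame_0001.cbf
  START_END 1 450
  RESOLUTION 50.0 1.8
  END SWEEP SWEEP1

  END CRYSTAL DEFAULT
  END PROJECT AUTOMATIC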
Example #32
def run():
    if len(sys.argv) < 2 or "-help" in sys.argv or "--help" in sys.argv:
        help()
        sys.exit()

    if "-version" in sys.argv or "--version" in sys.argv:
        print(xia2.XIA2Version.Version)
        print(dials_version())
        ccp4_version = get_ccp4_version()
        if ccp4_version is not None:
            print("CCP4 %s" % ccp4_version)
        sys.exit()

    try:
        check_environment()
    except Exception as e:
        traceback.print_exc(file=open("xia2.error", "w"))
        Debug.write(traceback.format_exc(), strip=False)
        Chatter.write("Error setting up xia2 environment: %s" % str(e))
        Chatter.write(
            "Please send the contents of xia2.txt, xia2.error and xia2-debug.txt to:"
        )
        Chatter.write("*****@*****.**")
        sys.exit(1)

    wd = os.getcwd()

    # Temporarily patch os.chdir() to help identify source of #214
    origpid = os.getpid()
    origchdir = os.chdir

    def chdir_override(arg):
        if os.getpid() != origpid:
            return origchdir(arg)
        # Try to determine the name of the calling module.
        # Use exception trick to pick up the current frame.
        try:
            raise Exception()
        except Exception:
            f = sys.exc_info()[2].tb_frame.f_back

        Debug.write(
            "Directory change to %r in %s:%d" % (arg, f.f_code.co_filename, f.f_lineno)
        )
        return origchdir(arg)

    os.chdir = chdir_override

    try:
        xia2_main()
        Debug.write("\nTiming report:")
        for line in xia2.Driver.timing.report():
            Debug.write(line, strip=False)

        Chatter.write("Status: normal termination")
        return
    except Sorry as s:
        Chatter.write("Error: %s" % str(s))
        sys.exit(1)
    except Exception as e:
        traceback.print_exc(file=open(os.path.join(wd, "xia2.error"), "w"))
        Debug.write(traceback.format_exc(), strip=False)
        Chatter.write("Error: %s" % str(e))
        Chatter.write(
            "Please send the contents of xia2.txt, xia2.error and xia2-debug.txt to:"
        )
        Chatter.write("*****@*****.**")
        sys.exit(1)
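
For reference, the caller's frame grabbed via the raise/except trick above can also be fetched with the CPython-specific sys._getframe (a sketch, not part of xia2):

    import sys

    def calling_location():
        # frame of the caller, one level up from this function
        f = sys._getframe(1)
        return "%s:%d" % (f.f_code.co_filename, f.f_lineno)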
Example #33
def xia2_main(stop_after=None):
    """Actually process something..."""
    Citations.cite("xia2")

    # print versions of related software
    Chatter.write(dials_version())

    ccp4_version = get_ccp4_version()
    if ccp4_version is not None:
        Chatter.write("CCP4 %s" % ccp4_version)

    start_time = time.time()

    CommandLine = get_command_line()
    start_dir = Flags.get_starting_directory()

    # check that something useful has been assigned for processing...
    xtals = CommandLine.get_xinfo().get_crystals()

    no_images = True

    for name in xtals.keys():
        xtal = xtals[name]

        if not xtal.get_all_image_names():

            Chatter.write("-----------------------------------" + "-" * len(name))
            Chatter.write("| No images assigned for crystal %s |" % name)
            Chatter.write("-----------------------------------" + "-" * len(name))
        else:
            no_images = False

    args = []

    from xia2.Handlers.Phil import PhilIndex

    params = PhilIndex.get_python_object()
    mp_params = params.xia2.settings.multiprocessing
    njob = mp_params.njob

    from libtbx import group_args

    xinfo = CommandLine.get_xinfo()

    if (
        params.xia2.settings.developmental.continue_from_previous_job
        and os.path.exists("xia2.json")
    ):
        Debug.write("==== Starting from existing xia2.json ====")
        from xia2.Schema.XProject import XProject

        xinfo_new = xinfo
        xinfo = XProject.from_json(filename="xia2.json")

        crystals = xinfo.get_crystals()
        crystals_new = xinfo_new.get_crystals()
        for crystal_id in crystals_new.keys():
            if crystal_id not in crystals:
                crystals[crystal_id] = crystals_new[crystal_id]
                continue
            crystals[crystal_id]._scaler = None  # reset scaler
            for wavelength_id in crystals_new[crystal_id].get_wavelength_names():
                wavelength_new = crystals_new[crystal_id].get_xwavelength(wavelength_id)
                if wavelength_id not in crystals[crystal_id].get_wavelength_names():
                    crystals[crystal_id].add_wavelength(wavelength_new)
                    continue
                wavelength = crystals[crystal_id].get_xwavelength(wavelength_id)
                sweeps_new = wavelength_new.get_sweeps()
                sweeps = wavelength.get_sweeps()
                sweep_names = [s.get_name() for s in sweeps]
                sweep_keys = [
                    (s.get_directory(), s.get_template(), s.get_image_range())
                    for s in sweeps
                ]
                for sweep in sweeps_new:
                    if (
                        sweep.get_directory(),
                        sweep.get_template(),
                        sweep.get_image_range(),
                    ) not in sweep_keys:
                        if sweep.get_name() in sweep_names:
                            i = 1
                            while "SWEEEP%i" % i in sweep_names:
                                i += 1
                            sweep._name = "SWEEP%i" % i
                            break
                        wavelength.add_sweep(
                            name=sweep.get_name(),
                            sample=sweep.get_xsample(),
                            directory=sweep.get_directory(),
                            image=sweep.get_image(),
                            beam=sweep.get_beam_centre(),
                            reversephi=sweep.get_reversephi(),
                            distance=sweep.get_distance(),
                            gain=sweep.get_gain(),
                            dmin=sweep.get_resolution_high(),
                            dmax=sweep.get_resolution_low(),
                            polarization=sweep.get_polarization(),
                            frames_to_process=sweep.get_frames_to_process(),
                            user_lattice=sweep.get_user_lattice(),
                            user_cell=sweep.get_user_cell(),
                            epoch=sweep._epoch,
                            ice=sweep._ice,
                            excluded_regions=sweep._excluded_regions,
                        )
                        sweep_names.append(sweep.get_name())

    crystals = xinfo.get_crystals()

    failover = params.xia2.settings.failover

    if mp_params.mode == "parallel" and njob > 1:
        driver_type = mp_params.type
        command_line_args = CommandLine.get_argv()[1:]
        for crystal_id in crystals.keys():
            for wavelength_id in crystals[crystal_id].get_wavelength_names():
                wavelength = crystals[crystal_id].get_xwavelength(wavelength_id)
                sweeps = wavelength.get_sweeps()
                for sweep in sweeps:
                    sweep._get_indexer()
                    sweep._get_refiner()
                    sweep._get_integrater()
                    args.append(
                        (
                            group_args(
                                driver_type=driver_type,
                                stop_after=stop_after,
                                failover=failover,
                                command_line_args=command_line_args,
                                nproc=mp_params.nproc,
                                crystal_id=crystal_id,
                                wavelength_id=wavelength_id,
                                sweep_id=sweep.get_name(),
                            ),
                        )
                    )

        from xia2.Driver.DriverFactory import DriverFactory

        default_driver_type = DriverFactory.get_driver_type()

        # run every nth job on the current computer (no need to submit to qsub)
        for i_job, arg in enumerate(args):
            if (i_job % njob) == 0:
                arg[0].driver_type = default_driver_type

        if mp_params.type == "qsub":
            method = "sge"
        else:
            method = "multiprocessing"
        nproc = mp_params.nproc
        qsub_command = mp_params.qsub_command or "qsub"
        qsub_command = "%s -V -cwd -pe smp %d" % (qsub_command, nproc)

        from libtbx import easy_mp

        results = easy_mp.parallel_map(
            process_one_sweep,
            args,
            processes=njob,
            # method=method,
            method="multiprocessing",
            qsub_command=qsub_command,
            preserve_order=True,
            preserve_exception_message=True,
        )

        # Hack to update sweep with the serialized indexers/refiners/integraters
        i_sweep = 0
        for crystal_id in crystals.keys():
            for wavelength_id in crystals[crystal_id].get_wavelength_names():
                wavelength = crystals[crystal_id].get_xwavelength(wavelength_id)
                remove_sweeps = []
                sweeps = wavelength.get_sweeps()
                for sweep in sweeps:
                    success, output, xsweep_dict = results[i_sweep]
                    if output is not None:
                        Chatter.write(output)
                    if not success:
                        Chatter.write("Sweep failed: removing %s" % sweep.get_name())
                        remove_sweeps.append(sweep)
                    else:
                        assert xsweep_dict is not None
                        Chatter.write("Loading sweep: %s" % sweep.get_name())
                        from xia2.Schema.XSweep import XSweep

                        new_sweep = XSweep.from_dict(xsweep_dict)
                        sweep._indexer = new_sweep._indexer
                        sweep._refiner = new_sweep._refiner
                        sweep._integrater = new_sweep._integrater
                    i_sweep += 1
                for sweep in remove_sweeps:
                    wavelength.remove_sweep(sweep)
                    sample = sweep.get_xsample()
                    sample.remove_sweep(sweep)

    else:
        for crystal_id in crystals.keys():
            for wavelength_id in crystals[crystal_id].get_wavelength_names():
                wavelength = crystals[crystal_id].get_xwavelength(wavelength_id)
                remove_sweeps = []
                sweeps = wavelength.get_sweeps()
                for sweep in sweeps:
                    from dials.command_line.show import show_experiments
                    from dxtbx.model.experiment_list import ExperimentListFactory

                    Debug.write(sweep.get_name())
                    Debug.write(
                        show_experiments(
                            ExperimentListFactory.from_imageset_and_crystal(
                                sweep.get_imageset(), None
                            )
                        )
                    )
                    Citations.cite("dials")
                    try:
                        if stop_after == "index":
                            sweep.get_indexer_cell()
                        else:
                            sweep.get_integrater_intensities()
                        sweep.serialize()
                    except Exception as e:
                        if failover:
                            Chatter.write(
                                "Processing sweep %s failed: %s"
                                % (sweep.get_name(), str(e))
                            )
                            remove_sweeps.append(sweep)
                        else:
                            raise
                for sweep in remove_sweeps:
                    wavelength.remove_sweep(sweep)
                    sample = sweep.get_xsample()
                    sample.remove_sweep(sweep)

    # save intermediate xia2.json file in case scaling step fails
    xinfo.as_json(filename="xia2.json")

    if stop_after not in ("index", "integrate"):
        Chatter.write(xinfo.get_output(), strip=False)

    for crystal in crystals.values():
        crystal.serialize()

    # save final xia2.json file in case report generation fails
    xinfo.as_json(filename="xia2.json")

    if stop_after not in ("index", "integrate"):
        # and the summary file
        with open("xia2-summary.dat", "w") as fh:
            for record in xinfo.summarise():
                fh.write("%s\n" % record)

        # looks like this import overwrites the initial command line
        # Phil overrides so... for https://github.com/xia2/xia2/issues/150
        from xia2.command_line.html import generate_xia2_html

        if params.xia2.settings.small_molecule:
            params.xia2.settings.report.xtriage_analysis = False
            params.xia2.settings.report.include_radiation_damage = False

        with xia2.Driver.timing.record_step("xia2.report"):
            generate_xia2_html(
                xinfo, filename="xia2.html", params=params.xia2.settings.report
            )

    duration = time.time() - start_time

    # write out the time taken in a human readable way
    Chatter.write(
        "Processing took %s" % time.strftime("%Hh %Mm %Ss", time.gmtime(duration))
    )

    write_citations()

    # delete all of the temporary mtz files...
    cleanup()
    Environment.cleanup()
Example #34
    def dials_symmetry_indexer_jiffy(
        self, experiments, reflections, refiners, multisweep=False
    ):
        """A jiffy to centralise the interactions between dials.symmetry
        and the Indexer, multisweep edition."""
        # First check format of input against expected input
        assert len(experiments) == len(
            reflections
        ), """
Unequal number of experiments/reflections passed to dials_symmetry_indexer_jiffy"""
        if len(experiments) > 1:
            assert multisweep, """
Passing multple datasets to indexer_jiffy but not set multisweep=True"""

        probably_twinned = False
        reindex_initial = False

        symmetry_analyser = self.dials_symmetry_decide_pointgroup(
            experiments, reflections
        )

        possible = symmetry_analyser.get_possible_lattices()

        Debug.write("Possible lattices (dials.symmetry):")
        Debug.write(" ".join(possible))

        # all refiners contain the same indexer link, so any good here.
        correct_lattice, rerun_symmetry, need_to_return = decide_correct_lattice_using_refiner(
            possible, refiners[0]
        )

        if need_to_return and multisweep:
            if (
                PhilIndex.params.xia2.settings.integrate_p1
                and not PhilIndex.params.xia2.settings.reintegrate_correct_lattice
            ):
                need_to_return = False
                rerun_symmetry = True
            else:
                for refiner in refiners[1:]:
                    refiner.refiner_reset()

        if rerun_symmetry:
            # don't actually need to rerun, just set correct solution - this
            # call updates the relevant info in the Wrapper - but will need to reindex later
            symmetry_analyser.set_correct_lattice(correct_lattice)
            reindex_initial = True
            # rather than reindexing here, just set reindex_initial and let the
            # scaler manage this as necessary

        Debug.write(
            "Symmetry analysis of %s %s"
            % (" ".join(experiments), " ".join(reflections))
        )

        pointgroup = symmetry_analyser.get_pointgroup()
        reindex_op = symmetry_analyser.get_reindex_operator()
        probably_twinned = symmetry_analyser.get_probably_twinned()

        reindexed_reflections = symmetry_analyser.get_output_reflections_filename()
        reindexed_experiments = symmetry_analyser.get_output_experiments_filename()

        Debug.write("Pointgroup: %s (%s)" % (pointgroup, reindex_op))

        return (
            pointgroup,
            reindex_op,
            need_to_return,
            probably_twinned,
            reindexed_reflections,
            reindexed_experiments,
            reindex_initial,
        )
Example #35
        def multi_merge(self):
            '''Merge data from multiple runs - this is very similar to
            the scaling subroutine...'''

            self.check_hklin()
            self.check_hklout()

            if not self._scalepack:
                self.set_task('Scaling reflections from %s => %s' % \
                             (os.path.split(self.get_hklin())[-1],
                              os.path.split(self.get_hklout())[-1]))
            else:
                self.set_task('Scaling reflections from %s => scalepack %s' % \
                             (os.path.split(self.get_hklin())[-1],
                              os.path.split(self.get_hklout())[-1]))

            self.start()

            self._xmlout = os.path.join(self.get_working_directory(),
                                        '%d_aimless.xml' % self.get_xpid())

            self.input('xmlout %d_aimless.xml' % self.get_xpid())
            if not PhilIndex.params.xia2.settings.small_molecule:
                self.input('bins 20')

            if self._new_scales_file:
                self.input('dump %s' % self._new_scales_file)

            if self._resolution:
                self.input('resolution %g' % self._resolution)

            run_number = 0
            for run in self._runs:
                run_number += 1

                if not run[5]:
                    self.input('run %d batch %d to %d' %
                               (run_number, run[0], run[1]))

                if run[6] != 0.0 and not run[5]:
                    self.input('resolution run %d high %g' % \
                               (run_number, run[6]))

            # put in the pname, xname, dname stuff
            run_number = 0
            for run in self._runs:
                run_number += 1

                if run[7]:
                    Debug.write('Run %d corresponds to sweep %s' % \
                                (run_number, run[7]))

                if run[5]:
                    continue

            # we are only merging here so the scales command is
            # dead simple...

            self.input('scales constant')

            if self._anomalous:
                self.input('anomalous on')
            else:
                self.input('anomalous off')

            # FIXME this is probably not ready to be used yet...
            if self._scalepack:
                self.input('output polish unmerged')
            self.input('output unmerged')

            if self._scales_file:
                self.input('onlymerge')
                self.input('restore %s' % self._scales_file)

            self.close_wait()

            # check for errors

            try:
                self.check_for_errors()
                self.check_ccp4_errors()
                self.check_aimless_errors()

                Debug.write('Aimless status: ok')

            except RuntimeError as e:
                try:
                    os.remove(self.get_hklout())
                except Exception:
                    pass

                raise e

            # here get a list of all output files...
            output = self.get_all_output()

            # want to put these into a dictionary at some stage, keyed
            # by the data set id. how this is implemented will depend
            # on the number of datasets...

            # FIXME file names on windows separate out path from
            # drive with ":"... fixed! split on "Filename:"

            # get a list of dataset names...

            datasets = []
            for run in self._runs:
                # cope with case where two runs make one dataset...
                if run[4] not in datasets and not run[5]:
                    datasets.append(run[4])

            hklout_files = []
            hklout_dict = {}

            for i in range(len(output)):
                record = output[i]

                # this is a potential source of problems - if the
                # wavelength name has a _ in it then we are here stuffed!

                if 'Writing merged data for dataset' in record:

                    if len(record.split()) == 9:
                        hklout = output[i + 1].strip()
                    else:
                        hklout = record.split()[9]

                    dname = record.split()[6].split('/')[-1]
                    hklout_dict[dname] = hklout

                    hklout_files.append(hklout)

                elif 'Writing unmerged data for all datasets' in record:
                    if len(record.split()) == 9:
                        hklout = output[i + 1].strip()
                    else:
                        hklout = record.split()[9]

                    self._unmerged_reflections = hklout

            self._scalr_scaled_reflection_files = hklout_dict

            return 'OK'
Example #36
        def scale(self):
            """Actually perform the scaling."""

            # reset the command line in case it has already been run previously
            self.clear_command_line()

            assert len(self._experiments_json)
            assert len(self._reflection_files)
            assert len(self._experiments_json) == len(self._reflection_files)

            for f in self._experiments_json + self._reflection_files:
                assert os.path.isfile(f)
                self.add_command_line(f)

            nproc = PhilIndex.params.xia2.settings.multiprocessing.nproc
            if isinstance(nproc, int) and nproc > 1:
                self.add_command_line("nproc=%i" % nproc)

            if self._intensities == "summation":
                self.add_command_line("intensity_choice=sum")
            elif self._intensities == "profile":
                self.add_command_line("intensity_choice=profile")

            if self._model is not None:
                self.add_command_line("model=%s" % self._model)
            self.add_command_line("full_matrix=%s" % self._full_matrix)
            if self._spacing:
                self.add_command_line("scale_interval=%g" % self._spacing)
            self.add_command_line("optimise_errors=%s" % self._optimise_errors)
            self.add_command_line("outlier_rejection=%s" %
                                  self._outlier_rejection)

            self.add_command_line("absorption_term=%s" %
                                  self._absorption_correction)
            if self._absorption_correction and self._lmax is not None:
                self.add_command_line("lmax=%i" % self._lmax)

            # pass the partiality settings through to the command line
            # rather than computing them and discarding the values
            if self._min_partiality is not None:
                self.add_command_line("min_partiality=%s" %
                                      self._min_partiality)

            if self._partiality_cutoff is not None:
                self.add_command_line("partiality_cutoff=%s" %
                                      self._partiality_cutoff)

            self.add_command_line("decay_term=%s" % self._bfactor)
            if self._bfactor and self._brotation is not None:
                self.add_command_line("decay_interval=%g" % self._brotation)

            if self._export_mtz_only:
                self.add_command_line("export_mtz_only=True")

            # next any 'generic' parameters

            if self._isigma_selection is not None:
                self.add_command_line(
                    "reflection_selection.Isigma_range=%f,%f" %
                    tuple(self._isigma_selection))

            if self._d_min is not None:
                self.add_command_line("cut_data.d_min=%g" % self._d_min)

            if self._d_max is not None:
                self.add_command_line("cut_data.d_max=%g" % self._d_max)

            if self._cycles is not None:
                self.add_command_line("max_iterations=%d" % self._cycles)

            if self._outlier_zmax:
                self.add_command_line("outlier_zmax=%d" % self._outlier_zmax)

            if self._n_resolution_bins:
                self.add_command_line("n_resolution_bins=%d" %
                                      self._n_resolution_bins)
            if self._n_absorption_bins:
                self.add_command_line("n_absorption_bins=%d" %
                                      self._n_absorption_bins)
            if self._best_unit_cell is not None:
                self.add_command_line("best_unit_cell=%s,%s,%s,%s,%s,%s" %
                                      self._best_unit_cell)

            if not self._scaled_experiments:
                self._scaled_experiments = os.path.join(
                    self.get_working_directory(),
                    "%i_scaled.expt" % self.get_xpid())
            if not self._scaled_reflections:
                self._scaled_reflections = os.path.join(
                    self.get_working_directory(),
                    "%i_scaled.refl" % self.get_xpid())
            if not self._unmerged_reflections:
                self._unmerged_reflections = [
                    os.path.join(
                        self.get_working_directory(),
                        "%i_scaled_unmerged.mtz" % self.get_xpid(),
                    )
                ]
            self.add_command_line("output.unmerged_mtz=%s" %
                                  " ".join(self._unmerged_reflections))
            if not self._merged_reflections:
                self._merged_reflections = [
                    os.path.join(self.get_working_directory(),
                                 "%i_scaled.mtz" % self.get_xpid())
                ]
            self.add_command_line("output.merged_mtz=%s" %
                                  " ".join(self._merged_reflections))
            if not self._html:
                self._html = os.path.join(self.get_working_directory(),
                                          "%i_scaling.html" % self.get_xpid())
            self.add_command_line("output.html=%s" % self._html)
            if self._crystal_name:
                self.add_command_line("output.crystal_name=%s" %
                                      self._crystal_name)

            self.add_command_line("output.experiments='%s'" %
                                  self._scaled_experiments)
            self.add_command_line("output.reflections='%s'" %
                                  self._scaled_reflections)

            # run using previously determined scales
            self.start()
            self.close_wait()

            # check for errors

            try:
                self.check_for_errors()
            except Exception:
                Chatter.write(
                    "dials.scale failed, see log file for more details:\n  %s"
                    % self.get_log_file())
                raise

            Debug.write("dials.scale status: OK")

            # here get a list of all output files...
            output = self.get_all_output()
            if not self._export_mtz_only:
                Chatter.write("Completed a round of scaling using dials.scale")
            return "OK"
Example #37
0
    def _index_prepare(self):

        from xia2.Handlers.Citations import Citations

        Citations.cite("dials")

        # all_images = self.get_matching_images()
        # first = min(all_images)
        # last = max(all_images)

        spot_lists = []
        experiments_filenames = []

        for imageset, xsweep in zip(self._indxr_imagesets, self._indxr_sweeps):

            Chatter.banner("Spotfinding %s" % xsweep.get_name())

            first, last = imageset.get_scan().get_image_range()

            # at this stage, break out to run the DIALS code: this sets itself up
            # now cheat and pass in some information... save re-reading all of the
            # image headers

            # FIXME need to adjust this to allow (say) three chunks of images

            from dxtbx.model.experiment_list import ExperimentListFactory

            sweep_filename = os.path.join(self.get_working_directory(),
                                          "%s_import.expt" % xsweep.get_name())
            ExperimentListFactory.from_imageset_and_crystal(
                imageset, None).as_file(sweep_filename)

            genmask = self.GenerateMask()
            genmask.set_input_experiments(sweep_filename)
            genmask.set_output_experiments(
                os.path.join(
                    self.get_working_directory(),
                    "%s_%s_masked.expt" %
                    (genmask.get_xpid(), xsweep.get_name()),
                ))
            genmask.set_params(PhilIndex.params.dials.masking)
            sweep_filename, mask_pickle = genmask.run()
            Debug.write("Generated mask for %s: %s" %
                        (xsweep.get_name(), mask_pickle))

            gain = PhilIndex.params.xia2.settings.input.gain
            if gain is libtbx.Auto:
                gain_estimator = self.EstimateGain()
                gain_estimator.set_sweep_filename(sweep_filename)
                gain_estimator.run()
                gain = gain_estimator.get_gain()
                Chatter.write("Estimated gain: %.2f" % gain)
                PhilIndex.params.xia2.settings.input.gain = gain

            # FIXME this should really use the assigned spot finding regions
            # offset = self.get_frame_offset()
            dfs_params = PhilIndex.params.dials.find_spots
            spotfinder = self.Spotfinder()
            if last - first > 10:
                spotfinder.set_write_hot_mask(True)
            spotfinder.set_input_sweep_filename(sweep_filename)
            spotfinder.set_output_sweep_filename(
                "%s_%s_strong.expt" %
                (spotfinder.get_xpid(), xsweep.get_name()))
            spotfinder.set_input_spot_filename(
                "%s_%s_strong.refl" %
                (spotfinder.get_xpid(), xsweep.get_name()))
            if PhilIndex.params.dials.fast_mode:
                wedges = self._index_select_images_i(imageset)
                spotfinder.set_scan_ranges(wedges)
            else:
                spotfinder.set_scan_ranges([(first, last)])
            if dfs_params.phil_file is not None:
                spotfinder.set_phil_file(dfs_params.phil_file)
            if dfs_params.min_spot_size is libtbx.Auto:
                if imageset.get_detector()[0].get_type() == "SENSOR_PAD":
                    dfs_params.min_spot_size = 3
                else:
                    dfs_params.min_spot_size = None
            if dfs_params.min_spot_size is not None:
                spotfinder.set_min_spot_size(dfs_params.min_spot_size)
            if dfs_params.min_local is not None:
                spotfinder.set_min_local(dfs_params.min_local)
            if dfs_params.sigma_strong:
                spotfinder.set_sigma_strong(dfs_params.sigma_strong)
            gain = PhilIndex.params.xia2.settings.input.gain
            if gain:
                spotfinder.set_gain(gain)
            if dfs_params.filter_ice_rings:
                spotfinder.set_filter_ice_rings(dfs_params.filter_ice_rings)
            if dfs_params.kernel_size:
                spotfinder.set_kernel_size(dfs_params.kernel_size)
            if dfs_params.global_threshold is not None:
                spotfinder.set_global_threshold(dfs_params.global_threshold)
            if dfs_params.threshold.algorithm is not None:
                spotfinder.set_threshold_algorithm(
                    dfs_params.threshold.algorithm)
            spotfinder.run()

            spot_filename = spotfinder.get_spot_filename()
            if not os.path.exists(spot_filename):
                raise RuntimeError("Spotfinding failed: %s does not exist." %
                                   os.path.basename(spot_filename))

            spot_lists.append(spot_filename)
            experiments_filenames.append(
                spotfinder.get_output_sweep_filename())

            from dials.util.ascii_art import spot_counts_per_image_plot

            refl = flex.reflection_table.from_file(spot_filename)
            if not len(refl):
                raise RuntimeError("No spots found in sweep %s" %
                                   xsweep.get_name())
            Chatter.write(spot_counts_per_image_plot(refl), strip=False)

            if not PhilIndex.params.dials.fast_mode:
                detectblanks = self.DetectBlanks()
                detectblanks.set_sweep_filename(experiments_filenames[-1])
                detectblanks.set_reflections_filename(spot_filename)
                detectblanks.run()
                json = detectblanks.get_results()
                blank_regions = json["strong"]["blank_regions"]
                if len(blank_regions):
                    blank_regions = [(int(s), int(e))
                                     for s, e in blank_regions]
                    for blank_start, blank_end in blank_regions:
                        Chatter.write(
                            "WARNING: Potential blank images: %i -> %i" %
                            (blank_start + 1, blank_end))

                    if PhilIndex.params.xia2.settings.remove_blanks:
                        non_blanks = []
                        start, end = imageset.get_array_range()
                        last_blank_end = start
                        for blank_start, blank_end in blank_regions:
                            if blank_start > start:
                                non_blanks.append(
                                    (last_blank_end, blank_start))
                            last_blank_end = blank_end

                        if last_blank_end + 1 < end:
                            non_blanks.append((last_blank_end, end))

                        xsweep = self.get_indexer_sweep()
                        xwav = xsweep.get_wavelength()
                        xsample = xsweep.get_xsample()

                        sweep_name = xsweep.get_name()
                        import string

                        for i, (nb_start, nb_end) in enumerate(non_blanks):
                            assert i < 26
                            if i == 0:
                                sub_imageset = imageset[nb_start -
                                                        start:nb_end - start]
                                xsweep._frames_to_process = (nb_start + 1,
                                                             nb_end + 1)
                                self.set_indexer_prepare_done(done=False)
                                self._indxr_imagesets[
                                    self._indxr_imagesets.index(
                                        imageset)] = sub_imageset
                                xsweep._integrater._setup_from_imageset(
                                    sub_imageset)
                            else:
                                min_images = (PhilIndex.params.xia2.settings.
                                              input.min_images)
                                if (nb_end - nb_start) < min_images:
                                    continue
                                new_name = "_".join(
                                    (sweep_name, string.ascii_lowercase[i]))
                                new_sweep = xwav.add_sweep(
                                    new_name,
                                    xsample,
                                    directory=os.path.join(
                                        os.path.basename(
                                            xsweep.get_directory()),
                                        new_name,
                                    ),
                                    image=imageset.get_path(nb_start - start),
                                    frames_to_process=(nb_start + 1, nb_end),
                                )
                                Chatter.write(
                                    "Generating new sweep: %s (%s:%i:%i)" % (
                                        new_sweep.get_name(),
                                        new_sweep.get_image(),
                                        new_sweep.get_frames_to_process()[0],
                                        new_sweep.get_frames_to_process()[1],
                                    ))
                        return

            if not PhilIndex.params.xia2.settings.trust_beam_centre:
                discovery = self.DiscoverBetterExperimentalModel()
                discovery.set_sweep_filename(experiments_filenames[-1])
                discovery.set_spot_filename(spot_filename)
                try:
                    discovery.run()
                except Exception as e:
                    Debug.write("DIALS beam centre search failed: %s" % str(e))
                else:
                    # overwrite indexed.expt in experiments list
                    experiments_filenames[
                        -1] = discovery.get_optimized_experiments_filename()

        self.set_indexer_payload("spot_lists", spot_lists)
        self.set_indexer_payload("experiments", experiments_filenames)
Example #38
0
    def _index(self):
        '''Actually index the diffraction pattern. Note well that this
        is not going to compute the matrix...'''

        # acknowledge this program

        if not self._indxr_images:
            raise RuntimeError('No good spots found on any images')

        Citations.cite('labelit')
        Citations.cite('distl')

        _images = []
        for i in self._indxr_images:
            for j in i:
                if j not in _images:
                    _images.append(j)

        _images.sort()

        images_str = '%d' % _images[0]
        for i in _images[1:]:
            images_str += ', %d' % i

        cell_str = None
        if self._indxr_input_cell:
            cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % \
                        self._indxr_input_cell

        if self._indxr_sweep_name:

            # then this is a proper autoindexing run - describe this
            # to the journal entry

            if len(self._fp_directory) <= 50:
                dirname = self._fp_directory
            else:
                dirname = '...%s' % self._fp_directory[-46:]

            Journal.block(
                'autoindexing', self._indxr_sweep_name, 'labelit', {
                    'images': images_str,
                    'target cell': cell_str,
                    'target lattice': self._indxr_input_lattice,
                    'template': self._fp_template,
                    'directory': dirname
                })

        #auto_logfiler(self)

        from xia2.Wrappers.Labelit.LabelitIndex import LabelitIndex
        index = LabelitIndex()
        index.set_working_directory(self.get_working_directory())
        auto_logfiler(index)

        #task = 'Autoindex from images:'

        #for i in _images:
        #task += ' %s' % self.get_image_name(i)

        #self.set_task(task)

        #self.add_command_line('--index_only')

        Debug.write('Indexing from images:')
        for i in _images:
            index.add_image(self.get_image_name(i))
            Debug.write('%s' % self.get_image_name(i))

        # NB deliberately disabled ('and False') - do not pass the input
        # lattice through to labelit
        if self._indxr_input_lattice and False:
            index.set_space_group_number(
                lattice_to_spacegroup(self._indxr_input_lattice))

        if self._primitive_unit_cell:
            index.set_primitive_unit_cell(self._primitive_unit_cell)

        if self._indxr_input_cell:
            index.set_max_cell(1.25 * max(self._indxr_input_cell[:3]))

        xsweep = self.get_indexer_sweep()
        if xsweep is not None:
            if xsweep.get_distance() is not None:
                index.set_distance(xsweep.get_distance())
            #if self.get_wavelength_prov() == 'user':
            #index.set_wavelength(self.get_wavelength())
            if xsweep.get_beam_centre() is not None:
                index.set_beam_centre(xsweep.get_beam_centre())

        if self._refine_beam is False:
            index.set_refine_beam(False)
        else:
            index.set_refine_beam(True)
            index.set_beam_search_scope(self._beam_search_scope)

        if ((math.fabs(self.get_wavelength() - 1.54) < 0.01)
                or (math.fabs(self.get_wavelength() - 2.29) < 0.01)):
            index.set_Cu_KA_or_Cr_KA(True)

        try:
            index.run()
        except RuntimeError as e:

            if self._refine_beam is False:
                raise e

            # can we improve the situation?

            if self._beam_search_scope < 4.0:
                self._beam_search_scope += 4.0

                # try repeating the indexing!

                self.set_indexer_done(False)
                return 'failed'

            # otherwise this is beyond redemption

            raise e

        self._solutions = index.get_solutions()

        # FIXME this needs to check the smilie status e.g.
        # ":)" or ";(" or "  ".

        # FIXME need to check the value of the RMSD and raise an
        # exception if the P1 solution has an RMSD > 1.0...

        # Change 27/FEB/08 to support user assigned spacegroups
        # (euugh!) have to "ignore" solutions with higher symmetry
        # otherwise the rest of xia will override us. Bummer.

        # take a copy of the items so that entries can safely be deleted
        # while iterating (also avoids the Python 2-only iteritems())
        for i, solution in list(self._solutions.items()):
            if self._indxr_user_input_lattice:
                if (lattice_to_spacegroup(solution['lattice']) >
                        lattice_to_spacegroup(self._indxr_input_lattice)):
                    Debug.write('Ignoring solution: %s' % solution['lattice'])
                    del self._solutions[i]

        # check the RMSD from the triclinic unit cell
        if self._solutions[1]['rmsd'] > 1.0 and False:
            # don't know when this is useful - but I know when it is not!
            raise RuntimeError('high RMSD for triclinic solution')

        # configure the "right" solution
        self._solution = self.get_solution()

        # now store also all of the other solutions... keyed by the
        # lattice - however these should only be added if they
        # have a smiley in the appropriate record, perhaps?

        for solution in self._solutions.keys():
            lattice = self._solutions[solution]['lattice']
            if lattice in self._indxr_other_lattice_cell:
                if self._indxr_other_lattice_cell[lattice]['goodness'] < \
                   self._solutions[solution]['metric']:
                    continue

            self._indxr_other_lattice_cell[lattice] = {
                'goodness': self._solutions[solution]['metric'],
                'cell': self._solutions[solution]['cell']
            }

        self._indxr_lattice = self._solution['lattice']
        self._indxr_cell = tuple(self._solution['cell'])
        self._indxr_mosaic = self._solution['mosaic']

        lms = LabelitMosflmMatrix()
        lms.set_working_directory(self.get_working_directory())
        lms.set_solution(self._solution['number'])
        self._indxr_payload['mosflm_orientation_matrix'] = lms.calculate()

        # get the beam centre from the mosflm script - mosflm
        # may have inverted the beam centre and labelit will know
        # this!

        mosflm_beam_centre = lms.get_mosflm_beam()

        if mosflm_beam_centre:
            self._indxr_payload['mosflm_beam_centre'] = tuple(
                mosflm_beam_centre)

        import copy
        detector = copy.deepcopy(self.get_detector())
        beam = copy.deepcopy(self.get_beam())
        from dxtbx.model.detector_helpers import set_mosflm_beam_centre
        set_mosflm_beam_centre(detector, beam, mosflm_beam_centre)

        from xia2.Experts.SymmetryExpert import lattice_to_spacegroup_number
        from scitbx import matrix
        from cctbx import sgtbx, uctbx
        from dxtbx.model import CrystalFactory
        mosflm_matrix = matrix.sqr([
            float(i) for line in lms.calculate()
            for i in line.replace("-", " -").split()
        ][:9])

        space_group = sgtbx.space_group_info(
            lattice_to_spacegroup_number(self._solution['lattice'])).group()
        crystal_model = CrystalFactory.from_mosflm_matrix(
            mosflm_matrix,
            unit_cell=uctbx.unit_cell(tuple(self._solution['cell'])),
            space_group=space_group)

        from dxtbx.model import Experiment, ExperimentList
        experiment = Experiment(
            beam=beam,
            detector=detector,
            goniometer=self.get_goniometer(),
            scan=self.get_scan(),
            crystal=crystal_model,
        )

        experiment_list = ExperimentList([experiment])
        self.set_indexer_experiment_list(experiment_list)

        # also get an estimate of the resolution limit from the
        # labelit.stats_distl output... FIXME the name is wrong!

        lsd = LabelitStats_distl()
        lsd.set_working_directory(self.get_working_directory())
        lsd.stats_distl()

        resolution = 1.0e6
        for i in _images:
            stats = lsd.get_statistics(self.get_image_name(i))

            resol = 0.5 * (stats['resol_one'] + stats['resol_two'])

            if resol < resolution:
                resolution = resol

        self._indxr_resolution_estimate = resolution

        return 'ok'
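
The bookkeeping of "other" lattice solutions above keeps, for each lattice, only the solution with the lowest penalty. A minimal standalone sketch of that rule (names hypothetical; lower 'metric' is better, mirroring the comparison above):

def best_per_lattice(solutions):
    best = {}
    for s in solutions:
        lattice = s['lattice']
        if lattice in best and best[lattice]['goodness'] < s['metric']:
            continue
        best[lattice] = {'goodness': s['metric'], 'cell': s['cell']}
    return best

solutions = [
    {'lattice': 'mP', 'metric': 0.12, 'cell': (40, 50, 60, 90, 95, 90)},
    {'lattice': 'mP', 'metric': 0.08, 'cell': (40, 50, 60, 90, 94, 90)},
]
print(best_per_lattice(solutions))  # keeps the metric 0.08 solution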
Example #39
0
    def continue_from_error(self):
      # copy the LP file
      shutil.copyfile(os.path.join(self.get_working_directory(),
                                   'IDXREF.LP'),
                      os.path.join(self.get_working_directory(),
                                   '%d_IDXREF.LP' % self.get_xpid()))

      # parse the output
      with open(os.path.join(
          self.get_working_directory(), 'IDXREF.LP'), 'r') as f:
        lp = f.readlines()

      self._fraction_rmsd_rmsphi = _parse_idxref_lp_quality(lp)

      self._idxref_data = _parse_idxref_lp(lp)

      if not self._idxref_data:
        raise RuntimeError('indexing failed')

      st = _parse_idxref_lp_subtree(lp)

      if 2 in st:

        if st[2] > st[1] / 10.0:
          Debug.write('Look closely at autoindexing solution!')
          self._index_tree_problem = True
          for j in sorted(st):
            Debug.write('%2d: %5d' % (j, st[j]))

      # print out some (perhaps dire) warnings about the beam centre
      # if there is really any ambiguity...

      origins = _parse_idxref_index_origin(lp)

      assert (0, 0, 0) in origins

      quality_0 = origins[(0, 0, 0)][0]

      alternatives = []

      for hkl in origins:
        if hkl == (0, 0, 0):
          continue
        if origins[hkl][0] < 4 * quality_0:
          quality, delta, beam_x, beam_y = origins[hkl]
          alternatives.append((hkl[0], hkl[1], hkl[2],
                               quality, beam_x, beam_y))

      if alternatives:
        Debug.write('Alternative indexing possible:')
        for alternative in alternatives:
          Debug.write('... %3d %3d %3d %4.1f %6.1f %6.1f' % \
                      alternative)

      # New algorithm in here - now use iotbx.lattice_symmetry with the
      # P1 indexing solution (solution #1) to determine the list of
      # allowable solutions - only consider those lattices in this
      # allowed list (unless we have user input)

      from xia2.Wrappers.Phenix.LatticeSymmetry import LatticeSymmetry
      ls = LatticeSymmetry()
      ls.set_lattice('aP')
      ls.set_cell(tuple(self._idxref_data[44]['cell']))
      ls.generate()

      allowed_lattices = ls.get_lattices()

      for j in range(1, 45):
        if j not in self._idxref_data:
          continue
        data = self._idxref_data[j]
        lattice = data['lattice']
        fit = data['fit']
        cell = data['cell']
        mosaic = data['mosaic']
        reidx = data['reidx']

        if self._symm and self._cell and \
               self._indxr_user_input_lattice:

          if self._compare_cell(self._cell, cell) and \
                 lattice_to_spacegroup_number(lattice) == self._symm:
            if lattice in self._indexing_solutions:
              if self._indexing_solutions[lattice][
                  'goodness'] < fit:
                continue

            self._indexing_solutions[lattice] = {
                'goodness':fit,
                'cell':cell}

        else:
          if lattice in allowed_lattices or \
              (self._symm and fit < 200.0):
            # bug 2417 - if we have an input lattice then we
            # don't want to include anything higher symmetry
            # in the results table...

            if self._symm:
              if lattice_to_spacegroup_number(lattice) \
                     > self._symm:
                Debug.write(
                    'Ignoring solution with lattice %s' % \
                    lattice)
                continue

            if lattice in self._indexing_solutions:
              if self._indexing_solutions[lattice][
                  'goodness'] < fit:
                continue

            self._indexing_solutions[lattice] = {
                'goodness':fit,
                'cell':cell}

      # postprocess this list, to remove lattice solutions which are
      # lower symmetry but higher penalty than the putative correct
      # one, if self._symm is set...

      if self._symm:
        assert len(self._indexing_solutions) > 0, \
            "No remaining indexing solutions (%s, %s)" % \
            (s2l(self._symm), self._symm)
      else:
        assert len(self._indexing_solutions) > 0, \
            "No remaining indexing solutions"

      if self._symm:
        max_p = 2.0 * self._indexing_solutions[
            s2l(self._symm)]['goodness']
        to_remove = []
        for lattice in self._indexing_solutions:
          if self._indexing_solutions[lattice]['goodness'] > max_p:
            to_remove.append(lattice)
        for lattice in to_remove:
          Debug.write('Ignoring solution with lattice %s' % \
                      lattice)
          del(self._indexing_solutions[lattice])


      # get the highest symmetry "acceptable" solution (avoid shadowing
      # the builtin list())

      solutions = [(k, self._indexing_solutions[k]['cell'])
                   for k in self._indexing_solutions]

      # if there was a preassigned cell and symmetry return now
      # with everything done, else select the "top" solution and
      # reindex, resetting the input cell and symmetry.

      if self._cell:

        # select the solution which matches the input unit cell
        # actually after the changes above this should now be the
        # only solution in the table..

        Debug.write(
            'Target unit cell: %.2f %.2f %.2f %.2f %.2f %.2f' % \
            self._cell)

        for lattice, cell in solutions:
          if lattice_to_spacegroup_number(lattice) == self._symm:
            # this should be the correct solution...
            # check the unit cell...

            # NB the 'or True' deliberately short-circuits the comparison,
            # so the matching solution is always accepted
            if self._compare_cell(self._cell, cell) or True:

              cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % cell
              Debug.write(
              'Chosen unit cell: %s' % cell_str)

              self._indxr_lattice = lattice
              self._indxr_cell = cell
              # NB mosaic is left over from the last solution parsed in
              # the loop over self._idxref_data above
              self._indxr_mosaic = mosaic

            else:

              cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % cell
              Debug.write(
              'Ignoring unit cell: %s' % cell_str)

      else:

        # select the top solution as the input cell and reset the
        # "indexing done" flag

        sorted_list = SortLattices(solutions)

        self._symm = lattice_to_spacegroup_number(sorted_list[0][0])
        self._cell = sorted_list[0][1]

        return False

      # get the refined distance &c.

      beam, distance = _parse_idxref_lp_distance_etc(lp)

      self._refined_beam = beam
      self._refined_distance = distance

      # gather the output files

      for file in self._output_data_files_list:
        self._output_data_files[file] = os.path.join(
          self.get_working_directory(), file)

      return True
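
The beam-centre ambiguity warning above reduces to a simple quality test on the IDXREF index origins: any non-zero origin whose quality is within a factor of four of the (0, 0, 0) origin is a plausible alternative. A runnable sketch (hypothetical helper and data; each origin maps to (quality, delta, beam_x, beam_y), as in the parsing above):

def alternative_origins(origins):
    quality_0 = origins[(0, 0, 0)][0]
    alternatives = []
    for hkl, (quality, delta, beam_x, beam_y) in origins.items():
        if hkl != (0, 0, 0) and quality < 4 * quality_0:
            alternatives.append((hkl, quality, beam_x, beam_y))
    return alternatives

origins = {
    (0, 0, 0): (1.2, 0.0, 94.1, 95.0),
    (1, 0, 0): (3.9, 0.4, 96.3, 95.1),   # within 4x -> flagged
    (0, 1, 0): (9.7, 1.2, 94.0, 97.8),   # too poor -> ignored
}
print(alternative_origins(origins))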
Example #40
0
    def _index_finish(self):
        """Perform the indexer post-processing as required."""

        # ok, in here now ask if this solution was sensible!

        if not self.get_indexer_user_input_lattice():

            lattice = self._indxr_lattice
            cell = self._indxr_cell

            lattice2, cell2 = xds_check_indexer_solution(
                os.path.join(self.get_working_directory(), "XPARM.XDS"),
                os.path.join(self.get_working_directory(), "SPOT.XDS"),
            )

            Debug.write("Centring analysis: %s => %s" % (lattice, lattice2))

            doubled_lattice = False
            for j in range(3):
                if int(round(cell2[j] / cell[j])) == 2:
                    doubled_lattice = True
                    axes = "A", "B", "C"
                    Debug.write("Lattice axis doubled: %s" % axes[j])

            if (self._idxref_subtree_problem and
                (lattice2 != lattice)) or doubled_lattice:

                # hmm.... looks like we don't agree on the correct result...
                # update the putative correct result as input

                Debug.write("Detected pseudocentred lattice")
                Debug.write("Inserting solution: %s " % lattice2 +
                            "%6.2f %6.2f %6.2f %6.2f %6.2f %6.2f" % cell2)

                self._indxr_replace(lattice2, cell2)

                Debug.write("Set lattice: %s" % lattice2)
                Debug.write("Set cell: %f %f %f %f %f %f" % cell2)

                # then rerun

                self.set_indexer_done(False)
                return

        # finally read through SPOT.XDS and XPARM.XDS to get an estimate
        # of the low resolution limit - this should be pretty straightforward
        # since what I want is the resolution of the lowest resolution indexed
        # spot..

        spot_file = os.path.join(self.get_working_directory(), "SPOT.XDS")

        experiment = self.get_indexer_experiment_list()[0]
        crystal_model = experiment.crystal

        from iotbx.xds import spot_xds

        spot_xds_handle = spot_xds.reader()
        spot_xds_handle.read_file(spot_file)

        from cctbx.array_family import flex

        miller_indices = flex.miller_index(spot_xds_handle.miller_index)

        # only those reflections that were actually indexed
        miller_indices = miller_indices.select(miller_indices != (0, 0, 0))

        from scitbx import matrix

        ub = matrix.sqr(crystal_model.get_A())
        dmax = 1.05 * flex.max(
            1 / (ub.elems * miller_indices.as_vec3_double()).norms())

        Debug.write("Low resolution limit assigned as: %.2f" % dmax)
        self._indxr_low_resolution = dmax
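
The d-max estimate above is just: for each indexed reflection h, resolution d = 1/|UB.h|, and the limit is 1.05x the largest d. A plain-Python sketch without the cctbx types (the UB matrix below is a hypothetical orthorhombic example with cell 50, 80, 100 A):

def low_resolution_limit(ub, miller_indices):
    def d_spacing(h):
        x = [sum(ub[i][j] * h[j] for j in range(3)) for i in range(3)]
        return 1.0 / (x[0] ** 2 + x[1] ** 2 + x[2] ** 2) ** 0.5
    return 1.05 * max(d_spacing(h) for h in miller_indices if h != (0, 0, 0))

ub = [[0.02, 0.0, 0.0], [0.0, 0.0125, 0.0], [0.0, 0.0, 0.01]]
print("%.2f" % low_resolution_limit(ub, [(1, 0, 0), (0, 1, 0), (2, 3, 1)]))
# 84.00 (the (0,1,0) spot at d = 80 A dominates)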
Example #41
0
    def _index(self):
        """Actually do the autoindexing using the data prepared by the
        previous method."""

        images_str = "%d to %d" % tuple(self._indxr_images[0])
        for i in self._indxr_images[1:]:
            images_str += ", %d to %d" % tuple(i)

        cell_str = None
        if self._indxr_input_cell:
            cell_str = "%.2f %.2f %.2f %.2f %.2f %.2f" % self._indxr_input_cell

        # then this is a proper autoindexing run - describe this
        # to the journal entry

        dirname = self.get_directory()

        Journal.block(
            "autoindexing",
            self._indxr_sweep_name,
            "XDS",
            {
                "images": images_str,
                "target cell": cell_str,
                "target lattice": self._indxr_input_lattice,
                "template": self.get_template(),
                "directory": dirname,
            },
        )

        idxref = self.Idxref()

        self._index_remove_masked_regions()
        for file in ["SPOT.XDS"]:
            idxref.set_input_data_file(file, self._indxr_payload[file])

        # edit SPOT.XDS to remove reflections in untrusted regions of the detector

        idxref.set_data_range(self._indxr_images[0][0],
                              self._indxr_images[0][1])
        idxref.set_background_range(self._indxr_images[0][0],
                                    self._indxr_images[0][1])

        # set the phi start etc correctly

        for block in self._indxr_images[:1]:
            starting_frame = block[0]
            starting_angle = self.get_scan().get_angle_from_image_index(
                starting_frame)

            idxref.set_starting_frame(starting_frame)
            idxref.set_starting_angle(starting_angle)

            idxref.add_spot_range(block[0], block[1])

        for block in self._indxr_images[1:]:
            idxref.add_spot_range(block[0], block[1])

        if self._indxr_user_input_lattice:
            idxref.set_indexer_user_input_lattice(True)

        if self._indxr_input_lattice and self._indxr_input_cell:
            idxref.set_indexer_input_lattice(self._indxr_input_lattice)
            idxref.set_indexer_input_cell(self._indxr_input_cell)

            Debug.write("Set lattice: %s" % self._indxr_input_lattice)
            Debug.write("Set cell: %f %f %f %f %f %f" % self._indxr_input_cell)

            original_cell = self._indxr_input_cell
        elif self._indxr_input_lattice:
            idxref.set_indexer_input_lattice(self._indxr_input_lattice)
            original_cell = None
        else:
            original_cell = None

        from dxtbx.serialize.xds import to_xds

        converter = to_xds(self.get_imageset())
        xds_beam_centre = converter.detector_origin

        idxref.set_beam_centre(xds_beam_centre[0], xds_beam_centre[1])

        # fixme need to check if the lattice, cell have been set already,
        # and if they have, pass these in as input to the indexing job.

        done = False

        while not done:
            try:
                done = idxref.run()

                # N.B. if the IDXREF step fails on this first pass, done
                # is False; however a refined P1 orientation matrix etc.
                # should still be available - so keep it!

            except XDSException as e:
                # inspect this - if we have complaints about not
                # enough reflections indexed, and we have a target
                # unit cell, and they are the same, well ignore it

                if "solution is inaccurate" in str(e):
                    Debug.write("XDS complains solution inaccurate - ignoring")
                    done = idxref.continue_from_error()
                elif ("insufficient percentage (< 70%)" in str(e)
                      or "insufficient percentage (< 50%)"
                      in str(e)) and original_cell:
                    done = idxref.continue_from_error()
                    lattice, cell, mosaic = idxref.get_indexing_solution()
                    # compare solutions FIXME should use xds_cell_deviation
                    check = PhilIndex.params.xia2.settings.xds_check_cell_deviation
                    for j in range(3):
                        # allow two percent variation in unit cell length
                        if (math.fabs(
                            (cell[j] - original_cell[j]) / original_cell[j]) >
                                0.02 and check):
                            Debug.write("XDS unhappy and solution wrong")
                            raise e
                        # and two degree difference in angle
                        if (math.fabs(cell[j + 3] - original_cell[j + 3]) > 2.0
                                and check):
                            Debug.write("XDS unhappy and solution wrong")
                            raise e
                    Debug.write("XDS unhappy but solution ok")
                elif "insufficient percentage (< 70%)" in str(
                        e) or "insufficient percentage (< 50%)" in str(e):
                    Debug.write("XDS unhappy but solution probably ok")
                    done = idxref.continue_from_error()
                else:
                    raise e

        FileHandler.record_log_file(
            "%s INDEX" % self.get_indexer_full_name(),
            os.path.join(self.get_working_directory(), "IDXREF.LP"),
        )

        for file in ["SPOT.XDS", "XPARM.XDS"]:
            self._indxr_payload[file] = idxref.get_output_data_file(file)

        # need to get the indexing solutions out somehow...

        self._indxr_other_lattice_cell = idxref.get_indexing_solutions()

        self._indxr_lattice, self._indxr_cell, self._indxr_mosaic = (
            idxref.get_indexing_solution())

        import dxtbx
        from dxtbx.serialize.xds import to_crystal

        xparm_file = os.path.join(self.get_working_directory(), "XPARM.XDS")
        models = dxtbx.load(xparm_file)
        crystal_model = to_crystal(xparm_file)

        from dxtbx.model import Experiment, ExperimentList

        experiment = Experiment(
            beam=models.get_beam(),
            detector=models.get_detector(),
            goniometer=models.get_goniometer(),
            scan=models.get_scan(),
            crystal=crystal_model,
            # imageset=self.get_imageset(),
        )

        experiment_list = ExperimentList([experiment])
        self.set_indexer_experiment_list(experiment_list)

        # I will want this later on to check that the lattice was ok
        self._idxref_subtree_problem = idxref.get_index_tree_problem()

        return
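
The error-recovery branch above accepts an "inaccurate" XDS solution only when the refined cell stays close to the target: within 2% on each length and 2 degrees on each angle. The same test as a standalone check (hypothetical helper name):

def cell_within_tolerance(cell, target, length_tol=0.02, angle_tol=2.0):
    for j in range(3):
        if abs((cell[j] - target[j]) / target[j]) > length_tol:
            return False
        if abs(cell[j + 3] - target[j + 3]) > angle_tol:
            return False
    return True

print(cell_within_tolerance((78.4, 78.6, 37.1, 90.0, 90.0, 90.0),
                            (78.0, 78.0, 37.0, 90.0, 90.0, 90.0)))  # True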
Example #42
0
        def scale(self):
            '''Actually perform the scaling.'''

            self.check_hklin()
            self.check_hklout()

            if self._chef_unmerged and self._scalepack:
                raise RuntimeError('CHEF and scalepack incompatible')

            if self._onlymerge:
                raise RuntimeError('use merge() method')

            if not self._scalepack:
                self.set_task('Scaling reflections from %s => %s' % \
                             (os.path.split(self.get_hklin())[-1],
                              os.path.split(self.get_hklout())[-1]))
            else:
                self.set_task('Scaling reflections from %s => scalepack %s' % \
                             (os.path.split(self.get_hklin())[-1],
                              os.path.split(self.get_hklout())[-1]))

            self._xmlout = os.path.join(self.get_working_directory(),
                                        '%d_aimless.xml' % self.get_xpid())

            self.start()

            nproc = PhilIndex.params.xia2.settings.multiprocessing.nproc
            if isinstance(nproc, int) and nproc > 1:
                self.set_working_environment('OMP_NUM_THREADS', '%d' % nproc)
                self.input('refine parallel')
            self.input('xmlout %d_aimless.xml' % self.get_xpid())
            if not PhilIndex.params.xia2.settings.small_molecule:
                self.input('bins 20')
            self.input('intensities %s' % self._intensities)

            if self._new_scales_file:
                self.input('dump %s' % self._new_scales_file)

            for run_number, run in enumerate(self._runs, start=1):
                if not run[5]:
                    self.input('run %d batch %d to %d' %
                               (run_number, run[0], run[1]))

                if run[6] != 0.0 and not run[5]:
                    self.input('resolution run %d high %g' % \
                               (run_number, run[6]))

            for run_number, run in enumerate(self._runs, start=1):
                if run[7]:
                    Debug.write('Run %d corresponds to sweep %s' % \
                                (run_number, run[7]))

            self.input('sdcorrection same')

            # FIXME this is a bit of a hack - should be better determined
            # than this...
            if PhilIndex.params.xia2.settings.small_molecule:
                #self.input('sdcorrection tie sdfac 0.707 0.3 tie sdadd 0.01 0.05')
                #self.input('reject all 30')
                self.input('sdcorrection fixsdb')

            if self._secondary_lmax and self._surface_tie:
                self.input('tie surface %.4f' % self._surface_tie)
                if not self._surface_link:
                    self.input('unlink all')

            # assemble the scales command
            if self._mode == 'rotation':
                scale_command = 'scales rotation spacing %g' % self._spacing

                if self._secondary_lmax is not None:
                    scale_command += ' %s %d' % \
                      (self._secondary, int(self._secondary_lmax))
                else:
                    scale_command += ' %s' % self._secondary

                if self._bfactor:
                    scale_command += ' bfactor on'

                    if self._brotation:
                        scale_command += ' brotation %g' % \
                                         self._brotation

                else:
                    scale_command += ' bfactor off'

                self.input(scale_command)

            else:

                scale_command = 'scales batch'

                if self._bfactor:
                    scale_command += ' bfactor on'

                    if self._brotation:
                        scale_command += ' brotation %g' % \
                                         self._brotation
                    else:
                        scale_command += ' brotation %g' % \
                                         self._spacing

                else:
                    scale_command += ' bfactor off'

                self.input(scale_command)

            # Debug.write('Scaling command: "%s"' % scale_command)

            # next any 'generic' parameters

            if self._resolution:
                self.input('resolution %g' % self._resolution)

            self.input('cycles %d' % self._cycles)

            if self._anomalous:
                self.input('anomalous on')
            else:
                self.input('anomalous off')

            if self._scalepack:
                self.input('output polish unmerged')
            elif self._chef_unmerged:
                self.input('output unmerged together')
            else:
                self.input('output unmerged')

            # run using previously determined scales

            if self._scales_file:
                self.input('onlymerge')
                self.input('restore %s' % self._scales_file)

            self.close_wait()

            # check for errors

            try:
                self.check_for_errors()
                self.check_ccp4_errors()
                self.check_aimless_error_negative_scale_run()
                self.check_aimless_errors()
            except Exception:
                Chatter.write(
                    "Aimless failed, see log file for more details:\n  %s"
                    % self.get_log_file())
                # remove any partial output before re-raising
                try:
                    os.remove(self.get_hklout())
                except OSError:
                    pass
                raise

            Debug.write('Aimless status: OK')

            # here get a list of all output files...
            output = self.get_all_output()

            hklout_files = []
            hklout_dict = {}

            for i in range(len(output)):
                record = output[i]

                # this is a potential source of problems - if the
                # wavelength name has a _ in it then we are stuffed!

                if 'Writing merged data for dataset' in record:

                    if len(record.split()) == 9:
                        hklout = output[i + 1].strip()
                    else:
                        hklout = record.split()[9]

                    dname = record.split()[6].split('/')[-1]
                    hklout_dict[dname] = hklout

                    hklout_files.append(hklout)

                elif 'Writing unmerged data for all datasets' in record:
                    if len(record.split()) == 9:
                        hklout = output[i + 1].strip()
                    else:
                        hklout = record.split()[9]

                    self._unmerged_reflections = hklout

            self._scalr_scaled_reflection_files = hklout_dict

            return 'OK'
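
The 'scales' keyword line above is assembled piecewise from the scaling mode and B-factor settings. A simplified standalone sketch of that assembly (function name hypothetical; the batch-mode brotation fallback is omitted for brevity):

def scales_command(mode, spacing, secondary=None, secondary_lmax=None,
                   bfactor=True, brotation=None):
    if mode == 'rotation':
        cmd = 'scales rotation spacing %g' % spacing
        if secondary is not None:
            cmd += ' %s' % secondary
            if secondary_lmax is not None:
                cmd += ' %d' % int(secondary_lmax)
    else:
        cmd = 'scales batch'
    if bfactor:
        cmd += ' bfactor on'
        if brotation is not None:
            cmd += ' brotation %g' % brotation
    else:
        cmd += ' bfactor off'
    return cmd

print(scales_command('rotation', 5.0, 'secondary', 6))
# scales rotation spacing 5 secondary 6 bfactor on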
Example #43
0
    def _scale(self):
        """Perform all of the operations required to deliver the scaled
        data."""
        sweep_infos = [
            self._sweep_handler.get_sweep_information(e)
            for e in self._sweep_handler.get_epochs()
        ]

        if self._scalr_corrections:
            Journal.block(
                "scaling",
                self.get_scaler_xcrystal().get_name(),
                "Dials",
                {
                    "scaling model": "automatic",
                    "absorption": self._scalr_correct_absorption,
                    "decay": self._scalr_correct_decay,
                },
            )

        else:
            Journal.block(
                "scaling",
                self.get_scaler_xcrystal().get_name(),
                "Dials",
                {"scaling model": "default"},
            )

        ### Set the parameters and datafiles for dials.scale

        self._scaler = DialsScale()
        self._scaler = self._updated_dials_scaler()

        if self._scaled_experiments and self._scaled_reflections:
            # going to continue-where-left-off
            self._scaler.add_experiments_json(self._scaled_experiments)
            self._scaler.add_reflections_file(self._scaled_reflections)
        else:
            for si in sweep_infos:
                self._scaler.add_experiments_json(si.get_experiments())
                self._scaler.add_reflections_file(si.get_reflections())

        ### Set the unmerged mtz filepath

        self._scalr_scaled_reflection_files = {}
        self._scalr_scaled_reflection_files["mtz_unmerged"] = {}

        # First set the unmerged mtz output filename. Note that this is the
        # same for MAD datasets too, as a single unmerged file is needed for
        # the merging statistics calculation. For the merged mtz it differs.
        scaled_unmerged_mtz_path = os.path.join(
            self.get_working_directory(),
            "%s_%s_scaled_unmerged.mtz" % (self._scalr_pname, self._scalr_xname),
        )
        self._scaler.set_scaled_unmerged_mtz([scaled_unmerged_mtz_path])
        self._scaler.set_crystal_name(self._scalr_xname)  # Name goes in mtz

        ### Set the merged mtz filepath(s), making into account MAD case.

        # Find number of dnames (i.e. number of wavelengths)
        dnames_set = OrderedSet()
        for si in sweep_infos:
            dnames_set.add(si.get_project_info()[2])

        scaled_mtz_path = os.path.join(
            self.get_working_directory(),
            "%s_%s_scaled.mtz" % (self._scalr_pname, self._scalr_xname),
        )
        if len(dnames_set) == 1:
            self._scaler.set_scaled_mtz([scaled_mtz_path])
            self._scalr_scaled_reflection_files["mtz"] = {
                dnames_set[0]: scaled_mtz_path
            }
            self._scalr_scaled_reflection_files["mtz_unmerged"] = {
                dnames_set[0]: scaled_unmerged_mtz_path
            }
        else:
            merged_mtz_files = []
            self._scalr_scaled_reflection_files["mtz"] = {}
            for dname in dnames_set:
                # NB str.rstrip strips a set of characters, not a suffix,
                # so slice the extension off explicitly instead
                this_mtz_path = scaled_mtz_path[:-len(".mtz")] + ("_%s.mtz" % dname)
                merged_mtz_files.append(this_mtz_path)
                self._scalr_scaled_reflection_files["mtz"][dname] = this_mtz_path
                # Note - we aren't logging individual unmerged here as not
                # generating until later.
            self._scaler.set_scaled_mtz(merged_mtz_files)

        ### Set the resolution limit if applicable

        user_resolution_limits = {}
        highest_resolution = 100.0
        for si in sweep_infos:
            dname = si.get_project_info()[2]
            sname = si.get_sweep_name()
            intgr = si.get_integrater()

            if intgr.get_integrater_user_resolution():
                # record user resolution here but don't use it until later - why?
                dmin = intgr.get_integrater_high_resolution()

                if (dname, sname) not in user_resolution_limits:
                    user_resolution_limits[(dname, sname)] = dmin
                elif dmin < user_resolution_limits[(dname, sname)]:
                    user_resolution_limits[(dname, sname)] = dmin

            if (dname, sname) in self._scalr_resolution_limits:
                d_min, _ = self._scalr_resolution_limits[(dname, sname)]
                if d_min < highest_resolution:
                    highest_resolution = d_min
        if highest_resolution < 99.9:
            self._scaler.set_resolution(d_min=highest_resolution)

        ### Setup final job details and run scale

        self._scaler.set_working_directory(self.get_working_directory())
        auto_logfiler(self._scaler)
        FileHandler.record_log_file(
            "%s %s SCALE" % (self._scalr_pname, self._scalr_xname),
            self._scaler.get_log_file(),
        )
        self._scaler.scale()
        self._scaled_experiments = self._scaler.get_scaled_experiments()
        self._scaled_reflections = self._scaler.get_scaled_reflections()

        FileHandler.record_data_file(scaled_unmerged_mtz_path)

        # make it so that only scaled.expt and scaled.refl are
        # the files that dials.scale knows about, so that if scale is called again,
        # scaling resumes from where it left off.
        self._scaler.clear_datafiles()

        # log datafiles here, picked up from here in commonscaler methods.
        if len(dnames_set) == 1:
            hklout = copy.deepcopy(self._scaler.get_scaled_mtz()[0])
            self._scalr_scaled_refl_files = {dnames_set[0]: hklout}
            FileHandler.record_data_file(hklout)
        else:
            self._scalr_scaled_refl_files = {}
            for i, dname in enumerate(dnames_set):
                hklout = copy.deepcopy(self._scaler.get_scaled_mtz()[i])
                self._scalr_scaled_refl_files[dname] = hklout
                FileHandler.record_data_file(hklout)

        ### Calculate the resolution limit and set done False if applicable

        highest_suggested_resolution = self.assess_resolution_limits(
            self._scaler.get_unmerged_reflection_file(),
            user_resolution_limits,
            use_misigma=False,
        )

        if not self.get_scaler_done():
            # reset for when resolution limit applied
            Debug.write("Returning as scaling not finished...")
            return

        ### For MAD case, generate individual unmerged mtz for stats.

        if len(dnames_set) > 1:
            unmerged_mtz_files = []
            scaler = DialsScale()
            scaler.set_working_directory(self.get_working_directory())
            scaler.set_export_mtz_only()
            scaler.add_experiments_json(self._scaled_experiments)
            scaler.add_reflections_file(self._scaled_reflections)
            for dname in dnames_set:
                # as above, slice the '.mtz' suffix off rather than rstrip
                this_mtz_path = scaled_unmerged_mtz_path[:-len(".mtz")] + (
                    "_%s.mtz" % dname
                )
                unmerged_mtz_files.append(this_mtz_path)
                self._scalr_scaled_reflection_files["mtz_unmerged"][
                    dname
                ] = this_mtz_path
            scaler.set_scaled_unmerged_mtz(unmerged_mtz_files)
            scaler.scale()
            for f in scaler.get_scaled_unmerged_mtz():  # a list
                FileHandler.record_data_file(f)
            # set refls, exps & unmerged mtz names"

        if PhilIndex.params.xia2.settings.merging_statistics.source == "cctbx":
            for key in self._scalr_scaled_refl_files:
                stats = self._compute_scaler_statistics(
                    self._scalr_scaled_reflection_files["mtz_unmerged"][key],
                    selected_band=(highest_suggested_resolution, None),
                    wave=key,
                )
                self._scalr_statistics[
                    (self._scalr_pname, self._scalr_xname, key)
                ] = stats

        # Run twotheta refine
        self._update_scaled_unit_cell()
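
Both filename derivations above need to replace the '.mtz' suffix; since str.rstrip removes characters rather than a suffix, a safe helper (hypothetical) looks like this:

import os

def add_dname_suffix(mtz_path, dname):
    root, ext = os.path.splitext(mtz_path)   # ('/data/x_scaled', '.mtz')
    return "%s_%s%s" % (root, dname, ext)

print(add_dname_suffix("/data/x_scaled.mtz", "WAVE1"))
# /data/x_scaled_WAVE1.mtz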
Example #44
0
    def _do_indexing(self, method=None):
        indexer = self.Index()
        for spot_list in self._indxr_payload["spot_lists"]:
            indexer.add_spot_filename(spot_list)
        for filename in self._indxr_payload["experiments"]:
            indexer.add_sweep_filename(filename)
        if PhilIndex.params.dials.index.phil_file is not None:
            indexer.set_phil_file(PhilIndex.params.dials.index.phil_file)
        indexer.set_max_cell(
            max_cell=PhilIndex.params.dials.index.max_cell,
            max_height_fraction=PhilIndex.params.dials.index.
            max_cell_estimation.max_height_fraction,
        )
        if PhilIndex.params.xia2.settings.small_molecule:
            indexer.set_min_cell(3)
        if PhilIndex.params.dials.fix_geometry:
            indexer.set_detector_fix("all")
            indexer.set_beam_fix("all")
        indexer.set_close_to_spindle_cutoff(
            PhilIndex.params.dials.close_to_spindle_cutoff)

        if self._indxr_input_lattice:
            indexer.set_indexer_input_lattice(self._indxr_input_lattice)
            Debug.write("Set lattice: %s" % self._indxr_input_lattice)

        if self._indxr_input_cell:
            indexer.set_indexer_input_cell(self._indxr_input_cell)
            Debug.write("Set cell: %f %f %f %f %f %f" % self._indxr_input_cell)

        if method is None:
            if PhilIndex.params.dials.index.method is None:
                method = "fft3d"
                Debug.write("Choosing indexing method: %s" % method)
            else:
                method = PhilIndex.params.dials.index.method

        FileHandler.record_log_file("%s INDEX" % self.get_indexer_full_name(),
                                    indexer.get_log_file())
        indexer.run(method)

        if not os.path.exists(indexer.get_experiments_filename()):
            raise RuntimeError(
                "Indexing has failed: see %s for more details." %
                indexer.get_log_file())
        elif not os.path.exists(indexer.get_indexed_filename()):
            raise RuntimeError("Indexing has failed: %s does not exist." %
                               indexer.get_indexed_filename())

        report = self.Report()
        report.set_experiments_filename(indexer.get_experiments_filename())
        report.set_reflections_filename(indexer.get_indexed_filename())
        html_filename = os.path.join(
            self.get_working_directory(),
            "%i_dials.index.report.html" % report.get_xpid(),
        )
        report.set_html_filename(html_filename)
        report.run()
        FileHandler.record_html_file("%s INDEX" % self.get_indexer_full_name(),
                                     html_filename)

        return indexer
Example #45
0
    def _index(self):
        if PhilIndex.params.dials.index.method in (libtbx.Auto, None):
            if self._indxr_input_cell is not None:
                indexer = self._do_indexing("real_space_grid_search")
            else:
                try:
                    indexer_fft3d = self._do_indexing(method="fft3d")
                    nref_3d, rmsd_3d = indexer_fft3d.get_nref_rmsds()
                except Exception as e:
                    nref_3d = None
                    rmsd_3d = None
                    indexing_failure = e
                try:
                    indexer_fft1d = self._do_indexing(method="fft1d")
                    nref_1d, rmsd_1d = indexer_fft1d.get_nref_rmsds()
                except Exception as e:
                    nref_1d = None
                    rmsd_1d = None
                    indexing_failure = e

                if nref_1d is not None and (
                        nref_3d is None
                        or (nref_1d > nref_3d and rmsd_1d[0] < rmsd_3d[0]
                            and rmsd_1d[1] < rmsd_3d[1]
                            and rmsd_1d[2] < rmsd_3d[2])):
                    indexer = indexer_fft1d
                elif nref_3d is not None:
                    indexer = indexer_fft3d
                else:
                    raise RuntimeError(indexing_failure)

        else:
            indexer = self._do_indexing(
                method=PhilIndex.params.dials.index.method)
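        # Worked example of the fft3d/fft1d selection rule above (values
        # illustrative, not from any real run): with nref_3d=1200,
        # rmsd_3d=(0.50, 0.60, 0.20) and nref_1d=1500,
        # rmsd_1d=(0.40, 0.50, 0.10), fft1d indexed more reflections with
        # uniformly lower RMSDs, so indexer_fft1d is kept; had fft1d failed
        # (nref_1d is None), indexer_fft3d would be kept instead.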

        # not strictly the P1 cell, rather the cell that was used in indexing
        self._p1_cell = indexer._p1_cell
        self.set_indexer_payload("indexed_filename",
                                 indexer.get_indexed_filename())

        from cctbx.sgtbx import bravais_types
        from dxtbx.serialize import load

        indexed_file = indexer.get_indexed_filename()
        indexed_experiments = indexer.get_experiments_filename()

        fast_mode = PhilIndex.params.dials.fast_mode
        trust_beam_centre = PhilIndex.params.xia2.settings.trust_beam_centre
        multi_sweep_indexing = PhilIndex.params.xia2.settings.multi_sweep_indexing
        check_indexing_symmetry = PhilIndex.params.dials.check_indexing_symmetry

        if check_indexing_symmetry and not (trust_beam_centre or fast_mode
                                            or multi_sweep_indexing):
            checksym = self.CheckIndexingSymmetry()
            checksym.set_experiments_filename(indexed_experiments)
            checksym.set_indexed_filename(indexed_file)
            checksym.set_grid_search_scope(1)
            checksym.run()
            hkl_offset = checksym.get_hkl_offset()
            Debug.write("hkl_offset: %s" % str(hkl_offset))
            if hkl_offset is not None and hkl_offset != (0, 0, 0):
                reindex = self.Reindex()
                reindex.set_hkl_offset(hkl_offset)
                reindex.set_indexed_filename(indexed_file)
                reindex.run()
                indexed_file = reindex.get_reindexed_reflections_filename()

                # do some scan-static refinement - run twice, first without outlier
                # rejection as the model is too far from reality to do a sensible job of
                # outlier rejection
                refiner = self.Refine()
                refiner.set_experiments_filename(indexed_experiments)
                refiner.set_indexed_filename(
                    reindex.get_reindexed_reflections_filename())
                refiner.set_outlier_algorithm(None)
                refiner.run()
                indexed_experiments = refiner.get_refined_experiments_filename()

                # now again with outlier rejection (possibly)
                refiner = self.Refine()
                refiner.set_experiments_filename(indexed_experiments)
                refiner.set_indexed_filename(indexed_file)
                refiner.run()
                indexed_experiments = refiner.get_refined_experiments_filename()

        if self._indxr_input_lattice is None:

            # FIXME in here should respect the input unit cell and lattice if provided

            # FIXME from this (i) populate the helper table,
            # (ii) try to avoid re-running the indexing
            # step if we eliminate a solution as we have all of the refined results
            # already available.

            rbs = self.RefineBravaisSettings()
            rbs.set_experiments_filename(indexed_experiments)
            rbs.set_indexed_filename(indexed_file)
            if PhilIndex.params.dials.fix_geometry:
                rbs.set_detector_fix("all")
                rbs.set_beam_fix("all")

            FileHandler.record_log_file(
                "%s LATTICE" % self.get_indexer_full_name(),
                rbs.get_log_file())
            rbs.run()

            from cctbx import crystal, sgtbx

            for k in sorted(rbs.get_bravais_summary()):
                summary = rbs.get_bravais_summary()[k]

                # FIXME need to do this better - for the moment only accept lattices
                # where R.M.S. deviation is less than twice P1 R.M.S. deviation.

                if self._indxr_input_lattice is None:
                    if not summary["recommended"]:
                        continue

                experiments = load.experiment_list(summary["experiments_file"],
                                                   check_format=False)
                cryst = experiments.crystals()[0]
                cs = crystal.symmetry(unit_cell=cryst.get_unit_cell(),
                                      space_group=cryst.get_space_group())
                cb_op_best_to_ref = cs.change_of_basis_op_to_reference_setting()
                cs_reference = cs.change_basis(cb_op_best_to_ref)
                lattice = str(
                    bravais_types.bravais_lattice(
                        group=cs_reference.space_group()))
                cb_op = cb_op_best_to_ref * sgtbx.change_of_basis_op(
                    str(summary["cb_op"]))

                self._solutions[k] = {
                    "number": k,
                    "mosaic": 0.0,
                    "metric": summary["max_angular_difference"],
                    "rmsd": summary["rmsd"],
                    "nspots": summary["nspots"],
                    "lattice": lattice,
                    "cell": cs_reference.unit_cell().parameters(),
                    "experiments_file": summary["experiments_file"],
                    "cb_op": str(cb_op),
                }

            self._solution = self.get_solution()
            self._indxr_lattice = self._solution["lattice"]

            for solution in self._solutions:
                lattice = self._solutions[solution]["lattice"]
                if (self._indxr_input_lattice is not None
                        and self._indxr_input_lattice != lattice):
                    continue
                if lattice in self._indxr_other_lattice_cell:
                    if (self._indxr_other_lattice_cell[lattice]["metric"] <
                            self._solutions[solution]["metric"]):
                        continue

                self._indxr_other_lattice_cell[lattice] = {
                    "metric": self._solutions[solution]["metric"],
                    "cell": self._solutions[solution]["cell"],
                }

            self._indxr_mosaic = self._solution["mosaic"]

            experiment_list = load.experiment_list(
                self._solution["experiments_file"])
            self.set_indexer_experiment_list(experiment_list)

            # reindex the output experiments list to the reference setting
            # (from the best cell/conventional setting)
            cb_op_to_ref = (
                experiment_list.crystals()[0]
                .get_space_group()
                .info()
                .change_of_basis_op_to_reference_setting()
            )
            reindex = self.Reindex()
            reindex.set_experiments_filename(
                self._solution["experiments_file"])
            reindex.set_cb_op(cb_op_to_ref)
            reindex.set_space_group(
                str(lattice_to_spacegroup_number(self._solution["lattice"])))
            reindex.run()
            experiments_file = reindex.get_reindexed_experiments_filename()
            experiment_list = load.experiment_list(experiments_file)
            self.set_indexer_experiment_list(experiment_list)
            self.set_indexer_payload("experiments_filename", experiments_file)

            # reindex the output reflection list to this solution
            reindex = self.Reindex()
            reindex.set_indexed_filename(indexed_file)
            reindex.set_cb_op(self._solution["cb_op"])
            reindex.set_space_group(
                str(lattice_to_spacegroup_number(self._solution["lattice"])))
            reindex.run()
            indexed_file = reindex.get_reindexed_reflections_filename()
            self.set_indexer_payload("indexed_filename", indexed_file)

        else:
            experiment_list = load.experiment_list(indexed_experiments)
            self.set_indexer_experiment_list(experiment_list)
            self.set_indexer_payload("experiments_filename",
                                     indexed_experiments)

            cryst = experiment_list.crystals()[0]
            lattice = str(
                bravais_types.bravais_lattice(group=cryst.get_space_group()))
            self._indxr_lattice = lattice
            self._solutions = {}
            self._solutions[0] = {
                "number": 0,
                "mosaic": 0.0,
                "metric": -1,
                "rmsd": -1,
                "nspots": -1,
                "lattice": lattice,
                "cell": cryst.get_unit_cell().parameters(),
                "experiments_file": indexed_experiments,
                "cb_op": "a,b,c",
            }

            self._indxr_other_lattice_cell[lattice] = {
                "metric": self._solutions[0]["metric"],
                "cell": self._solutions[0]["cell"],
            }
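# A minimal standalone sketch of the cctbx calls used above to map a refined
# solution to a Bravais lattice symbol (the unit cell and space group here are
# illustrative, not taken from any run):

from cctbx import crystal
from cctbx.sgtbx import bravais_types

cs = crystal.symmetry(
    unit_cell=(40.0, 50.0, 60.0, 90.0, 90.0, 90.0),
    space_group_symbol="P 21 21 21",
)
cs_reference = cs.change_basis(cs.change_of_basis_op_to_reference_setting())
lattice = str(bravais_types.bravais_lattice(group=cs_reference.space_group()))
print(lattice)  # oP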
Example #46
0
    def _scale_prepare(self):
        """Perform all of the preparation required to deliver the scaled
        data. This should sort together the reflection files, ensure that
        they are correctly indexed (via dials.symmetry) and generally tidy
        things up."""

        # AIM discover symmetry and reindex with dials.symmetry, and set the correct
        # reflections in si.reflections, si.experiments

        self._helper.set_working_directory(self.get_working_directory())
        self._factory.set_working_directory(self.get_working_directory())

        need_to_return = False

        self._sweep_handler = SweepInformationHandler(self._scalr_integraters)

        p, x = self._sweep_handler.get_project_info()
        self._scalr_pname = p
        self._scalr_xname = x

        self._helper.set_pname_xname(p, x)

        Journal.block(
            "gathering",
            self.get_scaler_xcrystal().get_name(),
            "Dials",
            {"working directory": self.get_working_directory()},
        )

        # First do stuff to work out if excluding any data
        # Note - does this actually work? I couldn't seem to get it to work
        # in either this pipeline or the standard dials pipeline
        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            intgr = si.get_integrater()
            _, xname, dname = si.get_project_info()
            sname = si.get_sweep_name()

            exclude_sweep = False

            for sweep in PhilIndex.params.xia2.settings.sweep:
                if sweep.id == sname and sweep.exclude:
                    exclude_sweep = True
                    break

            if exclude_sweep:
                self._sweep_handler.remove_epoch(epoch)
                Debug.write("Excluding sweep %s" % sname)
            else:
                Journal.entry({"adding data from": "%s/%s/%s" % (xname, dname, sname)})

        # If multiple files, want to run symmetry to check for consistent indexing
        # also

        # try to reproduce what CCP4ScalerA is doing

        # first assign identifiers to avoid dataset-id collisions
        # Idea is that this should be called anytime you get data anew from the
        # integrater, to intercept and assign unique ids, then set in the
        # sweep_information (si) and always use si.set_reflections/
        # si.get_reflections as we process.

        # self._sweep_handler = self._helper.assign_and_return_datasets(
        #    self._sweep_handler
        # ) symmetry now sorts out identifiers.

        need_to_return = False

        if self._scalr_input_pointgroup:
            self._input_pointgroup_scale_prepare()
        elif (
            len(self._sweep_handler.get_epochs()) > 1
            and PhilIndex.params.xia2.settings.multi_sweep_indexing
        ):
            need_to_return = self._multi_sweep_scale_prepare()
        else:
            need_to_return = self._standard_scale_prepare()

        if need_to_return:
            self.set_scaler_done(False)
            self.set_scaler_prepare_done(False)
            return

        ### After this point, the point group is good and we only need to
        ### reindex to a consistent setting. There is no need to call back to
        ### the integrater, just use the data in the sweep info.

        # First work out if we're going to reindex against external reference
        param = PhilIndex.params.xia2.settings.scale
        using_external_references = False
        reference_refl = None
        reference_expt = None
        if param.reference_reflection_file:
            if not param.reference_experiment_file:
                Chatter.write(
                    """
No DIALS reference experiments file provided, reference reflection file will
not be used. Reference mtz files for reindexing not currently supported for
pipeline=dials (supported for pipeline=dials-aimless).
"""
                )
            else:
                reference_refl = param.reference_reflection_file
                reference_expt = param.reference_experiment_file
                using_external_references = True
                Debug.write("Using reference reflections %s" % reference_refl)
                Debug.write("Using reference experiments %s" % reference_expt)

        if len(self._sweep_handler.get_epochs()) > 1:
            if PhilIndex.params.xia2.settings.unify_setting:
                self.unify_setting()

            if PhilIndex.params.xia2.settings.use_brehm_diederichs:
                self.brehm_diederichs_reindexing()
            # If not using Brehm-Diederichs reindexing, set reference as first
            # sweep, unless using external reference.
            elif not using_external_references:
                Debug.write("First sweep will be used as reference for reindexing")
                first = self._sweep_handler.get_epochs()[0]
                si = self._sweep_handler.get_sweep_information(first)
                reference_expt = si.get_experiments()
                reference_refl = si.get_reflections()

        # Now reindex to be consistent with first dataset - run reindex on each
        # dataset with reference (unless did brehm diederichs and didn't supply
        # a reference file)

        if reference_refl and reference_expt:
            exp = load.experiment_list(reference_expt)
            reference_cell = exp[0].crystal.get_unit_cell().parameters()

            # ---------- REINDEX TO CORRECT (REFERENCE) SETTING ----------
            Chatter.write("Reindexing all datasets to common reference")

            if using_external_references:
                epochs = self._sweep_handler.get_epochs()
            else:
                epochs = self._sweep_handler.get_epochs()[1:]
            for epoch in epochs:
                # if we are working with unified UB matrix then this should not
                # be a problem here (note, *if*; *should*)

                # what about e.g. alternative P1 settings?
                # see JIRA MXSW-904
                if PhilIndex.params.xia2.settings.unify_setting:
                    continue

                reindexer = DialsReindex()
                reindexer.set_working_directory(self.get_working_directory())
                auto_logfiler(reindexer)

                si = self._sweep_handler.get_sweep_information(epoch)
                reindexer.set_reference_filename(reference_expt)
                reindexer.set_reference_reflections(reference_refl)
                reindexer.set_indexed_filename(si.get_reflections())
                reindexer.set_experiments_filename(si.get_experiments())
                reindexer.run()

                # At this point, CCP4ScalerA would reset the integrater so
                # that the integrater reruns reindexing; no need to do that
                # here as we have access to the files and will never need to
                # reintegrate.

                si.set_reflections(reindexer.get_reindexed_reflections_filename())
                si.set_experiments(reindexer.get_reindexed_experiments_filename())

                # FIXME how to get some indication of the reindexing used?

                exp = load.experiment_list(
                    reindexer.get_reindexed_experiments_filename()
                )
                cell = exp[0].crystal.get_unit_cell().parameters()

                # Note - no lattice check as this will already be caught by reindex
                Debug.write("Cell: %.2f %.2f %.2f %.2f %.2f %.2f" % cell)
                Debug.write("Ref:  %.2f %.2f %.2f %.2f %.2f %.2f" % reference_cell)

                for j in range(6):
                    if (
                        math.fabs((cell[j] - reference_cell[j]) / reference_cell[j])
                        > 0.1
                    ):
                        raise RuntimeError(
                            "unit cell parameters differ in %s and %s"
                            % (reference_expt, si.get_reflections())
                        )

        # Now make sure all batches ok before finish preparing
        # This should be made safer, currently after dials.scale there is no
        # concept of 'batch', dials.export uses the calculate_batch_offsets
        # to assign batches, giving the same result as below.

        experiments_to_rebatch = []
        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            experiment = si.get_experiments()
            experiments_to_rebatch.append(load.experiment_list(experiment)[0])
        offsets = calculate_batch_offsets(experiments_to_rebatch)

        for i, epoch in enumerate(self._sweep_handler.get_epochs()):
            si = self._sweep_handler.get_sweep_information(epoch)
            r = si.get_batch_range()
            si.set_batch_offset(offsets[i])
            si.set_batches([r[0] + offsets[i], r[1] + offsets[i]])
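        # Illustrative rebatching for the loop above (values made up): three
        # sweeps each with batch range (1, 100) might receive offsets
        # (0, 100, 200) from calculate_batch_offsets, giving non-overlapping
        # batch ranges (1, 100), (101, 200) and (201, 300).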
Example #47
0
    def _index_select_images_i(self, imageset):
        # FIXME copied from XDSIndexer.py!
        """Select correct images based on image headers."""

        start, end = imageset.get_scan().get_array_range()
        images = tuple(range(start + 1, end + 1))

        # characterise the images - are there just two (e.g. dna-style
        # reference images) or is there a full block?

        wedges = []

        if len(images) < 3:
            # work on the assumption that this is a reference pair

            if len(images) == 1:
                wedges.append((images[0], images[0]))
            else:
                wedges.append((images[0], images[1]))

        else:
            block_size = min(len(images), 5)

            Debug.write("Adding images for indexer: %d -> %d" %
                        (images[0], images[block_size - 1]))

            wedges.append((images[0], images[block_size - 1]))

            phi_width = imageset.get_scan().get_oscillation()[1]
            if int(90.0 / phi_width) + block_size in images:
                # assume we can add a wedge around 45 degrees as well...
                Debug.write("Adding images for indexer: %d -> %d" % (
                    int(45.0 / phi_width) + images[0],
                    int(45.0 / phi_width) + images[0] + block_size - 1,
                ))
                Debug.write("Adding images for indexer: %d -> %d" % (
                    int(90.0 / phi_width) + images[0],
                    int(90.0 / phi_width) + images[0] + block_size - 1,
                ))
                wedges.append((
                    int(45.0 / phi_width) + images[0],
                    int(45.0 / phi_width) + images[0] + block_size - 1,
                ))
                wedges.append((
                    int(90.0 / phi_width) + images[0],
                    int(90.0 / phi_width) + images[0] + block_size - 1,
                ))

            else:

                # add some half-way anyway
                first = (len(images) // 2) - (block_size // 2) + images[0] - 1
                if first > wedges[0][1]:
                    last = first + block_size - 1
                    Debug.write("Adding images for indexer: %d -> %d" %
                                (first, last))
                    wedges.append((first, last))
                if len(images) > block_size:
                    Debug.write("Adding images for indexer: %d -> %d" %
                                (images[-block_size], images[-1]))
                    wedges.append((images[-block_size], images[-1]))

        return wedges
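# Worked numbers for the wedge selection above (illustrative): with images
# 1..3600 and phi_width = 0.1 deg, block_size = 5 so the first wedge is
# (1, 5); since int(90.0 / 0.1) + 5 = 905 lies in the image range, wedges
# around 45 and 90 degrees are added as well: (451, 455) and (901, 905).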
Example #48
0
    def _standard_scale_prepare(self):
        pointgroups = {}
        reindex_ops = {}
        probably_twinned = False
        need_to_return = False

        lattices = []
        # First check for the existence of multiple lattices. If only one
        # epoch, then this gives the necessary data for proceeding straight
        # to the point group check.
        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            intgr = si.get_integrater()
            experiment = intgr.get_integrated_experiments()
            reflections = intgr.get_integrated_reflections()
            refiner = intgr.get_integrater_refiner()

            pointgroup, reindex_op, ntr, pt, _, __, ___ = self._dials_symmetry_indexer_jiffy(
                [experiment], [reflections], [refiner]
            )

            lattice = Syminfo.get_lattice(pointgroup)
            if lattice not in lattices:
                lattices.append(lattice)
            if ntr:
                si.get_integrater().integrater_reset_reindex_operator()
                need_to_return = True
            if pt:
                probably_twinned = True
            pointgroups[epoch] = pointgroup
            reindex_ops[epoch] = reindex_op
            Debug.write("Pointgroup: %s (%s)" % (pointgroup, reindex_op))

        if len(lattices) > 1:
            # Check consistency of lattices if more than one; with a single
            # lattice we can proceed straight to the point group consistency
            # check using the cached results.
            correct_lattice = sort_lattices(lattices)[0]
            Chatter.write("Correct lattice asserted to be %s" % correct_lattice)

            # transfer this information back to the indexers
            for epoch in self._sweep_handler.get_epochs():
                si = self._sweep_handler.get_sweep_information(epoch)
                refiner = si.get_integrater().get_integrater_refiner()
                _tup = (correct_lattice, si.get_sweep_name())

                state = refiner.set_refiner_asserted_lattice(correct_lattice)

                if state == refiner.LATTICE_CORRECT:
                    Chatter.write("Lattice %s ok for sweep %s" % _tup)
                elif state == refiner.LATTICE_IMPOSSIBLE:
                    raise RuntimeError("Lattice %s impossible for %s" % _tup)
                elif state == refiner.LATTICE_POSSIBLE:
                    Chatter.write("Lattice %s assigned for sweep %s" % _tup)
                    need_to_return = True

        if need_to_return:
            return need_to_return

        need_to_return = False

        pointgroup_set = {pointgroups[e] for e in pointgroups}

        if len(pointgroup_set) > 1 and not probably_twinned:
            raise RuntimeError(
                "non uniform pointgroups: %s" % str(list(pointgroup_set))
            )

        if len(pointgroup_set) > 1:
            Debug.write(
                "Probably twinned, pointgroups: %s"
                % " ".join([p.replace(" ", "") for p in list(pointgroup_set)])
            )
            numbers = [Syminfo.spacegroup_name_to_number(s) for s in pointgroup_set]
            overall_pointgroup = Syminfo.spacegroup_number_to_name(min(numbers))
            self._scalr_input_pointgroup = overall_pointgroup

            Chatter.write(
                "Twinning detected, assume pointgroup %s" % overall_pointgroup
            )
            need_to_return = True
        else:
            overall_pointgroup = pointgroup_set.pop()
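        # Illustrative twinned case for the branch above: per-sweep point
        # groups {"P 4 2 2", "P 4"} with probable twinning map to space group
        # numbers {89, 75}; the minimum, 75 ("P 4"), is taken as the overall
        # pointgroup and the preparation is repeated with that choice.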
        self._scalr_likely_spacegroups = [overall_pointgroup]
        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            self._helper.reindex_jiffy(si, overall_pointgroup, reindex_ops[epoch])
        return need_to_return
def mosflm_check_indexer_solution(indexer):

    distance = indexer.get_indexer_distance()
    axis = matrix.col([0, 0, 1])
    beam = indexer.get_indexer_beam_centre()
    cell = indexer.get_indexer_cell()
    wavelength = indexer.get_wavelength()

    space_group_number = l2s(indexer.get_indexer_lattice())
    spacegroup = sgtbx.space_group_symbols(space_group_number).hall()
    phi = indexer.get_header()["phi_width"]

    sg = sgtbx.space_group(spacegroup)

    if not (sg.n_ltr() - 1):
        # primitive solution - just return ... something
        return None, None, None, None

    # FIXME need to raise an exception if this is not available!
    m_matrix = indexer.get_indexer_payload("mosflm_orientation_matrix")

    # N.B. in the calculation below I am using the Cambridge frame
    # and Mosflm definitions of X & Y...

    m_elems = []

    for record in m_matrix[:3]:
        record = record.replace("-", " -")
        for e in map(float, record.split()):
            m_elems.append(e / wavelength)

    mi = matrix.sqr(m_elems)
    m = mi.inverse()

    A = matrix.col(m.elems[0:3])
    B = matrix.col(m.elems[3:6])
    C = matrix.col(m.elems[6:9])

    # now select the images - start with the images that the indexer
    # used for indexing, though can interrogate the FrameProcessor
    # interface of the indexer to put together a completely different
    # list if I like...

    images = []

    for i in indexer.get_indexer_images():
        for j in i:
            if j not in images:
                images.append(j)

    images.sort()

    # now construct the reciprocal-space peak list n.b. should
    # really run this in parallel...

    spots_r = []

    spots_r_j = {}

    for i in images:
        image = indexer.get_image_name(i)
        dd = Diffdump()
        dd.set_image(image)
        header = dd.readheader()
        phi = header["phi_start"] + 0.5 * header["phi_width"]
        pixel = header["pixel"]
        wavelength = header["wavelength"]
        peaks = locate_maxima(image)

        spots_r_j[i] = []

        for p in peaks:
            x, y, isigma = p

            if isigma < 5.0:
                continue

            xp = pixel[0] * y - beam[0]
            yp = pixel[1] * x - beam[1]

            scale = wavelength * math.sqrt(xp * xp + yp * yp +
                                           distance * distance)

            X = distance / scale
            X -= 1.0 / wavelength
            Y = -xp / scale
            Z = yp / scale

            S = matrix.col([X, Y, Z])

            rtod = 180.0 / math.pi

            spots_r.append(S.rotate(axis, -phi / rtod))
            spots_r_j[i].append(S.rotate(axis, -phi / rtod))

    # now reindex the reciprocal space spot list and count - n.b. need
    # to transform the Bravais lattice to an assumed spacegroup and hence
    # to a cctbx spacegroup!

    # lists = [spots_r_j[j] for j in spots_r_j]
    lists = [spots_r]

    for l in lists:

        absent = 0
        present = 0
        total = 0

        for spot in l:
            hkl = (m * spot).elems

            total += 1

            # list, not map: the result is indexed below, and a Python 3 map
            # object is not subscriptable
            ihkl = [nint(h) for h in hkl]

            if math.fabs(hkl[0] - ihkl[0]) > 0.1:
                continue

            if math.fabs(hkl[1] - ihkl[1]) > 0.1:
                continue

            if math.fabs(hkl[2] - ihkl[2]) > 0.1:
                continue

            # now determine if it is absent

            if sg.is_sys_absent(ihkl):
                absent += 1
            else:
                present += 1

        # now perform the analysis on these numbers...

        sd = math.sqrt(absent)

        if total:

            Debug.write("Counts: %d %d %d %.3f" % (total, present, absent,
                                                   (absent - 3 * sd) / total))

        else:

            Debug.write("Not enough spots found for analysis")
            return False, None, None, None

        if (absent - 3 * sd) / total < 0.008:
            return False, None, None, None
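        # Worked numbers for the threshold above (illustrative): total = 1000
        # indexed spots with absent = 30 gives sd = sqrt(30) ~ 5.5 and a
        # score of (30 - 16.4) / 1000 ~ 0.014 >= 0.008, i.e. significant
        # intensity at systematically absent positions, so we go on to derive
        # the primitive setting; with absent = 10 the score ~ 0.0005 falls
        # below 0.008 and the centred solution is accepted as-is.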

    # in here need to calculate the new orientation matrix for the
    # primitive basis and reconfigure the indexer - somehow...

    # ok, so the bases are fine, but what I will want to do is reorder them
    # to give the best primitive choice of unit cell...

    sgp = sg.build_derived_group(True, False)
    lattice_p = s2l(sgp.type().number())
    symm = crystal.symmetry(unit_cell=cell, space_group=sgp)

    rdx = symm.change_of_basis_op_to_best_cell()
    symm_new = symm.change_basis(rdx)

    # now apply this to the reciprocal-space orientation matrix mi

    # cb_op = sgtbx.change_of_basis_op(rdx)
    cb_op = rdx
    R = cb_op.c_inv().r().as_rational().as_float().transpose().inverse()
    mi_r = mi * R

    # now re-derive the cell constants, just to be sure

    m_r = mi_r.inverse()
    Ar = matrix.col(m_r.elems[0:3])
    Br = matrix.col(m_r.elems[3:6])
    Cr = matrix.col(m_r.elems[6:9])

    a = math.sqrt(Ar.dot())
    b = math.sqrt(Br.dot())
    c = math.sqrt(Cr.dot())

    rtod = 180.0 / math.pi

    alpha = rtod * Br.angle(Cr)
    beta = rtod * Cr.angle(Ar)
    gamma = rtod * Ar.angle(Br)

    # print '%6.3f %6.3f %6.3f %6.3f %6.3f %6.3f' % \
    # (a, b, c, alpha, beta, gamma)

    cell = uctbx.unit_cell((a, b, c, alpha, beta, gamma))

    amat = [wavelength * e for e in mi_r.elems]
    bmat = matrix.sqr(cell.fractionalization_matrix())
    umat = mi_r * bmat.inverse()

    # yuk! surely I don't need to do this...

    # I do need to do this, and don't call me shirley!

    new_matrix = [
        "%s\n" % r
        for r in format_matrix((a, b, c, alpha, beta,
                                gamma), amat, umat.elems).split("\n")
    ]

    # ok - this gives back the right matrix in the right setting - excellent!
    # now need to apply this back at base to the results of the indexer.

    # N.B. same should be applied to the same calculations for the XDS
    # version of this.

    return True, lattice_p, new_matrix, (a, b, c, alpha, beta, gamma)
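# A minimal standalone sketch of recovering cell constants from a real-space
# matrix with rows a, b, c, as in the re-derivation above (the matrix here is
# an illustrative orthorhombic 40 x 50 x 60 A cell, not from any run):

import math

from scitbx import matrix

m_demo = matrix.sqr((40, 0, 0, 0, 50, 0, 0, 0, 60))
A_d, B_d, C_d = (matrix.col(m_demo.elems[i:i + 3]) for i in (0, 3, 6))
a_d, b_d, c_d = (math.sqrt(v.dot()) for v in (A_d, B_d, C_d))
rtod = 180.0 / math.pi
angles = (rtod * B_d.angle(C_d), rtod * C_d.angle(A_d), rtod * A_d.angle(B_d))
print((a_d, b_d, c_d) + angles)  # (40.0, 50.0, 60.0, 90.0, 90.0, 90.0)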
Example #50
0
        def run(self):
            from xia2.Handlers.Streams import Debug

            Debug.write("Running dials.integrate")

            self.clear_command_line()
            self.add_command_line("input.experiments=%s" % self._experiments_filename)
            nproc = PhilIndex.params.xia2.settings.multiprocessing.nproc
            njob = PhilIndex.params.xia2.settings.multiprocessing.njob
            mp_mode = PhilIndex.params.xia2.settings.multiprocessing.mode
            mp_type = PhilIndex.params.xia2.settings.multiprocessing.type
            self.set_cpu_threads(nproc)

            self.add_command_line("nproc=%i" % nproc)
            if mp_mode == "serial" and mp_type == "qsub" and njob > 1:
                self.add_command_line("mp.method=drmaa")
                self.add_command_line("mp.njobs=%i" % njob)
            self.add_command_line("input.reflections=%s" % self._reflections_filename)
            self._integrated_reflections = os.path.join(
                self.get_working_directory(), "%d_integrated.refl" % self.get_xpid()
            )
            self._integrated_experiments = os.path.join(
                self.get_working_directory(), "%d_integrated.expt" % self.get_xpid()
            )
            self._integration_report_filename = os.path.join(
                self.get_working_directory(),
                "%d_integration_report.json" % self.get_xpid(),
            )
            self.add_command_line(
                "output.experiments=%s" % self._integrated_experiments
            )
            self.add_command_line(
                "output.reflections=%s" % self._integrated_reflections
            )
            self.add_command_line(
                "output.report=%s" % self._integration_report_filename
            )
            self.add_command_line("output.include_bad_reference=True")
            self.add_command_line("debug.reference.output=True")
            self.add_command_line("profile.fitting=%s" % self._profile_fitting)
            self.add_command_line(
                "gaussian_rs.scan_varying=%s" % self._scan_varying_profile
            )
            if self._new_mosaic:
                self.add_command_line("sigma_m_algorithm=extended")
            if self._outlier_algorithm is not None:
                self.add_command_line("outlier.algorithm=%s" % self._outlier_algorithm)
            if self._background_algorithm is not None:
                self.add_command_line(
                    "background.algorithm=%s" % self._background_algorithm
                )
            if self._phil_file is not None:
                self.add_command_line("%s" % self._phil_file)
            if self._d_max is not None:
                self.add_command_line("prediction.d_max=%f" % self._d_max)
            if self._d_min is not None and self._d_min > 0.0:
                self.add_command_line("prediction.d_min=%f" % self._d_min)
            for scan_range in self._scan_range:
                self.add_command_line("scan_range=%d,%d" % scan_range)
            if self._reflections_per_degree is not None:
                self.add_command_line(
                    "reflections_per_degree=%d" % self._reflections_per_degree
                )
                self.add_command_line("integrate_all_reflections=False")

            self.start()
            self.close_wait()

            dials_output = self.get_all_output()
            for n, record in enumerate(dials_output):
                if "There was a problem allocating memory for shoeboxes" in record:
                    raise DIALSIntegrateError(
                        """dials.integrate requires more memory than is available.
Try using a machine with more memory or using fewer processors."""
                    )
                if "Too few reflections for profile modelling" in record:
                    raise DIALSIntegrateError(
                        "%s\n%s, %s\nsee %%s for more details"
                        % tuple(dials_output[n + i].strip() for i in (0, 1, 2))
                        % self.get_log_file()
                    )

            self.check_for_errors()

            # save some of the output for future reference - the per-image
            # results

            with open(self._integration_report_filename, "rb") as fh:
                self._integration_report = json.load(fh)

            self._per_image_statistics = {}
            table = self._integration_report["tables"]["integration.image.summary"]
            rows = table["rows"]
            for row in table["rows"]:
                n_ref = float(row["n_prf"])
                if n_ref > 0:
                    ios = float(row["ios_prf"])
                else:
                    ios = float(row["ios_sum"])
                    n_ref = float(row["n_sum"])
                # XXX this +1 might need changing if James changes what is output in report.json
                self._per_image_statistics[int(row["image"]) + 1] = {
                    "isigi": ios,
                    "isig_tot": ios * math.sqrt(n_ref),
                    "rmsd_pixel": float(row["rmsd_xy"]),
                    "strong": n_ref,
                }
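# For illustration only (the xpid and file names are assumed): with nproc=8
# and multiprocessing mode=serial, type=qsub, njob=4, the wrapper above
# assembles a command line along the lines of
#
#   dials.integrate input.experiments=indexed.expt nproc=8 \
#     mp.method=drmaa mp.njobs=4 input.reflections=indexed.refl \
#     output.experiments=123_integrated.expt \
#     output.reflections=123_integrated.refl \
#     output.report=123_integration_report.json \
#     output.include_bad_reference=True debug.reference.output=True \
#     profile.fitting=True gaussian_rs.scan_varying=True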
def IntegraterForXSweep(xsweep, json_file=None):
    """Create an Integrater implementation to work with the provided
    XSweep."""

    # FIXME this needs properly implementing...
    if xsweep is None:
        raise RuntimeError("XSweep instance needed")

    if not xsweep.__class__.__name__ == "XSweep":
        raise RuntimeError("XSweep instance needed")

    integrater = Integrater()

    if json_file is not None:
        assert os.path.isfile(json_file)
        Debug.write("Loading integrater from json: %s" % json_file)
        import time

        t0 = time.time()
        integrater = integrater.__class__.from_json(filename=json_file)
        t1 = time.time()
        Debug.write("Loaded integrater in %.2f seconds" % (t1 - t0))
    else:
        integrater.setup_from_imageset(xsweep.get_imageset())
    integrater.set_integrater_sweep_name(xsweep.get_name())

    # copy across resolution limits
    if xsweep.get_resolution_high() or xsweep.get_resolution_low():

        d_min = PhilIndex.params.xia2.settings.resolution.d_min
        d_max = PhilIndex.params.xia2.settings.resolution.d_max

        # override with sweep versions if set - xia2#146
        if xsweep.get_resolution_high():
            d_min = xsweep.get_resolution_high()
        if xsweep.get_resolution_low():
            d_max = xsweep.get_resolution_low()

        if d_min is not None and d_min != integrater.get_integrater_high_resolution():

            Debug.write("Assigning resolution limits from XINFO input:")
            Debug.write("d_min: %.3f" % d_min)
            integrater.set_integrater_high_resolution(d_min, user=True)

        if d_max is not None and d_max != integrater.get_integrater_low_resolution():

            Debug.write("Assigning resolution limits from XINFO input:")
            Debug.write("d_max: %.3f" % d_max)
            integrater.set_integrater_low_resolution(d_max, user=True)

    # check the epoch and perhaps pass this in for future reference
    # (in the scaling)
    if xsweep._epoch > 0:
        integrater.set_integrater_epoch(xsweep._epoch)

    # need to do the same for wavelength now as that could be wrong in
    # the image header...

    if xsweep.get_wavelength_value():
        Debug.write("Integrater factory: Setting wavelength: %.6f" %
                    xsweep.get_wavelength_value())
        integrater.set_wavelength(xsweep.get_wavelength_value())

    # likewise the distance...
    if xsweep.get_distance():
        Debug.write("Integrater factory: Setting distance: %.2f" %
                    xsweep.get_distance())
        integrater.set_distance(xsweep.get_distance())

    integrater.set_integrater_sweep(xsweep, reset=False)

    return integrater
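# Hypothetical usage sketch (assumes an XSweep instance built elsewhere in the
# xia2 schema; integrate() is the standard Integrater interface entry point):
#
#   integrater = IntegraterForXSweep(xsweep)
#   integrater.integrate()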
Example #52
0
    def _determine_scaled_pointgroup(self):
        """Rerun symmetry after scaling to check for consistent space group. If not,
        then new space group should be used and data rescaled."""
        from cctbx import crystal

        exp_crystal = load.experiment_list(self._scaler.get_scaled_experiments())[
            0
        ].crystal
        cs = crystal.symmetry(
            space_group=exp_crystal.get_space_group(),
            unit_cell=exp_crystal.get_unit_cell(),
        )
        cs_ref = cs.as_reference_setting()
        current_pointgroup = cs_ref.space_group()
        current_patt_group = (
            current_pointgroup.build_derived_patterson_group().type().lookup_symbol()
        )
        Debug.write(
            "Space group used in scaling: %s"
            % current_pointgroup.type().lookup_symbol()
        )
        first = self._sweep_handler.get_epochs()[0]
        si = self._sweep_handler.get_sweep_information(first)
        refiner = si.get_integrater().get_integrater_refiner()
        point_group, reindex_op, _, _, reind_refl, reind_exp, reindex_initial = self._dials_symmetry_indexer_jiffy(
            [self._scaler.get_scaled_experiments()],
            [self._scaler.get_scaled_reflections()],
            [refiner],
        )
        Debug.write(
            "Point group determined by dials.symmetry on scaled dataset: %s"
            % point_group
        )
        sginfo = space_group_info(symbol=point_group)
        patt_group = (
            sginfo.group().build_derived_patterson_group().type().lookup_symbol()
        )
        self._scaler_symmetry_check_count += 1
        if patt_group != current_patt_group:
            if reindex_initial:
                reindexer = DialsReindex()
                reindexer.set_working_directory(self.get_working_directory())
                auto_logfiler(reindexer)
                reindexer.set_experiments_filename(
                    self._scaler.get_scaled_experiments()
                )
                reindexer.set_indexed_filename(self._scaler.get_scaled_reflections())
                reindexer.set_cb_op(reindex_op)
                reindexer.run()
                self._scaler.set_scaled_experiments(
                    reindexer.get_reindexed_experiments_filename()
                )
                self._scaler.set_scaled_reflections(
                    reindexer.get_reindexed_reflections_filename()
                )
            else:
                self._scaler.set_scaled_experiments(reind_exp)
                self._scaler.set_scaled_reflections(reind_refl)
            self.set_scaler_done(False)
            Chatter.write(
                """Inconsistent space groups determined before and after scaling: %s, %s \n
Data will be rescaled in new point group"""
                % (current_patt_group, patt_group)
            )
            return
        else:
            Chatter.write("Consistent space group determined before and after scaling")
Example #53
0
    def _index_select_images(self):
        """Select correct images based on image headers."""

        # FIXME in here (i) sum the images defined from the existing class
        # contents then (ii) change the template stored, the directory and
        # the header contents to correspond to those new images. Finally make
        # a note of these changes so we can correct XPARM file at the end.

        assert min(self.get_matching_images()) == 1

        # make a note so we can fix the XPARM.XDS file at the end
        self._true_phi_width = self.get_header_item("phi_width")

        params = PhilIndex.params.xds.merge2cbf
        if params.data_range is None:
            params.data_range = 1, len(self.get_matching_images())
        m2c = Merge2cbf(params=params)
        m2c.setup_from_image(self.get_image_name(1))
        m2c.set_working_directory(
            os.path.join(self.get_working_directory(), "summed_images"))
        os.mkdir(m2c.get_working_directory())
        m2c.run()

        # Is this safe to do?
        self._setup_from_image(
            os.path.join(m2c.get_working_directory(),
                         "merge2cbf_averaged_0001.cbf"))

        phi_width = self.get_header_item("phi_width")

        if phi_width == 0.0:
            raise RuntimeError("cannot use still images")

        # use five degrees for the background calculation

        five_deg = int(round(5.0 / phi_width)) - 1

        if five_deg < 5:
            five_deg = 5

        images = self.get_matching_images()

        # characterise the images - are there just two (e.g. dna-style
        # reference images) or is there a full block? if it is the
        # former then we have a problem, as we want *all* the images in the
        # sweep...

        wedges = []

        # note: the local `params` above is the xds.merge2cbf scope, so the
        # xia2 settings must come from the top-level PhilIndex parameters
        min_images = PhilIndex.params.xia2.settings.input.min_images

        if len(images) < 3 and len(images) < min_images:
            raise RuntimeError(
                "This INDEXER cannot be used for only %d images" % len(images))

        Debug.write("Adding images for indexer: %d -> %d" %
                    (min(images), max(images)))

        wedges.append((min(images), max(images)))

        # FIXME this should have a wrapper function!

        if min(images) + five_deg in images:
            self._background_images = (min(images), min(images) + five_deg)
        else:
            self._background_images = (min(images), max(images))

        return wedges
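# Worked numbers for the background range above (illustrative): with
# phi_width = 0.1 deg, five_deg = int(round(5.0 / 0.1)) - 1 = 49 images;
# with phi_width = 1.0 deg the raw value 5 - 1 = 4 falls below the 5-image
# minimum and is bumped up to 5.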
Example #54
0
        def __init__(self, decay_correction=None):
            # generic things
            CCP4DriverInstance.__class__.__init__(self)

            self.set_executable(
                os.path.join(os.environ.get('CBIN', ''), 'aimless'))

            if not os.path.exists(self.get_executable()):
                raise RuntimeError('aimless binary not found')

            self.start()
            self.close_wait()

            version = None

            for record in self.get_all_output():
                if '##' in record and 'AIMLESS' in record:
                    version = record.split()[5]

            if not version:
                raise RuntimeError('version not found')

            Debug.write('Using version: %s' % version)

            # clear all the header junk
            self.reset()

            # input and output files
            self._scalepack = False
            self._chef_unmerged = False
            self._unmerged_reflections = None
            self._xmlout = None

            # scaling parameters
            self._resolution = None

            # scales file for recycling
            self._scales_file = None

            # this defaults to SCALES - and is useful for when we
            # want to refine the SD parameters because we can
            # recycle the scale factors through the above interface
            self._new_scales_file = None

            # this flag indicates that the input reflections are already
            # scaled and just need merging e.g. from XDS/XSCALE.
            self._onlymerge = False

            # by default, switch this on
            if decay_correction is None:
                self._bfactor = True
            else:
                self._bfactor = decay_correction

            # this will often be wanted
            self._anomalous = False

            self._mode = 'rotation'

            # these are only relevant for 'rotation' mode scaling
            self._spacing = 5
            self._cycles = 100
            self._brotation = None
            self._bfactor_tie = None
            self._surface_tie = None
            self._surface_link = True

            self._intensities = 'combine'

            self._project_crystal_dataset = {}
            self._runs = []

            # for adding data on merge - one dname
            self._pname = None
            self._xname = None
            self._dname = None
Example #55
0
        def run(self):
            from xia2.Handlers.Streams import Debug
            Debug.write('Running dials.integrate')

            self.clear_command_line()
            self.add_command_line('input.experiments=%s' %
                                  self._experiments_filename)
            nproc = PhilIndex.params.xia2.settings.multiprocessing.nproc
            njob = PhilIndex.params.xia2.settings.multiprocessing.njob
            mp_mode = PhilIndex.params.xia2.settings.multiprocessing.mode
            mp_type = PhilIndex.params.xia2.settings.multiprocessing.type
            self.set_cpu_threads(nproc)

            self.add_command_line('nproc=%i' % nproc)
            if mp_mode == 'serial' and mp_type == 'qsub' and njob > 1:
                self.add_command_line('mp.method=drmaa')
                self.add_command_line('mp.njobs=%i' % njob)
            self.add_command_line(
                ('input.reflections=%s' % self._reflections_filename))
            self._integrated_reflections = os.path.join(
                self.get_working_directory(),
                '%d_integrated.pickle' % self.get_xpid())
            self._integrated_experiments = os.path.join(
                self.get_working_directory(),
                '%d_integrated_experiments.json' % self.get_xpid())
            self._integration_report_filename = os.path.join(
                self.get_working_directory(),
                '%d_integration_report.json' % self.get_xpid())
            self.add_command_line('output.experiments=%s' %
                                  self._integrated_experiments)
            self.add_command_line('output.reflections=%s' %
                                  self._integrated_reflections)
            self.add_command_line('output.report=%s' %
                                  self._integration_report_filename)
            self.add_command_line('output.include_bad_reference=True')
            self.add_command_line('debug.reference.output=True')
            self.add_command_line('profile.fitting=%s' % self._profile_fitting)
            if self._new_mosaic:
                self.add_command_line('sigma_m_algorithm=extended')
            if self._outlier_algorithm is not None:
                self.add_command_line('outlier.algorithm=%s' %
                                      self._outlier_algorithm)
            if self._background_algorithm is not None:
                self.add_command_line('background.algorithm=%s' %
                                      self._background_algorithm)
            if self._phil_file is not None:
                self.add_command_line('%s' % self._phil_file)
            if self._d_max is not None:
                self.add_command_line('prediction.d_max=%f' % self._d_max)
            if self._d_min is not None and self._d_min > 0.0:
                self.add_command_line('prediction.d_min=%f' % self._d_min)
            for scan_range in self._scan_range:
                self.add_command_line('scan_range=%d,%d' % scan_range)
            if self._reflections_per_degree is not None:
                self.add_command_line('reflections_per_degree=%d' %
                                      self._reflections_per_degree)
                self.add_command_line('integrate_all_reflections=False')

            self.start()
            self.close_wait()

            dials_output = self.get_all_output()
            for n, record in enumerate(dials_output):
                if 'There was a problem allocating memory for shoeboxes' in record:
                    raise DIALSIntegrateError(
                        '''dials.integrate requires more memory than is available.
Try using a machine with more memory or using fewer processors.''')
                if 'Too few reflections for profile modelling' in record:
                    raise DIALSIntegrateError(
                        "%s\n%s, %s\nsee %%s for more details" %
                        tuple(dials_output[n + i].strip()
                              for i in (0, 1, 2)) % self.get_log_file())

            self.check_for_errors()

            # save some of the output for future reference - the per-image
            # results

            with open(self._integration_report_filename, 'rb') as fh:
                self._integration_report = json.load(fh)

            self._per_image_statistics = {}
            table = self._integration_report['tables'][
                'integration.image.summary']
            for row in table['rows']:
                n_ref = float(row['n_prf'])
                if n_ref > 0:
                    ios = float(row['ios_prf'])
                else:
                    ios = float(row['ios_sum'])
                    n_ref = float(row['n_sum'])
                # XXX this +1 might need changing if James changes what is output in report.json
                self._per_image_statistics[int(row['image']) + 1] = {
                    'isigi': ios,
                    'isig_tot': ios * math.sqrt(n_ref),
                    'rmsd_pixel': float(row['rmsd_xy']),
                    'strong': n_ref,
                }
def Integrater():
    """Return an  Integrater implementation."""

    # FIXME this should take an indexer as an argument...

    integrater = None
    preselection = get_preferences().get("integrater")

    if not integrater and (not preselection or preselection == "dials"):
        try:
            integrater = DialsIntegrater()
            Debug.write("Using Dials Integrater")
        except NotAvailableError:
            if preselection == "dials":
                raise RuntimeError(
                    "preselected integrater dials not available: " +
                    "dials not installed?")

    if not integrater and (not preselection or preselection == "mosflmr"):
        try:
            integrater = MosflmIntegrater()
            Debug.write("Using MosflmR Integrater")
            if not get_preferences().get("scaler"):
                add_preference("scaler", "ccp4a")
        except NotAvailableError:
            if preselection == "mosflmr":
                raise RuntimeError(
                    "preselected integrater mosflmr not available")

    if not integrater and (not preselection or preselection == "xdsr"):
        try:
            integrater = XDSIntegrater()
            Debug.write("Using XDS Integrater in new resolution mode")
        except NotAvailableError:
            if preselection == "xdsr":
                raise RuntimeError(
                    "preselected integrater xdsr not available: " +
                    "xds not installed?")

    if not integrater:
        raise RuntimeError("no integrater implementations found")

    # check to see if resolution limits were passed in through the
    # command line...

    dmin = PhilIndex.params.xia2.settings.resolution.d_min
    dmax = PhilIndex.params.xia2.settings.resolution.d_max

    if dmin:
        Debug.write("Adding user-assigned resolution limits:")

        if dmax:

            Debug.write("dmin: %.3f dmax: %.2f" % (dmin, dmax))
            integrater.set_integrater_resolution(dmin, dmax, user=True)

        else:

            Debug.write("dmin: %.3f" % dmin)
            integrater.set_integrater_high_resolution(dmin, user=True)

    return integrater
Example #57
0
    def _index_prepare(self):
        """Prepare to do autoindexing - in XDS terms this will mean
        calling xycorr, init and colspot on the input images."""

        # decide on images to work with

        Debug.write("XDS INDEX PREPARE:")
        Debug.write("Wavelength: %.6f" % self.get_wavelength())
        Debug.write("Distance: %.2f" % self.get_distance())

        if not self._indxr_images:
            _select_images_function = getattr(
                self, "_index_select_images_%s" % (self._index_select_images))
            wedges = _select_images_function()
            for wedge in wedges:
                self.add_indexer_image_wedge(wedge)
            self.set_indexer_prepare_done(True)

        all_images = self.get_matching_images()

        first = min(all_images)
        last = max(all_images)

        # next start to process these - first xycorr

        xycorr = self.Xycorr()

        xycorr.set_data_range(first, last)
        xycorr.set_background_range(self._indxr_images[0][0],
                                    self._indxr_images[0][1])
        from dxtbx.serialize.xds import to_xds

        converter = to_xds(self.get_imageset())
        xds_beam_centre = converter.detector_origin
        xycorr.set_beam_centre(xds_beam_centre[0], xds_beam_centre[1])
        for block in self._indxr_images:
            xycorr.add_spot_range(block[0], block[1])

        # FIXME need to set the origin here

        xycorr.run()

        for file in ["X-CORRECTIONS.cbf", "Y-CORRECTIONS.cbf"]:
            self._indxr_payload[file] = xycorr.get_output_data_file(file)

        # next start to process these - then init

        if PhilIndex.params.xia2.settings.input.format.dynamic_shadowing:
            imageset = self._indxr_imagesets[0]
            masker = (imageset.get_format_class().get_instance(
                imageset.paths()[0]).get_masker())
            if masker is None:
                # disable dynamic_shadowing
                PhilIndex.params.xia2.settings.input.format.dynamic_shadowing = False

        if PhilIndex.params.xia2.settings.input.format.dynamic_shadowing:
            # find the region of the scan with the least predicted shadow
            # to use for background determination in XDS INIT step
            from dxtbx.model.experiment_list import ExperimentListFactory

            imageset = self._indxr_imagesets[0]
            xsweep = self._indxr_sweeps[0]
            sweep_filename = os.path.join(
                self.get_working_directory(),
                "%s_indexed.expt" % xsweep.get_name())
            ExperimentListFactory.from_imageset_and_crystal(
                imageset, None).as_file(sweep_filename)

            from xia2.Wrappers.Dials.ShadowPlot import ShadowPlot

            shadow_plot = ShadowPlot()
            shadow_plot.set_working_directory(self.get_working_directory())
            auto_logfiler(shadow_plot)
            shadow_plot.set_sweep_filename(sweep_filename)
            shadow_plot.set_json_filename(
                os.path.join(
                    self.get_working_directory(),
                    "%s_shadow_plot.json" % shadow_plot.get_xpid(),
                ))
            shadow_plot.run()
            results = shadow_plot.get_results()
            from scitbx.array_family import flex

            fraction_shadowed = flex.double(results["fraction_shadowed"])
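            # if no image is predicted to be shadowed at all there is
            # nothing to gain from dynamic shadowing, so switch it off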
            if flex.max(fraction_shadowed) == 0:
                PhilIndex.params.xia2.settings.input.format.dynamic_shadowing = False
            else:
                scan_points = flex.double(results["scan_points"])

                scan = imageset.get_scan()
                oscillation = scan.get_oscillation()

                if self._background_images is not None:
                    bg_images = self._background_images
                    bg_range_deg = (
                        scan.get_angle_from_image_index(bg_images[0]),
                        scan.get_angle_from_image_index(bg_images[1]),
                    )
                    bg_range_width = bg_range_deg[1] - bg_range_deg[0]

                    # slide a window of the same width as the requested
                    # background range across the scan, one oscillation at
                    # a time, keeping the window with the smallest mean
                    # predicted shadow fraction; fractions are <= 1, so
                    # 100 is a safe initial minimum
                    min_shadow = 100
                    best_bg_range = bg_range_deg
                    from libtbx.utils import frange

                    for bg_range_start in frange(
                            flex.min(scan_points),
                            flex.max(scan_points) - bg_range_width,
                            step=oscillation[1],
                    ):
                        bg_range_deg = (bg_range_start,
                                        bg_range_start + bg_range_width)
                        sel = (scan_points >= bg_range_deg[0]) & (
                            scan_points <= bg_range_deg[1])
                        mean_shadow = flex.mean(fraction_shadowed.select(sel))
                        if mean_shadow < min_shadow:
                            min_shadow = mean_shadow
                            best_bg_range = bg_range_deg

                    self._background_images = (
                        scan.get_image_index_from_angle(best_bg_range[0]),
                        scan.get_image_index_from_angle(best_bg_range[1]),
                    )
                    Debug.write("Setting background images: %s -> %s" %
                                self._background_images)

        init = self.Init()

        for file in ["X-CORRECTIONS.cbf", "Y-CORRECTIONS.cbf"]:
            init.set_input_data_file(file, self._indxr_payload[file])

        init.set_data_range(first, last)

        if self._background_images:
            init.set_background_range(self._background_images[0],
                                      self._background_images[1])
        else:
            init.set_background_range(self._indxr_images[0][0],
                                      self._indxr_images[0][1])

        for block in self._indxr_images:
            init.add_spot_range(block[0], block[1])

        init.run()

        # at this stage we may need to modify the BKGINIT.cbf image to
        # mark out the backstop

        if PhilIndex.params.xds.backstop_mask:
            Debug.write("Applying mask to BKGINIT.pck")

            # copy the original file
            cbf_old = os.path.join(init.get_working_directory(), "BKGINIT.cbf")
            cbf_save = os.path.join(init.get_working_directory(),
                                    "BKGINIT.sav")
            shutil.copyfile(cbf_old, cbf_save)

            # modify the file to give the new mask
            from xia2.Toolkit.BackstopMask import BackstopMask

            mask = BackstopMask(PhilIndex.params.xds.backstop_mask)
            mask.apply_mask_xds(self.get_header(), cbf_save, cbf_old)
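            # (apparently reading the saved copy and writing the masked
            # image back over BKGINIT.cbf; init.reload() below picks up
            # the change)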

            init.reload()

        for file in ["BLANK.cbf", "BKGINIT.cbf", "GAIN.cbf"]:
            self._indxr_payload[file] = init.get_output_data_file(file)

        if PhilIndex.params.xia2.settings.developmental.use_dials_spotfinder:

            spotfinder = self.DialsSpotfinder()

            for block in self._indxr_images:
                spotfinder.add_spot_range(block[0], block[1])

            spotfinder.run()
            export = self.DialsExportSpotXDS()
            export.set_input_data_file(
                "observations.refl",
                spotfinder.get_output_data_file("observations.refl"),
            )
            export.run()

            for file in ["SPOT.XDS"]:
                self._indxr_payload[file] = export.get_output_data_file(file)

        else:

            # then COLSPOT, which locates strong pixels in the given spot
            # ranges and writes the candidate spot list to SPOT.XDS for
            # IDXREF

            colspot = self.Colspot()

            for file in [
                    "X-CORRECTIONS.cbf",
                    "Y-CORRECTIONS.cbf",
                    "BLANK.cbf",
                    "BKGINIT.cbf",
                    "GAIN.cbf",
            ]:
                colspot.set_input_data_file(file, self._indxr_payload[file])

            colspot.set_data_range(first, last)
            colspot.set_background_range(self._indxr_images[0][0],
                                         self._indxr_images[0][1])
            for block in self._indxr_images:
                colspot.add_spot_range(block[0], block[1])

            colspot.run()

            for file in ["SPOT.XDS"]:
                self._indxr_payload[file] = colspot.get_output_data_file(file)
Example #58
0
def IndexerForXSweep(xsweep, json_file=None):
    '''Provide an indexer to work with XSweep instance xsweep.'''

    # check what is going on

    if xsweep is None:
        raise RuntimeError('XSweep instance needed')

    if xsweep.__class__.__name__ != 'XSweep':
        raise RuntimeError('XSweep instance needed')

    # if the xsweep has a crystal lattice defined, pass it through to the
    # indexer: both mosflm and labelit accept a known lattice and cell as
    # input, and this is implemented for both via the Indexer interface.

    crystal_lattice = xsweep.get_crystal_lattice()

    params = PhilIndex.params
    multi_sweep_indexing = bool(params.xia2.settings.multi_sweep_indexing)

    # FIXME SCI-599 decide from the width of the sweep and the preference
    # which indexer to return...

    sweep_images = xsweep.get_image_range()
    imageset = xsweep.get_imageset()
    scan = imageset.get_scan()
    oscillation = scan.get_oscillation()
    sweep_width = (oscillation[1] * (sweep_images[1] - sweep_images[0] + 1))
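    # e.g. 450 images at a 0.2 degree oscillation give a 90 degree sweep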

    # hack for now - if XDS integration is preferred and no indexer was
    # explicitly requested, switch to the XDS indexer for sweeps narrower
    # than 10 degrees
    if multi_sweep_indexing and len(xsweep.get_xsample().get_sweeps()) > 1:
        xsample = xsweep.get_xsample()
        indexer = xsample.get_multi_indexer()

        if indexer is None:
            indexer = Indexer()
            xsample.set_multi_indexer(indexer)

    elif sweep_width < 10.0 and not get_preferences().get('indexer') and \
        get_preferences().get('integrater') and \
        'xds' in get_preferences().get('integrater'):
        Debug.write('Overriding indexer as XDSII')
        indexer = Indexer(preselection='xdsii')
    else:
        indexer = Indexer()

    if json_file is not None:
        assert os.path.isfile(json_file)
        Debug.write("Loading indexer from json: %s" % json_file)
        import time
        t0 = time.time()
        indexer = indexer.__class__.from_json(filename=json_file)
        t1 = time.time()
        Debug.write("Loaded indexer in %.2f seconds" % (t1 - t0))
    else:
        # configure the indexer
        indexer.add_indexer_imageset(xsweep.get_imageset())

    if crystal_lattice:
        # this is e.g. ('aP', (1.0, 2.0, 3.0, 90.0, 98.0, 88.0))
        indexer.set_indexer_input_lattice(crystal_lattice[0])
        indexer.set_indexer_input_cell(crystal_lattice[1])

    # FIXME - it is assumed that all programs which implement the Indexer
    # interface will also implement FrameProcessor, which this uses.
    # verify this, or assert it in some way...

    # if xsweep.get_beam_centre():
    #     indexer.set_beam_centre(xsweep.get_beam_centre())

    # N.B. this does not need to be done for the integrater, since
    # that gets its numbers from the indexer it uses.

    # if xsweep.get_distance():
    #     Debug.write('Indexer factory: Setting distance: %.2f' %
    #                 xsweep.get_distance())
    #     indexer.set_distance(xsweep.get_distance())

    # FIXME more - need to check if we should be indexing in a specific
    # lattice - check xsweep.get_crystal_lattice()

    # need to do the same for wavelength now as that could be wrong in
    # the image header...

    # if xsweep.get_wavelength_value():
    #     Debug.write('Indexer factory: Setting wavelength: %.6f' %
    #                 xsweep.get_wavelength_value())
    #     indexer.set_wavelength(xsweep.get_wavelength_value())

    indexer.set_indexer_sweep(xsweep)

    if xsweep.get_xsample().get_multi_indexer() is not None:
        xsample = xsweep.get_xsample()
        multi_indexer = xsample.get_multi_indexer()
        assert multi_indexer is indexer, (multi_indexer, indexer)

        if len(indexer._indxr_imagesets) == 1:
            # this is the first sweep to reach the shared multi-sweep
            # indexer: touch the indexers of the remaining sweeps so that
            # their imagesets are registered with it as well
            for xsweep_other in xsample.get_sweeps()[1:]:
                xsweep_other._get_indexer()

    return indexer
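
# A minimal usage sketch (hypothetical call site), assuming an XSweep
# instance built elsewhere in the pipeline:
#
#   indexer = IndexerForXSweep(xsweep)
#   # results are then available through the Indexer interface, e.g. via a
#   # get_indexer_cell() accessor (assumed name)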
Example #59
0
    def _index_select_images_i(self):
        """Select correct images based on image headers."""

        phi_width = self.get_phi_width()

        images = self.get_matching_images()

        # characterise the images - are there just two (e.g. dna-style
        # reference images) or is there a full block?

        wedges = []

        if len(images) < 3:
            # work on the assumption that this is a reference pair

            wedges.append(images[0])

            if len(images) > 1:
                wedges.append(images[1])

        else:
            max_wedge_size_degrees = PhilIndex.params.xds.index.max_wedge_size_degrees
            max_wedge_size = PhilIndex.params.xds.index.max_wedge_size
            if max_wedge_size_degrees is not None:
                n = int(math.floor(max_wedge_size_degrees / phi_width))
                if max_wedge_size is not None:
                    max_wedge_size = min(max_wedge_size, max(n, 1))
                else:
                    max_wedge_size = n
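            # e.g. a 5 degree cap with a 0.2 degree oscillation gives
            # n = 25 images; the stricter of the two limits wins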

            Debug.write("Using max_wedge_size: %d" % max_wedge_size)

            block_size = min(len(images), max_wedge_size)

            Debug.write("Adding images for indexer: %d -> %d" %
                        (images[0], images[block_size - 1]))

            wedges.append((images[0], images[block_size - 1]))

            if int(90.0 / phi_width) + block_size in images:
                # the sweep extends to at least ~90 degrees, so assume we
                # can also add wedges around 45 and 90 degrees to sample
                # more of reciprocal space
                Debug.write("Adding images for indexer: %d -> %d" % (
                    int(45.0 / phi_width) + images[0],
                    int(45.0 / phi_width) + images[0] + block_size - 1,
                ))
                Debug.write("Adding images for indexer: %d -> %d" % (
                    int(90.0 / phi_width) + images[0],
                    int(90.0 / phi_width) + images[0] + block_size - 1,
                ))
                wedges.append((
                    int(45.0 / phi_width) + images[0],
                    int(45.0 / phi_width) + images[0] + block_size - 1,
                ))
                wedges.append((
                    int(90.0 / phi_width) + images[0],
                    int(90.0 / phi_width) + images[0] + block_size - 1,
                ))

            else:

                # add some half-way anyway
                first = (len(images) // 2) - (block_size // 2) + images[0] - 1
                if first > wedges[0][1]:
                    last = first + block_size - 1
                    Debug.write("Adding images for indexer: %d -> %d" %
                                (first, last))
                    wedges.append((first, last))
                if len(images) > block_size:
                    Debug.write("Adding images for indexer: %d -> %d" %
                                (images[-block_size], images[-1]))
                    wedges.append((images[-block_size], images[-1]))

        return wedges
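
    # NOTE the returned list mixes bare image numbers (for one- or two-
    # image reference sets) with (first, last) tuples for real wedges;
    # callers such as add_indexer_image_wedge are assumed to handle both.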
Example #60
0
    def _integrate_finish(self):
        '''Finish off the integration by running dials.export.'''

        # FIXME - do we want to export every time we call this method (the
        # file will not have changed), and, more importantly, do we want a
        # different exported MTZ file every time? Probably not - these can
        # be very large. (The name was exporter.get_xpid(); it is now
        # simply 'dials'.)

        exporter = self.ExportMtz()
        exporter.set_reflections_filename(self._intgr_integrated_pickle)
        mtz_filename = os.path.join(self.get_working_directory(),
                                    'dials_integrated.mtz')
        exporter.set_mtz_filename(mtz_filename)
        exporter.run()
        self._intgr_integrated_filename = mtz_filename

        # check the export actually produced the file before trying to
        # read it back
        if not os.path.isfile(self._intgr_integrated_filename):
            raise RuntimeError("dials.export failed: %s does not exist." %
                               self._intgr_integrated_filename)

        # record integrated MTZ file for e.g. BLEND.

        pname, xname, dname = self.get_integrater_project_info()
        sweep = self.get_integrater_sweep_name()
        FileHandler.record_more_data_file(
            '%s %s %s %s INTEGRATE' % (pname, xname, dname, sweep),
            mtz_filename)

        from iotbx.reflection_file_reader import any_reflection_file
        miller_arrays = any_reflection_file(
            self._intgr_integrated_filename).as_miller_arrays()
        # look for profile-fitted intensities
        intensities = [
            ma for ma in miller_arrays
            if ma.info().labels == ['IPR', 'SIGIPR']
        ]
        if len(intensities) == 0:
            # look instead for summation-integrated intensities
            intensities = [
                ma for ma in miller_arrays
                if ma.info().labels == ['I', 'SIGI']
            ]
            assert len(intensities)
        self._intgr_n_ref = intensities[0].size()

        # nothing to do if no reindexing operator is set and the spacegroup
        # is either unset or already that implied by the refined lattice
        if self._intgr_reindex_operator is None and \
           self._intgr_spacegroup_number in (
               0, lattice_to_spacegroup(
                   self.get_integrater_refiner().get_refiner_lattice())):
            Debug.write('Not reindexing to spacegroup %d (%s)' %
                        (self._intgr_spacegroup_number,
                         self._intgr_reindex_operator))
            return mtz_filename

        Debug.write('Reindexing to spacegroup %d (%s)' % \
                    (self._intgr_spacegroup_number,
                     self._intgr_reindex_operator))

        hklin = mtz_filename
        reindex = Reindex()
        reindex.set_working_directory(self.get_working_directory())
        auto_logfiler(reindex)

        reindex.set_operator(self._intgr_reindex_operator)

        if self._intgr_spacegroup_number:
            reindex.set_spacegroup(self._intgr_spacegroup_number)
        else:
            reindex.set_spacegroup(
                lattice_to_spacegroup(
                    self.get_integrater_refiner().get_refiner_lattice()))

        hklout = '%s_reindex.mtz' % hklin[:-4]
        reindex.set_hklin(hklin)
        reindex.set_hklout(hklout)
        reindex.reindex()
        self._intgr_integrated_filename = hklout
        self._intgr_cell = reindex.get_cell()

        pname, xname, dname = self.get_integrater_project_info()
        sweep = self.get_integrater_sweep_name()
        FileHandler.record_more_data_file(
            '%s %s %s %s experiments' % (pname, xname, dname, sweep),
            self.get_integrated_experiments())
        FileHandler.record_more_data_file(
            '%s %s %s %s reflections' % (pname, xname, dname, sweep),
            self.get_integrated_reflections())

        return hklout