Example #1
File: Scaler.py  Project: ndevenish/xia2
    def scale(self):
        '''Actually perform the scaling - this is delegated to the
    implementation.'''

        if self._scalr_integraters == {}:
            raise RuntimeError( \
                  'no Integrater implementations assigned for scaling')

        xname = self._scalr_xcrystal.get_name()

        while not self.get_scaler_finish_done():
            while not self.get_scaler_done():
                while not self.get_scaler_prepare_done():

                    Chatter.banner('Preparing %s' % xname)

                    self._scalr_prepare_done = True
                    self._scale_prepare()

                Chatter.banner('Scaling %s' % xname)

                self._scalr_done = True
                self._scalr_result = self._scale()

            self._scalr_finish_done = True
            self._scale_finish()

        return self._scalr_result
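
A note on the control flow above: the nested while loops implement xia2's prepare/do/finish pattern, where each stage marks itself done before running its step and any step may clear a flag again to force the enclosing loops to repeat. A minimal standalone sketch of that pattern, using hypothetical names rather than the xia2 API:

# Minimal sketch of the prepare/do/finish loop pattern shown above
# (illustrative names only, not the xia2 API).
class Task(object):
    def __init__(self):
        self._prepare_done = False
        self._done = False
        self._finish_done = False
        self._result = None

    def run(self):
        while not self._finish_done:
            while not self._done:
                while not self._prepare_done:
                    # flag is set before the step; a real prepare step could
                    # clear it again to request another pass
                    self._prepare_done = True
                self._done = True
                self._result = 'scaled'    # stand-in for _scale()
            self._finish_done = True       # stand-in for _scale_finish()
        return self._result

print(Task().run())
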
Example #2
File: Scaler.py  Project: hainm/xia2
  def scale(self):
    '''Actually perform the scaling - this is delegated to the
    implementation.'''

    if self._scalr_integraters == { }:
      raise RuntimeError(
          'no Integrater implementations assigned for scaling')

    xname = self._scalr_xcrystal.get_name()

    while not self.get_scaler_finish_done():
      while not self.get_scaler_done():
        while not self.get_scaler_prepare_done():

          Chatter.banner('Preparing %s' % xname)

          self._scalr_prepare_done = True
          self._scale_prepare()

        Chatter.banner('Scaling %s' % xname)

        self._scalr_done = True
        self._scalr_result = self._scale()

      self._scalr_finish_done = True
      self._scale_finish()

    return self._scalr_result
Example #3
    def _scale_finish_chunk_8_raddam(self):
        crd = CCP4InterRadiationDamageDetector()

        crd.set_working_directory(self.get_working_directory())

        crd.set_hklin(self._scalr_scaled_reflection_files['mtz'])

        if self.get_scaler_anomalous():
            crd.set_anomalous(True)

        hklout = os.path.join(self.get_working_directory(), 'temp.mtz')
        FileHandler.record_temporary_file(hklout)

        crd.set_hklout(hklout)

        status = crd.detect()

        if status:
            Chatter.write('')
            Chatter.banner('Local Scaling %s' % self._scalr_xname)
            for s in status:
                Chatter.write('%s %s' % s)
            Chatter.banner('')
        else:
            Debug.write('Local scaling failed')
Example #4
File: Integrater.py  Project: xia2/xia2
  def integrate(self):
    '''Actually perform integration until we think we are done...'''

    while not self.get_integrater_finish_done():
      while not self.get_integrater_done():
        while not self.get_integrater_prepare_done():

          Debug.write('Preparing to do some integration...')
          self.set_integrater_prepare_done(True)

          # if this raises an exception, perhaps the autoindexing
          # solution has too high symmetry. if this is the case, then
          # perform a self._intgr_indexer.eliminate() - this should
          # reset the indexing system

          try:
            self._integrate_prepare()

          except BadLatticeError as e:

            Journal.banner('eliminated this lattice', size = 80)

            Chatter.write('Rejecting bad lattice %s' % str(e))
            self._intgr_refiner.eliminate()
            self._integrater_reset()

        # FIXME x1698 - may be the case that _integrate() returns the
        # raw intensities, _integrate_finish() returns intensities
        # which may have been adjusted or corrected. See #1698 below.

        Debug.write('Doing some integration...')

        self.set_integrater_done(True)

        template = self.get_integrater_sweep().get_template()

        if self._intgr_sweep_name:
          if PhilIndex.params.xia2.settings.show_template:
            Chatter.banner('Integrating %s (%s)' % \
                           (self._intgr_sweep_name, template))
          else:
            Chatter.banner('Integrating %s' % \
                           (self._intgr_sweep_name))
        try:

          #1698
          self._intgr_hklout_raw = self._integrate()

        except BadLatticeError as e:
          Chatter.write('Rejecting bad lattice %s' % str(e))

          Journal.banner('eliminated this lattice', size = 80)

          self._intgr_refiner.eliminate()
          self._integrater_reset()
Example #5
    def _index_prepare(self):
        Chatter.banner('Spotfinding %s' % self.get_indexer_sweep_name())
        super(XDSIndexerII, self)._index_prepare()

        from dials.array_family import flex
        from dials.util.ascii_art import spot_counts_per_image_plot
        reflection_pickle = spot_xds_to_reflection_pickle(
            self._indxr_payload['SPOT.XDS'],
            working_directory=self.get_working_directory())
        refl = flex.reflection_table.from_pickle(reflection_pickle)
        Chatter.write(spot_counts_per_image_plot(refl), strip=False)
Example #6
File: XDSIndexerII.py  Project: xia2/xia2
  def _index_prepare(self):
    Chatter.banner('Spotfinding %s' %self.get_indexer_sweep_name())
    super(XDSIndexerII, self)._index_prepare()

    from dials.array_family import flex
    from dials.util.ascii_art import spot_counts_per_image_plot
    reflection_pickle = spot_xds_to_reflection_pickle(
      self._indxr_payload['SPOT.XDS'],
      working_directory=self.get_working_directory())
    refl = flex.reflection_table.from_pickle(reflection_pickle)
    Chatter.write(spot_counts_per_image_plot(refl), strip=False)
Example #7
    def _index_prepare(self):

        from xia2.Handlers.Citations import Citations
        Citations.cite('dials')

        #all_images = self.get_matching_images()
        #first = min(all_images)
        #last = max(all_images)

        spot_lists = []
        datablocks = []

        for imageset, xsweep in zip(self._indxr_imagesets, self._indxr_sweeps):

            Chatter.banner('Spotfinding %s' % xsweep.get_name())

            first, last = imageset.get_scan().get_image_range()

            # at this stage, break out to run the DIALS code: this sets itself up
            # now cheat and pass in some information... save re-reading all of the
            # image headers

            # FIXME need to adjust this to allow (say) three chunks of images

            from dxtbx.serialize import dump
            from dxtbx.datablock import DataBlock
            sweep_filename = os.path.join(
                self.get_working_directory(),
                '%s_datablock.json' % xsweep.get_name())
            dump.datablock(DataBlock([imageset]), sweep_filename)

            genmask = self.GenerateMask()
            genmask.set_input_datablock(sweep_filename)
            genmask.set_output_datablock(
                os.path.join(
                    self.get_working_directory(), '%s_%s_datablock.json' %
                    (genmask.get_xpid(), xsweep.get_name())))
            genmask.set_params(PhilIndex.params.dials.masking)
            sweep_filename, mask_pickle = genmask.run()
            Debug.write('Generated mask for %s: %s' %
                        (xsweep.get_name(), mask_pickle))

            gain = PhilIndex.params.xia2.settings.input.gain
            if gain is libtbx.Auto:
                gain_estimater = self.EstimateGain()
                gain_estimater.set_sweep_filename(sweep_filename)
                gain_estimater.run()
                gain = gain_estimater.get_gain()
                Chatter.write('Estimated gain: %.2f' % gain)
                PhilIndex.params.xia2.settings.input.gain = gain

            # FIXME this should really use the assigned spot finding regions
            #offset = self.get_frame_offset()
            spotfinder = self.Spotfinder()
            if last - first > 10:
                spotfinder.set_write_hot_mask(True)
            spotfinder.set_input_sweep_filename(sweep_filename)
            spotfinder.set_output_sweep_filename(
                '%s_%s_datablock.json' %
                (spotfinder.get_xpid(), xsweep.get_name()))
            spotfinder.set_input_spot_filename(
                '%s_%s_strong.pickle' %
                (spotfinder.get_xpid(), xsweep.get_name()))
            if PhilIndex.params.dials.fast_mode:
                wedges = self._index_select_images_i(imageset)
                spotfinder.set_scan_ranges(wedges)
            else:
                spotfinder.set_scan_ranges([(first, last)])
            if PhilIndex.params.dials.find_spots.phil_file is not None:
                spotfinder.set_phil_file(
                    PhilIndex.params.dials.find_spots.phil_file)
            min_spot_size = PhilIndex.params.dials.find_spots.min_spot_size
            if min_spot_size is libtbx.Auto:
                if imageset.get_detector()[0].get_type() == 'SENSOR_PAD':
                    min_spot_size = 3
                else:
                    min_spot_size = None
            if min_spot_size is not None:
                spotfinder.set_min_spot_size(min_spot_size)
            min_local = PhilIndex.params.dials.find_spots.min_local
            if min_local is not None:
                spotfinder.set_min_local(min_local)
            sigma_strong = PhilIndex.params.dials.find_spots.sigma_strong
            if sigma_strong:
                spotfinder.set_sigma_strong(sigma_strong)
            gain = PhilIndex.params.xia2.settings.input.gain
            if gain:
                spotfinder.set_gain(gain)
            filter_ice_rings = PhilIndex.params.dials.find_spots.filter_ice_rings
            if filter_ice_rings:
                spotfinder.set_filter_ice_rings(filter_ice_rings)
            kernel_size = PhilIndex.params.dials.find_spots.kernel_size
            if kernel_size:
                spotfinder.set_kernel_size(kernel_size)
            global_threshold = PhilIndex.params.dials.find_spots.global_threshold
            if global_threshold is not None:
                spotfinder.set_global_threshold(global_threshold)
            spotfinder.run()

            spot_filename = spotfinder.get_spot_filename()
            if not os.path.exists(spot_filename):
                raise RuntimeError("Spotfinding failed: %s does not exist." %
                                   os.path.basename(spot_filename))

            spot_lists.append(spot_filename)
            datablocks.append(spotfinder.get_output_sweep_filename())

            from libtbx import easy_pickle
            from dials.util.ascii_art import spot_counts_per_image_plot
            refl = easy_pickle.load(spot_filename)
            if not len(refl):
                raise RuntimeError('No spots found in sweep %s' %
                                   xsweep.get_name())
            Chatter.write(spot_counts_per_image_plot(refl), strip=False)

            if not PhilIndex.params.dials.fast_mode:
                detectblanks = self.DetectBlanks()
                detectblanks.set_sweep_filename(datablocks[-1])
                detectblanks.set_reflections_filename(spot_filename)
                detectblanks.run()
                json = detectblanks.get_results()
                offset = imageset.get_scan().get_image_range()[0]
                blank_regions = json['strong']['blank_regions']
                if len(blank_regions):
                    blank_regions = [(int(s), int(e))
                                     for s, e in blank_regions]
                    for blank_start, blank_end in blank_regions:
                        Chatter.write(
                            'WARNING: Potential blank images: %i -> %i' %
                            (blank_start + 1, blank_end))

                    if PhilIndex.params.xia2.settings.remove_blanks:
                        non_blanks = []
                        start, end = imageset.get_array_range()
                        last_blank_end = start
                        for blank_start, blank_end in blank_regions:
                            if blank_start > start:
                                non_blanks.append(
                                    (last_blank_end, blank_start))
                            last_blank_end = blank_end

                        if last_blank_end + 1 < end:
                            non_blanks.append((last_blank_end, end))

                        xsweep = self.get_indexer_sweep()
                        xwav = xsweep.get_wavelength()
                        xsample = xsweep.get_xsample()

                        sweep_name = xsweep.get_name()
                        import string
                        for i, (nb_start, nb_end) in enumerate(non_blanks):
                            assert i < 26
                            if i == 0:
                                sub_imageset = imageset[nb_start -
                                                        start:nb_end - start]
                                xsweep._frames_to_process = (nb_start + 1,
                                                             nb_end + 1)
                                self.set_indexer_prepare_done(done=False)
                                self._indxr_imagesets[
                                    self._indxr_imagesets.index(
                                        imageset)] = sub_imageset
                                xsweep._integrater._setup_from_imageset(
                                    sub_imageset)
                            else:
                                new_name = '_'.join(
                                    (sweep_name, string.ascii_lowercase[i]))
                                new_sweep = xwav.add_sweep(
                                    new_name,
                                    xsample,
                                    directory=os.path.join(
                                        os.path.basename(
                                            xsweep.get_directory()), new_name),
                                    image=imageset.get_path(nb_start - start),
                                    frames_to_process=(nb_start + 1, nb_end),
                                )
                                Chatter.write(
                                    "Generating new sweep: %s (%s:%i:%i)" %
                                    (new_sweep.get_name(),
                                     new_sweep.get_image(),
                                     new_sweep.get_frames_to_process()[0],
                                     new_sweep.get_frames_to_process()[1]))
                        return

            if not PhilIndex.params.xia2.settings.trust_beam_centre:
                discovery = self.DiscoverBetterExperimentalModel()
                discovery.set_sweep_filename(datablocks[-1])
                discovery.set_spot_filename(spot_filename)
                #wedges = self._index_select_images_i(imageset)
                #discovery.set_scan_ranges(wedges)
                #discovery.set_scan_ranges([(first + offset, last + offset)])
                try:
                    discovery.run()
                except Exception as e:
                    Debug.write('DIALS beam centre search failed: %s' % str(e))
                else:
                    # overwrite datablock.json in datablocks list
                    datablocks[
                        -1] = discovery.get_optimized_datablock_filename()

        self.set_indexer_payload("spot_lists", spot_lists)
        self.set_indexer_payload("datablocks", datablocks)

        return
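
The remove_blanks branch above turns the detected blank image ranges into the complementary non-blank ranges before splitting the sweep. A self-contained sketch of just that range arithmetic, with a hypothetical helper name (not part of xia2):

# Derive non-blank (start, end) ranges from detected blank regions within an
# image array range; mirrors the loop in the example above.
def non_blank_ranges(start, end, blank_regions):
    non_blanks = []
    last_blank_end = start
    for blank_start, blank_end in blank_regions:
        if blank_start > start:
            non_blanks.append((last_blank_end, blank_start))
        last_blank_end = blank_end
    if last_blank_end + 1 < end:
        non_blanks.append((last_blank_end, end))
    return non_blanks

print(non_blank_ranges(0, 100, [(20, 30), (60, 70)]))
# [(0, 20), (30, 60), (70, 100)]
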
Example #8
File: Indexer.py  Project: xia2/xia2
  def index(self):

    if not self.get_indexer_finish_done():
      f = inspect.currentframe().f_back.f_back
      m = f.f_code.co_filename
      l = f.f_lineno

      Debug.write('Index in %s called from %s %d' %
                  (self.__class__.__name__, m, l))

    while not self.get_indexer_finish_done():
      while not self.get_indexer_done():
        while not self.get_indexer_prepare_done():

          # --------------
          # call prepare()
          # --------------

          self.set_indexer_prepare_done(True)
          self._index_prepare()

        # --------------------------------------------
        # then do the proper indexing - using the best
        # solution already stored if available (c/f
        # eliminate above)
        # --------------------------------------------

        self.set_indexer_done(True)

        if self.get_indexer_sweeps():
          xsweeps = [ s.get_name() for s in self.get_indexer_sweeps() ]
          if len(xsweeps) > 1:
            # find "SWEEPn, SWEEP(n+1), (..), SWEEPm" and aggregate to "SWEEPS n-m"
            xsweeps = map(lambda x: (int(x[5:]), int(x[5:])) if x.startswith('SWEEP') else x, xsweeps)
            xsweeps[0] = [xsweeps[0]]
            def compress(seen, nxt):
              if isinstance(seen[-1], tuple) and isinstance(nxt, tuple) and (seen[-1][1] + 1 == nxt[0]):
                seen[-1] = (seen[-1][0], nxt[1])
              else:
                seen.append(nxt)
              return seen
            xsweeps = reduce(compress, xsweeps)
            xsweeps = map(lambda x: ('SWEEP%d' % x[0] if x[0] == x[1] else
                                     'SWEEPS %d to %d' % (x[0], x[1])) if isinstance(x, tuple)
                                     else x, xsweeps)
          if len(xsweeps) > 1:
            sweep_names = ', '.join(xsweeps[:-1])
            sweep_names += ' & ' + xsweeps[-1]
          else:
            sweep_names = xsweeps[0]

          if PhilIndex.params.xia2.settings.show_template:
            template = self.get_indexer_sweep().get_template()
            Chatter.banner(
              'Autoindexing %s (%s)' %(sweep_names, template))
          else:
            Chatter.banner('Autoindexing %s' %sweep_names)


        if not self._indxr_helper:

          result = self._index()

          if not self._indxr_done:
            Debug.write(
              'Looks like indexing failed - try again!')
            continue

          solutions = { }
          for k in self._indxr_other_lattice_cell.keys():
            solutions[k] = self._indxr_other_lattice_cell[k][
              'cell']

          # create a helper for the indexer to manage solutions
          self._indxr_helper = _IndexerHelper(solutions)

          solution = self._indxr_helper.get()

          # compare these against the final solution, if different
          # reject solution and return - correct solution will
          # be used next cycle

          if self._indxr_lattice != solution[0] and \
             not self._indxr_input_cell and \
             not PhilIndex.params.xia2.settings.integrate_p1:
            Chatter.write('Rerunning indexing lattice %s to %s' %
                          (self._indxr_lattice, solution[0]))
            Debug.write(
              'Rerunning indexing with target lattice %s' % \
              solution[0])
            self.set_indexer_done(False)

        else:
          # rerun autoindexing with the best known current solution

          solution = self._indxr_helper.get()
          self._indxr_input_lattice = solution[0]
          self._indxr_input_cell = solution[1]
          result = self._index()

      # next finish up...

      self.set_indexer_finish_done(True)
      self._index_finish()

      if self._indxr_print:
        Chatter.write(self.show_indexer_solutions())
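
The map/reduce block above collapses runs of consecutive sweep names ("SWEEP1, SWEEP2, SWEEP3") into a single "SWEEPS 1 to 3" label; as written it relies on Python 2 semantics (list-returning map, builtin reduce). A Python 3 sketch of the same aggregation, with a hypothetical function name:

# Aggregate consecutive SWEEPn names into "SWEEPS n to m"; other names pass through.
from functools import reduce

def aggregate_sweep_names(names):
    items = [(int(n[5:]), int(n[5:])) if n.startswith('SWEEP') else n for n in names]

    def compress(seen, nxt):
        # merge (a, b) followed by (b + 1, c) into (a, c)
        if isinstance(seen[-1], tuple) and isinstance(nxt, tuple) \
           and seen[-1][1] + 1 == nxt[0]:
            seen[-1] = (seen[-1][0], nxt[1])
        else:
            seen.append(nxt)
        return seen

    items = reduce(compress, items[1:], [items[0]])
    out = []
    for x in items:
        if isinstance(x, tuple):
            out.append('SWEEP%d' % x[0] if x[0] == x[1]
                       else 'SWEEPS %d to %d' % (x[0], x[1]))
        else:
            out.append(x)
    return out

print(aggregate_sweep_names(['SWEEP1', 'SWEEP2', 'SWEEP3', 'SWEEP7']))
# ['SWEEPS 1 to 3', 'SWEEP7']
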
Example #9
    def integrate(self):
        '''Actually perform integration until we think we are done...'''

        while not self.get_integrater_finish_done():
            while not self.get_integrater_done():
                while not self.get_integrater_prepare_done():

                    Debug.write('Preparing to do some integration...')
                    self.set_integrater_prepare_done(True)

                    # if this raises an exception, perhaps the autoindexing
                    # solution has too high symmetry. if this is the case, then
                    # perform a self._intgr_indexer.eliminate() - this should
                    # reset the indexing system

                    try:
                        self._integrate_prepare()

                    except BadLatticeError as e:

                        Journal.banner('eliminated this lattice', size=80)

                        Chatter.write('Rejecting bad lattice %s' % str(e))
                        self._intgr_refiner.eliminate()
                        self._integrater_reset()

                # FIXME x1698 - may be the case that _integrate() returns the
                # raw intensities, _integrate_finish() returns intensities
                # which may have been adjusted or corrected. See #1698 below.

                Debug.write('Doing some integration...')

                self.set_integrater_done(True)

                template = self.get_integrater_sweep().get_template()

                if self._intgr_sweep_name:
                    if PhilIndex.params.xia2.settings.show_template:
                        Chatter.banner('Integrating %s (%s)' % \
                                       (self._intgr_sweep_name, template))
                    else:
                        Chatter.banner('Integrating %s' % \
                                       (self._intgr_sweep_name))
                try:

                    #1698
                    self._intgr_hklout_raw = self._integrate()

                except BadLatticeError as e:
                    Chatter.write('Rejecting bad lattice %s' % str(e))

                    Journal.banner('eliminated this lattice', size=80)

                    self._intgr_refiner.eliminate()
                    self._integrater_reset()

            self.set_integrater_finish_done(True)

            try:
                # allow for the fact that postrefinement may be used
                # to reject the lattice...

                self._intgr_hklout = self._integrate_finish()

            except BadLatticeError as e:
                Chatter.write('Bad Lattice Error: %s' % str(e))
                self._intgr_refiner.eliminate()
                self._integrater_reset()

        return self._intgr_hklout
Example #10
    def index(self):

        if not self.get_indexer_finish_done():
            f = inspect.currentframe().f_back.f_back
            m = f.f_code.co_filename
            l = f.f_lineno

            Debug.write(
                "Index in %s called from %s %d" % (self.__class__.__name__, m, l)
            )

        while not self.get_indexer_finish_done():
            while not self.get_indexer_done():
                while not self.get_indexer_prepare_done():

                    # --------------
                    # call prepare()
                    # --------------

                    self.set_indexer_prepare_done(True)
                    self._index_prepare()

                # --------------------------------------------
                # then do the proper indexing - using the best
                # solution already stored if available (c/f
                # eliminate above)
                # --------------------------------------------

                self.set_indexer_done(True)

                if self.get_indexer_sweeps():
                    xsweeps = [s.get_name() for s in self.get_indexer_sweeps()]
                    if len(xsweeps) > 1:
                        # find "SWEEPn, SWEEP(n+1), (..), SWEEPm" and aggregate to "SWEEPS n-m"
                        xsweeps = map(
                            lambda x: (int(x[5:]), int(x[5:]))
                            if x.startswith("SWEEP")
                            else x,
                            xsweeps,
                        )
                        xsweeps[0] = [xsweeps[0]]

                        def compress(seen, nxt):
                            if (
                                isinstance(seen[-1], tuple)
                                and isinstance(nxt, tuple)
                                and (seen[-1][1] + 1 == nxt[0])
                            ):
                                seen[-1] = (seen[-1][0], nxt[1])
                            else:
                                seen.append(nxt)
                            return seen

                        xsweeps = reduce(compress, xsweeps)
                        xsweeps = map(
                            lambda x: (
                                "SWEEP%d" % x[0]
                                if x[0] == x[1]
                                else "SWEEPS %d to %d" % (x[0], x[1])
                            )
                            if isinstance(x, tuple)
                            else x,
                            xsweeps,
                        )
                    if len(xsweeps) > 1:
                        sweep_names = ", ".join(xsweeps[:-1])
                        sweep_names += " & " + xsweeps[-1]
                    else:
                        sweep_names = xsweeps[0]

                    if PhilIndex.params.xia2.settings.show_template:
                        template = self.get_indexer_sweep().get_template()
                        Chatter.banner("Autoindexing %s (%s)" % (sweep_names, template))
                    else:
                        Chatter.banner("Autoindexing %s" % sweep_names)

                if not self._indxr_helper:

                    result = self._index()

                    if not self._indxr_done:
                        Debug.write("Looks like indexing failed - try again!")
                        continue

                    solutions = {}
                    for k in self._indxr_other_lattice_cell.keys():
                        solutions[k] = self._indxr_other_lattice_cell[k]["cell"]

                    # create a helper for the indexer to manage solutions
                    self._indxr_helper = _IndexerHelper(solutions)

                    solution = self._indxr_helper.get()

                    # compare these against the final solution, if different
                    # reject solution and return - correct solution will
                    # be used next cycle

                    if (
                        self._indxr_lattice != solution[0]
                        and not self._indxr_input_cell
                        and not PhilIndex.params.xia2.settings.integrate_p1
                    ):
                        Chatter.write(
                            "Rerunning indexing lattice %s to %s"
                            % (self._indxr_lattice, solution[0])
                        )
                        Debug.write(
                            "Rerunning indexing with target lattice %s" % solution[0]
                        )
                        self.set_indexer_done(False)

                else:
                    # rerun autoindexing with the best known current solution

                    solution = self._indxr_helper.get()
                    self._indxr_input_lattice = solution[0]
                    self._indxr_input_cell = solution[1]
                    result = self._index()

            # next finish up...

            self.set_indexer_finish_done(True)
            self._index_finish()

            if self._indxr_print:
                Chatter.write(self.show_indexer_solutions())
Example #11
File: Indexer.py  Project: hainm/xia2
  def index(self):

    if not self.get_indexer_finish_done():
      f = inspect.currentframe().f_back.f_back
      m = f.f_code.co_filename
      l = f.f_lineno

      Debug.write('Index in %s called from %s %d' %
                  (self.__class__.__name__, m, l))

    while not self.get_indexer_finish_done():
      while not self.get_indexer_done():
        while not self.get_indexer_prepare_done():

          # --------------
          # call prepare()
          # --------------

          self.set_indexer_prepare_done(True)
          self._index_prepare()

        # --------------------------------------------
        # then do the proper indexing - using the best
        # solution already stored if available (c/f
        # eliminate above)
        # --------------------------------------------

        self.set_indexer_done(True)

        if self.get_indexer_sweeps():
          xsweeps = self.get_indexer_sweeps()
          if len(xsweeps) > 1:
            sweep_names = ', '.join(x.get_name() for x in xsweeps[:-1])
            sweep_names += ' & ' + xsweeps[-1].get_name()
          else:
            sweep_names = xsweeps[0].get_name()

          if PhilIndex.params.xia2.settings.show_template:
            template = self.get_indexer_sweep().get_template()
            Chatter.banner(
              'Autoindexing %s (%s)' %(sweep_names, template))
          else:
            Chatter.banner('Autoindexing %s' %sweep_names)


        if not self._indxr_helper:

          result = self._index()

          if not self._indxr_done:
            Debug.write(
              'Looks like indexing failed - try again!')
            continue

          solutions = { }
          for k in self._indxr_other_lattice_cell.keys():
            solutions[k] = self._indxr_other_lattice_cell[k][
              'cell']

          # create a helper for the indexer to manage solutions
          self._indxr_helper = _IndexerHelper(solutions)

          solution = self._indxr_helper.get()

          # compare these against the final solution, if different
          # reject solution and return - correct solution will
          # be used next cycle

          if self._indxr_lattice != solution[0] and \
             not self._indxr_input_cell and \
             not PhilIndex.params.xia2.settings.integrate_p1:
            Chatter.write('Rerunning indexing lattice %s to %s' %
                          (self._indxr_lattice, solution[0]))
            Debug.write(
              'Rerunning indexing with target lattice %s' % \
              solution[0])
            self.set_indexer_done(False)

        else:
          # rerun autoindexing with the best known current solution

          solution = self._indxr_helper.get()
          self._indxr_input_lattice = solution[0]
          self._indxr_input_cell = solution[1]
          result = self._index()

      # next finish up...

      self.set_indexer_finish_done(True)
      self._index_finish()

      if self._indxr_print:
        Chatter.write(self.show_indexer_solutions())

      # FIXED 23/OCT/06 at this stage I need to look at the list of
      # reasonable solutions and try to figure out if the indexing
      # program has picked the highest - if not, then constrain the
      # unit cell (need to implement this somewhere, sure it's
      # around!) then rerun the autoindexing (perhaps?) with this
      # new target - this means that we are always working from the
      # top downwards with these things. Once we decide somewhere
      # else (e.g. in cell refinement) that this cell isn't good
      # then we can eliminate it from the list, select the next
      # lower symmetry solution and continue. This solution is a
      # general one, so may be implemented in the general indexer
      # interface rather than in specific code...

    return
Example #12
  def _update_scaled_unit_cell(self):
    # FIXME this could be brought in-house

    params = PhilIndex.params
    fast_mode = params.dials.fast_mode
    if (params.xia2.settings.integrater == 'dials' and not fast_mode
        and params.xia2.settings.scale.two_theta_refine):
      from xia2.Wrappers.Dials.TwoThetaRefine import TwoThetaRefine
      from xia2.lib.bits import auto_logfiler

      Chatter.banner('Unit cell refinement')

      # Collect a list of all sweeps, grouped by project, crystal, wavelength
      groups = {}
      self._scalr_cell_dict = {}
      tt_refine_experiments = []
      tt_refine_pickles = []
      tt_refine_reindex_ops = []
      for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)
        pi = '_'.join(si.get_project_info())
        intgr = si.get_integrater()
        groups[pi] = groups.get(pi, []) + \
          [(intgr.get_integrated_experiments(),
            intgr.get_integrated_reflections(),
            intgr.get_integrater_reindex_operator())]

      # Two theta refine the unit cell for each group
      p4p_file = os.path.join(self.get_working_directory(),
                              '%s_%s.p4p' % (self._scalr_pname, self._scalr_xname))
      for pi in groups.keys():
        tt_grouprefiner = TwoThetaRefine()
        tt_grouprefiner.set_working_directory(self.get_working_directory())
        auto_logfiler(tt_grouprefiner)
        args = zip(*groups[pi])
        tt_grouprefiner.set_experiments(args[0])
        tt_grouprefiner.set_pickles(args[1])
        tt_grouprefiner.set_output_p4p(p4p_file)
        tt_refine_experiments.extend(args[0])
        tt_refine_pickles.extend(args[1])
        tt_refine_reindex_ops.extend(args[2])
        reindex_ops = args[2]
        from cctbx.sgtbx import change_of_basis_op as cb_op
        if self._spacegroup_reindex_operator is not None:
          reindex_ops = [(
            cb_op(str(self._spacegroup_reindex_operator)) * cb_op(str(op))).as_hkl()
            if op is not None else self._spacegroup_reindex_operator
            for op in reindex_ops]
        tt_grouprefiner.set_reindex_operators(reindex_ops)
        tt_grouprefiner.run()
        Chatter.write('%s: %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f' % \
          tuple([''.join(pi.split('_')[2:])] + list(tt_grouprefiner.get_unit_cell())))
        self._scalr_cell_dict[pi] = (tt_grouprefiner.get_unit_cell(), tt_grouprefiner.get_unit_cell_esd(), tt_grouprefiner.import_cif(), tt_grouprefiner.import_mmcif())
        if len(groups) > 1:
          cif_in = tt_grouprefiner.import_cif()
          cif_out = CIF.get_block(pi)
          for key in sorted(cif_in.keys()):
            cif_out[key] = cif_in[key]
          mmcif_in = tt_grouprefiner.import_mmcif()
          mmcif_out = mmCIF.get_block(pi)
          for key in sorted(mmcif_in.keys()):
            mmcif_out[key] = mmcif_in[key]

      # Two theta refine everything together
      if len(groups) > 1:
        tt_refiner = TwoThetaRefine()
        tt_refiner.set_working_directory(self.get_working_directory())
        tt_refiner.set_output_p4p(p4p_file)
        auto_logfiler(tt_refiner)
        tt_refiner.set_experiments(tt_refine_experiments)
        tt_refiner.set_pickles(tt_refine_pickles)
        if self._spacegroup_reindex_operator is not None:
          reindex_ops = [(
            cb_op(str(self._spacegroup_reindex_operator)) * cb_op(str(op))).as_hkl()
            if op is not None else self._spacegroup_reindex_operator
            for op in tt_refine_reindex_ops]
        tt_refiner.set_reindex_operators(reindex_ops)
        tt_refiner.run()
        self._scalr_cell = tt_refiner.get_unit_cell()
        Chatter.write('Overall: %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f' % tt_refiner.get_unit_cell())
        self._scalr_cell_esd = tt_refiner.get_unit_cell_esd()
        cif_in = tt_refiner.import_cif()
        mmcif_in = tt_refiner.import_mmcif()
      else:
        self._scalr_cell, self._scalr_cell_esd, cif_in, mmcif_in = self._scalr_cell_dict.values()[0]
      if params.xia2.settings.small_molecule == True:
        FileHandler.record_data_file(p4p_file)

      import dials.util.version
      cif_out = CIF.get_block('xia2')
      mmcif_out = mmCIF.get_block('xia2')
      cif_out['_computing_cell_refinement'] = mmcif_out['_computing.cell_refinement'] = 'DIALS 2theta refinement, %s' % dials.util.version.dials_version()
      for key in sorted(cif_in.keys()):
        cif_out[key] = cif_in[key]
      for key in sorted(mmcif_in.keys()):
        mmcif_out[key] = mmcif_in[key]

      Debug.write('Unit cell obtained by two-theta refinement')

    else:
      ami = AnalyseMyIntensities()
      ami.set_working_directory(self.get_working_directory())

      average_unit_cell, ignore_sg = ami.compute_average_cell(
        [self._scalr_scaled_refl_files[key] for key in
         self._scalr_scaled_refl_files])

      Debug.write('Computed average unit cell (will use in all files)')
      self._scalr_cell = average_unit_cell
      self._scalr_cell_esd = None

      # Write average unit cell to .cif
      cif_out = CIF.get_block('xia2')
      cif_out['_computing_cell_refinement'] = 'AIMLESS averaged unit cell'
      for cell, cifname in zip(self._scalr_cell,
                               ['length_a', 'length_b', 'length_c', 'angle_alpha', 'angle_beta', 'angle_gamma']):
        cif_out['_cell_%s' % cifname] = cell

    Debug.write('%7.3f %7.3f %7.3f %7.3f %7.3f %7.3f' % \
              self._scalr_cell)
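
Where a spacegroup-level reindex operator has already been chosen, the code above composes it with each sweep's own reindex operator through cctbx's change_of_basis_op before handing the result to TwoThetaRefine. A small illustration of that composition, using only the cctbx calls already present in the example and made-up operator strings:

# Compose a spacegroup reindex operator with a per-sweep operator, as in the
# example above (the operator strings here are made up for illustration).
from cctbx.sgtbx import change_of_basis_op as cb_op

spacegroup_op = 'k,h,-l'   # assumed spacegroup reindex operator
sweep_op = '-h,-k,l'       # assumed per-sweep reindex operator

combined = (cb_op(spacegroup_op) * cb_op(sweep_op)).as_hkl()
print(combined)            # the single operator handed to set_reindex_operators()
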
Example #13
def get_unit_cell_errors(stop_after=None):
  '''Actually process something...'''
  wd = os.getcwd()

  all_miller_indices, all_two_thetas_obs, reference_cell, reference_lattice, reference_wavelength = load_sweeps_with_common_indexing()

  Chatter.banner('Unit cell sampling')
  Debug.banner('Unit cell sampling')
  span = miller.index_span(all_miller_indices)
  Chatter.write("Found %d reflections in 2theta range %.3f - %.3f deg" % (len(all_miller_indices), min(all_two_thetas_obs), max(all_two_thetas_obs)))
  Chatter.write("Miller index range: %s - %s" % (str(span.min()), str(span.max())))
  unit_cell_info = { 'reflections':
                     { 'count': len(all_miller_indices),
                       'min_2theta': min(all_two_thetas_obs),
                       'max_2theta': max(all_two_thetas_obs),
                       'min_miller': list(span.min()),
                       'max_miller': list(span.max())
                     } }

  # Exclude 1% of reflections to remove potential outliers
  # e.g. indexed/integrated high-angle noise
  two_theta_cutoff = sorted(all_two_thetas_obs)[-int(len(all_two_thetas_obs) * 0.01)-1]
  Chatter.write("Excluding outermost 1%% of reflections (2theta >= %.3f)" % two_theta_cutoff)
  two_thetas_select = all_two_thetas_obs < two_theta_cutoff
  all_two_thetas_obs = all_two_thetas_obs.select(two_thetas_select)
  all_miller_indices = all_miller_indices.select(two_thetas_select)
  Chatter.write("Kept %d reflections in 2theta range %.3f - %.3f deg" % (len(all_miller_indices), min(all_two_thetas_obs), max(all_two_thetas_obs)))
  span = miller.index_span(all_miller_indices)
  unit_cell_info['reflections_filtered'] = \
                     { 'count': len(all_miller_indices),
                       'min_2theta': min(all_two_thetas_obs),
                       'max_2theta': max(all_two_thetas_obs),
                       'min_miller': list(span.min()),
                       'max_miller': list(span.max())
                     }

  # prepare MonteCarlo sampling
  mc_runs = 50
  sample_size = min(len(all_miller_indices) // 2, 100)
  unit_cell_info['sampling'] = { 'method': 'montecarlo', 'runs': mc_runs, 'used_per_run': sample_size }
  unit_cell_info['reference'] = { 'cell': reference_cell.parameters(), 'cell_volume': reference_cell.volume(),
                                  'lattice': reference_lattice, 'wavelength': reference_wavelength }

  Chatter.write("\nRandomly sampling %d x %d reflections for Monte Carlo iterations" % (mc_runs, sample_size))
  Debug.write("Refinements start with reference unit cell: %s" % reference_cell)

  MC = []
  MCconstrained = []
  used_index_range = flex.miller_index()
  used_two_theta_range_min = 1e300
  used_two_theta_range_max = 0
  used_reflections = set()

  for n in range(mc_runs): # MC sampling
    # Select sample_size reflections
    sample = flex.size_t(random.sample(range(len(all_miller_indices)), sample_size))
    used_reflections = used_reflections.union(set(sample))
    miller_indices = all_miller_indices.select(sample)
    two_thetas_obs = all_two_thetas_obs.select(sample)

    # Record
    span = miller.index_span(miller_indices)
    used_index_range.append(span.min())
    used_index_range.append(span.max())
    used_two_theta_range_min = min(used_two_theta_range_min, min(two_thetas_obs))
    used_two_theta_range_max = max(used_two_theta_range_max, max(two_thetas_obs))

    refined = _refinery(two_thetas_obs, miller_indices, reference_wavelength, reference_cell)
    MC.append(refined.unit_cell().parameters() + (refined.unit_cell().volume(),))
    Debug.write('Run %d refined to: %s' % (n, str(refined.unit_cell())))
    if reference_lattice is not None and reference_lattice != 'aP':
      refined = _refinery(two_thetas_obs, miller_indices, reference_wavelength, reference_cell, reference_lattice[0])
      MCconstrained.append(refined.unit_cell().parameters() + (refined.unit_cell().volume(),))
      Debug.write('Run %d (constrained %s) refined to: %s' % (n, reference_lattice[0], str(refined.unit_cell())))

    if (n % 50) == 0:
      sys.stdout.write("\n%5s ." % (str(n) if n > 0 else ''))
    else:
      sys.stdout.write(".")
    sys.stdout.flush()

  assert used_two_theta_range_min < used_two_theta_range_max

  def stats_summary(l):
    mean = sum(l) / len(l)
    var = 0
    for y in l:
      var = var + ((y - mean) ** 2)
    popvar = var / (len(l)-1)
    popstddev = math.sqrt(popvar)
    stderr = popstddev / math.sqrt(len(l))
    return { 'mean': mean, 'variance': var, 'population_variance': popvar,
        'population_standard_deviation': popstddev, 'standard_error': stderr }

  print()
  Chatter.write("")
  Chatter.write("Unit cell estimation based on %d Monte Carlo runs," % len(MC))
  span = miller.index_span(used_index_range)
  Chatter.write("drawn from miller indices between %s and %s" % (str(span.min()), str(span.max())))
  Chatter.write("with associated 2theta angles between %.3f and %.3f deg" % (used_two_theta_range_min, used_two_theta_range_max))
  unit_cell_info['sampling'].update({
      'used_reflections': len(used_reflections),
      'used_max_2theta': used_two_theta_range_max,
      'used_min_2theta': used_two_theta_range_min,
      'used_max_miller': span.max(),
      'used_min_miller': span.min() })
  unit_cell_info['solution_unconstrained'] = {}
  unit_cell_info['solution_constrained'] = { 'lattice': reference_lattice }
  if reference_lattice is None or reference_lattice == 'aP':
    Chatter.write("\n  Unconstrained estimate:", strip=False)
    for dimension, estimate in zip(['a', 'b', 'c', 'alpha', 'beta', 'gamma', 'volume'], zip(*MC)):
      est_stats = stats_summary(estimate)
      unit_cell_info['solution_unconstrained'][dimension] = est_stats
      unit_cell_info['solution_constrained'][dimension] = est_stats
      Chatter.write(" %6s =%10.5f (SD: %.5f, SE: %.5f)" % (dimension,
        est_stats['mean'],
        est_stats['population_standard_deviation'],
        est_stats['standard_error']), strip=False)
  else:
    Chatter.write("\n    Unconstrained estimate:                     |     Constrained estimate (%s):" % reference_lattice, strip=False)
    for dimension, estimate, constrained in zip(['a', 'b', 'c', 'alpha', 'beta', 'gamma', 'volume'], zip(*MC), zip(*MCconstrained)):
      est_stats = stats_summary(estimate)
      rest_stats = stats_summary(constrained)
      unit_cell_info['solution_unconstrained'][dimension] = est_stats
      unit_cell_info['solution_constrained'][dimension] = rest_stats
      Chatter.write(" %6s =%10.5f (SD: %.5f, SE: %.5f)  |  %6s =%10.5f (SD: %.5f, SE: %.5f)" %
        (dimension,
         est_stats['mean'],
         est_stats['population_standard_deviation'],
         est_stats['standard_error'],
         dimension,
         rest_stats['mean'],
         rest_stats['population_standard_deviation'],
         rest_stats['standard_error']), strip=False)

  with open(os.path.join(wd, 'xia2.get_unit_cell_errors.json'), 'w') as fh:
    json.dump(unit_cell_info, fh, indent = 2, sort_keys=True)
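
For each cell parameter, stats_summary() above reports the mean over the Monte Carlo runs, a standard deviation computed with an n - 1 denominator (despite the 'population' naming, this is the sample estimator), and the standard error of the mean. A cross-check with the standard library under the same conventions:

# Cross-check of stats_summary() with the standard library: n - 1 denominator
# for the standard deviation, standard error = SD / sqrt(n).
import math
import statistics

def stats_summary(values):
    sd = statistics.stdev(values)    # sample standard deviation (n - 1)
    return {
        'mean': statistics.mean(values),
        'population_standard_deviation': sd,
        'standard_error': sd / math.sqrt(len(values)),
    }

print(stats_summary([78.12, 78.31, 78.25, 78.19]))
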
Example #14
    def _index_prepare(self):

        from xia2.Handlers.Citations import Citations

        Citations.cite("dials")

        # all_images = self.get_matching_images()
        # first = min(all_images)
        # last = max(all_images)

        spot_lists = []
        experiments_filenames = []

        for imageset, xsweep in zip(self._indxr_imagesets, self._indxr_sweeps):

            Chatter.banner("Spotfinding %s" % xsweep.get_name())

            first, last = imageset.get_scan().get_image_range()

            # at this stage, break out to run the DIALS code: this sets itself up
            # now cheat and pass in some information... save re-reading all of the
            # image headers

            # FIXME need to adjust this to allow (say) three chunks of images

            from dxtbx.model.experiment_list import ExperimentListFactory

            sweep_filename = os.path.join(self.get_working_directory(),
                                          "%s_import.expt" % xsweep.get_name())
            ExperimentListFactory.from_imageset_and_crystal(
                imageset, None).as_file(sweep_filename)

            genmask = self.GenerateMask()
            genmask.set_input_experiments(sweep_filename)
            genmask.set_output_experiments(
                os.path.join(
                    self.get_working_directory(),
                    "%s_%s_masked.expt" %
                    (genmask.get_xpid(), xsweep.get_name()),
                ))
            genmask.set_params(PhilIndex.params.dials.masking)
            sweep_filename, mask_pickle = genmask.run()
            Debug.write("Generated mask for %s: %s" %
                        (xsweep.get_name(), mask_pickle))

            gain = PhilIndex.params.xia2.settings.input.gain
            if gain is libtbx.Auto:
                gain_estimater = self.EstimateGain()
                gain_estimater.set_sweep_filename(sweep_filename)
                gain_estimater.run()
                gain = gain_estimater.get_gain()
                Chatter.write("Estimated gain: %.2f" % gain)
                PhilIndex.params.xia2.settings.input.gain = gain

            # FIXME this should really use the assigned spot finding regions
            # offset = self.get_frame_offset()
            dfs_params = PhilIndex.params.dials.find_spots
            spotfinder = self.Spotfinder()
            if last - first > 10:
                spotfinder.set_write_hot_mask(True)
            spotfinder.set_input_sweep_filename(sweep_filename)
            spotfinder.set_output_sweep_filename(
                "%s_%s_strong.expt" %
                (spotfinder.get_xpid(), xsweep.get_name()))
            spotfinder.set_input_spot_filename(
                "%s_%s_strong.refl" %
                (spotfinder.get_xpid(), xsweep.get_name()))
            if PhilIndex.params.dials.fast_mode:
                wedges = self._index_select_images_i(imageset)
                spotfinder.set_scan_ranges(wedges)
            else:
                spotfinder.set_scan_ranges([(first, last)])
            if dfs_params.phil_file is not None:
                spotfinder.set_phil_file(dfs_params.phil_file)
            if dfs_params.min_spot_size is libtbx.Auto:
                if imageset.get_detector()[0].get_type() == "SENSOR_PAD":
                    dfs_params.min_spot_size = 3
                else:
                    dfs_params.min_spot_size = None
            if dfs_params.min_spot_size is not None:
                spotfinder.set_min_spot_size(dfs_params.min_spot_size)
            if dfs_params.min_local is not None:
                spotfinder.set_min_local(dfs_params.min_local)
            if dfs_params.sigma_strong:
                spotfinder.set_sigma_strong(dfs_params.sigma_strong)
            gain = PhilIndex.params.xia2.settings.input.gain
            if gain:
                spotfinder.set_gain(gain)
            if dfs_params.filter_ice_rings:
                spotfinder.set_filter_ice_rings(dfs_params.filter_ice_rings)
            if dfs_params.kernel_size:
                spotfinder.set_kernel_size(dfs_params.kernel_size)
            if dfs_params.global_threshold is not None:
                spotfinder.set_global_threshold(dfs_params.global_threshold)
            if dfs_params.threshold.algorithm is not None:
                spotfinder.set_threshold_algorithm(
                    dfs_params.threshold.algorithm)
            spotfinder.run()

            spot_filename = spotfinder.get_spot_filename()
            if not os.path.exists(spot_filename):
                raise RuntimeError("Spotfinding failed: %s does not exist." %
                                   os.path.basename(spot_filename))

            spot_lists.append(spot_filename)
            experiments_filenames.append(
                spotfinder.get_output_sweep_filename())

            from dials.util.ascii_art import spot_counts_per_image_plot

            refl = flex.reflection_table.from_file(spot_filename)
            if not len(refl):
                raise RuntimeError("No spots found in sweep %s" %
                                   xsweep.get_name())
            Chatter.write(spot_counts_per_image_plot(refl), strip=False)

            if not PhilIndex.params.dials.fast_mode:
                detectblanks = self.DetectBlanks()
                detectblanks.set_sweep_filename(experiments_filenames[-1])
                detectblanks.set_reflections_filename(spot_filename)
                detectblanks.run()
                json = detectblanks.get_results()
                blank_regions = json["strong"]["blank_regions"]
                if len(blank_regions):
                    blank_regions = [(int(s), int(e))
                                     for s, e in blank_regions]
                    for blank_start, blank_end in blank_regions:
                        Chatter.write(
                            "WARNING: Potential blank images: %i -> %i" %
                            (blank_start + 1, blank_end))

                    if PhilIndex.params.xia2.settings.remove_blanks:
                        non_blanks = []
                        start, end = imageset.get_array_range()
                        last_blank_end = start
                        for blank_start, blank_end in blank_regions:
                            if blank_start > start:
                                non_blanks.append(
                                    (last_blank_end, blank_start))
                            last_blank_end = blank_end

                        if last_blank_end + 1 < end:
                            non_blanks.append((last_blank_end, end))

                        xsweep = self.get_indexer_sweep()
                        xwav = xsweep.get_wavelength()
                        xsample = xsweep.get_xsample()

                        sweep_name = xsweep.get_name()
                        import string

                        for i, (nb_start, nb_end) in enumerate(non_blanks):
                            assert i < 26
                            if i == 0:
                                sub_imageset = imageset[nb_start -
                                                        start:nb_end - start]
                                xsweep._frames_to_process = (nb_start + 1,
                                                             nb_end + 1)
                                self.set_indexer_prepare_done(done=False)
                                self._indxr_imagesets[
                                    self._indxr_imagesets.index(
                                        imageset)] = sub_imageset
                                xsweep._integrater._setup_from_imageset(
                                    sub_imageset)
                            else:
                                min_images = (PhilIndex.params.xia2.settings.
                                              input.min_images)
                                if (nb_end - nb_start) < min_images:
                                    continue
                                new_name = "_".join(
                                    (sweep_name, string.ascii_lowercase[i]))
                                new_sweep = xwav.add_sweep(
                                    new_name,
                                    xsample,
                                    directory=os.path.join(
                                        os.path.basename(
                                            xsweep.get_directory()),
                                        new_name,
                                    ),
                                    image=imageset.get_path(nb_start - start),
                                    frames_to_process=(nb_start + 1, nb_end),
                                )
                                Chatter.write(
                                    "Generating new sweep: %s (%s:%i:%i)" % (
                                        new_sweep.get_name(),
                                        new_sweep.get_image(),
                                        new_sweep.get_frames_to_process()[0],
                                        new_sweep.get_frames_to_process()[1],
                                    ))
                        return

            if not PhilIndex.params.xia2.settings.trust_beam_centre:
                discovery = self.DiscoverBetterExperimentalModel()
                discovery.set_sweep_filename(experiments_filenames[-1])
                discovery.set_spot_filename(spot_filename)
                try:
                    discovery.run()
                except Exception as e:
                    Debug.write("DIALS beam centre search failed: %s" % str(e))
                else:
                    # overwrite indexed.expt in experiments list
                    experiments_filenames[
                        -1] = discovery.get_optimized_experiments_filename()

        self.set_indexer_payload("spot_lists", spot_lists)
        self.set_indexer_payload("experiments", experiments_filenames)
Example #15
File: CCP4ScalerA.py  Project: xia2/xia2
  def _update_scaled_unit_cell(self):
    # FIXME this could be brought in-house

    fast_mode = PhilIndex.params.dials.fast_mode
    if PhilIndex.params.xia2.settings.integrater == 'dials' and not fast_mode:
      from xia2.Wrappers.Dials.TwoThetaRefine import TwoThetaRefine
      from xia2.lib.bits import auto_logfiler

      Chatter.banner('Unit cell refinement')

      # Collect a list of all sweeps, grouped by project, crystal, wavelength
      groups = {}
      self._scalr_cell_dict = {}
      tt_refine_experiments, tt_refine_pickles = [], []
      for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)
        pi = '_'.join(si.get_project_info())
        intgr = si.get_integrater()
        groups[pi] = groups.get(pi, []) + \
          [(intgr.get_integrated_experiments(), intgr.get_integrated_reflections())]

      # Two theta refine the unit cell for each group
      for pi in groups.keys():
        tt_grouprefiner = TwoThetaRefine()
        tt_grouprefiner.set_working_directory(self.get_working_directory())
        auto_logfiler(tt_grouprefiner)
        files = zip(*groups[pi])
        tt_grouprefiner.set_experiments(files[0])
        tt_grouprefiner.set_pickles(files[1])
        tt_refine_experiments.extend(files[0])
        tt_refine_pickles.extend(files[1])
        tt_grouprefiner.set_reindex_operator(self._spacegroup_reindex_operator)
        tt_grouprefiner.run()
        Chatter.write('%s: %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f' % \
          tuple([''.join(pi.split('_')[2:])] + list(tt_grouprefiner.get_unit_cell())))
        self._scalr_cell_dict[pi] = (tt_grouprefiner.get_unit_cell(), tt_grouprefiner.get_unit_cell_esd(), tt_grouprefiner.import_cif(), tt_grouprefiner.import_mmcif())
        if len(groups) > 1:
          cif_in = tt_grouprefiner.import_cif()
          cif_out = CIF.get_block(pi)
          for key in sorted(cif_in.keys()):
            cif_out[key] = cif_in[key]
          mmcif_in = tt_grouprefiner.import_mmcif()
          mmcif_out = mmCIF.get_block(pi)
          for key in sorted(mmcif_in.keys()):
            mmcif_out[key] = mmcif_in[key]

      # Two theta refine everything together
      if len(groups) > 1:
        tt_refiner = TwoThetaRefine()
        tt_refiner.set_working_directory(self.get_working_directory())
        auto_logfiler(tt_refiner)
        tt_refiner.set_experiments(tt_refine_experiments)
        tt_refiner.set_pickles(tt_refine_pickles)
        tt_refiner.set_reindex_operator(self._spacegroup_reindex_operator)
        tt_refiner.run()
        self._scalr_cell = tt_refiner.get_unit_cell()
        Chatter.write('Overall: %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f' % tt_refiner.get_unit_cell())
        self._scalr_cell_esd = tt_refiner.get_unit_cell_esd()
        cif_in = tt_refiner.import_cif()
        mmcif_in = tt_refiner.import_mmcif()
      else:
        self._scalr_cell, self._scalr_cell_esd, cif_in, mmcif_in = self._scalr_cell_dict.values()[0]

      import dials.util.version
      cif_out = CIF.get_block('xia2')
      mmcif_out = mmCIF.get_block('xia2')
      cif_out['_computing_cell_refinement'] = mmcif_out['_computing.cell_refinement'] = 'DIALS 2theta refinement, %s' % dials.util.version.dials_version()
      for key in sorted(cif_in.keys()):
        cif_out[key] = cif_in[key]
      for key in sorted(mmcif_in.keys()):
        mmcif_out[key] = mmcif_in[key]

      Debug.write('Unit cell obtained by two-theta refinement')

    else:
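      # Not integrating with DIALS (or running in fast mode): fall back to
      # averaging the unit cells recorded in the scaled reflection files.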
      ami = AnalyseMyIntensities()
      ami.set_working_directory(self.get_working_directory())

      average_unit_cell, ignore_sg = ami.compute_average_cell(
        [self._scalr_scaled_refl_files[key] for key in
         self._scalr_scaled_refl_files])

      Debug.write('Computed average unit cell (will use in all files)')
      self._scalr_cell = average_unit_cell
      self._scalr_cell_esd = None

      # Write average unit cell to .cif
      cif_out = CIF.get_block('xia2')
      cif_out['_computing_cell_refinement'] = 'AIMLESS averaged unit cell'
      for cell, cifname in zip(self._scalr_cell,
                               ['length_a', 'length_b', 'length_c', 'angle_alpha', 'angle_beta', 'angle_gamma']):
        cif_out['_cell_%s' % cifname] = cell

    Debug.write('%7.3f %7.3f %7.3f %7.3f %7.3f %7.3f' % \
                self._scalr_cell)
Example #16
0
File: DialsIndexer.py Project: xia2/xia2
  def _index_prepare(self):

    from xia2.Handlers.Citations import Citations
    Citations.cite('dials')

    #all_images = self.get_matching_images()
    #first = min(all_images)
    #last = max(all_images)

    spot_lists = []
    datablocks = []

    for imageset, xsweep in zip(self._indxr_imagesets, self._indxr_sweeps):

      Chatter.banner('Spotfinding %s' %xsweep.get_name())

      first, last = imageset.get_scan().get_image_range()

      # at this stage, break out to run the DIALS code: this sets itself up
      # now cheat and pass in some information... save re-reading all of the
      # image headers

      # FIXME need to adjust this to allow (say) three chunks of images

      from dxtbx.serialize import dump
      from dxtbx.datablock import DataBlock
      sweep_filename = os.path.join(
        self.get_working_directory(), '%s_datablock.json' %xsweep.get_name())
      dump.datablock(DataBlock([imageset]), sweep_filename)

      gain = PhilIndex.params.xia2.settings.input.gain
      if gain is libtbx.Auto:
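        # Estimate the detector gain from this sweep and store it back in the
        # Phil parameters so that subsequent sweeps reuse the same value.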
        gain_estimater = self.EstimateGain()
        gain_estimater.set_sweep_filename(sweep_filename)
        gain_estimater.run()
        gain = gain_estimater.get_gain()
        Chatter.write('Estimated gain: %.2f' %gain)
        PhilIndex.params.xia2.settings.input.gain = gain

      # FIXME this should really use the assigned spot finding regions
      #offset = self.get_frame_offset()
      spotfinder = self.Spotfinder()
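      # Only request a hot pixel mask for sweeps of more than 10 images,
      # presumably because shorter sweeps give too few observations per pixel
      # to identify hot pixels reliably.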
      if last - first > 10:
        spotfinder.set_write_hot_mask(True)
      spotfinder.set_input_sweep_filename(sweep_filename)
      spotfinder.set_output_sweep_filename(
        '%s_%s_datablock.json' %(spotfinder.get_xpid(), xsweep.get_name()))
      spotfinder.set_input_spot_filename(
        '%s_%s_strong.pickle' %(spotfinder.get_xpid(), xsweep.get_name()))
      if PhilIndex.params.dials.fast_mode:
        wedges = self._index_select_images_i(imageset)
        spotfinder.set_scan_ranges(wedges)
      else:
        spotfinder.set_scan_ranges([(first, last)])
      if PhilIndex.params.dials.find_spots.phil_file is not None:
        spotfinder.set_phil_file(PhilIndex.params.dials.find_spots.phil_file)
      min_spot_size = PhilIndex.params.dials.find_spots.min_spot_size
      if min_spot_size is libtbx.Auto:
        if imageset.get_detector()[0].get_type() == 'SENSOR_PAD':
          min_spot_size = 3
        else:
          min_spot_size = None
      if min_spot_size is not None:
        spotfinder.set_min_spot_size(min_spot_size)
      min_local = PhilIndex.params.dials.find_spots.min_local
      if min_local is not None:
        spotfinder.set_min_local(min_local)
      sigma_strong = PhilIndex.params.dials.find_spots.sigma_strong
      if sigma_strong:
        spotfinder.set_sigma_strong(sigma_strong)
      gain = PhilIndex.params.xia2.settings.input.gain
      if gain:
        spotfinder.set_gain(gain)
      filter_ice_rings = PhilIndex.params.dials.find_spots.filter_ice_rings
      if filter_ice_rings:
        spotfinder.set_filter_ice_rings(filter_ice_rings)
      kernel_size = PhilIndex.params.dials.find_spots.kernel_size
      if kernel_size:
        spotfinder.set_kernel_size(kernel_size)
      global_threshold = PhilIndex.params.dials.find_spots.global_threshold
      if global_threshold is not None:
        spotfinder.set_global_threshold(global_threshold)
      spotfinder.run()

      spot_filename = spotfinder.get_spot_filename()
      if not os.path.exists(spot_filename):
        raise RuntimeError("Spotfinding failed: %s does not exist."
                           %os.path.basename(spot_filename))

      spot_lists.append(spot_filename)
      datablocks.append(spotfinder.get_output_sweep_filename())

      from libtbx import easy_pickle
      from dials.util.ascii_art import spot_counts_per_image_plot
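      # Load the strong spots and print a per-image spot-count plot so that
      # blank or weak regions of the sweep stand out in the log.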
      refl = easy_pickle.load(spot_filename)
      if not len(refl):
        raise RuntimeError('No spots found in sweep %s' %xsweep.get_name())
      Chatter.write(spot_counts_per_image_plot(refl), strip=False)

      if not PhilIndex.params.dials.fast_mode:
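        # Look for runs of images containing no strong spots; these are
        # reported as potential blank regions and can optionally be removed.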
        detectblanks = self.DetectBlanks()
        detectblanks.set_sweep_filename(datablocks[-1])
        detectblanks.set_reflections_filename(spot_filename)
        detectblanks.run()
        json = detectblanks.get_results()
        offset = imageset.get_scan().get_image_range()[0]
        blank_regions = json['strong']['blank_regions']
        if len(blank_regions):
          blank_regions = [(int(s), int(e)) for s, e in blank_regions]
          for blank_start, blank_end in blank_regions:
            Chatter.write('WARNING: Potential blank images: %i -> %i' %(
              blank_start+1, blank_end))

          if PhilIndex.params.xia2.settings.remove_blanks:
            non_blanks = []
            start, end = imageset.get_array_range()
            last_blank_end = start
            for blank_start, blank_end in blank_regions:
              if blank_start > start:
                non_blanks.append((last_blank_end, blank_start))
              last_blank_end = blank_end

            if last_blank_end+1 < end:
              non_blanks.append((last_blank_end, end))

            xsweep = self.get_indexer_sweep()
            xwav = xsweep.get_wavelength()
            xsample = xsweep.get_xsample()

            sweep_name = xsweep.get_name()
            import string
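            # Split the sweep at the blank regions: the first non-blank chunk
            # replaces the current imageset, and each subsequent chunk becomes
            # a new sweep named <sweep>_b, <sweep>_c, ...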
            for i, (nb_start, nb_end) in enumerate(non_blanks):
              assert i < 26
              if i == 0:
                sub_imageset = imageset[nb_start-start:nb_end-start]
                xsweep._frames_to_process = (nb_start+1, nb_end+1)
                self.set_indexer_prepare_done(done=False)
                self._indxr_imagesets[self._indxr_imagesets.index(imageset)] = sub_imageset
                xsweep._integrater._setup_from_imageset(sub_imageset)
              else:
                new_name = '_'.join((sweep_name, string.ascii_lowercase[i]))
                new_sweep = xwav.add_sweep(new_name,
                               xsample,
                               directory=os.path.join(
                                 os.path.basename(xsweep.get_directory()), new_name),
                               image=imageset.get_path(nb_start-start),
                               frames_to_process=(nb_start+1, nb_end),
                               )
                Chatter.write("Generating new sweep: %s (%s:%i:%i)" %(
                  new_sweep.get_name(),
                  new_sweep.get_image(),
                  new_sweep.get_frames_to_process()[0],
                  new_sweep.get_frames_to_process()[1]))
            return

      if not PhilIndex.params.xia2.settings.trust_beam_centre:
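        # Try to find a better beam centre / experimental model from the strong
        # spots; failure here is only logged and the header geometry is kept.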
        discovery = self.DiscoverBetterExperimentalModel()
        discovery.set_sweep_filename(datablocks[-1])
        discovery.set_spot_filename(spot_filename)
        #wedges = self._index_select_images_i(imageset)
        #discovery.set_scan_ranges(wedges)
        #discovery.set_scan_ranges([(first + offset, last + offset)])
        try:
          discovery.run()
        except Exception, e:
          Debug.write('DIALS beam centre search failed: %s' %str(e))
        else:
          # overwrite datablock.json in datablocks list
          datablocks[-1] = discovery.get_optimized_datablock_filename()