Example #1
File: XSweep.py Project: xia2/xia2
  def _add_detector_identification_to_cif(self):
    detector_id = self.get_detector_identification()
    if detector_id:
      import dxtbx.data.beamline_defs as ddb
      bl_info = ddb.get_beamline_definition(detector_id)
      Debug.write('Beamline information available for %s: %s' % (detector_id, str(bl_info)))
      if bl_info:
        from xia2.Handlers.CIF import CIF, mmCIF
        cifblock, mmcifblock = bl_info.CIF_block(), bl_info.mmCIF_block()
        if cifblock:
          CIF.set_block(bl_info.get_block_name(), cifblock)
        if mmcifblock:
          mmCIF.set_block(bl_info.get_block_name(), mmcifblock)
Example #2
 def _add_detector_identification_to_cif(self):
     detector_id = self.get_detector_identification()
     if detector_id:
         import dxtbx.data.beamline_defs as ddb
         bl_info = ddb.get_beamline_definition(detector_id)
         Debug.write('Beamline information available for %s: %s' %
                     (detector_id, str(bl_info)))
         if bl_info:
             from xia2.Handlers.CIF import CIF, mmCIF
             cifblock, mmcifblock = bl_info.CIF_block(
             ), bl_info.mmCIF_block()
             if cifblock:
                 CIF.set_block(bl_info.get_block_name(), cifblock)
             if mmcifblock:
                 mmCIF.set_block(bl_info.get_block_name(), mmcifblock)
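Examples #1 and #2 are the same method with different indentation: look up beamline metadata for the detector serial via dxtbx, then register the returned CIF and mmCIF blocks with xia2's global CIF handlers. Below is a minimal, self-contained sketch of that block-registration pattern, using a hypothetical CIFHandler stand-in (not the real xia2.Handlers.CIF API) and a made-up metadata block:

class CIFHandler:
    """Hypothetical stand-in for xia2's CIF/mmCIF handlers, for illustration only."""

    def __init__(self):
        self._blocks = {}

    def get_block(self, name):
        # Create the named block on first access.
        return self._blocks.setdefault(name, {})

    def set_block(self, name, block):
        # Merge an externally built block (e.g. beamline metadata) into the named block.
        self.get_block(name).update(block)

    def write_cif(self):
        for name, block in self._blocks.items():
            print("data_%s" % name)
            for key, value in block.items():
                print("%s  %s" % (key, value))

CIF = CIFHandler()
# A beamline definition object would supply a block like this via bl_info.CIF_block().
CIF.set_block("example_beamline", {"_diffrn_source": "synchrotron"})
CIF.write_cif()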
Example #3
    def _generate_absorption_map(self, scaler):
        output = scaler.get_all_output()

        aimless = "AIMLESS, CCP4"

        pattern = re.compile(" +#+ *CCP4.*#+")
        for line in output:
            if pattern.search(line):
                aimless = re.sub(r"\s\s+", ", ", line.strip("\t\n #"))
                break

        coefficients = scrape_coefficients(log=output)
        if coefficients:
            absmap = evaluate_1degree(coefficients)
            absmin, absmax = absmap.min(), absmap.max()
        else:
            absmin, absmax = 1.0, 1.0

        block = CIF.get_block("xia2")
        mmblock = mmCIF.get_block("xia2")
        block["_exptl_absorpt_correction_T_min"] = mmblock[
            "_exptl.absorpt_correction_T_min"
        ] = (
            absmin / absmax
        )  # = scaled
        block["_exptl_absorpt_correction_T_max"] = mmblock[
            "_exptl.absorpt_correction_T_max"
        ] = (
            absmax / absmax
        )  # = 1
        block["_exptl_absorpt_correction_type"] = mmblock[
            "_exptl.absorpt_correction_type"
        ] = "empirical"
        block["_exptl_absorpt_process_details"] = mmblock[
            "_exptl.absorpt_process_details"
        ] = (
            """
%s
Scaling & analysis of unmerged intensities, absorption correction using spherical harmonics
"""
            % aimless
        )

        log_directory = self._base_path / "LogFiles"
        if absmax - absmin > 0.000001:
            log_directory.mkdir(parents=True, exist_ok=True)
            mapfile = log_directory / "absorption_surface.png"
            generate_map(absmap, str(mapfile))
        else:
            logger.debug(
                "Cannot create absorption surface: map is too flat (min: %f, max: %f)",
                absmin,
                absmax,
            )
Example #4
  def _generate_absorption_map(self, scaler):
    output = scaler.get_all_output()

    aimless = 'AIMLESS, CCP4'
    import re
    pattern = re.compile(" +#+ *CCP4.*#+")
    for line in output:
      if pattern.search(line):
        aimless = re.sub(r'\s\s+', ', ', line.strip("\t\n #"))
        break

    from xia2.Toolkit.AimlessSurface import evaluate_1degree, \
      scrape_coefficients, generate_map
    coefficients = scrape_coefficients(log=output)
    if coefficients:
      absmap = evaluate_1degree(coefficients)
      absmin, absmax = absmap.min(), absmap.max()
    else:
      absmin, absmax = 1.0, 1.0

    block = CIF.get_block('xia2')
    mmblock = mmCIF.get_block('xia2')
    block["_exptl_absorpt_correction_T_min"] = mmblock["_exptl.absorpt_correction_T_min"] = \
      absmin / absmax # = scaled
    block["_exptl_absorpt_correction_T_max"] = mmblock["_exptl.absorpt_correction_T_max"] = \
      absmax / absmax # = 1
    block["_exptl_absorpt_correction_type"] = mmblock["_exptl.absorpt_correction_type"] = \
      "empirical"
    block["_exptl_absorpt_process_details"] = mmblock["_exptl.absorpt_process_details"] = '''
%s
Scaling & analysis of unmerged intensities, absorption correction using spherical harmonics
''' % aimless

    if absmax - absmin > 0.000001:
      from xia2.Handlers.Environment import Environment
      log_directory = Environment.generate_directory('LogFiles')
      mapfile = os.path.join(log_directory, 'absorption_surface.png')
      generate_map(absmap, mapfile)
    else:
      Debug.write("Cannot create absorption surface: map is too flat (min: %f, max: %f)" % (absmin, absmax))
Example #6
    def get_output(self):

        result = "Crystal: %s\n" % self._name

        if self._aa_sequence:
            result += "Sequence: %s\n" % self._aa_sequence.get_sequence()
        for wavelength in self._wavelengths.keys():
            result += self._wavelengths[wavelength].get_output()

        scaler = self._get_scaler()
        if scaler.get_scaler_finish_done():
            for wname, xwav in self._wavelengths.items():
                for xsweep in xwav.get_sweeps():
                    idxr = xsweep._get_indexer()
                    if PhilIndex.params.xia2.settings.show_template:
                        result += "%s\n" % banner(
                            "Autoindexing %s (%s)" %
                            (idxr.get_indexer_sweep_name(),
                             idxr.get_template()))
                    else:
                        result += "%s\n" % banner(
                            "Autoindexing %s" % idxr.get_indexer_sweep_name())
                    result += "%s\n" % idxr.show_indexer_solutions()

                    intgr = xsweep._get_integrater()
                    if PhilIndex.params.xia2.settings.show_template:
                        result += "%s\n" % banner(
                            "Integrating %s (%s)" %
                            (intgr.get_integrater_sweep_name(),
                             intgr.get_template()))
                    else:
                        result += "%s\n" % banner(
                            "Integrating %s" %
                            intgr.get_integrater_sweep_name())
                    result += "%s\n" % intgr.show_per_image_statistics()

            result += "%s\n" % banner("Scaling %s" % self.get_name())

            for (
                (dname, sname),
                (limit, suggested),
            ) in scaler.get_scaler_resolution_limits().items():
                if suggested is None or limit == suggested:
                    result += "Resolution limit for %s/%s: %5.2f\n" % (
                        dname,
                        sname,
                        limit,
                    )
                else:
                    result += (
                        "Resolution limit for %s/%s: %5.2f (%5.2f suggested)\n"
                        % (dname, sname, limit, suggested))

        # this is now deprecated - be explicit in what you are
        # asking for...
        reflections_all = self.get_scaled_merged_reflections()
        statistics_all = self._get_scaler().get_scaler_statistics()

        # print some of these statistics, perhaps?

        for key in statistics_all.keys():
            result += format_statistics(statistics_all[key],
                                        caption="For %s/%s/%s" % key)

        # then print out some "derived" information based on the
        # scaling - this is presented through the Scaler interface
        # explicitly...

        cell = self._get_scaler().get_scaler_cell()
        cell_esd = self._get_scaler().get_scaler_cell_esd()
        spacegroups = self._get_scaler().get_scaler_likely_spacegroups()

        spacegroup = spacegroups[0]
        resolution = self._get_scaler().get_scaler_highest_resolution()

        from cctbx import sgtbx

        sg = sgtbx.space_group_type(str(spacegroup))
        spacegroup = sg.lookup_symbol()
        CIF.set_spacegroup(sg)
        mmCIF.set_spacegroup(sg)

        if len(self._wavelengths) == 1:
            CIF.set_wavelengths(
                [w.get_wavelength() for w in self._wavelengths.values()])
            mmCIF.set_wavelengths(
                [w.get_wavelength() for w in self._wavelengths.values()])
        else:
            for wavelength in self._wavelengths.keys():
                full_wave_name = "%s_%s_%s" % (
                    self._project._name,
                    self._name,
                    wavelength,
                )
                CIF.get_block(full_wave_name)[
                    "_diffrn_radiation_wavelength"] = self._wavelengths[
                        wavelength].get_wavelength()
                mmCIF.get_block(full_wave_name)[
                    "_diffrn_radiation_wavelength"] = self._wavelengths[
                        wavelength].get_wavelength()
            CIF.set_wavelengths({
                name: wave.get_wavelength()
                for name, wave in self._wavelengths.items()
            })
            mmCIF.set_wavelengths({
                name: wave.get_wavelength()
                for name, wave in self._wavelengths.items()
            })

        result += "Assuming spacegroup: %s\n" % spacegroup
        if len(spacegroups) > 1:
            result += "Other likely alternatives are:\n"
            for sg in spacegroups[1:]:
                result += "%s\n" % sg

        if cell_esd:
            from libtbx.utils import format_float_with_standard_uncertainty

            def match_formatting(dimA, dimB):
                def conditional_split(s):
                    return ((s[:s.index(".")],
                             s[s.index("."):]) if "." in s else (s, ""))

                A, B = conditional_split(dimA), conditional_split(dimB)
                maxlen = (max(len(A[0]), len(B[0])), max(len(A[1]), len(B[1])))
                return (
                    A[0].rjust(maxlen[0]) + A[1].ljust(maxlen[1]),
                    B[0].rjust(maxlen[0]) + B[1].ljust(maxlen[1]),
                )

            formatted_cell_esds = tuple(
                format_float_with_standard_uncertainty(v, sd)
                for v, sd in zip(cell, cell_esd))
            formatted_rows = (formatted_cell_esds[0:3],
                              formatted_cell_esds[3:6])
            formatted_rows = list(
                zip(*(match_formatting(l, a) for l, a in zip(*formatted_rows))))
            result += "Unit cell (with estimated std devs):\n"
            result += "%s %s %s\n%s %s %s\n" % (formatted_rows[0] +
                                                formatted_rows[1])
        else:
            result += "Unit cell:\n"
            result += "%7.3f %7.3f %7.3f\n%7.3f %7.3f %7.3f\n" % tuple(cell)

        # now, use this information and the sequence (if provided)
        # and also matthews_coef (should I be using this directly, here?)
        # to compute a likely number of molecules in the ASU and also
        # the solvent content...

        if self._aa_sequence:
            residues = self._aa_sequence.get_sequence()
            if residues:
                nres = len(residues)

                # first compute the number of molecules using the K&R
                # method

                nmol = compute_nmol(
                    cell[0],
                    cell[1],
                    cell[2],
                    cell[3],
                    cell[4],
                    cell[5],
                    spacegroup,
                    resolution,
                    nres,
                )

                # then compute the solvent fraction

                solvent = compute_solvent(
                    cell[0],
                    cell[1],
                    cell[2],
                    cell[3],
                    cell[4],
                    cell[5],
                    spacegroup,
                    nmol,
                    nres,
                )

                result += "Likely number of molecules in ASU: %d\n" % nmol
                result += "Giving solvent fraction:        %4.2f\n" % solvent

                self._nmol = nmol

        if isinstance(reflections_all, dict):
            for format in reflections_all.keys():
                result += "%s format:\n" % format
                reflections = reflections_all[format]

                if isinstance(reflections, dict):
                    for wavelength in reflections.keys():
                        target = FileHandler.get_data_file(
                            reflections[wavelength])
                        result += "Scaled reflections (%s): %s\n" % (
                            wavelength, target)

                else:
                    target = FileHandler.get_data_file(reflections)
                    result += "Scaled reflections: %s\n" % target

        CIF.write_cif()
        mmCIF.write_cif()

        return result
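The match_formatting helper above pads each pair of formatted cell constants (a length over the corresponding angle) so that the integer parts right-align and the fractional/esd parts left-align, producing neat columns in the two-row unit-cell printout. A standalone sketch of the same alignment, with hard-coded value(esd) strings used in place of format_float_with_standard_uncertainty:

def conditional_split(s):
    # Split "78.045(12)" into ("78", ".045(12)") so whole and fractional parts pad separately.
    return (s[: s.index(".")], s[s.index(".") :]) if "." in s else (s, "")

def match_formatting(dim_a, dim_b):
    a, b = conditional_split(dim_a), conditional_split(dim_b)
    width = (max(len(a[0]), len(b[0])), max(len(a[1]), len(b[1])))
    return (
        a[0].rjust(width[0]) + a[1].ljust(width[1]),
        b[0].rjust(width[0]) + b[1].ljust(width[1]),
    )

# Made-up formatted cell constants: lengths on the first row, angles on the second.
lengths = ("78.045(12)", "78.045(12)", "37.21(3)")
angles = ("90.0", "90.0", "90.0")
rows = list(zip(*(match_formatting(length, angle) for length, angle in zip(lengths, angles))))
print("%s %s %s\n%s %s %s" % (rows[0] + rows[1]))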
Example #7
  def _update_scaled_unit_cell(self):
    # FIXME this could be brought in-house

    params = PhilIndex.params
    fast_mode = params.dials.fast_mode
    if (params.xia2.settings.integrater == 'dials' and not fast_mode
        and params.xia2.settings.scale.two_theta_refine):
      from xia2.Wrappers.Dials.TwoThetaRefine import TwoThetaRefine
      from xia2.lib.bits import auto_logfiler

      Chatter.banner('Unit cell refinement')

      # Collect a list of all sweeps, grouped by project, crystal, wavelength
      groups = {}
      self._scalr_cell_dict = {}
      tt_refine_experiments = []
      tt_refine_pickles = []
      tt_refine_reindex_ops = []
      for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)
        pi = '_'.join(si.get_project_info())
        intgr = si.get_integrater()
        groups[pi] = groups.get(pi, []) + \
          [(intgr.get_integrated_experiments(),
            intgr.get_integrated_reflections(),
            intgr.get_integrater_reindex_operator())]

      # Two theta refine the unit cell for each group
      p4p_file = os.path.join(self.get_working_directory(),
                              '%s_%s.p4p' % (self._scalr_pname, self._scalr_xname))
      for pi in groups.keys():
        tt_grouprefiner = TwoThetaRefine()
        tt_grouprefiner.set_working_directory(self.get_working_directory())
        auto_logfiler(tt_grouprefiner)
        args = zip(*groups[pi])
        tt_grouprefiner.set_experiments(args[0])
        tt_grouprefiner.set_pickles(args[1])
        tt_grouprefiner.set_output_p4p(p4p_file)
        tt_refine_experiments.extend(args[0])
        tt_refine_pickles.extend(args[1])
        tt_refine_reindex_ops.extend(args[2])
        reindex_ops = args[2]
        from cctbx.sgtbx import change_of_basis_op as cb_op
        if self._spacegroup_reindex_operator is not None:
          reindex_ops = [(
            cb_op(str(self._spacegroup_reindex_operator)) * cb_op(str(op))).as_hkl()
            if op is not None else self._spacegroup_reindex_operator
            for op in reindex_ops]
        tt_grouprefiner.set_reindex_operators(reindex_ops)
        tt_grouprefiner.run()
        Chatter.write('%s: %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f' % \
          tuple([''.join(pi.split('_')[2:])] + list(tt_grouprefiner.get_unit_cell())))
        self._scalr_cell_dict[pi] = (tt_grouprefiner.get_unit_cell(), tt_grouprefiner.get_unit_cell_esd(), tt_grouprefiner.import_cif(), tt_grouprefiner.import_mmcif())
        if len(groups) > 1:
          cif_in = tt_grouprefiner.import_cif()
          cif_out = CIF.get_block(pi)
          for key in sorted(cif_in.keys()):
            cif_out[key] = cif_in[key]
          mmcif_in = tt_grouprefiner.import_mmcif()
          mmcif_out = mmCIF.get_block(pi)
          for key in sorted(mmcif_in.keys()):
            mmcif_out[key] = mmcif_in[key]

      # Two theta refine everything together
      if len(groups) > 1:
        tt_refiner = TwoThetaRefine()
        tt_refiner.set_working_directory(self.get_working_directory())
        tt_refiner.set_output_p4p(p4p_file)
        auto_logfiler(tt_refiner)
        tt_refiner.set_experiments(tt_refine_experiments)
        tt_refiner.set_pickles(tt_refine_pickles)
        if self._spacegroup_reindex_operator is not None:
          reindex_ops = [(
            cb_op(str(self._spacegroup_reindex_operator)) * cb_op(str(op))).as_hkl()
            if op is not None else self._spacegroup_reindex_operator
            for op in tt_refine_reindex_ops]
        tt_refiner.set_reindex_operators(reindex_ops)
        tt_refiner.run()
        self._scalr_cell = tt_refiner.get_unit_cell()
        Chatter.write('Overall: %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f' % tt_refiner.get_unit_cell())
        self._scalr_cell_esd = tt_refiner.get_unit_cell_esd()
        cif_in = tt_refiner.import_cif()
        mmcif_in = tt_refiner.import_mmcif()
      else:
        self._scalr_cell, self._scalr_cell_esd, cif_in, mmcif_in = self._scalr_cell_dict.values()[0]
      if params.xia2.settings.small_molecule:
        FileHandler.record_data_file(p4p_file)

      import dials.util.version
      cif_out = CIF.get_block('xia2')
      mmcif_out = mmCIF.get_block('xia2')
      cif_out['_computing_cell_refinement'] = mmcif_out['_computing.cell_refinement'] = 'DIALS 2theta refinement, %s' % dials.util.version.dials_version()
      for key in sorted(cif_in.keys()):
        cif_out[key] = cif_in[key]
      for key in sorted(mmcif_in.keys()):
        mmcif_out[key] = mmcif_in[key]

      Debug.write('Unit cell obtained by two-theta refinement')

    else:
      ami = AnalyseMyIntensities()
      ami.set_working_directory(self.get_working_directory())

      average_unit_cell, ignore_sg = ami.compute_average_cell(
        [self._scalr_scaled_refl_files[key] for key in
         self._scalr_scaled_refl_files])

      Debug.write('Computed average unit cell (will use in all files)')
      self._scalr_cell = average_unit_cell
      self._scalr_cell_esd = None

      # Write average unit cell to .cif
      cif_out = CIF.get_block('xia2')
      cif_out['_computing_cell_refinement'] = 'AIMLESS averaged unit cell'
      for cell, cifname in zip(self._scalr_cell,
                               ['length_a', 'length_b', 'length_c', 'angle_alpha', 'angle_beta', 'angle_gamma']):
        cif_out['_cell_%s' % cifname] = cell

    Debug.write('%7.3f %7.3f %7.3f %7.3f %7.3f %7.3f' % \
              self._scalr_cell)
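Before refinement, the sweeps are grouped by their project/crystal/wavelength key; the groups[pi] = groups.get(pi, []) + [...] idiom simply accumulates per-group tuples of files. A minimal illustration of that grouping with made-up sweep data (setdefault is an equivalent idiom):

# Made-up (project, crystal, wavelength) info and per-sweep files, for illustration only.
sweeps = [
    (("AUTOMATIC", "DEFAULT", "NATIVE"), ("sweep1_experiments.json", "sweep1_reflections.pickle")),
    (("AUTOMATIC", "DEFAULT", "NATIVE"), ("sweep2_experiments.json", "sweep2_reflections.pickle")),
    (("AUTOMATIC", "DEFAULT", "SAD"), ("sweep3_experiments.json", "sweep3_reflections.pickle")),
]

groups = {}
for project_info, files in sweeps:
    pi = "_".join(project_info)
    groups.setdefault(pi, []).append(files)

for pi, members in groups.items():
    # Each group would feed one TwoThetaRefine job in the method above.
    print(pi, "->", [experiments for experiments, _ in members])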
Example #8
  def get_output(self):

    result = 'Crystal: %s\n' % self._name

    if self._aa_sequence:
      result += 'Sequence: %s\n' % self._aa_sequence.get_sequence()
    for wavelength in self._wavelengths.keys():
      result += self._wavelengths[wavelength].get_output()

    scaler = self._get_scaler()
    if scaler.get_scaler_finish_done():
      for wname, xwav in self._wavelengths.iteritems():
        for xsweep in xwav.get_sweeps():
          idxr = xsweep._get_indexer()
          if PhilIndex.params.xia2.settings.show_template:
            result += '%s\n' %banner('Autoindexing %s (%s)' %(
              idxr.get_indexer_sweep_name(), idxr.get_template()))
          else:
            result += '%s\n' %banner(
              'Autoindexing %s' %idxr.get_indexer_sweep_name())
          result += '%s\n' %idxr.show_indexer_solutions()

          intgr = xsweep._get_integrater()
          if PhilIndex.params.xia2.settings.show_template:
            result += '%s\n' %banner('Integrating %s (%s)' %(
              intgr.get_integrater_sweep_name(), intgr.get_template()))
          else:
            result += '%s\n' %banner(
              'Integrating %s' %intgr.get_integrater_sweep_name())
          result += '%s\n' % intgr.show_per_image_statistics()

      result += '%s\n' %banner('Scaling %s' %self.get_name())

      for (dname, sname), (limit, suggested) in scaler.get_scaler_resolution_limits().iteritems():
        if suggested is None or limit == suggested:
          result += 'Resolution limit for %s/%s: %5.2f\n' %(dname, sname, limit)
        else:
          result += 'Resolution limit for %s/%s: %5.2f (%5.2f suggested)\n' %(dname, sname, limit, suggested)

    # this is now deprecated - be explicit in what you are
    # asking for...
    reflections_all = self.get_scaled_merged_reflections()
    statistics_all = self._get_scaler().get_scaler_statistics()

    # print some of these statistics, perhaps?

    for key in statistics_all.keys():
      result += format_statistics(statistics_all[key], caption='For %s/%s/%s' % key)

    # then print out some "derived" information based on the
    # scaling - this is presented through the Scaler interface
    # explicitly...

    cell = self._get_scaler().get_scaler_cell()
    cell_esd = self._get_scaler().get_scaler_cell_esd()
    spacegroups = self._get_scaler().get_scaler_likely_spacegroups()

    spacegroup = spacegroups[0]
    resolution = self._get_scaler().get_scaler_highest_resolution()

    from cctbx import sgtbx
    sg = sgtbx.space_group_type(str(spacegroup))
    spacegroup = sg.lookup_symbol()
    CIF.set_spacegroup(sg)
    mmCIF.set_spacegroup(sg)

    if len(self._wavelengths) == 1:
      CIF.set_wavelengths([w.get_wavelength() for w in self._wavelengths.itervalues()])
      mmCIF.set_wavelengths([w.get_wavelength() for w in self._wavelengths.itervalues()])
    else:
      for wavelength in self._wavelengths.keys():
        full_wave_name = '%s_%s_%s' % (self._project._name, self._name, wavelength)
        CIF.get_block(full_wave_name)['_diffrn_radiation_wavelength'] = \
          self._wavelengths[wavelength].get_wavelength()
        mmCIF.get_block(full_wave_name)['_diffrn_radiation_wavelength'] = \
          self._wavelengths[wavelength].get_wavelength()
      CIF.set_wavelengths({name: wave.get_wavelength() for name, wave in self._wavelengths.iteritems()})
      mmCIF.set_wavelengths({name: wave.get_wavelength() for name, wave in self._wavelengths.iteritems()})

    result += 'Assuming spacegroup: %s\n' % spacegroup
    if len(spacegroups) > 1:
      result += 'Other likely alternatives are:\n'
      for sg in spacegroups[1:]:
        result += '%s\n' % sg

    if cell_esd:
      from libtbx.utils import format_float_with_standard_uncertainty
      def match_formatting(dimA, dimB):
        def conditional_split(s):
          return (s[:s.index('.')],s[s.index('.'):]) if '.' in s else (s, '')
        A, B = conditional_split(dimA), conditional_split(dimB)
        maxlen = (max(len(A[0]), len(B[0])), max(len(A[1]), len(B[1])))
        return (
          A[0].rjust(maxlen[0])+A[1].ljust(maxlen[1]),
          B[0].rjust(maxlen[0])+B[1].ljust(maxlen[1])
        )
      formatted_cell_esds = tuple(format_float_with_standard_uncertainty(v, sd) for v, sd in zip(cell, cell_esd))
      formatted_rows = (formatted_cell_esds[0:3], formatted_cell_esds[3:6])
      formatted_rows = zip(*(match_formatting(l, a) for l, a in zip(*formatted_rows)))
      result += 'Unit cell (with estimated std devs):\n'
      result += '%s %s %s\n%s %s %s\n' % (formatted_rows[0] + formatted_rows[1])
    else:
      result += 'Unit cell:\n'
      result += '%7.3f %7.3f %7.3f\n%7.3f %7.3f %7.3f\n' % tuple(cell)

    # now, use this information and the sequence (if provided)
    # and also matthews_coef (should I be using this directly, here?)
    # to compute a likely number of molecules in the ASU and also
    # the solvent content...

    if self._aa_sequence:
      residues = self._aa_sequence.get_sequence()
      if residues:
        nres = len(residues)

        # first compute the number of molecules using the K&R
        # method

        nmol = compute_nmol(cell[0], cell[1], cell[2],
                            cell[3], cell[4], cell[5],
                            spacegroup, resolution, nres)

        # then compute the solvent fraction

        solvent = compute_solvent(cell[0], cell[1], cell[2],
                                  cell[3], cell[4], cell[5],
                                  spacegroup, nmol, nres)

        result += 'Likely number of molecules in ASU: %d\n' % nmol
        result += 'Giving solvent fraction:        %4.2f\n' % solvent

        self._nmol = nmol

    if type(reflections_all) == type({}):
      for format in reflections_all.keys():
        result += '%s format:\n' % format
        reflections = reflections_all[format]

        if type(reflections) == type({}):
          for wavelength in reflections.keys():
            target = FileHandler.get_data_file(
                reflections[wavelength])
            result += 'Scaled reflections (%s): %s\n' % \
                      (wavelength, target)

        else:
          target = FileHandler.get_data_file(
              reflections)
          result += 'Scaled reflections: %s\n' % target

    CIF.write_cif()
    mmCIF.write_cif()

    return result
Example #9
  def _update_scaled_unit_cell(self):
    # FIXME this could be brought in-house

    fast_mode = PhilIndex.params.dials.fast_mode
    if PhilIndex.params.xia2.settings.integrater == 'dials' and not fast_mode:
      from xia2.Wrappers.Dials.TwoThetaRefine import TwoThetaRefine
      from xia2.lib.bits import auto_logfiler

      Chatter.banner('Unit cell refinement')

      # Collect a list of all sweeps, grouped by project, crystal, wavelength
      groups = {}
      self._scalr_cell_dict = {}
      tt_refine_experiments, tt_refine_pickles = [], []
      for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)
        pi = '_'.join(si.get_project_info())
        intgr = si.get_integrater()
        groups[pi] = groups.get(pi, []) + \
          [(intgr.get_integrated_experiments(), intgr.get_integrated_reflections())]

      # Two theta refine the unit cell for each group
      for pi in groups.keys():
        tt_grouprefiner = TwoThetaRefine()
        tt_grouprefiner.set_working_directory(self.get_working_directory())
        auto_logfiler(tt_grouprefiner)
        files = zip(*groups[pi])
        tt_grouprefiner.set_experiments(files[0])
        tt_grouprefiner.set_pickles(files[1])
        tt_refine_experiments.extend(files[0])
        tt_refine_pickles.extend(files[1])
        tt_grouprefiner.set_reindex_operator(self._spacegroup_reindex_operator)
        tt_grouprefiner.run()
        Chatter.write('%s: %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f' % \
          tuple([''.join(pi.split('_')[2:])] + list(tt_grouprefiner.get_unit_cell())))
        self._scalr_cell_dict[pi] = (tt_grouprefiner.get_unit_cell(), tt_grouprefiner.get_unit_cell_esd(), tt_grouprefiner.import_cif(), tt_grouprefiner.import_mmcif())
        if len(groups) > 1:
          cif_in = tt_grouprefiner.import_cif()
          cif_out = CIF.get_block(pi)
          for key in sorted(cif_in.keys()):
            cif_out[key] = cif_in[key]
          mmcif_in = tt_grouprefiner.import_mmcif()
          mmcif_out = mmCIF.get_block(pi)
          for key in sorted(mmcif_in.keys()):
            mmcif_out[key] = mmcif_in[key]

      # Two theta refine everything together
      if len(groups) > 1:
        tt_refiner = TwoThetaRefine()
        tt_refiner.set_working_directory(self.get_working_directory())
        auto_logfiler(tt_refiner)
        tt_refiner.set_experiments(tt_refine_experiments)
        tt_refiner.set_pickles(tt_refine_pickles)
        tt_refiner.set_reindex_operator(self._spacegroup_reindex_operator)
        tt_refiner.run()
        self._scalr_cell = tt_refiner.get_unit_cell()
        Chatter.write('Overall: %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f' % tt_refiner.get_unit_cell())
        self._scalr_cell_esd = tt_refiner.get_unit_cell_esd()
        cif_in = tt_refiner.import_cif()
        mmcif_in = tt_refiner.import_mmcif()
      else:
        self._scalr_cell, self._scalr_cell_esd, cif_in, mmcif_in = self._scalr_cell_dict.values()[0]

      import dials.util.version
      cif_out = CIF.get_block('xia2')
      mmcif_out = mmCIF.get_block('xia2')
      cif_out['_computing_cell_refinement'] = mmcif_out['_computing.cell_refinement'] = 'DIALS 2theta refinement, %s' % dials.util.version.dials_version()
      for key in sorted(cif_in.keys()):
        cif_out[key] = cif_in[key]
      for key in sorted(mmcif_in.keys()):
        mmcif_out[key] = mmcif_in[key]

      Debug.write('Unit cell obtained by two-theta refinement')

    else:
      ami = AnalyseMyIntensities()
      ami.set_working_directory(self.get_working_directory())

      average_unit_cell, ignore_sg = ami.compute_average_cell(
        [self._scalr_scaled_refl_files[key] for key in
         self._scalr_scaled_refl_files])

      Debug.write('Computed average unit cell (will use in all files)')
      self._scalr_cell = average_unit_cell
      self._scalr_cell_esd = None

      # Write average unit cell to .cif
      cif_out = CIF.get_block('xia2')
      cif_out['_computing_cell_refinement'] = 'AIMLESS averaged unit cell'
      for cell, cifname in zip(self._scalr_cell,
                               ['length_a', 'length_b', 'length_c', 'angle_alpha', 'angle_beta', 'angle_gamma']):
        cif_out['_cell_%s' % cifname] = cell

    Debug.write('%7.3f %7.3f %7.3f %7.3f %7.3f %7.3f' % \
              self._scalr_cell)