def set_spacegroup(self, spacegroup):
    '''Handler for the command-line option -spacegroup.

    Records the (upper-cased) spacegroup and derives from it the
    matching pointgroup and lattice; the derivation doubles as
    validation of the supplied name. Also flags that the lattice
    should not be tested, since it has been explicitly assigned.
    '''
    from xia2.Handlers.Syminfo import Syminfo

    spacegroup = spacegroup.upper()

    # Deriving these validates the spacegroup name as a side effect.
    derived_pointgroup = Syminfo.get_pointgroup(spacegroup)
    derived_lattice = Syminfo.get_lattice(spacegroup)

    self._spacegroup = spacegroup
    self._pointgroup = derived_pointgroup
    self._lattice = derived_lattice

    # record what we derived for debugging purposes
    from xia2.Handlers.Streams import Debug
    Debug.write('Derived information from spacegroup flag: %s' % spacegroup)
    Debug.write('Pointgroup: %s Lattice: %s' %
                (derived_pointgroup, derived_lattice))

    # an explicitly assigned spacegroup should not be lattice-tested
    self.set_no_lattice_test(True)
def decide_pointgroup(self):
    '''Decide on the correct pointgroup for hklin.

    Runs pointless on either the MTZ (hklin) or XDS (xdsin) input,
    scans the program log for fatal conditions and special cases,
    then parses the XML summary to populate the pointgroup,
    confidence, total probability, reindex matrix and reindex
    operator attributes. Returns 'ok' early when no alternative
    indexing is possible.
    '''

    if not self._xdsin:
        # MTZ route: ensure hklin has been set and is usable
        self.check_hklin()
        self.set_task('Computing the correct pointgroup for %s' % \
                      self.get_hklin())

    else:
        # XDS route: pointless will read the XDS file directly
        Debug.write('Pointless using XDS input file %s' % \
                    self._xdsin)

        self.set_task('Computing the correct pointgroup for %s' % \
                      self.get_xdsin())

    # FIXME this should probably be a standard CCP4 keyword

    if self._xdsin:
        self.add_command_line('xdsin')
        self.add_command_line(self._xdsin)

    # ask pointless to write an XML summary, parsed further below
    self.add_command_line('xmlout')
    self.add_command_line('%d_pointless.xml' % self.get_xpid())

    if self._hklref:
        self.add_command_line('hklref')
        self.add_command_line(self._hklref)

    self.start()

    self.input('systematicabsences off')
    self.input('setting symmetry-based')

    if self._hklref:
        # optionally relax the cell comparison tolerance when matching
        # against a reference reflection file
        from xia2.Handlers.Phil import PhilIndex
        dev = PhilIndex.params.xia2.settings.developmental
        if dev.pointless_tolerance > 0.0:
            self.input('tolerance %f' % dev.pointless_tolerance)

    # may expect more %age variation for small molecule data
    if Flags.get_small_molecule() and self._hklref:
        self.input('tolerance 5.0')
    if Flags.get_small_molecule():
        self.input('chirality nonchiral')

    if self._input_laue_group:
        self.input('lauegroup %s' % self._input_laue_group)

    self.close_wait()

    # check for errors
    self.check_for_errors()

    # check for fatal errors - the message itself is on the next line
    output = self.get_all_output()
    for j, record in enumerate(output):
        if 'FATAL ERROR message:' in record:
            raise RuntimeError, 'Pointless error: %s' % output[j+1].strip()

    hklin_spacegroup = ''
    hklin_lattice = ''

    # scan the log for the input symmetry and special conditions
    for o in self.get_all_output():

        if 'Spacegroup from HKLIN file' in o:

            # hklin_spacegroup = o.split(':')[-1].strip()
            hklin_spacegroup = spacegroup_name_xHM_to_old(
                o.replace('Spacegroup from HKLIN file :', '').strip())
            hklin_lattice = Syminfo.get_lattice(hklin_spacegroup)

        if 'No alternative indexing possible' in o:
            # then the XML file will be broken - no worries...
            # fall back to the input symmetry with an identity reindex
            self._pointgroup = hklin_spacegroup
            self._confidence = 1.0
            self._totalprob = 1.0
            self._reindex_matrix = [1.0, 0.0, 0.0,
                                    0.0, 1.0, 0.0,
                                    0.0, 0.0, 1.0]
            self._reindex_operator = 'h,k,l'
            return 'ok'

        if '**** Incompatible symmetries ****' in o:
            raise RuntimeError, \
                  'reindexing against a reference with different symmetry'

        if '***** Stopping because cell discrepancy between files' in o:
            raise RuntimeError, 'incompatible unit cells between data sets'

        if 'L-test suggests that the data may be twinned' in o:
            self._probably_twinned = True

    # parse the XML file for the information I need...

    xml_file = os.path.join(self.get_working_directory(),
                            '%d_pointless.xml' % self.get_xpid())
    mend_pointless_xml(xml_file)

    # catch the case sometimes on ppc mac where pointless adds
    # an extra .xml on the end...

    if not os.path.exists(xml_file) and \
       os.path.exists('%s.xml' % xml_file):
        xml_file = '%s.xml' % xml_file

    # NOTE(review): when hklref is set, the solution attributes are not
    # re-read from the XML here - they keep whatever values they had.
    if not self._hklref:

        dom = xml.dom.minidom.parse(xml_file)

        try:
            best = dom.getElementsByTagName('BestSolution')[0]
        except IndexError, e:
            raise RuntimeError, 'error getting solution from pointless'

        # extract the best solution: pointgroup name, confidence
        # measures and the reindexing needed to reach it
        self._pointgroup = best.getElementsByTagName(
            'GroupName')[0].childNodes[0].data
        self._confidence = float(best.getElementsByTagName(
            'Confidence')[0].childNodes[0].data)
        self._totalprob = float(best.getElementsByTagName(
            'TotalProb')[0].childNodes[0].data)
        self._reindex_matrix = map(float, best.getElementsByTagName(
            'ReindexMatrix')[0].childNodes[0].data.split())
        self._reindex_operator = clean_reindex_operator(
            best.getElementsByTagName(
                'ReindexOperator')[0].childNodes[0].data.strip())
def _scale_prepare(self):
    """Perform all of the preparation required to deliver the scaled data.

    This should sort together the reflection files, ensure that they are
    correctly indexed (via pointless) and generally tidy things up.
    """
    # Overall flow: GATHER sweeps -> resolve a consistent lattice across
    # sweeps -> reindex everything to a common pointgroup -> optionally
    # reindex against a reference file -> sort the data together.

    # acknowledge all of the programs we are about to use...
    Citations.cite("pointless")
    Citations.cite("aimless")
    Citations.cite("ccp4")

    # ---------- GATHER ----------
    self._sweep_handler = SweepInformationHandler(self._scalr_integraters)

    for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)
        pname, xname, dname = si.get_project_info()
        sname = si.get_sweep_name()

        # drop any sweep the user asked to exclude via PHIL settings
        exclude_sweep = False

        for sweep in PhilIndex.params.xia2.settings.sweep:
            if sweep.id == sname and sweep.exclude:
                exclude_sweep = True
                break

        if exclude_sweep:
            self._sweep_handler.remove_epoch(epoch)
            logger.debug("Excluding sweep %s", sname)
        else:
            logger.debug("%-30s %s/%s/%s", "adding data from:", xname, dname, sname)

    # gather data for all images which belonged to the parent
    # crystal - allowing for the fact that things could go wrong
    # e.g. epoch information not available, exposure times not in
    # headers etc...

    for e in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(e)
        assert is_mtz_file(si.get_reflections()), repr(si.get_reflections())

    p, x = self._sweep_handler.get_project_info()
    self._scalr_pname = p
    self._scalr_xname = x

    # verify that the lattices are consistent, calling eliminate if
    # they are not N.B. there could be corner cases here

    need_to_return = False

    multi_sweep_indexing = PhilIndex.params.xia2.settings.multi_sweep_indexing

    # START OF if more than one epoch
    if len(self._sweep_handler.get_epochs()) > 1:

        # if we have multi-sweep-indexing going on then logic says all should
        # share common lattice & UB definition => this is not used here?

        # START OF if multi_sweep indexing and not input pg
        if multi_sweep_indexing and not self._scalr_input_pointgroup:
            pointless_hklins = []

            # find the largest batch span so each sweep can be offset
            # into its own non-overlapping batch range
            max_batches = 0
            for epoch in self._sweep_handler.get_epochs():
                si = self._sweep_handler.get_sweep_information(epoch)
                hklin = si.get_reflections()

                batches = MtzUtils.batches_from_mtz(hklin)
                if 1 + max(batches) - min(batches) > max_batches:
                    max_batches = max(batches) - min(batches) + 1

            logger.debug("Biggest sweep has %d batches", max_batches)
            max_batches = nifty_power_of_ten(max_batches)

            counter = 0

            refiners = []

            for epoch in self._sweep_handler.get_epochs():
                si = self._sweep_handler.get_sweep_information(epoch)
                hklin = si.get_reflections()
                integrater = si.get_integrater()
                refiner = integrater.get_integrater_refiner()
                refiners.append(refiner)

                hklin = self._prepare_pointless_hklin(
                    hklin, si.get_integrater().get_phi_width()
                )

                # NOTE(review): pname/xname/dname here are leftovers from
                # the GATHER loop above, i.e. from the last sweep visited.
                hklout = os.path.join(
                    self.get_working_directory(),
                    "%s_%s_%s_%s_prepointless.mtz"
                    % (pname, xname, dname, si.get_sweep_name()),
                )

                # we will want to delete this one exit
                FileHandler.record_temporary_file(hklout)

                first_batch = min(si.get_batches())
                si.set_batch_offset(counter * max_batches - first_batch + 1)

                rebatch(
                    hklin,
                    hklout,
                    first_batch=counter * max_batches + 1,
                    pname=pname,
                    xname=xname,
                    dname=dname,
                )

                pointless_hklins.append(hklout)

                # update the counter & recycle
                counter += 1

            # SUMMARY - have added all sweeps to pointless_hklins

            s = self._factory.Sortmtz()

            pointless_hklin = os.path.join(
                self.get_working_directory(),
                "%s_%s_prepointless_sorted.mtz"
                % (self._scalr_pname, self._scalr_xname),
            )

            s.set_hklout(pointless_hklin)

            for hklin in pointless_hklins:
                s.add_hklin(hklin)

            s.sort()

            # FIXME xia2-51 in here look at running constant scaling on the
            # pointless hklin to put the runs on the same scale. Ref=[A]

            pointless_const = os.path.join(
                self.get_working_directory(),
                "%s_%s_prepointless_const.mtz"
                % (self._scalr_pname, self._scalr_xname),
            )
            FileHandler.record_temporary_file(pointless_const)

            # run aimless in "constant scale" mode to put all of the
            # runs on a common scale before the pointgroup analysis
            aimless_const = self._factory.Aimless()
            aimless_const.set_hklin(pointless_hklin)
            aimless_const.set_hklout(pointless_const)
            aimless_const.const()

            pointless_const = os.path.join(
                self.get_working_directory(),
                "%s_%s_prepointless_const_unmerged.mtz"
                % (self._scalr_pname, self._scalr_xname),
            )
            FileHandler.record_temporary_file(pointless_const)
            pointless_hklin = pointless_const

            # FIXME xia2-51 in here need to pass all refiners to ensure that the
            # information is passed back to all of them not just the last one...
            logger.debug(
                "Running multisweep pointless for %d sweeps", len(refiners)
            )
            pointgroup, reindex_op, ntr, pt = self._pointless_indexer_multisweep(
                pointless_hklin, refiners
            )

            logger.debug("X1698: %s: %s", pointgroup, reindex_op)

            lattices = [Syminfo.get_lattice(pointgroup)]

            for epoch in self._sweep_handler.get_epochs():
                si = self._sweep_handler.get_sweep_information(epoch)
                intgr = si.get_integrater()
                hklin = si.get_reflections()
                refiner = intgr.get_integrater_refiner()

                if ntr:
                    # need-to-rerun: reset the reindex op and come back
                    intgr.integrater_reset_reindex_operator()
                    need_to_return = True

            # SUMMARY - added all sweeps together into an mtz, ran
            # _pointless_indexer_multisweep on this, made a list of one lattice
            # and potentially reset reindex op?
        # END OF if multi_sweep indexing and not input pg

        # START OF if not multi_sweep, or input pg given
        else:
            lattices = []

            for epoch in self._sweep_handler.get_epochs():

                si = self._sweep_handler.get_sweep_information(epoch)
                intgr = si.get_integrater()
                hklin = si.get_reflections()
                refiner = intgr.get_integrater_refiner()

                if self._scalr_input_pointgroup:
                    pointgroup = self._scalr_input_pointgroup
                    reindex_op = "h,k,l"
                    ntr = False

                else:
                    pointless_hklin = self._prepare_pointless_hklin(
                        hklin, si.get_integrater().get_phi_width()
                    )

                    pointgroup, reindex_op, ntr, pt = self._pointless_indexer_jiffy(
                        pointless_hklin, refiner
                    )

                    logger.debug("X1698: %s: %s", pointgroup, reindex_op)

                lattice = Syminfo.get_lattice(pointgroup)

                if lattice not in lattices:
                    lattices.append(lattice)

                if ntr:
                    intgr.integrater_reset_reindex_operator()
                    need_to_return = True
            # SUMMARY do pointless_indexer on each sweep, get lattices and make a list
            # of unique lattices, potentially reset reindex op.
        # END OF if not multi_sweep, or input pg given

        # SUMMARY - still within if more than one epoch, now have a list of number
        # of lattices

        # START OF if multiple-lattices
        if len(lattices) > 1:

            # why not using pointless indexer jiffy??!

            # pick the lowest-symmetry lattice as the common one
            correct_lattice = sort_lattices(lattices)[0]

            logger.info("Correct lattice asserted to be %s", correct_lattice)

            # transfer this information back to the indexers
            for epoch in self._sweep_handler.get_epochs():

                si = self._sweep_handler.get_sweep_information(epoch)
                refiner = si.get_integrater().get_integrater_refiner()
                sname = si.get_sweep_name()

                state = refiner.set_refiner_asserted_lattice(correct_lattice)

                if state == refiner.LATTICE_CORRECT:
                    logger.info(
                        "Lattice %s ok for sweep %s", correct_lattice, sname
                    )

                elif state == refiner.LATTICE_IMPOSSIBLE:
                    raise RuntimeError(
                        f"Lattice {correct_lattice} impossible for {sname}"
                    )

                elif state == refiner.LATTICE_POSSIBLE:
                    logger.info(
                        "Lattice %s assigned for sweep %s", correct_lattice, sname
                    )
                    need_to_return = True
        # END OF if multiple-lattices
        # SUMMARY - forced all lattices to be same and hope its okay.
    # END OF if more than one epoch

    # if one or more of them was not in the lowest lattice,
    # need to return here to allow reprocessing

    if need_to_return:
        self.set_scaler_done(False)
        self.set_scaler_prepare_done(False)
        return

    # ---------- REINDEX ALL DATA TO CORRECT POINTGROUP ----------

    # all should share the same pointgroup, unless twinned... in which
    # case force them to be...

    pointgroups = {}
    reindex_ops = {}
    probably_twinned = False

    need_to_return = False

    multi_sweep_indexing = PhilIndex.params.xia2.settings.multi_sweep_indexing

    # START OF if multi-sweep and not input pg
    if multi_sweep_indexing and not self._scalr_input_pointgroup:
        pointless_hklins = []

        max_batches = 0
        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            hklin = si.get_reflections()

            batches = MtzUtils.batches_from_mtz(hklin)
            if 1 + max(batches) - min(batches) > max_batches:
                max_batches = max(batches) - min(batches) + 1

        logger.debug("Biggest sweep has %d batches", max_batches)
        max_batches = nifty_power_of_ten(max_batches)

        counter = 0

        refiners = []

        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            hklin = si.get_reflections()
            integrater = si.get_integrater()
            refiner = integrater.get_integrater_refiner()
            refiners.append(refiner)

            hklin = self._prepare_pointless_hklin(
                hklin, si.get_integrater().get_phi_width()
            )

            hklout = os.path.join(
                self.get_working_directory(),
                "%s_%s_%s_%s_prepointless.mtz"
                % (pname, xname, dname, si.get_sweep_name()),
            )

            # we will want to delete this one exit
            FileHandler.record_temporary_file(hklout)

            first_batch = min(si.get_batches())
            si.set_batch_offset(counter * max_batches - first_batch + 1)

            rebatch(
                hklin,
                hklout,
                first_batch=counter * max_batches + 1,
                pname=pname,
                xname=xname,
                dname=dname,
            )

            pointless_hklins.append(hklout)

            # update the counter & recycle
            counter += 1

        # FIXME related to xia2-51 - this looks very very similar to the logic
        # in [A] above - is this duplicated logic?

        s = self._factory.Sortmtz()

        pointless_hklin = os.path.join(
            self.get_working_directory(),
            "%s_%s_prepointless_sorted.mtz"
            % (self._scalr_pname, self._scalr_xname),
        )

        s.set_hklout(pointless_hklin)

        for hklin in pointless_hklins:
            s.add_hklin(hklin)

        s.sort()

        pointless_const = os.path.join(
            self.get_working_directory(),
            f"{self._scalr_pname}_{self._scalr_xname}_prepointless_const.mtz",
        )
        FileHandler.record_temporary_file(pointless_const)

        aimless_const = self._factory.Aimless()
        aimless_const.set_hklin(pointless_hklin)
        aimless_const.set_hklout(pointless_const)
        aimless_const.const()

        pointless_const = os.path.join(
            self.get_working_directory(),
            "%s_%s_prepointless_const_unmerged.mtz"
            % (self._scalr_pname, self._scalr_xname),
        )
        FileHandler.record_temporary_file(pointless_const)
        pointless_hklin = pointless_const

        pointgroup, reindex_op, ntr, pt = self._pointless_indexer_multisweep(
            pointless_hklin, refiners
        )

        for epoch in self._sweep_handler.get_epochs():
            pointgroups[epoch] = pointgroup
            reindex_ops[epoch] = reindex_op
        # SUMMARY ran pointless multisweep on combined mtz and made a dict
        # of pointgroups and reindex_ops (all same)
    # END OF if multi-sweep and not input pg

    # START OF if not mulit-sweep or pg given
    else:
        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)

            hklin = si.get_reflections()

            integrater = si.get_integrater()
            refiner = integrater.get_integrater_refiner()

            if self._scalr_input_pointgroup:
                logger.debug(
                    "Using input pointgroup: %s", self._scalr_input_pointgroup
                )
                pointgroup = self._scalr_input_pointgroup
                reindex_op = "h,k,l"
                # NOTE(review): ntr is not reset in this branch - it keeps
                # whatever value it had from a previous iteration.
                pt = False

            else:
                pointless_hklin = self._prepare_pointless_hklin(
                    hklin, si.get_integrater().get_phi_width()
                )

                pointgroup, reindex_op, ntr, pt = self._pointless_indexer_jiffy(
                    pointless_hklin, refiner
                )

                logger.debug("X1698: %s: %s", pointgroup, reindex_op)

                if ntr:
                    integrater.integrater_reset_reindex_operator()
                    need_to_return = True

            if pt and not probably_twinned:
                probably_twinned = True

            logger.debug("Pointgroup: %s (%s)", pointgroup, reindex_op)

            pointgroups[epoch] = pointgroup
            reindex_ops[epoch] = reindex_op
        # SUMMARY - for each sweep, run indexer jiffy and get reindex operators
        # and pointgroups dictionaries (could be different between sweeps)
    # END OF if not mulit-sweep or pg given

    overall_pointgroup = None

    pointgroup_set = {pointgroups[e] for e in pointgroups}

    if len(pointgroup_set) > 1 and not probably_twinned:
        raise RuntimeError(
            "non uniform pointgroups: %s" % str(list(pointgroup_set))
        )

    if len(pointgroup_set) > 1:
        # twinned data: fall back to the lowest-numbered (lowest
        # symmetry) pointgroup and reprocess
        logger.debug(
            "Probably twinned, pointgroups: %s",
            " ".join(p.replace(" ", "") for p in pointgroup_set),
        )
        numbers = (Syminfo.spacegroup_name_to_number(ps) for ps in pointgroup_set)
        overall_pointgroup = Syminfo.spacegroup_number_to_name(min(numbers))
        self._scalr_input_pointgroup = overall_pointgroup

        logger.info("Twinning detected, assume pointgroup %s", overall_pointgroup)

        need_to_return = True

    else:
        overall_pointgroup = pointgroup_set.pop()
    # SUMMARY - Have handled if different pointgroups & chosen an overall_pointgroup
    # which is the lowest symmetry

    # Now go through sweeps and do reindexing
    for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)

        integrater = si.get_integrater()

        integrater.set_integrater_spacegroup_number(
            Syminfo.spacegroup_name_to_number(overall_pointgroup)
        )
        integrater.set_integrater_reindex_operator(
            reindex_ops[epoch], reason="setting point group"
        )
        # This will give us the reflections in the correct point group
        si.set_reflections(integrater.get_integrater_intensities())

    if need_to_return:
        self.set_scaler_done(False)
        self.set_scaler_prepare_done(False)
        return

    # in here now optionally work through the data files which should be
    # indexed with a consistent point group, and transform the orientation
    # matrices by the lattice symmetry operations (if possible) to get a
    # consistent definition of U matrix modulo fixed rotations

    if PhilIndex.params.xia2.settings.unify_setting:
        self.unify_setting()

    if self.get_scaler_reference_reflection_file():
        self._reference = self.get_scaler_reference_reflection_file()
        logger.debug("Using HKLREF %s", self._reference)

    elif PhilIndex.params.xia2.settings.scale.reference_reflection_file:
        self._reference = (
            PhilIndex.params.xia2.settings.scale.reference_reflection_file
        )
        logger.debug("Using HKLREF %s", self._reference)

    params = PhilIndex.params
    use_brehm_diederichs = params.xia2.settings.use_brehm_diederichs
    if len(self._sweep_handler.get_epochs()) > 1 and use_brehm_diederichs:
        self.brehm_diederichs_reindexing()
    # If not Brehm-deidrichs, set reference as first sweep
    elif len(self._sweep_handler.get_epochs()) > 1 and not self._reference:
        first = self._sweep_handler.get_epochs()[0]
        si = self._sweep_handler.get_sweep_information(first)
        self._reference = si.get_reflections()

    # Now reindex to be consistent with first dataset - run pointless on each
    # dataset with reference
    if self._reference:

        md = self._factory.Mtzdump()
        md.set_hklin(self._reference)
        md.dump()

        datasets = md.get_datasets()

        # then get the unit cell, lattice etc.

        reference_lattice = Syminfo.get_lattice(md.get_spacegroup())
        reference_cell = md.get_dataset_info(datasets[0])["cell"]

        # then compute the pointgroup from this...

        # ---------- REINDEX TO CORRECT (REFERENCE) SETTING ----------

        for epoch in self._sweep_handler.get_epochs():

            # if we are working with unified UB matrix then this should not
            # be a problem here (note, *if*; *should*)

            # what about e.g. alternative P1 settings?
            # see JIRA MXSW-904
            if PhilIndex.params.xia2.settings.unify_setting:
                continue

            pl = self._factory.Pointless()

            si = self._sweep_handler.get_sweep_information(epoch)
            hklin = si.get_reflections()

            pl.set_hklin(
                self._prepare_pointless_hklin(
                    hklin, si.get_integrater().get_phi_width()
                )
            )

            hklout = os.path.join(
                self.get_working_directory(),
                "%s_rdx2.mtz" % os.path.split(hklin)[-1][:-4],
            )

            # we will want to delete this one exit
            FileHandler.record_temporary_file(hklout)

            # now set the initial reflection set as a reference...

            pl.set_hklref(self._reference)

            # https://github.com/xia2/xia2/issues/115 - should ideally iteratively
            # construct a reference or a tree of correlations to ensure correct
            # reference setting - however if small molecule assume has been
            # multi-sweep-indexed so can ignore "fatal errors" - temporary hack
            pl.decide_pointgroup(
                ignore_errors=PhilIndex.params.xia2.settings.small_molecule
            )

            logger.debug("Reindexing analysis of %s", pl.get_hklin())

            pointgroup = pl.get_pointgroup()
            reindex_op = pl.get_reindex_operator()

            logger.debug("Operator: %s", reindex_op)

            # apply this...

            integrater = si.get_integrater()

            integrater.set_integrater_reindex_operator(
                reindex_op, reason="match reference"
            )
            integrater.set_integrater_spacegroup_number(
                Syminfo.spacegroup_name_to_number(pointgroup)
            )
            si.set_reflections(integrater.get_integrater_intensities())

            # sanity-check the reindexed file against the reference
            md = self._factory.Mtzdump()
            md.set_hklin(si.get_reflections())
            md.dump()

            datasets = md.get_datasets()

            if len(datasets) > 1:
                raise RuntimeError(
                    "more than one dataset in %s" % si.get_reflections()
                )

            # then get the unit cell, lattice etc.

            lattice = Syminfo.get_lattice(md.get_spacegroup())
            cell = md.get_dataset_info(datasets[0])["cell"]

            if lattice != reference_lattice:
                raise RuntimeError(
                    "lattices differ in %s and %s"
                    % (self._reference, si.get_reflections())
                )

            logger.debug("Cell: %.2f %.2f %.2f %.2f %.2f %.2f" % cell)
            logger.debug("Ref: %.2f %.2f %.2f %.2f %.2f %.2f" % reference_cell)

            # reject any cell axis differing by more than 10% from the
            # reference value
            for j in range(6):
                if (
                    math.fabs((cell[j] - reference_cell[j]) / reference_cell[j])
                    > 0.1
                ):
                    raise RuntimeError(
                        "unit cell parameters differ in %s and %s"
                        % (self._reference, si.get_reflections())
                    )

    # ---------- SORT TOGETHER DATA ----------

    self._sort_together_data_ccp4()

    self._scalr_resolution_limits = {}

    # store central resolution limit estimates

    batch_ranges = [
        self._sweep_handler.get_sweep_information(epoch).get_batch_range()
        for epoch in self._sweep_handler.get_epochs()
    ]

    self._resolution_limit_estimates = ersatz_resolution(
        self._prepared_reflections, batch_ranges
    )
def setup_from_xinfo_file(self, xinfo_file): '''Set up this object & all subobjects based on the .xinfo file contents.''' settings = PhilIndex.params.xia2.settings sweep_ids = [sweep.id for sweep in settings.sweep] sweep_ranges = [sweep.range for sweep in settings.sweep] if not sweep_ids: sweep_ids = None sweep_ranges = None xinfo = XInfo(xinfo_file, sweep_ids=sweep_ids, sweep_ranges=sweep_ranges) self._name = xinfo.get_project() crystals = xinfo.get_crystals() for crystal in crystals.keys(): xc = XCrystal(crystal, self) if 'sequence' in crystals[crystal]: xc.set_aa_sequence(crystals[crystal]['sequence']) if 'ha_info' in crystals[crystal]: if crystals[crystal]['ha_info'] != { }: xc.set_ha_info(crystals[crystal]['ha_info']) if 'scaled_merged_reflection_file' in crystals[crystal]: xc.set_scaled_merged_reflections( crystals[crystal]['scaled_merged_reflections']) if 'reference_reflection_file' in crystals[crystal]: xc.set_reference_reflection_file( crystals[crystal]['reference_reflection_file']) if 'freer_file' in crystals[crystal]: xc.set_freer_file(crystals[crystal]['freer_file']) # user assigned spacegroup if 'user_spacegroup' in crystals[crystal]: xc.set_user_spacegroup(crystals[crystal]['user_spacegroup']) elif settings.space_group is not None: # XXX do we ever actually get here? 
xc.set_user_spacegroup(settings.space_group.type().lookup_symbol()) # add a default sample if none present in xinfo file if not crystals[crystal]['samples']: crystals[crystal]['samples']['X1'] = {} for sample in crystals[crystal]['samples'].keys(): sample_info = crystals[crystal]['samples'][sample] xsample = XSample(sample, xc) xc.add_sample(xsample) if not crystals[crystal]['wavelengths']: raise RuntimeError('No wavelengths specified in xinfo file') for wavelength in crystals[crystal]['wavelengths'].keys(): # FIXME 29/NOV/06 in here need to be able to cope with # no wavelength information - this should default to the # information in the image header (John Cowan pointed # out that this was untidy - requiring that it agrees # with the value in the header makes this almost # useless.) wave_info = crystals[crystal]['wavelengths'][wavelength] if 'wavelength' not in wave_info: Debug.write( 'No wavelength value given for wavelength %s' % wavelength) else: Debug.write( 'Overriding value for wavelength %s to %8.6f' % \ (wavelength, float(wave_info['wavelength']))) # handle case where user writes f" in place of f'' if 'f"' in wave_info and not \ 'f\'\'' in wave_info: wave_info['f\'\''] = wave_info['f"'] xw = XWavelength(wavelength, xc, wavelength = wave_info.get('wavelength', 0.0), f_pr = wave_info.get('f\'', 0.0), f_prpr = wave_info.get('f\'\'', 0.0), dmin = wave_info.get('dmin', 0.0), dmax = wave_info.get('dmax', 0.0)) # in here I also need to look and see if we have # been given any scaled reflection files... # check to see if we have a user supplied lattice... if 'user_spacegroup' in crystals[crystal]: lattice = Syminfo.get_lattice( crystals[crystal]['user_spacegroup']) elif settings.space_group is not None: # XXX do we ever actually get here? lattice = Syminfo.get_lattice( settings.space_group.type().lookup_symbol()) else: lattice = None # and also user supplied cell constants - from either # the xinfo file (the first port of call) or the # command-line. 
if 'user_cell' in crystals[crystal]: cell = crystals[crystal]['user_cell'] elif settings.unit_cell is not None: # XXX do we ever actually get here? cell = settings.unit_cell.parameters() else: cell = None dmin = wave_info.get('dmin', 0.0) dmax = wave_info.get('dmax', 0.0) if dmin == 0.0 and dmax == 0.0: dmin = PhilIndex.params.xia2.settings.resolution.d_min dmax = PhilIndex.params.xia2.settings.resolution.d_max # want to be able to locally override the resolution limits # for this sweep while leaving the rest for the data set # intact... for sweep_name in crystals[crystal]['sweeps'].keys(): sweep_info = crystals[crystal]['sweeps'][sweep_name] sample_name = sweep_info.get('sample') if sample_name is None: if len(crystals[crystal]['samples']) == 1: sample_name = crystals[crystal]['samples'].keys()[0] else: raise RuntimeError('No sample given for sweep %s' %sweep_name) xsample = xc.get_xsample(sample_name) assert xsample is not None dmin_old = dmin dmax_old = dmax replace = False if 'RESOLUTION' in sweep_info: values = map(float, sweep_info['RESOLUTION'].split()) if len(values) == 1: dmin = values[0] elif len(values) == 2: dmin = min(values) dmax = max(values) else: raise RuntimeError, \ 'bad resolution for sweep %s' % sweep_name replace = True # FIXME: AJP to implement # FIXME ticket number here please if 'ice' in sweep_info: pass if 'excluded_regions' in sweep_info: pass if sweep_info['wavelength'] == wavelength: frames_to_process = sweep_info.get('start_end') xsweep = xw.add_sweep( sweep_name, sample=xsample, directory = sweep_info.get('DIRECTORY'), image = sweep_info.get('IMAGE'), beam = sweep_info.get('beam'), reversephi = sweep_info.get('reversephi', False), distance = sweep_info.get('distance'), gain = float(sweep_info.get('GAIN', 0.0)), dmin = dmin, dmax = dmax, polarization = float(sweep_info.get( 'POLARIZATION', 0.0)), frames_to_process = frames_to_process, user_lattice = lattice, user_cell = cell, epoch = sweep_info.get('epoch', 0), ice = 
sweep_info.get('ice', False), excluded_regions = sweep_info.get( 'excluded_regions', []), ) xsample.add_sweep(xsweep) dmin = dmin_old dmax = dmax_old xc.add_wavelength(xw) self.add_crystal(xc) return
def _scale_prepare(self):
    '''Perform all of the preparation required to deliver the scaled
    data. This should sort together the reflection files, ensure that
    they are correctly indexed (via pointless) and generally tidy
    things up.

    Overall flow, as implemented below:
      1. GATHER   - collect sweep information, dropping user-excluded sweeps.
      2. LATTICE  - check all sweeps agree on a lattice; if not, assert the
                    lowest-symmetry lattice on the refiners and return early
                    so the pipeline reprocesses.
      3. REINDEX  - bring every sweep to a common pointgroup (twinning-aware).
      4. SETTING  - optionally unify the U-matrix setting, then reindex all
                    sweeps against a reference reflection file.
      5. SORT     - sort the data together and estimate resolution limits.

    Sets scaler state flags (scaler_done / scaler_prepare_done) to False and
    returns early whenever an upstream step must be repeated.'''

    # acknowledge all of the programs we are about to use...
    Citations.cite('pointless')
    Citations.cite('aimless')
    Citations.cite('ccp4')

    # ---------- GATHER ----------

    self._sweep_handler = SweepInformationHandler(self._scalr_integraters)

    Journal.block(
        'gathering', self.get_scaler_xcrystal().get_name(), 'CCP4',
        {'working directory':self.get_working_directory()})

    # drop any sweeps the user asked to exclude via the phil settings
    for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)
        pname, xname, dname = si.get_project_info()
        sname = si.get_sweep_name()

        exclude_sweep = False
        for sweep in PhilIndex.params.xia2.settings.sweep:
            if sweep.id == sname and sweep.exclude:
                exclude_sweep = True
                break

        if exclude_sweep:
            self._sweep_handler.remove_epoch(epoch)
            Debug.write('Excluding sweep %s' %sname)
        else:
            Journal.entry({'adding data from':'%s/%s/%s' % \
                           (xname, dname, sname)})

    # gather data for all images which belonged to the parent
    # crystal - allowing for the fact that things could go wrong
    # e.g. epoch information not available, exposure times not in
    # headers etc...

    # sanity check: every sweep must deliver its reflections as MTZ
    for e in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(e)
        assert is_mtz_file(si.get_reflections())

    p, x = self._sweep_handler.get_project_info()
    self._scalr_pname = p
    self._scalr_xname = x

    # verify that the lattices are consistent, calling eliminate if
    # they are not N.B. there could be corner cases here

    need_to_return = False

    multi_sweep_indexing = \
        PhilIndex.params.xia2.settings.developmental.multi_sweep_indexing

    if len(self._sweep_handler.get_epochs()) > 1:

        if multi_sweep_indexing and not self._scalr_input_pointgroup:
            # multi-sweep mode: rebatch and sort all sweeps into one
            # reflection file, then run pointless once on the combined data
            pointless_hklins = []

            # first pass: find the largest batch range over all sweeps so a
            # uniform batch offset can be chosen
            max_batches = 0
            for epoch in self._sweep_handler.get_epochs():
                si = self._sweep_handler.get_sweep_information(epoch)
                hklin = si.get_reflections()

                md = self._factory.Mtzdump()
                md.set_hklin(hklin)
                md.dump()

                batches = md.get_batches()
                if 1 + max(batches) - min(batches) > max_batches:
                    max_batches = max(batches) - min(batches) + 1

                datasets = md.get_datasets()

                Debug.write('In reflection file %s found:' % hklin)
                for d in datasets:
                    Debug.write('... %s' % d)

                # NOTE(review): dataset_info is never used after this -
                # possibly dump() side effects are the point; confirm
                dataset_info = md.get_dataset_info(datasets[0])

            from xia2.lib.bits import nifty_power_of_ten
            Debug.write('Biggest sweep has %d batches' % max_batches)
            # round the offset stride up to a "nifty" power of ten so batch
            # numbers stay human-readable
            max_batches = nifty_power_of_ten(max_batches)

            counter = 0

            # second pass: rebatch each sweep into its own batch window
            for epoch in self._sweep_handler.get_epochs():
                si = self._sweep_handler.get_sweep_information(epoch)
                hklin = si.get_reflections()
                integrater = si.get_integrater()
                refiner = integrater.get_integrater_refiner()

                hklin = self._prepare_pointless_hklin(
                    hklin, si.get_integrater().get_phi_width())

                rb = self._factory.Rebatch()

                hklout = os.path.join(self.get_working_directory(),
                                      '%s_%s_%s_%s_prepointless.mtz' % \
                                      (pname, xname, dname,
                                       si.get_sweep_name()))

                # we will want to delete this one exit
                FileHandler.record_temporary_file(hklout)

                first_batch = min(si.get_batches())
                si.set_batch_offset(counter * max_batches - first_batch + 1)

                rb.set_hklin(hklin)
                rb.set_first_batch(counter * max_batches + 1)
                rb.set_project_info(pname, xname, dname)
                rb.set_hklout(hklout)

                new_batches = rb.rebatch()

                pointless_hklins.append(hklout)

                # update the counter & recycle
                counter += 1

            s = self._factory.Sortmtz()

            pointless_hklin = os.path.join(self.get_working_directory(),
                                           '%s_%s_prepointless_sorted.mtz' % \
                                           (self._scalr_pname,
                                            self._scalr_xname))

            s.set_hklout(pointless_hklin)

            for hklin in pointless_hklins:
                s.add_hklin(hklin)

            s.sort()

            # NOTE(review): refiner here is whichever was bound last in the
            # loop above - assumes all sweeps share one refiner; confirm
            pointgroup, reindex_op, ntr, pt = \
                self._pointless_indexer_jiffy(
                    pointless_hklin, refiner)

            Debug.write('X1698: %s: %s' % (pointgroup, reindex_op))

            lattices = [Syminfo.get_lattice(pointgroup)]

            for epoch in self._sweep_handler.get_epochs():
                si = self._sweep_handler.get_sweep_information(epoch)
                intgr = si.get_integrater()
                hklin = si.get_reflections()
                refiner = intgr.get_integrater_refiner()

                if ntr:
                    # pointless recommended a new setting: reset any
                    # reindex operator and flag for reprocessing
                    intgr.integrater_reset_reindex_operator()
                    need_to_return = True

        else:
            # per-sweep mode: run the pointless jiffy on each sweep
            # individually and collect the distinct lattices
            lattices = []

            for epoch in self._sweep_handler.get_epochs():

                si = self._sweep_handler.get_sweep_information(epoch)
                intgr = si.get_integrater()
                hklin = si.get_reflections()
                refiner = intgr.get_integrater_refiner()

                if self._scalr_input_pointgroup:
                    # user assigned the pointgroup - trust it, identity op
                    pointgroup = self._scalr_input_pointgroup
                    reindex_op = 'h,k,l'
                    ntr = False

                else:
                    pointless_hklin = self._prepare_pointless_hklin(
                        hklin, si.get_integrater().get_phi_width())

                    pointgroup, reindex_op, ntr, pt = \
                        self._pointless_indexer_jiffy(
                            pointless_hklin, refiner)

                    Debug.write('X1698: %s: %s' % (pointgroup, reindex_op))

                lattice = Syminfo.get_lattice(pointgroup)

                if not lattice in lattices:
                    lattices.append(lattice)

                if ntr:
                    intgr.integrater_reset_reindex_operator()
                    need_to_return = True

        if len(lattices) > 1:

            # why not using pointless indexer jiffy??!
            # lowest-symmetry lattice wins; assert it on every refiner
            correct_lattice = sort_lattices(lattices)[0]

            Chatter.write('Correct lattice asserted to be %s' % \
                          correct_lattice)

            # transfer this information back to the indexers
            for epoch in self._sweep_handler.get_epochs():

                si = self._sweep_handler.get_sweep_information(epoch)
                refiner = si.get_integrater().get_integrater_refiner()
                sname = si.get_sweep_name()

                state = refiner.set_refiner_asserted_lattice(
                    correct_lattice)

                if state == refiner.LATTICE_CORRECT:
                    Chatter.write('Lattice %s ok for sweep %s' % \
                                  (correct_lattice, sname))
                elif state == refiner.LATTICE_IMPOSSIBLE:
                    raise RuntimeError, 'Lattice %s impossible for %s' \
                          % (correct_lattice, sname)
                elif state == refiner.LATTICE_POSSIBLE:
                    # lattice accepted but sweep needs re-refinement
                    Chatter.write('Lattice %s assigned for sweep %s' % \
                                  (correct_lattice, sname))
                    need_to_return = True

    # if one or more of them was not in the lowest lattice,
    # need to return here to allow reprocessing

    if need_to_return:
        self.set_scaler_done(False)
        self.set_scaler_prepare_done(False)
        return

    # ---------- REINDEX ALL DATA TO CORRECT POINTGROUP ----------

    # all should share the same pointgroup, unless twinned... in which
    # case force them to be...

    pointgroups = { }
    reindex_ops = { }
    probably_twinned = False

    need_to_return = False

    multi_sweep_indexing = \
        PhilIndex.params.xia2.settings.developmental.multi_sweep_indexing

    if multi_sweep_indexing and not self._scalr_input_pointgroup:
        # combined-file pointgroup determination, same recipe as the
        # lattice check above: rebatch, sort, one pointless run
        pointless_hklins = []

        max_batches = 0
        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            hklin = si.get_reflections()

            md = self._factory.Mtzdump()
            md.set_hklin(hklin)
            md.dump()

            batches = md.get_batches()
            if 1 + max(batches) - min(batches) > max_batches:
                max_batches = max(batches) - min(batches) + 1

            datasets = md.get_datasets()

            Debug.write('In reflection file %s found:' % hklin)
            for d in datasets:
                Debug.write('... %s' % d)

            dataset_info = md.get_dataset_info(datasets[0])

        from xia2.lib.bits import nifty_power_of_ten
        Debug.write('Biggest sweep has %d batches' % max_batches)
        max_batches = nifty_power_of_ten(max_batches)

        counter = 0

        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            hklin = si.get_reflections()
            integrater = si.get_integrater()
            refiner = integrater.get_integrater_refiner()

            hklin = self._prepare_pointless_hklin(
                hklin, si.get_integrater().get_phi_width())

            rb = self._factory.Rebatch()

            hklout = os.path.join(self.get_working_directory(),
                                  '%s_%s_%s_%s_prepointless.mtz' % \
                                  (pname, xname, dname,
                                   si.get_sweep_name()))

            # we will want to delete this one exit
            FileHandler.record_temporary_file(hklout)

            first_batch = min(si.get_batches())
            si.set_batch_offset(counter * max_batches - first_batch + 1)

            rb.set_hklin(hklin)
            rb.set_first_batch(counter * max_batches + 1)
            rb.set_project_info(pname, xname, dname)
            rb.set_hklout(hklout)

            new_batches = rb.rebatch()

            pointless_hklins.append(hklout)

            # update the counter & recycle
            counter += 1

        s = self._factory.Sortmtz()

        pointless_hklin = os.path.join(self.get_working_directory(),
                                       '%s_%s_prepointless_sorted.mtz' % \
                                       (self._scalr_pname,
                                        self._scalr_xname))

        s.set_hklout(pointless_hklin)

        for hklin in pointless_hklins:
            s.add_hklin(hklin)

        s.sort()

        pointgroup, reindex_op, ntr, pt = \
            self._pointless_indexer_jiffy(
                pointless_hklin, refiner)

        # every sweep inherits the single combined-file answer
        for epoch in self._sweep_handler.get_epochs():
            pointgroups[epoch] = pointgroup
            reindex_ops[epoch] = reindex_op

    else:
        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)

            hklin = si.get_reflections()
            #hklout = os.path.join(
            #self.get_working_directory(),
            #os.path.split(hklin)[-1].replace('.mtz', '_rdx.mtz'))

            #FileHandler.record_temporary_file(hklout)

            integrater = si.get_integrater()
            refiner = integrater.get_integrater_refiner()

            if self._scalr_input_pointgroup:
                Debug.write('Using input pointgroup: %s' % \
                            self._scalr_input_pointgroup)
                pointgroup = self._scalr_input_pointgroup
                reindex_op = 'h,k,l'
                pt = False

            else:

                pointless_hklin = self._prepare_pointless_hklin(
                    hklin, si.get_integrater().get_phi_width())

                pointgroup, reindex_op, ntr, pt = \
                    self._pointless_indexer_jiffy(
                        pointless_hklin, refiner)

                Debug.write('X1698: %s: %s' % (pointgroup, reindex_op))

                if ntr:
                    integrater.integrater_reset_reindex_operator()
                    need_to_return = True

            # pt = "probably twinned" flag from the jiffy; sticky once set
            if pt and not probably_twinned:
                probably_twinned = True

            Debug.write('Pointgroup: %s (%s)' % (pointgroup, reindex_op))

            pointgroups[epoch] = pointgroup
            reindex_ops[epoch] = reindex_op

    overall_pointgroup = None

    pointgroup_set = set([pointgroups[e] for e in pointgroups])

    # disagreeing pointgroups are only tolerated when twinning is suspected
    if len(pointgroup_set) > 1 and \
       not probably_twinned:
        raise RuntimeError, 'non uniform pointgroups'

    if len(pointgroup_set) > 1:
        Debug.write('Probably twinned, pointgroups: %s' % \
                    ' '.join([p.replace(' ', '') for p in \
                              list(pointgroup_set)]))
        # pick the pointgroup with the lowest spacegroup number (lowest
        # symmetry) and force it on everything, then reprocess
        numbers = [Syminfo.spacegroup_name_to_number(s) for s in \
                   pointgroup_set]
        overall_pointgroup = Syminfo.spacegroup_number_to_name(
            min(numbers))
        self._scalr_input_pointgroup = overall_pointgroup

        Chatter.write('Twinning detected, assume pointgroup %s' % \
                      overall_pointgroup)

        need_to_return = True

    else:
        overall_pointgroup = pointgroup_set.pop()

    # push the agreed pointgroup + per-sweep reindex operator back into
    # each integrater and refresh the reflection files
    for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)

        integrater = si.get_integrater()

        integrater.set_integrater_spacegroup_number(
            Syminfo.spacegroup_name_to_number(overall_pointgroup))
        integrater.set_integrater_reindex_operator(
            reindex_ops[epoch], reason='setting point group')
        # This will give us the reflections in the correct point group
        si.set_reflections(integrater.get_integrater_intensities())

    if need_to_return:
        self.set_scaler_done(False)
        self.set_scaler_prepare_done(False)
        return

    # in here now optinally work through the data files which should be
    # indexed with a consistent point group, and transform the orientation
    # matrices by the lattice symmetry operations (if possible) to get a
    # consistent definition of U matrix modulo fixed rotations

    if PhilIndex.params.xia2.settings.unify_setting:

        from scitbx.matrix import sqr
        reference_U = None
        i3 = sqr((1, 0, 0, 0, 1, 0, 0, 0, 1))

        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            intgr = si.get_integrater()
            # remove the goniometer's fixed rotation so U matrices are
            # comparable between sweeps
            fixed = sqr(intgr.get_goniometer().get_fixed_rotation())
            u, b, s = get_umat_bmat_lattice_symmetry_from_mtz(
                si.get_reflections())
            U = fixed.inverse() * sqr(u).transpose()
            B = sqr(b)

            # first sweep defines the reference setting
            if reference_U is None:
                reference_U = U
                continue

            # score every lattice symmetry op by how close it brings this
            # sweep's U to the reference (deviation from identity)
            results = []
            for op in s.all_ops():
                R = B * sqr(op.r().as_double()).transpose() * B.inverse()
                nearly_i3 = (U * R).inverse() * reference_U
                score = sum([abs(_n - _i)
                             for (_n, _i) in zip(nearly_i3, i3)])
                results.append((score, op.r().as_hkl(), op))

            results.sort()
            best = results[0]
            Debug.write('Best reindex: %s %.3f' % (best[1], best[0]))
            intgr.set_integrater_reindex_operator(
                best[2].r().inverse().as_hkl(),
                reason='unifying [U] setting')
            si.set_reflections(intgr.get_integrater_intensities())

            # recalculate to verify
            u, b, s = get_umat_bmat_lattice_symmetry_from_mtz(
                si.get_reflections())
            U = fixed.inverse() * sqr(u).transpose()
            Debug.write('New reindex: %s' % (U.inverse() * reference_U))

        # FIXME I should probably raise an exception at this stage if this
        # is not about I3...

    # pick up a user-supplied reference reflection file, if any
    if self.get_scaler_reference_reflection_file():
        self._reference = self.get_scaler_reference_reflection_file()
        Debug.write('Using HKLREF %s' % self._reference)

    elif Flags.get_reference_reflection_file():
        self._reference = Flags.get_reference_reflection_file()
        Debug.write('Using HKLREF %s' % self._reference)

    params = PhilIndex.params
    use_brehm_diederichs = params.xia2.settings.use_brehm_diederichs
    if len(self._sweep_handler.get_epochs()) > 1 and use_brehm_diederichs:
        # resolve indexing ambiguity across sweeps with the Brehm &
        # Diederichs algorithm rather than a pairwise reference comparison
        brehm_diederichs_files_in = []
        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            hklin = si.get_reflections()
            brehm_diederichs_files_in.append(hklin)

        # now run cctbx.brehm_diederichs to figure out the indexing hand for
        # each sweep
        from xia2.Wrappers.Cctbx.BrehmDiederichs import BrehmDiederichs
        from xia2.lib.bits import auto_logfiler
        brehm_diederichs = BrehmDiederichs()
        brehm_diederichs.set_working_directory(
            self.get_working_directory())
        auto_logfiler(brehm_diederichs)
        brehm_diederichs.set_input_filenames(brehm_diederichs_files_in)
        # 1 or 3? 1 seems to work better?
        brehm_diederichs.set_asymmetric(1)
        brehm_diederichs.run()
        reindexing_dict = brehm_diederichs.get_reindexing_dict()

        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            intgr = si.get_integrater()
            hklin = si.get_reflections()

            reindex_op = reindexing_dict.get(os.path.abspath(hklin))
            assert reindex_op is not None

            # NOTE(review): 'if 1 or ...' is always true, so the operator
            # is applied even when it is the identity - presumably
            # deliberate (forces a refresh of the reflections); confirm
            if 1 or reindex_op != 'h,k,l':
                # apply the reindexing operator
                intgr.set_integrater_reindex_operator(
                    reindex_op, reason='match reference')
                si.set_reflections(intgr.get_integrater_intensities())

    elif len(self._sweep_handler.get_epochs()) > 1 and \
             not self._reference:

        # no reference supplied: use the first sweep as the reference
        first = self._sweep_handler.get_epochs()[0]
        si = self._sweep_handler.get_sweep_information(first)
        self._reference = si.get_reflections()

    if self._reference:

        md = self._factory.Mtzdump()
        md.set_hklin(self._reference)
        md.dump()

        # NOTE(review): 'and False' disables this check - presumably an
        # intentionally parked validation; confirm before re-enabling
        if md.get_batches() and False:
            raise RuntimeError, 'reference reflection file %s unmerged' % \
                  self._reference

        datasets = md.get_datasets()

        # NOTE(review): also disabled by 'and False'
        if len(datasets) > 1 and False:
            raise RuntimeError, 'more than one dataset in %s' % \
                  self._reference

        # then get the unit cell, lattice etc.

        reference_lattice = Syminfo.get_lattice(md.get_spacegroup())
        reference_cell = md.get_dataset_info(datasets[0])['cell']

        # then compute the pointgroup from this...

        # ---------- REINDEX TO CORRECT (REFERENCE) SETTING ----------

        for epoch in self._sweep_handler.get_epochs():
            pl = self._factory.Pointless()
            si = self._sweep_handler.get_sweep_information(epoch)
            hklin = si.get_reflections()

            pl.set_hklin(self._prepare_pointless_hklin(
                hklin, si.get_integrater().get_phi_width()))

            hklout = os.path.join(
                self.get_working_directory(),
                '%s_rdx2.mtz' % os.path.split(hklin)[-1][:-4])

            # we will want to delete this one exit
            FileHandler.record_temporary_file(hklout)

            # now set the initial reflection set as a reference...

            pl.set_hklref(self._reference)

            # write a pointless log file...
            pl.decide_pointgroup()

            Debug.write('Reindexing analysis of %s' % pl.get_hklin())

            pointgroup = pl.get_pointgroup()
            reindex_op = pl.get_reindex_operator()

            Debug.write('Operator: %s' % reindex_op)

            # apply this...

            integrater = si.get_integrater()
            integrater.set_integrater_reindex_operator(
                reindex_op, reason='match reference')
            integrater.set_integrater_spacegroup_number(
                Syminfo.spacegroup_name_to_number(pointgroup))
            si.set_reflections(integrater.get_integrater_intensities())

            # verify the reindexed sweep now agrees with the reference
            md = self._factory.Mtzdump()
            md.set_hklin(si.get_reflections())
            md.dump()

            datasets = md.get_datasets()

            if len(datasets) > 1:
                raise RuntimeError, 'more than one dataset in %s' % \
                      si.get_reflections()

            # then get the unit cell, lattice etc.

            lattice = Syminfo.get_lattice(md.get_spacegroup())
            cell = md.get_dataset_info(datasets[0])['cell']

            if lattice != reference_lattice:
                raise RuntimeError, 'lattices differ in %s and %s' % \
                      (self._reference, si.get_reflections())

            # unit cell parameters must agree within 10% of the reference
            for j in range(6):
                if math.fabs((cell[j] - reference_cell[j]) /
                             reference_cell[j]) > 0.1:
                    raise RuntimeError, \
                          'unit cell parameters differ in %s and %s' % \
                          (self._reference, si.get_reflections())

    # ---------- SORT TOGETHER DATA ----------

    self._sort_together_data_ccp4()

    self._scalr_resolution_limits = { }

    # store central resolution limit estimates

    batch_ranges = [self._sweep_handler.get_sweep_information(
        epoch).get_batch_range() for epoch in
                    self._sweep_handler.get_epochs()]

    self._resolution_limit_estimates = erzatz_resolution(
        self._prepared_reflections, batch_ranges)

    return
def _scale_prepare(self):
    '''Prepare the data for scaling - this will reindex it the
    reflections to the correct pointgroup and setting, for instance,
    and move the reflection files to the scale directory.

    XDS-flavour preparation. Overall flow:
      1. GATHER   - build self._sweep_information from the integraters,
                    optionally limiting each sweep to its frame range.
      2. LATTICE  - verify all sweeps share a lattice, asserting the
                    lowest-symmetry one and returning early if not.
      3. SETTING  - bring every sweep to a consistent setting, either via
                    Brehm-Diederichs, or by reindexing against a reference
                    reflection file (user-supplied or the first sweep).
      4. FINISH   - average the unit cells and tidy up (REMOVE.HKL).

    Sets scaler state flags to False and returns early when upstream
    reprocessing is required.'''

    Citations.cite('xds')
    Citations.cite('ccp4')
    Citations.cite('pointless')

    # GATHER phase - get the reflection files together... note that
    # it is not necessary in here to keep the batch information as we
    # don't wish to rebatch the reflections prior to scaling.
    # FIXME need to think about what I will do about the radiation
    # damage analysis in here...

    self._sweep_information = { }

    # FIXME in here I want to record the batch number to
    # epoch mapping as per the CCP4 Scaler implementation.

    Journal.block(
        'gathering', self.get_scaler_xcrystal().get_name(), 'XDS',
        {'working directory':self.get_working_directory()})

    for epoch in self._scalr_integraters.keys():
        intgr = self._scalr_integraters[epoch]
        pname, xname, dname = intgr.get_integrater_project_info()
        sname = intgr.get_integrater_sweep_name()
        self._sweep_information[epoch] = {
            'pname':pname,
            'xname':xname,
            'dname':dname,
            'integrater':intgr,
            'corrected_intensities':
            intgr.get_integrater_corrected_intensities(),
            'prepared_reflections':None,
            'scaled_reflections':None,
            'header':intgr.get_header(),
            'batches':intgr.get_integrater_batches(),
            'image_to_epoch':intgr.get_integrater_sweep(
            ).get_image_to_epoch(),
            'image_to_dose':{},
            'batch_offset':0,
            'sname':sname
            }

        Journal.entry({'adding data from':'%s/%s/%s' % \
                       (xname, dname, sname)})

        # what are these used for?
        # pname / xname / dname - dataset identifiers
        # image to epoch / batch offset / batches - for RD analysis

        Debug.write('For EPOCH %s have:' % str(epoch))
        Debug.write('ID = %s/%s/%s' % (pname, xname, dname))
        Debug.write('SWEEP = %s' % intgr.get_integrater_sweep_name())

    # next work through all of the reflection files and make sure that
    # they are XDS_ASCII format...

    epochs = self._sweep_information.keys()
    epochs.sort()

    self._first_epoch = min(epochs)

    # project / crystal names are taken from the first sweep and must be
    # shared by all others (checked below)
    self._scalr_pname = self._sweep_information[epochs[0]]['pname']
    self._scalr_xname = self._sweep_information[epochs[0]]['xname']

    for epoch in epochs:
        intgr = self._scalr_integraters[epoch]
        pname = self._sweep_information[epoch]['pname']
        xname = self._sweep_information[epoch]['xname']
        dname = self._sweep_information[epoch]['dname']
        sname = self._sweep_information[epoch]['sname']
        if self._scalr_pname != pname:
            raise RuntimeError, 'all data must have a common project name'
        xname = self._sweep_information[epoch]['xname']
        if self._scalr_xname != xname:
            raise RuntimeError, \
                  'all data for scaling must come from one crystal'

        # if the user restricted the frame range, trim the corrected
        # intensity file down to just those batches
        xsh = XDSScalerHelper()
        xsh.set_working_directory(self.get_working_directory())
        hklin = self._sweep_information[epoch]['corrected_intensities']
        hklout = os.path.join(self.get_working_directory(),
                              '%s_%s_%s_%s_CORRECTED.HKL' %(
                                  pname, xname, dname, sname))
        sweep = intgr.get_integrater_sweep()
        if sweep.get_frames_to_process() is not None:
            offset = intgr.get_frame_offset()
            #print "offset: %d" %offset
            start, end = sweep.get_frames_to_process()
            start -= offset
            end -= offset
            #end += 1 ????
            #print "limiting batches: %d-%d" %(start, end)
            xsh.limit_batches(hklin, hklout, start, end)
            self._sweep_information[epoch]['corrected_intensities'] \
                = hklout

    # if there is more than one sweep then compare the lattices
    # and eliminate all but the lowest symmetry examples if
    # there are more than one...

    # -------------------------------------------------
    # Ensure that the integration lattices are the same
    # -------------------------------------------------

    need_to_return = False

    if len(self._sweep_information.keys()) > 1:

        lattices = []

        # FIXME run this stuff in parallel as well...

        for epoch in self._sweep_information.keys():

            intgr = self._sweep_information[epoch]['integrater']
            hklin = self._sweep_information[epoch][
                'corrected_intensities']
            refiner = intgr.get_integrater_refiner()

            if self._scalr_input_pointgroup:
                # user assigned the pointgroup - trust it, identity op
                pointgroup = self._scalr_input_pointgroup
                reindex_op = 'h,k,l'
                ntr = False

            else:
                pointgroup, reindex_op, ntr = \
                    self._pointless_indexer_jiffy(hklin, refiner)

                Debug.write('X1698: %s: %s' % (pointgroup, reindex_op))

            lattice = Syminfo.get_lattice(pointgroup)

            if not lattice in lattices:
                lattices.append(lattice)

            if ntr:

                # if we need to return, we should logically reset
                # any reindexing operator right? right here all
                # we are talking about is the correctness of
                # individual pointgroups?? Bug # 3373

                reindex_op = 'h,k,l'
                # actually, should this not be done "by magic"
                # when a new pointgroup is assigned in the
                # pointless indexer jiffy above?!

                intgr.set_integrater_reindex_operator(
                    reindex_op, compose = False)

                need_to_return = True

        # bug # 2433 - need to ensure that all of the lattice
        # conclusions were the same...

        if len(lattices) > 1:
            # order by symmetry and assert the lowest lattice everywhere
            ordered_lattices = []
            for l in lattices_in_order():
                if l in lattices:
                    ordered_lattices.append(l)

            correct_lattice = ordered_lattices[0]
            Debug.write('Correct lattice asserted to be %s' % \
                        correct_lattice)

            # transfer this information back to the indexers
            for epoch in self._sweep_information.keys():
                integrater = self._sweep_information[
                    epoch]['integrater']
                refiner = integrater.get_integrater_refiner()
                sname = integrater.get_integrater_sweep_name()

                if not refiner:
                    continue

                state = refiner.set_refiner_asserted_lattice(
                    correct_lattice)

                if state == refiner.LATTICE_CORRECT:
                    Debug.write('Lattice %s ok for sweep %s' % \
                                (correct_lattice, sname))
                elif state == refiner.LATTICE_IMPOSSIBLE:
                    raise RuntimeError, \
                          'Lattice %s impossible for %s' % \
                          (correct_lattice, sname)
                elif state == refiner.LATTICE_POSSIBLE:
                    # lattice accepted but sweep needs re-refinement
                    Debug.write('Lattice %s assigned for sweep %s' % \
                                (correct_lattice, sname))
                    need_to_return = True

    # if one or more of them was not in the lowest lattice,
    # need to return here to allow reprocessing

    if need_to_return:
        self.set_scaler_done(False)
        self.set_scaler_prepare_done(False)
        return

    # next if there is more than one sweep then generate
    # a merged reference reflection file to check that the
    # setting for all reflection files is the same...

    # if we get to here then all data was processed with the same
    # lattice

    # ----------------------------------------------------------
    # next ensure that all sweeps are set in the correct setting
    # ----------------------------------------------------------

    if self.get_scaler_reference_reflection_file():
        # explicit scaler-level reference file wins
        self._reference = self.get_scaler_reference_reflection_file()
        Debug.write('Using HKLREF %s' % self._reference)

        md = self._factory.Mtzdump()
        md.set_hklin(self.get_scaler_reference_reflection_file())
        md.dump()

        self._xds_spacegroup = Syminfo.spacegroup_name_to_number(
            md.get_spacegroup())

        Debug.write('Spacegroup %d' % self._xds_spacegroup)

    elif PhilIndex.params.xia2.settings.scale.reference_reflection_file:
        # otherwise fall back to the phil-parameter reference file
        self._reference = \
            PhilIndex.params.xia2.settings.scale.reference_reflection_file

        Debug.write('Using HKLREF %s' % self._reference)

        md = self._factory.Mtzdump()
        md.set_hklin(
            PhilIndex.params.xia2.settings.scale.reference_reflection_file)
        md.dump()

        self._xds_spacegroup = Syminfo.spacegroup_name_to_number(
            md.get_spacegroup())

        Debug.write('Spacegroup %d' % self._xds_spacegroup)

    params = PhilIndex.params
    use_brehm_diederichs = params.xia2.settings.use_brehm_diederichs
    if len(self._sweep_information.keys()) > 1 and use_brehm_diederichs:
        # resolve indexing ambiguity across sweeps with the Brehm &
        # Diederichs algorithm
        brehm_diederichs_files_in = []

        for epoch in self._sweep_information.keys():

            intgr = self._sweep_information[epoch]['integrater']
            hklin = self._sweep_information[epoch][
                'corrected_intensities']
            refiner = intgr.get_integrater_refiner()

            # in here need to consider what to do if the user has
            # assigned the pointgroup on the command line ...

            if not self._scalr_input_pointgroup:
                pointgroup, reindex_op, ntr = \
                    self._pointless_indexer_jiffy(hklin, refiner)

                if ntr:

                    # Bug # 3373

                    Debug.write('Reindex to standard (PIJ): %s' % \
                                reindex_op)

                    intgr.set_integrater_reindex_operator(
                        reindex_op, compose = False)
                    reindex_op = 'h,k,l'
                    need_to_return = True

            else:

                # 27/FEB/08 to support user assignment of pointgroups

                Debug.write('Using input pointgroup: %s' % \
                            self._scalr_input_pointgroup)
                pointgroup = self._scalr_input_pointgroup
                reindex_op = 'h,k,l'

            intgr.set_integrater_reindex_operator(reindex_op)
            intgr.set_integrater_spacegroup_number(
                Syminfo.spacegroup_name_to_number(pointgroup))
            self._sweep_information[epoch]['corrected_intensities'] \
                = intgr.get_integrater_corrected_intensities()

            # convert the XDS_ASCII for this sweep to mtz - on the next
            # get this should be in the correct setting...

            dname = self._sweep_information[epoch]['dname']
            sname = intgr.get_integrater_sweep_name()
            hklin = self._sweep_information[epoch][
                'corrected_intensities']
            hklout = os.path.join(self.get_working_directory(),
                                  '%s_%s.mtz' % (dname, sname))

            FileHandler.record_temporary_file(hklout)

            # now use pointless to make this conversion

            pointless = self._factory.Pointless()
            pointless.set_xdsin(hklin)
            pointless.set_hklout(hklout)
            pointless.xds_to_mtz()
            brehm_diederichs_files_in.append(hklout)

        # now run cctbx.brehm_diederichs to figure out the indexing hand for
        # each sweep
        from xia2.Wrappers.Cctbx.BrehmDiederichs import BrehmDiederichs
        brehm_diederichs = BrehmDiederichs()
        brehm_diederichs.set_working_directory(
            self.get_working_directory())
        auto_logfiler(brehm_diederichs)
        brehm_diederichs.set_input_filenames(brehm_diederichs_files_in)
        # 1 or 3? 1 seems to work better?
        brehm_diederichs.set_asymmetric(1)
        brehm_diederichs.run()
        reindexing_dict = brehm_diederichs.get_reindexing_dict()

        for epoch in self._sweep_information.keys():

            intgr = self._sweep_information[epoch]['integrater']

            dname = self._sweep_information[epoch]['dname']
            sname = intgr.get_integrater_sweep_name()
            hklin = self._sweep_information[epoch][
                'corrected_intensities']
            hklout = os.path.join(self.get_working_directory(),
                                  '%s_%s.mtz' % (dname, sname))

            # apply the reindexing operator
            # NOTE(review): this applies the reindex_op left over from the
            # preceding loop, NOT this sweep's entry from reindexing_dict
            # (cf. the CCP4 scaler, which looks up reindexing_dict per
            # file) - looks like a bug; confirm against upstream history
            intgr.set_integrater_reindex_operator(reindex_op)

            # and copy the reflection file to the local directory
            hklin = self._sweep_information[epoch][
                'corrected_intensities']
            hklout = os.path.join(self.get_working_directory(),
                                  '%s_%s.HKL' % (dname, sname))

            Debug.write('Copying %s to %s' % (hklin, hklout))
            shutil.copyfile(hklin, hklout)

            # record just the local file name...
            self._sweep_information[epoch][
                'prepared_reflections'] = os.path.split(hklout)[-1]

    elif len(self._sweep_information.keys()) > 1 and \
           not self._reference:
        # need to generate a reference reflection file - generate this
        # from the reflections in self._first_epoch
        #
        # FIXME this should really use the Brehm and Diederichs method
        # if you have lots of little sweeps...

        intgr = self._sweep_information[self._first_epoch]['integrater']

        # NOTE(review): 'epoch' here is whatever value the gather loop
        # left behind, while intgr is for self._first_epoch - the two may
        # refer to different sweeps; confirm this mismatch is intended
        hklin = self._sweep_information[epoch]['corrected_intensities']
        refiner = intgr.get_integrater_refiner()

        if self._scalr_input_pointgroup:
            Debug.write('Using input pointgroup: %s' % \
                        self._scalr_input_pointgroup)
            pointgroup = self._scalr_input_pointgroup
            ntr = False
            reindex_op = 'h,k,l'

        else:
            pointgroup, reindex_op, ntr = self._pointless_indexer_jiffy(
                hklin, refiner)

            Debug.write('X1698: %s: %s' % (pointgroup, reindex_op))

        # NOTE(review): reference_reindex_op is never used afterwards
        reference_reindex_op = intgr.get_integrater_reindex_operator()

        if ntr:

            # Bug # 3373

            intgr.set_integrater_reindex_operator(
                reindex_op, compose = False)
            reindex_op = 'h,k,l'
            need_to_return = True

        self._xds_spacegroup = Syminfo.spacegroup_name_to_number(
            pointgroup)

        # next pass this reindexing operator back to the source
        # of the reflections

        intgr.set_integrater_reindex_operator(reindex_op)
        intgr.set_integrater_spacegroup_number(
            Syminfo.spacegroup_name_to_number(pointgroup))
        self._sweep_information[epoch]['corrected_intensities'] \
            = intgr.get_integrater_corrected_intensities()

        hklin = self._sweep_information[epoch]['corrected_intensities']

        hklout = os.path.join(self.get_working_directory(),
                              'xds-pointgroup-reference-unsorted.mtz')
        FileHandler.record_temporary_file(hklout)

        # now use pointless to handle this conversion

        pointless = self._factory.Pointless()
        pointless.set_xdsin(hklin)
        pointless.set_hklout(hklout)
        pointless.xds_to_mtz()

        self._reference = hklout

    if self._reference:

        from xia2.Driver.DriverFactory import DriverFactory

        def run_one_sweep(args):
            # Per-sweep worker: decide the pointgroup, convert XDS_ASCII
            # to MTZ, reindex against self._reference via pointless, and
            # copy the prepared reflections into the working directory.
            # Packed-tuple args so it can be dispatched via easy_mp.

            sweep_information = args[0]
            pointless_indexer_jiffy = args[1]
            factory = args[2]
            job_type = args[3]

            if job_type:
                DriverFactory.set_driver_type(job_type)

            intgr = sweep_information['integrater']
            hklin = sweep_information['corrected_intensities']
            refiner = intgr.get_integrater_refiner()

            # in here need to consider what to do if the user has
            # assigned the pointgroup on the command line ...

            if not self._scalr_input_pointgroup:
                pointgroup, reindex_op, ntr = \
                    self._pointless_indexer_jiffy(hklin, refiner)

                if ntr:

                    # Bug # 3373

                    Debug.write('Reindex to standard (PIJ): %s' % \
                                reindex_op)

                    intgr.set_integrater_reindex_operator(
                        reindex_op, compose = False)
                    reindex_op = 'h,k,l'
                    # NOTE(review): rebinds a closure-local name - the
                    # enclosing method's need_to_return flag is NOT
                    # updated from inside this worker; confirm intended
                    need_to_return = True

            else:

                # 27/FEB/08 to support user assignment of pointgroups

                Debug.write('Using input pointgroup: %s' % \
                            self._scalr_input_pointgroup)
                pointgroup = self._scalr_input_pointgroup
                reindex_op = 'h,k,l'

            intgr.set_integrater_reindex_operator(reindex_op)
            intgr.set_integrater_spacegroup_number(
                Syminfo.spacegroup_name_to_number(pointgroup))
            sweep_information['corrected_intensities'] \
                = intgr.get_integrater_corrected_intensities()

            # convert the XDS_ASCII for this sweep to mtz - on the next
            # get this should be in the correct setting...

            hklin = sweep_information['corrected_intensities']

            # now use pointless to make this conversion

            # try with no conversion?!

            pointless = self._factory.Pointless()
            pointless.set_xdsin(hklin)
            hklout = os.path.join(
                self.get_working_directory(),
                '%d_xds-pointgroup-unsorted.mtz' %pointless.get_xpid())
            FileHandler.record_temporary_file(hklout)
            pointless.set_hklout(hklout)
            pointless.xds_to_mtz()

            # second pointless run: compare against the reference to fix
            # the setting / indexing hand
            pointless = self._factory.Pointless()
            pointless.set_hklin(hklout)
            pointless.set_hklref(self._reference)
            pointless.decide_pointgroup()

            pointgroup = pointless.get_pointgroup()
            reindex_op = pointless.get_reindex_operator()

            # for debugging print out the reindexing operations and
            # what have you...

            Debug.write('Reindex to standard: %s' % reindex_op)

            # this should send back enough information that this
            # is in the correct pointgroup (from the call above) and
            # also in the correct setting, from the interaction
            # with the reference set... - though I guess that the
            # spacegroup number should not have changed, right?

            # set the reindex operation afterwards... though if the
            # spacegroup number is the same this should make no
            # difference, right?!

            intgr.set_integrater_spacegroup_number(
                Syminfo.spacegroup_name_to_number(pointgroup))
            intgr.set_integrater_reindex_operator(reindex_op)

            sweep_information['corrected_intensities'] \
                = intgr.get_integrater_corrected_intensities()

            # and copy the reflection file to the local directory

            dname = sweep_information['dname']
            sname = intgr.get_integrater_sweep_name()
            hklin = sweep_information['corrected_intensities']
            hklout = os.path.join(self.get_working_directory(),
                                  '%s_%s.HKL' % (dname, sname))

            Debug.write('Copying %s to %s' % (hklin, hklout))
            shutil.copyfile(hklin, hklout)

            # record just the local file name...
            sweep_information['prepared_reflections'] \
                = os.path.split(hklout)[-1]
            return sweep_information

        from libtbx import easy_mp
        params = PhilIndex.get_python_object()
        mp_params = params.xia2.settings.multiprocessing
        njob = mp_params.njob

        if njob > 1:
            # cache drivertype
            drivertype = DriverFactory.get_driver_type()

            # fan the per-sweep work out over njob threads
            args = [
                (self._sweep_information[epoch],
                 self._pointless_indexer_jiffy,
                 self._factory, mp_params.type)
                for epoch in self._sweep_information.keys()]
            results_list = easy_mp.parallel_map(
                run_one_sweep, args, params=None,
                processes=njob,
                method="threading",
                asynchronous=True,
                callback=None,
                preserve_order=True,
                preserve_exception_message=True)

            # restore drivertype
            DriverFactory.set_driver_type(drivertype)

            # results should be given back in the same order
            for i, epoch in enumerate(self._sweep_information.keys()):
                self._sweep_information[epoch] = results_list[i]

        else:
            # serial fallback - same worker, in-process
            for epoch in self._sweep_information.keys():
                self._sweep_information[epoch] = run_one_sweep(
                    (self._sweep_information[epoch],
                     self._pointless_indexer_jiffy,
                     self._factory, None))

    else:
        # single sweep, no reference: convert and reindex this sweep alone
        # convert the XDS_ASCII for this sweep to mtz

        epoch = self._first_epoch
        intgr = self._sweep_information[epoch]['integrater']
        refiner = intgr.get_integrater_refiner()
        sname = intgr.get_integrater_sweep_name()

        hklout = os.path.join(self.get_working_directory(),
                              '%s-pointless.mtz' % sname)
        FileHandler.record_temporary_file(hklout)

        pointless = self._factory.Pointless()
        pointless.set_xdsin(
            self._sweep_information[epoch]['corrected_intensities'])
        pointless.set_hklout(hklout)
        pointless.xds_to_mtz()

        # run it through pointless interacting with the
        # Indexer which belongs to this sweep

        hklin = hklout

        if self._scalr_input_pointgroup:
            Debug.write('Using input pointgroup: %s' % \
                        self._scalr_input_pointgroup)
            pointgroup = self._scalr_input_pointgroup
            ntr = False
            reindex_op = 'h,k,l'

        else:
            pointgroup, reindex_op, ntr = self._pointless_indexer_jiffy(
                hklin, refiner)

        if ntr:

            # if we need to return, we should logically reset
            # any reindexing operator right? right here all
            # we are talking about is the correctness of
            # individual pointgroups?? Bug # 3373

            reindex_op = 'h,k,l'
            intgr.set_integrater_reindex_operator(
                reindex_op, compose = False)

            need_to_return = True

        self._xds_spacegroup = Syminfo.spacegroup_name_to_number(
            pointgroup)

        # next pass this reindexing operator back to the source
        # of the reflections

        intgr.set_integrater_reindex_operator(reindex_op)
        intgr.set_integrater_spacegroup_number(
            Syminfo.spacegroup_name_to_number(pointgroup))
        self._sweep_information[epoch]['corrected_intensities'] \
            = intgr.get_integrater_corrected_intensities()

        hklin = self._sweep_information[epoch]['corrected_intensities']
        dname = self._sweep_information[epoch]['dname']
        hklout = os.path.join(self.get_working_directory(),
                              '%s_%s.HKL' % (dname, sname))

        # and copy the reflection file to the local
        # directory

        Debug.write('Copying %s to %s' % (hklin, hklout))
        shutil.copyfile(hklin, hklout)

        # record just the local file name...
        self._sweep_information[epoch][
            'prepared_reflections'] = os.path.split(hklout)[-1]

    if need_to_return:
        self.set_scaler_done(False)
        self.set_scaler_prepare_done(False)
        return

    # average the per-sweep unit cells, weighted by reflection count
    unit_cell_list = []

    for epoch in self._sweep_information.keys():
        integrater = self._sweep_information[epoch]['integrater']
        cell = integrater.get_integrater_cell()
        n_ref = integrater.get_integrater_n_ref()

        Debug.write('Cell for %s: %.2f %.2f %.2f %.2f %.2f %.2f' % \
                    (integrater.get_integrater_sweep_name(),
                     cell[0], cell[1], cell[2],
                     cell[3], cell[4], cell[5]))
        Debug.write('=> %d reflections' % n_ref)

        unit_cell_list.append((cell, n_ref))

    self._scalr_cell = compute_average_unit_cell(unit_cell_list)

    self._scalr_resolution_limits = { }

    Debug.write('Determined unit cell: %.2f %.2f %.2f %.2f %.2f %.2f' % \
                tuple(self._scalr_cell))

    # clear any stale XDS rejection file left from a previous round
    if os.path.exists(os.path.join(
        self.get_working_directory(),
        'REMOVE.HKL')):
        os.remove(os.path.join(
            self.get_working_directory(),
            'REMOVE.HKL'))

        Debug.write('Deleting REMOVE.HKL at end of scale prepare.')

    return
def _setup_from_xinfo_file(self, xinfo_file):
    """Set up this object & all subobjects based on the .xinfo file
    contents.

    Builds the XCrystal / XSample / XWavelength / XSweep hierarchy under
    this project from the parsed .xinfo file, honouring any sweep id /
    range restrictions given in the PHIL settings.

    :param xinfo_file: path to the .xinfo file to parse
    :raises RuntimeError: if a crystal lists no wavelengths, if a sweep
        does not name a sample when more than one sample is defined, or
        if a RESOLUTION entry does not contain one or two values
    """
    settings = PhilIndex.params.xia2.settings

    # honour any command-line sweep selection; None means "all sweeps"
    sweep_ids = [sweep.id for sweep in settings.sweep]
    sweep_ranges = [sweep.range for sweep in settings.sweep]
    if not sweep_ids:
        sweep_ids = None
        sweep_ranges = None

    xinfo = XInfo(xinfo_file, sweep_ids=sweep_ids, sweep_ranges=sweep_ranges)

    self._name = xinfo.get_project()
    crystals = xinfo.get_crystals()

    for crystal in crystals:
        xc = XCrystal(crystal, self)

        if "sequence" in crystals[crystal]:
            xc.set_aa_sequence(crystals[crystal]["sequence"])
        if "ha_info" in crystals[crystal]:
            if crystals[crystal]["ha_info"] != {}:
                xc.set_ha_info(crystals[crystal]["ha_info"])

        if "scaled_merged_reflection_file" in crystals[crystal]:
            # BUG FIX: this used to read the key "scaled_merged_reflections",
            # which is not the key tested by the guard above, so taking this
            # branch always raised KeyError.  Read the key that was checked.
            xc.set_scaled_merged_reflections(
                crystals[crystal]["scaled_merged_reflection_file"])
        if "reference_reflection_file" in crystals[crystal]:
            xc.set_reference_reflection_file(
                crystals[crystal]["reference_reflection_file"])
        if "freer_file" in crystals[crystal]:
            xc.set_freer_file(crystals[crystal]["freer_file"])

        # user assigned spacegroup
        if "user_spacegroup" in crystals[crystal]:
            xc.set_user_spacegroup(crystals[crystal]["user_spacegroup"])
        elif settings.space_group is not None:
            # XXX do we ever actually get here?
            xc.set_user_spacegroup(
                settings.space_group.type().lookup_symbol())

        # add a default sample if none present in xinfo file
        if not crystals[crystal]["samples"]:
            crystals[crystal]["samples"]["X1"] = {}

        for sample in crystals[crystal]["samples"]:
            xsample = XSample(sample, xc)
            xc.add_sample(xsample)

        if not crystals[crystal]["wavelengths"]:
            raise RuntimeError("No wavelengths specified in xinfo file")

        for wavelength, wave_info in crystals[crystal][
                "wavelengths"].items():
            # FIXME 29/NOV/06 in here need to be able to cope with
            # no wavelength information - this should default to the
            # information in the image header (John Cowan pointed
            # out that this was untidy - requiring that it agrees
            # with the value in the header makes this almost
            # useless.)
            if "wavelength" not in wave_info:
                logger.debug("No wavelength value given for wavelength %s",
                             wavelength)
            else:
                logger.debug(
                    "Overriding value for wavelength %s to %8.6f",
                    wavelength,
                    float(wave_info["wavelength"]),
                )

            # handle case where user writes f" in place of f''
            if 'f"' in wave_info and "f''" not in wave_info:
                wave_info["f''"] = wave_info['f"']

            xw = XWavelength(
                wavelength,
                xc,
                wavelength=wave_info.get("wavelength", 0.0),
                f_pr=wave_info.get("f'", 0.0),
                f_prpr=wave_info.get("f''", 0.0),
                dmin=wave_info.get("dmin", 0.0),
                dmax=wave_info.get("dmax", 0.0),
            )

            # in here I also need to look and see if we have
            # been given any scaled reflection files...

            # check to see if we have a user supplied lattice...
            if "user_spacegroup" in crystals[crystal]:
                lattice = Syminfo.get_lattice(
                    crystals[crystal]["user_spacegroup"])
            elif settings.space_group is not None:
                # XXX do we ever actually get here?
                lattice = Syminfo.get_lattice(
                    settings.space_group.type().lookup_symbol())
            else:
                lattice = None

            # and also user supplied cell constants - from either
            # the xinfo file (the first port of call) or the
            # command-line.
            if "user_cell" in crystals[crystal]:
                cell = crystals[crystal]["user_cell"]
            elif settings.unit_cell is not None:
                # XXX do we ever actually get here?
                cell = settings.unit_cell.parameters()
            else:
                cell = None

            dmin = wave_info.get("dmin", 0.0)
            dmax = wave_info.get("dmax", 0.0)
            if dmin == 0.0 and dmax == 0.0:
                # fall back to the global resolution limits
                dmin = PhilIndex.params.xia2.settings.resolution.d_min
                dmax = PhilIndex.params.xia2.settings.resolution.d_max

            # want to be able to locally override the resolution limits
            # for this sweep while leaving the rest for the data set
            # intact...
            for sweep_name, sweep_info in crystals[crystal][
                    "sweeps"].items():
                sample_name = sweep_info.get("sample")
                if sample_name is None:
                    # a sample need not be named when it is unambiguous
                    if len(crystals[crystal]["samples"]) == 1:
                        sample_name = list(crystals[crystal]["samples"])[0]
                    else:
                        raise RuntimeError("No sample given for sweep %s"
                                           % sweep_name)

                xsample = xc.get_xsample(sample_name)
                assert xsample is not None

                # save the wavelength-level limits so a per-sweep
                # RESOLUTION override does not leak to later sweeps
                dmin_old = dmin
                dmax_old = dmax

                if "RESOLUTION" in sweep_info:
                    values = [
                        float(x) for x in sweep_info["RESOLUTION"].split()
                    ]
                    if len(values) == 1:
                        dmin = values[0]
                    elif len(values) == 2:
                        dmin = min(values)
                        dmax = max(values)
                    else:
                        raise RuntimeError("bad resolution for sweep %s"
                                           % sweep_name)

                # only attach the sweep to the wavelength it belongs to
                if sweep_info["wavelength"] == wavelength:
                    frames_to_process = sweep_info.get("start_end")
                    xsweep = xw.add_sweep(
                        sweep_name,
                        sample=xsample,
                        directory=sweep_info.get("DIRECTORY"),
                        image=sweep_info.get("IMAGE"),
                        beam=sweep_info.get("beam"),
                        reversephi=sweep_info.get("reversephi", False),
                        distance=sweep_info.get("distance"),
                        gain=float(sweep_info.get("GAIN", 0.0)),
                        dmin=dmin,
                        dmax=dmax,
                        polarization=float(
                            sweep_info.get("POLARIZATION", 0.0)),
                        frames_to_process=frames_to_process,
                        user_lattice=lattice,
                        user_cell=cell,
                        epoch=sweep_info.get("epoch", 0),
                        ice=sweep_info.get("ice", False),
                        excluded_regions=sweep_info.get(
                            "excluded_regions", []),
                    )
                    xsample.add_sweep(xsweep)

                # restore the wavelength-level resolution limits
                dmin = dmin_old
                dmax = dmax_old

            xc.add_wavelength(xw)

        self.add_crystal(xc)
def decide_pointgroup(self, ignore_errors=False, batches=None):
    '''Decide on the correct pointgroup for hklin.

    Runs the CCP4 program Pointless on either the MTZ (hklin) or XDS
    (xdsin) input, optionally against a reference file (hklref), then
    parses both the text log and the XML output to populate
    self._pointgroup, self._confidence, self._totalprob,
    self._reindex_matrix, self._reindex_operator and (without a
    reference) self._possible_lattices / self._lattice_to_laue.

    :param ignore_errors: if True, downgrade certain fatal Pointless
        errors (no resolution overlap with reference etc.) to a
        fall-back on the HKLREF symmetry instead of raising
    :param batches: optional (first, last) batch limits passed through
        as a RUN keyword (see xia2 issue #125)
    :returns: 'ok' on success
    :raises RuntimeError: on unhandled Pointless errors, incompatible
        symmetries/cells against the reference, or unparseable output
    '''
    # select input source: MTZ via HKLIN, or an XDS reflection file
    if not self._xdsin:
        self.check_hklin()
        self.set_task('Computing the correct pointgroup for %s' %
                      self.get_hklin())
    else:
        Debug.write('Pointless using XDS input file %s' %
                    self._xdsin)
        self.set_task('Computing the correct pointgroup for %s' %
                      self.get_xdsin())

    # FIXME this should probably be a standard CCP4 keyword
    if self._xdsin:
        self.add_command_line('xdsin')
        self.add_command_line(self._xdsin)

    # always request XML output - it is parsed below for the solution
    self.add_command_line('xmlout')
    self.add_command_line('%d_pointless.xml' % self.get_xpid())

    if self._hklref:
        self.add_command_line('hklref')
        self.add_command_line(self._hklref)

    self.start()

    if self._allow_out_of_sequence_files:
        self.input('allow outofsequencefiles')

    # https://github.com/xia2/xia2/issues/125 pass in run limits for this
    # HKLIN file - prevents automated RUN determination from causing errors
    if batches:
        self.input('run 1 batch %d to %d' % tuple(batches))

    self.input('systematicabsences off')
    self.input('setting symmetry-based')

    if self._hklref:
        # user-tunable cell tolerance when comparing against a reference
        dev = PhilIndex.params.xia2.settings.developmental
        if dev.pointless_tolerance > 0.0:
            self.input('tolerance %f' % dev.pointless_tolerance)

    # may expect more %age variation for small molecule data
    if PhilIndex.params.xia2.settings.small_molecule == True:
        if self._hklref:
            # NOTE: when both small_molecule and a developmental tolerance
            # are set this second TOLERANCE keyword is also sent; Pointless
            # presumably takes the last one - TODO confirm
            self.input('tolerance 5.0')
    if PhilIndex.params.xia2.settings.symmetry.chirality is not None:
        self.input('chirality %s' %
                   PhilIndex.params.xia2.settings.symmetry.chirality)

    if self._input_laue_group:
        self.input('lauegroup %s' % self._input_laue_group)

    self.close_wait()

    # check for errors
    self.check_for_errors()

    # check for fatal errors
    output = self.get_all_output()

    fatal_error = False

    for j, record in enumerate(output):
        if 'FATAL ERROR message:' in record:
            if ignore_errors:
                fatal_error = True
            else:
                # the actual message is on the following log line
                raise RuntimeError('Pointless error: %s' %
                                   output[j + 1].strip())
        # the following three conditions are only "fatal" when we were
        # told to ignore errors - they trigger the HKLREF fall-back below
        if 'Resolution range of Reference data and observed data do not' \
                in record and ignore_errors:
            fatal_error = True
        if 'All reflection pairs rejected' in record and ignore_errors:
            fatal_error = True
        if 'Reference data and observed data do not overlap' in record and \
                ignore_errors:
            fatal_error = True

    hklin_spacegroup = ''
    hklin_lattice = ''

    # split loop - first seek hklin symmetry then later look for everything
    # else
    for o in self.get_all_output():
        if 'Spacegroup from HKLIN file' in o:
            hklin_spacegroup = spacegroup_name_xHM_to_old(
                o.replace('Spacegroup from HKLIN file :', '').strip())
            hklin_lattice = Syminfo.get_lattice(hklin_spacegroup)
        if 'Space group from HKLREF file' in o:
            hklref_spacegroup = spacegroup_name_xHM_to_old(
                o.replace('Space group from HKLREF file :', '').strip())
            hklref_lattice = Syminfo.get_lattice(hklref_spacegroup)

    # https://github.com/xia2/xia2/issues/115
    # fall back on the reference symmetry when a tolerated fatal error
    # occurred - identity reindex, full confidence
    if fatal_error:
        # NOTE(review): if the HKLREF line never appeared in the log,
        # hklref_spacegroup is unbound here and this assert raises
        # NameError rather than AssertionError - verify against the
        # Pointless log format
        assert hklref_spacegroup
        self._pointgroup = hklref_spacegroup
        self._confidence = 1.0
        self._totalprob = 1.0
        self._reindex_matrix = [
            1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0
        ]
        self._reindex_operator = 'h,k,l'
        return 'ok'

    for o in self.get_all_output():
        if 'No alternative indexing possible' in o:
            # then the XML file will be broken - no worries...
            self._pointgroup = hklin_spacegroup
            self._confidence = 1.0
            self._totalprob = 1.0
            self._reindex_matrix = [
                1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0
            ]
            self._reindex_operator = 'h,k,l'
            return 'ok'

        if '**** Incompatible symmetries ****' in o:
            raise RuntimeError(
                'reindexing against a reference with different symmetry')

        if '***** Stopping because cell discrepancy between files' in o:
            raise RuntimeError('incompatible unit cells between data sets')

        if 'L-test suggests that the data may be twinned' in o:
            self._probably_twinned = True

    # parse the XML file for the information I need...
    xml_file = os.path.join(self.get_working_directory(),
                            '%d_pointless.xml' % self.get_xpid())
    mend_pointless_xml(xml_file)

    # catch the case sometimes on ppc mac where pointless adds
    # an extra .xml on the end...
    if not os.path.exists(xml_file) and \
            os.path.exists('%s.xml' % xml_file):
        xml_file = '%s.xml' % xml_file

    if not self._hklref:
        # no reference: the best Laue solution lives in <BestSolution>
        dom = xml.dom.minidom.parse(xml_file)

        try:
            best = dom.getElementsByTagName('BestSolution')[0]
        except IndexError:
            raise RuntimeError('error getting solution from pointless')

        self._pointgroup = best.getElementsByTagName(
            'GroupName')[0].childNodes[0].data
        self._confidence = float(
            best.getElementsByTagName('Confidence')
            [0].childNodes[0].data)
        self._totalprob = float(
            best.getElementsByTagName('TotalProb')
            [0].childNodes[0].data)
        # NOTE(review): on Python 3 map() returns a lazy iterator, not a
        # list - downstream consumers of _reindex_matrix may expect a
        # list; confirm which Python this module targets
        self._reindex_matrix = map(
            float,
            best.getElementsByTagName('ReindexMatrix')
            [0].childNodes[0].data.split())
        self._reindex_operator = clean_reindex_operator(
            best.getElementsByTagName('ReindexOperator')
            [0].childNodes[0].data.strip())
    else:
        # if we have provided a HKLREF input then the xml output
        # is changed...

        # FIXME in here, need to check if there is the legend
        # "No possible alternative indexing" in the standard
        # output, as this will mean that the index scores are
        # not there... c/f oppf1314, with latest pointless build
        # 1.2.14.
        dom = xml.dom.minidom.parse(xml_file)

        try:
            best = dom.getElementsByTagName('IndexScores')[0]
        except IndexError:
            Debug.write('Reindex not found in xml output')

            # check for this legend then
            found = False
            for record in self.get_all_output():
                if 'No possible alternative indexing' in record:
                    found = True

            if not found:
                raise RuntimeError('error finding solution')

            best = None

        hklref_pointgroup = ''

        # FIXME need to get this from the reflection file HKLREF
        # find the HKLREF stream among the <ReflectionFile> elements
        reflection_file_elements = dom.getElementsByTagName(
            'ReflectionFile')

        for rf in reflection_file_elements:
            stream = rf.getAttribute('stream')
            if stream == 'HKLREF':
                hklref_pointgroup = rf.getElementsByTagName(
                    'SpacegroupName')[0].childNodes[0].data.strip()
                # Chatter.write('HKLREF pointgroup is %s' % \
                # hklref_pointgroup)

        if hklref_pointgroup == '':
            raise RuntimeError('error finding HKLREF pointgroup')

        # with a reference, adopt its pointgroup with full confidence
        self._pointgroup = hklref_pointgroup
        self._confidence = 1.0
        self._totalprob = 1.0

        if best:
            index = best.getElementsByTagName('Index')[0]

            # see NOTE above about map() laziness on Python 3
            self._reindex_matrix = map(
                float,
                index.getElementsByTagName('ReindexMatrix')
                [0].childNodes[0].data.split())
            self._reindex_operator = clean_reindex_operator(
                index.getElementsByTagName('ReindexOperator')
                [0].childNodes[0].data.strip())
        else:
            # no alternative indexing is possible so just
            # assume the default...
            self._reindex_matrix = [
                1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0
            ]
            self._reindex_operator = 'h,k,l'

    if not self._input_laue_group and not self._hklref:
        # harvest candidate lattices from the Laue group score list
        scorelist = dom.getElementsByTagName('LaueGroupScoreList')[0]
        scores = scorelist.getElementsByTagName('LaueGroupScore')

        for s in scores:
            lauegroup = s.getElementsByTagName(
                'LaueGroupName')[0].childNodes[0].data
            netzc = float(
                s.getElementsByTagName('NetZCC')[0].childNodes[0].data)

            # record this as a possible lattice if its Z score is positive
            lattice = lauegroup_to_lattice(lauegroup)
            if not lattice in self._possible_lattices:
                if netzc > 0.0:
                    self._possible_lattices.append(lattice)

                # do we not always want to have access to the
                # solutions, even if they are unlikely - this will
                # only be invoked if they are known to
                # be right...
                self._lattice_to_laue[lattice] = lauegroup

    return 'ok'
def _scale_prepare(self):
    '''Perform all of the preparation required to deliver the scaled
    data. This should sort together the reflection files, ensure that
    they are correctly indexed (via pointless) and generally tidy
    things up.

    Side effects: populates self._sweep_handler, self._scalr_pname /
    self._scalr_xname / self._scalr_cell-related state, reindexes the
    integraters in place, and may call set_scaler_done(False) /
    set_scaler_prepare_done(False) and return early to request a
    reprocessing cycle.
    '''

    # acknowledge all of the programs we are about to use...
    Citations.cite('pointless')
    Citations.cite('aimless')
    Citations.cite('ccp4')

    # ---------- GATHER ----------

    self._sweep_handler = SweepInformationHandler(self._scalr_integraters)

    Journal.block(
        'gathering', self.get_scaler_xcrystal().get_name(), 'CCP4',
        {'working directory':self.get_working_directory()})

    # drop any sweeps the user explicitly excluded via PHIL
    for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)
        pname, xname, dname = si.get_project_info()
        sname = si.get_sweep_name()

        exclude_sweep = False

        for sweep in PhilIndex.params.xia2.settings.sweep:
            if sweep.id == sname and sweep.exclude:
                exclude_sweep = True
                break

        if exclude_sweep:
            self._sweep_handler.remove_epoch(epoch)
            Debug.write('Excluding sweep %s' % sname)
        else:
            Journal.entry({'adding data from':'%s/%s/%s' % \
                           (xname, dname, sname)})

    # gather data for all images which belonged to the parent
    # crystal - allowing for the fact that things could go wrong
    # e.g. epoch information not available, exposure times not in
    # headers etc...

    for e in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(e)
        assert is_mtz_file(si.get_reflections())

    p, x = self._sweep_handler.get_project_info()
    self._scalr_pname = p
    self._scalr_xname = x

    # verify that the lattices are consistent, calling eliminate if
    # they are not N.B. there could be corner cases here

    need_to_return = False

    multi_sweep_indexing = \
        PhilIndex.params.xia2.settings.multi_sweep_indexing == True

    if len(self._sweep_handler.get_epochs()) > 1:

        # if we have multi-sweep-indexing going on then logic says all should
        # share common lattice & UB definition => this is not used here?
        if multi_sweep_indexing and not self._scalr_input_pointgroup:
            # merge all sweeps into one rebatched file and run a single
            # multi-sweep pointless job on it
            pointless_hklins = []

            max_batches = 0
            for epoch in self._sweep_handler.get_epochs():
                si = self._sweep_handler.get_sweep_information(epoch)
                hklin = si.get_reflections()

                batches = MtzUtils.batches_from_mtz(hklin)
                if 1 + max(batches) - min(batches) > max_batches:
                    max_batches = max(batches) - min(batches) + 1

            from xia2.lib.bits import nifty_power_of_ten

            Debug.write('Biggest sweep has %d batches' % max_batches)
            # round the per-sweep batch offset up to a power of ten
            max_batches = nifty_power_of_ten(max_batches)

            counter = 0

            refiners = []

            for epoch in self._sweep_handler.get_epochs():
                si = self._sweep_handler.get_sweep_information(epoch)
                hklin = si.get_reflections()
                integrater = si.get_integrater()
                refiner = integrater.get_integrater_refiner()
                refiners.append(refiner)

                hklin = self._prepare_pointless_hklin(
                    hklin, si.get_integrater().get_phi_width())

                # NOTE(review): pname/xname/dname here are leftovers
                # from the GATHER loop above, i.e. the values of the
                # last sweep considered - confirm this is intended
                hklout = os.path.join(self.get_working_directory(),
                                      '%s_%s_%s_%s_prepointless.mtz' % \
                                      (pname, xname, dname,
                                       si.get_sweep_name()))

                # we will want to delete this one exit
                FileHandler.record_temporary_file(hklout)

                first_batch = min(si.get_batches())
                si.set_batch_offset(counter * max_batches - first_batch + 1)

                from xia2.Modules.Scaler.rebatch import rebatch
                new_batches = rebatch(
                    hklin, hklout, first_batch=counter * max_batches + 1,
                    pname=pname, xname=xname, dname=dname)

                pointless_hklins.append(hklout)

                # update the counter & recycle
                counter += 1

            s = self._factory.Sortmtz()

            pointless_hklin = os.path.join(self.get_working_directory(),
                                           '%s_%s_prepointless_sorted.mtz' % \
                                           (self._scalr_pname,
                                            self._scalr_xname))

            s.set_hklout(pointless_hklin)

            for hklin in pointless_hklins:
                s.add_hklin(hklin)

            s.sort()

            # FIXME xia2-51 in here look at running constant scaling on the
            # pointless hklin to put the runs on the same scale. Ref=[A]

            pointless_const = os.path.join(self.get_working_directory(),
                                           '%s_%s_prepointless_const.mtz' % \
                                           (self._scalr_pname,
                                            self._scalr_xname))
            FileHandler.record_temporary_file(pointless_const)

            aimless_const = self._factory.Aimless()
            aimless_const.set_hklin(pointless_hklin)
            aimless_const.set_hklout(pointless_const)
            aimless_const.const()

            # aimless writes the unmerged output alongside; use that
            pointless_const = os.path.join(self.get_working_directory(),
                                           '%s_%s_prepointless_const_unmerged.mtz' % \
                                           (self._scalr_pname,
                                            self._scalr_xname))
            FileHandler.record_temporary_file(pointless_const)

            pointless_hklin = pointless_const

            # FIXME xia2-51 in here need to pass all refiners to ensure that the
            # information is passed back to all of them not just the last one...

            Debug.write('Running multisweep pointless for %d sweeps' %
                        len(refiners))
            pointgroup, reindex_op, ntr, pt = \
                self._pointless_indexer_multisweep(pointless_hklin,
                                                   refiners)

            Debug.write('X1698: %s: %s' % (pointgroup, reindex_op))

            lattices = [Syminfo.get_lattice(pointgroup)]

            for epoch in self._sweep_handler.get_epochs():
                si = self._sweep_handler.get_sweep_information(epoch)
                intgr = si.get_integrater()
                hklin = si.get_reflections()
                refiner = intgr.get_integrater_refiner()

                if ntr:
                    # need-to-return: reset and request reprocessing
                    intgr.integrater_reset_reindex_operator()
                    need_to_return = True

        else:
            # per-sweep pointgroup determination
            lattices = []

            for epoch in self._sweep_handler.get_epochs():

                si = self._sweep_handler.get_sweep_information(epoch)
                intgr = si.get_integrater()
                hklin = si.get_reflections()
                refiner = intgr.get_integrater_refiner()

                if self._scalr_input_pointgroup:
                    pointgroup = self._scalr_input_pointgroup
                    reindex_op = 'h,k,l'
                    ntr = False

                else:
                    pointless_hklin = self._prepare_pointless_hklin(
                        hklin, si.get_integrater().get_phi_width())

                    pointgroup, reindex_op, ntr, pt = \
                        self._pointless_indexer_jiffy(
                            pointless_hklin, refiner)

                    Debug.write('X1698: %s: %s' % (pointgroup, reindex_op))

                lattice = Syminfo.get_lattice(pointgroup)

                if not lattice in lattices:
                    lattices.append(lattice)

                if ntr:
                    intgr.integrater_reset_reindex_operator()
                    need_to_return = True

        if len(lattices) > 1:

            # why not using pointless indexer jiffy??!

            # assert the lowest-symmetry common lattice on all sweeps
            correct_lattice = sort_lattices(lattices)[0]

            Chatter.write('Correct lattice asserted to be %s' % \
                          correct_lattice)

            # transfer this information back to the indexers
            for epoch in self._sweep_handler.get_epochs():

                si = self._sweep_handler.get_sweep_information(epoch)
                refiner = si.get_integrater().get_integrater_refiner()
                sname = si.get_sweep_name()

                state = refiner.set_refiner_asserted_lattice(
                    correct_lattice)

                if state == refiner.LATTICE_CORRECT:
                    Chatter.write('Lattice %s ok for sweep %s' % \
                                  (correct_lattice, sname))
                elif state == refiner.LATTICE_IMPOSSIBLE:
                    raise RuntimeError('Lattice %s impossible for %s' \
                                       % (correct_lattice, sname))
                elif state == refiner.LATTICE_POSSIBLE:
                    Chatter.write('Lattice %s assigned for sweep %s' % \
                                  (correct_lattice, sname))
                    need_to_return = True

    # if one or more of them was not in the lowest lattice,
    # need to return here to allow reprocessing

    if need_to_return:
        self.set_scaler_done(False)
        self.set_scaler_prepare_done(False)
        return

    # ---------- REINDEX ALL DATA TO CORRECT POINTGROUP ----------

    # all should share the same pointgroup, unless twinned... in which
    # case force them to be...

    pointgroups = {}
    reindex_ops = {}
    probably_twinned = False

    need_to_return = False

    multi_sweep_indexing = \
        PhilIndex.params.xia2.settings.multi_sweep_indexing == True

    if multi_sweep_indexing and not self._scalr_input_pointgroup:
        # same rebatch-and-merge dance as above, this time to assign the
        # pointgroup/reindex operator to every epoch
        pointless_hklins = []

        max_batches = 0
        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            hklin = si.get_reflections()

            batches = MtzUtils.batches_from_mtz(hklin)
            if 1 + max(batches) - min(batches) > max_batches:
                max_batches = max(batches) - min(batches) + 1

        from xia2.lib.bits import nifty_power_of_ten

        Debug.write('Biggest sweep has %d batches' % max_batches)
        max_batches = nifty_power_of_ten(max_batches)

        counter = 0

        refiners = []

        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            hklin = si.get_reflections()
            integrater = si.get_integrater()
            refiner = integrater.get_integrater_refiner()
            refiners.append(refiner)

            hklin = self._prepare_pointless_hklin(
                hklin, si.get_integrater().get_phi_width())

            # NOTE(review): as in the block above, pname/xname/dname are
            # the leaked values from the GATHER loop - TODO confirm
            hklout = os.path.join(self.get_working_directory(),
                                  '%s_%s_%s_%s_prepointless.mtz' % \
                                  (pname, xname, dname,
                                   si.get_sweep_name()))

            # we will want to delete this one exit
            FileHandler.record_temporary_file(hklout)

            first_batch = min(si.get_batches())
            si.set_batch_offset(counter * max_batches - first_batch + 1)

            from xia2.Modules.Scaler.rebatch import rebatch
            new_batches = rebatch(
                hklin, hklout, first_batch=counter * max_batches + 1,
                pname=pname, xname=xname, dname=dname)

            pointless_hklins.append(hklout)

            # update the counter & recycle
            counter += 1

        # FIXME related to xia2-51 - this looks very very similar to the logic
        # in [A] above - is this duplicated logic?

        s = self._factory.Sortmtz()

        pointless_hklin = os.path.join(self.get_working_directory(),
                                       '%s_%s_prepointless_sorted.mtz' % \
                                       (self._scalr_pname,
                                        self._scalr_xname))

        s.set_hklout(pointless_hklin)

        for hklin in pointless_hklins:
            s.add_hklin(hklin)

        s.sort()

        pointless_const = os.path.join(self.get_working_directory(),
                                       '%s_%s_prepointless_const.mtz' % \
                                       (self._scalr_pname,
                                        self._scalr_xname))
        FileHandler.record_temporary_file(pointless_const)

        aimless_const = self._factory.Aimless()
        aimless_const.set_hklin(pointless_hklin)
        aimless_const.set_hklout(pointless_const)
        aimless_const.const()

        pointless_const = os.path.join(self.get_working_directory(),
                                       '%s_%s_prepointless_const_unmerged.mtz' % \
                                       (self._scalr_pname,
                                        self._scalr_xname))
        FileHandler.record_temporary_file(pointless_const)

        pointless_hklin = pointless_const

        pointgroup, reindex_op, ntr, pt = \
            self._pointless_indexer_multisweep(
                pointless_hklin, refiners)

        # one common answer for every epoch
        for epoch in self._sweep_handler.get_epochs():
            pointgroups[epoch] = pointgroup
            reindex_ops[epoch] = reindex_op

    else:
        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)

            hklin = si.get_reflections()

            integrater = si.get_integrater()
            refiner = integrater.get_integrater_refiner()

            if self._scalr_input_pointgroup:
                Debug.write('Using input pointgroup: %s' % \
                            self._scalr_input_pointgroup)
                pointgroup = self._scalr_input_pointgroup
                reindex_op = 'h,k,l'
                pt = False

            else:

                pointless_hklin = self._prepare_pointless_hklin(
                    hklin, si.get_integrater().get_phi_width())

                pointgroup, reindex_op, ntr, pt = \
                    self._pointless_indexer_jiffy(
                        pointless_hklin, refiner)

                Debug.write('X1698: %s: %s' % (pointgroup, reindex_op))

                if ntr:

                    integrater.integrater_reset_reindex_operator()
                    need_to_return = True

            if pt and not probably_twinned:
                probably_twinned = True

            Debug.write('Pointgroup: %s (%s)' % (pointgroup, reindex_op))

            pointgroups[epoch] = pointgroup
            reindex_ops[epoch] = reindex_op

    overall_pointgroup = None

    pointgroup_set = {pointgroups[e] for e in pointgroups}

    if len(pointgroup_set) > 1 and \
            not probably_twinned:
        raise RuntimeError('non uniform pointgroups')

    if len(pointgroup_set) > 1:
        # twinning suspected - fall back on the lowest-numbered
        # (lowest symmetry) pointgroup and request a reprocess
        Debug.write('Probably twinned, pointgroups: %s' % \
                    ' '.join([p.replace(' ', '') for p in \
                              list(pointgroup_set)]))
        numbers = [Syminfo.spacegroup_name_to_number(s) for s in \
                   pointgroup_set]
        overall_pointgroup = Syminfo.spacegroup_number_to_name(min(numbers))
        self._scalr_input_pointgroup = overall_pointgroup

        Chatter.write('Twinning detected, assume pointgroup %s' % \
                      overall_pointgroup)

        need_to_return = True

    else:
        overall_pointgroup = pointgroup_set.pop()

    for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)

        integrater = si.get_integrater()

        integrater.set_integrater_spacegroup_number(
            Syminfo.spacegroup_name_to_number(overall_pointgroup))
        integrater.set_integrater_reindex_operator(
            reindex_ops[epoch], reason='setting point group')
        # This will give us the reflections in the correct point group
        si.set_reflections(integrater.get_integrater_intensities())

    if need_to_return:
        self.set_scaler_done(False)
        self.set_scaler_prepare_done(False)
        return

    # in here now optionally work through the data files which should be
    # indexed with a consistent point group, and transform the orientation
    # matrices by the lattice symmetry operations (if possible) to get a
    # consistent definition of U matrix modulo fixed rotations

    if PhilIndex.params.xia2.settings.unify_setting:

        from scitbx.matrix import sqr
        reference_U = None
        i3 = sqr((1, 0, 0, 0, 1, 0, 0, 0, 1))

        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            intgr = si.get_integrater()
            fixed = sqr(intgr.get_goniometer().get_fixed_rotation())
            u, b, s = get_umat_bmat_lattice_symmetry_from_mtz(
                si.get_reflections())
            U = fixed.inverse() * sqr(u).transpose()
            B = sqr(b)

            if reference_U is None:
                # first sweep defines the reference setting
                reference_U = U
                continue

            # pick the symmetry op whose reindex brings U closest to the
            # reference (smallest elementwise deviation from identity)
            results = []
            for op in s.all_ops():
                R = B * sqr(op.r().as_double()).transpose() * B.inverse()
                nearly_i3 = (U * R).inverse() * reference_U
                score = sum([abs(_n - _i) for (_n, _i) in
                             zip(nearly_i3, i3)])
                results.append((score, op.r().as_hkl(), op))

            results.sort()
            best = results[0]
            Debug.write('Best reindex: %s %.3f' % (best[1], best[0]))
            intgr.set_integrater_reindex_operator(
                best[2].r().inverse().as_hkl(),
                reason='unifying [U] setting')
            si.set_reflections(intgr.get_integrater_intensities())

            # recalculate to verify
            u, b, s = get_umat_bmat_lattice_symmetry_from_mtz(
                si.get_reflections())
            U = fixed.inverse() * sqr(u).transpose()
            Debug.write('New reindex: %s' % (U.inverse() * reference_U))

            # FIXME I should probably raise an exception at this stage if this
            # is not about I3...

    # choose a reference reflection file, if one was supplied
    if self.get_scaler_reference_reflection_file():
        self._reference = self.get_scaler_reference_reflection_file()
        Debug.write('Using HKLREF %s' % self._reference)

    elif PhilIndex.params.xia2.settings.scale.reference_reflection_file:
        self._reference = \
            PhilIndex.params.xia2.settings.scale.reference_reflection_file
        Debug.write('Using HKLREF %s' % self._reference)

    params = PhilIndex.params
    use_brehm_diederichs = params.xia2.settings.use_brehm_diederichs
    if len(self._sweep_handler.get_epochs()) > 1 and use_brehm_diederichs:

        brehm_diederichs_files_in = []
        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            hklin = si.get_reflections()
            brehm_diederichs_files_in.append(hklin)

        # now run cctbx.brehm_diederichs to figure out the indexing hand for
        # each sweep
        from xia2.Wrappers.Cctbx.BrehmDiederichs import BrehmDiederichs
        from xia2.lib.bits import auto_logfiler
        brehm_diederichs = BrehmDiederichs()
        brehm_diederichs.set_working_directory(
            self.get_working_directory())
        auto_logfiler(brehm_diederichs)
        brehm_diederichs.set_input_filenames(brehm_diederichs_files_in)
        # 1 or 3? 1 seems to work better?
        brehm_diederichs.set_asymmetric(1)
        brehm_diederichs.run()
        reindexing_dict = brehm_diederichs.get_reindexing_dict()

        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            intgr = si.get_integrater()
            hklin = si.get_reflections()

            reindex_op = reindexing_dict.get(os.path.abspath(hklin))
            assert reindex_op is not None

            # NOTE(review): "if 1 or ..." is always true, so the operator
            # is applied even when it is the identity - presumably a
            # deliberate short-circuit left in during debugging
            if 1 or reindex_op != 'h,k,l':
                # apply the reindexing operator
                intgr.set_integrater_reindex_operator(
                    reindex_op, reason='match reference')
                si.set_reflections(intgr.get_integrater_intensities())

    elif len(self._sweep_handler.get_epochs()) > 1 and \
            not self._reference:
        # no external reference: the first sweep becomes the reference
        first = self._sweep_handler.get_epochs()[0]
        si = self._sweep_handler.get_sweep_information(first)
        self._reference = si.get_reflections()

    if self._reference:

        md = self._factory.Mtzdump()
        md.set_hklin(self._reference)
        md.dump()

        # NOTE(review): these two checks are disabled with "and False" -
        # deliberately dead code, kept as documentation of intent
        if md.get_batches() and False:
            raise RuntimeError('reference reflection file %s unmerged' % \
                               self._reference)

        datasets = md.get_datasets()

        if len(datasets) > 1 and False:
            raise RuntimeError('more than one dataset in %s' % \
                               self._reference)

        # then get the unit cell, lattice etc.

        reference_lattice = Syminfo.get_lattice(md.get_spacegroup())
        reference_cell = md.get_dataset_info(datasets[0])['cell']

        # then compute the pointgroup from this...

        # ---------- REINDEX TO CORRECT (REFERENCE) SETTING ----------

        for epoch in self._sweep_handler.get_epochs():

            # if we are working with unified UB matrix then this should not
            # be a problem here (note, *if*; *should*)

            # what about e.g. alternative P1 settings?
            # see JIRA MXSW-904
            if PhilIndex.params.xia2.settings.unify_setting:
                continue

            pl = self._factory.Pointless()

            si = self._sweep_handler.get_sweep_information(epoch)
            hklin = si.get_reflections()

            pl.set_hklin(self._prepare_pointless_hklin(
                hklin, si.get_integrater().get_phi_width()))

            hklout = os.path.join(
                self.get_working_directory(),
                '%s_rdx2.mtz' % os.path.split(hklin)[-1][:-4])

            # we will want to delete this one exit
            FileHandler.record_temporary_file(hklout)

            # now set the initial reflection set as a reference...

            pl.set_hklref(self._reference)

            # https://github.com/xia2/xia2/issues/115 - should ideally iteratively
            # construct a reference or a tree of correlations to ensure correct
            # reference setting - however if small molecule assume has been
            # multi-sweep-indexed so can ignore "fatal errors" - temporary hack
            pl.decide_pointgroup(
                ignore_errors=PhilIndex.params.xia2.settings.small_molecule)

            Debug.write('Reindexing analysis of %s' % pl.get_hklin())

            pointgroup = pl.get_pointgroup()
            reindex_op = pl.get_reindex_operator()

            Debug.write('Operator: %s' % reindex_op)

            # apply this...

            integrater = si.get_integrater()

            integrater.set_integrater_reindex_operator(
                reindex_op, reason='match reference')
            integrater.set_integrater_spacegroup_number(
                Syminfo.spacegroup_name_to_number(pointgroup))
            si.set_reflections(integrater.get_integrater_intensities())

            # sanity-check the reindexed sweep against the reference
            md = self._factory.Mtzdump()
            md.set_hklin(si.get_reflections())
            md.dump()

            datasets = md.get_datasets()

            if len(datasets) > 1:
                raise RuntimeError('more than one dataset in %s' % \
                                   si.get_reflections())

            # then get the unit cell, lattice etc.

            lattice = Syminfo.get_lattice(md.get_spacegroup())
            cell = md.get_dataset_info(datasets[0])['cell']

            if lattice != reference_lattice:
                raise RuntimeError('lattices differ in %s and %s' % \
                                   (self._reference, si.get_reflections()))

            Debug.write('Cell: %.2f %.2f %.2f %.2f %.2f %.2f' % cell)
            Debug.write('Ref: %.2f %.2f %.2f %.2f %.2f %.2f' %
                        reference_cell)

            # reject any cell edge deviating more than 10% from reference
            for j in range(6):
                if math.fabs(
                        (cell[j] - reference_cell[j]) /
                        reference_cell[j]) > 0.1:
                    raise RuntimeError(
                        'unit cell parameters differ in %s and %s' % \
                        (self._reference, si.get_reflections()))

    # ---------- SORT TOGETHER DATA ----------

    self._sort_together_data_ccp4()

    self._scalr_resolution_limits = {}

    # store central resolution limit estimates
    batch_ranges = [
        self._sweep_handler.get_sweep_information(epoch).get_batch_range()
        for epoch in self._sweep_handler.get_epochs()
    ]

    self._resolution_limit_estimates = ersatz_resolution(
        self._prepared_reflections, batch_ranges)
def setup_from_xinfo_file(self, xinfo_file):
    '''Set up this object & all subobjects based on the .xinfo
    file contents.

    Parses the .xinfo file via XInfo and builds the full
    XCrystal / XSample / XWavelength / XSweep hierarchy, attaching
    each crystal to this project.

    :param xinfo_file: path to the .xinfo input file
    :raises RuntimeError: if a crystal defines no wavelengths, if a
        sweep gives a bad RESOLUTION entry, or if a sweep omits the
        sample when more than one sample is defined
    '''

    settings = PhilIndex.params.xia2.settings

    # restrict parsing to the sweeps selected on the command line,
    # if any were given
    sweep_ids = [sweep.id for sweep in settings.sweep]
    sweep_ranges = [sweep.range for sweep in settings.sweep]

    if not sweep_ids:
        sweep_ids = None
        sweep_ranges = None

    xinfo = XInfo(xinfo_file, sweep_ids=sweep_ids, sweep_ranges=sweep_ranges)

    self._name = xinfo.get_project()
    crystals = xinfo.get_crystals()

    for crystal in crystals.keys():
        xc = XCrystal(crystal, self)
        if 'sequence' in crystals[crystal]:
            xc.set_aa_sequence(crystals[crystal]['sequence'])
        if 'ha_info' in crystals[crystal]:
            if crystals[crystal]['ha_info'] != {}:
                xc.set_ha_info(crystals[crystal]['ha_info'])

        if 'scaled_merged_reflection_file' in crystals[crystal]:
            # BUG FIX: this previously read the (absent) key
            # 'scaled_merged_reflections', raising KeyError whenever a
            # scaled merged reflection file was actually supplied -
            # use the key that was just tested.
            xc.set_scaled_merged_reflections(
                crystals[crystal]['scaled_merged_reflection_file'])

        if 'reference_reflection_file' in crystals[crystal]:
            xc.set_reference_reflection_file(
                crystals[crystal]['reference_reflection_file'])
        if 'freer_file' in crystals[crystal]:
            xc.set_freer_file(crystals[crystal]['freer_file'])

        # user assigned spacegroup
        if 'user_spacegroup' in crystals[crystal]:
            xc.set_user_spacegroup(crystals[crystal]['user_spacegroup'])
        elif settings.space_group is not None:
            # XXX do we ever actually get here?
            xc.set_user_spacegroup(
                settings.space_group.type().lookup_symbol())

        # add a default sample if none present in xinfo file
        if not crystals[crystal]['samples']:
            crystals[crystal]['samples']['X1'] = {}

        for sample in crystals[crystal]['samples'].keys():
            xsample = XSample(sample, xc)
            xc.add_sample(xsample)

        if not crystals[crystal]['wavelengths']:
            raise RuntimeError('No wavelengths specified in xinfo file')

        for wavelength in crystals[crystal]['wavelengths'].keys():
            # FIXME 29/NOV/06 in here need to be able to cope with
            # no wavelength information - this should default to the
            # information in the image header (John Cowan pointed
            # out that this was untidy - requiring that it agrees
            # with the value in the header makes this almost
            # useless.)

            wave_info = crystals[crystal]['wavelengths'][wavelength]

            if 'wavelength' not in wave_info:
                Debug.write('No wavelength value given for wavelength %s'
                            % wavelength)
            else:
                Debug.write(
                    'Overriding value for wavelength %s to %8.6f' % \
                    (wavelength, float(wave_info['wavelength'])))

            # handle case where user writes f" in place of f''
            if 'f"' in wave_info and not \
               'f\'\'' in wave_info:
                wave_info['f\'\''] = wave_info['f"']

            xw = XWavelength(wavelength, xc,
                             wavelength=wave_info.get('wavelength', 0.0),
                             f_pr=wave_info.get('f\'', 0.0),
                             f_prpr=wave_info.get('f\'\'', 0.0),
                             dmin=wave_info.get('dmin', 0.0),
                             dmax=wave_info.get('dmax', 0.0))

            # in here I also need to look and see if we have
            # been given any scaled reflection files...

            # check to see if we have a user supplied lattice...
            if 'user_spacegroup' in crystals[crystal]:
                lattice = Syminfo.get_lattice(
                    crystals[crystal]['user_spacegroup'])
            elif settings.space_group is not None:
                # XXX do we ever actually get here?
                lattice = Syminfo.get_lattice(
                    settings.space_group.type().lookup_symbol())
            else:
                lattice = None

            # and also user supplied cell constants - from either
            # the xinfo file (the first port of call) or the
            # command-line.
            if 'user_cell' in crystals[crystal]:
                cell = crystals[crystal]['user_cell']
            elif settings.unit_cell is not None:
                # XXX do we ever actually get here?
                cell = settings.unit_cell.parameters()
            else:
                cell = None

            dmin = wave_info.get('dmin', 0.0)
            dmax = wave_info.get('dmax', 0.0)

            if dmin == 0.0 and dmax == 0.0:
                dmin = PhilIndex.params.xia2.settings.resolution.d_min
                dmax = PhilIndex.params.xia2.settings.resolution.d_max

            # want to be able to locally override the resolution limits
            # for this sweep while leaving the rest for the data set
            # intact...

            for sweep_name in crystals[crystal]['sweeps'].keys():
                sweep_info = crystals[crystal]['sweeps'][sweep_name]

                sample_name = sweep_info.get('sample')
                if sample_name is None:
                    if len(crystals[crystal]['samples']) == 1:
                        # list() works identically here under Python 2
                        # and keeps this Python-3 safe (keys() view)
                        sample_name = list(
                            crystals[crystal]['samples'])[0]
                    else:
                        raise RuntimeError('No sample given for sweep %s'
                                           % sweep_name)

                xsample = xc.get_xsample(sample_name)
                assert xsample is not None

                # stash the wavelength-level limits so a per-sweep
                # RESOLUTION entry only overrides them for this sweep
                dmin_old = dmin
                dmax_old = dmax

                if 'RESOLUTION' in sweep_info:
                    # list comprehension rather than map() so len()
                    # below also works under Python 3
                    values = [float(v)
                              for v in sweep_info['RESOLUTION'].split()]
                    if len(values) == 1:
                        dmin = values[0]
                    elif len(values) == 2:
                        dmin = min(values)
                        dmax = max(values)
                    else:
                        raise RuntimeError('bad resolution for sweep %s'
                                           % sweep_name)

                if sweep_info['wavelength'] == wavelength:
                    frames_to_process = sweep_info.get('start_end')

                    xsweep = xw.add_sweep(
                        sweep_name,
                        sample=xsample,
                        directory=sweep_info.get('DIRECTORY'),
                        image=sweep_info.get('IMAGE'),
                        beam=sweep_info.get('beam'),
                        reversephi=sweep_info.get('reversephi', False),
                        distance=sweep_info.get('distance'),
                        gain=float(sweep_info.get('GAIN', 0.0)),
                        dmin=dmin, dmax=dmax,
                        polarization=float(
                            sweep_info.get('POLARIZATION', 0.0)),
                        frames_to_process=frames_to_process,
                        user_lattice=lattice,
                        user_cell=cell,
                        epoch=sweep_info.get('epoch', 0),
                        ice=sweep_info.get('ice', False),
                        excluded_regions=sweep_info.get(
                            'excluded_regions', []),
                        )

                    xsample.add_sweep(xsweep)

                # restore wavelength-level resolution limits
                dmin = dmin_old
                dmax = dmax_old

            xc.add_wavelength(xw)

        self.add_crystal(xc)
def decide_pointgroup(self):
    """Decide on the correct pointgroup for hklin.

    Runs the CCP4 program pointless on either an MTZ file (hklin) or
    an XDS-format file (xdsin), optionally against a reference data
    set (hklref), then records the chosen point group, its confidence
    and total probability, and the reindex matrix / operator parsed
    from the pointless XML output.  Returns 'ok' early (identity
    reindex) when pointless reports no alternative indexing.
    """

    if not self._xdsin:
        self.check_hklin()
        self.set_task("Computing the correct pointgroup for %s" % self.get_hklin())
    else:
        Debug.write("Pointless using XDS input file %s" % self._xdsin)
        self.set_task("Computing the correct pointgroup for %s" % self.get_xpid() if False else "Computing the correct pointgroup for %s" % self.get_xdsin())

    # FIXME this should probably be a standard CCP4 keyword
    if self._xdsin:
        self.add_command_line("xdsin")
        self.add_command_line(self._xdsin)

    # ask pointless for its summary XML - parsed at the bottom of
    # this method
    self.add_command_line("xmlout")
    self.add_command_line("%d_pointless.xml" % self.get_xpid())

    if self._hklref:
        self.add_command_line("hklref")
        self.add_command_line(self._hklref)

    self.start()

    self.input("systematicabsences off")
    self.input("setting symmetry-based")

    if self._hklref:
        dev = PhilIndex.params.xia2.settings.developmental
        if dev.pointless_tolerance > 0.0:
            self.input("tolerance %f" % dev.pointless_tolerance)

    # may expect more %age variation for small molecule data
    # NOTE(review): when both apply, this "tolerance 5.0" supersedes
    # the developmental tolerance sent just above - confirm intended
    if PhilIndex.params.xia2.settings.small_molecule == True:
        if self._hklref:
            self.input("tolerance 5.0")
    if PhilIndex.params.ccp4.pointless.chirality is not None:
        self.input("chirality %s" % PhilIndex.params.ccp4.pointless.chirality)

    if self._input_laue_group:
        self.input("lauegroup %s" % self._input_laue_group)

    self.close_wait()

    # check for errors
    self.check_for_errors()

    # check for fatal errors - the message detail is on the line
    # following the 'FATAL ERROR message:' marker
    output = self.get_all_output()
    for j, record in enumerate(output):
        if "FATAL ERROR message:" in record:
            raise RuntimeError, "Pointless error: %s" % output[j + 1].strip()

    hklin_spacegroup = ""
    # NOTE(review): hklin_lattice is assigned below but never read in
    # this method - presumably kept for debugging; verify before removal
    hklin_lattice = ""

    for o in self.get_all_output():
        if "Spacegroup from HKLIN file" in o:

            # hklin_spacegroup = o.split(':')[-1].strip()

            hklin_spacegroup = spacegroup_name_xHM_to_old(
                o.replace("Spacegroup from HKLIN file :", "").strip())
            hklin_lattice = Syminfo.get_lattice(hklin_spacegroup)

        if "No alternative indexing possible" in o:
            # then the XML file will be broken - no worries...
            # fall back to the input spacegroup with an identity
            # reindex operation
            self._pointgroup = hklin_spacegroup
            self._confidence = 1.0
            self._totalprob = 1.0
            self._reindex_matrix = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
            self._reindex_operator = "h,k,l"

            return "ok"

        if "**** Incompatible symmetries ****" in o:
            raise RuntimeError, "reindexing against a reference with different symmetry"

        if "***** Stopping because cell discrepancy between files" in o:
            raise RuntimeError, "incompatible unit cells between data sets"

        if "L-test suggests that the data may be twinned" in o:
            self._probably_twinned = True

    # parse the XML file for the information I need...
    xml_file = os.path.join(self.get_working_directory(), "%d_pointless.xml" % self.get_xpid())

    mend_pointless_xml(xml_file)

    # catch the case sometimes on ppc mac where pointless adds
    # an extra .xml on the end...
    if not os.path.exists(xml_file) and os.path.exists("%s.xml" % xml_file):
        xml_file = "%s.xml" % xml_file

    if not self._hklref:
        dom = xml.dom.minidom.parse(xml_file)

        try:
            best = dom.getElementsByTagName("BestSolution")[0]
        except IndexError, e:
            raise RuntimeError, "error getting solution from pointless"

        self._pointgroup = best.getElementsByTagName("GroupName")[0].childNodes[0].data
        self._confidence = float(best.getElementsByTagName("Confidence")[0].childNodes[0].data)
        self._totalprob = float(best.getElementsByTagName("TotalProb")[0].childNodes[0].data)
        self._reindex_matrix = map(
            float, best.getElementsByTagName("ReindexMatrix")[0].childNodes[0].data.split()
        )
        self._reindex_operator = clean_reindex_operator(
            best.getElementsByTagName("ReindexOperator")[0].childNodes[0].data.strip()
        )
def _standard_scale_prepare(self):
    """Symmetry-analyse every sweep and bring all onto a common
    lattice and point group.

    Returns True when upstream processing must be repeated (a reindex
    operator was reset, a lattice was reassigned, or twinning forced
    the point group choice); otherwise reindexes every sweep to the
    agreed point group and returns False.
    """
    pg_by_epoch = {}
    op_by_epoch = {}
    twin_suspected = False
    rerun_needed = False

    seen_lattices = []
    # First check for the existence of multiple lattices. If only one
    # epoch, then this gives the necessary data for proceeding straight
    # to the point group check.
    for ep in self._sweep_handler.get_epochs():
        sweep_info = self._sweep_handler.get_sweep_information(ep)
        integrater = sweep_info.get_integrater()

        pg, op, reset_needed, twin_flag, _a, _b, _c = (
            self._dials_symmetry_indexer_jiffy(
                [integrater.get_integrated_experiments()],
                [integrater.get_integrated_reflections()],
                [integrater.get_integrater_refiner()],
            )
        )

        lat = Syminfo.get_lattice(pg)
        if lat not in seen_lattices:
            seen_lattices.append(lat)
        if reset_needed:
            integrater.integrater_reset_reindex_operator()
            rerun_needed = True
        if twin_flag:
            twin_suspected = True
        pg_by_epoch[ep] = pg
        op_by_epoch[ep] = op
        Debug.write("Pointgroup: %s (%s)" % (pg, op))

    if len(seen_lattices) > 1:
        # More than one lattice seen: assert the highest-symmetry
        # common lattice on every refiner, flagging a rerun where a
        # sweep had to be reassigned.
        correct_lattice = sort_lattices(seen_lattices)[0]
        Chatter.write("Correct lattice asserted to be %s" % correct_lattice)

        for ep in self._sweep_handler.get_epochs():
            sweep_info = self._sweep_handler.get_sweep_information(ep)
            refiner = sweep_info.get_integrater().get_integrater_refiner()
            _tup = (correct_lattice, sweep_info.get_sweep_name())

            state = refiner.set_refiner_asserted_lattice(correct_lattice)

            if state == refiner.LATTICE_CORRECT:
                Chatter.write("Lattice %s ok for sweep %s" % _tup)
            elif state == refiner.LATTICE_IMPOSSIBLE:
                raise RuntimeError("Lattice %s impossible for %s" % _tup)
            elif state == refiner.LATTICE_POSSIBLE:
                Chatter.write("Lattice %s assigned for sweep %s" % _tup)
                rerun_needed = True

    if rerun_needed:
        return rerun_needed

    rerun_needed = False

    # unique point groups across all sweeps
    distinct_pgs = {pg_by_epoch[e] for e in pg_by_epoch}

    if len(distinct_pgs) > 1 and not twin_suspected:
        raise RuntimeError(
            "non uniform pointgroups: %s" % str(list(distinct_pgs))
        )

    if len(distinct_pgs) > 1:
        # twinning suspected - fall back to the lowest-numbered
        # (least symmetric) point group and force a rerun
        Debug.write(
            "Probably twinned, pointgroups: %s"
            % " ".join([p.replace(" ", "") for p in list(distinct_pgs)])
        )
        numbers = [Syminfo.spacegroup_name_to_number(s) for s in distinct_pgs]
        overall_pointgroup = Syminfo.spacegroup_number_to_name(min(numbers))
        self._scalr_input_pointgroup = overall_pointgroup
        Chatter.write(
            "Twinning detected, assume pointgroup %s" % overall_pointgroup
        )
        rerun_needed = True
    else:
        overall_pointgroup = distinct_pgs.pop()
    self._scalr_likely_spacegroups = [overall_pointgroup]
    for ep in self._sweep_handler.get_epochs():
        sweep_info = self._sweep_handler.get_sweep_information(ep)
        self._helper.reindex_jiffy(
            sweep_info, overall_pointgroup, op_by_epoch[ep]
        )
    return rerun_needed