def _scale_prepare(self):
    '''Perform all of the preparation required to deliver the scaled
    data. This should sort together the reflection files, ensure that
    they are correctly indexed (via pointless) and generally tidy
    things up.'''

    # acknowledge all of the programs we are about to use...
    Citations.cite('pointless')
    Citations.cite('aimless')
    Citations.cite('ccp4')

    # ---------- GATHER ----------

    self._sweep_handler = SweepInformationHandler(self._scalr_integraters)

    Journal.block(
        'gathering', self.get_scaler_xcrystal().get_name(), 'CCP4',
        {'working directory': self.get_working_directory()})

    for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)
        pname, xname, dname = si.get_project_info()
        sname = si.get_sweep_name()

        exclude_sweep = False

        for sweep in PhilIndex.params.xia2.settings.sweep:
            if sweep.id == sname and sweep.exclude:
                exclude_sweep = True
                break

        if exclude_sweep:
            self._sweep_handler.remove_epoch(epoch)
            Debug.write('Excluding sweep %s' % sname)
        else:
            Journal.entry({'adding data from': '%s/%s/%s' %
                           (xname, dname, sname)})

    # gather data for all images which belonged to the parent
    # crystal - allowing for the fact that things could go wrong
    # e.g. epoch information not available, exposure times not in
    # headers etc...

    for e in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(e)
        assert is_mtz_file(si.get_reflections())

    p, x = self._sweep_handler.get_project_info()
    self._scalr_pname = p
    self._scalr_xname = x

    # verify that the lattices are consistent, calling eliminate if
    # they are not N.B. there could be corner cases here

    need_to_return = False

    multi_sweep_indexing = \
        PhilIndex.params.xia2.settings.developmental.multi_sweep_indexing

    if len(self._sweep_handler.get_epochs()) > 1:

        if multi_sweep_indexing and not self._scalr_input_pointgroup:
            pointless_hklins = []

            max_batches = 0
            for epoch in self._sweep_handler.get_epochs():
                si = self._sweep_handler.get_sweep_information(epoch)
                hklin = si.get_reflections()

                md = self._factory.Mtzdump()
                md.set_hklin(hklin)
                md.dump()

                batches = md.get_batches()
                if 1 + max(batches) - min(batches) > max_batches:
                    max_batches = max(batches) - min(batches) + 1

                datasets = md.get_datasets()

                Debug.write('In reflection file %s found:' % hklin)
                for d in datasets:
                    Debug.write('... %s' % d)

                dataset_info = md.get_dataset_info(datasets[0])

            from xia2.lib.bits import nifty_power_of_ten

            Debug.write('Biggest sweep has %d batches' % max_batches)
            max_batches = nifty_power_of_ten(max_batches)

            counter = 0

            for epoch in self._sweep_handler.get_epochs():
                si = self._sweep_handler.get_sweep_information(epoch)
                hklin = si.get_reflections()
                integrater = si.get_integrater()
                refiner = integrater.get_integrater_refiner()

                hklin = self._prepare_pointless_hklin(
                    hklin, si.get_integrater().get_phi_width())

                rb = self._factory.Rebatch()

                hklout = os.path.join(self.get_working_directory(),
                                      '%s_%s_%s_%s_prepointless.mtz' %
                                      (pname, xname, dname,
                                       si.get_sweep_name()))

                # we will want to delete this one on exit
                FileHandler.record_temporary_file(hklout)

                first_batch = min(si.get_batches())
                si.set_batch_offset(counter * max_batches - first_batch + 1)

                rb.set_hklin(hklin)
                rb.set_first_batch(counter * max_batches + 1)
                rb.set_project_info(pname, xname, dname)
                rb.set_hklout(hklout)

                new_batches = rb.rebatch()

                pointless_hklins.append(hklout)

                # update the counter & recycle
                counter += 1

            s = self._factory.Sortmtz()

            pointless_hklin = os.path.join(self.get_working_directory(),
                                           '%s_%s_prepointless_sorted.mtz' %
                                           (self._scalr_pname,
                                            self._scalr_xname))

            s.set_hklout(pointless_hklin)

            for hklin in pointless_hklins:
                s.add_hklin(hklin)

            s.sort()

            pointgroup, reindex_op, ntr, pt = \
                self._pointless_indexer_jiffy(pointless_hklin, refiner)

            Debug.write('X1698: %s: %s' % (pointgroup, reindex_op))

            lattices = [Syminfo.get_lattice(pointgroup)]

            for epoch in self._sweep_handler.get_epochs():
                si = self._sweep_handler.get_sweep_information(epoch)
                intgr = si.get_integrater()
                hklin = si.get_reflections()
                refiner = intgr.get_integrater_refiner()

                if ntr:
                    intgr.integrater_reset_reindex_operator()
                    need_to_return = True

        else:
            lattices = []

            for epoch in self._sweep_handler.get_epochs():
                si = self._sweep_handler.get_sweep_information(epoch)
                intgr = si.get_integrater()
                hklin = si.get_reflections()
                refiner = intgr.get_integrater_refiner()

                if self._scalr_input_pointgroup:
                    pointgroup = self._scalr_input_pointgroup
                    reindex_op = 'h,k,l'
                    ntr = False
                else:
                    pointless_hklin = self._prepare_pointless_hklin(
                        hklin, si.get_integrater().get_phi_width())

                    pointgroup, reindex_op, ntr, pt = \
                        self._pointless_indexer_jiffy(
                            pointless_hklin, refiner)

                    Debug.write('X1698: %s: %s' % (pointgroup, reindex_op))

                lattice = Syminfo.get_lattice(pointgroup)

                if lattice not in lattices:
                    lattices.append(lattice)

                if ntr:
                    intgr.integrater_reset_reindex_operator()
                    need_to_return = True

        if len(lattices) > 1:
            # why not using pointless indexer jiffy??!
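            # (assumption: sort_lattices returns the candidate lattices
            # ordered from lowest to highest symmetry, so element [0] is
            # a lattice every sweep can support, e.g. ['mC', 'aP'] as
            # input would put 'aP' first)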
            correct_lattice = sort_lattices(lattices)[0]

            Chatter.write('Correct lattice asserted to be %s' %
                          correct_lattice)

            # transfer this information back to the indexers
            for epoch in self._sweep_handler.get_epochs():
                si = self._sweep_handler.get_sweep_information(epoch)
                refiner = si.get_integrater().get_integrater_refiner()
                sname = si.get_sweep_name()

                state = refiner.set_refiner_asserted_lattice(
                    correct_lattice)

                if state == refiner.LATTICE_CORRECT:
                    Chatter.write('Lattice %s ok for sweep %s' %
                                  (correct_lattice, sname))
                elif state == refiner.LATTICE_IMPOSSIBLE:
                    raise RuntimeError('Lattice %s impossible for %s' %
                                       (correct_lattice, sname))
                elif state == refiner.LATTICE_POSSIBLE:
                    Chatter.write('Lattice %s assigned for sweep %s' %
                                  (correct_lattice, sname))
                    need_to_return = True

    # if one or more of them was not in the lowest lattice,
    # need to return here to allow reprocessing

    if need_to_return:
        self.set_scaler_done(False)
        self.set_scaler_prepare_done(False)
        return

    # ---------- REINDEX ALL DATA TO CORRECT POINTGROUP ----------

    # all should share the same pointgroup, unless twinned... in which
    # case force them to be...

    pointgroups = {}
    reindex_ops = {}
    probably_twinned = False

    need_to_return = False

    multi_sweep_indexing = \
        PhilIndex.params.xia2.settings.developmental.multi_sweep_indexing

    if multi_sweep_indexing and not self._scalr_input_pointgroup:
        pointless_hklins = []

        max_batches = 0
        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            hklin = si.get_reflections()

            md = self._factory.Mtzdump()
            md.set_hklin(hklin)
            md.dump()

            batches = md.get_batches()
            if 1 + max(batches) - min(batches) > max_batches:
                max_batches = max(batches) - min(batches) + 1

            datasets = md.get_datasets()

            Debug.write('In reflection file %s found:' % hklin)
            for d in datasets:
                Debug.write('... %s' % d)

            dataset_info = md.get_dataset_info(datasets[0])

        from xia2.lib.bits import nifty_power_of_ten

        Debug.write('Biggest sweep has %d batches' % max_batches)
        max_batches = nifty_power_of_ten(max_batches)

        counter = 0

        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            hklin = si.get_reflections()
            integrater = si.get_integrater()
            refiner = integrater.get_integrater_refiner()

            hklin = self._prepare_pointless_hklin(
                hklin, si.get_integrater().get_phi_width())

            rb = self._factory.Rebatch()

            hklout = os.path.join(self.get_working_directory(),
                                  '%s_%s_%s_%s_prepointless.mtz' %
                                  (pname, xname, dname,
                                   si.get_sweep_name()))

            # we will want to delete this one on exit
            FileHandler.record_temporary_file(hklout)

            first_batch = min(si.get_batches())
            si.set_batch_offset(counter * max_batches - first_batch + 1)

            rb.set_hklin(hklin)
            rb.set_first_batch(counter * max_batches + 1)
            rb.set_project_info(pname, xname, dname)
            rb.set_hklout(hklout)

            new_batches = rb.rebatch()

            pointless_hklins.append(hklout)

            # update the counter & recycle
            counter += 1

        s = self._factory.Sortmtz()

        pointless_hklin = os.path.join(self.get_working_directory(),
                                       '%s_%s_prepointless_sorted.mtz' %
                                       (self._scalr_pname,
                                        self._scalr_xname))

        s.set_hklout(pointless_hklin)

        for hklin in pointless_hklins:
            s.add_hklin(hklin)

        s.sort()

        pointgroup, reindex_op, ntr, pt = \
            self._pointless_indexer_jiffy(pointless_hklin, refiner)

        for epoch in self._sweep_handler.get_epochs():
            pointgroups[epoch] = pointgroup
            reindex_ops[epoch] = reindex_op

    else:
        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)

            hklin = si.get_reflections()

            integrater = si.get_integrater()
            refiner = integrater.get_integrater_refiner()

            if self._scalr_input_pointgroup:
                Debug.write('Using input pointgroup: %s' %
                            self._scalr_input_pointgroup)
                pointgroup = self._scalr_input_pointgroup
                reindex_op = 'h,k,l'
                pt = False
            else:
                pointless_hklin = self._prepare_pointless_hklin(
                    hklin, si.get_integrater().get_phi_width())

                pointgroup, reindex_op, ntr, pt = \
                    self._pointless_indexer_jiffy(
                        pointless_hklin, refiner)

                Debug.write('X1698: %s: %s' % (pointgroup, reindex_op))

                if ntr:
                    integrater.integrater_reset_reindex_operator()
                    need_to_return = True

            if pt and not probably_twinned:
                probably_twinned = True

            Debug.write('Pointgroup: %s (%s)' % (pointgroup, reindex_op))

            pointgroups[epoch] = pointgroup
            reindex_ops[epoch] = reindex_op

    overall_pointgroup = None

    pointgroup_set = set([pointgroups[e] for e in pointgroups])

    if len(pointgroup_set) > 1 and not probably_twinned:
        raise RuntimeError('non uniform pointgroups')

    if len(pointgroup_set) > 1:
        Debug.write('Probably twinned, pointgroups: %s' %
                    ' '.join([p.replace(' ', '') for p in
                              list(pointgroup_set)]))
        numbers = [Syminfo.spacegroup_name_to_number(s) for s in
                   pointgroup_set]
        overall_pointgroup = Syminfo.spacegroup_number_to_name(
            min(numbers))
        self._scalr_input_pointgroup = overall_pointgroup
        Chatter.write('Twinning detected, assume pointgroup %s' %
                      overall_pointgroup)
        need_to_return = True
    else:
        overall_pointgroup = pointgroup_set.pop()

    for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)

        integrater = si.get_integrater()

        integrater.set_integrater_spacegroup_number(
            Syminfo.spacegroup_name_to_number(overall_pointgroup))
        integrater.set_integrater_reindex_operator(
            reindex_ops[epoch], reason='setting point group')
        # This will give us the reflections in the correct point group
        si.set_reflections(integrater.get_integrater_intensities())

    if need_to_return:
        self.set_scaler_done(False)
        self.set_scaler_prepare_done(False)
        return

    # in here now optionally work through the data files which should be
    # indexed with a consistent point group, and transform the orientation
    # matrices by the lattice symmetry operations (if possible) to get a
    # consistent definition of U matrix modulo fixed rotations

    if PhilIndex.params.xia2.settings.unify_setting:
        from scitbx.matrix import sqr
        reference_U = None
        i3 = sqr((1, 0, 0, 0, 1, 0, 0, 0, 1))

        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            intgr = si.get_integrater()
            fixed = sqr(intgr.get_goniometer().get_fixed_rotation())
            u, b, s = get_umat_bmat_lattice_symmetry_from_mtz(
                si.get_reflections())
            U = fixed.inverse() * sqr(u).transpose()
            B = sqr(b)

            if reference_U is None:
                reference_U = U
                continue

            results = []
            for op in s.all_ops():
                R = B * sqr(op.r().as_double()).transpose() * B.inverse()
                nearly_i3 = (U * R).inverse() * reference_U
                score = sum([abs(_n - _i)
                             for (_n, _i) in zip(nearly_i3, i3)])
                results.append((score, op.r().as_hkl(), op))

            results.sort()
            best = results[0]
            Debug.write('Best reindex: %s %.3f' % (best[1], best[0]))
            intgr.set_integrater_reindex_operator(
                best[2].r().inverse().as_hkl(),
                reason='unifying [U] setting')
            si.set_reflections(intgr.get_integrater_intensities())

            # recalculate to verify
            u, b, s = get_umat_bmat_lattice_symmetry_from_mtz(
                si.get_reflections())
            U = fixed.inverse() * sqr(u).transpose()
            Debug.write('New reindex: %s' % (U.inverse() * reference_U))

        # FIXME I should probably raise an exception at this stage if this
        # is not about I3...

    if self.get_scaler_reference_reflection_file():
        self._reference = self.get_scaler_reference_reflection_file()
        Debug.write('Using HKLREF %s' % self._reference)

    elif Flags.get_reference_reflection_file():
        self._reference = Flags.get_reference_reflection_file()
        Debug.write('Using HKLREF %s' % self._reference)

    params = PhilIndex.params
    use_brehm_diederichs = params.xia2.settings.use_brehm_diederichs
    if len(self._sweep_handler.get_epochs()) > 1 and use_brehm_diederichs:
        brehm_diederichs_files_in = []
        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            hklin = si.get_reflections()
            brehm_diederichs_files_in.append(hklin)

        # now run cctbx.brehm_diederichs to figure out the indexing hand
        # for each sweep
        from xia2.Wrappers.Cctbx.BrehmDiederichs import BrehmDiederichs
        from xia2.lib.bits import auto_logfiler
        brehm_diederichs = BrehmDiederichs()
        brehm_diederichs.set_working_directory(self.get_working_directory())
        auto_logfiler(brehm_diederichs)
        brehm_diederichs.set_input_filenames(brehm_diederichs_files_in)
        # 1 or 3? 1 seems to work better?
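        # Brehm & Diederichs (Acta Cryst. D70, 2014) resolve the indexing
        # ambiguity by clustering the sweeps on pairwise intensity
        # correlations, then assigning each sweep the reindexing operator
        # that brings it onto a single consistent hand.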
        brehm_diederichs.set_asymmetric(1)
        brehm_diederichs.run()
        reindexing_dict = brehm_diederichs.get_reindexing_dict()

        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            intgr = si.get_integrater()
            hklin = si.get_reflections()

            reindex_op = reindexing_dict.get(os.path.abspath(hklin))
            assert reindex_op is not None

            if 1 or reindex_op != 'h,k,l':
                # apply the reindexing operator
                intgr.set_integrater_reindex_operator(
                    reindex_op, reason='match reference')
                si.set_reflections(intgr.get_integrater_intensities())

    elif len(self._sweep_handler.get_epochs()) > 1 and \
            not self._reference:
        first = self._sweep_handler.get_epochs()[0]
        si = self._sweep_handler.get_sweep_information(first)
        self._reference = si.get_reflections()

    if self._reference:

        md = self._factory.Mtzdump()
        md.set_hklin(self._reference)
        md.dump()

        if md.get_batches() and False:
            raise RuntimeError('reference reflection file %s unmerged' %
                               self._reference)

        datasets = md.get_datasets()

        if len(datasets) > 1 and False:
            raise RuntimeError('more than one dataset in %s' %
                               self._reference)

        # then get the unit cell, lattice etc.

        reference_lattice = Syminfo.get_lattice(md.get_spacegroup())
        reference_cell = md.get_dataset_info(datasets[0])['cell']

        # then compute the pointgroup from this...

        # ---------- REINDEX TO CORRECT (REFERENCE) SETTING ----------

        for epoch in self._sweep_handler.get_epochs():
            pl = self._factory.Pointless()

            si = self._sweep_handler.get_sweep_information(epoch)
            hklin = si.get_reflections()

            pl.set_hklin(self._prepare_pointless_hklin(
                hklin, si.get_integrater().get_phi_width()))

            hklout = os.path.join(
                self.get_working_directory(),
                '%s_rdx2.mtz' % os.path.split(hklin)[-1][:-4])

            # we will want to delete this one on exit
            FileHandler.record_temporary_file(hklout)

            # now set the initial reflection set as a reference...

            pl.set_hklref(self._reference)

            # write a pointless log file...
            pl.decide_pointgroup()

            Debug.write('Reindexing analysis of %s' % pl.get_hklin())

            pointgroup = pl.get_pointgroup()
            reindex_op = pl.get_reindex_operator()

            Debug.write('Operator: %s' % reindex_op)

            # apply this...

            integrater = si.get_integrater()

            integrater.set_integrater_reindex_operator(
                reindex_op, reason='match reference')
            integrater.set_integrater_spacegroup_number(
                Syminfo.spacegroup_name_to_number(pointgroup))
            si.set_reflections(integrater.get_integrater_intensities())

            md = self._factory.Mtzdump()
            md.set_hklin(si.get_reflections())
            md.dump()

            datasets = md.get_datasets()

            if len(datasets) > 1:
                raise RuntimeError('more than one dataset in %s' %
                                   si.get_reflections())

            # then get the unit cell, lattice etc.

            lattice = Syminfo.get_lattice(md.get_spacegroup())
            cell = md.get_dataset_info(datasets[0])['cell']

            if lattice != reference_lattice:
                raise RuntimeError('lattices differ in %s and %s' %
                                   (self._reference,
                                    si.get_reflections()))

            for j in range(6):
                if math.fabs((cell[j] - reference_cell[j]) /
                             reference_cell[j]) > 0.1:
                    raise RuntimeError(
                        'unit cell parameters differ in %s and %s' %
                        (self._reference, si.get_reflections()))

    # ---------- SORT TOGETHER DATA ----------

    self._sort_together_data_ccp4()

    self._scalr_resolution_limits = {}

    # store central resolution limit estimates

    batch_ranges = [self._sweep_handler.get_sweep_information(
        epoch).get_batch_range() for epoch in
        self._sweep_handler.get_epochs()]

    self._resolution_limit_estimates = ersatz_resolution(
        self._prepared_reflections, batch_ranges)

    return
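
# A minimal sketch (an assumption, not the real helper from
# xia2.lib.bits) of the behaviour nifty_power_of_ten is relied on for
# above: round a batch count up to the next power of ten, so that
# per-sweep batch offsets land on round numbers and rebatched sweeps
# can never overlap.
def _nifty_power_of_ten_sketch(n):
    import math
    # e.g. _nifty_power_of_ten_sketch(720) -> 1000
    return 10 ** (int(math.log10(n)) + 1)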

def _scale_prepare(self):
    """Perform all of the preparation required to deliver the scaled
    data. This should sort together the reflection files, ensure that
    they are correctly indexed (via pointless) and generally tidy
    things up."""

    # acknowledge all of the programs we are about to use...
    Citations.cite("pointless")
    Citations.cite("aimless")
    Citations.cite("ccp4")

    # ---------- GATHER ----------

    self._sweep_handler = SweepInformationHandler(self._scalr_integraters)

    for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)
        pname, xname, dname = si.get_project_info()
        sname = si.get_sweep_name()

        exclude_sweep = False

        for sweep in PhilIndex.params.xia2.settings.sweep:
            if sweep.id == sname and sweep.exclude:
                exclude_sweep = True
                break

        if exclude_sweep:
            self._sweep_handler.remove_epoch(epoch)
            logger.debug("Excluding sweep %s", sname)
        else:
            logger.debug("%-30s %s/%s/%s", "adding data from:", xname, dname, sname)

    # gather data for all images which belonged to the parent
    # crystal - allowing for the fact that things could go wrong
    # e.g. epoch information not available, exposure times not in
    # headers etc...

    for e in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(e)
        assert is_mtz_file(si.get_reflections()), repr(si.get_reflections())

    p, x = self._sweep_handler.get_project_info()
    self._scalr_pname = p
    self._scalr_xname = x

    # verify that the lattices are consistent, calling eliminate if
    # they are not N.B. there could be corner cases here

    need_to_return = False

    multi_sweep_indexing = PhilIndex.params.xia2.settings.multi_sweep_indexing

    # START OF if more than one epoch
    if len(self._sweep_handler.get_epochs()) > 1:

        # if we have multi-sweep-indexing going on then logic says all should
        # share common lattice & UB definition => this is not used here?
        # START OF if multi_sweep indexing and not input pg
        if multi_sweep_indexing and not self._scalr_input_pointgroup:
            pointless_hklins = []

            max_batches = 0
            for epoch in self._sweep_handler.get_epochs():
                si = self._sweep_handler.get_sweep_information(epoch)
                hklin = si.get_reflections()

                batches = MtzUtils.batches_from_mtz(hklin)
                if 1 + max(batches) - min(batches) > max_batches:
                    max_batches = max(batches) - min(batches) + 1

            logger.debug("Biggest sweep has %d batches", max_batches)
            max_batches = nifty_power_of_ten(max_batches)

            counter = 0

            refiners = []

            for epoch in self._sweep_handler.get_epochs():
                si = self._sweep_handler.get_sweep_information(epoch)
                hklin = si.get_reflections()
                integrater = si.get_integrater()
                refiner = integrater.get_integrater_refiner()
                refiners.append(refiner)

                hklin = self._prepare_pointless_hklin(
                    hklin, si.get_integrater().get_phi_width()
                )

                hklout = os.path.join(
                    self.get_working_directory(),
                    "%s_%s_%s_%s_prepointless.mtz"
                    % (pname, xname, dname, si.get_sweep_name()),
                )

                # we will want to delete this one on exit
                FileHandler.record_temporary_file(hklout)

                first_batch = min(si.get_batches())
                si.set_batch_offset(counter * max_batches - first_batch + 1)

                rebatch(
                    hklin,
                    hklout,
                    first_batch=counter * max_batches + 1,
                    pname=pname,
                    xname=xname,
                    dname=dname,
                )

                pointless_hklins.append(hklout)

                # update the counter & recycle
                counter += 1
            # SUMMARY - have added all sweeps to pointless_hklins

            s = self._factory.Sortmtz()
            pointless_hklin = os.path.join(
                self.get_working_directory(),
                "%s_%s_prepointless_sorted.mtz"
                % (self._scalr_pname, self._scalr_xname),
            )

            s.set_hklout(pointless_hklin)

            for hklin in pointless_hklins:
                s.add_hklin(hklin)

            s.sort()

            # FIXME xia2-51 in here look at running constant scaling on the
            # pointless hklin to put the runs on the same scale. Ref=[A]

            pointless_const = os.path.join(
                self.get_working_directory(),
                "%s_%s_prepointless_const.mtz"
                % (self._scalr_pname, self._scalr_xname),
            )
            FileHandler.record_temporary_file(pointless_const)

            aimless_const = self._factory.Aimless()
            aimless_const.set_hklin(pointless_hklin)
            aimless_const.set_hklout(pointless_const)
            aimless_const.const()

            pointless_const = os.path.join(
                self.get_working_directory(),
                "%s_%s_prepointless_const_unmerged.mtz"
                % (self._scalr_pname, self._scalr_xname),
            )
            FileHandler.record_temporary_file(pointless_const)

            pointless_hklin = pointless_const

            # FIXME xia2-51 in here need to pass all refiners to ensure that
            # the information is passed back to all of them not just the
            # last one...

            logger.debug("Running multisweep pointless for %d sweeps", len(refiners))
            pointgroup, reindex_op, ntr, pt = self._pointless_indexer_multisweep(
                pointless_hklin, refiners
            )

            logger.debug("X1698: %s: %s", pointgroup, reindex_op)
            lattices = [Syminfo.get_lattice(pointgroup)]

            for epoch in self._sweep_handler.get_epochs():
                si = self._sweep_handler.get_sweep_information(epoch)
                intgr = si.get_integrater()
                hklin = si.get_reflections()
                refiner = intgr.get_integrater_refiner()

                if ntr:
                    intgr.integrater_reset_reindex_operator()
                    need_to_return = True
            # SUMMARY - added all sweeps together into an mtz, ran
            # _pointless_indexer_multisweep on this, made a list of one
            # lattice and potentially reset reindex op?
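            # Illustrative batch-offset arithmetic (assuming
            # nifty_power_of_ten rounds up to the next power of ten):
            # with sweeps of 360 and 720 batches, max_batches becomes
            # 1000, so sweep 0 is rebatched to 1..360 and sweep 1 to
            # 1001..1720; batch numbers in the sorted file can never
            # collide and still follow the order of data collection.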
        # END OF if multi_sweep indexing and not input pg

        # START OF if not multi_sweep, or input pg given
        else:
            lattices = []

            for epoch in self._sweep_handler.get_epochs():

                si = self._sweep_handler.get_sweep_information(epoch)
                intgr = si.get_integrater()
                hklin = si.get_reflections()
                refiner = intgr.get_integrater_refiner()

                if self._scalr_input_pointgroup:
                    pointgroup = self._scalr_input_pointgroup
                    reindex_op = "h,k,l"
                    ntr = False

                else:
                    pointless_hklin = self._prepare_pointless_hklin(
                        hklin, si.get_integrater().get_phi_width()
                    )

                    pointgroup, reindex_op, ntr, pt = self._pointless_indexer_jiffy(
                        pointless_hklin, refiner
                    )

                    logger.debug("X1698: %s: %s", pointgroup, reindex_op)

                lattice = Syminfo.get_lattice(pointgroup)

                if lattice not in lattices:
                    lattices.append(lattice)

                if ntr:
                    intgr.integrater_reset_reindex_operator()
                    need_to_return = True
            # SUMMARY do pointless_indexer on each sweep, get lattices and
            # make a list of unique lattices, potentially reset reindex op.
        # END OF if not multi_sweep, or input pg given

        # SUMMARY - still within if more than one epoch, now have the list
        # of lattices found

        # START OF if multiple-lattices
        if len(lattices) > 1:
            # why not using pointless indexer jiffy??!
            correct_lattice = sort_lattices(lattices)[0]
            logger.info("Correct lattice asserted to be %s", correct_lattice)

            # transfer this information back to the indexers
            for epoch in self._sweep_handler.get_epochs():
                si = self._sweep_handler.get_sweep_information(epoch)
                refiner = si.get_integrater().get_integrater_refiner()
                sname = si.get_sweep_name()

                state = refiner.set_refiner_asserted_lattice(correct_lattice)

                if state == refiner.LATTICE_CORRECT:
                    logger.info("Lattice %s ok for sweep %s", correct_lattice, sname)
                elif state == refiner.LATTICE_IMPOSSIBLE:
                    raise RuntimeError(
                        f"Lattice {correct_lattice} impossible for {sname}"
                    )
                elif state == refiner.LATTICE_POSSIBLE:
                    logger.info(
                        "Lattice %s assigned for sweep %s", correct_lattice, sname
                    )
                    need_to_return = True
        # END OF if multiple-lattices
        # SUMMARY - forced all lattices to be the same and hope it's okay.
    # END OF if more than one epoch

    # if one or more of them was not in the lowest lattice,
    # need to return here to allow reprocessing

    if need_to_return:
        self.set_scaler_done(False)
        self.set_scaler_prepare_done(False)
        return

    # ---------- REINDEX ALL DATA TO CORRECT POINTGROUP ----------

    # all should share the same pointgroup, unless twinned... in which
    # case force them to be...
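    # e.g. if twinning makes the sweeps disagree (say P 4 2 2 for one
    # sweep and P 4 for another), the group with the lowest space-group
    # number is imposed on every sweep below: P 4 (75) rather than
    # P 4 2 2 (89).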
    pointgroups = {}
    reindex_ops = {}
    probably_twinned = False

    need_to_return = False

    multi_sweep_indexing = PhilIndex.params.xia2.settings.multi_sweep_indexing

    # START OF if multi-sweep and not input pg
    if multi_sweep_indexing and not self._scalr_input_pointgroup:
        pointless_hklins = []

        max_batches = 0
        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            hklin = si.get_reflections()

            batches = MtzUtils.batches_from_mtz(hklin)
            if 1 + max(batches) - min(batches) > max_batches:
                max_batches = max(batches) - min(batches) + 1

        logger.debug("Biggest sweep has %d batches", max_batches)
        max_batches = nifty_power_of_ten(max_batches)

        counter = 0

        refiners = []

        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            hklin = si.get_reflections()
            integrater = si.get_integrater()
            refiner = integrater.get_integrater_refiner()
            refiners.append(refiner)

            hklin = self._prepare_pointless_hklin(
                hklin, si.get_integrater().get_phi_width()
            )

            hklout = os.path.join(
                self.get_working_directory(),
                "%s_%s_%s_%s_prepointless.mtz"
                % (pname, xname, dname, si.get_sweep_name()),
            )

            # we will want to delete this one on exit
            FileHandler.record_temporary_file(hklout)

            first_batch = min(si.get_batches())
            si.set_batch_offset(counter * max_batches - first_batch + 1)

            rebatch(
                hklin,
                hklout,
                first_batch=counter * max_batches + 1,
                pname=pname,
                xname=xname,
                dname=dname,
            )

            pointless_hklins.append(hklout)

            # update the counter & recycle
            counter += 1

        # FIXME related to xia2-51 - this looks very very similar to the
        # logic in [A] above - is this duplicated logic?

        s = self._factory.Sortmtz()
        pointless_hklin = os.path.join(
            self.get_working_directory(),
            "%s_%s_prepointless_sorted.mtz"
            % (self._scalr_pname, self._scalr_xname),
        )

        s.set_hklout(pointless_hklin)

        for hklin in pointless_hklins:
            s.add_hklin(hklin)

        s.sort()

        pointless_const = os.path.join(
            self.get_working_directory(),
            f"{self._scalr_pname}_{self._scalr_xname}_prepointless_const.mtz",
        )
        FileHandler.record_temporary_file(pointless_const)

        aimless_const = self._factory.Aimless()
        aimless_const.set_hklin(pointless_hklin)
        aimless_const.set_hklout(pointless_const)
        aimless_const.const()

        pointless_const = os.path.join(
            self.get_working_directory(),
            "%s_%s_prepointless_const_unmerged.mtz"
            % (self._scalr_pname, self._scalr_xname),
        )
        FileHandler.record_temporary_file(pointless_const)

        pointless_hklin = pointless_const

        pointgroup, reindex_op, ntr, pt = self._pointless_indexer_multisweep(
            pointless_hklin, refiners
        )

        for epoch in self._sweep_handler.get_epochs():
            pointgroups[epoch] = pointgroup
            reindex_ops[epoch] = reindex_op
        # SUMMARY ran pointless multisweep on combined mtz and made a dict
        # of pointgroups and reindex_ops (all same)
    # END OF if multi-sweep and not input pg

    # START OF if not multi-sweep or pg given
    else:
        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)

            hklin = si.get_reflections()

            integrater = si.get_integrater()
            refiner = integrater.get_integrater_refiner()

            if self._scalr_input_pointgroup:
                logger.debug(
                    "Using input pointgroup: %s", self._scalr_input_pointgroup
                )
                pointgroup = self._scalr_input_pointgroup
                reindex_op = "h,k,l"
                pt = False

            else:
                pointless_hklin = self._prepare_pointless_hklin(
                    hklin, si.get_integrater().get_phi_width()
                )

                pointgroup, reindex_op, ntr, pt = self._pointless_indexer_jiffy(
                    pointless_hklin, refiner
                )

                logger.debug("X1698: %s: %s", pointgroup, reindex_op)

                if ntr:
                    integrater.integrater_reset_reindex_operator()
                    need_to_return = True

            if pt and not probably_twinned:
                probably_twinned = True

            logger.debug("Pointgroup: %s (%s)", pointgroup, reindex_op)

            pointgroups[epoch] = pointgroup
            reindex_ops[epoch] = reindex_op
        # SUMMARY - for each sweep, run indexer jiffy and get reindex
        # operators and pointgroups dictionaries (could be different
        # between sweeps)
    # END OF if not multi-sweep or pg given

    overall_pointgroup = None

    pointgroup_set = {pointgroups[e] for e in pointgroups}

    if len(pointgroup_set) > 1 and not probably_twinned:
        raise RuntimeError(
            "non uniform pointgroups: %s" % str(list(pointgroup_set))
        )

    if len(pointgroup_set) > 1:
        logger.debug(
            "Probably twinned, pointgroups: %s",
            " ".join(p.replace(" ", "") for p in pointgroup_set),
        )
        numbers = (Syminfo.spacegroup_name_to_number(ps) for ps in pointgroup_set)
        overall_pointgroup = Syminfo.spacegroup_number_to_name(min(numbers))
        self._scalr_input_pointgroup = overall_pointgroup
        logger.info("Twinning detected, assume pointgroup %s", overall_pointgroup)
        need_to_return = True
    else:
        overall_pointgroup = pointgroup_set.pop()
    # SUMMARY - have handled if different pointgroups & chosen an
    # overall_pointgroup which is the lowest symmetry

    # Now go through sweeps and do reindexing
    for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)

        integrater = si.get_integrater()

        integrater.set_integrater_spacegroup_number(
            Syminfo.spacegroup_name_to_number(overall_pointgroup)
        )
        integrater.set_integrater_reindex_operator(
            reindex_ops[epoch], reason="setting point group"
        )
        # This will give us the reflections in the correct point group
        si.set_reflections(integrater.get_integrater_intensities())

    if need_to_return:
        self.set_scaler_done(False)
        self.set_scaler_prepare_done(False)
        return

    # in here now optionally work through the data files which should be
    # indexed with a consistent point group, and transform the orientation
    # matrices by the lattice symmetry operations (if possible) to get a
    # consistent definition of U matrix modulo fixed rotations

    if PhilIndex.params.xia2.settings.unify_setting:
        self.unify_setting()

    if self.get_scaler_reference_reflection_file():
        self._reference = self.get_scaler_reference_reflection_file()
        logger.debug("Using HKLREF %s", self._reference)

    elif PhilIndex.params.xia2.settings.scale.reference_reflection_file:
        self._reference = (
            PhilIndex.params.xia2.settings.scale.reference_reflection_file
        )
        logger.debug("Using HKLREF %s", self._reference)

    params = PhilIndex.params
    use_brehm_diederichs = params.xia2.settings.use_brehm_diederichs
    if len(self._sweep_handler.get_epochs()) > 1 and use_brehm_diederichs:
        self.brehm_diederichs_reindexing()
    # If not Brehm-Diederichs, set reference as first sweep
    elif len(self._sweep_handler.get_epochs()) > 1 and not self._reference:
        first = self._sweep_handler.get_epochs()[0]
        si = self._sweep_handler.get_sweep_information(first)
        self._reference = si.get_reflections()

    # Now reindex to be consistent with first dataset - run pointless on
    # each dataset with reference
    if self._reference:

        md = self._factory.Mtzdump()
        md.set_hklin(self._reference)
        md.dump()

        datasets = md.get_datasets()

        # then get the unit cell, lattice etc.

        reference_lattice = Syminfo.get_lattice(md.get_spacegroup())
        reference_cell = md.get_dataset_info(datasets[0])["cell"]

        # then compute the pointgroup from this...
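        # (assumption: pointless run with HKLREF below chooses, among the
        # symmetry-equivalent alternative indexings, the reindex operator
        # that best correlates each sweep with this reference setting)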
        # ---------- REINDEX TO CORRECT (REFERENCE) SETTING ----------

        for epoch in self._sweep_handler.get_epochs():

            # if we are working with unified UB matrix then this should not
            # be a problem here (note, *if*; *should*)

            # what about e.g. alternative P1 settings?
            # see JIRA MXSW-904
            if PhilIndex.params.xia2.settings.unify_setting:
                continue

            pl = self._factory.Pointless()

            si = self._sweep_handler.get_sweep_information(epoch)
            hklin = si.get_reflections()

            pl.set_hklin(
                self._prepare_pointless_hklin(
                    hklin, si.get_integrater().get_phi_width()
                )
            )

            hklout = os.path.join(
                self.get_working_directory(),
                "%s_rdx2.mtz" % os.path.split(hklin)[-1][:-4],
            )

            # we will want to delete this one on exit
            FileHandler.record_temporary_file(hklout)

            # now set the initial reflection set as a reference...

            pl.set_hklref(self._reference)

            # https://github.com/xia2/xia2/issues/115 - should ideally
            # iteratively construct a reference or a tree of correlations
            # to ensure correct reference setting - however if small
            # molecule assume has been multi-sweep-indexed so can ignore
            # "fatal errors" - temporary hack
            pl.decide_pointgroup(
                ignore_errors=PhilIndex.params.xia2.settings.small_molecule
            )

            logger.debug("Reindexing analysis of %s", pl.get_hklin())

            pointgroup = pl.get_pointgroup()
            reindex_op = pl.get_reindex_operator()

            logger.debug("Operator: %s", reindex_op)

            # apply this...

            integrater = si.get_integrater()

            integrater.set_integrater_reindex_operator(
                reindex_op, reason="match reference"
            )
            integrater.set_integrater_spacegroup_number(
                Syminfo.spacegroup_name_to_number(pointgroup)
            )
            si.set_reflections(integrater.get_integrater_intensities())

            md = self._factory.Mtzdump()
            md.set_hklin(si.get_reflections())
            md.dump()

            datasets = md.get_datasets()

            if len(datasets) > 1:
                raise RuntimeError(
                    "more than one dataset in %s" % si.get_reflections()
                )

            # then get the unit cell, lattice etc.

            lattice = Syminfo.get_lattice(md.get_spacegroup())
            cell = md.get_dataset_info(datasets[0])["cell"]

            if lattice != reference_lattice:
                raise RuntimeError(
                    "lattices differ in %s and %s"
                    % (self._reference, si.get_reflections())
                )

            logger.debug("Cell: %.2f %.2f %.2f %.2f %.2f %.2f" % cell)
            logger.debug("Ref: %.2f %.2f %.2f %.2f %.2f %.2f" % reference_cell)

            for j in range(6):
                if (
                    math.fabs((cell[j] - reference_cell[j]) / reference_cell[j])
                    > 0.1
                ):
                    raise RuntimeError(
                        "unit cell parameters differ in %s and %s"
                        % (self._reference, si.get_reflections())
                    )

    # ---------- SORT TOGETHER DATA ----------

    self._sort_together_data_ccp4()

    self._scalr_resolution_limits = {}

    # store central resolution limit estimates

    batch_ranges = [
        self._sweep_handler.get_sweep_information(epoch).get_batch_range()
        for epoch in self._sweep_handler.get_epochs()
    ]

    self._resolution_limit_estimates = ersatz_resolution(
        self._prepared_reflections, batch_ranges
    )
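
# A minimal, self-contained sketch of the pointgroup reconciliation rule
# used in _scale_prepare above (illustrative only; the real code goes
# through Syminfo): when suspected twinning leaves the sweeps
# disagreeing, the group with the lowest space-group number wins.
def _choose_pointgroup_sketch(pointgroups, number):
    """pointgroups: dict of epoch -> name; number: dict of name -> IT number."""
    unique = set(pointgroups.values())
    if len(unique) == 1:
        return unique.pop()
    return min(unique, key=number.get)  # lowest symmetry wins

# e.g. _choose_pointgroup_sketch({1: "P 4 2 2", 2: "P 4"},
#                                {"P 4 2 2": 89, "P 4": 75}) -> "P 4"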

def _sort_together_data_ccp4(self):
    '''Sort together in the right order (rebatching as we go) the sweeps
    we want to scale together.'''

    max_batches = 0

    for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)
        pname, xname, dname = si.get_project_info()
        sname = si.get_sweep_name()

        # limit the reflections - e.g. if we are re-running the scaling
        # step on just a subset of the integrated data
        hklin = si.get_reflections()
        limit_batch_range = None
        for sweep in PhilIndex.params.xia2.settings.sweep:
            if sweep.id == sname and sweep.range is not None:
                limit_batch_range = sweep.range
                break

        if limit_batch_range is not None:
            Debug.write('Limiting batch range for %s: %s' %
                        (sname, limit_batch_range))
            start, end = limit_batch_range
            hklout = os.path.splitext(hklin)[0] + '_tmp.mtz'
            FileHandler.record_temporary_file(hklout)
            rb = self._factory.Pointless()
            rb.set_hklin(hklin)
            rb.set_hklout(hklout)
            rb.limit_batches(start, end)
            si.set_reflections(hklout)
            si.set_batches(limit_batch_range)

        # keep a count of the maximum number of batches in a block -
        # this will be used to make rebatch work below.

        hklin = si.get_reflections()

        batches = MtzUtils.batches_from_mtz(hklin)
        if 1 + max(batches) - min(batches) > max_batches:
            max_batches = max(batches) - min(batches) + 1

    Debug.write('Biggest sweep has %d batches' % max_batches)

    max_batches = nifty_power_of_ten(max_batches)

    # then rebatch the files, to make sure that the batch numbers are
    # in the same order as the epochs of data collection.

    counter = 0

    for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)

        hklin = si.get_reflections()

        pname, xname, dname = si.get_project_info()
        sname = si.get_sweep_name()

        hklout = os.path.join(self.get_working_directory(),
                              '%s_%s_%s_%s_integrated.mtz' %
                              (pname, xname, dname, sname))

        first_batch = min(si.get_batches())
        si.set_batch_offset(counter * max_batches - first_batch + 1)

        from xia2.Modules.Scaler.rebatch import rebatch
        new_batches = rebatch(hklin, hklout,
                              first_batch=counter * max_batches + 1,
                              pname=pname, xname=xname, dname=dname)

        # update the "input information"
        si.set_reflections(hklout)
        si.set_batches(new_batches)

        # update the counter & recycle
        counter += 1

    s = self._factory.Sortmtz()

    hklout = os.path.join(self.get_working_directory(),
                          '%s_%s_sorted.mtz' %
                          (self._scalr_pname, self._scalr_xname))

    s.set_hklout(hklout)

    for epoch in self._sweep_handler.get_epochs():
        s.add_hklin(self._sweep_handler.get_sweep_information(
            epoch).get_reflections())

    s.sort()

    # verify that the measurements are in the correct setting
    # choice for the spacegroup

    hklin = hklout
    hklout = hklin.replace('sorted.mtz', 'temp.mtz')

    if not self.get_scaler_reference_reflection_file():

        if PhilIndex.params.xia2.settings.symmetry.program == 'dials':
            p = self._factory.dials_symmetry()
        else:
            p = self._factory.Pointless()

        FileHandler.record_log_file('%s %s pointless' %
                                    (self._scalr_pname, self._scalr_xname),
                                    p.get_log_file())

        if len(self._sweep_handler.get_epochs()) > 1:
            p.set_hklin(hklin)
        else:
            # permit the use of pointless preparation...
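            # (assumption: _prepare_pointless_hklin trims a lone sweep to
            # a representative wedge based on the image phi width, so the
            # symmetry analysis below stays fast)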
            epoch = self._sweep_handler.get_epochs()[0]
            p.set_hklin(self._prepare_pointless_hklin(
                hklin, self._sweep_handler.get_sweep_information(
                    epoch).get_integrater().get_phi_width()))

        if self._scalr_input_spacegroup:
            Debug.write('Assigning user input spacegroup: %s' %
                        self._scalr_input_spacegroup)

            p.decide_spacegroup()
            spacegroup = p.get_spacegroup()
            reindex_operator = p.get_spacegroup_reindex_operator()

            Debug.write('Pointless thought %s (reindex as %s)' %
                        (spacegroup, reindex_operator))

            spacegroup = self._scalr_input_spacegroup
            reindex_operator = 'h,k,l'
            self._spacegroup_reindex_operator = reindex_operator

        else:
            p.decide_spacegroup()
            spacegroup = p.get_spacegroup()
            reindex_operator = p.get_spacegroup_reindex_operator()
            self._spacegroup_reindex_operator = clean_reindex_operator(
                reindex_operator)
            Debug.write('Pointless thought %s (reindex as %s)' %
                        (spacegroup, reindex_operator))

        if self._scalr_input_spacegroup:
            self._scalr_likely_spacegroups = [self._scalr_input_spacegroup]
        else:
            self._scalr_likely_spacegroups = p.get_likely_spacegroups()

        Chatter.write('Likely spacegroups:')
        for spag in self._scalr_likely_spacegroups:
            Chatter.write('%s' % spag)

        Chatter.write(
            'Reindexing to first spacegroup setting: %s (%s)' %
            (spacegroup, clean_reindex_operator(reindex_operator)))

    else:
        spacegroup = MtzUtils.space_group_name_from_mtz(
            self.get_scaler_reference_reflection_file())
        reindex_operator = 'h,k,l'

        self._scalr_likely_spacegroups = [spacegroup]

        Debug.write('Assigning spacegroup %s from reference' % spacegroup)

    # then run reindex to set the correct spacegroup

    ri = self._factory.Reindex()
    ri.set_hklin(hklin)
    ri.set_hklout(hklout)
    ri.set_spacegroup(spacegroup)
    ri.set_operator(reindex_operator)
    ri.reindex()

    FileHandler.record_temporary_file(hklout)

    # then resort the reflections (one last time!)

    s = self._factory.Sortmtz()

    temp = hklin
    hklin = hklout
    hklout = temp

    s.add_hklin(hklin)
    s.set_hklout(hklout)

    s.sort()

    # done preparing!

    self._prepared_reflections = s.get_hklout()
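
# Sketch of the batch-offset identity used by the rebatch loop above
# (illustrative): with offset = counter * max_batches - first_batch + 1,
# the first batch of sweep number `counter` always maps to
# counter * max_batches + 1, whatever it was numbered before.
def _batch_offset_sketch(counter, max_batches, first_batch):
    offset = counter * max_batches - first_batch + 1
    return first_batch + offset  # == counter * max_batches + 1

# e.g. _batch_offset_sketch(2, 1000, 101) -> 2001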

def _sort_together_data_xds(self):

    if len(self._sweep_information) == 1:
        return self._sort_together_data_xds_one_sweep()

    max_batches = 0

    for epoch in self._sweep_information.keys():
        hklin = self._sweep_information[epoch]['scaled_reflections']

        if self._sweep_information[epoch]['batches'] == [0, 0]:
            Chatter.write('Getting batches from %s' % hklin)
            batches = MtzUtils.batches_from_mtz(hklin)
            self._sweep_information[epoch]['batches'] = [min(batches),
                                                         max(batches)]
            Chatter.write('=> %d to %d' % (min(batches), max(batches)))

        batches = self._sweep_information[epoch]['batches']
        if 1 + max(batches) - min(batches) > max_batches:
            max_batches = max(batches) - min(batches) + 1

    Debug.write('Biggest sweep has %d batches' % max_batches)

    max_batches = nifty_power_of_ten(max_batches)

    epochs = sorted(self._sweep_information.keys())

    counter = 0

    for epoch in epochs:
        hklin = self._sweep_information[epoch]['scaled_reflections']

        pname = self._sweep_information[epoch]['pname']
        xname = self._sweep_information[epoch]['xname']
        dname = self._sweep_information[epoch]['dname']
        sname = self._sweep_information[epoch]['sname']

        hklout = os.path.join(self.get_working_directory(),
                              '%s_%s_%s_%d.mtz' %
                              (pname, xname, dname, counter))

        # we will want to delete this one on exit
        FileHandler.record_temporary_file(hklout)

        # record this for future reference - will be needed in the
        # radiation damage analysis...

        # hack - reset this as it gets in a muddle...
        intgr = self._sweep_information[epoch]['integrater']
        self._sweep_information[epoch]['batches'] = \
            intgr.get_integrater_batches()

        first_batch = min(self._sweep_information[epoch]['batches'])
        offset = counter * max_batches - first_batch + 1
        self._sweep_information[epoch]['batch_offset'] = offset

        from xia2.Modules.Scaler.rebatch import rebatch
        new_batches = rebatch(hklin, hklout, add_batch=offset,
                              pname=pname, xname=xname, dname=dname)

        # update the "input information"
        self._sweep_information[epoch]['hklin'] = hklout
        self._sweep_information[epoch]['batches'] = new_batches

        # update the counter & recycle
        counter += 1

    s = self._factory.Sortmtz()

    hklout = os.path.join(self.get_working_directory(),
                          '%s_%s_sorted.mtz' %
                          (self._scalr_pname, self._scalr_xname))

    s.set_hklout(hklout)

    for epoch in epochs:
        s.add_hklin(self._sweep_information[epoch]['hklin'])

    s.sort(vrset=-99999999.0)

    self._prepared_reflections = hklout

    if self.get_scaler_reference_reflection_file():
        spacegroups = [MtzUtils.space_group_name_from_mtz(
            self.get_scaler_reference_reflection_file())]
        reindex_operator = 'h,k,l'

    else:
        pointless = self._factory.Pointless()
        pointless.set_hklin(hklout)
        pointless.decide_spacegroup()

        FileHandler.record_log_file('%s %s pointless' %
                                    (self._scalr_pname, self._scalr_xname),
                                    pointless.get_log_file())

        spacegroups = pointless.get_likely_spacegroups()
        reindex_operator = pointless.get_spacegroup_reindex_operator()

        if self._scalr_input_spacegroup:
            Debug.write('Assigning user input spacegroup: %s' %
                        self._scalr_input_spacegroup)
            spacegroups = [self._scalr_input_spacegroup]
            reindex_operator = 'h,k,l'

    self._scalr_likely_spacegroups = spacegroups
    spacegroup = self._scalr_likely_spacegroups[0]

    self._scalr_reindex_operator = reindex_operator

    Chatter.write('Likely spacegroups:')
    for spag in self._scalr_likely_spacegroups:
        Chatter.write('%s' % spag)

    Chatter.write(
        'Reindexing to first spacegroup setting: %s (%s)' %
        (spacegroup, clean_reindex_operator(reindex_operator)))

    hklin = self._prepared_reflections
    hklout = os.path.join(self.get_working_directory(),
                          '%s_%s_reindex.mtz' %
                          (self._scalr_pname, self._scalr_xname))

    FileHandler.record_temporary_file(hklout)

    ri = self._factory.Reindex()
    ri.set_hklin(hklin)
    ri.set_hklout(hklout)
    ri.set_spacegroup(spacegroup)
    ri.set_operator(reindex_operator)
    ri.reindex()

    hklin = hklout
    hklout = os.path.join(self.get_working_directory(),
                          '%s_%s_sorted.mtz' %
                          (self._scalr_pname, self._scalr_xname))

    s = self._factory.Sortmtz()
    s.set_hklin(hklin)
    s.set_hklout(hklout)

    s.sort(vrset=-99999999.0)

    self._prepared_reflections = hklout

    Debug.write('Updating unit cell to %.2f %.2f %.2f %.2f %.2f %.2f' %
                tuple(ri.get_cell()))
    self._scalr_cell = tuple(ri.get_cell())

    return

def _scale_prepare(self):
    '''Perform all of the preparation required to deliver the scaled
    data. This should sort together the reflection files, ensure that
    they are correctly indexed (via pointless) and generally tidy
    things up.'''

    # acknowledge all of the programs we are about to use...
    Citations.cite('pointless')
    Citations.cite('aimless')
    Citations.cite('ccp4')

    # ---------- GATHER ----------

    self._sweep_handler = SweepInformationHandler(self._scalr_integraters)

    Journal.block(
        'gathering', self.get_scaler_xcrystal().get_name(), 'CCP4',
        {'working directory': self.get_working_directory()})

    for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)
        pname, xname, dname = si.get_project_info()
        sname = si.get_sweep_name()

        exclude_sweep = False

        for sweep in PhilIndex.params.xia2.settings.sweep:
            if sweep.id == sname and sweep.exclude:
                exclude_sweep = True
                break

        if exclude_sweep:
            self._sweep_handler.remove_epoch(epoch)
            Debug.write('Excluding sweep %s' % sname)
        else:
            Journal.entry({'adding data from': '%s/%s/%s' %
                           (xname, dname, sname)})

    # gather data for all images which belonged to the parent
    # crystal - allowing for the fact that things could go wrong
    # e.g. epoch information not available, exposure times not in
    # headers etc...

    for e in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(e)
        assert is_mtz_file(si.get_reflections())

    p, x = self._sweep_handler.get_project_info()
    self._scalr_pname = p
    self._scalr_xname = x

    # verify that the lattices are consistent, calling eliminate if
    # they are not N.B. there could be corner cases here

    need_to_return = False

    multi_sweep_indexing = PhilIndex.params.xia2.settings.multi_sweep_indexing

    if len(self._sweep_handler.get_epochs()) > 1:

        # if we have multi-sweep-indexing going on then logic says all should
        # share common lattice & UB definition => this is not used here?
        if multi_sweep_indexing and not self._scalr_input_pointgroup:
            pointless_hklins = []

            max_batches = 0
            for epoch in self._sweep_handler.get_epochs():
                si = self._sweep_handler.get_sweep_information(epoch)
                hklin = si.get_reflections()

                batches = MtzUtils.batches_from_mtz(hklin)
                if 1 + max(batches) - min(batches) > max_batches:
                    max_batches = max(batches) - min(batches) + 1

            from xia2.lib.bits import nifty_power_of_ten

            Debug.write('Biggest sweep has %d batches' % max_batches)
            max_batches = nifty_power_of_ten(max_batches)

            counter = 0

            refiners = []

            for epoch in self._sweep_handler.get_epochs():
                si = self._sweep_handler.get_sweep_information(epoch)
                hklin = si.get_reflections()
                integrater = si.get_integrater()
                refiner = integrater.get_integrater_refiner()
                refiners.append(refiner)

                hklin = self._prepare_pointless_hklin(
                    hklin, si.get_integrater().get_phi_width())

                hklout = os.path.join(self.get_working_directory(),
                                      '%s_%s_%s_%s_prepointless.mtz' %
                                      (pname, xname, dname,
                                       si.get_sweep_name()))

                # we will want to delete this one on exit
                FileHandler.record_temporary_file(hklout)

                first_batch = min(si.get_batches())
                si.set_batch_offset(counter * max_batches - first_batch + 1)

                from xia2.Modules.Scaler.rebatch import rebatch
                new_batches = rebatch(
                    hklin, hklout,
                    first_batch=counter * max_batches + 1,
                    pname=pname, xname=xname, dname=dname)

                pointless_hklins.append(hklout)

                # update the counter & recycle
                counter += 1

            s = self._factory.Sortmtz()

            pointless_hklin = os.path.join(self.get_working_directory(),
                                           '%s_%s_prepointless_sorted.mtz' %
                                           (self._scalr_pname,
                                            self._scalr_xname))

            s.set_hklout(pointless_hklin)

            for hklin in pointless_hklins:
                s.add_hklin(hklin)

            s.sort()

            # FIXME xia2-51 in here look at running constant scaling on the
            # pointless hklin to put the runs on the same scale. Ref=[A]

            pointless_const = os.path.join(self.get_working_directory(),
                                           '%s_%s_prepointless_const.mtz' %
                                           (self._scalr_pname,
                                            self._scalr_xname))
            FileHandler.record_temporary_file(pointless_const)

            aimless_const = self._factory.Aimless()
            aimless_const.set_hklin(pointless_hklin)
            aimless_const.set_hklout(pointless_const)
            aimless_const.const()

            pointless_const = os.path.join(
                self.get_working_directory(),
                '%s_%s_prepointless_const_unmerged.mtz' %
                (self._scalr_pname, self._scalr_xname))
            FileHandler.record_temporary_file(pointless_const)

            pointless_hklin = pointless_const

            # FIXME xia2-51 in here need to pass all refiners to ensure that
            # the information is passed back to all of them not just the
            # last one...
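            # (the refiners list gathered above is passed to
            # _pointless_indexer_multisweep below so that the pointgroup
            # decision is fed back to every sweep's refiner, addressing
            # the FIXME)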
            Debug.write('Running multisweep pointless for %d sweeps' %
                        len(refiners))
            pointgroup, reindex_op, ntr, pt = \
                self._pointless_indexer_multisweep(pointless_hklin, refiners)

            Debug.write('X1698: %s: %s' % (pointgroup, reindex_op))

            lattices = [Syminfo.get_lattice(pointgroup)]

            for epoch in self._sweep_handler.get_epochs():
                si = self._sweep_handler.get_sweep_information(epoch)
                intgr = si.get_integrater()
                hklin = si.get_reflections()
                refiner = intgr.get_integrater_refiner()

                if ntr:
                    intgr.integrater_reset_reindex_operator()
                    need_to_return = True

        else:
            lattices = []

            for epoch in self._sweep_handler.get_epochs():
                si = self._sweep_handler.get_sweep_information(epoch)
                intgr = si.get_integrater()
                hklin = si.get_reflections()
                refiner = intgr.get_integrater_refiner()

                if self._scalr_input_pointgroup:
                    pointgroup = self._scalr_input_pointgroup
                    reindex_op = 'h,k,l'
                    ntr = False
                else:
                    pointless_hklin = self._prepare_pointless_hklin(
                        hklin, si.get_integrater().get_phi_width())

                    pointgroup, reindex_op, ntr, pt = \
                        self._pointless_indexer_jiffy(
                            pointless_hklin, refiner)

                    Debug.write('X1698: %s: %s' % (pointgroup, reindex_op))

                lattice = Syminfo.get_lattice(pointgroup)

                if lattice not in lattices:
                    lattices.append(lattice)

                if ntr:
                    intgr.integrater_reset_reindex_operator()
                    need_to_return = True

        if len(lattices) > 1:
            # why not using pointless indexer jiffy??!
            correct_lattice = sort_lattices(lattices)[0]

            Chatter.write('Correct lattice asserted to be %s' %
                          correct_lattice)

            # transfer this information back to the indexers
            for epoch in self._sweep_handler.get_epochs():
                si = self._sweep_handler.get_sweep_information(epoch)
                refiner = si.get_integrater().get_integrater_refiner()
                sname = si.get_sweep_name()

                state = refiner.set_refiner_asserted_lattice(
                    correct_lattice)

                if state == refiner.LATTICE_CORRECT:
                    Chatter.write('Lattice %s ok for sweep %s' %
                                  (correct_lattice, sname))
                elif state == refiner.LATTICE_IMPOSSIBLE:
                    raise RuntimeError('Lattice %s impossible for %s' %
                                       (correct_lattice, sname))
                elif state == refiner.LATTICE_POSSIBLE:
                    Chatter.write('Lattice %s assigned for sweep %s' %
                                  (correct_lattice, sname))
                    need_to_return = True

    # if one or more of them was not in the lowest lattice,
    # need to return here to allow reprocessing

    if need_to_return:
        self.set_scaler_done(False)
        self.set_scaler_prepare_done(False)
        return

    # ---------- REINDEX ALL DATA TO CORRECT POINTGROUP ----------

    # all should share the same pointgroup, unless twinned... in which
    # case force them to be...
    pointgroups = {}
    reindex_ops = {}
    probably_twinned = False

    need_to_return = False

    multi_sweep_indexing = PhilIndex.params.xia2.settings.multi_sweep_indexing

    if multi_sweep_indexing and not self._scalr_input_pointgroup:
        pointless_hklins = []

        max_batches = 0
        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            hklin = si.get_reflections()

            batches = MtzUtils.batches_from_mtz(hklin)
            if 1 + max(batches) - min(batches) > max_batches:
                max_batches = max(batches) - min(batches) + 1

        from xia2.lib.bits import nifty_power_of_ten

        Debug.write('Biggest sweep has %d batches' % max_batches)
        max_batches = nifty_power_of_ten(max_batches)

        counter = 0

        refiners = []

        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            hklin = si.get_reflections()
            integrater = si.get_integrater()
            refiner = integrater.get_integrater_refiner()
            refiners.append(refiner)

            hklin = self._prepare_pointless_hklin(
                hklin, si.get_integrater().get_phi_width())

            hklout = os.path.join(self.get_working_directory(),
                                  '%s_%s_%s_%s_prepointless.mtz' %
                                  (pname, xname, dname,
                                   si.get_sweep_name()))

            # we will want to delete this one on exit
            FileHandler.record_temporary_file(hklout)

            first_batch = min(si.get_batches())
            si.set_batch_offset(counter * max_batches - first_batch + 1)

            from xia2.Modules.Scaler.rebatch import rebatch
            new_batches = rebatch(
                hklin, hklout,
                first_batch=counter * max_batches + 1,
                pname=pname, xname=xname, dname=dname)

            pointless_hklins.append(hklout)

            # update the counter & recycle
            counter += 1

        # FIXME related to xia2-51 - this looks very very similar to the
        # logic in [A] above - is this duplicated logic?

        s = self._factory.Sortmtz()

        pointless_hklin = os.path.join(self.get_working_directory(),
                                       '%s_%s_prepointless_sorted.mtz' %
                                       (self._scalr_pname,
                                        self._scalr_xname))

        s.set_hklout(pointless_hklin)

        for hklin in pointless_hklins:
            s.add_hklin(hklin)

        s.sort()

        pointless_const = os.path.join(self.get_working_directory(),
                                       '%s_%s_prepointless_const.mtz' %
                                       (self._scalr_pname,
                                        self._scalr_xname))
        FileHandler.record_temporary_file(pointless_const)

        aimless_const = self._factory.Aimless()
        aimless_const.set_hklin(pointless_hklin)
        aimless_const.set_hklout(pointless_const)
        aimless_const.const()

        pointless_const = os.path.join(
            self.get_working_directory(),
            '%s_%s_prepointless_const_unmerged.mtz' %
            (self._scalr_pname, self._scalr_xname))
        FileHandler.record_temporary_file(pointless_const)

        pointless_hklin = pointless_const

        pointgroup, reindex_op, ntr, pt = \
            self._pointless_indexer_multisweep(pointless_hklin, refiners)

        for epoch in self._sweep_handler.get_epochs():
            pointgroups[epoch] = pointgroup
            reindex_ops[epoch] = reindex_op

    else:
        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)

            hklin = si.get_reflections()

            integrater = si.get_integrater()
            refiner = integrater.get_integrater_refiner()

            if self._scalr_input_pointgroup:
                Debug.write('Using input pointgroup: %s' %
                            self._scalr_input_pointgroup)
                pointgroup = self._scalr_input_pointgroup
                reindex_op = 'h,k,l'
                pt = False
            else:
                pointless_hklin = self._prepare_pointless_hklin(
                    hklin, si.get_integrater().get_phi_width())

                pointgroup, reindex_op, ntr, pt = \
                    self._pointless_indexer_jiffy(
                        pointless_hklin, refiner)

                Debug.write('X1698: %s: %s' % (pointgroup, reindex_op))

                if ntr:
                    integrater.integrater_reset_reindex_operator()
                    need_to_return = True

            if pt and not probably_twinned:
                probably_twinned = True

            Debug.write('Pointgroup: %s (%s)' % (pointgroup, reindex_op))

            pointgroups[epoch] = pointgroup
            reindex_ops[epoch] = reindex_op

    overall_pointgroup = None

    pointgroup_set = {pointgroups[e] for e in pointgroups}

    if len(pointgroup_set) > 1 and not probably_twinned:
        raise RuntimeError('non uniform pointgroups')

    if len(pointgroup_set) > 1:
        Debug.write('Probably twinned, pointgroups: %s' %
                    ' '.join([p.replace(' ', '') for p in
                              list(pointgroup_set)]))
        numbers = [Syminfo.spacegroup_name_to_number(s) for s in
                   pointgroup_set]
        overall_pointgroup = Syminfo.spacegroup_number_to_name(min(numbers))
        self._scalr_input_pointgroup = overall_pointgroup
        Chatter.write('Twinning detected, assume pointgroup %s' %
                      overall_pointgroup)
        need_to_return = True
    else:
        overall_pointgroup = pointgroup_set.pop()

    for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)

        integrater = si.get_integrater()

        integrater.set_integrater_spacegroup_number(
            Syminfo.spacegroup_name_to_number(overall_pointgroup))
        integrater.set_integrater_reindex_operator(
            reindex_ops[epoch], reason='setting point group')
        # This will give us the reflections in the correct point group
        si.set_reflections(integrater.get_integrater_intensities())

    if need_to_return:
        self.set_scaler_done(False)
        self.set_scaler_prepare_done(False)
        return

    # in here now optionally work through the data files which should be
    # indexed with a consistent point group, and transform the orientation
    # matrices by the lattice symmetry operations (if possible) to get a
    # consistent definition of U matrix modulo fixed rotations

    if PhilIndex.params.xia2.settings.unify_setting:
        from scitbx.matrix import sqr
        reference_U = None
        i3 = sqr((1, 0, 0, 0, 1, 0, 0, 0, 1))

        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            intgr = si.get_integrater()
            fixed = sqr(intgr.get_goniometer().get_fixed_rotation())
            u, b, s = get_umat_bmat_lattice_symmetry_from_mtz(
                si.get_reflections())
            U = fixed.inverse() * sqr(u).transpose()
            B = sqr(b)

            if reference_U is None:
                reference_U = U
                continue

            results = []
            for op in s.all_ops():
                R = B * sqr(op.r().as_double()).transpose() * B.inverse()
                nearly_i3 = (U * R).inverse() * reference_U
                score = sum([abs(_n - _i)
                             for (_n, _i) in zip(nearly_i3, i3)])
                results.append((score, op.r().as_hkl(), op))

            results.sort()
            best = results[0]
            Debug.write('Best reindex: %s %.3f' % (best[1], best[0]))
            intgr.set_integrater_reindex_operator(
                best[2].r().inverse().as_hkl(),
                reason='unifying [U] setting')
            si.set_reflections(intgr.get_integrater_intensities())

            # recalculate to verify
            u, b, s = get_umat_bmat_lattice_symmetry_from_mtz(
                si.get_reflections())
            U = fixed.inverse() * sqr(u).transpose()
            Debug.write('New reindex: %s' % (U.inverse() * reference_U))

        # FIXME I should probably raise an exception at this stage if this
        # is not about I3...
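        # (for context: each score above is the elementwise L1 distance
        # between (U * R).inverse() * reference_U and the identity I3;
        # a score of 0 means that symmetry operation R carries this
        # sweep's setting exactly onto the reference setting)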
    if self.get_scaler_reference_reflection_file():
        self._reference = self.get_scaler_reference_reflection_file()
        Debug.write('Using HKLREF %s' % self._reference)

    elif PhilIndex.params.xia2.settings.scale.reference_reflection_file:
        self._reference = \
            PhilIndex.params.xia2.settings.scale.reference_reflection_file
        Debug.write('Using HKLREF %s' % self._reference)

    params = PhilIndex.params
    use_brehm_diederichs = params.xia2.settings.use_brehm_diederichs
    if len(self._sweep_handler.get_epochs()) > 1 and use_brehm_diederichs:
        brehm_diederichs_files_in = []
        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            hklin = si.get_reflections()
            brehm_diederichs_files_in.append(hklin)

        # now run cctbx.brehm_diederichs to figure out the indexing hand
        # for each sweep
        from xia2.Wrappers.Cctbx.BrehmDiederichs import BrehmDiederichs
        from xia2.lib.bits import auto_logfiler
        brehm_diederichs = BrehmDiederichs()
        brehm_diederichs.set_working_directory(self.get_working_directory())
        auto_logfiler(brehm_diederichs)
        brehm_diederichs.set_input_filenames(brehm_diederichs_files_in)
        # 1 or 3? 1 seems to work better?
        brehm_diederichs.set_asymmetric(1)
        brehm_diederichs.run()
        reindexing_dict = brehm_diederichs.get_reindexing_dict()

        for epoch in self._sweep_handler.get_epochs():
            si = self._sweep_handler.get_sweep_information(epoch)
            intgr = si.get_integrater()
            hklin = si.get_reflections()

            reindex_op = reindexing_dict.get(os.path.abspath(hklin))
            assert reindex_op is not None

            if 1 or reindex_op != 'h,k,l':
                # apply the reindexing operator
                intgr.set_integrater_reindex_operator(
                    reindex_op, reason='match reference')
                si.set_reflections(intgr.get_integrater_intensities())

    elif len(self._sweep_handler.get_epochs()) > 1 and \
            not self._reference:
        first = self._sweep_handler.get_epochs()[0]
        si = self._sweep_handler.get_sweep_information(first)
        self._reference = si.get_reflections()

    if self._reference:

        md = self._factory.Mtzdump()
        md.set_hklin(self._reference)
        md.dump()

        if md.get_batches() and False:
            raise RuntimeError('reference reflection file %s unmerged' %
                               self._reference)

        datasets = md.get_datasets()

        if len(datasets) > 1 and False:
            raise RuntimeError('more than one dataset in %s' %
                               self._reference)

        # then get the unit cell, lattice etc.

        reference_lattice = Syminfo.get_lattice(md.get_spacegroup())
        reference_cell = md.get_dataset_info(datasets[0])['cell']

        # then compute the pointgroup from this...

        # ---------- REINDEX TO CORRECT (REFERENCE) SETTING ----------

        for epoch in self._sweep_handler.get_epochs():

            # if we are working with unified UB matrix then this should not
            # be a problem here (note, *if*; *should*)

            # what about e.g. alternative P1 settings?
            # see JIRA MXSW-904
            if PhilIndex.params.xia2.settings.unify_setting:
                continue

            pl = self._factory.Pointless()

            si = self._sweep_handler.get_sweep_information(epoch)
            hklin = si.get_reflections()

            pl.set_hklin(self._prepare_pointless_hklin(
                hklin, si.get_integrater().get_phi_width()))

            hklout = os.path.join(
                self.get_working_directory(),
                '%s_rdx2.mtz' % os.path.split(hklin)[-1][:-4])

            # we will want to delete this one on exit
            FileHandler.record_temporary_file(hklout)

            # now set the initial reflection set as a reference...
            pl.set_hklref(self._reference)

            # https://github.com/xia2/xia2/issues/115 - should ideally
            # iteratively construct a reference or a tree of correlations
            # to ensure a correct reference setting - however for small
            # molecules assume the data have been multi-sweep-indexed, so
            # "fatal errors" can be ignored - temporary hack
            pl.decide_pointgroup(
                ignore_errors=PhilIndex.params.xia2.settings.small_molecule)

            Debug.write('Reindexing analysis of %s' % pl.get_hklin())

            pointgroup = pl.get_pointgroup()
            reindex_op = pl.get_reindex_operator()

            Debug.write('Operator: %s' % reindex_op)

            # apply this...
            integrater = si.get_integrater()
            integrater.set_integrater_reindex_operator(
                reindex_op, reason='match reference')
            integrater.set_integrater_spacegroup_number(
                Syminfo.spacegroup_name_to_number(pointgroup))
            si.set_reflections(integrater.get_integrater_intensities())

            md = self._factory.Mtzdump()
            md.set_hklin(si.get_reflections())
            md.dump()

            datasets = md.get_datasets()
            if len(datasets) > 1:
                raise RuntimeError('more than one dataset in %s' %
                                   si.get_reflections())

            # then get the unit cell, lattice etc.
            lattice = Syminfo.get_lattice(md.get_spacegroup())
            cell = md.get_dataset_info(datasets[0])['cell']

            if lattice != reference_lattice:
                raise RuntimeError('lattices differ in %s and %s' %
                                   (self._reference, si.get_reflections()))

            Debug.write('Cell: %.2f %.2f %.2f %.2f %.2f %.2f' % cell)
            Debug.write('Ref: %.2f %.2f %.2f %.2f %.2f %.2f' %
                        reference_cell)

            # allow at most a 10% relative discrepancy per cell parameter
            for j in range(6):
                if math.fabs((cell[j] - reference_cell[j]) /
                             reference_cell[j]) > 0.1:
                    raise RuntimeError(
                        'unit cell parameters differ in %s and %s' %
                        (self._reference, si.get_reflections()))

    # ---------- SORT TOGETHER DATA ----------

    self._sort_together_data_ccp4()

    self._scalr_resolution_limits = {}

    # store central resolution limit estimates
    batch_ranges = [
        self._sweep_handler.get_sweep_information(epoch).get_batch_range()
        for epoch in self._sweep_handler.get_epochs()]

    self._resolution_limit_estimates = ersatz_resolution(
        self._prepared_reflections, batch_ranges)
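# --------------------------------------------------------------------------
# A minimal, self-contained sketch of the [U]-setting unification scoring
# used in _scale_prepare above: among the candidate rotations, pick the one
# that brings this sweep's U matrix closest (elementwise) to the reference.
# This simplified version drops the B-matrix conjugation and the fixed
# goniometer rotation for brevity; closest_setting() and its plain 9-tuple
# rotation arguments are illustrative, not xia2 API.

def closest_setting(U, reference_U, rotations):
    '''U, reference_U: scitbx.matrix.sqr 3x3 matrices; rotations: iterable
    of 9-tuples. Return (score, R) for the lowest-scoring rotation.'''
    from scitbx.matrix import sqr
    i3 = sqr((1, 0, 0, 0, 1, 0, 0, 0, 1))
    results = []
    for rot in rotations:
        R = sqr(rot)
        # residual rotation relating the two settings: for the right
        # choice of R this is close to the identity, so an elementwise
        # L1 distance from i3 is a cheap, adequate score
        nearly_i3 = (U * R).inverse() * reference_U
        score = sum(abs(n - i) for n, i in zip(nearly_i3.elems, i3.elems))
        results.append((score, R))
    return min(results, key=lambda sr: sr[0])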
def _sort_together_data_ccp4(self):
    '''Sort together in the right order (rebatching as we go) the sweeps
    we want to scale together.'''

    from xia2.lib.bits import nifty_power_of_ten

    max_batches = 0

    for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)
        pname, xname, dname = si.get_project_info()
        sname = si.get_sweep_name()
        hklin = si.get_reflections()

        # limit the reflections - e.g. if we are re-running the scaling
        # step on just a subset of the integrated data
        limit_batch_range = None
        for sweep in PhilIndex.params.xia2.settings.sweep:
            if sweep.id == sname and sweep.range is not None:
                limit_batch_range = sweep.range
                break

        if limit_batch_range is not None:
            Debug.write('Limiting batch range for %s: %s' %
                        (sname, limit_batch_range))
            start, end = limit_batch_range
            hklout = os.path.splitext(hklin)[0] + '_tmp.mtz'
            FileHandler.record_temporary_file(hklout)
            pl = self._factory.Pointless()
            pl.set_hklin(hklin)
            pl.set_hklout(hklout)
            pl.limit_batches(start, end)
            si.set_reflections(hklout)
            si.set_batches(limit_batch_range)

        # keep a count of the maximum number of batches in a block -
        # this will be used to make rebatch work below.
        hklin = si.get_reflections()

        md = self._factory.Mtzdump()
        md.set_hklin(hklin)
        md.dump()

        batches = md.get_batches()
        if 1 + max(batches) - min(batches) > max_batches:
            max_batches = max(batches) - min(batches) + 1

        datasets = md.get_datasets()

        Debug.write('In reflection file %s found:' % hklin)
        for d in datasets:
            Debug.write('... %s' % d)

        dataset_info = md.get_dataset_info(datasets[0])

    Debug.write('Biggest sweep has %d batches' % max_batches)
    max_batches = nifty_power_of_ten(max_batches)

    # then rebatch the files, to make sure that the batch numbers are
    # in the same order as the epochs of data collection.

    counter = 0

    for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)
        rb = self._factory.Rebatch()

        hklin = si.get_reflections()

        pname, xname, dname = si.get_project_info()
        sname = si.get_sweep_name()

        hklout = os.path.join(self.get_working_directory(),
                              '%s_%s_%s_%s_integrated.mtz' %
                              (pname, xname, dname, sname))

        first_batch = min(si.get_batches())
        si.set_batch_offset(counter * max_batches - first_batch + 1)

        rb.set_hklin(hklin)
        rb.set_first_batch(counter * max_batches + 1)
        rb.set_project_info(pname, xname, dname)
        rb.set_hklout(hklout)

        new_batches = rb.rebatch()

        # update the "input information"
        si.set_reflections(hklout)
        si.set_batches(new_batches)

        # update the counter & recycle
        counter += 1

    s = self._factory.Sortmtz()

    hklout = os.path.join(self.get_working_directory(),
                          '%s_%s_sorted.mtz' %
                          (self._scalr_pname, self._scalr_xname))

    s.set_hklout(hklout)

    for epoch in self._sweep_handler.get_epochs():
        s.add_hklin(self._sweep_handler.get_sweep_information(
            epoch).get_reflections())

    s.sort()

    # verify that the measurements are in the correct setting
    # choice for the spacegroup

    hklin = hklout
    hklout = hklin.replace('sorted.mtz', 'temp.mtz')

    if not self.get_scaler_reference_reflection_file():

        p = self._factory.Pointless()

        FileHandler.record_log_file('%s %s pointless' %
                                    (self._scalr_pname, self._scalr_xname),
                                    p.get_log_file())

        if len(self._sweep_handler.get_epochs()) > 1:
            p.set_hklin(hklin)
        else:
            # permit the use of pointless preparation...
            epoch = self._sweep_handler.get_epochs()[0]
            p.set_hklin(self._prepare_pointless_hklin(
                hklin,
                self._sweep_handler.get_sweep_information(
                    epoch).get_integrater().get_phi_width()))

        if self._scalr_input_spacegroup:
            Debug.write('Assigning user input spacegroup: %s' %
                        self._scalr_input_spacegroup)

            # run pointless anyway so its decision is recorded in the
            # log, then override with the user input
            p.decide_spacegroup()
            spacegroup = p.get_spacegroup()
            reindex_operator = p.get_spacegroup_reindex_operator()

            Debug.write('Pointless thought %s (reindex as %s)' %
                        (spacegroup, reindex_operator))

            spacegroup = self._scalr_input_spacegroup
            reindex_operator = 'h,k,l'
        else:
            # (a small-molecule branch using decide_pointgroup was
            # disabled here in the original with 'and False')
            p.decide_spacegroup()
            spacegroup = p.get_spacegroup()
            reindex_operator = p.get_spacegroup_reindex_operator()

            Debug.write('Pointless thought %s (reindex as %s)' %
                        (spacegroup, reindex_operator))

        if self._scalr_input_spacegroup:
            self._scalr_likely_spacegroups = [self._scalr_input_spacegroup]
        else:
            self._scalr_likely_spacegroups = p.get_likely_spacegroups()

        Chatter.write('Likely spacegroups:')
        for spag in self._scalr_likely_spacegroups:
            Chatter.write('%s' % spag)

        Chatter.write(
            'Reindexing to first spacegroup setting: %s (%s)' %
            (spacegroup, clean_reindex_operator(reindex_operator)))

    else:
        md = self._factory.Mtzdump()
        md.set_hklin(self.get_scaler_reference_reflection_file())
        md.dump()

        spacegroup = md.get_spacegroup()
        reindex_operator = 'h,k,l'

        self._scalr_likely_spacegroups = [spacegroup]

        Debug.write('Assigning spacegroup %s from reference' % spacegroup)

    # then run reindex to set the correct spacegroup
    ri = self._factory.Reindex()
    ri.set_hklin(hklin)
    ri.set_hklout(hklout)
    ri.set_spacegroup(spacegroup)
    ri.set_operator(reindex_operator)
    ri.reindex()

    FileHandler.record_temporary_file(hklout)

    # then resort the reflections (one last time!)
    s = self._factory.Sortmtz()

    temp = hklin
    hklin = hklout
    hklout = temp

    s.add_hklin(hklin)
    s.set_hklout(hklout)
    s.sort()

    # done preparing!
    self._prepared_reflections = s.get_hklout()

    return
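# --------------------------------------------------------------------------
# The rebatching above gives every sweep a disjoint, human-readable batch
# range by rounding the largest sweep up to a power of ten, so sweep k
# occupies batches from k * max_batches + 1. A minimal sketch of that
# arithmetic, assuming nifty_power_of_ten returns the next power of ten
# strictly greater than its argument (next_power_of_ten below is
# illustrative, not the xia2.lib.bits implementation):

def next_power_of_ten(n):
    power = 10
    while power <= n:
        power *= 10
    return power

def batch_ranges(sweep_sizes):
    '''Map each sweep (given as a count of batches) to its rebatched
    (first, last) batch range.'''
    block = next_power_of_ten(max(sweep_sizes))
    return [(k * block + 1, k * block + size)
            for k, size in enumerate(sweep_sizes)]

# e.g. batch_ranges([450, 450, 90]) -> [(1, 450), (1001, 1450), (2001, 2090)]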
def _sort_together_data_xds(self):

    if len(self._sweep_information) == 1:
        return self._sort_together_data_xds_one_sweep()

    max_batches = 0

    for epoch in self._sweep_information:
        hklin = self._sweep_information[epoch]['scaled_reflections']

        md = self._factory.Mtzdump()
        md.set_hklin(hklin)
        md.dump()

        if self._sweep_information[epoch]['batches'] == [0, 0]:
            Chatter.write('Getting batches from %s' % hklin)
            batches = md.get_batches()
            self._sweep_information[epoch]['batches'] = [min(batches),
                                                         max(batches)]
            Chatter.write('=> %d to %d' % (min(batches), max(batches)))

        batches = self._sweep_information[epoch]['batches']
        if 1 + max(batches) - min(batches) > max_batches:
            max_batches = max(batches) - min(batches) + 1

        datasets = md.get_datasets()

        Debug.write('In reflection file %s found:' % hklin)
        for d in datasets:
            Debug.write('... %s' % d)

        dataset_info = md.get_dataset_info(datasets[0])

    Debug.write('Biggest sweep has %d batches' % max_batches)
    max_batches = nifty_power_of_ten(max_batches)

    # sorted() rather than keys() + sort(), which also works on Python 3
    epochs = sorted(self._sweep_information)

    counter = 0

    for epoch in epochs:
        rb = self._factory.Rebatch()

        hklin = self._sweep_information[epoch]['scaled_reflections']

        pname = self._sweep_information[epoch]['pname']
        xname = self._sweep_information[epoch]['xname']
        dname = self._sweep_information[epoch]['dname']
        sname = self._sweep_information[epoch]['sname']

        hklout = os.path.join(self.get_working_directory(),
                              '%s_%s_%s_%d.mtz' %
                              (pname, xname, dname, counter))

        # we will want to delete this one on exit
        FileHandler.record_temporary_file(hklout)

        # record this for future reference - will be needed in the
        # radiation damage analysis...

        # hack - reset this as it gets in a muddle...
        intgr = self._sweep_information[epoch]['integrater']
        self._sweep_information[epoch]['batches'] = \
            intgr.get_integrater_batches()

        first_batch = min(self._sweep_information[epoch]['batches'])
        self._sweep_information[epoch]['batch_offset'] = \
            counter * max_batches - first_batch + 1

        rb.set_hklin(hklin)
        rb.set_first_batch(counter * max_batches + 1)
        rb.set_hklout(hklout)

        new_batches = rb.rebatch()

        # update the "input information"
        self._sweep_information[epoch]['hklin'] = hklout
        self._sweep_information[epoch]['batches'] = new_batches

        # update the counter & recycle
        counter += 1

    if Flags.get_chef():
        self._sweep_information_to_chef()

    s = self._factory.Sortmtz()

    hklout = os.path.join(self.get_working_directory(),
                          '%s_%s_sorted.mtz' %
                          (self._scalr_pname, self._scalr_xname))

    s.set_hklout(hklout)

    for epoch in epochs:
        s.add_hklin(self._sweep_information[epoch]['hklin'])

    s.sort(vrset=-99999999.0)

    self._prepared_reflections = hklout

    if self.get_scaler_reference_reflection_file():
        md = self._factory.Mtzdump()
        md.set_hklin(self.get_scaler_reference_reflection_file())
        md.dump()

        spacegroups = [md.get_spacegroup()]
        reindex_operator = 'h,k,l'
    else:
        pointless = self._factory.Pointless()
        pointless.set_hklin(hklout)
        pointless.decide_spacegroup()

        FileHandler.record_log_file('%s %s pointless' %
                                    (self._scalr_pname, self._scalr_xname),
                                    pointless.get_log_file())

        spacegroups = pointless.get_likely_spacegroups()
        reindex_operator = pointless.get_spacegroup_reindex_operator()

    if self._scalr_input_spacegroup:
        Debug.write('Assigning user input spacegroup: %s' %
                    self._scalr_input_spacegroup)
        spacegroups = [self._scalr_input_spacegroup]
        reindex_operator = 'h,k,l'

    self._scalr_likely_spacegroups = spacegroups
    spacegroup = self._scalr_likely_spacegroups[0]

    self._scalr_reindex_operator = reindex_operator

    Chatter.write('Likely spacegroups:')
    for spag in self._scalr_likely_spacegroups:
        Chatter.write('%s' % spag)

    Chatter.write(
        'Reindexing to first spacegroup setting: %s (%s)' %
        (spacegroup, clean_reindex_operator(reindex_operator)))

    hklin = self._prepared_reflections
    hklout = os.path.join(self.get_working_directory(),
                          '%s_%s_reindex.mtz' %
                          (self._scalr_pname, self._scalr_xname))
    FileHandler.record_temporary_file(hklout)

    ri = self._factory.Reindex()
    ri.set_hklin(hklin)
    ri.set_hklout(hklout)
    ri.set_spacegroup(spacegroup)
    ri.set_operator(reindex_operator)
    ri.reindex()

    hklin = hklout
    hklout = os.path.join(self.get_working_directory(),
                          '%s_%s_sorted.mtz' %
                          (self._scalr_pname, self._scalr_xname))

    s = self._factory.Sortmtz()
    s.set_hklin(hklin)
    s.set_hklout(hklout)
    s.sort(vrset=-99999999.0)

    self._prepared_reflections = hklout

    Debug.write('Updating unit cell to %.2f %.2f %.2f %.2f %.2f %.2f' %
                tuple(ri.get_cell()))
    self._scalr_cell = tuple(ri.get_cell())

    return
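# --------------------------------------------------------------------------
# Both sorters settle on a spacegroup with the same precedence: a user
# input spacegroup wins, else a reference reflection file, else whatever
# pointless decides. A compact sketch of that policy (choose_spacegroups
# and its argument names are hypothetical, not xia2 API):

def choose_spacegroups(input_spacegroup, reference_spacegroup,
                       likely_spacegroups, reindex_operator):
    '''Return (likely_spacegroups, reindex_operator) honouring the
    user input > reference > pointless precedence used above.'''
    if input_spacegroup:
        return [input_spacegroup], 'h,k,l'
    if reference_spacegroup:
        return [reference_spacegroup], 'h,k,l'
    return likely_spacegroups, reindex_operator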