def _integrate(self):
  '''Implement the integrater interface.'''

  # cite the program
  Citations.cite('mosflm')

  images_str = '%d to %d' % tuple(self._intgr_wedge)
  cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % tuple(self._intgr_cell)

  if len(self._fp_directory) <= 50:
    dirname = self._fp_directory
  else:
    dirname = '...%s' % self._fp_directory[-46:]

  Journal.block(
      'integrating', self._intgr_sweep_name, 'mosflm',
      {'images':images_str,
       'cell':cell_str,
       'lattice':self.get_integrater_refiner().get_refiner_lattice(),
       'template':self._fp_template,
       'directory':dirname,
       'resolution':'%.2f' % self._intgr_reso_high})

  self._mosflm_rerun_integration = False

  wd = self.get_working_directory()

  try:
    if self.get_integrater_sweep_name():
      pname, xname, dname = self.get_integrater_project_info()

    nproc = PhilIndex.params.xia2.settings.multiprocessing.nproc
    if nproc > 1:
      Debug.write('Parallel integration: %d jobs' % nproc)
      self._mosflm_hklout = self._mosflm_parallel_integrate()
    else:
      self._mosflm_hklout = self._mosflm_integrate()

    # record integration output for e.g. BLEND.
    sweep = self.get_integrater_sweep_name()
    if sweep:
      FileHandler.record_more_data_file(
          '%s %s %s %s INTEGRATE' % (pname, xname, dname, sweep),
          self._mosflm_hklout)

  except IntegrationError, e:
    if 'negative mosaic spread' in str(e):
      if self._mosflm_postref_fix_mosaic:
        Chatter.write(
            'Negative mosaic spread - stopping integration')
        raise BadLatticeError, 'negative mosaic spread'

      Chatter.write(
          'Negative mosaic spread - rerunning integration')
      self.set_integrater_done(False)
      self._mosflm_postref_fix_mosaic = True

def integrate(self):
  '''Actually perform integration until we think we are done...'''

  while not self.get_integrater_finish_done():
    while not self.get_integrater_done():
      while not self.get_integrater_prepare_done():
        Debug.write('Preparing to do some integration...')
        self.set_integrater_prepare_done(True)

        # if this raises an exception, perhaps the autoindexing
        # solution has too high symmetry. if this is the case, then
        # perform a self._intgr_indexer.eliminate() - this should
        # reset the indexing system
        try:
          self._integrate_prepare()
        except BadLatticeError, e:
          Journal.banner('eliminated this lattice', size = 80)
          Chatter.write('Rejecting bad lattice %s' % str(e))
          self._intgr_refiner.eliminate()
          self._integrater_reset()

      # FIXME x1698 - may be the case that _integrate() returns the
      # raw intensities, _integrate_finish() returns intensities
      # which may have been adjusted or corrected. See #1698 below.

      Debug.write('Doing some integration...')

      self.set_integrater_done(True)

      template = self.get_integrater_sweep().get_template()

      if self._intgr_sweep_name:
        if PhilIndex.params.xia2.settings.show_template:
          Chatter.banner('Integrating %s (%s)' % \
                         (self._intgr_sweep_name, template))
        else:
          Chatter.banner('Integrating %s' % \
                         (self._intgr_sweep_name))

      try:
        #1698
        self._intgr_hklout_raw = self._integrate()
      except BadLatticeError, e:
        Chatter.write('Rejecting bad lattice %s' % str(e))
        Journal.banner('eliminated this lattice', size = 80)
        self._intgr_refiner.eliminate()
        self._integrater_reset()

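# The nested while loops in integrate() above implement a three-flag retry
# protocol: each stage marks itself done before running, and any stage (or a
# handler such as _integrater_reset()) may clear an earlier flag to send
# control back to that stage. A minimal standalone sketch of the idea
# follows - the names are illustrative, not part of the xia2 API. Each
# callable returns the stage names it wants to invalidate.

def _retry_protocol_sketch(prepare, do, finish):
  '''Run prepare/do/finish until all three stages report done.'''
  done = {'prepare': False, 'do': False, 'finish': False}
  while not done['finish']:
    while not done['do']:
      while not done['prepare']:
        done['prepare'] = True
        for stage in prepare():   # e.g. retry preparation
          done[stage] = False
      done['do'] = True
      for stage in do():          # e.g. eliminate a lattice, redo prepare
        done[stage] = False
    done['finish'] = True
    for stage in finish():        # e.g. tighten a limit, redo processing
      done[stage] = False
  return done
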
def _integrate(self):
  '''Actually do the integration - in XDS terms this will mean running
  DEFPIX and INTEGRATE to measure all the reflections.'''

  experiment = self._intgr_refiner.get_refined_experiment_list(
      self.get_integrater_epoch())[0]
  crystal_model = experiment.crystal
  self._intgr_refiner_cell = crystal_model.get_unit_cell().parameters()

  images_str = '%d to %d' % tuple(self._intgr_wedge)
  cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % tuple(self._intgr_refiner_cell)

  if len(self._fp_directory) <= 50:
    dirname = self._fp_directory
  else:
    dirname = '...%s' % self._fp_directory[-46:]

  Journal.block(
      'integrating', self._intgr_sweep_name, 'XDS',
      {'images':images_str,
       'cell':cell_str,
       'lattice':self._intgr_refiner.get_refiner_lattice(),
       'template':self._fp_template,
       'directory':dirname,
       'resolution':'%.2f' % self._intgr_reso_high})

  first_image_in_wedge = self.get_image_name(self._intgr_wedge[0])

  defpix = self.Defpix()

  # pass in the correct data
  for file in ['X-CORRECTIONS.cbf', 'Y-CORRECTIONS.cbf',
               'BKGINIT.cbf', 'XPARM.XDS']:
    defpix.set_input_data_file(file, self._xds_data_files[file])

  defpix.set_data_range(self._intgr_wedge[0],
                        self._intgr_wedge[1])

  if self.get_integrater_high_resolution() > 0.0 and \
         self.get_integrater_user_resolution():
    Debug.write('Setting resolution limit in DEFPIX to %.2f' % \
                self.get_integrater_high_resolution())
    defpix.set_resolution_high(self.get_integrater_high_resolution())
    defpix.set_resolution_low(self.get_integrater_low_resolution())
  elif self.get_integrater_low_resolution():
    Debug.write('Setting low resolution limit in DEFPIX to %.2f' % \
                self.get_integrater_low_resolution())
    defpix.set_resolution_high(0.0)
    defpix.set_resolution_low(self.get_integrater_low_resolution())

  defpix.run()

  # and gather the result files
  for file in ['BKGPIX.cbf', 'ABS.cbf']:
    self._xds_data_files[file] = defpix.get_output_data_file(file)

  integrate = self.Integrate()

  if self._xds_integrate_parameters:
    integrate.set_updates(self._xds_integrate_parameters)

  # decide what images we are going to process, if not already
  # specified
  if not self._intgr_wedge:
    images = self.get_matching_images()
    self.set_integrater_wedge(min(images),
                              max(images))

  first_image_in_wedge = self.get_image_name(self._intgr_wedge[0])

  integrate.set_data_range(self._intgr_wedge[0],
                           self._intgr_wedge[1])

  for file in ['X-CORRECTIONS.cbf', 'Y-CORRECTIONS.cbf',
               'BLANK.cbf', 'BKGPIX.cbf', 'GAIN.cbf']:
    integrate.set_input_data_file(file, self._xds_data_files[file])

  if self._xds_data_files.has_key('GXPARM.XDS'):
    Debug.write('Using globally refined parameters')
    integrate.set_input_data_file(
        'XPARM.XDS', self._xds_data_files['GXPARM.XDS'])
    integrate.set_refined_xparm()
  else:
    integrate.set_input_data_file(
        'XPARM.XDS', self._xds_data_files['XPARM.XDS'])

  integrate.run()

  self._intgr_per_image_statistics = integrate.get_per_image_statistics()
  Chatter.write(self.show_per_image_statistics())

  # record the log file -
  pname, xname, dname = self.get_integrater_project_info()
  sweep = self.get_integrater_sweep_name()

  FileHandler.record_log_file('%s %s %s %s INTEGRATE' % \
                              (pname, xname, dname, sweep),
                              os.path.join(self.get_working_directory(),
                                           'INTEGRATE.LP'))

  # and copy the first pass INTEGRATE.HKL...
  lattice = self._intgr_refiner.get_refiner_lattice()
  if not os.path.exists(os.path.join(
      self.get_working_directory(), 'INTEGRATE-%s.HKL' % lattice)):
    here = self.get_working_directory()
    shutil.copyfile(os.path.join(here, 'INTEGRATE.HKL'),
                    os.path.join(here, 'INTEGRATE-%s.HKL' % lattice))

  # record INTEGRATE.HKL for e.g. BLEND.
  FileHandler.record_more_data_file(
      '%s %s %s %s INTEGRATE' % (pname, xname, dname, sweep),
      os.path.join(self.get_working_directory(), 'INTEGRATE.HKL'))

  # should the existence of these require that I rerun the
  # integration or can we assume that the application of a
  # sensible resolution limit will achieve this??

  self._xds_integrate_parameters = integrate.get_updates()

  # record the mosaic spread &c.
  m_min, m_mean, m_max = integrate.get_mosaic()
  self.set_integrater_mosaic_min_mean_max(m_min, m_mean, m_max)

  Chatter.write('Mosaic spread: %.3f < %.3f < %.3f' % \
                self.get_integrater_mosaic_min_mean_max())

  return os.path.join(self.get_working_directory(), 'INTEGRATE.HKL')

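# _integrate() above threads XDS files between steps through the
# self._xds_data_files dict: DEFPIX consumes the correction, background and
# geometry files from the earlier XDS steps, and its outputs (BKGPIX.cbf,
# ABS.cbf) are stored back into the dict for INTEGRATE to pick up. A minimal
# sketch of that handoff pattern, with a hypothetical 'step' object standing
# in for the Defpix/Integrate wrappers (their set_input_data_file/run/
# get_output_data_file calls are taken from the code above):

def _run_xds_step_sketch(step, data_files, inputs, outputs):
  '''Feed named files into an XDS wrapper, run it, harvest its outputs.'''
  for name in inputs:
    step.set_input_data_file(name, data_files[name])
  step.run()
  for name in outputs:
    data_files[name] = step.get_output_data_file(name)

# e.g. in _integrate() terms:
#   _run_xds_step_sketch(defpix, self._xds_data_files,
#                        ['X-CORRECTIONS.cbf', 'Y-CORRECTIONS.cbf',
#                         'BKGINIT.cbf', 'XPARM.XDS'],
#                        ['BKGPIX.cbf', 'ABS.cbf'])
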
def _scale(self):
  '''Perform all of the operations required to deliver the scaled
  data.'''

  epochs = self._sweep_handler.get_epochs()

  if PhilIndex.params.xia2.settings.optimize_scaling:
    self._determine_best_scale_model_8way()
  else:
    self._scalr_corrections = True
    self._scalr_correct_absorption = True
    self._scalr_correct_partiality = False
    self._scalr_correct_decay = True

  if self._scalr_corrections:
    Journal.block(
        'scaling', self.get_scaler_xcrystal().get_name(), 'CCP4',
        {'scaling model':'automatic',
         'absorption':self._scalr_correct_absorption,
         'tails':self._scalr_correct_partiality,
         'decay':self._scalr_correct_decay})
  else:
    Journal.block(
        'scaling', self.get_scaler_xcrystal().get_name(), 'CCP4',
        {'scaling model':'default'})

  sc = self._updated_aimless()
  sc.set_hklin(self._prepared_reflections)
  sc.set_intensities(PhilIndex.params.ccp4.aimless.intensities)

  sc.set_chef_unmerged(True)

  sc.set_new_scales_file('%s.scales' % self._scalr_xname)

  user_resolution_limits = { }

  for epoch in epochs:
    si = self._sweep_handler.get_sweep_information(epoch)
    pname, xname, dname = si.get_project_info()
    sname = si.get_sweep_name()
    intgr = si.get_integrater()

    if intgr.get_integrater_user_resolution():
      dmin = intgr.get_integrater_high_resolution()

      if not user_resolution_limits.has_key((dname, sname)):
        user_resolution_limits[(dname, sname)] = dmin
      elif dmin < user_resolution_limits[(dname, sname)]:
        user_resolution_limits[(dname, sname)] = dmin

    start, end = si.get_batch_range()

    if (dname, sname) in self._scalr_resolution_limits:
      resolution = self._scalr_resolution_limits[(dname, sname)]
      sc.add_run(start, end, exclude = False,
                 resolution = resolution, name = sname)
    else:
      sc.add_run(start, end, name = sname)

  sc.set_hklout(os.path.join(self.get_working_directory(),
                             '%s_%s_scaled_test.mtz' % \
                             (self._scalr_pname, self._scalr_xname)))

  if self.get_scaler_anomalous():
    sc.set_anomalous()

  # what follows, sucks

  if Flags.get_failover():
    try:
      sc.scale()
    except RuntimeError, e:
      es = str(e)
      if 'bad batch' in es or \
             'negative scales run' in es or \
             'no observations' in es:
        # first ID the sweep from the batch no
        batch = int(es.split()[-1])
        epoch = self._identify_sweep_epoch(batch)
        sweep = self._scalr_integraters[
            epoch].get_integrater_sweep()

        # then remove it from my parent xcrystal
        self.get_scaler_xcrystal().remove_sweep(sweep)

        # then remove it from the scaler list of integraters
        # - this should really be a scaler interface method
        del(self._scalr_integraters[epoch])

        # then tell the user what is happening
        Chatter.write(
            'Sweep %s gave negative scales - removing' % \
            sweep.get_name())

        # then reset the prepare, do, finish flags
        self.set_scaler_prepare_done(False)
        self.set_scaler_done(False)
        self.set_scaler_finish_done(False)

        # and return
        return
      else:
        raise e

def _scale_prepare(self):
  '''Perform all of the preparation required to deliver the scaled
  data. This should sort together the reflection files, ensure that
  they are correctly indexed (via pointless) and generally tidy
  things up.'''

  # acknowledge all of the programs we are about to use...
  Citations.cite('pointless')
  Citations.cite('aimless')
  Citations.cite('ccp4')

  # ---------- GATHER ----------

  self._sweep_handler = SweepInformationHandler(self._scalr_integraters)

  Journal.block(
      'gathering', self.get_scaler_xcrystal().get_name(), 'CCP4',
      {'working directory':self.get_working_directory()})

  for epoch in self._sweep_handler.get_epochs():
    si = self._sweep_handler.get_sweep_information(epoch)
    pname, xname, dname = si.get_project_info()
    sname = si.get_sweep_name()

    exclude_sweep = False

    for sweep in PhilIndex.params.xia2.settings.sweep:
      if sweep.id == sname and sweep.exclude:
        exclude_sweep = True
        break

    if exclude_sweep:
      self._sweep_handler.remove_epoch(epoch)
      Debug.write('Excluding sweep %s' % sname)
    else:
      Journal.entry({'adding data from':'%s/%s/%s' % \
                     (xname, dname, sname)})

  # gather data for all images which belonged to the parent
  # crystal - allowing for the fact that things could go wrong
  # e.g. epoch information not available, exposure times not in
  # headers etc...

  for e in self._sweep_handler.get_epochs():
    si = self._sweep_handler.get_sweep_information(e)
    assert is_mtz_file(si.get_reflections())

  p, x = self._sweep_handler.get_project_info()
  self._scalr_pname = p
  self._scalr_xname = x

  # verify that the lattices are consistent, calling eliminate if
  # they are not N.B. there could be corner cases here

  need_to_return = False

  multi_sweep_indexing = \
      PhilIndex.params.xia2.settings.developmental.multi_sweep_indexing

  if len(self._sweep_handler.get_epochs()) > 1:

    if multi_sweep_indexing and not self._scalr_input_pointgroup:
      pointless_hklins = []

      max_batches = 0
      for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)
        hklin = si.get_reflections()

        md = self._factory.Mtzdump()
        md.set_hklin(hklin)
        md.dump()

        batches = md.get_batches()
        if 1 + max(batches) - min(batches) > max_batches:
          max_batches = max(batches) - min(batches) + 1

        datasets = md.get_datasets()

        Debug.write('In reflection file %s found:' % hklin)
        for d in datasets:
          Debug.write('... %s' % d)

        dataset_info = md.get_dataset_info(datasets[0])

      from xia2.lib.bits import nifty_power_of_ten
      Debug.write('Biggest sweep has %d batches' % max_batches)
      max_batches = nifty_power_of_ten(max_batches)

      counter = 0

      for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)
        hklin = si.get_reflections()
        integrater = si.get_integrater()
        refiner = integrater.get_integrater_refiner()

        hklin = self._prepare_pointless_hklin(
            hklin, si.get_integrater().get_phi_width())

        rb = self._factory.Rebatch()

        hklout = os.path.join(self.get_working_directory(),
                              '%s_%s_%s_%s_prepointless.mtz' % \
                              (pname, xname, dname, si.get_sweep_name()))

        # we will want to delete this on exit
        FileHandler.record_temporary_file(hklout)

        first_batch = min(si.get_batches())
        si.set_batch_offset(counter * max_batches - first_batch + 1)

        rb.set_hklin(hklin)
        rb.set_first_batch(counter * max_batches + 1)
        rb.set_project_info(pname, xname, dname)
        rb.set_hklout(hklout)

        new_batches = rb.rebatch()

        pointless_hklins.append(hklout)

        # update the counter & recycle
        counter += 1

      s = self._factory.Sortmtz()

      pointless_hklin = os.path.join(self.get_working_directory(),
                                     '%s_%s_prepointless_sorted.mtz' % \
                                     (self._scalr_pname, self._scalr_xname))

      s.set_hklout(pointless_hklin)

      for hklin in pointless_hklins:
        s.add_hklin(hklin)

      s.sort()

      pointgroup, reindex_op, ntr, pt = \
          self._pointless_indexer_jiffy(
              pointless_hklin, refiner)

      Debug.write('X1698: %s: %s' % (pointgroup, reindex_op))

      lattices = [Syminfo.get_lattice(pointgroup)]

      for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)
        intgr = si.get_integrater()
        hklin = si.get_reflections()
        refiner = intgr.get_integrater_refiner()

        if ntr:
          intgr.integrater_reset_reindex_operator()
          need_to_return = True

    else:
      lattices = []

      for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)
        intgr = si.get_integrater()
        hklin = si.get_reflections()
        refiner = intgr.get_integrater_refiner()

        if self._scalr_input_pointgroup:
          pointgroup = self._scalr_input_pointgroup
          reindex_op = 'h,k,l'
          ntr = False
        else:
          pointless_hklin = self._prepare_pointless_hklin(
              hklin, si.get_integrater().get_phi_width())

          pointgroup, reindex_op, ntr, pt = \
              self._pointless_indexer_jiffy(
                  pointless_hklin, refiner)

          Debug.write('X1698: %s: %s' % (pointgroup, reindex_op))

        lattice = Syminfo.get_lattice(pointgroup)

        if not lattice in lattices:
          lattices.append(lattice)

        if ntr:
          intgr.integrater_reset_reindex_operator()
          need_to_return = True

    if len(lattices) > 1:

      # why not using pointless indexer jiffy??!

      correct_lattice = sort_lattices(lattices)[0]

      Chatter.write('Correct lattice asserted to be %s' % \
                    correct_lattice)

      # transfer this information back to the indexers
      for epoch in self._sweep_handler.get_epochs():
        si = self._sweep_handler.get_sweep_information(epoch)
        refiner = si.get_integrater().get_integrater_refiner()
        sname = si.get_sweep_name()

        state = refiner.set_refiner_asserted_lattice(
            correct_lattice)

        if state == refiner.LATTICE_CORRECT:
          Chatter.write('Lattice %s ok for sweep %s' % \
                        (correct_lattice, sname))
        elif state == refiner.LATTICE_IMPOSSIBLE:
          raise RuntimeError, 'Lattice %s impossible for %s' \
                % (correct_lattice, sname)
        elif state == refiner.LATTICE_POSSIBLE:
          Chatter.write('Lattice %s assigned for sweep %s' % \
                        (correct_lattice, sname))
          need_to_return = True

  # if one or more of them was not in the lowest lattice,
  # need to return here to allow reprocessing

  if need_to_return:
    self.set_scaler_done(False)
    self.set_scaler_prepare_done(False)
    return

  # ---------- REINDEX ALL DATA TO CORRECT POINTGROUP ----------

  # all should share the same pointgroup, unless twinned... in which
  # case force them to be...

  pointgroups = { }
  reindex_ops = { }
  probably_twinned = False

  need_to_return = False

  multi_sweep_indexing = \
      PhilIndex.params.xia2.settings.developmental.multi_sweep_indexing

  if multi_sweep_indexing and not self._scalr_input_pointgroup:
    pointless_hklins = []

    max_batches = 0
    for epoch in self._sweep_handler.get_epochs():
      si = self._sweep_handler.get_sweep_information(epoch)
      hklin = si.get_reflections()

      md = self._factory.Mtzdump()
      md.set_hklin(hklin)
      md.dump()

      batches = md.get_batches()
      if 1 + max(batches) - min(batches) > max_batches:
        max_batches = max(batches) - min(batches) + 1

      datasets = md.get_datasets()

      Debug.write('In reflection file %s found:' % hklin)
      for d in datasets:
        Debug.write('... %s' % d)

      dataset_info = md.get_dataset_info(datasets[0])

    from xia2.lib.bits import nifty_power_of_ten
    Debug.write('Biggest sweep has %d batches' % max_batches)
    max_batches = nifty_power_of_ten(max_batches)

    counter = 0

    for epoch in self._sweep_handler.get_epochs():
      si = self._sweep_handler.get_sweep_information(epoch)
      hklin = si.get_reflections()
      integrater = si.get_integrater()
      refiner = integrater.get_integrater_refiner()

      hklin = self._prepare_pointless_hklin(
          hklin, si.get_integrater().get_phi_width())

      rb = self._factory.Rebatch()

      hklout = os.path.join(self.get_working_directory(),
                            '%s_%s_%s_%s_prepointless.mtz' % \
                            (pname, xname, dname, si.get_sweep_name()))

      # we will want to delete this on exit
      FileHandler.record_temporary_file(hklout)

      first_batch = min(si.get_batches())
      si.set_batch_offset(counter * max_batches - first_batch + 1)

      rb.set_hklin(hklin)
      rb.set_first_batch(counter * max_batches + 1)
      rb.set_project_info(pname, xname, dname)
      rb.set_hklout(hklout)

      new_batches = rb.rebatch()

      pointless_hklins.append(hklout)

      # update the counter & recycle
      counter += 1

    s = self._factory.Sortmtz()

    pointless_hklin = os.path.join(self.get_working_directory(),
                                   '%s_%s_prepointless_sorted.mtz' % \
                                   (self._scalr_pname, self._scalr_xname))

    s.set_hklout(pointless_hklin)

    for hklin in pointless_hklins:
      s.add_hklin(hklin)

    s.sort()

    pointgroup, reindex_op, ntr, pt = \
        self._pointless_indexer_jiffy(
            pointless_hklin, refiner)

    for epoch in self._sweep_handler.get_epochs():
      pointgroups[epoch] = pointgroup
      reindex_ops[epoch] = reindex_op

  else:
    for epoch in self._sweep_handler.get_epochs():
      si = self._sweep_handler.get_sweep_information(epoch)

      hklin = si.get_reflections()

      #hklout = os.path.join(
      #    self.get_working_directory(),
      #    os.path.split(hklin)[-1].replace('.mtz', '_rdx.mtz'))

      #FileHandler.record_temporary_file(hklout)

      integrater = si.get_integrater()
      refiner = integrater.get_integrater_refiner()

      if self._scalr_input_pointgroup:
        Debug.write('Using input pointgroup: %s' % \
                    self._scalr_input_pointgroup)
        pointgroup = self._scalr_input_pointgroup
        reindex_op = 'h,k,l'
        pt = False

      else:
        pointless_hklin = self._prepare_pointless_hklin(
            hklin, si.get_integrater().get_phi_width())

        pointgroup, reindex_op, ntr, pt = \
            self._pointless_indexer_jiffy(
                pointless_hklin, refiner)

        Debug.write('X1698: %s: %s' % (pointgroup, reindex_op))

        if ntr:
          integrater.integrater_reset_reindex_operator()
          need_to_return = True

      if pt and not probably_twinned:
        probably_twinned = True

      Debug.write('Pointgroup: %s (%s)' % (pointgroup, reindex_op))

      pointgroups[epoch] = pointgroup
      reindex_ops[epoch] = reindex_op

  overall_pointgroup = None

  pointgroup_set = set([pointgroups[e] for e in pointgroups])

  if len(pointgroup_set) > 1 and \
         not probably_twinned:
    raise RuntimeError, 'non uniform pointgroups'

  if len(pointgroup_set) > 1:
    Debug.write('Probably twinned, pointgroups: %s' % \
                ' '.join([p.replace(' ', '') for p in \
                          list(pointgroup_set)]))
    numbers = [Syminfo.spacegroup_name_to_number(s) for s in \
               pointgroup_set]
    overall_pointgroup = Syminfo.spacegroup_number_to_name(
        min(numbers))
    self._scalr_input_pointgroup = overall_pointgroup
    Chatter.write('Twinning detected, assume pointgroup %s' % \
                  overall_pointgroup)
    need_to_return = True
  else:
    overall_pointgroup = pointgroup_set.pop()

  for epoch in self._sweep_handler.get_epochs():
    si = self._sweep_handler.get_sweep_information(epoch)

    integrater = si.get_integrater()

    integrater.set_integrater_spacegroup_number(
        Syminfo.spacegroup_name_to_number(overall_pointgroup))
    integrater.set_integrater_reindex_operator(
        reindex_ops[epoch], reason='setting point group')
    # This will give us the reflections in the correct point group
    si.set_reflections(integrater.get_integrater_intensities())

  if need_to_return:
    self.set_scaler_done(False)
    self.set_scaler_prepare_done(False)
    return

  # in here now optionally work through the data files which should be
  # indexed with a consistent point group, and transform the orientation
  # matrices by the lattice symmetry operations (if possible) to get a
  # consistent definition of U matrix modulo fixed rotations

  if PhilIndex.params.xia2.settings.unify_setting:

    from scitbx.matrix import sqr
    reference_U = None
    i3 = sqr((1, 0, 0, 0, 1, 0, 0, 0, 1))

    for epoch in self._sweep_handler.get_epochs():
      si = self._sweep_handler.get_sweep_information(epoch)
      intgr = si.get_integrater()
      fixed = sqr(intgr.get_goniometer().get_fixed_rotation())
      u, b, s = get_umat_bmat_lattice_symmetry_from_mtz(si.get_reflections())
      U = fixed.inverse() * sqr(u).transpose()
      B = sqr(b)

      if reference_U is None:
        reference_U = U
        continue

      results = []
      for op in s.all_ops():
        R = B * sqr(op.r().as_double()).transpose() * B.inverse()
        nearly_i3 = (U * R).inverse() * reference_U
        score = sum([abs(_n - _i) for (_n, _i) in zip(nearly_i3, i3)])
        results.append((score, op.r().as_hkl(), op))

      results.sort()
      best = results[0]
      Debug.write('Best reindex: %s %.3f' % (best[1], best[0]))
      intgr.set_integrater_reindex_operator(best[2].r().inverse().as_hkl(),
                                            reason='unifying [U] setting')
      si.set_reflections(intgr.get_integrater_intensities())

      # recalculate to verify
      u, b, s = get_umat_bmat_lattice_symmetry_from_mtz(si.get_reflections())
      U = fixed.inverse() * sqr(u).transpose()
      Debug.write('New reindex: %s' % (U.inverse() * reference_U))

      # FIXME I should probably raise an exception at this stage if this
      # is not about I3...

  if self.get_scaler_reference_reflection_file():
    self._reference = self.get_scaler_reference_reflection_file()
    Debug.write('Using HKLREF %s' % self._reference)

  elif Flags.get_reference_reflection_file():
    self._reference = Flags.get_reference_reflection_file()
    Debug.write('Using HKLREF %s' % self._reference)

  params = PhilIndex.params
  use_brehm_diederichs = params.xia2.settings.use_brehm_diederichs
  if len(self._sweep_handler.get_epochs()) > 1 and use_brehm_diederichs:

    brehm_diederichs_files_in = []
    for epoch in self._sweep_handler.get_epochs():
      si = self._sweep_handler.get_sweep_information(epoch)
      hklin = si.get_reflections()
      brehm_diederichs_files_in.append(hklin)

    # now run cctbx.brehm_diederichs to figure out the indexing hand for
    # each sweep
    from xia2.Wrappers.Cctbx.BrehmDiederichs import BrehmDiederichs
    from xia2.lib.bits import auto_logfiler
    brehm_diederichs = BrehmDiederichs()
    brehm_diederichs.set_working_directory(self.get_working_directory())
    auto_logfiler(brehm_diederichs)
    brehm_diederichs.set_input_filenames(brehm_diederichs_files_in)
    # 1 or 3? 1 seems to work better?
    brehm_diederichs.set_asymmetric(1)
    brehm_diederichs.run()
    reindexing_dict = brehm_diederichs.get_reindexing_dict()

    for epoch in self._sweep_handler.get_epochs():
      si = self._sweep_handler.get_sweep_information(epoch)
      intgr = si.get_integrater()
      hklin = si.get_reflections()

      reindex_op = reindexing_dict.get(os.path.abspath(hklin))
      assert reindex_op is not None

      if 1 or reindex_op != 'h,k,l':
        # apply the reindexing operator
        intgr.set_integrater_reindex_operator(
            reindex_op, reason='match reference')
        si.set_reflections(intgr.get_integrater_intensities())

  elif len(self._sweep_handler.get_epochs()) > 1 and \
         not self._reference:
    first = self._sweep_handler.get_epochs()[0]
    si = self._sweep_handler.get_sweep_information(first)
    self._reference = si.get_reflections()

  if self._reference:

    md = self._factory.Mtzdump()
    md.set_hklin(self._reference)
    md.dump()

    if md.get_batches() and False:
      raise RuntimeError, 'reference reflection file %s unmerged' % \
            self._reference

    datasets = md.get_datasets()

    if len(datasets) > 1 and False:
      raise RuntimeError, 'more than one dataset in %s' % \
            self._reference

    # then get the unit cell, lattice etc.

    reference_lattice = Syminfo.get_lattice(md.get_spacegroup())
    reference_cell = md.get_dataset_info(datasets[0])['cell']

    # then compute the pointgroup from this...

    # ---------- REINDEX TO CORRECT (REFERENCE) SETTING ----------

    for epoch in self._sweep_handler.get_epochs():

      pl = self._factory.Pointless()

      si = self._sweep_handler.get_sweep_information(epoch)
      hklin = si.get_reflections()

      pl.set_hklin(self._prepare_pointless_hklin(
          hklin, si.get_integrater().get_phi_width()))

      hklout = os.path.join(
          self.get_working_directory(),
          '%s_rdx2.mtz' % os.path.split(hklin)[-1][:-4])

      # we will want to delete this on exit
      FileHandler.record_temporary_file(hklout)

      # now set the initial reflection set as a reference...

      pl.set_hklref(self._reference)

      # write a pointless log file...
      pl.decide_pointgroup()

      Debug.write('Reindexing analysis of %s' % pl.get_hklin())

      pointgroup = pl.get_pointgroup()
      reindex_op = pl.get_reindex_operator()

      Debug.write('Operator: %s' % reindex_op)

      # apply this...

      integrater = si.get_integrater()

      integrater.set_integrater_reindex_operator(reindex_op,
                                                 reason='match reference')
      integrater.set_integrater_spacegroup_number(
          Syminfo.spacegroup_name_to_number(pointgroup))
      si.set_reflections(integrater.get_integrater_intensities())

      md = self._factory.Mtzdump()
      md.set_hklin(si.get_reflections())
      md.dump()

      datasets = md.get_datasets()

      if len(datasets) > 1:
        raise RuntimeError, 'more than one dataset in %s' % \
              si.get_reflections()

      # then get the unit cell, lattice etc.

      lattice = Syminfo.get_lattice(md.get_spacegroup())
      cell = md.get_dataset_info(datasets[0])['cell']

      if lattice != reference_lattice:
        raise RuntimeError, 'lattices differ in %s and %s' % \
              (self._reference, si.get_reflections())

      for j in range(6):
        if math.fabs((cell[j] - reference_cell[j]) /
                     reference_cell[j]) > 0.1:
          raise RuntimeError, \
                'unit cell parameters differ in %s and %s' % \
                (self._reference, si.get_reflections())

  # ---------- SORT TOGETHER DATA ----------

  self._sort_together_data_ccp4()

  self._scalr_resolution_limits = { }

  # store central resolution limit estimates

  batch_ranges = [self._sweep_handler.get_sweep_information(
      epoch).get_batch_range() for epoch in
                  self._sweep_handler.get_epochs()]

  self._resolution_limit_estimates = erzatz_resolution(
      self._prepared_reflections, batch_ranges)

  return

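# Both multi-sweep blocks in _scale_prepare() above renumber batches so that
# sweeps cannot collide in the merged MTZ file: the largest batch count is
# rounded up to a power of ten and sweep n is rebatched to start at
# n * base + 1. A minimal sketch of the arithmetic, with a local stand-in
# for xia2.lib.bits.nifty_power_of_ten (assumed here to round up to the
# next power of ten):

def _batch_offset_sketch(sweep_batch_ranges):
  '''Given [(first_batch, last_batch), ...] per sweep, return the new
  first batch number assigned to each sweep.'''
  def power_of_ten_at_least(n):
    base = 10
    while base < n:
      base *= 10
    return base
  max_batches = max(last - first + 1 for first, last in sweep_batch_ranges)
  base = power_of_ten_at_least(max_batches)
  return [counter * base + 1 for counter in range(len(sweep_batch_ranges))]

# e.g. three sweeps of up to 370 images get first batches 1, 1001 and 2001,
# so every batch number identifies its sweep unambiguously.
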
def _scale(self):
  '''Actually scale all of the data together.'''

  from xia2.Handlers.Environment import debug_memory_usage
  debug_memory_usage()

  Journal.block(
      'scaling', self.get_scaler_xcrystal().get_name(), 'XSCALE',
      {'scaling model':'default (all)'})

  epochs = self._sweep_information.keys()
  epochs.sort()

  xscale = self.XScale()

  xscale.set_spacegroup_number(self._xds_spacegroup)
  xscale.set_cell(self._scalr_cell)

  Debug.write('Set CELL: %.2f %.2f %.2f %.2f %.2f %.2f' % \
              tuple(self._scalr_cell))
  Debug.write('Set SPACEGROUP_NUMBER: %d' % \
              self._xds_spacegroup)

  Debug.write('Gathering measurements for scaling')

  for epoch in epochs:

    # get the prepared reflections
    reflections = self._sweep_information[epoch][
        'prepared_reflections']

    # and the wavelength that this belongs to
    dname = self._sweep_information[epoch]['dname']
    sname = self._sweep_information[epoch]['sname']

    # and the resolution range for the reflections
    intgr = self._sweep_information[epoch]['integrater']
    Debug.write('Epoch: %d' % epoch)
    Debug.write('HKL: %s (%s/%s)' % (reflections, dname, sname))

    resolution_low = intgr.get_integrater_low_resolution()
    resolution_high, _ = self._scalr_resolution_limits.get(
        (dname, sname), (0.0, None))

    resolution = (resolution_high, resolution_low)

    xscale.add_reflection_file(reflections, dname, resolution)

  # set the global properties of the sample
  xscale.set_crystal(self._scalr_xname)
  xscale.set_anomalous(self._scalr_anomalous)

  debug_memory_usage()

  xscale.run()

  scale_factor = xscale.get_scale_factor()

  Debug.write('XSCALE scale factor found to be: %e' % scale_factor)

  # record the log file
  pname = self._scalr_pname
  xname = self._scalr_xname

  FileHandler.record_log_file('%s %s XSCALE' % \
                              (pname, xname),
                              os.path.join(self.get_working_directory(),
                                           'XSCALE.LP'))

  # check for outlier reflections and if a number are found
  # then iterate (that is, rerun XSCALE, rejecting these outliers)

  if not PhilIndex.params.dials.fast_mode and \
         not PhilIndex.params.xds.keep_outliers:
    xscale_remove = xscale.get_remove()
    if xscale_remove:
      current_remove = []
      final_remove = []

      # first ensure that there are no duplicate entries...
      if os.path.exists(os.path.join(
          self.get_working_directory(), 'REMOVE.HKL')):
        for line in open(os.path.join(
            self.get_working_directory(), 'REMOVE.HKL'), 'r').readlines():
          h, k, l = map(int, line.split()[:3])
          z = float(line.split()[3])

          if not (h, k, l, z) in current_remove:
            current_remove.append((h, k, l, z))

        for c in xscale_remove:
          if c in current_remove:
            continue
          final_remove.append(c)

        Debug.write(
            '%d alien reflections are already removed' % \
            (len(xscale_remove) - len(final_remove)))

      else:
        # we want to remove all of the new dodgy reflections
        final_remove = xscale_remove

      remove_hkl = open(os.path.join(
          self.get_working_directory(), 'REMOVE.HKL'), 'w')

      z_min = PhilIndex.params.xds.z_min
      rejected = 0

      # write in the old reflections
      for remove in current_remove:
        z = remove[3]
        if z >= z_min:
          remove_hkl.write('%d %d %d %f\n' % remove)
        else:
          rejected += 1
      Debug.write('Wrote %d old reflections to REMOVE.HKL' % \
                  (len(current_remove) - rejected))
      Debug.write('Rejected %d as z < %f' % \
                  (rejected, z_min))

      # and the new reflections
      rejected = 0
      used = 0
      for remove in final_remove:
        z = remove[3]
        if z >= z_min:
          used += 1
          remove_hkl.write('%d %d %d %f\n' % remove)
        else:
          rejected += 1
      Debug.write('Wrote %d new reflections to REMOVE.HKL' % \
                  (len(final_remove) - rejected))
      Debug.write('Rejected %d as z < %f' % \
                  (rejected, z_min))

      remove_hkl.close()

      # we want to rerun the finishing step so...
      # unless we have added no new reflections
      if used:
        self.set_scaler_done(False)

  if not self.get_scaler_done():
    Chatter.write('Excluding outlier reflections Z > %.2f' %
                  PhilIndex.params.xds.z_min)
    return

  debug_memory_usage()

  # now get the reflection files out and merge them with aimless

  output_files = xscale.get_output_reflection_files()
  wavelength_names = output_files.keys()

  # these are per wavelength - also allow for user defined resolution
  # limits a la bug # 3183. No longer...

  for epoch in self._sweep_information.keys():

    input = self._sweep_information[epoch]

    intgr = input['integrater']

    rkey = input['dname'], input['sname']

    if intgr.get_integrater_user_resolution():
      dmin = intgr.get_integrater_high_resolution()

      if rkey not in self._user_resolution_limits:
        self._scalr_resolution_limits[rkey] = (dmin, None)
        self._user_resolution_limits[rkey] = dmin
      elif dmin < self._user_resolution_limits[rkey]:
        self._scalr_resolution_limits[rkey] = (dmin, None)
        self._user_resolution_limits[rkey] = dmin

  self._scalr_scaled_refl_files = { }

  self._scalr_statistics = { }

  max_batches = 0
  mtz_dict = { }

  project_info = { }
  for epoch in self._sweep_information.keys():
    pname = self._scalr_pname
    xname = self._scalr_xname
    dname = self._sweep_information[epoch]['dname']
    reflections = os.path.split(
        self._sweep_information[epoch]['prepared_reflections'])[-1]
    project_info[reflections] = (pname, xname, dname)

  for epoch in self._sweep_information.keys():
    self._sweep_information[epoch]['scaled_reflections'] = None

  debug_memory_usage()

  for wavelength in wavelength_names:
    hklin = output_files[wavelength]

    xsh = XDSScalerHelper()
    xsh.set_working_directory(self.get_working_directory())

    ref = xsh.split_and_convert_xscale_output(
        hklin, 'SCALED_', project_info, 1.0 / scale_factor)

    for hklout in ref.keys():
      for epoch in self._sweep_information.keys():
        if os.path.split(self._sweep_information[epoch][
            'prepared_reflections'])[-1] == \
            os.path.split(hklout)[-1]:
          if self._sweep_information[epoch][
              'scaled_reflections'] is not None:
            raise RuntimeError, 'duplicate entries'
          self._sweep_information[epoch][
              'scaled_reflections'] = ref[hklout]

    del(xsh)

  debug_memory_usage()

  for epoch in self._sweep_information.keys():
    intgr = self._sweep_information[epoch]['integrater']
    hklin = self._sweep_information[epoch]['scaled_reflections']
    dname = self._sweep_information[epoch]['dname']
    sname = self._sweep_information[epoch]['sname']

    hkl_copy = os.path.join(self.get_working_directory(),
                            'R_%s' % os.path.split(hklin)[-1])

    if not os.path.exists(hkl_copy):
      shutil.copyfile(hklin, hkl_copy)

    # let's properly listen to the user's resolution limit needs...

    if self._user_resolution_limits.get((dname, sname), False):
      resolution = self._user_resolution_limits[(dname, sname)]

    else:
      if PhilIndex.params.xia2.settings.resolution.keep_all_reflections == True:
        try:
          resolution = intgr.get_detector().get_max_resolution(
              intgr.get_beam_obj().get_s0())
          Debug.write('keep_all_reflections set, using detector limits')
        except Exception:
          resolution = self._estimate_resolution_limit(hklin)
      else:
        resolution = self._estimate_resolution_limit(hklin)

    Chatter.write('Resolution for sweep %s/%s: %.2f' % \
                  (dname, sname, resolution))

    if (dname, sname) not in self._scalr_resolution_limits:
      self._scalr_resolution_limits[(dname, sname)] = (resolution, None)
      self.set_scaler_done(False)
    else:
      if resolution < self._scalr_resolution_limits[(dname, sname)][0]:
        self._scalr_resolution_limits[(dname, sname)] = (resolution, None)
        self.set_scaler_done(False)

  debug_memory_usage()

  if not self.get_scaler_done():
    Debug.write('Returning as scaling not finished...')
    return

  self._sort_together_data_xds()

  highest_resolution = min(limit for limit, _ in
                           self._scalr_resolution_limits.values())

  self._scalr_highest_resolution = highest_resolution

  Debug.write('Scaler highest resolution set to %5.2f' % \
              highest_resolution)

  if not self.get_scaler_done():
    Debug.write('Returning as scaling not finished...')
    return

  sdadd_full = 0.0
  sdb_full = 0.0

  # ---------- FINAL MERGING ----------

  sc = self._factory.Aimless()

  FileHandler.record_log_file('%s %s aimless' % (self._scalr_pname,
                                                 self._scalr_xname),
                              sc.get_log_file())

  sc.set_resolution(highest_resolution)
  sc.set_hklin(self._prepared_reflections)
  sc.set_new_scales_file('%s_final.scales' % self._scalr_xname)

  if sdadd_full == 0.0 and sdb_full == 0.0:
    pass
  else:
    sc.add_sd_correction('both', 1.0, sdadd_full, sdb_full)

  for epoch in epochs:
    input = self._sweep_information[epoch]
    start, end = (min(input['batches']), max(input['batches']))

    rkey = input['dname'], input['sname']
    run_resolution_limit, _ = self._scalr_resolution_limits[rkey]

    sc.add_run(start, end, exclude = False,
               resolution = run_resolution_limit,
               name = input['sname'])

  sc.set_hklout(os.path.join(self.get_working_directory(),
                             '%s_%s_scaled.mtz' % \
                             (self._scalr_pname, self._scalr_xname)))

  if self.get_scaler_anomalous():
    sc.set_anomalous()

  sc.multi_merge()

  FileHandler.record_xml_file('%s %s aimless xml' % (self._scalr_pname,
                                                     self._scalr_xname),
                              sc.get_xmlout())
  data = sc.get_summary()

  loggraph = sc.parse_ccp4_loggraph()

  standard_deviation_info = { }

  for key in loggraph.keys():
    if 'standard deviation v. Intensity' in key:
      dataset = key.split(',')[-1].strip()
      standard_deviation_info[dataset] = transpose_loggraph(
          loggraph[key])

  resolution_info = { }

  for key in loggraph.keys():
    if 'Analysis against resolution' in key:
      dataset = key.split(',')[-1].strip()
      resolution_info[dataset] = transpose_loggraph(
          loggraph[key])

  # and also radiation damage stuff...

  batch_info = { }

  for key in loggraph.keys():
    if 'Analysis against Batch' in key:
      dataset = key.split(',')[-1].strip()
      batch_info[dataset] = transpose_loggraph(
          loggraph[key])

  # finally put all of the results "somewhere useful"
  self._scalr_statistics = data

  self._scalr_scaled_refl_files = copy.deepcopy(
      sc.get_scaled_reflection_files())

  self._scalr_scaled_reflection_files = { }

  # also output the unmerged scalepack format files...
  sc = self._factory.Aimless()
  sc.set_resolution(highest_resolution)
  sc.set_hklin(self._prepared_reflections)
  sc.set_scalepack()

  for epoch in epochs:
    input = self._sweep_information[epoch]
    start, end = (min(input['batches']), max(input['batches']))

    rkey = input['dname'], input['sname']
    run_resolution_limit, _ = self._scalr_resolution_limits[rkey]

    sc.add_run(start, end, exclude = False,
               resolution = run_resolution_limit,
               name = input['sname'])

  sc.set_hklout(os.path.join(self.get_working_directory(),
                             '%s_%s_scaled.mtz' % \
                             (self._scalr_pname, self._scalr_xname)))

  if self.get_scaler_anomalous():
    sc.set_anomalous()

  sc.multi_merge()

  self._scalr_scaled_reflection_files['sca_unmerged'] = { }
  self._scalr_scaled_reflection_files['mtz_unmerged'] = { }

  for dataset in sc.get_scaled_reflection_files().keys():
    hklout = sc.get_scaled_reflection_files()[dataset]

    # then mark the scalepack files for copying...
    scalepack = os.path.join(os.path.split(hklout)[0],
                             os.path.split(hklout)[1].replace(
                                 '_scaled', '_scaled_unmerged').replace(
                                     '.mtz', '.sca'))
    self._scalr_scaled_reflection_files['sca_unmerged'][
        dataset] = scalepack
    FileHandler.record_data_file(scalepack)
    mtz_unmerged = os.path.splitext(scalepack)[0] + '.mtz'
    self._scalr_scaled_reflection_files['mtz_unmerged'][dataset] = mtz_unmerged
    FileHandler.record_data_file(mtz_unmerged)

  if PhilIndex.params.xia2.settings.merging_statistics.source == 'cctbx':
    for key in self._scalr_scaled_refl_files:
      stats = self._compute_scaler_statistics(
          self._scalr_scaled_reflection_files['mtz_unmerged'][key], wave=key)
      self._scalr_statistics[
          (self._scalr_pname, self._scalr_xname, key)] = stats

  # convert reflection files to .sca format - use mtz2various for this

  self._scalr_scaled_reflection_files['sca'] = { }
  self._scalr_scaled_reflection_files['hkl'] = { }

  for key in self._scalr_scaled_refl_files:
    f = self._scalr_scaled_refl_files[key]
    scaout = '%s.sca' % f[:-4]

    m2v = self._factory.Mtz2various()
    m2v.set_hklin(f)
    m2v.set_hklout(scaout)
    m2v.convert()

    self._scalr_scaled_reflection_files['sca'][key] = scaout
    FileHandler.record_data_file(scaout)

    if PhilIndex.params.xia2.settings.small_molecule == True:
      hklout = '%s.hkl' % f[:-4]

      m2v = self._factory.Mtz2various()
      m2v.set_hklin(f)
      m2v.set_hklout(hklout)
      m2v.convert_shelx()

      self._scalr_scaled_reflection_files['hkl'][key] = hklout
      FileHandler.record_data_file(hklout)

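# The outlier-rejection block in _scale() above merges XSCALE's newly
# flagged "alien" reflections with any existing REMOVE.HKL content, dropping
# duplicates and entries whose z-score falls below xds.z_min, before the
# file is rewritten and XSCALE is rerun. A minimal sketch of that merge,
# operating on (h, k, l, z) tuples:

def _merge_remove_hkl_sketch(current_remove, xscale_remove, z_min):
  '''Return (entries to write back to REMOVE.HKL, number of genuinely new
  entries that survived the z-score filter).'''
  new_entries = [c for c in xscale_remove if c not in current_remove]
  kept_old = [entry for entry in current_remove if entry[3] >= z_min]
  kept_new = [entry for entry in new_entries if entry[3] >= z_min]
  # scaling is rerun only if at least one genuinely new outlier was kept
  return kept_old + kept_new, len(kept_new)
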
def _scale_prepare(self):
  '''Prepare the data for scaling - this will reindex the reflections
  to the correct pointgroup and setting, for instance, and move the
  reflection files to the scale directory.'''

  Citations.cite('xds')
  Citations.cite('ccp4')
  Citations.cite('pointless')

  # GATHER phase - get the reflection files together... note that
  # it is not necessary in here to keep the batch information as we
  # don't wish to rebatch the reflections prior to scaling.
  # FIXME need to think about what I will do about the radiation
  # damage analysis in here...

  self._sweep_information = { }

  # FIXME in here I want to record the batch number to
  # epoch mapping as per the CCP4 Scaler implementation.

  Journal.block(
      'gathering', self.get_scaler_xcrystal().get_name(), 'XDS',
      {'working directory':self.get_working_directory()})

  for epoch in self._scalr_integraters.keys():
    intgr = self._scalr_integraters[epoch]
    pname, xname, dname = intgr.get_integrater_project_info()
    sname = intgr.get_integrater_sweep_name()
    self._sweep_information[epoch] = {
        'pname':pname,
        'xname':xname,
        'dname':dname,
        'integrater':intgr,
        'corrected_intensities':intgr.get_integrater_corrected_intensities(),
        'prepared_reflections':None,
        'scaled_reflections':None,
        'header':intgr.get_header(),
        'batches':intgr.get_integrater_batches(),
        'image_to_epoch':intgr.get_integrater_sweep(
            ).get_image_to_epoch(),
        'image_to_dose':{},
        'batch_offset':0,
        'sname':sname
        }

    Journal.entry({'adding data from':'%s/%s/%s' % \
                   (xname, dname, sname)})

    # what are these used for?
    # pname / xname / dname - dataset identifiers
    # image to epoch / batch offset / batches - for RD analysis

    Debug.write('For EPOCH %s have:' % str(epoch))
    Debug.write('ID = %s/%s/%s' % (pname, xname, dname))
    Debug.write('SWEEP = %s' % intgr.get_integrater_sweep_name())

  # next work through all of the reflection files and make sure that
  # they are XDS_ASCII format...

  epochs = self._sweep_information.keys()
  epochs.sort()

  self._first_epoch = min(epochs)

  self._scalr_pname = self._sweep_information[epochs[0]]['pname']
  self._scalr_xname = self._sweep_information[epochs[0]]['xname']

  for epoch in epochs:
    intgr = self._scalr_integraters[epoch]
    pname = self._sweep_information[epoch]['pname']
    xname = self._sweep_information[epoch]['xname']
    dname = self._sweep_information[epoch]['dname']
    sname = self._sweep_information[epoch]['sname']

    if self._scalr_pname != pname:
      raise RuntimeError, 'all data must have a common project name'

    xname = self._sweep_information[epoch]['xname']
    if self._scalr_xname != xname:
      raise RuntimeError, \
            'all data for scaling must come from one crystal'

    xsh = XDSScalerHelper()
    xsh.set_working_directory(self.get_working_directory())
    hklin = self._sweep_information[epoch]['corrected_intensities']
    hklout = os.path.join(self.get_working_directory(),
                          '%s_%s_%s_%s_CORRECTED.HKL' % (
                              pname, xname, dname, sname))
    sweep = intgr.get_integrater_sweep()
    if sweep.get_frames_to_process() is not None:
      offset = intgr.get_frame_offset()
      #print "offset: %d" % offset
      start, end = sweep.get_frames_to_process()
      start -= offset
      end -= offset
      #end += 1 ????
      #print "limiting batches: %d-%d" % (start, end)
      xsh.limit_batches(hklin, hklout, start, end)
      self._sweep_information[epoch]['corrected_intensities'] = hklout

  # if there is more than one sweep then compare the lattices
  # and eliminate all but the lowest symmetry examples if
  # there are more than one...

  # -------------------------------------------------
  # Ensure that the integration lattices are the same
  # -------------------------------------------------

  need_to_return = False

  if len(self._sweep_information.keys()) > 1:

    lattices = []

    # FIXME run this stuff in parallel as well...

    for epoch in self._sweep_information.keys():

      intgr = self._sweep_information[epoch]['integrater']
      hklin = self._sweep_information[epoch]['corrected_intensities']
      refiner = intgr.get_integrater_refiner()

      if self._scalr_input_pointgroup:
        pointgroup = self._scalr_input_pointgroup
        reindex_op = 'h,k,l'
        ntr = False

      else:
        pointgroup, reindex_op, ntr = \
            self._pointless_indexer_jiffy(hklin, refiner)

        Debug.write('X1698: %s: %s' % (pointgroup, reindex_op))

      lattice = Syminfo.get_lattice(pointgroup)

      if not lattice in lattices:
        lattices.append(lattice)

      if ntr:

        # if we need to return, we should logically reset
        # any reindexing operator right? right here all
        # we are talking about is the correctness of
        # individual pointgroups?? Bug # 3373

        reindex_op = 'h,k,l'
        # actually, should this not be done "by magic"
        # when a new pointgroup is assigned in the
        # pointless indexer jiffy above?!

        intgr.set_integrater_reindex_operator(
            reindex_op, compose = False)

        need_to_return = True

    # bug # 2433 - need to ensure that all of the lattice
    # conclusions were the same...

    if len(lattices) > 1:
      ordered_lattices = []
      for l in lattices_in_order():
        if l in lattices:
          ordered_lattices.append(l)

      correct_lattice = ordered_lattices[0]
      Debug.write('Correct lattice asserted to be %s' % \
                  correct_lattice)

      # transfer this information back to the indexers
      for epoch in self._sweep_information.keys():
        integrater = self._sweep_information[
            epoch]['integrater']
        refiner = integrater.get_integrater_refiner()
        sname = integrater.get_integrater_sweep_name()

        if not refiner:
          continue

        state = refiner.set_refiner_asserted_lattice(
            correct_lattice)

        if state == refiner.LATTICE_CORRECT:
          Debug.write('Lattice %s ok for sweep %s' % \
                      (correct_lattice, sname))
        elif state == refiner.LATTICE_IMPOSSIBLE:
          raise RuntimeError, 'Lattice %s impossible for %s' % \
                (correct_lattice, sname)
        elif state == refiner.LATTICE_POSSIBLE:
          Debug.write('Lattice %s assigned for sweep %s' % \
                      (correct_lattice, sname))
          need_to_return = True

  # if one or more of them was not in the lowest lattice,
  # need to return here to allow reprocessing

  if need_to_return:
    self.set_scaler_done(False)
    self.set_scaler_prepare_done(False)
    return

  # next if there is more than one sweep then generate
  # a merged reference reflection file to check that the
  # setting for all reflection files is the same...

  # if we get to here then all data was processed with the same
  # lattice

  # ----------------------------------------------------------
  # next ensure that all sweeps are set in the correct setting
  # ----------------------------------------------------------

  if self.get_scaler_reference_reflection_file():
    self._reference = self.get_scaler_reference_reflection_file()
    Debug.write('Using HKLREF %s' % self._reference)

    md = self._factory.Mtzdump()
    md.set_hklin(self.get_scaler_reference_reflection_file())
    md.dump()

    self._xds_spacegroup = Syminfo.spacegroup_name_to_number(
        md.get_spacegroup())

    Debug.write('Spacegroup %d' % self._xds_spacegroup)

  elif PhilIndex.params.xia2.settings.scale.reference_reflection_file:
    self._reference = \
        PhilIndex.params.xia2.settings.scale.reference_reflection_file

    Debug.write('Using HKLREF %s' % self._reference)

    md = self._factory.Mtzdump()
    md.set_hklin(
        PhilIndex.params.xia2.settings.scale.reference_reflection_file)
    md.dump()

    self._xds_spacegroup = Syminfo.spacegroup_name_to_number(
        md.get_spacegroup())

    Debug.write('Spacegroup %d' % self._xds_spacegroup)

  params = PhilIndex.params
  use_brehm_diederichs = params.xia2.settings.use_brehm_diederichs
  if len(self._sweep_information.keys()) > 1 and use_brehm_diederichs:

    brehm_diederichs_files_in = []

    for epoch in self._sweep_information.keys():

      intgr = self._sweep_information[epoch]['integrater']
      hklin = self._sweep_information[epoch]['corrected_intensities']
      refiner = intgr.get_integrater_refiner()

      # in here need to consider what to do if the user has
      # assigned the pointgroup on the command line ...

      if not self._scalr_input_pointgroup:
        pointgroup, reindex_op, ntr = \
            self._pointless_indexer_jiffy(hklin, refiner)

        if ntr:

          # Bug # 3373

          Debug.write('Reindex to standard (PIJ): %s' % \
                      reindex_op)

          intgr.set_integrater_reindex_operator(
              reindex_op, compose = False)
          reindex_op = 'h,k,l'
          need_to_return = True

      else:

        # 27/FEB/08 to support user assignment of pointgroups

        Debug.write('Using input pointgroup: %s' % \
                    self._scalr_input_pointgroup)
        pointgroup = self._scalr_input_pointgroup
        reindex_op = 'h,k,l'

      intgr.set_integrater_reindex_operator(reindex_op)
      intgr.set_integrater_spacegroup_number(
          Syminfo.spacegroup_name_to_number(pointgroup))
      self._sweep_information[epoch]['corrected_intensities'] \
          = intgr.get_integrater_corrected_intensities()

      # convert the XDS_ASCII for this sweep to mtz - on the next
      # get this should be in the correct setting...

      dname = self._sweep_information[epoch]['dname']
      sname = intgr.get_integrater_sweep_name()
      hklin = self._sweep_information[epoch]['corrected_intensities']
      hklout = os.path.join(self.get_working_directory(),
                            '%s_%s.mtz' % (dname, sname))

      FileHandler.record_temporary_file(hklout)

      # now use pointless to make this conversion
      pointless = self._factory.Pointless()
      pointless.set_xdsin(hklin)
      pointless.set_hklout(hklout)
      pointless.xds_to_mtz()
      brehm_diederichs_files_in.append(hklout)

    # now run cctbx.brehm_diederichs to figure out the indexing hand for
    # each sweep
    from xia2.Wrappers.Cctbx.BrehmDiederichs import BrehmDiederichs
    from xia2.lib.bits import auto_logfiler
    brehm_diederichs = BrehmDiederichs()
    brehm_diederichs.set_working_directory(self.get_working_directory())
    auto_logfiler(brehm_diederichs)
    brehm_diederichs.set_input_filenames(brehm_diederichs_files_in)
    # 1 or 3? 1 seems to work better?
    brehm_diederichs.set_asymmetric(1)
    brehm_diederichs.run()
    reindexing_dict = brehm_diederichs.get_reindexing_dict()

    for epoch in self._sweep_information.keys():
      intgr = self._sweep_information[epoch]['integrater']
      dname = self._sweep_information[epoch]['dname']
      sname = intgr.get_integrater_sweep_name()
      hklout = os.path.join(self.get_working_directory(),
                            '%s_%s.mtz' % (dname, sname))

      # look up the operator determined for this sweep's mtz file,
      # then apply the reindexing operator
      reindex_op = reindexing_dict.get(os.path.abspath(hklout))
      assert reindex_op is not None
      intgr.set_integrater_reindex_operator(reindex_op)

      # and copy the reflection file to the local directory
      hklin = self._sweep_information[epoch]['corrected_intensities']
      hklout = os.path.join(self.get_working_directory(),
                            '%s_%s.HKL' % (dname, sname))

      Debug.write('Copying %s to %s' % (hklin, hklout))
      shutil.copyfile(hklin, hklout)

      # record just the local file name...
      self._sweep_information[epoch][
          'prepared_reflections'] = os.path.split(hklout)[-1]

  elif len(self._sweep_information.keys()) > 1 and \
         not self._reference:
    # need to generate a reference reflection file - generate this
    # from the reflections in self._first_epoch
    #
    # FIXME this should really use the Brehm and Diederichs method
    # if you have lots of little sweeps...

    intgr = self._sweep_information[self._first_epoch]['integrater']
    hklin = self._sweep_information[self._first_epoch][
        'corrected_intensities']
    refiner = intgr.get_integrater_refiner()

    if self._scalr_input_pointgroup:
      Debug.write('Using input pointgroup: %s' % \
                  self._scalr_input_pointgroup)
      pointgroup = self._scalr_input_pointgroup
      ntr = False
      reindex_op = 'h,k,l'

    else:
      pointgroup, reindex_op, ntr = self._pointless_indexer_jiffy(
          hklin, refiner)

      Debug.write('X1698: %s: %s' % (pointgroup, reindex_op))

    reference_reindex_op = intgr.get_integrater_reindex_operator()

    if ntr:

      # Bug # 3373

      intgr.set_integrater_reindex_operator(
          reindex_op, compose = False)
      reindex_op = 'h,k,l'
      need_to_return = True

    self._xds_spacegroup = Syminfo.spacegroup_name_to_number(pointgroup)

    # next pass this reindexing operator back to the source
    # of the reflections

    intgr.set_integrater_reindex_operator(reindex_op)
    intgr.set_integrater_spacegroup_number(
        Syminfo.spacegroup_name_to_number(pointgroup))
    self._sweep_information[self._first_epoch]['corrected_intensities'] \
        = intgr.get_integrater_corrected_intensities()

    hklin = self._sweep_information[self._first_epoch][
        'corrected_intensities']

    hklout = os.path.join(self.get_working_directory(),
                          'xds-pointgroup-reference-unsorted.mtz')
    FileHandler.record_temporary_file(hklout)

    # now use pointless to handle this conversion
    pointless = self._factory.Pointless()
    pointless.set_xdsin(hklin)
    pointless.set_hklout(hklout)
    pointless.xds_to_mtz()

    self._reference = hklout

  if self._reference:

    from xia2.Driver.DriverFactory import DriverFactory

    def run_one_sweep(args):

      sweep_information = args[0]
      pointless_indexer_jiffy = args[1]
      factory = args[2]
      job_type = args[3]

      if job_type:
        DriverFactory.set_driver_type(job_type)

      intgr = sweep_information['integrater']
      hklin = sweep_information['corrected_intensities']
      refiner = intgr.get_integrater_refiner()

      # in here need to consider what to do if the user has
      # assigned the pointgroup on the command line ...

      if not self._scalr_input_pointgroup:
        pointgroup, reindex_op, ntr = \
            self._pointless_indexer_jiffy(hklin, refiner)

        if ntr:

          # Bug # 3373

          Debug.write('Reindex to standard (PIJ): %s' % \
                      reindex_op)

          intgr.set_integrater_reindex_operator(
              reindex_op, compose = False)
          reindex_op = 'h,k,l'
          need_to_return = True

      else:

        # 27/FEB/08 to support user assignment of pointgroups

        Debug.write('Using input pointgroup: %s' % \
                    self._scalr_input_pointgroup)
        pointgroup = self._scalr_input_pointgroup
        reindex_op = 'h,k,l'

      intgr.set_integrater_reindex_operator(reindex_op)
      intgr.set_integrater_spacegroup_number(
          Syminfo.spacegroup_name_to_number(pointgroup))
      sweep_information['corrected_intensities'] \
          = intgr.get_integrater_corrected_intensities()

      # convert the XDS_ASCII for this sweep to mtz - on the next
      # get this should be in the correct setting...

      hklin = sweep_information['corrected_intensities']

      # now use pointless to make this conversion

      # try with no conversion?!

      pointless = self._factory.Pointless()
      pointless.set_xdsin(hklin)
      hklout = os.path.join(
          self.get_working_directory(),
          '%d_xds-pointgroup-unsorted.mtz' % pointless.get_xpid())
      FileHandler.record_temporary_file(hklout)
      pointless.set_hklout(hklout)
      pointless.xds_to_mtz()

      pointless = self._factory.Pointless()
      pointless.set_hklin(hklout)
      pointless.set_hklref(self._reference)
      pointless.decide_pointgroup()

      pointgroup = pointless.get_pointgroup()
      reindex_op = pointless.get_reindex_operator()

      # for debugging print out the reindexing operations and
      # what have you...
      Debug.write('Reindex to standard: %s' % reindex_op)

      # this should send back enough information that this
      # is in the correct pointgroup (from the call above) and
      # also in the correct setting, from the interaction
      # with the reference set... - though I guess that the
      # spacegroup number should not have changed, right?

      # set the reindex operation afterwards... though if the
      # spacegroup number is the same this should make no
      # difference, right?!

      intgr.set_integrater_spacegroup_number(
          Syminfo.spacegroup_name_to_number(pointgroup))
      intgr.set_integrater_reindex_operator(reindex_op)
      sweep_information['corrected_intensities'] \
          = intgr.get_integrater_corrected_intensities()

      # and copy the reflection file to the local directory
      dname = sweep_information['dname']
      sname = intgr.get_integrater_sweep_name()
      hklin = sweep_information['corrected_intensities']
      hklout = os.path.join(self.get_working_directory(),
                            '%s_%s.HKL' % (dname, sname))

      Debug.write('Copying %s to %s' % (hklin, hklout))
      shutil.copyfile(hklin, hklout)

      # record just the local file name...
      sweep_information['prepared_reflections'] = \
          os.path.split(hklout)[-1]

      return sweep_information

    from libtbx import easy_mp
    params = PhilIndex.get_python_object()
    mp_params = params.xia2.settings.multiprocessing
    njob = mp_params.njob

    if njob > 1:
      # cache drivertype
      drivertype = DriverFactory.get_driver_type()

      args = [
          (self._sweep_information[epoch],
           self._pointless_indexer_jiffy, self._factory, mp_params.type)
          for epoch in self._sweep_information.keys()]
      results_list = easy_mp.parallel_map(
          run_one_sweep, args, params=None,
          processes=njob,
          method="threading",
          asynchronous=True,
          callback=None,
          preserve_order=True,
          preserve_exception_message=True)

      # restore drivertype
      DriverFactory.set_driver_type(drivertype)

      # results should be given back in the same order
      for i, epoch in enumerate(self._sweep_information.keys()):
        self._sweep_information[epoch] = results_list[i]

    else:
      for epoch in self._sweep_information.keys():
        self._sweep_information[epoch] = run_one_sweep(
            (self._sweep_information[epoch],
             self._pointless_indexer_jiffy,
             self._factory, None))

  else:
    # convert the XDS_ASCII for this sweep to mtz

    epoch = self._first_epoch
    intgr = self._sweep_information[epoch]['integrater']
    refiner = intgr.get_integrater_refiner()
    sname = intgr.get_integrater_sweep_name()

    hklout = os.path.join(self.get_working_directory(),
                          '%s-pointless.mtz' % sname)
    FileHandler.record_temporary_file(hklout)

    pointless = self._factory.Pointless()
    pointless.set_xdsin(
        self._sweep_information[epoch]['corrected_intensities'])
    pointless.set_hklout(hklout)
    pointless.xds_to_mtz()

    # run it through pointless interacting with the
    # Indexer which belongs to this sweep

    hklin = hklout

    if self._scalr_input_pointgroup:
      Debug.write('Using input pointgroup: %s' % \
                  self._scalr_input_pointgroup)
      pointgroup = self._scalr_input_pointgroup
      ntr = False
      reindex_op = 'h,k,l'

    else:
      pointgroup, reindex_op, ntr = self._pointless_indexer_jiffy(
          hklin, refiner)

    if ntr:

      # if we need to return, we should logically reset
      # any reindexing operator right? right here all
      # we are talking about is the correctness of
      # individual pointgroups?? Bug # 3373

      reindex_op = 'h,k,l'
      intgr.set_integrater_reindex_operator(
          reindex_op, compose = False)

      need_to_return = True

    self._xds_spacegroup = Syminfo.spacegroup_name_to_number(pointgroup)

    # next pass this reindexing operator back to the source
    # of the reflections

    intgr.set_integrater_reindex_operator(reindex_op)
    intgr.set_integrater_spacegroup_number(
        Syminfo.spacegroup_name_to_number(pointgroup))
    self._sweep_information[epoch]['corrected_intensities'] \
        = intgr.get_integrater_corrected_intensities()

    hklin = self._sweep_information[epoch]['corrected_intensities']
    dname = self._sweep_information[epoch]['dname']
    hklout = os.path.join(self.get_working_directory(),
                          '%s_%s.HKL' % (dname, sname))

    # and copy the reflection file to the local directory
    Debug.write('Copying %s to %s' % (hklin, hklout))
    shutil.copyfile(hklin, hklout)

    # record just the local file name...
    self._sweep_information[epoch][
        'prepared_reflections'] = os.path.split(hklout)[-1]

  if need_to_return:
    self.set_scaler_done(False)
    self.set_scaler_prepare_done(False)
    return

  unit_cell_list = []

  for epoch in self._sweep_information.keys():
    integrater = self._sweep_information[epoch]['integrater']
    cell = integrater.get_integrater_cell()
    n_ref = integrater.get_integrater_n_ref()

    Debug.write('Cell for %s: %.2f %.2f %.2f %.2f %.2f %.2f' % \
                (integrater.get_integrater_sweep_name(),
                 cell[0], cell[1], cell[2],
                 cell[3], cell[4], cell[5]))
    Debug.write('=> %d reflections' % n_ref)

    unit_cell_list.append((cell, n_ref))

  self._scalr_cell = compute_average_unit_cell(unit_cell_list)

  self._scalr_resolution_limits = { }

  Debug.write('Determined unit cell: %.2f %.2f %.2f %.2f %.2f %.2f' % \
              tuple(self._scalr_cell))

  if os.path.exists(os.path.join(
      self.get_working_directory(), 'REMOVE.HKL')):
    os.remove(os.path.join(
        self.get_working_directory(), 'REMOVE.HKL'))

    Debug.write('Deleting REMOVE.HKL at end of scale prepare.')

  return

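# _scale_prepare() above ends by averaging the refined unit cells of all
# sweeps via compute_average_unit_cell, which takes (cell, n_ref) pairs. A
# minimal sketch of what such an average plausibly computes - a mean of
# each of the six cell parameters weighted by reflection count (this is an
# assumption about the helper, not a copy of its implementation):

def _average_unit_cell_sketch(unit_cell_list):
  '''Average (a, b, c, alpha, beta, gamma) tuples weighted by n_ref.'''
  total = sum(n_ref for cell, n_ref in unit_cell_list)
  return tuple(sum(cell[j] * n_ref for cell, n_ref in unit_cell_list) / total
               for j in range(6))
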
def _integrate(self): """Actually do the integration - in XDS terms this will mean running DEFPIX and INTEGRATE to measure all the reflections.""" experiment = self._intgr_refiner.get_refined_experiment_list( self.get_integrater_epoch())[0] crystal_model = experiment.crystal self._intgr_refiner_cell = crystal_model.get_unit_cell().parameters() images_str = "%d to %d" % tuple(self._intgr_wedge) cell_str = "%.2f %.2f %.2f %.2f %.2f %.2f" % tuple( self._intgr_refiner_cell) if len(self._fp_directory) <= 50: dirname = self._fp_directory else: dirname = "...%s" % self._fp_directory[-46:] Journal.block( "integrating", self._intgr_sweep_name, "XDS", { "images": images_str, "cell": cell_str, "lattice": self._intgr_refiner.get_refiner_lattice(), "template": self._fp_template, "directory": dirname, "resolution": "%.2f" % self._intgr_reso_high, }, ) defpix = self.Defpix() # pass in the correct data for file in [ "X-CORRECTIONS.cbf", "Y-CORRECTIONS.cbf", "BKGINIT.cbf", "XPARM.XDS", ]: defpix.set_input_data_file(file, self._xds_data_files[file]) defpix.set_data_range( self._intgr_wedge[0] + self.get_frame_offset(), self._intgr_wedge[1] + self.get_frame_offset(), ) if (self.get_integrater_high_resolution() > 0.0 and self.get_integrater_user_resolution()): Debug.write("Setting resolution limit in DEFPIX to %.2f" % self.get_integrater_high_resolution()) defpix.set_resolution_high(self.get_integrater_high_resolution()) defpix.set_resolution_low(self.get_integrater_low_resolution()) elif self.get_integrater_low_resolution(): Debug.write("Setting low resolution limit in DEFPIX to %.2f" % self.get_integrater_low_resolution()) defpix.set_resolution_high(0.0) defpix.set_resolution_low(self.get_integrater_low_resolution()) defpix.run() # and gather the result files for file in ["BKGPIX.cbf", "ABS.cbf"]: self._xds_data_files[file] = defpix.get_output_data_file(file) integrate = self.Integrate() if self._xds_integrate_parameters: integrate.set_updates(self._xds_integrate_parameters) # decide what images we are going to process, if not already # specified if not self._intgr_wedge: images = self.get_matching_images() self.set_integrater_wedge(min(images), max(images)) integrate.set_data_range( self._intgr_wedge[0] + self.get_frame_offset(), self._intgr_wedge[1] + self.get_frame_offset(), ) for file in [ "X-CORRECTIONS.cbf", "Y-CORRECTIONS.cbf", "BLANK.cbf", "BKGPIX.cbf", "GAIN.cbf", ]: integrate.set_input_data_file(file, self._xds_data_files[file]) if "GXPARM.XDS" in self._xds_data_files: Debug.write("Using globally refined parameters") integrate.set_input_data_file("XPARM.XDS", self._xds_data_files["GXPARM.XDS"]) integrate.set_refined_xparm() else: integrate.set_input_data_file("XPARM.XDS", self._xds_data_files["XPARM.XDS"]) integrate.run() self._intgr_per_image_statistics = integrate.get_per_image_statistics() Chatter.write(self.show_per_image_statistics()) # record the log file - pname, xname, dname = self.get_integrater_project_info() sweep = self.get_integrater_sweep_name() FileHandler.record_log_file( "%s %s %s %s INTEGRATE" % (pname, xname, dname, sweep), os.path.join(self.get_working_directory(), "INTEGRATE.LP"), ) # and copy the first pass INTEGRATE.HKL... lattice = self._intgr_refiner.get_refiner_lattice() if not os.path.exists( os.path.join(self.get_working_directory(), "INTEGRATE-%s.HKL" % lattice)): here = self.get_working_directory() shutil.copyfile( os.path.join(here, "INTEGRATE.HKL"), os.path.join(here, "INTEGRATE-%s.HKL" % lattice), ) # record INTEGRATE.HKL for e.g. BLEND. 
FileHandler.record_more_data_file( "%s %s %s %s INTEGRATE" % (pname, xname, dname, sweep), os.path.join(self.get_working_directory(), "INTEGRATE.HKL"), ) # should the existence of these require that I rerun the # integration or can we assume that the application of a # sensible resolution limit will achieve this?? self._xds_integrate_parameters = integrate.get_updates() # record the mosaic spread &c. m_min, m_mean, m_max = integrate.get_mosaic() self.set_integrater_mosaic_min_mean_max(m_min, m_mean, m_max) Chatter.write("Mosaic spread: %.3f < %.3f < %.3f" % self.get_integrater_mosaic_min_mean_max()) return os.path.join(self.get_working_directory(), "INTEGRATE.HKL")
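# The XPARM selection above prefers the globally refined geometry
# (GXPARM.XDS) when it has been recorded, otherwise falls back to the
# post-IDXREF XPARM.XDS. The decision in isolation, assuming the same
# dictionary keys as the code above (helper name is illustrative):

def select_xparm(xds_data_files):
    # returns (key to pass to INTEGRATE, whether geometry is globally refined)
    if 'GXPARM.XDS' in xds_data_files:
        return 'GXPARM.XDS', True
    return 'XPARM.XDS', False

# e.g. key, refined = select_xparm(self._xds_data_files)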
def _index(self):
    '''Actually index the diffraction pattern. Note well that this is
    not going to compute the matrix...'''

    if not self._indxr_images:
        raise RuntimeError('No good spots found on any images')

    # acknowledge this program
    Citations.cite('labelit')
    Citations.cite('distl')

    _images = []
    for i in self._indxr_images:
        for j in i:
            if not j in _images:
                _images.append(j)

    _images.sort()

    images_str = '%d' % _images[0]
    for i in _images[1:]:
        images_str += ', %d' % i

    cell_str = None
    if self._indxr_input_cell:
        cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % \
                   self._indxr_input_cell

    if self._indxr_sweep_name:

        # then this is a proper autoindexing run - describe this
        # to the journal entry

        if len(self._fp_directory) <= 50:
            dirname = self._fp_directory
        else:
            dirname = '...%s' % self._fp_directory[-46:]

        Journal.block(
            'autoindexing', self._indxr_sweep_name, 'labelit',
            {'images':images_str,
             'target cell':cell_str,
             'target lattice':self._indxr_input_lattice,
             'template':self._fp_template,
             'directory':dirname})

    #auto_logfiler(self)

    from xia2.Wrappers.Labelit.LabelitIndex import LabelitIndex
    index = LabelitIndex()
    index.set_working_directory(self.get_working_directory())
    auto_logfiler(index)

    #task = 'Autoindex from images:'
    #for i in _images:
        #task += ' %s' % self.get_image_name(i)
    #self.set_task(task)

    #self.add_command_line('--index_only')

    Debug.write('Indexing from images:')
    for i in _images:
        index.add_image(self.get_image_name(i))
        Debug.write('%s' % self.get_image_name(i))

    # N.B. this branch is deliberately disabled ('and False') - the input
    # lattice is not passed through to LabelitIndex here
    if self._indxr_input_lattice and False:
        index.set_space_group_number(
            lattice_to_spacegroup(self._indxr_input_lattice))

    if self._primitive_unit_cell:
        index.set_primitive_unit_cell(self._primitive_unit_cell)

    if self._indxr_input_cell:
        index.set_max_cell(1.25 * max(self._indxr_input_cell[:3]))

    xsweep = self.get_indexer_sweep()
    if xsweep is not None:
        if xsweep.get_distance() is not None:
            index.set_distance(xsweep.get_distance())
        #if self.get_wavelength_prov() == 'user':
            #index.set_wavelength(self.get_wavelength())
        if xsweep.get_beam_centre() is not None:
            index.set_beam_centre(xsweep.get_beam_centre())

    if self._refine_beam is False:
        index.set_refine_beam(False)
    else:
        index.set_refine_beam(True)
        index.set_beam_search_scope(self._beam_search_scope)

    if ((math.fabs(self.get_wavelength() - 1.54) < 0.01) or
        (math.fabs(self.get_wavelength() - 2.29) < 0.01)):
        index.set_Cu_KA_or_Cr_KA(True)

    try:
        index.run()
    except RuntimeError as e:
        if self._refine_beam is False:
            raise e

        # can we improve the situation?

        if self._beam_search_scope < 4.0:
            self._beam_search_scope += 4.0

            # try repeating the indexing!

            self.set_indexer_done(False)
            return 'failed'

        # otherwise this is beyond redemption

        raise e
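# The failure path above widens the beam search and asks the framework to
# repeat indexing, allowing one retry before giving up. The control flow in
# isolation, with run_index as a hypothetical stand-in for index.run():

def index_with_widening_search(run_index, max_scope=4.0, step=4.0):
    # condensed sketch of the retry logic above, not a drop-in implementation
    scope = 0.0
    while True:
        try:
            return run_index(scope)
        except RuntimeError:
            if scope >= max_scope:
                raise  # beyond redemption
            scope += step  # widen the beam search and try again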
def _index(self):
    '''Implement the indexer interface.'''

    Citations.cite('mosflm')

    indexer = MosflmIndex()
    indexer.set_working_directory(self.get_working_directory())
    auto_logfiler(indexer)

    from xia2.lib.bits import unique_elements
    _images = unique_elements(self._indxr_images)
    indexer.set_images(_images)
    images_str = ', '.join(map(str, _images))

    cell_str = None
    if self._indxr_input_cell:
        cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % \
                   self._indxr_input_cell

    if self._indxr_sweep_name:

        #if len(self._fp_directory) <= 50:
            #dirname = self._fp_directory
        #else:
            #dirname = '...%s' % self._fp_directory[-46:]
        dirname = os.path.dirname(self.get_imageset().get_template())

        Journal.block(
            'autoindexing', self._indxr_sweep_name, 'mosflm',
            {'images':images_str,
             'target cell':self._indxr_input_cell,
             'target lattice':self._indxr_input_lattice,
             'template':self.get_imageset().get_template(),
             'directory':dirname})

    #task = 'Autoindex from images:'
    #for i in _images:
        #task += ' %s' % self.get_image_name(i)
    #self.set_task(task)

    indexer.set_template(os.path.basename(self.get_template()))
    indexer.set_directory(self.get_directory())

    xsweep = self.get_indexer_sweep()
    if xsweep is not None:
        if xsweep.get_distance() is not None:
            indexer.set_distance(xsweep.get_distance())
        #if self.get_wavelength_prov() == 'user':
            #index.set_wavelength(self.get_wavelength())
        if xsweep.get_beam_centre() is not None:
            indexer.set_beam_centre(xsweep.get_beam_centre())

    if self._indxr_input_cell:
        indexer.set_unit_cell(self._indxr_input_cell)

    if self._indxr_input_lattice is not None:
        spacegroup_number = lattice_to_spacegroup(
            self._indxr_input_lattice)
        indexer.set_space_group_number(spacegroup_number)

    if not self._mosflm_autoindex_thresh:

        try:
            min_peaks = 200

            Debug.write('Aiming for at least %d spots...' % min_peaks)

            thresholds = []

            for i in _images:
                p = Printpeaks()
                p.set_working_directory(self.get_working_directory())
                auto_logfiler(p)
                p.set_image(self.get_image_name(i))

                thresh = p.threshold(min_peaks)

                Debug.write('Autoindex threshold for image %d: %d' %
                            (i, thresh))

                thresholds.append(thresh)

            thresh = min(thresholds)
            self._mosflm_autoindex_thresh = thresh

        except Exception as e:
            print(str(e))  #XXX this should disappear!
            Debug.write('Error computing threshold: %s' % str(e))
            Debug.write('Using default of 20.0')
            thresh = 20.0

    else:
        thresh = self._mosflm_autoindex_thresh

    Debug.write('Using autoindex threshold: %d' % thresh)

    if self._mosflm_autoindex_sol:
        indexer.set_solution_number(self._mosflm_autoindex_sol)
    indexer.set_threshold(thresh)

    # now forget this to prevent weird things happening later on
    if self._mosflm_autoindex_sol:
        self._mosflm_autoindex_sol = 0

    indexer.run()

    #sweep = self.get_indexer_sweep_name()
    #FileHandler.record_log_file(
        #'%s INDEX' % (sweep), self.get_log_file())

    indxr_cell = indexer.get_refined_unit_cell()
    self._indxr_lattice = indexer.get_lattice()
    space_group_number = indexer.get_indexed_space_group_number()
    detector_distance = indexer.get_refined_distance()
    beam_centre = indexer.get_refined_beam_centre()
    mosaic_spreads = indexer.get_mosaic_spreads()

    if min(list(indxr_cell)) < 10.0 and \
       indxr_cell[2] / indxr_cell[0] > 6:

        Debug.write(
            'Unrealistic autoindexing solution: ' +
            '%.2f %.2f %.2f %.2f %.2f %.2f' % indxr_cell)

        # tweak some parameters and try again...
        self._mosflm_autoindex_thresh *= 1.5
        self.set_indexer_done(False)

        return

    intgr_params = {}

    # look up other possible indexing solutions (not well - in
    # standard settings only!) This is moved earlier as it could
    # result in returning if Mosflm has selected the wrong
    # solution!

    try:
        self._indxr_other_lattice_cell = indexer.get_solutions()

        # Change 27/FEB/08 to support user assigned spacegroups
        if self._indxr_user_input_lattice:
            lattice_to_spacegroup_dict = {
                'aP':1, 'mP':3, 'mC':5, 'oP':16, 'oC':20, 'oF':22,
                'oI':23, 'tP':75, 'tI':79, 'hP':143, 'hR':146,
                'cP':195, 'cF':196, 'cI':197}
            # iterate over a copy of the keys - entries may be deleted
            for k in list(self._indxr_other_lattice_cell.keys()):
                if lattice_to_spacegroup_dict[k] > \
                   lattice_to_spacegroup_dict[
                       self._indxr_input_lattice]:
                    del self._indxr_other_lattice_cell[k]

        # check that the selected unit cell matches - and if
        # not raise a "horrible" exception

        if self._indxr_input_cell:
            assert indxr_cell is not None
            for j in range(6):
                if math.fabs(self._indxr_input_cell[j] -
                             indxr_cell[j]) > 2.0:
                    Chatter.write(
                        'Mosflm autoindexing did not select ' +
                        'correct (target) unit cell')
                    raise RuntimeError(
                        'something horrible happened in indexing')

    except RuntimeError as e:
        # check whether mosflm rejected the solution we asked for
        if 'horribl' in str(e):
            # ok it did - time to break out the big guns...
            if not self._indxr_input_cell:
                raise RuntimeError(
                    'error in solution selection when not preset')

            # XXX FIXME
            self._mosflm_autoindex_sol = _get_indexing_solution_number(
                indexer.get_all_output(),
                self._indxr_input_cell,
                self._indxr_input_lattice)

            # set the fact that we are not done...
            self.set_indexer_done(False)

            # and return - hopefully this will restart everything
            return

        else:
            raise e
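# The lattice_to_spacegroup_dict above orders the Bravais lattices by the
# space-group number of a reference setting, which is what lets the loop
# discard candidate solutions of higher symmetry than the user-assigned
# lattice. The same filter written over a copy of that mapping:

LATTICE_TO_SPACEGROUP = {
    'aP': 1, 'mP': 3, 'mC': 5, 'oP': 16, 'oC': 20, 'oF': 22,
    'oI': 23, 'tP': 75, 'tI': 79, 'hP': 143, 'hR': 146,
    'cP': 195, 'cF': 196, 'cI': 197}

def filter_higher_symmetry(solutions, input_lattice):
    # keep only candidate lattices at or below the symmetry of the
    # user-assigned lattice; solutions is keyed by lattice symbol
    limit = LATTICE_TO_SPACEGROUP[input_lattice]
    return {k: v for k, v in solutions.items()
            if LATTICE_TO_SPACEGROUP[k] <= limit}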
def _index(self): '''Actually do the autoindexing using the data prepared by the previous method.''' images_str = '%d to %d' % tuple(self._indxr_images[0]) for i in self._indxr_images[1:]: images_str += ', %d to %d' % tuple(i) cell_str = None if self._indxr_input_cell: cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % \ self._indxr_input_cell # then this is a proper autoindexing run - describe this # to the journal entry dirname = self.get_directory() Journal.block('autoindexing', self._indxr_sweep_name, 'XDS', {'images':images_str, 'target cell':cell_str, 'target lattice':self._indxr_input_lattice, 'template':self.get_template(), 'directory':dirname}) idxref = self.Idxref() self._index_remove_masked_regions() for file in ['SPOT.XDS']: idxref.set_input_data_file(file, self._indxr_payload[file]) # edit SPOT.XDS to remove reflections in untrusted regions of the detector idxref.set_data_range(self._indxr_images[0][0], self._indxr_images[0][1]) idxref.set_background_range(self._indxr_images[0][0], self._indxr_images[0][1]) # set the phi start etc correctly for block in self._indxr_images[:1]: starting_frame = block[0] starting_angle = self.get_scan().get_angle_from_image_index(starting_frame) idxref.set_starting_frame(starting_frame) idxref.set_starting_angle(starting_angle) idxref.add_spot_range(block[0], block[1]) for block in self._indxr_images[1:]: idxref.add_spot_range(block[0], block[1]) if self._indxr_user_input_lattice: idxref.set_indexer_user_input_lattice(True) if self._indxr_input_lattice and self._indxr_input_cell: idxref.set_indexer_input_lattice(self._indxr_input_lattice) idxref.set_indexer_input_cell(self._indxr_input_cell) Debug.write('Set lattice: %s' % self._indxr_input_lattice) Debug.write('Set cell: %f %f %f %f %f %f' % \ self._indxr_input_cell) original_cell = self._indxr_input_cell elif self._indxr_input_lattice: idxref.set_indexer_input_lattice(self._indxr_input_lattice) original_cell = None else: original_cell = None from dxtbx.serialize.xds import to_xds converter = to_xds(self.get_imageset()) xds_beam_centre = converter.detector_origin idxref.set_beam_centre(xds_beam_centre[0], xds_beam_centre[1]) # fixme need to check if the lattice, cell have been set already, # and if they have, pass these in as input to the indexing job. done = False while not done: try: done = idxref.run() # N.B. in here if the IDXREF step was being run in the first # pass done is FALSE however there should be a refined # P1 orientation matrix etc. available - so keep it! 
        except XDSException as e:
            # inspect this - if we have complaints about not
            # enough reflections indexed, and we have a target
            # unit cell, and they are the same, well ignore it

            if 'solution is inaccurate' in str(e):
                Debug.write(
                    'XDS complains solution inaccurate - ignoring')
                done = idxref.continue_from_error()
            elif ('insufficient percentage (< 70%)' in str(e) or
                  'insufficient percentage (< 50%)' in str(e)) and \
                  original_cell:
                done = idxref.continue_from_error()
                lattice, cell, mosaic = \
                    idxref.get_indexing_solution()

                # compare solutions FIXME should use xds_cell_deviation
                check = PhilIndex.params.xia2.settings.xds_check_cell_deviation
                for j in range(3):
                    # allow two percent variation in unit cell length
                    if math.fabs((cell[j] - original_cell[j]) /
                                 original_cell[j]) > 0.02 and check:
                        Debug.write('XDS unhappy and solution wrong')
                        raise e
                    # and two degree difference in angle
                    if math.fabs(cell[j + 3] - original_cell[j + 3]) \
                           > 2.0 and check:
                        Debug.write('XDS unhappy and solution wrong')
                        raise e
                Debug.write('XDS unhappy but solution ok')
            elif 'insufficient percentage (< 70%)' in str(e) or \
                     'insufficient percentage (< 50%)' in str(e):
                Debug.write('XDS unhappy but solution probably ok')
                done = idxref.continue_from_error()
            else:
                raise e
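# Both tolerance branches above apply the same test - two percent on cell
# lengths and two degrees on angles. Factored out as a predicate, assuming
# cells are (a, b, c, alpha, beta, gamma) tuples (xia2 itself inlines the
# loop as shown above):

def cell_matches(cell, reference, length_tol=0.02, angle_tol=2.0):
    for j in range(3):
        if abs((cell[j] - reference[j]) / reference[j]) > length_tol:
            return False  # cell length off by more than 2%
        if abs(cell[j + 3] - reference[j + 3]) > angle_tol:
            return False  # cell angle off by more than 2 degrees
    return True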
def _index(self):
    '''Actually do the autoindexing using the data prepared by the
    previous method.'''

    images_str = '%d to %d' % tuple(self._indxr_images[0])
    for i in self._indxr_images[1:]:
        images_str += ', %d to %d' % tuple(i)

    cell_str = None
    if self._indxr_input_cell:
        cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % \
                   self._indxr_input_cell

    # then this is a proper autoindexing run - describe this
    # to the journal entry

    #if len(self._fp_directory) <= 50:
        #dirname = self._fp_directory
    #else:
        #dirname = '...%s' % self._fp_directory[-46:]
    dirname = self.get_directory()

    Journal.block('autoindexing', self._indxr_sweep_name, 'XDS',
                  {'images':images_str,
                   'target cell':cell_str,
                   'target lattice':self._indxr_input_lattice,
                   'template':self.get_template(),
                   'directory':dirname})

    idxref = self.Idxref()

    self._index_remove_masked_regions()
    for file in ['SPOT.XDS']:
        idxref.set_input_data_file(file, self._indxr_payload[file])

    idxref.set_data_range(self._indxr_images[0][0],
                          self._indxr_images[0][1])
    idxref.set_background_range(self._indxr_images[0][0],
                                self._indxr_images[0][1])

    # set the phi start etc correctly

    if self._i_or_ii is None:
        self._i_or_ii = self.decide_i_or_ii()
        Debug.write('Selecting I or II, chose %s' % self._i_or_ii)

    if self._i_or_ii == 'i':
        blocks = self._index_select_images_i()
        for block in blocks[:1]:
            starting_frame = block[0]

            dd = Diffdump()
            dd.set_image(self.get_image_name(starting_frame))
            starting_angle = dd.readheader()['phi_start']

            idxref.set_starting_frame(starting_frame)
            idxref.set_starting_angle(starting_angle)

            idxref.add_spot_range(block[0], block[1])

        for block in blocks[1:]:
            idxref.add_spot_range(block[0], block[1])
    else:
        for block in self._indxr_images[:1]:
            starting_frame = block[0]

            dd = Diffdump()
            dd.set_image(self.get_image_name(starting_frame))
            starting_angle = dd.readheader()['phi_start']

            idxref.set_starting_frame(starting_frame)
            idxref.set_starting_angle(starting_angle)

            idxref.add_spot_range(block[0], block[1])

        for block in self._indxr_images[1:]:
            idxref.add_spot_range(block[0], block[1])

    # FIXME need to also be able to pass in the known unit
    # cell and lattice if already available e.g. from
    # the helper... indirectly

    if self._indxr_user_input_lattice:
        idxref.set_indexer_user_input_lattice(True)

    if self._indxr_input_lattice and self._indxr_input_cell:
        idxref.set_indexer_input_lattice(self._indxr_input_lattice)
        idxref.set_indexer_input_cell(self._indxr_input_cell)

        Debug.write('Set lattice: %s' % self._indxr_input_lattice)
        Debug.write('Set cell: %f %f %f %f %f %f' %
                    self._indxr_input_cell)

        original_cell = self._indxr_input_cell
    elif self._indxr_input_lattice:
        idxref.set_indexer_input_lattice(self._indxr_input_lattice)
        original_cell = None
    else:
        original_cell = None

    # FIXED need to set the beam centre here - this needs to come
    # from the input .xinfo object or header, and be converted
    # to the XDS frame... done.

    #mosflm_beam_centre = self.get_beam_centre()
    #xds_beam_centre = beam_centre_mosflm_to_xds(
        #mosflm_beam_centre[0], mosflm_beam_centre[1], self.get_header())
    from dxtbx.serialize.xds import to_xds
    converter = to_xds(self.get_imageset())
    xds_beam_centre = converter.detector_origin

    idxref.set_beam_centre(xds_beam_centre[0],
                           xds_beam_centre[1])

    # fixme need to check if the lattice, cell have been set already,
    # and if they have, pass these in as input to the indexing job.

    done = False

    while not done:
        try:
            done = idxref.run()

            # N.B. in here if the IDXREF step was being run in the first
            # pass done is FALSE however there should be a refined
            # P1 orientation matrix etc. available - so keep it!

        except XDSException as e:
            # inspect this - if we have complaints about not
            # enough reflections indexed, and we have a target
            # unit cell, and they are the same, well ignore it

            if 'solution is inaccurate' in str(e):
                Debug.write(
                    'XDS complains solution inaccurate - ignoring')
                done = idxref.continue_from_error()
            elif ('insufficient percentage (< 70%)' in str(e) or
                  'insufficient percentage (< 50%)' in str(e)) and \
                  original_cell:
                done = idxref.continue_from_error()
                lattice, cell, mosaic = \
                    idxref.get_indexing_solution()

                # compare solutions
                for j in range(3):
                    # allow two percent variation in unit cell length
                    if math.fabs((cell[j] - original_cell[j]) /
                                 original_cell[j]) > 0.02 and \
                                 not Flags.get_relax():
                        Debug.write('XDS unhappy and solution wrong')
                        raise e
                    # and two degree difference in angle
                    if math.fabs(cell[j + 3] - original_cell[j + 3]) \
                           > 2.0 and not Flags.get_relax():
                        Debug.write('XDS unhappy and solution wrong')
                        raise e
                Debug.write('XDS unhappy but solution ok')
            elif 'insufficient percentage (< 70%)' in str(e) or \
                     'insufficient percentage (< 50%)' in str(e):
                Debug.write('XDS unhappy but solution probably ok')
                done = idxref.continue_from_error()
            else:
                raise e
def _index(self): """Actually do the autoindexing using the data prepared by the previous method.""" images_str = "%d to %d" % tuple(self._indxr_images[0]) for i in self._indxr_images[1:]: images_str += ", %d to %d" % tuple(i) cell_str = None if self._indxr_input_cell: cell_str = "%.2f %.2f %.2f %.2f %.2f %.2f" % self._indxr_input_cell # then this is a proper autoindexing run - describe this # to the journal entry # if len(self._fp_directory) <= 50: # dirname = self._fp_directory # else: # dirname = '...%s' % self._fp_directory[-46:] dirname = self.get_directory() Journal.block( "autoindexing", self._indxr_sweep_name, "XDS", { "images": images_str, "target cell": cell_str, "target lattice": self._indxr_input_lattice, "template": self.get_template(), "directory": dirname, }, ) self._index_remove_masked_regions() if self._i_or_ii is None: self._i_or_ii = self.decide_i_or_ii() Debug.write("Selecting I or II, chose %s" % self._i_or_ii) idxref = self.Idxref() for file in ["SPOT.XDS"]: idxref.set_input_data_file(file, self._indxr_payload[file]) # set the phi start etc correctly idxref.set_data_range(self._indxr_images[0][0], self._indxr_images[0][1]) idxref.set_background_range(self._indxr_images[0][0], self._indxr_images[0][1]) if self._i_or_ii == "i": blocks = self._index_select_images_i() for block in blocks[:1]: starting_frame = block[0] starting_angle = self.get_scan().get_angle_from_image_index( starting_frame) idxref.set_starting_frame(starting_frame) idxref.set_starting_angle(starting_angle) idxref.add_spot_range(block[0], block[1]) for block in blocks[1:]: idxref.add_spot_range(block[0], block[1]) else: for block in self._indxr_images[:1]: starting_frame = block[0] starting_angle = self.get_scan().get_angle_from_image_index( starting_frame) idxref.set_starting_frame(starting_frame) idxref.set_starting_angle(starting_angle) idxref.add_spot_range(block[0], block[1]) for block in self._indxr_images[1:]: idxref.add_spot_range(block[0], block[1]) # FIXME need to also be able to pass in the known unit # cell and lattice if already available e.g. from # the helper... indirectly if self._indxr_user_input_lattice: idxref.set_indexer_user_input_lattice(True) if self._indxr_input_lattice and self._indxr_input_cell: idxref.set_indexer_input_lattice(self._indxr_input_lattice) idxref.set_indexer_input_cell(self._indxr_input_cell) Debug.write("Set lattice: %s" % self._indxr_input_lattice) Debug.write("Set cell: %f %f %f %f %f %f" % self._indxr_input_cell) original_cell = self._indxr_input_cell elif self._indxr_input_lattice: idxref.set_indexer_input_lattice(self._indxr_input_lattice) original_cell = None else: original_cell = None # FIXED need to set the beam centre here - this needs to come # from the input .xinfo object or header, and be converted # to the XDS frame... done. # mosflm_beam_centre = self.get_beam_centre() # xds_beam_centre = beam_centre_mosflm_to_xds( # mosflm_beam_centre[0], mosflm_beam_centre[1], self.get_header()) from dxtbx.serialize.xds import to_xds converter = to_xds(self.get_imageset()) xds_beam_centre = converter.detector_origin idxref.set_beam_centre(xds_beam_centre[0], xds_beam_centre[1]) # fixme need to check if the lattice, cell have been set already, # and if they have, pass these in as input to the indexing job. done = False while not done: try: done = idxref.run() # N.B. in here if the IDXREF step was being run in the first # pass done is FALSE however there should be a refined # P1 orientation matrix etc. available - so keep it! 
except XDSException as e: # inspect this - if we have complaints about not # enough reflections indexed, and we have a target # unit cell, and they are the same, well ignore it if "solution is inaccurate" in str(e): Debug.write("XDS complains solution inaccurate - ignoring") done = idxref.continue_from_error() elif ("insufficient percentage (< 70%)" in str(e) or "insufficient percentage (< 50%)" in str(e)) and original_cell: done = idxref.continue_from_error() lattice, cell, mosaic = idxref.get_indexing_solution() # compare solutions check = PhilIndex.params.xia2.settings.xds_check_cell_deviation for j in range(3): # allow two percent variation in unit cell length if (math.fabs( (cell[j] - original_cell[j]) / original_cell[j]) > 0.02 and check): Debug.write("XDS unhappy and solution wrong") raise e # and two degree difference in angle if (math.fabs(cell[j + 3] - original_cell[j + 3]) > 2.0 and check): Debug.write("XDS unhappy and solution wrong") raise e Debug.write("XDS unhappy but solution ok") elif "insufficient percentage (< 70%)" in str( e) or "insufficient percentage (< 50%)" in str(e): Debug.write("XDS unhappy but solution probably ok") done = idxref.continue_from_error() else: raise e FileHandler.record_log_file( "%s INDEX" % self.get_indexer_full_name(), os.path.join(self.get_working_directory(), "IDXREF.LP"), ) for file in ["SPOT.XDS", "XPARM.XDS"]: self._indxr_payload[file] = idxref.get_output_data_file(file) # need to get the indexing solutions out somehow... self._indxr_other_lattice_cell = idxref.get_indexing_solutions() self._indxr_lattice, self._indxr_cell, self._indxr_mosaic = ( idxref.get_indexing_solution()) import dxtbx from dxtbx.serialize.xds import to_crystal xparm_file = os.path.join(self.get_working_directory(), "XPARM.XDS") models = dxtbx.load(xparm_file) crystal_model = to_crystal(xparm_file) from dxtbx.model import Experiment, ExperimentList experiment = Experiment( beam=models.get_beam(), detector=models.get_detector(), goniometer=models.get_goniometer(), scan=models.get_scan(), crystal=crystal_model, # imageset=self.get_imageset(), ) experiment_list = ExperimentList([experiment]) self.set_indexer_experiment_list(experiment_list) # I will want this later on to check that the lattice was ok self._idxref_subtree_problem = idxref.get_index_tree_problem() return
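# In this revision the starting angle comes from the scan model rather than
# re-reading image headers with Diffdump (as the earlier variant above does).
# A minimal stand-in showing the assumed semantics of
# get_angle_from_image_index:

def angle_from_image_index(osc_start, osc_width, first_image, image):
    # assumption: images are numbered contiguously from first_image and
    # each covers osc_width degrees, starting at osc_start
    return osc_start + (image - first_image) * osc_width

# e.g. angle_from_image_index(0.0, 0.5, 1, 90) -> 44.5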
def _index(self): '''Implement the indexer interface.''' Citations.cite('mosflm') indexer = MosflmIndex() indexer.set_working_directory(self.get_working_directory()) auto_logfiler(indexer) from xia2.lib.bits import unique_elements _images = unique_elements(self._indxr_images) indexer.set_images(_images) images_str = ', '.join(map(str, _images)) cell_str = None if self._indxr_input_cell: cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % \ self._indxr_input_cell if self._indxr_sweep_name: #if len(self._fp_directory) <= 50: #dirname = self._fp_directory #else: #dirname = '...%s' % self._fp_directory[-46:] dirname = os.path.dirname(self.get_imageset().get_template()) Journal.block( 'autoindexing', self._indxr_sweep_name, 'mosflm', { 'images': images_str, 'target cell': self._indxr_input_cell, 'target lattice': self._indxr_input_lattice, 'template': self.get_imageset().get_template(), 'directory': dirname }) #task = 'Autoindex from images:' #for i in _images: #task += ' %s' % self.get_image_name(i) #self.set_task(task) indexer.set_template(os.path.basename(self.get_template())) indexer.set_directory(self.get_directory()) xsweep = self.get_indexer_sweep() if xsweep is not None: if xsweep.get_distance() is not None: indexer.set_distance(xsweep.get_distance()) #if self.get_wavelength_prov() == 'user': #index.set_wavelength(self.get_wavelength()) if xsweep.get_beam_centre() is not None: indexer.set_beam_centre(xsweep.get_beam_centre()) if self._indxr_input_cell: indexer.set_unit_cell(self._indxr_input_cell) if self._indxr_input_lattice is not None: spacegroup_number = lattice_to_spacegroup( self._indxr_input_lattice) indexer.set_space_group_number(spacegroup_number) if not self._mosflm_autoindex_thresh: try: min_peaks = 200 Debug.write('Aiming for at least %d spots...' % min_peaks) thresholds = [] for i in _images: p = Printpeaks() p.set_working_directory(self.get_working_directory()) auto_logfiler(p) p.set_image(self.get_image_name(i)) thresh = p.threshold(min_peaks) Debug.write('Autoindex threshold for image %d: %d' % \ (i, thresh)) thresholds.append(thresh) thresh = min(thresholds) self._mosflm_autoindex_thresh = thresh except Exception as e: print(str(e)) #XXX this should disappear! Debug.write('Error computing threshold: %s' % str(e)) Debug.write('Using default of 20.0') thresh = 20.0 else: thresh = self._mosflm_autoindex_thresh Debug.write('Using autoindex threshold: %d' % thresh) if self._mosflm_autoindex_sol: indexer.set_solution_number(self._mosflm_autoindex_sol) indexer.set_threshold(thresh) # now forget this to prevent weird things happening later on if self._mosflm_autoindex_sol: self._mosflm_autoindex_sol = 0 indexer.run() indxr_cell = indexer.get_refined_unit_cell() self._indxr_lattice = indexer.get_lattice() space_group_number = indexer.get_indexed_space_group_number() detector_distance = indexer.get_refined_distance() beam_centre = indexer.get_refined_beam_centre() mosaic_spreads = indexer.get_mosaic_spreads() if min(list(indxr_cell)) < 10.0 and \ indxr_cell[2] / indxr_cell[0] > 6: Debug.write('Unrealistic autoindexing solution: ' + '%.2f %.2f %.2f %.2f %.2f %.2f' % indxr_cell) # tweak some parameters and try again... self._mosflm_autoindex_thresh *= 1.5 self.set_indexer_done(False) return intgr_params = {} # look up other possible indexing solutions (not well - in # standard settings only!) This is moved earlier as it could # result in returning if Mosflm has selected the wrong # solution! 
try: self._indxr_other_lattice_cell = indexer.get_solutions() # Change 27/FEB/08 to support user assigned spacegroups if self._indxr_user_input_lattice: lattice_to_spacegroup_dict = { 'aP': 1, 'mP': 3, 'mC': 5, 'oP': 16, 'oC': 20, 'oF': 22, 'oI': 23, 'tP': 75, 'tI': 79, 'hP': 143, 'hR': 146, 'cP': 195, 'cF': 196, 'cI': 197 } for k in self._indxr_other_lattice_cell.keys(): if lattice_to_spacegroup_dict[k] > \ lattice_to_spacegroup_dict[ self._indxr_input_lattice]: del self._indxr_other_lattice_cell[k] # check that the selected unit cell matches - and if # not raise a "horrible" exception if self._indxr_input_cell: assert indxr_cell is not None for j in range(6): if math.fabs(self._indxr_input_cell[j] - indxr_cell[j]) > 2.0: Chatter.write('Mosflm autoindexing did not select ' + 'correct (target) unit cell') raise RuntimeError( 'something horrible happened in indexing') except RuntimeError as e: # check if mosflm rejected a solution we have it if 'horribl' in str(e): # ok it did - time to break out the big guns... if not self._indxr_input_cell: raise RuntimeError( 'error in solution selection when not preset') # XXX FIXME self._mosflm_autoindex_sol = _get_indexing_solution_number( indexer.get_all_output(), self._indxr_input_cell, self._indxr_input_lattice) # set the fact that we are not done... self.set_indexer_done(False) # and return - hopefully this will restart everything return else: raise e if len(mosaic_spreads) == 0: # then consider setting it do a default value... # equal to the oscillation width (a good guess) phi_width = self.get_phi_width() Chatter.write( 'Mosaic estimation failed, so guessing at %4.2f' % \ phi_width) # only consider this if we have thus far no idea on the # mosaic spread... mosaic_spreads.append(phi_width) intgr_params['raster'] = indexer.get_raster() intgr_params['separation'] = indexer.get_separation() self._indxr_resolution_estimate = indexer.get_resolution_estimate() # compute mosaic as mean(mosaic_spreads) self._indxr_mosaic = sum(mosaic_spreads) / len(mosaic_spreads) self._indxr_payload['mosflm_integration_parameters'] = intgr_params self._indxr_payload['mosflm_orientation_matrix'] = open( os.path.join(self.get_working_directory(), 'xiaindex.mat'), 'r').readlines() import copy from dxtbx.model.detector_helpers import set_mosflm_beam_centre from xia2.Wrappers.Mosflm.AutoindexHelpers import set_distance from xia2.Wrappers.Mosflm.AutoindexHelpers import crystal_model_from_mosflm_mat from cctbx import sgtbx, uctbx # update the beam centre (i.e. shift the origin of the detector) detector = copy.deepcopy(self.get_detector()) beam = copy.deepcopy(self.get_beam()) set_mosflm_beam_centre(detector, beam, beam_centre) if detector_distance is not None: set_distance(detector, detector_distance) # make a dxtbx crystal_model object from the mosflm matrix space_group = sgtbx.space_group_info(number=space_group_number).group() crystal_model = crystal_model_from_mosflm_mat( self._indxr_payload['mosflm_orientation_matrix'], unit_cell=uctbx.unit_cell(tuple(indxr_cell)), space_group=space_group) # construct an experiment_list from dxtbx.model import Experiment, ExperimentList experiment = Experiment(beam=beam, detector=detector, goniometer=self.get_goniometer(), scan=self.get_scan(), crystal=crystal_model) experiment_list = ExperimentList([experiment]) self.set_indexer_experiment_list(experiment_list)
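# The mosaic handling above reduces the per-image estimates to their mean,
# falling back to the oscillation width when estimation failed entirely. As
# a standalone helper (a sketch of the same logic):

def mean_mosaic(mosaic_spreads, phi_width):
    # fall back to the oscillation width - a reasonable guess - when no
    # per-image mosaic estimates survived
    if not mosaic_spreads:
        return phi_width
    return sum(mosaic_spreads) / len(mosaic_spreads)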
def _index(self):
    '''Actually index the diffraction pattern. Note well that this is
    not going to compute the matrix...'''

    # acknowledge this program
    Citations.cite('labelit')
    Citations.cite('distl')

    #self.reset()

    _images = []
    for i in self._indxr_images:
        for j in i:
            if not j in _images:
                _images.append(j)

    _images.sort()

    images_str = '%d' % _images[0]
    for i in _images[1:]:
        images_str += ', %d' % i

    cell_str = None
    if self._indxr_input_cell:
        cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % \
                   self._indxr_input_cell

    if self._indxr_sweep_name:

        # then this is a proper autoindexing run - describe this
        # to the journal entry

        #if len(self._fp_directory) <= 50:
            #dirname = self._fp_directory
        #else:
            #dirname = '...%s' % self._fp_directory[-46:]
        dirname = os.path.dirname(self.get_imageset().get_template())

        Journal.block(
            'autoindexing', self._indxr_sweep_name, 'labelit',
            {'images':images_str,
             'target cell':cell_str,
             'target lattice':self._indxr_input_lattice,
             'template':self.get_imageset().get_template(),
             'directory':dirname})

    if len(_images) > 4:
        raise RuntimeError('cannot use more than 4 images')

    from xia2.Wrappers.Labelit.LabelitIndex import LabelitIndex
    index = LabelitIndex()
    index.set_working_directory(self.get_working_directory())
    auto_logfiler(index)

    #task = 'Autoindex from images:'
    #for i in _images:
        #task += ' %s' % self.get_image_name(i)
    #self.set_task(task)

    Debug.write('Indexing from images:')
    for i in _images:
        index.add_image(self.get_image_name(i))
        Debug.write('%s' % self.get_image_name(i))

    xsweep = self.get_indexer_sweep()
    if xsweep is not None:
        if xsweep.get_distance() is not None:
            index.set_distance(xsweep.get_distance())
        #if self.get_wavelength_prov() == 'user':
            #index.set_wavelength(self.get_wavelength())
        if xsweep.get_beam_centre() is not None:
            index.set_beam_centre(xsweep.get_beam_centre())

    if self._refine_beam is False:
        index.set_refine_beam(False)
    else:
        index.set_refine_beam(True)
        index.set_beam_search_scope(self._beam_search_scope)

    if ((math.fabs(self.get_wavelength() - 1.54) < 0.01) or
        (math.fabs(self.get_wavelength() - 2.29) < 0.01)):
        index.set_Cu_KA_or_Cr_KA(True)

    #sweep = self.get_indexer_sweep_name()
    #FileHandler.record_log_file(
        #'%s INDEX' % (sweep), self.get_log_file())

    try:
        index.run()
    except RuntimeError as e:
        if self._refine_beam is False:
            raise e

        # can we improve the situation?

        if self._beam_search_scope < 4.0:
            self._beam_search_scope += 4.0

            # try repeating the indexing!

            self.set_indexer_done(False)
            return 'failed'

        # otherwise this is beyond redemption

        raise e
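# This variant caps Labelit at four input images with a hard error. If a
# caller wanted to thin the list instead of failing, an evenly spaced
# selection could look like the following (hypothetical behaviour - not
# what the method above does):

def select_up_to_four(images):
    # evenly sample at most four image numbers from a sorted list
    if len(images) <= 4:
        return list(images)
    step = (len(images) - 1) / 3.0
    return [images[int(round(j * step))] for j in range(4)]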
def _integrate(self):
    '''Actually do the integration - in XDS terms this will mean running
    DEFPIX and INTEGRATE to measure all the reflections.'''

    images_str = '%d to %d' % tuple(self._intgr_wedge)
    cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % tuple(self._intgr_cell)

    if len(self._fp_directory) <= 50:
        dirname = self._fp_directory
    else:
        dirname = '...%s' % self._fp_directory[-46:]

    Journal.block(
        'integrating', self._intgr_sweep_name, 'DIALS',
        {'images':images_str,
         'cell':cell_str,
         'lattice':self.get_integrater_refiner().get_refiner_lattice(),
         'template':self._fp_template,
         'directory':dirname,
         'resolution':'%.2f' % self._intgr_reso_high})

    integrate = self.Integrate()

    # decide what images we are going to process, if not already
    # specified
    if not self._intgr_wedge:
        images = self.get_matching_images()
        self.set_integrater_wedge(min(images), max(images))

    imageset = self.get_imageset()
    beam = imageset.get_beam()
    detector = imageset.get_detector()

    d_min_limit = detector.get_max_resolution(beam.get_s0())
    if d_min_limit > self._intgr_reso_high \
       or PhilIndex.params.xia2.settings.resolution.keep_all_reflections:
        Debug.write('Overriding high resolution limit: %f => %f' %
                    (self._intgr_reso_high, d_min_limit))
        self._intgr_reso_high = d_min_limit

    integrate.set_experiments_filename(self._intgr_experiments_filename)
    integrate.set_reflections_filename(self._intgr_indexed_filename)
    integrate.set_d_max(self._intgr_reso_low)
    integrate.set_d_min(self._intgr_reso_high)
    pname, xname, dname = self.get_integrater_project_info()
    sweep = self.get_integrater_sweep_name()
    FileHandler.record_log_file('%s %s %s %s INTEGRATE' %
                                (pname, xname, dname, sweep),
                                integrate.get_log_file())

    try:
        integrate.run()
    except RuntimeError as e:
        s = str(e)
        if ('dials.integrate requires more memory than is available.' in s
            and not self._intgr_reso_high):
            # Try to estimate a more sensible resolution limit for
            # integration in case we were just integrating noise to the
            # edge of the detector
            images = self._integrate_select_images_wedges()

            Debug.write(
                'Integrating subset of images to estimate resolution limit.\n'
                'Integrating images %s' % images)

            integrate = self.Integrate()
            integrate.set_experiments_filename(self._intgr_experiments_filename)
            integrate.set_reflections_filename(self._intgr_indexed_filename)
            integrate.set_d_max(self._intgr_reso_low)
            integrate.set_d_min(self._intgr_reso_high)
            for (start, stop) in images:
                integrate.add_scan_range(
                    start - self.get_matching_images()[0],
                    stop - self.get_matching_images()[0])
            integrate.set_reflections_per_degree(1000)
            integrate.run()

            integrated_pickle = integrate.get_integrated_filename()

            from xia2.Wrappers.Dials.EstimateResolutionLimit import \
                EstimateResolutionLimit
            d_min_estimater = EstimateResolutionLimit()
            d_min_estimater.set_working_directory(self.get_working_directory())
            auto_logfiler(d_min_estimater)
            d_min_estimater.set_experiments_filename(
                self._intgr_experiments_filename)
            d_min_estimater.set_reflections_filename(integrated_pickle)

            d_min = d_min_estimater.run()

            Debug.write('Estimate for d_min: %.2f' % d_min)
            Debug.write('Re-running integration to this resolution limit')

            self._intgr_reso_high = d_min
            self.set_integrater_done(False)
            return
        raise
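# The memory-failure recovery above is effectively a two-pass scheme: a
# thinned integration (1000 reflections per degree over selected wedges)
# feeds a d_min estimate, and the integrater is marked not-done so the
# framework reruns it at that tighter limit. A condensed sketch of the
# flow, with run_full, run_thinned and estimate_d_min as hypothetical
# callables standing in for the wrappers used above:

def integrate_with_memory_fallback(run_full, run_thinned, estimate_d_min):
    # in xia2 the second pass happens via set_integrater_done(False)
    # rather than by calling run_full directly again
    try:
        return run_full(d_min=None)
    except RuntimeError as e:
        if 'more memory than is available' not in str(e):
            raise
        subset = run_thinned(reflections_per_degree=1000)
        d_min = estimate_d_min(subset)
        return run_full(d_min=d_min)  # second pass at the estimated limit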
def _integrate(self): '''Actually do the integration - in XDS terms this will mean running DEFPIX and INTEGRATE to measure all the reflections.''' experiment = self._intgr_refiner.get_refined_experiment_list( self.get_integrater_epoch())[0] crystal_model = experiment.crystal self._intgr_refiner_cell = crystal_model.get_unit_cell().parameters() images_str = '%d to %d' % tuple(self._intgr_wedge) cell_str = '%.2f %.2f %.2f %.2f %.2f %.2f' % tuple( self._intgr_refiner_cell) if len(self._fp_directory) <= 50: dirname = self._fp_directory else: dirname = '...%s' % self._fp_directory[-46:] Journal.block( 'integrating', self._intgr_sweep_name, 'XDS', { 'images': images_str, 'cell': cell_str, 'lattice': self._intgr_refiner.get_refiner_lattice(), 'template': self._fp_template, 'directory': dirname, 'resolution': '%.2f' % self._intgr_reso_high }) defpix = self.Defpix() # pass in the correct data for file in [ 'X-CORRECTIONS.cbf', 'Y-CORRECTIONS.cbf', 'BKGINIT.cbf', 'XPARM.XDS' ]: defpix.set_input_data_file(file, self._xds_data_files[file]) defpix.set_data_range(self._intgr_wedge[0] + self.get_frame_offset(), self._intgr_wedge[1] + self.get_frame_offset()) if self.get_integrater_high_resolution() > 0.0 and \ self.get_integrater_user_resolution(): Debug.write('Setting resolution limit in DEFPIX to %.2f' % \ self.get_integrater_high_resolution()) defpix.set_resolution_high(self.get_integrater_high_resolution()) defpix.set_resolution_low(self.get_integrater_low_resolution()) elif self.get_integrater_low_resolution(): Debug.write('Setting low resolution limit in DEFPIX to %.2f' % \ self.get_integrater_low_resolution()) defpix.set_resolution_high(0.0) defpix.set_resolution_low(self.get_integrater_low_resolution()) defpix.run() # and gather the result files for file in ['BKGPIX.cbf', 'ABS.cbf']: self._xds_data_files[file] = defpix.get_output_data_file(file) integrate = self.Integrate() if self._xds_integrate_parameters: integrate.set_updates(self._xds_integrate_parameters) # decide what images we are going to process, if not already # specified if not self._intgr_wedge: images = self.get_matching_images() self.set_integrater_wedge(min(images), max(images)) integrate.set_data_range( self._intgr_wedge[0] + self.get_frame_offset(), self._intgr_wedge[1] + self.get_frame_offset()) for file in [ 'X-CORRECTIONS.cbf', 'Y-CORRECTIONS.cbf', 'BLANK.cbf', 'BKGPIX.cbf', 'GAIN.cbf' ]: integrate.set_input_data_file(file, self._xds_data_files[file]) if 'GXPARM.XDS' in self._xds_data_files: Debug.write('Using globally refined parameters') integrate.set_input_data_file('XPARM.XDS', self._xds_data_files['GXPARM.XDS']) integrate.set_refined_xparm() else: integrate.set_input_data_file('XPARM.XDS', self._xds_data_files['XPARM.XDS']) integrate.run() self._intgr_per_image_statistics = integrate.get_per_image_statistics() Chatter.write(self.show_per_image_statistics()) # record the log file - pname, xname, dname = self.get_integrater_project_info() sweep = self.get_integrater_sweep_name() FileHandler.record_log_file('%s %s %s %s INTEGRATE' % \ (pname, xname, dname, sweep), os.path.join(self.get_working_directory(), 'INTEGRATE.LP')) # and copy the first pass INTEGRATE.HKL... lattice = self._intgr_refiner.get_refiner_lattice() if not os.path.exists( os.path.join(self.get_working_directory(), 'INTEGRATE-%s.HKL' % lattice)): here = self.get_working_directory() shutil.copyfile(os.path.join(here, 'INTEGRATE.HKL'), os.path.join(here, 'INTEGRATE-%s.HKL' % lattice)) # record INTEGRATE.HKL for e.g. BLEND. 
FileHandler.record_more_data_file( '%s %s %s %s INTEGRATE' % (pname, xname, dname, sweep), os.path.join(self.get_working_directory(), 'INTEGRATE.HKL')) # should the existence of these require that I rerun the # integration or can we assume that the application of a # sensible resolution limit will achieve this?? self._xds_integrate_parameters = integrate.get_updates() # record the mosaic spread &c. m_min, m_mean, m_max = integrate.get_mosaic() self.set_integrater_mosaic_min_mean_max(m_min, m_mean, m_max) Chatter.write('Mosaic spread: %.3f < %.3f < %.3f' % \ self.get_integrater_mosaic_min_mean_max()) return os.path.join(self.get_working_directory(), 'INTEGRATE.HKL')
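# The INTEGRATE-<lattice>.HKL copy above is write-once per lattice, so a
# later pass for the same lattice never overwrites the first-pass
# reflections. The same guard as a small helper (name is illustrative):

import os
import shutil

def copy_first_pass_hkl(workdir, lattice):
    # keep the first INTEGRATE.HKL produced for each lattice; subsequent
    # passes for the same lattice leave the stashed copy untouched
    src = os.path.join(workdir, 'INTEGRATE.HKL')
    dst = os.path.join(workdir, 'INTEGRATE-%s.HKL' % lattice)
    if not os.path.exists(dst):
        shutil.copyfile(src, dst)
    return dst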