def run(self, args=None): """ Perform the integration. """ from dials.util.command_line import heading from dials.util.options import flatten_reflections, flatten_experiments from dials.util import log from time import time from dials.util import Sorry # Check the number of arguments is correct start_time = time() # Parse the command line params, options = self.parser.parse_args(args=args, show_diff_phil=False) reference = flatten_reflections(params.input.reflections) experiments = flatten_experiments(params.input.experiments) if len(reference) == 0 and len(experiments) == 0: self.parser.print_help() return if len(reference) == 0: reference = None elif len(reference) != 1: raise Sorry("more than 1 reflection file was given") else: reference = reference[0] if len(experiments) == 0: raise Sorry("no experiment list was specified") # Save phil parameters if params.output.phil is not None: with open(params.output.phil, "w") as outfile: outfile.write(self.parser.diff_phil.as_str()) if __name__ == "__main__": # Configure logging log.config(params.verbosity, info=params.output.log, debug=params.output.debug_log) from dials.util.version import dials_version logger.info(dials_version()) # Log the diff phil diff_phil = self.parser.diff_phil.as_str() if diff_phil != "": logger.info("The following parameters have been modified:\n") logger.info(diff_phil) for abs_params in params.absorption_correction: if abs_params.apply: if not (params.integration.debug.output and not params.integration.debug.separate_files): raise Sorry( "Shoeboxes must be saved to integration intermediates to apply an absorption correction. " + "Set integration.debug.output=True, integration.debug.separate_files=False and " + "integration.debug.delete_shoeboxes=True to temporarily store shoeboxes." ) # Print if we're using a mask for i, exp in enumerate(experiments): mask = exp.imageset.external_lookup.mask if mask.filename is not None: if mask.data: logger.info("Using external mask: %s" % mask.filename) for tile in mask.data: logger.info(" Mask has %d pixels masked" % tile.data().count(False)) # Print the experimental models for i, exp in enumerate(experiments): logger.info("=" * 80) logger.info("") logger.info("Experiments") logger.info("") logger.info("Models for experiment %d" % i) logger.info("") logger.info(str(exp.beam)) logger.info(str(exp.detector)) if exp.goniometer: logger.info(str(exp.goniometer)) if exp.scan: logger.info(str(exp.scan)) logger.info(str(exp.crystal)) logger.info("=" * 80) logger.info("") logger.info(heading("Initialising")) logger.info("") # Load the data reference, rubbish = self.process_reference(reference) # Check pixels don't belong to neighbours if reference is not None: if exp.goniometer is not None and exp.scan is not None: self.filter_reference_pixels(reference, experiments) logger.info("") # Initialise the integrator from dials.algorithms.profile_model.factory import ProfileModelFactory from dials.algorithms.integration.integrator import IntegratorFactory # Modify experiment list if scan range is set. experiments, reference = self.split_for_scan_range( experiments, reference, params.scan_range) # Modify experiment list if exclude images is set experiments = self.exclude_images(experiments, params.exclude_images) # Predict the reflections logger.info("") logger.info("=" * 80) logger.info("") logger.info(heading("Predicting reflections")) logger.info("") predicted = flex.reflection_table.from_predictions_multi( experiments, dmin=params.prediction.d_min, dmax=params.prediction.d_max, margin=params.prediction.margin, force_static=params.prediction.force_static, padding=params.prediction.padding, ) # Match reference with predicted if reference: matched, reference, unmatched = predicted.match_with_reference( reference) assert len(matched) == len(predicted) assert matched.count(True) <= len(reference) if matched.count(True) == 0: raise Sorry(""" Invalid input for reference reflections. Zero reference spots were matched to predictions """) elif len(unmatched) != 0: logger.info("") logger.info("*" * 80) logger.info( "Warning: %d reference spots were not matched to predictions" % (len(unmatched))) logger.info("*" * 80) logger.info("") rubbish.extend(unmatched) if len(experiments) > 1: # filter out any experiments without matched reference reflections # f_: filtered from dxtbx.model.experiment_list import ExperimentList f_reference = flex.reflection_table() f_predicted = flex.reflection_table() f_rubbish = flex.reflection_table() f_experiments = ExperimentList() good_expt_count = 0 def refl_extend(src, dest, eid): tmp = src.select(src["id"] == eid) tmp["id"] = flex.int(len(tmp), good_expt_count) dest.extend(tmp) for expt_id, experiment in enumerate(experiments): if len(reference.select(reference["id"] == expt_id)) != 0: refl_extend(reference, f_reference, expt_id) refl_extend(predicted, f_predicted, expt_id) refl_extend(rubbish, f_rubbish, expt_id) f_experiments.append(experiment) good_expt_count += 1 else: logger.info( "Removing experiment %d: no reference reflections matched to predictions" % expt_id) reference = f_reference predicted = f_predicted experiments = f_experiments rubbish = f_rubbish # Select a random sample of the predicted reflections if not params.sampling.integrate_all_reflections: predicted = self.sample_predictions(experiments, predicted, params) # Compute the profile model if (params.create_profile_model and reference is not None and "shoebox" in reference): experiments = ProfileModelFactory.create(params, experiments, reference) else: experiments = ProfileModelFactory.create(params, experiments) for expr in experiments: if expr.profile is None: raise Sorry("No profile information in experiment list") del reference # Compute the bounding box predicted.compute_bbox(experiments) # Create the integrator logger.info("") integrator = IntegratorFactory.create(params, experiments, predicted) # Integrate the reflections reflections = integrator.integrate() # Append rubbish data onto the end if rubbish is not None and params.output.include_bad_reference: mask = flex.bool(len(rubbish), True) rubbish.unset_flags(mask, rubbish.flags.integrated_sum) rubbish.unset_flags(mask, rubbish.flags.integrated_prf) rubbish.set_flags(mask, rubbish.flags.bad_reference) reflections.extend(rubbish) # Correct integrated intensities for absorption correction, if necessary for abs_params in params.absorption_correction: if abs_params.apply and abs_params.algorithm == "fuller_kapton": from dials.algorithms.integration.kapton_correction import ( multi_kapton_correction, ) experiments, reflections = multi_kapton_correction( experiments, reflections, abs_params.fuller_kapton, logger=logger)() if params.significance_filter.enable: from dials.algorithms.integration.stills_significance_filter import ( SignificanceFilter, ) from dxtbx.model.experiment_list import ExperimentList sig_filter = SignificanceFilter(params) filtered_refls = sig_filter(experiments, reflections) accepted_expts = ExperimentList() accepted_refls = flex.reflection_table() logger.info( "Removed %d reflections out of %d when applying significance filter" % (len(reflections) - len(filtered_refls), len(reflections))) for expt_id, expt in enumerate(experiments): refls = filtered_refls.select(filtered_refls["id"] == expt_id) if len(refls) > 0: accepted_expts.append(expt) refls["id"] = flex.int(len(refls), len(accepted_expts) - 1) accepted_refls.extend(refls) else: logger.info( "Removed experiment %d which has no reflections left after applying significance filter" % expt_id) if len(accepted_refls) == 0: raise Sorry( "No reflections left after applying significance filter") experiments = accepted_expts reflections = accepted_refls # Delete the shoeboxes used for intermediate calculations, if requested if params.integration.debug.delete_shoeboxes and "shoebox" in reflections: del reflections["shoebox"] # Save the reflections self.save_reflections(reflections, params.output.reflections) self.save_experiments(experiments, params.output.experiments) # Write a report if requested if params.output.report is not None: integrator.report().as_file(params.output.report) # Print the total time taken logger.info("\nTotal time taken: %f" % (time() - start_time)) return experiments, reflections
def run_integration(params, experiments, reference=None): """Perform the integration. Returns: experiments: The integrated experiments reflections: The integrated reflections report(optional): An integration report. Raises: ValueError: For a number of bad inputs RuntimeError: If the profile model creation fails """ predicted = None rubbish = None for abs_params in params.absorption_correction: if abs_params.apply: if not (params.integration.debug.output and not params.integration.debug.separate_files): raise ValueError( "Shoeboxes must be saved to integration intermediates to apply an absorption correction. " + "Set integration.debug.output=True, integration.debug.separate_files=False and " + "integration.debug.delete_shoeboxes=True to temporarily store shoeboxes." ) # Print if we're using a mask for i, exp in enumerate(experiments): mask = exp.imageset.external_lookup.mask if mask.filename is not None: if mask.data: logger.info("Using external mask: %s", mask.filename) for tile in mask.data: logger.info(" Mask has %d pixels masked", tile.data().count(False)) # Print the experimental models for i, exp in enumerate(experiments): summary = "\n".join(( "", "=" * 80, "", "Experiments", "", "Models for experiment %d" % i, "", str(exp.beam), str(exp.detector), )) if exp.goniometer: summary += str(exp.goniometer) + "\n" if exp.scan: summary += str(exp.scan) + "\n" summary += str(exp.crystal) logger.info(summary) logger.info("\n".join(("", "=" * 80, ""))) logger.info(heading("Initialising")) # Load the data if reference: reference, rubbish = process_reference(reference) # Check pixels don't belong to neighbours if exp.goniometer is not None and exp.scan is not None: reference = filter_reference_pixels(reference, experiments) # Modify experiment list if scan range is set. experiments, reference = split_for_scan_range(experiments, reference, params.scan_range) # Modify experiment list if exclude images is set if params.exclude_images: for experiment in experiments: for index in params.exclude_images: experiment.imageset.mark_for_rejection(index, True) # Predict the reflections logger.info("\n".join(("", "=" * 80, ""))) logger.info(heading("Predicting reflections")) predicted = flex.reflection_table.from_predictions_multi( experiments, dmin=params.prediction.d_min, dmax=params.prediction.d_max, margin=params.prediction.margin, force_static=params.prediction.force_static, padding=params.prediction.padding, ) # Match reference with predicted if reference: matched, reference, unmatched = predicted.match_with_reference( reference) assert len(matched) == len(predicted) assert matched.count(True) <= len(reference) if matched.count(True) == 0: raise ValueError(""" Invalid input for reference reflections. Zero reference spots were matched to predictions """) elif unmatched: msg = ( "Warning: %d reference spots were not matched to predictions" % unmatched.size()) border = "\n".join(("", "*" * 80, "")) logger.info("".join((border, msg, border))) rubbish.extend(unmatched) if len(experiments) > 1: # filter out any experiments without matched reference reflections # f_: filtered f_reference = flex.reflection_table() f_predicted = flex.reflection_table() f_rubbish = flex.reflection_table() f_experiments = ExperimentList() good_expt_count = 0 def refl_extend(src, dest, eid): old_id = eid new_id = good_expt_count tmp = src.select(src["id"] == old_id) tmp["id"] = flex.int(len(tmp), good_expt_count) if old_id in tmp.experiment_identifiers(): identifier = tmp.experiment_identifiers()[old_id] del tmp.experiment_identifiers()[old_id] tmp.experiment_identifiers()[new_id] = identifier dest.extend(tmp) for expt_id, experiment in enumerate(experiments): if len(reference.select(reference["id"] == expt_id)) != 0: refl_extend(reference, f_reference, expt_id) refl_extend(predicted, f_predicted, expt_id) refl_extend(rubbish, f_rubbish, expt_id) f_experiments.append(experiment) good_expt_count += 1 else: logger.info( "Removing experiment %d: no reference reflections matched to predictions", expt_id, ) reference = f_reference predicted = f_predicted experiments = f_experiments rubbish = f_rubbish # Select a random sample of the predicted reflections if not params.sampling.integrate_all_reflections: predicted = sample_predictions(experiments, predicted, params) # Compute the profile model - either load existing or compute # can raise RuntimeError experiments = ProfileModelFactory.create(params, experiments, reference) for expr in experiments: if expr.profile is None: raise ValueError("No profile information in experiment list") del reference # Compute the bounding box predicted.compute_bbox(experiments) # Create the integrator integrator = create_integrator(params, experiments, predicted) # Integrate the reflections reflections = integrator.integrate() # Remove unintegrated reflections if not params.output.output_unintegrated_reflections: keep = reflections.get_flags(reflections.flags.integrated, all=False) logger.info( "Removing %d unintegrated reflections of %d total", keep.count(False), keep.size(), ) reflections = reflections.select(keep) # Append rubbish data onto the end if rubbish is not None and params.output.include_bad_reference: mask = flex.bool(len(rubbish), True) rubbish.unset_flags(mask, rubbish.flags.integrated_sum) rubbish.unset_flags(mask, rubbish.flags.integrated_prf) rubbish.set_flags(mask, rubbish.flags.bad_reference) reflections.extend(rubbish) # Correct integrated intensities for absorption correction, if necessary for abs_params in params.absorption_correction: if abs_params.apply and abs_params.algorithm == "fuller_kapton": from dials.algorithms.integration.kapton_correction import ( multi_kapton_correction, ) experiments, reflections = multi_kapton_correction( experiments, reflections, abs_params.fuller_kapton, logger=logger)() if params.significance_filter.enable: from dials.algorithms.integration.stills_significance_filter import ( SignificanceFilter, ) sig_filter = SignificanceFilter(params) filtered_refls = sig_filter(experiments, reflections) accepted_expts = ExperimentList() accepted_refls = flex.reflection_table() logger.info( "Removed %d reflections out of %d when applying significance filter", (reflections.size() - filtered_refls.size()), reflections.size(), ) for expt_id, expt in enumerate(experiments): refls = filtered_refls.select(filtered_refls["id"] == expt_id) if refls: accepted_expts.append(expt) current_id = expt_id new_id = len(accepted_expts) - 1 refls["id"] = flex.int(len(refls), new_id) if expt.identifier: del refls.experiment_identifiers()[current_id] refls.experiment_identifiers()[new_id] = expt.identifier accepted_refls.extend(refls) else: logger.info( "Removed experiment %d which has no reflections left after applying significance filter", expt_id, ) if not accepted_refls: raise ValueError( "No reflections left after applying significance filter") experiments = accepted_expts reflections = accepted_refls # Write a report if requested report = None if params.output.report is not None: report = integrator.report() return experiments, reflections, report
def integrate(self, experiments, indexed): from time import time st = time() logger.info('*' * 80) logger.info('Integrating Reflections') logger.info('*' * 80) indexed,_ = self.process_reference(indexed) # Get the integrator from the input parameters logger.info('Configuring integrator from input parameters') from dials.algorithms.profile_model.factory import ProfileModelFactory from dials.algorithms.integration.integrator import IntegratorFactory from dials.array_family import flex # Compute the profile model # Predict the reflections # Match the predictions with the reference # Create the integrator experiments = ProfileModelFactory.create(self.params, experiments, indexed) logger.info("") logger.info("=" * 80) logger.info("") logger.info("Predicting reflections") logger.info("") predicted = flex.reflection_table.from_predictions_multi( experiments, dmin=self.params.prediction.d_min, dmax=self.params.prediction.d_max, margin=self.params.prediction.margin, force_static=self.params.prediction.force_static) predicted.match_with_reference(indexed) logger.info("") integrator = IntegratorFactory.create(self.params, experiments, predicted) # Integrate the reflections integrated = integrator.integrate() # Select only those reflections which were integrated if 'intensity.prf.variance' in integrated: selection = integrated.get_flags( integrated.flags.integrated, all=True) else: selection = integrated.get_flags( integrated.flags.integrated_sum) integrated = integrated.select(selection) len_all = len(integrated) integrated = integrated.select(~integrated.get_flags(integrated.flags.foreground_includes_bad_pixels)) print "Filtering %d reflections with at least one bad foreground pixel out of %d"%(len_all-len(integrated), len_all) # verify sigmas are sensible if 'intensity.prf.value' in integrated: if (integrated['intensity.prf.variance'] <= 0).count(True) > 0: raise Sorry("Found negative variances") if 'intensity.sum.value' in integrated: if (integrated['intensity.sum.variance'] <= 0).count(True) > 0: raise Sorry("Found negative variances") # apply detector gain to summation variances integrated['intensity.sum.variance'] *= self.params.integration.summation.detector_gain if 'background.sum.value' in integrated: if (integrated['background.sum.variance'] < 0).count(True) > 0: raise Sorry("Found negative variances") if (integrated['background.sum.variance'] == 0).count(True) > 0: print "Filtering %d reflections with zero background variance" % ((integrated['background.sum.variance'] == 0).count(True)) integrated = integrated.select(integrated['background.sum.variance'] > 0) # apply detector gain to background summation variances integrated['background.sum.variance'] *= self.params.integration.summation.detector_gain # correct integrated intensities for absorption correction, if necessary for abs_params in self.params.integration.absorption_correction: if abs_params.apply and abs_params.algorithm == "fuller_kapton": from dials.algorithms.integration.kapton_correction import multi_kapton_correction experiments, integrated = multi_kapton_correction(experiments, integrated, abs_params.fuller_kapton, logger=logger)() if self.params.significance_filter.enable: from dials.algorithms.integration.stills_significance_filter import SignificanceFilter sig_filter = SignificanceFilter(self.params) refls = sig_filter(experiments, integrated) logger.info("Removed %d reflections out of %d when applying significance filter"%(len(integrated)-len(refls), len(integrated))) if len(refls) == 0: raise Sorry("No reflections left after applying significance filter") integrated = refls if self.params.output.integrated_filename: # Save the reflections self.save_reflections(integrated, self.params.output.integrated_filename) self.write_integration_pickles(integrated, experiments) from dials.algorithms.indexing.stills_indexer import calc_2D_rmsd_and_displacements rmsd_indexed, _ = calc_2D_rmsd_and_displacements(indexed) log_str = "RMSD indexed (px): %f\n"%(rmsd_indexed) for i in xrange(6): bright_integrated = integrated.select((integrated['intensity.sum.value']/flex.sqrt(integrated['intensity.sum.variance']))>=i) if len(bright_integrated) > 0: rmsd_integrated, _ = calc_2D_rmsd_and_displacements(bright_integrated) else: rmsd_integrated = 0 log_str += "N reflections integrated at I/sigI >= %d: % 4d, RMSD (px): %f\n"%(i, len(bright_integrated), rmsd_integrated) for crystal_model in experiments.crystals(): if hasattr(crystal_model, '_ML_domain_size_ang'): log_str += ". Final ML model: domain size angstroms: %f, half mosaicity degrees: %f"%(crystal_model._ML_domain_size_ang, crystal_model._ML_half_mosaicity_deg) logger.info(log_str) logger.info('') logger.info('Time Taken = %f seconds' % (time() - st)) return integrated
def integrate(self, experiments, indexed): if self.params.skip_hopper: return super(Hopper_Processor, self).integrate(experiments, indexed) st = time.time() logger.info("*" * 80) logger.info("Integrating Reflections") logger.info("*" * 80) indexed, _ = self.process_reference(indexed) if self.params.integration.integration_only_overrides.trusted_range: for detector in experiments.detectors(): for panel in detector: panel.set_trusted_range( self.params.integration.integration_only_overrides. trusted_range) if self.params.dispatch.coset: from xfel.util.sublattice_helper import integrate_coset integrate_coset(self, experiments, indexed) # Get the integrator from the input parameters logger.info("Configuring integrator from input parameters") from dials.algorithms.integration.integrator import create_integrator from dials.algorithms.profile_model.factory import ProfileModelFactory # Compute the profile model # Predict the reflections # Match the predictions with the reference # Create the integrator experiments = ProfileModelFactory.create(self.params, experiments, indexed) new_experiments = ExperimentList() new_reflections = flex.reflection_table() for expt_id, expt in enumerate(experiments): if (self.params.profile.gaussian_rs.parameters.sigma_b_cutoff is None or expt.profile.sigma_b() < self.params.profile.gaussian_rs.parameters.sigma_b_cutoff): refls = indexed.select(indexed["id"] == expt_id) refls["id"] = flex.int(len(refls), len(new_experiments)) # refls.reset_ids() del refls.experiment_identifiers()[expt_id] refls.experiment_identifiers()[len( new_experiments)] = expt.identifier new_reflections.extend(refls) new_experiments.append(expt) else: logger.info("Rejected expt %d with sigma_b %f" % (expt_id, expt.profile.sigma_b())) experiments = new_experiments indexed = new_reflections if len(experiments) == 0: raise RuntimeError("No experiments after filtering by sigma_b") logger.info("") logger.info("=" * 80) logger.info("") logger.info("Predicting reflections") logger.info("") # NOTE: this is the only changed needed to dials.stills_process # TODO: multi xtal # TODO: add in normal dials predictions as an option predicted, model = predictions.get_predicted_from_pandas( self.stage1_df, self.params.diffBragg, self.observed, experiments[0].identifier, self.device_id, spectrum_override=self.stage1_modeler.SIM.beam.spectrum) if self.params.refine_predictions: experiments, rnd2_refls = self.refine(experiments, predicted, refining_predictions=True, best=self.stage1_df) # TODO: match rnd2_refls with indexed.refl and re-save indexed.refl predicted, model = predictions.get_predicted_from_pandas( self.stage1_df, self.params.diffBragg, self.observed, experiments[0].identifier, self.device_id, spectrum_override=self.stage1_modeler.SIM.beam.spectrum) predicted.match_with_reference(indexed) integrator = create_integrator(self.params, experiments, predicted) # Integrate the reflections integrated = integrator.integrate() if self.params.partial_correct: integrated = predictions.normalize_by_partiality( integrated, model, default_F=self.params.diffBragg.predictions.default_Famplitude, gain=self.params.diffBragg.refiner.adu_per_photon) # correct integrated intensities for absorption correction, if necessary for abs_params in self.params.integration.absorption_correction: if abs_params.apply: if abs_params.algorithm == "fuller_kapton": from dials.algorithms.integration.kapton_correction import ( multi_kapton_correction, ) elif abs_params.algorithm == "kapton_2019": from dials.algorithms.integration.kapton_2019_correction import ( multi_kapton_correction, ) experiments, integrated = multi_kapton_correction( experiments, integrated, abs_params.fuller_kapton, logger=logger)() if self.params.significance_filter.enable: from dials.algorithms.integration.stills_significance_filter import ( SignificanceFilter, ) sig_filter = SignificanceFilter(self.params) filtered_refls = sig_filter(experiments, integrated) accepted_expts = ExperimentList() accepted_refls = flex.reflection_table() logger.info( "Removed %d reflections out of %d when applying significance filter", len(integrated) - len(filtered_refls), len(integrated), ) for expt_id, expt in enumerate(experiments): refls = filtered_refls.select(filtered_refls["id"] == expt_id) if len(refls) > 0: accepted_expts.append(expt) refls["id"] = flex.int(len(refls), len(accepted_expts) - 1) accepted_refls.extend(refls) else: logger.info( "Removed experiment %d which has no reflections left after applying significance filter", expt_id, ) if len(accepted_refls) == 0: raise Sorry( "No reflections left after applying significance filter") experiments = accepted_expts integrated = accepted_refls # Delete the shoeboxes used for intermediate calculations, if requested if self.params.integration.debug.delete_shoeboxes and "shoebox" in integrated: del integrated["shoebox"] if self.params.output.composite_output: if (self.params.output.integrated_experiments_filename or self.params.output.integrated_filename): assert (self.params.output.integrated_experiments_filename is not None and self.params.output.integrated_filename is not None) n = len(self.all_integrated_experiments) self.all_integrated_experiments.extend(experiments) for i, experiment in enumerate(experiments): refls = integrated.select(integrated["id"] == i) refls["id"] = flex.int(len(refls), n) del refls.experiment_identifiers()[i] refls.experiment_identifiers()[n] = experiment.identifier self.all_integrated_reflections.extend(refls) n += 1 else: # Dump experiments to disk if self.params.output.integrated_experiments_filename: experiments.as_json( self.params.output.integrated_experiments_filename) if self.params.output.integrated_filename: # Save the reflections self.save_reflections(integrated, self.params.output.integrated_filename) self.write_integration_pickles(integrated, experiments) from dials.algorithms.indexing.stills_indexer import ( calc_2D_rmsd_and_displacements, ) rmsd_indexed, _ = calc_2D_rmsd_and_displacements(indexed) log_str = "RMSD indexed (px): %f\n" % rmsd_indexed for i in range(6): bright_integrated = integrated.select( (integrated["intensity.sum.value"] / flex.sqrt(integrated["intensity.sum.variance"])) >= i) if len(bright_integrated) > 0: rmsd_integrated, _ = calc_2D_rmsd_and_displacements( bright_integrated) else: rmsd_integrated = 0 log_str += ( "N reflections integrated at I/sigI >= %d: % 4d, RMSD (px): %f\n" % (i, len(bright_integrated), rmsd_integrated)) for crystal_model in experiments.crystals(): if hasattr(crystal_model, "get_domain_size_ang"): log_str += ". Final ML model: domain size angstroms: {:f}, half mosaicity degrees: {:f}".format( crystal_model.get_domain_size_ang(), crystal_model.get_half_mosaicity_deg(), ) logger.info(log_str) logger.info("") logger.info("Time Taken = %f seconds", time.time() - st) return integrated
def integrate(self, experiments, indexed): from time import time st = time() logger.info('*' * 80) logger.info('Integrating Reflections') logger.info('*' * 80) indexed, _ = self.process_reference(indexed) # Get the integrator from the input parameters logger.info('Configuring integrator from input parameters') from dials.algorithms.profile_model.factory import ProfileModelFactory from dials.algorithms.integration.integrator import IntegratorFactory from dials.array_family import flex # Compute the profile model # Predict the reflections # Match the predictions with the reference # Create the integrator experiments = ProfileModelFactory.create(self.params, experiments, indexed) logger.info("") logger.info("=" * 80) logger.info("") logger.info("Predicting reflections") logger.info("") predicted = flex.reflection_table.from_predictions_multi( experiments, dmin=self.params.prediction.d_min, dmax=self.params.prediction.d_max, margin=self.params.prediction.margin, force_static=self.params.prediction.force_static) predicted.match_with_reference(indexed) logger.info("") integrator = IntegratorFactory.create(self.params, experiments, predicted) # Integrate the reflections integrated = integrator.integrate() # correct integrated intensities for absorption correction, if necessary for abs_params in self.params.integration.absorption_correction: if abs_params.apply and abs_params.algorithm == "fuller_kapton": from dials.algorithms.integration.kapton_correction import multi_kapton_correction experiments, integrated = multi_kapton_correction( experiments, integrated, abs_params.fuller_kapton, logger=logger)() if self.params.significance_filter.enable: from dials.algorithms.integration.stills_significance_filter import SignificanceFilter sig_filter = SignificanceFilter(self.params) refls = sig_filter(experiments, integrated) logger.info( "Removed %d reflections out of %d when applying significance filter" % (len(integrated) - len(refls), len(integrated))) if len(refls) == 0: raise Sorry( "No reflections left after applying significance filter") integrated = refls # Delete the shoeboxes used for intermediate calculations, if requested if self.params.integration.debug.delete_shoeboxes and 'shoebox' in integrated: del integrated['shoebox'] if self.params.output.composite_output: if self.params.output.integrated_experiments_filename or self.params.output.integrated_filename: assert self.params.output.integrated_experiments_filename is not None and self.params.output.integrated_filename is not None from dials.array_family import flex n = len(self.all_integrated_experiments) self.all_integrated_experiments.extend(experiments) for i, experiment in enumerate(experiments): refls = integrated.select(integrated['id'] == i) refls['id'] = flex.int(len(refls), n) self.all_integrated_reflections.extend(refls) n += 1 else: # Dump experiments to disk if self.params.output.integrated_experiments_filename: from dxtbx.model.experiment_list import ExperimentListDumper dump = ExperimentListDumper(experiments) dump.as_json( self.params.output.integrated_experiments_filename) if self.params.output.integrated_filename: # Save the reflections self.save_reflections(integrated, self.params.output.integrated_filename) self.write_integration_pickles(integrated, experiments) from dials.algorithms.indexing.stills_indexer import calc_2D_rmsd_and_displacements rmsd_indexed, _ = calc_2D_rmsd_and_displacements(indexed) log_str = "RMSD indexed (px): %f\n" % (rmsd_indexed) for i in xrange(6): bright_integrated = integrated.select( (integrated['intensity.sum.value'] / flex.sqrt(integrated['intensity.sum.variance'])) >= i) if len(bright_integrated) > 0: rmsd_integrated, _ = calc_2D_rmsd_and_displacements( bright_integrated) else: rmsd_integrated = 0 log_str += "N reflections integrated at I/sigI >= %d: % 4d, RMSD (px): %f\n" % ( i, len(bright_integrated), rmsd_integrated) for crystal_model in experiments.crystals(): if hasattr(crystal_model, 'get_domain_size_ang'): log_str += ". Final ML model: domain size angstroms: %f, half mosaicity degrees: %f" % ( crystal_model.get_domain_size_ang(), crystal_model.get_half_mosaicity_deg()) logger.info(log_str) logger.info('') logger.info('Time Taken = %f seconds' % (time() - st)) return integrated
def integrate(self, experiments, indexed): # TODO: Figure out if this is necessary and/or how to do this better indexed, _ = self.process_reference(indexed) if self.params.integration.integration_only_overrides.trusted_range: for detector in experiments.detectors(): for panel in detector: panel.set_trusted_range( self.params.integration.integration_only_overrides.trusted_range ) # Get the integrator from the input parameters from dials.algorithms.integration.integrator import create_integrator from dials.algorithms.profile_model.factory import ProfileModelFactory # Compute the profile model # Predict the reflections # Match the predictions with the reference # Create the integrator experiments = ProfileModelFactory.create(self.params, experiments, indexed) new_experiments = ExperimentList() new_reflections = flex.reflection_table() for expt_id, expt in enumerate(experiments): if ( self.params.profile.gaussian_rs.parameters.sigma_b_cutoff is None or expt.profile.sigma_b() < self.params.profile.gaussian_rs.parameters.sigma_b_cutoff ): refls = indexed.select(indexed["id"] == expt_id) refls["id"] = flex.int(len(refls), len(new_experiments)) # refls.reset_ids() del refls.experiment_identifiers()[expt_id] refls.experiment_identifiers()[len(new_experiments)] = expt.identifier new_reflections.extend(refls) new_experiments.append(expt) else: # TODO: this can be done better, also print( "Rejected expt %d with sigma_b %f" % (expt_id, expt.profile.sigma_b()) ) experiments = new_experiments indexed = new_reflections if len(experiments) == 0: raise RuntimeError("No experiments after filtering by sigma_b") predicted = flex.reflection_table.from_predictions_multi( experiments, dmin=self.params.prediction.d_min, dmax=self.params.prediction.d_max, margin=self.params.prediction.margin, force_static=self.params.prediction.force_static, ) predicted.match_with_reference(indexed) integrator = create_integrator(self.params, experiments, predicted) # Integrate the reflections integrated = integrator.integrate() # correct integrated intensities for absorption correction, if necessary for abs_params in self.params.integration.absorption_correction: if abs_params.apply: if abs_params.algorithm == "fuller_kapton": from dials.algorithms.integration.kapton_correction import ( multi_kapton_correction, ) elif abs_params.algorithm == "kapton_2019": from dials.algorithms.integration.kapton_2019_correction import ( multi_kapton_correction, ) experiments, integrated = multi_kapton_correction( experiments, integrated, abs_params.fuller_kapton, logger=logger )() if self.params.significance_filter.enable: from dials.algorithms.integration.stills_significance_filter import ( SignificanceFilter, ) sig_filter = SignificanceFilter(self.params) filtered_refls = sig_filter(experiments, integrated) accepted_expts = ExperimentList() accepted_refls = flex.reflection_table() for expt_id, expt in enumerate(experiments): refls = filtered_refls.select(filtered_refls["id"] == expt_id) if len(refls) > 0: accepted_expts.append(expt) refls["id"] = flex.int(len(refls), len(accepted_expts) - 1) accepted_refls.extend(refls) else: print( "Removed experiment %d which has no reflections left after applying significance filter", expt_id, ) if len(accepted_refls) == 0: raise Sorry("No reflections left after applying significance filter") experiments = accepted_expts integrated = accepted_refls # Delete the shoeboxes used for intermediate calculations, if requested if self.params.integration.debug.delete_shoeboxes and "shoebox" in integrated: del integrated["shoebox"] # Dump experiments to disk if self.params.output.integrated_experiments_filename: experiments.as_json(self.params.output.integrated_experiments_filename) if self.params.output.integrated_filename: # Save the reflections self.save_reflections( integrated, self.params.output.integrated_filename ) self.write_integration_pickles(integrated, experiments) # TODO: Figure out what this is from dials.algorithms.indexing.stills_indexer import ( calc_2D_rmsd_and_displacements, ) rmsd_indexed, _ = calc_2D_rmsd_and_displacements(indexed) log_str = f"RMSD indexed (px): {rmsd_indexed:f}\n" for i in range(6): bright_integrated = integrated.select( ( integrated["intensity.sum.value"] / flex.sqrt(integrated["intensity.sum.variance"]) ) >= i ) if len(bright_integrated) > 0: rmsd_integrated, _ = calc_2D_rmsd_and_displacements(bright_integrated) else: rmsd_integrated = 0 log_str += ( "N reflections integrated at I/sigI >= %d: % 4d, RMSD (px): %f\n" % (i, len(bright_integrated), rmsd_integrated) ) for crystal_model in experiments.crystals(): if hasattr(crystal_model, "get_domain_size_ang"): log_str += ". Final ML model: domain size angstroms: {:f}, half mosaicity degrees: {:f}".format( crystal_model.get_domain_size_ang(), crystal_model.get_half_mosaicity_deg(), ) print(log_str) return integrated