def run(args):
    """Command-line entry point: apply manual geometry overrides to the
    models in a datablock.json / experiments.json and save the result.

    @param args Unused here; OptionParser reads sys.argv itself.
    """
    from dials.util.options import OptionParser
    from dials.util.options import flatten_datablocks
    from dials.util.options import flatten_experiments
    import libtbx.load_env
    import sys

    usage = "%s [options] datablock.json | experiments.json" % (
        libtbx.env.dispatcher_name)

    parser = OptionParser(
        usage=usage,
        phil=phil_scope,
        read_datablocks=True,
        read_experiments=True,
        check_format=False,
        epilog=help_message)

    params, options = parser.parse_args(show_diff_phil=True)
    experiments = flatten_experiments(params.input.experiments)
    datablocks = flatten_datablocks(params.input.datablock)

    # Nothing to do without at least one input model file.
    if len(experiments) == 0 and len(datablocks) == 0:
        parser.print_help()
        sys.exit(0)  # explicit sys.exit rather than the site-supplied exit()

    from dials.command_line.dials_import import ManualGeometryUpdater
    update_geometry = ManualGeometryUpdater(params)

    if len(experiments):
        imagesets = experiments.imagesets()
    elif len(datablocks):
        # Only a single datablock is supported by this tool.
        assert len(datablocks) == 1
        imagesets = datablocks[0].extract_imagesets()

    # Apply the manual overrides, then copy the updated models back onto
    # the original imagesets in place.
    for imageset in imagesets:
        imageset_new = update_geometry(imageset)
        imageset.set_detector(imageset_new.get_detector())
        imageset.set_beam(imageset_new.get_beam())
        imageset.set_goniometer(imageset_new.get_goniometer())
        imageset.set_scan(imageset_new.get_scan())

    from dxtbx.serialize import dump
    if len(experiments):
        # print() call form: valid on both Python 2 and 3 (the original
        # Python-2-only print statements are a SyntaxError under Python 3).
        print("Saving modified experiments to %s" % params.output.experiments)
        dump.experiment_list(experiments, params.output.experiments)
    elif len(datablocks):
        print("Saving modified datablock to %s" % params.output.datablock)
        dump.datablock(datablocks, params.output.datablock)
def run(self):
    """Process this MPI rank's pre-assigned subset of (tag, filename) items.

    Requires self.rank and self.subset to be populated and
    self.params.mp.method to be "mpi"; each rank handles only its own chunk.
    """
    start_time = time()

    self.load_reference_geometry()
    update_geometry = ManualGeometryUpdater(self.params)

    # Import stuff
    # no preimport for MPI multifile specialization

    # Wrapper function
    def do_work(chunk_index, chunk):
        # One Processor per chunk; composite_tag groups its output files.
        processor = Processor(copy.deepcopy(self.params),
                              composite_tag="%04d" % chunk_index)
        for tag, filename in chunk:
            experiments = do_import(filename)
            imagesets = experiments.imagesets()
            # Guard clauses: skip empty input, refuse ambiguous input.
            if len(imagesets) == 0 or len(imagesets[0]) == 0:
                logger.info("Zero length imageset in file: %s" % filename)
                return
            if len(imagesets) > 1:
                raise Abort("Found more than one imageset in file: %s" % filename)
            if len(imagesets[0]) > 1:
                raise Abort(
                    "Found a multi-image file. Run again with pre_import=True"
                )
            if self.reference_detector is not None:
                imagesets[0].set_detector(
                    Detector.from_dict(self.reference_detector.to_dict()))
            update_geometry(imagesets[0])
            processor.process_experiments(tag, experiments)
        processor.finalize()

    # Process the data
    assert self.params.mp.method == "mpi"
    do_work(self.rank, self.subset)

    # Total Time
    logger.info("")
    logger.info("Total Time Taken = %f seconds" % (time() - start_time))
def update(experiments: ExperimentList, new_params: libtbx.phil.scope_extract) -> ExperimentList:
    """
    Modify detector, beam, goniometer and scan in experiments with the values
    in new_params
    """
    apply_geometry = ManualGeometryUpdater(new_params)
    for iset in experiments.imagesets():
        refreshed = apply_geometry(iset)
        # Copy each refreshed model back onto the original imageset in place.
        for set_model, get_model in (
            (iset.set_detector, refreshed.get_detector),
            (iset.set_beam, refreshed.get_beam),
            (iset.set_goniometer, refreshed.get_goniometer),
            (iset.set_scan, refreshed.get_scan),
        ):
            set_model(get_model())
    return experiments
def __init__(self, params_filename, output_tag, logfile=None):
    """
    @param params_filename cctbx.xfel/DIALS parameter file for processing
    @param output_tag String that will prefix output files
    @param logfile File name for logging
    """
    self.parsed_params = parse(file_name=params_filename)
    extracted = phil_scope.fetch(self.parsed_params).extract()
    super(CctbxPsanaEventProcessor, self).__init__(extracted, output_tag)
    self.update_geometry = ManualGeometryUpdater(extracted)

    # Borrow SimpleScript's reference-geometry loading so any reference
    # detector configured in the parameters is picked up here as well.
    helper = SimpleScript(extracted)
    helper.load_reference_geometry()
    self.reference_detector = getattr(helper, 'reference_detector', None)

    self.output_tag = output_tag
    self.detector_params = None
    if logfile is not None:
        log.config(logfile=logfile)
def run(args):
    """Entry point for dials.modify_geometry: apply manual geometry
    overrides to the models in an experiment list and save the result."""
    from dials.util.options import OptionParser
    from dials.util.options import flatten_experiments

    usage = "dials.modify_geometry [options] models.expt"

    parser = OptionParser(
        usage=usage,
        phil=phil_scope,
        read_experiments=True,
        check_format=False,
        epilog=help_message,
    )

    params, options = parser.parse_args(show_diff_phil=True)
    experiments = flatten_experiments(params.input.experiments)

    # Nothing to do without at least one experiment.
    if len(experiments) == 0:
        parser.print_help()
        exit(0)

    from dials.command_line.dials_import import ManualGeometryUpdater

    update_geometry = ManualGeometryUpdater(params)

    # Apply overrides and copy the refreshed models back in place.
    # (The early exit above guarantees the list is non-empty here.)
    for imageset in experiments.imagesets():
        refreshed = update_geometry(imageset)
        imageset.set_detector(refreshed.get_detector())
        imageset.set_beam(refreshed.get_beam())
        imageset.set_goniometer(refreshed.get_goniometer())
        imageset.set_scan(refreshed.get_scan())

    from dxtbx.serialize import dump

    print("Saving modified experiments to %s" % params.output.experiments)
    dump.experiment_list(experiments, params.output.experiments)
def run(self):
    '''Execute the script: parse arguments, apply reference/manual
    geometry, then process each datablock serially (MPI) or in parallel.'''
    from dials.util import log
    from time import time
    from libtbx import easy_mp
    import copy

    # Parse the command line
    params, options, all_paths = self.parser.parse_args(
        show_diff_phil=False, return_unhandled=True)

    # Check we have some filenames
    if not all_paths:
        self.parser.print_help()
        return

    # Save the options
    self.options = options
    self.params = params
    st = time()

    # Configure logging
    log.config(
        params.verbosity,
        info='dials.process.log',
        debug='dials.process.debug.log')

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    # FIX: was `diff_phil is not ''` — an identity comparison with a string
    # literal, which depends on CPython interning and raises a SyntaxWarning
    # on modern Python. Use a plain truthiness test instead.
    if diff_phil:
        logger.info('The following parameters have been modified:\n')
        logger.info(diff_phil)

    # Shoebox output is a precondition for the absorption correction.
    for abs_params in self.params.integration.absorption_correction:
        if abs_params.apply:
            if not (self.params.integration.debug.output
                    and not self.params.integration.debug.separate_files):
                raise Sorry(
                    'Shoeboxes must be saved to integration intermediates to apply an absorption correction. '
                    + 'Set integration.debug.output=True and integration.debug.separate_files=False to save shoeboxes.')

    self.load_reference_geometry()

    from dials.command_line.dials_import import ManualGeometryUpdater
    update_geometry = ManualGeometryUpdater(params)

    # Import stuff
    logger.info("Loading files...")
    pre_import = params.dispatch.pre_import or len(all_paths) == 1
    if pre_import:
        # Handle still imagesets by breaking them apart into multiple datablocks
        # Further handle single file still imagesets (like HDF5) by tagging each
        # frame using its index
        datablocks = [do_import(path) for path in all_paths]

        # Apply the reference detector to every frame of every imageset.
        if self.reference_detector is not None:
            from dxtbx.model import Detector
            for datablock in datablocks:
                for imageset in datablock.extract_imagesets():
                    for i in range(len(imageset)):
                        imageset.set_detector(
                            Detector.from_dict(self.reference_detector.to_dict()),
                            index=i)

        # Apply the manual geometry overrides.
        for datablock in datablocks:
            for imageset in datablock.extract_imagesets():
                update_geometry(imageset)

        # Split into single-image datablocks, remembering frame indices and
        # file basenames so each frame gets a unique tag.
        indices = []
        basenames = []
        split_datablocks = []
        for datablock in datablocks:
            for imageset in datablock.extract_imagesets():
                paths = imageset.paths()
                for i in xrange(len(imageset)):
                    subset = imageset[i:i + 1]
                    split_datablocks.append(
                        DataBlockFactory.from_imageset(subset)[0])
                    indices.append(i)
                    basenames.append(
                        os.path.splitext(os.path.basename(paths[i]))[0])
        tags = []
        for i, basename in zip(indices, basenames):
            # Disambiguate duplicate basenames with the frame index.
            if basenames.count(basename) > 1:
                tags.append("%s_%05d" % (basename, i))
            else:
                tags.append(basename)

        # Wrapper function
        def do_work(item):
            Processor(copy.deepcopy(params)).process_datablock(item[0], item[1])

        iterable = zip(tags, split_datablocks)
    else:
        basenames = [
            os.path.splitext(os.path.basename(filename))[0]
            for filename in all_paths
        ]
        tags = []
        for i, basename in enumerate(basenames):
            if basenames.count(basename) > 1:
                tags.append("%s_%05d" % (basename, i))
            else:
                tags.append(basename)

        # Wrapper function
        def do_work(item):
            tag, filename = item
            datablock = do_import(filename)
            imagesets = datablock.extract_imagesets()
            if len(imagesets) == 0 or len(imagesets[0]) == 0:
                logger.info("Zero length imageset in file: %s" % filename)
                return
            if len(imagesets) > 1:
                raise Abort("Found more than one imageset in file: %s" % filename)
            if len(imagesets[0]) > 1:
                raise Abort("Found a multi-image file. Run again with pre_import=True")
            if self.reference_detector is not None:
                from dxtbx.model import Detector
                imagesets[0].set_detector(
                    Detector.from_dict(self.reference_detector.to_dict()))
            update_geometry(imagesets[0])
            Processor(copy.deepcopy(params)).process_datablock(tag, datablock)

        iterable = zip(tags, all_paths)

    # Process the data
    if params.mp.method == 'mpi':
        from mpi4py import MPI
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()  # each process in MPI has a unique id, 0-indexed
        size = comm.Get_size()  # size: number of processes running in this job
        # Round-robin work distribution across ranks.
        for i, item in enumerate(iterable):
            if (i + rank) % size == 0:
                do_work(item)
    else:
        easy_mp.parallel_map(
            func=do_work,
            iterable=iterable,
            processes=params.mp.nproc,
            method=params.mp.method,
            preserve_order=True,
            preserve_exception_message=True)

    # Total Time
    logger.info("")
    logger.info("Total Time Taken = %f seconds" % (time() - st))
def process(self, img_object):
    """Run the full DIALS pipeline on a single image object:
    spotfinding -> (optional triage) -> indexing -> (optional space-group
    determination/reindexing) -> refinement -> integration -> filtering.

    :param img_object: IOTA image object; its ``final``, ``fail`` and
        ``log_info`` attributes are updated in place, and its path
        attributes drive where DIALS output is written.
    :return: the updated image object, or whatever ``self.error_handler``
        returns when a stage fails.
    """
    # write out DIALS info (tied to self.write_pickle)
    if self.write_pickle:
        self.params.output.indexed_filename = img_object.ridx_path
        self.params.output.strong_filename = img_object.rspf_path
        self.params.output.refined_experiments_filename = img_object.eref_path
        self.params.output.integrated_experiments_filename = img_object.eint_path
        self.params.output.integrated_filename = img_object.rint_path

    # Set up integration pickle path and logfile
    self.params.output.integration_pickle = img_object.int_file
    self.int_log = img_object.int_log

    # configure DIALS logging
    self.dials_log = getattr(img_object, 'dials_log', None)
    if self.dials_log:
        log.config(verbosity=1, logfile=self.dials_log)

    # Create output folder if one does not exist
    if self.write_pickle:
        if not os.path.isdir(img_object.int_path):
            os.makedirs(img_object.int_path)

    # Auto-set threshold and gain (not saved for target.phil)
    if self.iparams.cctbx_xfel.auto_threshold:
        # A falsy center_int (None/0) falls back to a threshold of 0.
        center_int = img_object.center_int if img_object.center_int else 0
        threshold = int(center_int)
        self.params.spotfinder.threshold.dispersion.global_threshold = threshold
    if self.iparams.image_import.estimate_gain:
        self.params.spotfinder.threshold.dispersion.gain = img_object.gain

    # Update geometry if reference geometry was applied
    from dials.command_line.dials_import import ManualGeometryUpdater
    update_geometry = ManualGeometryUpdater(self.params)
    try:
        imagesets = img_object.experiments.imagesets()
        update_geometry(imagesets[0])
        # Copy the refreshed beam/detector onto the first experiment.
        experiment = img_object.experiments[0]
        experiment.beam = imagesets[0].get_beam()
        experiment.detector = imagesets[0].get_detector()
    except RuntimeError as e:
        print("DEBUG: Error updating geometry on {}, {}".format(
            img_object.img_path, e))

    # Set detector if reference geometry was applied
    if self.reference_detector is not None:
        try:
            from dxtbx.model import Detector
            imageset = img_object.experiments[0].imageset
            imageset.set_detector(
                Detector.from_dict(self.reference_detector.to_dict()))
            img_object.experiments[0].detector = imageset.get_detector()
        except Exception as e:
            print('DEBUG: cannot set detector! ', e)

    # Write full params to file (DEBUG)
    if self.write_logs:
        param_string = phil_scope.format(
            python_object=self.params).as_str()
        full_param_dir = os.path.dirname(self.iparams.cctbx_xfel.target)
        full_param_fn = 'full_' + os.path.basename(
            self.iparams.cctbx_xfel.target)
        full_param_file = os.path.join(full_param_dir, full_param_fn)
        with open(full_param_file, 'w') as ftarg:
            ftarg.write(param_string)

    # **** SPOTFINDING **** #
    with util.Capturing() as output:
        try:
            print("{:-^100}\n".format(" SPOTFINDING: "))
            print('<--->')
            observed = self.find_spots(img_object.experiments)
            img_object.final['spots'] = len(observed)
        except Exception as e:
            e_spf = str(e)
            observed = None
        else:
            # Triage: reject images with too few Bragg peaks.
            if (self.iparams.data_selection.image_triage and
                    len(observed) >= self.iparams.data_selection.
                    image_triage.minimum_Bragg_peaks):
                msg = " FOUND {} SPOTS - IMAGE ACCEPTED!".format(
                    len(observed))
                print("{:-^100}\n\n".format(msg))
            else:
                msg = " FOUND {} SPOTS - IMAGE REJECTED!".format(
                    len(observed))
                print("{:-^100}\n\n".format(msg))
                e = 'Insufficient spots found ({})!'.format(len(observed))
                return self.error_handler(e, 'triage', img_object, output)
    if not observed:
        return self.error_handler(e_spf, 'spotfinding', img_object, output)

    if self.write_logs:
        self.write_int_log(path=img_object.int_log, output=output,
                           dials_log=self.dials_log)

    # Finish if spotfinding is the last processing stage
    if 'spotfind' in self.last_stage:
        try:
            detector = img_object.experiments.unique_detectors()[0]
            beam = img_object.experiments.unique_beams()[0]
        except AttributeError:
            detector = img_object.experiments.imagesets()[0].get_detector()
            beam = img_object.experiments.imagesets()[0].get_beam()

        # Estimate a resolution range from the observed spot positions.
        s1 = flex.vec3_double()
        for i in range(len(observed)):
            s1.append(detector[observed['panel'][i]].get_pixel_lab_coord(
                observed['xyzobs.px.value'][i][0:2]))
        two_theta = s1.angle(beam.get_s0())
        # NOTE(review): Bragg's law gives d = lambda / (2 * sin(theta));
        # `asin` of an angle looks suspicious here — confirm against the
        # upstream IOTA implementation before changing.
        d = beam.get_wavelength() / (2 * flex.asin(two_theta / 2))
        img_object.final['res'] = np.max(d)
        img_object.final['lres'] = np.min(d)
        return img_object

    # **** INDEXING **** #
    with util.Capturing() as output:
        try:
            print("{:-^100}\n".format(" INDEXING"))
            print('<--->')
            experiments, indexed = self.index(img_object.experiments, observed)
        except Exception as e:
            e_idx = str(e)
            indexed = None
        else:
            if indexed:
                img_object.final['indexed'] = len(indexed)
                print("{:-^100}\n\n".format(" USED {} INDEXED REFLECTIONS "
                                            "".format(len(indexed))))
            else:
                e_idx = "Not indexed for unspecified reason(s)"
                img_object.fail = 'failed indexing'
    if indexed:
        if self.write_logs:
            self.write_int_log(path=img_object.int_log, output=output,
                               dials_log=self.dials_log)
    else:
        return self.error_handler(e_idx, 'indexing', img_object, output)

    with util.Capturing() as output:
        # Bravais lattice and reindex
        if self.iparams.cctbx_xfel.determine_sg_and_reindex:
            try:
                print("{:-^100}\n".format(" DETERMINING SPACE GROUP"))
                print('<--->')
                experiments, indexed = self.pg_and_reindex(
                    indexed, experiments)
                img_object.final['indexed'] = len(indexed)
                lat = experiments[0].crystal.get_space_group().info()
                sg = str(lat).replace(' ', '')
                if sg != 'P1':
                    print("{:-^100}\n".format(
                        " REINDEXED TO SPACE GROUP {} ".format(sg)))
                else:
                    print("{:-^100}\n".format(
                        " RETAINED TRICLINIC (P1) SYMMETRY "))
                reindex_success = True
            except Exception as e:
                e_ridx = str(e)
                reindex_success = False

            if reindex_success:
                if self.write_logs:
                    self.write_int_log(path=img_object.int_log, output=output,
                                       dials_log=self.dials_log)
            else:
                return self.error_handler(e_ridx, 'indexing', img_object,
                                          output)

    # **** REFINEMENT **** #
    with util.Capturing() as output:
        try:
            experiments, indexed = self.refine(experiments, indexed)
            refined = True
        except Exception as e:
            e_ref = str(e)
            refined = False
    if refined:
        if self.write_logs:
            self.write_int_log(path=img_object.int_log, output=output,
                               dials_log=self.dials_log)
    else:
        return self.error_handler(e_ref, 'refinement', img_object, output)

    # **** INTEGRATION **** #
    with util.Capturing() as output:
        try:
            print("{:-^100}\n".format(" INTEGRATING "))
            print('<--->')
            integrated = self.integrate(experiments, indexed)
        except Exception as e:
            e_int = str(e)
            integrated = None
        else:
            if integrated:
                img_object.final['integrated'] = len(integrated)
                print("{:-^100}\n\n".format(
                    " FINAL {} INTEGRATED REFLECTIONS "
                    "".format(len(integrated))))
    if integrated:
        if self.write_logs:
            self.write_int_log(path=img_object.int_log, output=output,
                               dials_log=self.dials_log)
    else:
        return self.error_handler(e_int, 'integration', img_object, output)

    # Filter
    if self.iparams.cctbx_xfel.filter.flag_on:
        self.selector = Selector(
            frame=self.frame,
            uc_tol=self.iparams.cctbx_xfel.filter.uc_tolerance,
            xsys=self.iparams.cctbx_xfel.filter.crystal_system,
            pg=self.iparams.cctbx_xfel.filter.pointgroup,
            uc=self.iparams.cctbx_xfel.filter.unit_cell,
            min_ref=self.iparams.cctbx_xfel.filter.min_reflections,
            min_res=self.iparams.cctbx_xfel.filter.min_resolution)
        fail, e = self.selector.result_filter()
        if fail:
            return self.error_handler(e, 'filter', img_object, output)

    int_results, log_entry = self.collect_information(
        img_object=img_object)

    # Update final entry with integration results
    img_object.final.update(int_results)

    # Update image log
    log_entry = "\n".join(log_entry)
    img_object.log_info.append(log_entry)

    if self.write_logs:
        self.write_int_log(path=img_object.int_log, log_entry=log_entry)
    return img_object
def load_imagesets(
    template,
    directory,
    id_image=None,
    image_range=None,
    use_cache=True,
    reversephi=False,
):
    """Locate, build and cache ImageSweep imagesets for a filename template.

    :param template: image filename template (relative to ``directory``).
    :param directory: directory containing the images.
    :param id_image: if given, return only the cached imageset whose scan
        starts at this image number.
    :param image_range: if given, return a single imageset sliced to this
        inclusive (first, last) image range.
    :param use_cache: reuse a previously built imageset for this template.
    :param reversephi: negate the goniometer rotation axis on each imageset.
    :return: a list of imagesets, or the cache's values() view when neither
        ``id_image`` nor ``image_range`` selects a subset.
    """
    global imageset_cache
    from dxtbx.model.experiment_list import ExperimentListFactory
    from xia2.Applications.xia2setup import known_hdf5_extensions
    from dxtbx.imageset import ImageSweep

    full_template_path = os.path.join(directory, template)

    # (Re)build only on a cache miss or when caching is disabled.
    if full_template_path not in imageset_cache or not use_cache:
        from dxtbx.model.experiment_list import BeamComparison
        from dxtbx.model.experiment_list import DetectorComparison
        from dxtbx.model.experiment_list import GoniometerComparison

        # Model-comparison tolerances come from the xia2 settings scope.
        params = PhilIndex.params.xia2.settings
        compare_beam = BeamComparison(
            wavelength_tolerance=params.input.tolerance.beam.wavelength,
            direction_tolerance=params.input.tolerance.beam.direction,
            polarization_normal_tolerance=params.input.tolerance.beam.polarization_normal,
            polarization_fraction_tolerance=params.input.tolerance.beam.polarization_fraction,
        )
        compare_detector = DetectorComparison(
            fast_axis_tolerance=params.input.tolerance.detector.fast_axis,
            slow_axis_tolerance=params.input.tolerance.detector.slow_axis,
            origin_tolerance=params.input.tolerance.detector.origin,
        )
        compare_goniometer = GoniometerComparison(
            rotation_axis_tolerance=params.input.tolerance.goniometer.rotation_axis,
            fixed_rotation_tolerance=params.input.tolerance.goniometer.fixed_rotation,
            setting_rotation_tolerance=params.input.tolerance.goniometer.setting_rotation,
        )
        scan_tolerance = params.input.tolerance.scan.oscillation

        format_kwargs = {
            "dynamic_shadowing": params.input.format.dynamic_shadowing,
            "multi_panel": params.input.format.multi_panel,
        }

        if os.path.splitext(full_template_path)[-1] in known_hdf5_extensions:
            # if we are passed the correct file, use this, else look for a master
            # file (i.e. something_master.h5)
            if os.path.exists(full_template_path) and os.path.isfile(
                full_template_path
            ):
                master_file = full_template_path
            else:
                import glob

                g = glob.glob(os.path.join(directory, "*_master.h5"))
                master_file = None
                # Pick the candidate sharing the longest common substring
                # with the requested template.
                for p in g:
                    substr = longest_common_substring(template, p)
                    if substr:
                        if master_file is None or (
                            len(substr)
                            > len(longest_common_substring(template, master_file))
                        ):
                            master_file = p

            if master_file is None:
                raise RuntimeError("Can't find master file for %s" % full_template_path)

            unhandled = []
            experiments = ExperimentListFactory.from_filenames(
                [master_file],
                verbose=False,
                unhandled=unhandled,
                compare_beam=compare_beam,
                compare_detector=compare_detector,
                compare_goniometer=compare_goniometer,
                scan_tolerance=scan_tolerance,
                format_kwargs=format_kwargs,
            )
            assert len(unhandled) == 0, (
                "unhandled image files identified: %s" % unhandled
            )
        else:
            from dxtbx.sweep_filenames import locate_files_matching_template_string

            params = PhilIndex.get_python_object()
            read_all_image_headers = params.xia2.settings.read_all_image_headers

            if read_all_image_headers:
                # Expensive path: read the header of every matching file.
                paths = sorted(
                    locate_files_matching_template_string(full_template_path)
                )
                unhandled = []
                experiments = ExperimentListFactory.from_filenames(
                    paths,
                    verbose=False,
                    unhandled=unhandled,
                    compare_beam=compare_beam,
                    compare_detector=compare_detector,
                    compare_goniometer=compare_goniometer,
                    scan_tolerance=scan_tolerance,
                    format_kwargs=format_kwargs,
                )
                assert len(unhandled) == 0, (
                    "unhandled image files identified: %s" % unhandled
                )
            else:
                # Cheap path: build from the template alone.
                from dxtbx.model.experiment_list import ExperimentListTemplateImporter

                importer = ExperimentListTemplateImporter(
                    [full_template_path], format_kwargs=format_kwargs
                )
                experiments = importer.experiments

        # Only rotation sweeps are of interest here.
        imagesets = [
            iset for iset in experiments.imagesets() if isinstance(iset, ImageSweep)
        ]
        assert len(imagesets) > 0, "no imageset found"

        imageset_cache[full_template_path] = collections.OrderedDict()

        if reversephi:
            for imageset in imagesets:
                goniometer = imageset.get_goniometer()
                goniometer.set_rotation_axis(
                    tuple(-g for g in goniometer.get_rotation_axis())
                )

        reference_geometry = PhilIndex.params.xia2.settings.input.reference_geometry
        if reference_geometry is not None and len(reference_geometry) > 0:
            update_with_reference_geometry(imagesets, reference_geometry)

        # Update the geometry
        params = PhilIndex.params.xia2.settings
        update_geometry = []

        from dials.command_line.dials_import import ManualGeometryUpdater
        from dials.util.options import geometry_phil_scope

        # Then add manual geometry
        # Only install the updater when the user actually overrode something
        # (non-empty diff against the default geometry phil).
        work_phil = geometry_phil_scope.format(params.input)
        diff_phil = geometry_phil_scope.fetch_diff(source=work_phil)
        if diff_phil.as_str() != "":
            update_geometry.append(ManualGeometryUpdater(params.input))

        imageset_list = []
        for imageset in imagesets:
            for updater in update_geometry:
                imageset = updater(imageset)
            imageset_list.append(imageset)
        imagesets = imageset_list

        from scitbx.array_family import flex

        for imageset in imagesets:
            scan = imageset.get_scan()
            exposure_times = scan.get_exposure_times()
            epochs = scan.get_epochs()
            # Repair missing/zero exposure times so downstream code never
            # sees a zero-length exposure.
            if exposure_times.all_eq(0) or exposure_times[0] == 0:
                exposure_times = flex.double(exposure_times.size(), 1)
                scan.set_exposure_times(exposure_times)
            elif not exposure_times.all_gt(0):
                exposure_times = flex.double(exposure_times.size(), exposure_times[0])
                scan.set_exposure_times(exposure_times)
            # Synthesize monotonically increasing epochs when absent.
            if epochs.size() > 1 and not epochs.all_gt(0):
                if epochs[0] == 0:
                    epochs[0] = 1
                for i in range(1, epochs.size()):
                    epochs[i] = epochs[i - 1] + exposure_times[i - 1]
                scan.set_epochs(epochs)
            # Key the cache on each sweep's first image number.
            _id_image = scan.get_image_range()[0]
            imageset_cache[full_template_path][_id_image] = imageset

    if id_image is not None:
        return [imageset_cache[full_template_path][id_image]]
    elif image_range is not None:
        # Return the (single) cached sweep containing the requested range,
        # sliced down to exactly that range.
        for imageset in imageset_cache[full_template_path].values():
            scan = imageset.get_scan()
            scan_image_range = scan.get_image_range()
            if (
                image_range[0] >= scan_image_range[0]
                and image_range[1] <= scan_image_range[1]
            ):
                imagesets = [
                    imageset[
                        image_range[0]
                        - scan_image_range[0] : image_range[1]
                        + 1
                        - scan_image_range[0]
                    ]
                ]
                assert len(imagesets[0]) == image_range[1] - image_range[0] + 1, len(
                    imagesets[0]
                )
                return imagesets
    return imageset_cache[full_template_path].values()
def process(self, img_object):
    """Run the DIALS pipeline (spotfinding -> indexing -> optional
    reindexing -> refinement + integration -> filtering) on one image.

    :param img_object: IOTA image object; its ``final``, ``fail`` and
        ``log_info`` attributes are updated in place and its paths drive
        where DIALS output is written.
    :return: the updated image object, or the result of
        ``self.error_handler`` when a stage fails.
    """
    # write out DIALS info
    # All DIALS output filenames are derived from the object-file prefix.
    pfx = os.path.splitext(img_object.obj_file)[0]
    self.params.output.experiments_filename = pfx + '_experiments.json'
    self.params.output.indexed_filename = pfx + '_indexed.pickle'
    self.params.output.strong_filename = pfx + '_strong.pickle'
    self.params.output.refined_experiments_filename = pfx + '_refined_experiments.json'
    self.params.output.integrated_experiments_filename = pfx + '_integrated_experiments.json'
    self.params.output.integrated_filename = pfx + '_integrated.pickle'

    # Set up integration pickle path and logfile
    self.params.verbosity = 10
    self.params.output.integration_pickle = img_object.int_file
    self.int_log = img_object.int_log

    # Create output folder if one does not exist
    if self.write_pickle:
        if not os.path.isdir(img_object.int_path):
            os.makedirs(img_object.int_path)

    # Lazily import the experiments from file when not already provided.
    if not img_object.experiments:
        from dxtbx.model.experiment_list import ExperimentListFactory as exp
        img_object.experiments = exp.from_filenames([img_object.img_path])[0]

    # Auto-set threshold and gain (not saved for target.phil)
    if self.iparams.cctbx_xfel.auto_threshold:
        threshold = int(img_object.center_int)
        self.params.spotfinder.threshold.dispersion.global_threshold = threshold
    if self.iparams.image_import.estimate_gain:
        self.params.spotfinder.threshold.dispersion.gain = img_object.gain

    # Update geometry if reference geometry was applied
    from dials.command_line.dials_import import ManualGeometryUpdater
    update_geometry = ManualGeometryUpdater(self.params)
    try:
        imagesets = img_object.experiments.imagesets()
        update_geometry(imagesets[0])
        # Copy the refreshed beam/detector onto the first experiment.
        experiment = img_object.experiments[0]
        experiment.beam = imagesets[0].get_beam()
        experiment.detector = imagesets[0].get_detector()
    except RuntimeError as e:
        print("DEBUG: Error updating geometry on {}, {}".format(
            img_object.img_path, e))

    # Set detector if reference geometry was applied
    if self.reference_detector is not None:
        try:
            from dxtbx.model import Detector
            imageset = img_object.experiments[0].imageset
            imageset.set_detector(
                Detector.from_dict(self.reference_detector.to_dict())
            )
            img_object.experiments[0].detector = imageset.get_detector()
        except Exception as e:
            print ('DEBUG: cannot set detector! ', e)

    # Accumulates captured stdout from every stage for the final log file.
    proc_output = []

    # **** SPOTFINDING **** #
    with util.Capturing() as output:
        try:
            print ("{:-^100}\n".format(" SPOTFINDING: "))
            observed = self.find_spots(img_object.experiments)
            img_object.final['spots'] = len(observed)
        except Exception as e:
            return self.error_handler(e, 'spotfinding', img_object, output)
        else:
            # Triage: reject images with too few Bragg peaks.
            if (
                self.iparams.image_import.image_triage
                and len(observed) >= self.iparams.image_import.minimum_Bragg_peaks
            ):
                msg = " FOUND {} SPOTS - IMAGE ACCEPTED!".format(len(observed))
                print("{:-^100}\n\n".format(msg))
            else:
                msg = " FOUND {} SPOTS - IMAGE REJECTED!".format(len(observed))
                print("{:-^100}\n\n".format(msg))
                e = 'Insufficient spots found ({})!'.format(len(observed))
                return self.error_handler(e, 'triage', img_object, output)
    proc_output.extend(output)

    # Finish if spotfinding is the last processing stage
    if 'spotfind' in self.last_stage:
        detector = img_object.experiments.unique_detectors()[0]
        beam = img_object.experiments.unique_beams()[0]

        # Estimate a resolution range from the observed spot positions.
        s1 = flex.vec3_double()
        for i in range(len(observed)):
            s1.append(detector[observed['panel'][i]].get_pixel_lab_coord(
                observed['xyzobs.px.value'][i][0:2]))
        two_theta = s1.angle(beam.get_s0())
        # NOTE(review): Bragg's law gives d = lambda / (2 * sin(theta));
        # `asin` of an angle looks suspicious here — confirm against the
        # upstream IOTA implementation before changing.
        d = beam.get_wavelength() / (2 * flex.asin(two_theta / 2))
        img_object.final['res'] = np.max(d)
        img_object.final['lres'] = np.min(d)
        return img_object

    # **** INDEXING **** #
    with util.Capturing() as output:
        try:
            print ("{:-^100}\n".format(" INDEXING "))
            experiments, indexed = self.index(img_object.experiments, observed)
        except Exception as e:
            return self.error_handler(e, 'indexing', img_object, output)
        else:
            if indexed:
                img_object.final['indexed'] = len(indexed)
                print ("{:-^100}\n\n".format(" USED {} INDEXED REFLECTIONS "
                                             "".format(len(indexed))))
            else:
                img_object.fail = 'failed indexing'
                return img_object

    # Bravais lattice and reindex
    if self.iparams.cctbx_xfel.determine_sg_and_reindex:
        try:
            print ("{:-^100}\n".format(" DETERMINING SPACE GROUP "))
            experiments, indexed = self.pg_and_reindex(indexed, experiments)
            img_object.final['indexed'] = len(indexed)
            lat = experiments[0].crystal.get_space_group().info()
            sg = str(lat).replace(' ', '')
            if sg != 'P1':
                print ("{:-^100}\n".format(" REINDEXED TO SPACE GROUP {} ".format(sg)))
            else:
                print ("{:-^100}\n".format(" RETAINED TRICLINIC (P1) SYMMETRY "))
        except Exception as e:
            return self.error_handler(e, 'indexing', img_object, output)
    proc_output.extend(output)

    # **** INTEGRATION **** #
    with util.Capturing() as output:
        try:
            # Refinement runs here too, inside the integration stage.
            experiments, indexed = self.refine(experiments, indexed)
            print ("{:-^100}\n".format(" INTEGRATING "))
            integrated = self.integrate(experiments, indexed)
        except Exception as e:
            return self.error_handler(e, 'integration', img_object, output)
        else:
            if integrated:
                img_object.final['integrated'] = len(integrated)
                print ("{:-^100}\n\n".format(" FINAL {} INTEGRATED REFLECTIONS "
                                             "".format(len(integrated))))
    proc_output.extend(output)

    # Filter
    if self.iparams.cctbx_xfel.filter.flag_on:
        self.selector = Selector(frame=self.frame,
                                 uc_tol=self.iparams.cctbx_xfel.filter.uc_tolerance,
                                 xsys=self.iparams.cctbx_xfel.filter.crystal_system,
                                 pg=self.iparams.cctbx_xfel.filter.pointgroup,
                                 uc=self.iparams.cctbx_xfel.filter.unit_cell,
                                 min_ref=self.iparams.cctbx_xfel.filter.min_reflections,
                                 min_res=self.iparams.cctbx_xfel.filter.min_resolution)
        fail, e = self.selector.result_filter()
        if fail:
            return self.error_handler(e, 'filter', img_object, proc_output)

    int_results, log_entry = self.collect_information(img_object=img_object)

    # Update final entry with integration results
    img_object.final.update(int_results)

    # Update image log
    log_entry = "\n".join(log_entry)
    img_object.log_info.append(log_entry)

    if self.write_logs:
        with open(img_object.int_log, 'w') as tf:
            for i in proc_output:
                # Drop legacy cxi_version chatter from the captured output.
                if 'cxi_version' not in i:
                    tf.write('\n{}'.format(i))
            tf.write('\n{}'.format(log_entry))
    return img_object
def run(self):
    '''Execute the script: parse arguments, validate masks, apply
    reference/manual geometry, then process all images in composite
    chunks, either via MPI round-robin or libtbx multiprocessing.'''
    from dials.util import log
    from time import time
    from libtbx import easy_mp
    import copy

    # Parse the command line
    params, options, all_paths = self.parser.parse_args(
        show_diff_phil=False, return_unhandled=True, quick_parse=True)

    # Check we have some filenames
    if not all_paths:
        self.parser.print_help()
        return

    # Mask validation
    for mask_path in params.spotfinder.lookup.mask, params.integration.lookup.mask:
        if mask_path is not None and not os.path.isfile(mask_path):
            raise Sorry("Mask %s not found" % mask_path)

    # Save the options
    self.options = options
    self.params = params
    st = time()

    # Configure logging
    log.config(params.verbosity,
               info='dials.process.log',
               debug='dials.process.debug.log')

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    # FIX: was `diff_phil is not ''` — an identity comparison with a string
    # literal, which depends on CPython interning and raises a SyntaxWarning
    # on modern Python. Use a plain truthiness test instead.
    if diff_phil:
        logger.info('The following parameters have been modified:\n')
        logger.info(diff_phil)

    # Shoebox output is a precondition for the absorption correction.
    for abs_params in self.params.integration.absorption_correction:
        if abs_params.apply:
            if not (self.params.integration.debug.output
                    and not self.params.integration.debug.separate_files):
                raise Sorry(
                    'Shoeboxes must be saved to integration intermediates to apply an absorption correction. '
                    + 'Set integration.debug.output=True, integration.debug.separate_files=False and '
                    + 'integration.debug.delete_shoeboxes=True to temporarily store shoeboxes.')

    self.load_reference_geometry()

    from dials.command_line.dials_import import ManualGeometryUpdater
    update_geometry = ManualGeometryUpdater(params)

    # Import stuff
    logger.info("Loading files...")
    pre_import = params.dispatch.pre_import or len(all_paths) == 1
    if pre_import:
        # Handle still imagesets by breaking them apart into multiple datablocks
        # Further handle single file still imagesets (like HDF5) by tagging each
        # frame using its index
        datablocks = [do_import(path) for path in all_paths]

        # Split into single-image datablocks, remembering frame indices and
        # file basenames so each frame gets a unique tag.
        indices = []
        basenames = []
        split_datablocks = []
        for datablock in datablocks:
            for imageset in datablock.extract_imagesets():
                paths = imageset.paths()
                for i in xrange(len(imageset)):
                    subset = imageset[i:i + 1]
                    split_datablocks.append(
                        DataBlockFactory.from_imageset(subset)[0])
                    indices.append(i)
                    basenames.append(
                        os.path.splitext(os.path.basename(paths[i]))[0])
        tags = []
        for i, basename in zip(indices, basenames):
            if basenames.count(basename) > 1:
                tags.append("%s_%05d" % (basename, i))
            else:
                tags.append(basename)

        # Wrapper function
        def do_work(chunk_id, item_list):
            processor = Processor(copy.deepcopy(params),
                                  composite_tag="%04d" % chunk_id)
            for item in item_list:
                try:
                    imagesets = item[1].extract_imagesets()
                    for imageset in imagesets:
                        update_geometry(imageset)
                except RuntimeError as e:
                    logger.warning(
                        "Error updating geometry on item %s, %s" %
                        (str(item[0]), str(e)))
                    continue
                if self.reference_detector is not None:
                    from dxtbx.model import Detector
                    # FIX: previously only the imageset left over from the
                    # geometry loop got the reference detector, and the frame
                    # loop shadowed the chunk index `i`. Apply it to every
                    # imageset with a distinct loop variable.
                    for imageset in imagesets:
                        for frame in range(len(imageset)):
                            imageset.set_detector(
                                Detector.from_dict(
                                    self.reference_detector.to_dict()),
                                index=frame)
                processor.process_datablock(item[0], item[1])
            processor.finalize()

        iterable = zip(tags, split_datablocks)
    else:
        basenames = [
            os.path.splitext(os.path.basename(filename))[0]
            for filename in all_paths
        ]
        tags = []
        for i, basename in enumerate(basenames):
            if basenames.count(basename) > 1:
                tags.append("%s_%05d" % (basename, i))
            else:
                tags.append(basename)

        # Wrapper function
        def do_work(chunk_id, item_list):
            processor = Processor(copy.deepcopy(params),
                                  composite_tag="%04d" % chunk_id)
            for item in item_list:
                tag, filename = item
                datablock = do_import(filename)
                imagesets = datablock.extract_imagesets()
                if len(imagesets) == 0 or len(imagesets[0]) == 0:
                    logger.info("Zero length imageset in file: %s" % filename)
                    return
                if len(imagesets) > 1:
                    raise Abort(
                        "Found more than one imageset in file: %s" % filename)
                if len(imagesets[0]) > 1:
                    raise Abort(
                        "Found a multi-image file. Run again with pre_import=True"
                    )
                try:
                    update_geometry(imagesets[0])
                except RuntimeError as e:
                    logger.warning(
                        "Error updating geometry on item %s, %s" %
                        (tag, str(e)))
                    continue
                if self.reference_detector is not None:
                    from dxtbx.model import Detector
                    imagesets[0].set_detector(
                        Detector.from_dict(
                            self.reference_detector.to_dict()))
                processor.process_datablock(tag, datablock)
            processor.finalize()

        iterable = zip(tags, all_paths)

    # Process the data
    if params.mp.method == 'mpi':
        from mpi4py import MPI
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()  # each process in MPI has a unique id, 0-indexed
        size = comm.Get_size()  # size: number of processes running in this job
        # Round-robin: each rank processes its own composite subset.
        subset = [
            item for i, item in enumerate(iterable)
            if (i + rank) % size == 0
        ]
        do_work(rank, subset)
    else:
        from dxtbx.command_line.image_average import splitit
        if params.mp.nproc == 1:
            do_work(0, iterable)
        else:
            result = list(
                easy_mp.multi_core_run(
                    myfunction=do_work,
                    argstuples=list(
                        enumerate(splitit(iterable, params.mp.nproc))),
                    nproc=params.mp.nproc))
            # multi_core_run returns (args, result, error) triples.
            error_list = [r[2] for r in result]
            if error_list.count(None) != len(error_list):
                # FIX: corrected "excecution" typo in the user-facing message.
                print(
                    "Some processes failed execution. Not all images may have processed. Error messages:"
                )
                for error in error_list:
                    if error is None:
                        continue
                    print(error)

    # Total Time
    logger.info("")
    logger.info("Total Time Taken = %f seconds" % (time() - st))
def run(self):
    """Execute the script.

    Parses the command line for PHIL parameters and image paths, validates
    lookup masks, imports every input path as single-frame experiments,
    then spot-finds each frame either serially or fanned out over MPI ranks.
    Under MPI, per-rank reflection tables are gathered to rank 0 for
    per-image statistics and optional CBF dumping.

    Raises:
        Sorry: if a lookup mask path does not exist, or if absorption
            correction is requested without shoebox-preserving debug output.
    """
    from dials.util import log
    from time import time
    from libtbx import easy_mp
    import copy

    # Parse the command line
    params, options, all_paths = self.parser.parse_args(
        show_diff_phil=False, return_unhandled=True, quick_parse=True)

    # Check we have some filenames
    if not all_paths:
        self.parser.print_help()
        return

    # Mask validation: both the spotfinder and integration lookup masks
    # must point at existing files when given.
    for mask_path in params.spotfinder.lookup.mask, params.integration.lookup.mask:
        if mask_path is not None and not os.path.isfile(mask_path):
            raise Sorry("Mask %s not found" % mask_path)

    # Save the options
    self.options = options
    self.params = params

    st = time()

    # Configure logging
    #log.config(
    #    params.verbosity, info="exafel_spotfinding.process.log", debug="exafel.spot_finding.debug.log"
    #)

    # Unhandled .phil files among the positional args indicate a user error
    # (e.g. a typo in a parameter file name) — bail out with help text.
    bad_phils = [f for f in all_paths if os.path.splitext(f)[1] == ".phil"]
    if len(bad_phils) > 0:
        self.parser.print_help()
        logger.error(
            "Error: the following phil files were not understood: %s"
            % (", ".join(bad_phils)))
        return

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    # FIX: was `diff_phil is not ""` — identity comparison with a string
    # literal is unreliable (and a SyntaxWarning on modern CPython); use
    # equality instead.
    if diff_phil != "":
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)

    # Absorption correction needs shoeboxes kept in the integration
    # intermediates; refuse configurations that would discard them.
    for abs_params in self.params.integration.absorption_correction:
        if abs_params.apply:
            if not (self.params.integration.debug.output
                    and not self.params.integration.debug.separate_files):
                raise Sorry(
                    "Shoeboxes must be saved to integration intermediates to apply an absorption correction. "
                    + "Set integration.debug.output=True, integration.debug.separate_files=False and "
                    + "integration.debug.delete_shoeboxes=True to temporarily store shoeboxes."
                )

    self.load_reference_geometry()
    from dials.command_line.dials_import import ManualGeometryUpdater
    update_geometry = ManualGeometryUpdater(params)

    # Import stuff
    logger.info("Loading files...")
    pre_import = params.dispatch.pre_import or len(all_paths) == 1
    # NOTE(review): the pre_import flag is computed above but then
    # overridden by `if True:` — looks like a debugging leftover that
    # forces the pre-import path unconditionally. Confirm intent.
    if True:  #pre_import:
        # Handle still imagesets by breaking them apart into multiple experiments
        # Further handle single file still imagesets (like HDF5) by tagging each
        # frame using its index
        experiments = ExperimentList()
        for path in all_paths:
            experiments.extend(do_import(path, load_models=False))
        indices = []
        basenames = []
        split_experiments = []
        for i, imageset in enumerate(experiments.imagesets()):
            assert len(imageset) == 1
            paths = imageset.paths()
            indices.append(i)
            basenames.append(os.path.splitext(os.path.basename(paths[0]))[0])
            split_experiments.append(experiments[i:i + 1])
        # Build a unique tag per frame: plain basename when unique,
        # basename_<index> when several frames share a basename.
        tags = []
        for i, basename in zip(indices, basenames):
            if basenames.count(basename) > 1:
                tags.append("%s_%05d" % (basename, i))
            else:
                tags.append(basename)

        # Wrapper function: process one rank's share of (tag, experiment,
        # index) items and return the accumulated reflection table.
        def do_work(i, item_list):
            processor = SpotFinding_Processor(
                copy.deepcopy(params), composite_tag="%04d" % i, rank=i)
            if params.LS49.dump_CBF:
                print('READING IN TIMESTAMPS TO DUMP')
                # Read in file with timestamps information; timestamps are
                # sliced out of the filenames (chars 13:30 of the basename).
                processor.timestamps_to_dump = []
                for fin in glob.glob(
                        os.path.join(
                            self.params.LS49.path_to_rayonix_crystal_models,
                            'idx-fee_data*')):
                    #for fin in glob.glob(os.path.join(self.params.LS49.path_to_rayonix_crystal_models, 'int-0-*')):
                    int_file = os.path.basename(fin)
                    ts = int_file[13:30]
                    processor.timestamps_to_dump.append(ts)
                #with open(os.path.join(self.params.output.output_dir,'../timestamps_to_dump.dat'), 'r') as fin:
                #    for line in fin:
                #        if line != '\n':
                #            ts = line.split()[0].strip()
                #            processor.timestamps_to_dump.append(ts)
            from dials.array_family import flex
            all_spots_from_rank = flex.reflection_table()
            for item in item_list:
                try:
                    assert len(item[1]) == 1
                    experiment = item[1][0]
                    experiment.load_models()
                    imageset = experiment.imageset
                    update_geometry(imageset)
                    experiment.beam = imageset.get_beam()
                    experiment.detector = imageset.get_detector()
                except RuntimeError as e:
                    logger.warning(
                        "Error updating geometry on item %s, %s"
                        % (str(item[0]), str(e)))
                    continue
                if self.reference_detector is not None:
                    # Override the per-frame detector with the reference
                    # geometry (deep-copied via its dict serialization).
                    from dxtbx.model import Detector
                    experiment = item[1][0]
                    imageset = experiment.imageset
                    imageset.set_detector(
                        Detector.from_dict(self.reference_detector.to_dict()))
                    experiment.detector = imageset.get_detector()
                refl_table = processor.process_experiments(
                    item[0], item[1], item[2])
                if refl_table is not None:
                    all_spots_from_rank.extend(refl_table)
            processor.finalize()
            return all_spots_from_rank

        iterable = zip(tags, split_experiments, indices)

    # Process the data
    if params.mp.method == 'mpi':
        from mpi4py import MPI
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()  # each process in MPI has a unique id, 0-indexed
        size = comm.Get_size()  # size: number of processes running in this job
        # Configure the logging
        if params.output.logging_dir is None:
            info_path = ''
            debug_path = ''
        else:
            import sys
            log_path = os.path.join(params.output.logging_dir,
                                    "log_rank%04d.out" % rank)
            error_path = os.path.join(params.output.logging_dir,
                                      "error_rank%04d.out" % rank)
            print("Redirecting stdout to %s" % log_path)
            print("Redirecting stderr to %s" % error_path)
            # NOTE(review): buffering=0 with text mode 'a' is only valid on
            # Python 2; Python 3 raises ValueError for unbuffered text I/O.
            # Confirm the supported interpreter before porting.
            sys.stdout = open(log_path, 'a', buffering=0)
            sys.stderr = open(error_path, 'a', buffering=0)
            print("Should be redirected now")
            info_path = os.path.join(params.output.logging_dir,
                                     "info_rank%04d.out" % rank)
            debug_path = os.path.join(params.output.logging_dir,
                                      "debug_rank%04d.out" % rank)
        from dials.util import log
        print('IOTA_ALL_SPOTS_RANKS_0')
        #log.config(params.verbosity, info=info_path, debug=debug_path)
        # Round-robin assignment of items to this rank.
        subset = [
            item for i, item in enumerate(iterable)
            if (i + rank) % size == 0
        ]
        all_spots_from_rank = do_work(rank, subset)
        all_spots_rank0 = comm.gather(all_spots_from_rank, root=0)
        print('IOTA_ALL_SPOTS_RANKS_1')
        # NOTE(review): this unconditional exit() terminates every rank here,
        # making the entire rank-0 aggregation/reporting block below dead
        # code. Looks like a debugging leftover — confirm and remove, or
        # delete the unreachable block.
        exit()
        if rank == 0:
            from dials.array_family import flex
            all_spots = flex.reflection_table()
            for ii, refl_table in enumerate(all_spots_rank0):
                if refl_table is not None:
                    all_spots.extend(refl_table)
            from libtbx.easy_pickle import dump
            #dump('all_spots.pickle', all_spots_rank0)
            #dump('all_experiments.pickle', experiments)
            #print('IOTA_ALL_SPOTS_RANKS_2')
            #print('IOTA_ALL_SPOTS_RANKS_3')
            from dials.algorithms.spot_finding import per_image_analysis
            from six.moves import cStringIO as StringIO
            s = StringIO()
            # Assuming one datablock. Might be dangerous
            # FIXME
            from dxtbx.format.cbf_writer import FullCBFWriter
            for i, imageset in enumerate(experiments.imagesets()):
                print("Number of centroids per image for imageset %i:" % i,
                      file=s)
                #from IPython import embed; embed(); exit()
                print('IOTA_ALL_SPOTS_RANKS_4')
                stats = custom_stats_imageset(
                    imageset, all_spots.select(all_spots['img_id'] == i))
                n_spots_total = flex.int(stats.n_spots_total)
                max_number_of_spots = max(stats.n_spots_total)
                # Histogram of images by spot count, printed one line per
                # spot-count value.
                for num_spots in range(1, max_number_of_spots + 1):
                    print("IOTA_NUMBER_OF_SPOTS %d %d" % (
                        num_spots,
                        len(n_spots_total.select(n_spots_total == num_spots))))
                if max_number_of_spots > 0:
                    # assuming one imageset per experiment here : applicable for stills
                    # Reformat the ISO-like image identifier into a compact
                    # XFEL timestamp (YYYYMMDDhhmmssmmm) for the CBF name.
                    ts = imageset.get_image_identifier(0)
                    xfel_ts = ts[0:4] + ts[5:7] + ts[8:10] + ts[11:13] + \
                        ts[14:16] + ts[17:19] + ts[20:23]
                    cbf_path = os.path.join(params.output.logging_dir,
                                            'jungfrau_%s.cbf' % xfel_ts)
                    cbf_writer = FullCBFWriter(imageset=imageset)
                    cbf_writer.write_cbf(cbf_path)
                per_image_analysis.print_table(stats)
            logger.info(s.getvalue())
        comm.barrier()
    else:
        do_work(0, iterable)