def write_experiments(self, experiments, params):
    """ Output the experiments to file. """
    # Nothing to do when no output filename was configured.
    if not params.output.experiments:
        return
    logger.info("-" * 80)
    logger.info("Writing experiments to %s" % params.output.experiments)
    ExperimentListDumper(experiments).as_file(
        params.output.experiments, compact=params.output.compact)
def run(self):
    """Top-level driver for the merging pipeline.

    Builds a chain of workers from the configured dispatch step list,
    threads the (experiments, reflections) pair through each worker in
    order, and logs per-step wall-clock times via the MPI logger.
    Optionally dumps the final experiments/reflections, one file pair
    per MPI rank.
    """
    import datetime
    time_now = datetime.datetime.now()

    # Stamp the start time in the per-rank log, and in the main log on rank 0.
    self.mpi_logger.log(str(time_now))
    if self.mpi_helper.rank == 0:
        self.mpi_logger.main_log(str(time_now))

    self.mpi_logger.log_step_time("TOTAL")

    self.mpi_logger.log_step_time("PARSE_INPUT_PARAMS")
    self.parse_input()
    self.mpi_logger.log_step_time("PARSE_INPUT_PARAMS", True)

    # Create the workers using the factories
    self.mpi_logger.log_step_time("CREATE_WORKERS")
    from xfel.merging import application
    import importlib

    workers = []
    # `default_steps` is presumably a module-level constant — not visible here.
    steps = default_steps if self.params.dispatch.step_list is None else self.params.dispatch.step_list
    for step in steps:
        step_factory_name = step
        step_additional_info = []

        # A step entry may carry extra whitespace-separated arguments
        # after the factory name, e.g. "scale reference".
        step_info = step.split(' ')
        assert len(step_info) > 0
        if len(step_info) > 1:
            step_factory_name = step_info[0]
            step_additional_info = step_info[1:]

        # Each step name maps to a factory module under xfel.merging.application.
        factory = importlib.import_module('xfel.merging.application.' + step_factory_name + '.factory')
        workers.extend(factory.factory.from_parameters(self.params, step_additional_info, mpi_helper=self.mpi_helper, mpi_logger=self.mpi_logger))

    # Perform phil validation up front, before any work is done.
    for worker in workers:
        worker.validate()
    self.mpi_logger.log_step_time("CREATE_WORKERS", True)

    # Do the work: each worker consumes and returns (experiments, reflections).
    experiments = reflections = None
    step = 0
    while (workers):
        worker = workers.pop(0)
        self.mpi_logger.log_step_time("STEP_" + worker.__repr__())
        # Log worker name, i.e. execution step name
        step += 1
        if step > 1:
            self.mpi_logger.log('')
        step_desc = "STEP %d: %s" % (step, worker)
        self.mpi_logger.log(step_desc)
        if self.mpi_helper.rank == 0:
            if step > 1:
                self.mpi_logger.main_log('')
            self.mpi_logger.main_log(step_desc)

        # Execute worker
        experiments, reflections = worker.run(experiments, reflections)
        self.mpi_logger.log_step_time("STEP_" + worker.__repr__(), True)

    if self.params.output.save_experiments_and_reflections:
        from dxtbx.model.experiment_list import ExperimentListDumper
        import os
        # Backfill the 'id' column from experiment identifiers when missing,
        # so downstream tools can match reflections to experiments by index.
        if 'id' not in reflections:
            from dials.array_family import flex
            id_ = flex.int(len(reflections), -1)
            for expt_number, expt in enumerate(experiments):
                sel = reflections['exp_id'] == expt.identifier
                id_.set_selected(sel, expt_number)
            reflections['id'] = id_

        # One .pickle/.json pair per rank, numbered by MPI rank.
        reflections.as_pickle(os.path.join(self.params.output.output_dir, self.params.output.prefix + "_%06d.pickle" % self.mpi_helper.rank))
        dump = ExperimentListDumper(experiments)
        dump.as_file(os.path.join(self.params.output.output_dir, self.params.output.prefix + "_%06d.json" % self.mpi_helper.rank))

    self.mpi_logger.log_step_time("TOTAL", True)
def __call__(self, params, options):
    """Import experiments from an XDS processing directory.

    Reads XDS.INP plus the best available XDS output file found in
    ``self.args[0]``, prints a summary of the resulting experiments,
    and writes them to ``params.output.filename`` (optionally also as
    a datablock).
    """
    from dxtbx.model.experiment_list import ExperimentListFactory
    from dxtbx.model.experiment_list import ExperimentListDumper
    import os
    # Get the XDS.INP file
    xds_inp = os.path.join(self.args[0], 'XDS.INP')
    if params.input.xds_file is None:
        xds_file = XDSFileImporter.find_best_xds_file(self.args[0])
    else:
        xds_file = os.path.join(self.args[0], params.input.xds_file)

    # Check a file is given
    if xds_file is None:
        raise RuntimeError('No XDS file found')

    # Load the experiment list
    # NOTE(review): `unhandled` is created empty and never appended to,
    # so the reporting block below is effectively dead code — confirm
    # whether from_xds was once expected to populate it.
    unhandled = []
    experiments = ExperimentListFactory.from_xds(xds_inp, xds_file)

    # Print out any unhandled files
    if len(unhandled) > 0:
        print('-' * 80)
        print('The following command line arguments were not handled:')
        for filename in unhandled:
            print(' %s' % filename)

    # Print some general info
    print('-' * 80)
    print('Read %d experiments from %s' % (len(experiments), xds_file))

    # Attempt to create scan-varying crystal model if requested
    if params.read_varying_crystal:
        integrate_lp = os.path.join(self.args[0], 'INTEGRATE.LP')
        if os.path.isfile(integrate_lp):
            self.extract_varying_crystal(integrate_lp, experiments)
        else:
            # Best-effort: absence of INTEGRATE.LP is not an error.
            print("No INTEGRATE.LP to extract varying crystal model. Skipping")

    # Loop through the data blocks
    for i, exp in enumerate(experiments):

        # Print some experiment info
        print("-" * 80)
        print("Experiment %d" % i)
        print(" format: %s" % str(exp.imageset.get_format_class()))
        print(" type: %s" % type(exp.imageset))
        print(" num images: %d" % len(exp.imageset))

        # Print some model info
        if options.verbose > 1:
            print("")
            if exp.beam: print(exp.beam)
            else: print("no beam!")
            if exp.detector: print(exp.detector)
            else: print("no detector!")
            if exp.goniometer: print(exp.goniometer)
            else: print("no goniometer!")
            if exp.scan: print(exp.scan)
            else: print("no scan!")
            if exp.crystal: print(exp.crystal)
            else: print("no crystal!")

    # Write the experiment list to a JSON or pickle file
    # NOTE(review): mutates params.output.filename in place when unset.
    if params.output.filename is None:
        params.output.filename = 'experiments.json'
    print("-" * 80)
    print('Writing experiments to %s' % params.output.filename)
    dump = ExperimentListDumper(experiments)
    dump.as_file(params.output.filename)

    # Optionally save as a data block
    if params.output.xds_datablock:
        print("-" * 80)
        print("Writing data block to %s" % params.output.xds_datablock)
        dump = DataBlockDumper(experiments.to_datablocks())
        dump.as_file(params.output.xds_datablock)
def run(self, args=None):
    """Execute the script.

    Parses the command line, finds strong spots on every imageset,
    saves the resulting reflection table (and optionally experiments),
    and logs per-image statistics.  Returns the reflection table, or
    (experiments, reflections) when experiments output is enabled.
    """
    from dxtbx.model.experiment_list import ExperimentListDumper
    from dials.array_family import flex
    from dials.util.options import flatten_experiments
    from time import time
    from dials.util import log

    start_time = time()

    # Parse the command line
    params, options = self.parser.parse_args(args=args, show_diff_phil=False)

    # NOTE(review): logging is only configured when this module is run as
    # a script; library callers are expected to configure logging themselves.
    if __name__ == "__main__":
        # Configure the logging
        log.config(params.verbosity, info=params.output.log, debug=params.output.debug_log)

    from dials.util.version import dials_version
    logger.info(dials_version())

    # Log the diff phil
    diff_phil = self.parser.diff_phil.as_str()
    if diff_phil != "":
        logger.info("The following parameters have been modified:\n")
        logger.info(diff_phil)

    '''
    I am going to start adding stuff here
    this comment above any code means that I, JORGE DIAZ JR,
    added whatever line of code with this signature, JAD7
    '''
    # JAD7: debug trace of the raw input experiments parameter
    logger.info("JAD7: {}".format(params.input.experiments))

    # Ensure we have a data block
    experiments = flatten_experiments(params.input.experiments)

    # JAD7: debug trace of the flattened experiment list
    logger.info("JAD7: {}".format(experiments))

    if len(experiments) == 0:
        self.parser.print_help()
        return

    # Loop through all the imagesets and find the strong spots
    reflections = flex.reflection_table.from_observations(experiments, params)

    # JAD7: debug trace of the found reflections
    logger.info("JAD7: {}".format(reflections))

    # Add n_signal column - before deleting shoeboxes
    from dials.algorithms.shoebox import MaskCode
    good = MaskCode.Foreground | MaskCode.Valid
    reflections["n_signal"] = reflections["shoebox"].count_mask_values(good)

    # Delete the shoeboxes
    if not params.output.shoeboxes:
        del reflections["shoebox"]

    # ascii spot count per image plot
    from dials.util.ascii_art import spot_counts_per_image_plot
    for i, experiment in enumerate(experiments):
        ascii_plot = spot_counts_per_image_plot(reflections.select(reflections["id"] == i))
        if len(ascii_plot):
            logger.info("\nHistogram of per-image spot count for imageset %i:" % i)
            logger.info(ascii_plot)

    # Save the reflections to file
    logger.info("\n" + "-" * 80)
    reflections.as_file(params.output.reflections)
    logger.info("Saved {} reflections to {}".format(len(reflections), params.output.reflections))

    # Save the experiments
    if params.output.experiments:
        logger.info("Saving experiments to {}".format(params.output.experiments))
        dump = ExperimentListDumper(experiments)
        dump.as_file(params.output.experiments)

    # Print some per image statistics
    if params.per_image_statistics:
        from dials.algorithms.spot_finding import per_image_analysis
        from six.moves import cStringIO as StringIO
        # Collect the tables in a string buffer so they go through the logger
        # as one message per run rather than raw stdout.
        s = StringIO()
        for i, experiment in enumerate(experiments):
            print("Number of centroids per image for imageset %i:" % i, file=s)
            imageset = experiment.imageset
            stats = per_image_analysis.stats_imageset(
                imageset,
                reflections.select(reflections["id"] == i),
                resolution_analysis=False,
            )
            per_image_analysis.print_table(stats, out=s)
        logger.info(s.getvalue())

    # Print the time
    logger.info("Time Taken: %f" % (time() - start_time))

    if params.output.experiments:
        return experiments, reflections
    else:
        return reflections
if exp.detector: print exp.detector else: print "no detector!" if exp.goniometer: print exp.goniometer else: print "no goniometer!" if exp.scan: print exp.scan else: print "no scan!" if exp.crystal: print exp.crystal else: print "no crystal!" # Write the experiment list to a JSON or pickle file if params.output.filename is None: params.output.filename = 'experiments.json' print "-" * 80 print 'Writing experiments to %s' % params.output.filename dump = ExperimentListDumper(experiments) dump.as_file(params.output.filename) # Optionally save as a data block if params.output.xds_datablock: print "-" * 80 print "Writing data block to %s" % params.output.xds_datablock dump = DataBlockDumper(experiments.to_datablocks()) dump.as_file(params.output.xds_datablock) @staticmethod def find_best_xds_file(xds_dir): ''' Find the best available file.''' from os.path import exists, join # The possible files to check paths = [
def experiment_list(obj, outfile):
    ''' Dump an experiment list. '''
    # Serialise the experiment list straight to the given output file.
    from dxtbx.model.experiment_list import ExperimentListDumper
    ExperimentListDumper(obj).as_file(outfile)
def run(self):
    '''Execute the script.

    Slice experiments, reflections and/or datablocks to a sub-range of
    images (or into equal-sized blocks when ``block_size`` is set), then
    write each sliced dataset to a derived or user-supplied filename.
    '''
    # Fixes applied in review:
    #  * Python 2-only `print` statements converted to single-argument
    #    `print(...)` calls, valid under both Python 2 and 3 (the rest of
    #    this file already uses the function form).
    #  * Removed unused Python 2-only `import cPickle as pickle`, which
    #    raises ImportError on Python 3.
    #  * Output-filename templating factored into one nested helper.
    from dials.util.options import flatten_reflections, flatten_experiments, \
        flatten_datablocks

    # Parse the command line
    params, options = self.parser.parse_args(show_diff_phil=True)
    reflections = flatten_reflections(params.input.reflections)
    experiments = flatten_experiments(params.input.experiments)
    datablocks = flatten_datablocks(params.input.datablock)

    # Try to load the models and data
    slice_exps = len(experiments) > 0
    slice_refs = len(reflections) > 0
    slice_dbs = len(datablocks) > 0

    # Catch case of nothing to do
    if not any([slice_exps, slice_refs, slice_dbs]):
        print("No suitable input provided")
        self.parser.print_help()
        return

    if reflections:
        if len(reflections) > 1:
            raise Sorry("Only one reflections list can be imported at present")
        reflections = reflections[0]

        # calculate frame numbers if needed
        if experiments:
            reflections = calculate_frame_numbers(reflections, experiments)

        # if we still don't have the right column give up
        if 'xyzobs.px.value' not in reflections:
            raise Sorry("These reflections do not have frame numbers set, and "
                        "there are no experiments provided to calculate these.")

    # set trivial case where no scan range is provided at all
    if not params.image_range:
        params.image_range = [None]

    # check if slicing into blocks
    if params.block_size is not None:
        # in this case for simplicity, ensure that there is either an
        # experiment list or datablocks, but not both. Ensure there is only
        # a single scan contained within.
        if [slice_exps, slice_dbs].count(True) != 1:
            raise Sorry("For slicing into blocks please provide either datablocks"
                        " or experiments, but not both.")
        if slice_exps:
            if len(experiments) > 1:
                raise Sorry("For slicing into blocks please provide a single "
                            "scan only")
            scan = experiments[0].scan
        if slice_dbs:
            scans = datablocks[0].unique_scans()
            if len(scans) > 1 or len(datablocks) > 1:
                raise Sorry("For slicing into blocks please provide a single "
                            "scan only")
            scan = scans[0]

        # Having extracted the scan, calculate the blocks
        params.image_range = calculate_block_ranges(scan, params.block_size)

        # Do the slicing then recombine
        if slice_exps:
            sliced = [slice_experiments(experiments, [sr])[0]
                      for sr in params.image_range]
            sliced_experiments = ExperimentList()
            for exp in sliced:
                sliced_experiments.append(exp)
        if slice_dbs:
            sliced = [slice_datablocks(datablocks, [sr])[0]
                      for sr in params.image_range]
            imagesets = [db.extract_imagesets()[0] for db in sliced]
            sliced_datablocks = DataBlock(imagesets)

        # slice reflections if present
        if slice_refs:
            sliced = [slice_reflections(reflections, [sr])
                      for sr in params.image_range]
            sliced_reflections = sliced[0]
            for i, rt in enumerate(sliced[1:]):
                rt['id'] += (i + 1)  # set id
                sliced_reflections.extend(rt)

    else:
        # slice each dataset into the requested subset
        if slice_exps:
            sliced_experiments = slice_experiments(experiments, params.image_range)
        if slice_refs:
            sliced_reflections = slice_reflections(reflections, params.image_range)
        if slice_dbs:
            sliced_datablocks = slice_datablocks(datablocks, params.image_range)

    def _default_filename(template_path, fallback_stem, suffix):
        # Derive an output name from the first input filename, falling back
        # to `fallback_stem` when the template yields an empty stem.  The
        # extension encodes the sliced range when exactly one explicit range
        # was requested, otherwise "_sliced".
        bname = splitext(basename(template_path))[0]
        if not bname:
            bname = fallback_stem
        if len(params.image_range) == 1 and params.image_range[0] is not None:
            ext = "_{0}_{1}.{2}".format(
                params.image_range[0][0], params.image_range[0][1], suffix)
        else:
            ext = "_sliced.{0}".format(suffix)
        return bname + ext

    # Save sliced experiments
    if slice_exps:
        output_experiments_filename = params.output.experiments_filename
        if output_experiments_filename is None:
            # take first filename as template
            output_experiments_filename = _default_filename(
                params.input.experiments[0].filename, "experiments", "json")
        print('Saving sliced experiments to {0}'.format(
            output_experiments_filename))
        from dxtbx.model.experiment_list import ExperimentListDumper
        dump = ExperimentListDumper(sliced_experiments)
        dump.as_json(output_experiments_filename)

    # Save sliced reflections
    if slice_refs:
        output_reflections_filename = params.output.reflections_filename
        if output_reflections_filename is None:
            # take first filename as template
            output_reflections_filename = _default_filename(
                params.input.reflections[0].filename, "reflections", "pickle")
        print('Saving sliced reflections to {0}'.format(
            output_reflections_filename))
        sliced_reflections.as_pickle(output_reflections_filename)

    # Save sliced datablocks
    if slice_dbs:
        output_datablocks_filename = params.output.datablocks_filename
        if output_datablocks_filename is None:
            # take first filename as template
            output_datablocks_filename = _default_filename(
                params.input.datablock[0].filename, "datablock", "json")
        print('Saving sliced datablocks to {0}'.format(
            output_datablocks_filename))
        from dxtbx.datablock import DataBlockDumper
        dump = DataBlockDumper(sliced_datablocks)
        dump.as_file(output_datablocks_filename)

    return