def run(args):
  """Plot multi-run hit-rate statistics from the xfel database.

  Each command-line argument must parse as a PHIL string.  If no rungroup
  is given, statistics are gathered for every run of every rungroup in the
  requested trial; otherwise only the explicitly listed runs are used.
  """
  user_phil = []
  for arg in args:
    try:
      user_phil.append(parse(arg))
    except Exception:
      # Surface unparseable arguments as a user-facing error naming the
      # offending argument.
      raise Sorry("Unrecognized argument %s" % arg)
  params = phil_scope.fetch(sources=user_phil).extract()
  app = xfel_db_application(params)

  runs = []
  all_results = []
  if params.rungroup is None:
    # Whole-trial mode: listing individual runs is not supported here.
    # Explicit check instead of assert, which is stripped under python -O.
    if len(params.run) != 0:
      raise Sorry("Individual runs can only be specified together with a rungroup")
    trial = app.get_trial(trial_number=params.trial)
    for rungroup in trial.rungroups:
      for run in rungroup.runs:
        stats = HitrateStats(app, run.run, trial.trial, rungroup.id, params.d_min)()
        if len(stats[0]) > 0:  # skip runs with no recorded events
          runs.append(run.run)
          all_results.append(stats)
  else:
    for run_no in params.run:
      runs.append(run_no)
      all_results.append(HitrateStats(app, run_no, params.trial, params.rungroup, params.d_min)())
  plot_multirun_stats(all_results, runs, params.d_min,
                      n_strong_cutoff=params.n_strong_cutoff,
                      i_sigi_cutoff=params.i_sigi_cutoff,
                      run_tags=params.run_tags,
                      title=params.title,
                      minimalist=params.minimalist,
                      interactive=True,
                      compress_runs=params.compress_runs)
# Bin the reflections possibly present in this space group/cell so that we can report average I/sigma # in the highest requested and lowest resolution bins from cctbx.crystal import symmetry cs = symmetry(unit_cell=uc, space_group_info=crystal.get_space_group().info()) mset = cs.build_miller_set(anomalous_flag=False, d_min=params.d_min) binner = mset.setup_binner(n_bins=10) for j, i_sigi in zip([0, -1], [average_i_sigi_low, average_i_sigi_high]): d_max, d_min = binner.bin_d_range(binner.range_used()[j]) refls = reflections.select((d <= d_max) & (d > d_min)) n_refls = len(refls) avg_i_sigi = flex.mean( refls['intensity.sum.value'] / flex.sqrt( refls['intensity.sum.variance'])) if n_refls > 0 else 0 i_sigi.append(avg_i_sigi) all_results.append((timestamps, two_theta_low, two_theta_high, n_strong, average_i_sigi_low, average_i_sigi_high)) plot_multirun_stats(all_results, runs, params.d_min, n_strong_cutoff=params.n_strong_cutoff, \ i_sigi_cutoff=params.i_sigi_cutoff, run_tags=params.run_tags, \ minimalist=params.minimalist, interactive=True, compress_runs=params.compress_runs, \ title=params.title) if __name__ == "__main__": run(sys.argv[1:])
def run(args):
  """Plot multi-run hit-rate statistics from the xfel database.

  Variant keyed on ``params.hit_cutoff``.  Each command-line argument must
  parse as a PHIL string.  Without a rungroup, every run of every rungroup
  in the trial is plotted; otherwise only the listed runs are used.
  """
  user_phil = []
  for arg in args:
    try:
      user_phil.append(parse(arg))
    # BUGFIX/consistency: was the Python-2-only form ``except Exception, e``;
    # the sibling implementations in this file use the ``as`` form.
    except Exception:
      raise Sorry("Unrecognized argument %s" % arg)
  params = phil_scope.fetch(sources=user_phil).extract()
  app = xfel_db_application(params)

  runs = []
  all_results = []
  if params.rungroup is None:
    # Whole-trial mode: listing individual runs is not supported here.
    # Explicit check instead of assert, which is stripped under python -O.
    if len(params.run) != 0:
      raise Sorry("Individual runs can only be specified together with a rungroup")
    trial = app.get_trial(trial_number=params.trial)
    for rungroup in trial.rungroups:
      for run in rungroup.runs:
        stats = HitrateStats(app, run.run, trial.trial, rungroup.id, params.d_min)()
        if len(stats[0]) > 0:  # skip runs with no recorded events
          runs.append(run.run)
          all_results.append(stats)
  else:
    for run_no in params.run:
      runs.append(run_no)
      all_results.append(HitrateStats(app, run_no, params.trial, params.rungroup, params.d_min)())
  plot_multirun_stats(all_results, runs, params.d_min,
                      n_strong_cutoff=params.hit_cutoff,
                      interactive=True,
                      compress_runs=params.compress_runs)

if __name__ == "__main__":
  run(sys.argv[1:])
def run(args):
  """Scan DIALS processing output on disk and plot per-run statistics.

  Arguments that are directories or existing files are taken as inputs;
  everything else must parse as PHIL.  For every ``*_strong.pickle`` found,
  the spotfinding, indexing and integration results alongside it are
  summarized (number of strong spots, number of lattices, best resolution
  from the significance filter) and plotted per run.
  """
  user_phil = []
  input_dirs = []
  input_paths = []
  for arg in args:
    if os.path.isdir(arg):
      input_dirs.append(arg)
      continue
    elif os.path.exists(arg):
      input_paths.append(arg)
      continue
    try:
      user_phil.append(parse(arg))
    except Exception:
      raise Sorry("Unrecognized argument %s" % arg)
  params = phil_scope.fetch(sources=user_phil).extract()
  # Configure the significance filter from the user's cutoffs.
  sf_params = sf_scope.extract()
  sf_params.significance_filter.isigi_cutoff = params.i_sigi_cutoff
  sf_params.significance_filter.d_min = params.d_min

  def get_paths(dirname):
    # Absolute path of every entry in dirname.  A comprehension (instead of
    # a bare map()) guarantees a real list on both Python 2 and 3, since
    # the result is later indexed and measured with len().
    return [os.path.join(dirname, name) for name in os.listdir(dirname)]

  files_dict = {dirname: get_paths(dirname) for dirname in input_dirs}
  if params.run_tags_from_filenames:
    # Group loose file paths by the run number embedded in the filename,
    # e.g. idx-run0123-... or idx-r0123-...
    for path in input_paths:
      filename = os.path.basename(path)
      try:
        run = int(filename.split("idx-")[1].split("-")[0].split("run")[1])
      except Exception:
        # BUGFIX: the original chained a second ``except`` clause onto the
        # same try, which can never catch a failure raised inside the first
        # handler — so the ``run = None`` fallback was unreachable and
        # unrecognized filenames crashed.  The fallback must be nested.
        try:
          run = int(filename.split("idx-")[1].split("-")[0].split("r")[1])
        except Exception:
          run = None
      try:
        files_dict[run].append(path)
      except KeyError:
        files_dict[run] = [path]
  else:
    files_dict[None] = input_paths

  all_results = []
  runs = []
  # Iterate through grouped file paths and look for processing results.
  # .items() (not the py2-only .iteritems()) behaves identically here.
  for run, files in files_dict.items():
    if len(files) == 0:
      continue
    runs.append(run)
    timestamps = flex.double()
    two_theta_low = flex.double()
    two_theta_high = flex.double()
    n_strong = flex.int()
    resolutions = flex.double()
    n_lattices = flex.int()
    for i, path in enumerate(sorted(files)):
      root = os.path.dirname(path)
      filename = os.path.basename(path)
      split_fn = filename.split('_')
      if len(split_fn) <= 0 or split_fn[-1] != "strong.pickle":
        continue
      base = os.path.join(root, "_".join(split_fn[:-1]))
      # print as a function call: identical single-argument output on
      # Python 2, and valid syntax on Python 3.
      print(filename)
      strong_name = base + "_strong.pickle"
      if not os.path.exists(strong_name):
        print("Couldn't log %s, strong pickle not found" % filename)
        continue
      # Read the spotfinding results.
      strong = easy_pickle.load(strong_name)
      print("N strong reflections: %d" % len(strong))
      timestamps.append(i)
      n_strong.append(len(strong))
      # Two-theta statistics are not computed by this tool; zeros keep the
      # arrays aligned with the other per-image arrays.
      two_theta_low.append(0)
      two_theta_high.append(0)
      # Read indexing results if possible.
      experiments_name = base + "_integrated_experiments.json"
      indexed_name = base + "_integrated.pickle"
      if not os.path.exists(experiments_name) or not os.path.exists(indexed_name):
        print("Frame didn't index")
        resolutions.append(0)
        n_lattices.append(0)
        continue
      experiments = ExperimentListFactory.from_json_file(experiments_name, check_format=False)
      n_lattices.append(len(experiments))
      reflections = easy_pickle.load(indexed_name)
      # Positive reflections only.
      reflections = reflections.select(reflections['intensity.sum.value'] > 0)
      best_d_min = None
      for expt_id, experiment in enumerate(experiments):
        refls = reflections.select(reflections['id'] == expt_id)
        refls['id'] = flex.int(len(refls), 0)
        sig_filter = SignificanceFilter(sf_params)
        sig_filter(experiments[expt_id:expt_id + 1], refls)
        # Keep the best (smallest d_min, i.e. highest) resolution over all
        # lattices on this image.
        if best_d_min is None or sig_filter.best_d_min < best_d_min:
          best_d_min = sig_filter.best_d_min
      resolutions.append(best_d_min or 0)
    all_results.append((timestamps, two_theta_low, two_theta_high, n_strong,
                        resolutions, n_lattices))
  plot_multirun_stats(all_results, runs, params.d_min,
                      n_strong_cutoff=params.n_strong_cutoff,
                      i_sigi_cutoff=params.i_sigi_cutoff,
                      run_tags=params.run_tags,
                      minimalist=params.minimalist,
                      interactive=True,
                      compress_runs=True,
                      title=params.title)