def _plot_intensities(self, bins, hist_value_factor):
    columns, rows = 80, 25
    if sys.stdout.isatty():
        try:
            result = run_process(['stty', 'size'], timeout=1,
                                 print_stdout=False, print_stderr=False,
                                 debug=procrunner_debug)
            rows, columns = [int(i) for i in result['stdout'].split()]
        except Exception:
            # ignore any errors and use default size
            pass
    rows = min(rows, int(columns / 3))

    command = ["gnuplot"]
    plot_commands = [
        "set term dumb %d %d" % (columns, rows - 2),
        "set title 'Spot intensity distribution'",
        "set xlabel '% of maximum'",
        "set ylabel 'Number of observed pixels'",
        "set logscale y",
        "set boxwidth %f" % hist_value_factor,
        "set xtics out nomirror",
        "set ytics out",
        "plot '-' using 1:2 title '' with boxes",
    ]
    for x in sorted(bins.iterkeys()):
        plot_commands.append("%f %d" % (x * hist_value_factor, bins[x]))
    plot_commands.append("e")

    debug("running %s with:\n %s\n" % (" ".join(command), "\n ".join(plot_commands)))

    try:
        result = run_process(command, stdin="\n".join(plot_commands) + "\n",
                             timeout=120, print_stdout=False, print_stderr=False,
                             debug=procrunner_debug)
    except OSError:
        info(traceback.format_exc())
        # gnuplot could not be started, so there is no result to parse
        return

    debug("result = %s" % self._prettyprint_dictionary(result))

    if result['exitcode'] == 0:
        star = re.compile(r'\*')
        space = re.compile(' ')
        state = set()
        for l in result['stdout'].split("\n"):
            if l.strip() != '':
                stars = {m.start(0) for m in re.finditer(star, l)}
                if not stars:
                    state = set()
                else:
                    state |= stars
                    l = list(l)
                    for s in state:
                        l[s] = '*'
                info("".join(l))
    else:
        warn("Error running gnuplot. Cannot plot intensity distribution. Exit code %d" % result['exitcode'])
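# For illustration (not part of the original source), the gnuplot input generated
# above for a two-bin histogram {1: 12, 2: 3} with hist_value_factor=0.5 and the
# default 80x25 terminal would be:
#
#   set term dumb 80 23
#   set title 'Spot intensity distribution'
#   set xlabel '% of maximum'
#   set ylabel 'Number of observed pixels'
#   set logscale y
#   set boxwidth 0.500000
#   set xtics out nomirror
#   set ytics out
#   plot '-' using 1:2 title '' with boxes
#   0.500000 12
#   1.000000 3
#   e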
def __call__(self): print self.cmd self.result = run_process(self.cmd.split(" ")) print "running command took {0:.2f} seconds\n".format(self.result["runtime"]) assert self.result["exitcode"] == 0 self.mangle_result() return {"cmd": self.cmd, "result": self.result["stdout"]}
def _index(self):
    base_command = ["dials.index", self.json_file, "strong.pickle",
                    "indexing.nproc=%s" % self.nproc]
    runlist = [
        ("Indexing", base_command),
        ("Retrying with max_cell constraint", base_command + ["max_cell=20"]),
        ("Retrying with 1D FFT", base_command + ["indexing.method=fft1d"]),
    ]

    for message, command in runlist:
        info("\n%s..." % message)
        result = run_process(command, print_stdout=False, debug=procrunner_debug)
        debug("result = %s" % self._prettyprint_dictionary(result))
        if result['exitcode'] != 0:
            warn("Failed with exit code %d" % result['exitcode'])
        else:
            break

    if result['exitcode'] != 0:
        return False

    m = re.search(r'model [0-9]+ \(([0-9]+) [^\n]*\n[^\n]*\n[^\n]*'
                  r'Unit cell: \(([^\n]*)\)\n[^\n]*Space group: ([^\n]*)\n',
                  result['stdout'])
    info("Found primitive solution: %s (%s) using %s reflections" %
         (m.group(3), m.group(2), m.group(1)))
    info("Successfully completed (%.1f sec)" % result['runtime'])
    return True
def __call__(self):
    print self.cmd
    self.result = run_process(self.cmd.split(' '))
    print "running command took {0:.2f} seconds\n".format(self.result['runtime'])
    assert self.result['exitcode'] == 0
    self.mangle_result()
    return self.result
def find_new_python3_incompatible_code(module_under_test):
    '''
    Check source code to see if any files violate Python 3 syntax that
    previously did not.

    Example call:
        def test_find_python3_violations():
            import xia2
            import pytest
            import dials.test.python3_regression as py3test
            result = py3test.find_new_python3_incompatible_code(xia2)
            if result is None:
                pytest.skip('No python3 interpreter available')
            elif result:
                pytest.fail(result)

    Known violations are kept in file .known-python3-violations in the
    module directory.
    '''
    # File containing list of excluded files
    allowed_broken_files_list = '.known-python3-violations'

    # Mask all *PYTHON* variables from environment - Python3 will not like cctbx python settings
    environ_override = {k: '' for k in list(os.environ) if 'PYTHON' in k}

    from dials.util.procrunner import run_process
    module_path = module_under_test.__path__[0]
    try:
        result = run_process(['python3', '-m', 'compileall', '-x', r'\.git', '-q', module_path],
                             environ=environ_override, print_stdout=False)
    except OSError as e:
        if e.errno == 2:
            return None
        raise
    if result['stderr']:
        return 'Python3 compilation exited with unexpected STDERR output'
    if not result['exitcode']:  # No compilation errors
        return False

    errors = map(lambda x: x.replace(module_path + os.path.sep, '').strip(),
                 result['stdout'].split('***'))
    errors = filter(lambda x: "'" in x, errors)
    broken_files = {error.split("'")[1]: error for error in errors}

    exclusion_file = os.path.join(module_path, allowed_broken_files_list)
    with open(exclusion_file + '.log', 'w') as fh:
        fh.write("\n".join(sorted(broken_files)))

    if os.path.exists(exclusion_file):
        with open(exclusion_file, 'r') as fh:
            excluded_files = fh.read().splitlines()
        broken_files = {filename: broken_files[filename]
                        for filename in broken_files
                        if filename not in excluded_files}

    if not broken_files:  # No syntax violations in new files
        return False

    for filename in sorted(broken_files):
        print(broken_files[filename], end="\n\n")
    return "{} file[s] contain newly introduced Python3 syntax errors".format(len(broken_files))
def __call__(self):
    print self.cmd
    self.result = run_process(self.cmd.split(' '))
    print "running command took {0:.2f} seconds\n".format(self.result['runtime'])
    assert self.result['exitcode'] == 0
    self.mangle_result()
    return {'cmd': self.cmd, 'result': self.result['stdout']}
def _refine_bravais(self):
    info("\nRefining bravais settings...")
    command = ["dials.refine_bravais_settings", "experiments.json", "indexed.pickle"]
    result = run_process(command, print_stdout=False, debug=procrunner_debug)
    debug("result = %s" % self._prettyprint_dictionary(result))
    if result['exitcode'] == 0:
        m = re.search('---+\n[^\n]*\n---+\n(.*\n)*---+', result['stdout'])
        info(m.group(0))
        info("Successfully completed (%.1f sec)" % result['runtime'])
    else:
        warn("Failed with exit code %d" % result['exitcode'])
        sys.exit(1)
def _predict(self):
    info("\nPredicting reflections...")
    command = ["dials.predict", "experiments_with_profile_model.json"]
    result = run_process(command, print_stdout=False, debug=procrunner_debug)
    debug("result = %s" % self._prettyprint_dictionary(result))
    if result['exitcode'] == 0:
        info("To view predicted reflections run:")
        info("  dials.image_viewer experiments_with_profile_model.json predicted.pickle")
        info("Successfully completed (%.1f sec)" % result['runtime'])
        return True
    else:
        warn("Failed with exit code %d" % result['exitcode'])
        return False
def _count_processors(self, nproc=None):
    if nproc is not None:
        self.nproc = nproc
        return
    command = ["libtbx.show_number_of_processors"]
    debug("running %s" % command)
    result = run_process(command, print_stdout=False, debug=procrunner_debug)
    debug("result = %s" % self._prettyprint_dictionary(result))
    if result['exitcode'] == 0:
        self.nproc = result['stdout'].strip()
    else:
        warn("Could not determine number of available processors. Error code %d" % result['exitcode'])
        sys.exit(1)
def _refine(self):
    info("\nRefining...")
    command = ["dials.refine", "experiments.json", "indexed.pickle"]
    result = run_process(command, print_stdout=False, debug=procrunner_debug)
    debug("result = %s" % self._prettyprint_dictionary(result))
    if result['exitcode'] != 0:
        warn("Failed with exit code %d" % result['exitcode'])
        warn("Giving up.")
        sys.exit(1)
    info("Successfully refined (%.1f sec)" % result['runtime'])
    os.rename("experiments.json", "experiments.unrefined.json")
    os.rename("indexed.pickle", "indexed.unrefined.pickle")
    os.rename("refined_experiments.json", "experiments.json")
    os.rename("refined.pickle", "indexed.pickle")
def _find_spots(self, additional_parameters=None):
    if additional_parameters is None:
        additional_parameters = []
    info("\nSpot finding...")
    command = ["dials.find_spots", self.json_file, "nproc=%s" % self.nproc] + additional_parameters
    result = run_process(command, print_stdout=False, debug=procrunner_debug)
    debug("result = %s" % self._prettyprint_dictionary(result))
    if result['exitcode'] != 0:
        warn("Failed with exit code %d" % result['exitcode'])
        sys.exit(1)
    info(60 * '-')
    from libtbx import easy_pickle
    from dials.util.ascii_art import spot_counts_per_image_plot
    refl = easy_pickle.load('strong.pickle')
    info(spot_counts_per_image_plot(refl))
    info(60 * '-')
    info("Successfully completed (%.1f sec)" % result['runtime'])
def _create_profile_model(self):
    info("\nCreating profile model...")
    command = ["dials.create_profile_model", "experiments.json", "indexed.pickle"]
    result = run_process(command, print_stdout=False, debug=procrunner_debug)
    debug("result = %s" % self._prettyprint_dictionary(result))
    if result['exitcode'] == 0:
        from dxtbx.model.experiment.experiment_list import ExperimentListFactory
        db = ExperimentListFactory.from_json_file('experiments_with_profile_model.json')[0]
        self._num_images = db.imageset.get_scan().get_num_images()
        self._oscillation = db.imageset.get_scan().get_oscillation()[1]
        self._sigma_m = db.profile.sigma_m()
        info("%d images, %s deg. oscillation, sigma_m=%.3f" %
             (self._num_images, str(self._oscillation), self._sigma_m))
        info("Successfully completed (%.1f sec)" % result['runtime'])
        return True
    else:
        warn("Failed with exit code %d" % result['exitcode'])
        return False
def _run_dials_import(self, parameters):
    command = ["dials.import"] + parameters  # + ['allow_multiple_sweeps=true']
    debug("running %s" % " ".join(command))
    result = run_process(command, print_stdout=False, debug=procrunner_debug)
    debug("result = %s" % self._prettyprint_dictionary(result))

    if result['exitcode'] == 0:
        if os.path.isfile('datablock.json'):
            info("Successfully completed (%.1f sec)" % result['runtime'])
        else:
            warn("Could not import images. Do the specified images exist at that location?")
            sys.exit(1)
    else:
        if "More than 1 sweep was found." in result['stderr']:
            warn("The data contain multiple sweeps. i19.screen can only run on a single sweep of data.")
            sys.exit(1)
        warn("Failed with exit code %d" % result['exitcode'])
        sys.exit(1)
def _report(self):
    info("\nCreating report...")
    command = ["dials.report", "experiments_with_profile_model.json", "indexed.pickle"]
    result = run_process(command, print_stdout=False, debug=procrunner_debug)
    debug("result = %s" % self._prettyprint_dictionary(result))
    if result['exitcode'] == 0:
        info("Successfully completed (%.1f sec)" % result['runtime'])
#       if sys.stdout.isatty():
#           info("Trying to start browser")
#           try:
#               import subprocess
#               d = dict(os.environ)
#               d["LD_LIBRARY_PATH"] = ""
#               subprocess.Popen(["xdg-open", "dials-report.html"], env=d)
#           except Exception, e:
#               debug("Could not open browser")
#               debug(str(e))
    else:
        warn("Failed with exit code %d" % result['exitcode'])
        sys.exit(1)
def test_fast_dp_X4_wide(tmpdir):
    src = os.path.join(libtbx.env.under_build('xia2_regression'), 'test_data', 'X4_wide')
    dls = '/dls/science/groups/scisoft/DIALS/repositories/current/' + \
          'xia2_regression_data/test_data/X4_wide'
    if not os.path.exists(src):
        if not os.path.exists(dls):
            pytest.skip('Could not find test image file')
        src = dls
    image = os.path.join(src, 'X4_wide_M1S4_2_0001.cbf')

    bin = os.path.split(__file__)[0].replace('src', 'bin')
    fast_dp = os.path.join(bin, 'fast_dp')
    cmd = [fast_dp, '-a', 'Ba', image]

    with tmpdir.as_cwd():
        result = run_process(cmd)

    assert result['stderr'] == '', 'fast_dp output to STDERR:\n' + result['stderr']
    assert result['exitcode'] == 0, 'fast_dp non-zero exit code'
    for output in ['fast_dp.mtz', 'fast_dp.log']:
        assert tmpdir.join(output).check(), 'No output found'
# LIBTBX_SET_DISPATCHER_NAME dev.xia2.make_sphinx_html

from __future__ import absolute_import, division

import os
import shutil

import libtbx.load_env
from dials.util.procrunner import run_process

if __name__ == "__main__":
    xia2_dir = libtbx.env.find_in_repositories("xia2", optional=False)
    assert xia2_dir is not None
    dest_dir = os.path.join(xia2_dir, "html")
    if os.path.exists(dest_dir):
        shutil.rmtree(dest_dir)
    os.chdir(os.path.join(xia2_dir, "doc", "sphinx"))
    result = run_process(["make", "clean"])
    assert result['exitcode'] == 0, \
        'make clean failed with exit code %d' % result['exitcode']
    result = run_process(["make", "html"])
    assert result['exitcode'] == 0, \
        'make html failed with exit code %d' % result['exitcode']
    print "Moving HTML pages to", dest_dir
    shutil.move("build/html", dest_dir)
        assets[shortname] = (last_update, version, a.get('browser_download_url'))

    long_names = {
        'macosx.pkg': 'Mac installer (OS X 10.11)',
        'macosx-10.6.pkg': 'Mac installer (OS X 10.6)',
        'linux-x86_64.tar.xz': 'Linux installer',
        'source.tar.xz': 'Source installer',
    }
    buttons = [download_button(long_names.get(asset, asset), version, link)
               for asset, (_, version, link) in assets.iteritems()]
    release.write("".join(sorted(buttons)))

if __name__ == "__main__":
    update_dials_download_links()
    cctbx_base = libtbx.env.find_in_repositories("cctbx_project")
    dials_dir = libtbx.env.find_in_repositories("dials")
    dials_github_io = libtbx.env.find_in_repositories("dials.github.io")
    assert dials_github_io is not None
    assert cctbx_base is not None
    base_dir = os.path.dirname(cctbx_base)
    dest_dir = dials_github_io
    os.chdir(os.path.join(dials_dir, "doc", "sphinx"))
    result = run_process(["make", "clean"])
    assert result['exitcode'] == 0, \
        'make clean failed with exit code %d' % result['exitcode']
    result = run_process(["make", "html"])
    assert result['exitcode'] == 0, \
        'make html failed with exit code %d' % result['exitcode']
    print "Copying HTML pages to", dest_dir
    recursive_overwrite("build/html", dest_dir)
def run_process(command):
    """Run a command, print timing information and return the result on success."""
    # This wrapper shadows the procrunner function of the same name, so call the
    # underlying implementation explicitly (assumed here to be
    # dials.util.procrunner, as imported elsewhere in this file) rather than
    # recursing into this wrapper.
    from dials.util.procrunner import run_process as _run_process
    result = _run_process(shlex.split(command))
    print "running command took {0:.2f} seconds\n".format(result['runtime'])
    assert result['exitcode'] == 0, "Command execution failed"
    return result
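# Hypothetical usage sketch (not in the original source): unlike the procrunner
# call it wraps, this helper takes a single command string and shell-splits it.
#
#   result = run_process("dials.version")
#   print result['stdout']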
def _check_intensities(self):
    info("\nTesting pixel intensities...")
    command = ["xia2.overload", "nproc=%s" % self.nproc, self.json_file]
    debug("running %s" % command)
    result = run_process(command, print_stdout=False, debug=procrunner_debug)
    debug("result = %s" % self._prettyprint_dictionary(result))
    info("Successfully completed (%.1f sec)" % result['runtime'])
    if result['exitcode'] != 0:
        warn("Failed with exit code %d" % result['exitcode'])
        sys.exit(1)

    with open('overload.json') as fh:
        overload_data = json.load(fh)

    print "Pixel intensity distribution:"
    count_sum = 0
    hist = {}
    if 'bins' in overload_data:
        for b in range(overload_data['bin_count']):
            if overload_data['bins'][b] > 0:
                hist[b] = overload_data['bins'][b]
                count_sum += b * overload_data['bins'][b]
    else:
        hist = {int(k): v for k, v in overload_data['counts'].iteritems() if int(k) > 0}
        count_sum = sum([k * v for k, v in hist.iteritems()])
    histcount = sum(hist.itervalues())

    # we have checked this: if _sigma_m >> _oscillation it works out about 1
    # as you would expect
    M = math.sqrt(math.pi) * self._sigma_m * \
        math.erf(self._oscillation / (2 * self._sigma_m))
    average_to_peak = M / self._oscillation
    info("Average-to-peak intensity ratio: %f" % average_to_peak)

    scale = 100 * overload_data['scale_factor'] / average_to_peak
    info("Determined scale factor for intensities as %f" % scale)

    debug("intensity histogram: { %s }",
          ", ".join(["%d:%d" % (k, hist[k]) for k in sorted(hist)]))
    max_count = max(hist.iterkeys())
    hist_max = max_count * scale

    hist_granularity, hist_format = 1, '%.0f'
    if hist_max < 50:
        hist_granularity, hist_format = 2, '%.1f'
    if hist_max < 15:
        hist_granularity, hist_format = 10, '%.1f'
    rescaled_hist = {}
    for x in hist.iterkeys():
        rescaled = round(x * scale * hist_granularity)
        if rescaled > 0:
            rescaled_hist[rescaled] = hist[x] + rescaled_hist.get(rescaled, 0)
    hist = rescaled_hist
    debug("rescaled histogram: { %s }",
          ", ".join([(hist_format + ":%d") % (k / hist_granularity, hist[k]) for k in sorted(hist)]))

    self._plot_intensities(hist, 1.0 / hist_granularity)

    text = "Strongest pixel (%d counts) reaches %.1f %% of the detector count rate limit" % (max_count, hist_max)
    if hist_max > 100:
        warn("Warning: %s!" % text)
    else:
        info(text)

    if 'overload_limit' in overload_data and max_count >= overload_data['overload_limit']:
        warn("Warning: THE DATA CONTAIN REGULAR OVERLOADS!")
        warn(" The photon incidence rate is outside the specified limits of the detector.")
        warn(" The built-in detector count rate correction cannot adjust for this.")
        warn(" You should aim for count rates below 25% of the detector limit.")
    elif hist_max > 70:
        warn("Warning: The photon incidence rate is well outside the linear response region of the detector (<25%).")
        warn(" The built-in detector count rate correction may not be able to adjust for this.")
    elif hist_max > 25:
        info("The photon incidence rate is outside the linear response region of the detector (<25%).")
        info("The built-in detector count rate correction should be able to adjust for this.")

    info("Total sum of counts in dataset: %d" % count_sum)
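# Worked example (illustrative numbers, not from the original source) of the
# rescaling above: with overload_data['scale_factor'] = 0.004 and
# average_to_peak = 0.8, scale = 100 * 0.004 / 0.8 = 0.5, i.e. each raw count
# corresponds to 0.5 % of the detector count rate limit. A strongest pixel of
# max_count = 60 then gives hist_max = 30 %, which is below 50, so
# hist_granularity = 2 and the histogram keys are rebinned to half-percent
# resolution via round(x * scale * hist_granularity); _plot_intensities is then
# called with hist_value_factor = 1 / hist_granularity to restore the
# percentage axis.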
def ccp4_version():
    result = run_process(['refmac5', '-i'], print_stdout=False)
    assert result['exitcode'] == 0 and not result['timeout']
    version = re.search(r'patch level *([0-9]+)\.([0-9]+)\.([0-9]+)', result['stdout'])
    assert version
    return [int(v) for v in version.groups()]
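# Hypothetical usage sketch (not in the original source): the returned
# [major, minor, patch] list allows simple lexicographic version comparisons.
#
#   if ccp4_version() >= [7, 0, 0]:
#       print "recent CCP4/Refmac installation"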
def run_xia2_tolerant(test_name, command_line_args, expected_data_files=[]):
    cwd = os.path.abspath(os.curdir)
    tmp_dir = os.path.join(os.curdir, 'xia2_regression.%s' % test_name)
    try:
        os.mkdir(tmp_dir)
    except OSError as exc:
        import errno
        if exc.errno == errno.EEXIST and os.path.isdir(tmp_dir):
            pass
        else:
            raise
    os.chdir(tmp_dir)

    ccp4 = ccp4_version()
    result = run_process(['xia2'] + command_line_args)

    error_file = 'xia2.error'
    if os.path.exists(error_file):
        print >> sys.stderr, open(error_file, 'r').read()
        from libtbx.utils import Sorry
        raise Sorry("xia2.error present after execution")

    assert result['stderr'] == '', "xia2 terminated with output to STDERR:\n" + result['stderr']
    assert result['exitcode'] == 0, "xia2 terminated with non-zero exit code (%d)" % result['exitcode']

    summary_file = 'xia2-summary.dat'
    assert os.path.exists(summary_file), "xia2-summary.dat not present after execution"

    summary_text = open(summary_file, 'rb').read()
    summary_text_lines = summary_text.split('\n')
    template_name = 'result.%s.%d.%d.%d' % (test_name, ccp4[0], ccp4[1], ccp4[2])
    with open(template_name, 'w') as fh:
        fh.write(generate_tolerant_template(summary_text_lines))

    expected_result_dir = os.path.join(os.path.dirname(__file__), 'expected')
    expected_result_file, expected_result_file_version = None, None
    if os.path.exists(expected_result_dir):
        for f in os.listdir(expected_result_dir):
            if f.startswith('result.%s' % test_name) and os.path.isfile(os.path.join(expected_result_dir, f)):
                candidate_version = re.search(r"\.([0-9]+)\.([0-9]+)\.([0-9]+)$", f)
                if candidate_version:
                    major, minor, revision = [int(v) for v in candidate_version.groups()]
                    cmaj, cmin, crev = ccp4
                    # ensure file is not made for a newer CCP4 version
                    if cmaj < major: continue
                    if cmaj == major and cmin < minor: continue
                    if cmaj == major and cmin == minor and crev < revision: continue
                    if expected_result_file is not None and expected_result_file_version is not None:
                        cmaj, cmin, crev = expected_result_file_version
                        # ensure file is for a more recent version than any already found file
                        if cmaj > major: continue
                        if cmaj == major and cmin > minor: continue
                        if cmaj == major and cmin == minor and crev > revision: continue
                    expected_result_file = f
                    expected_result_file_version = [int(v) for v in candidate_version.groups()]
                elif expected_result_file is None:
                    expected_result_file = f
    assert expected_result_file is not None, "Could not find expected results file to compare actual results to"

    with open(os.path.join(expected_result_dir, expected_result_file), 'r') as fh:
        expected_summary_lines = fh.readlines()

    compare = StringIO.StringIO()
    print >>compare, 'Comparing output against %s' % expected_result_file
    print >>compare, '-' * 80

    number = re.compile(r'(\d*\.\d+|\d+\.?)')
    number_with_tolerance = re.compile(r'(\d*\.\d+|\d+\.?)\((ignore|\*\*|\d*\.\d+%?|\d+\.?%?)\)')

    output_identical = True
    for actual, expected in zip(summary_text_lines, expected_summary_lines):
        if actual == expected:
            print >>compare, ' ' + actual
            continue

        actual_s = actual.split()
        expected_s = expected.split()
        actual_s = re.split(r'(\s+)', actual)
        expected_s = re.split(r'(\s+)', expected)

        valid = []
        equal = []
        for e, a in zip(expected_s, actual_s):
            if e == '***' or e.strip() == a.strip():
                equal.append(True)
                valid.append(True)
            elif e == '(ignore)':
                equal.append(False)
                valid.append(True)
            elif number_with_tolerance.match(e) and number.match(a):
                expected_value, tolerance = number_with_tolerance.match(e).groups()
                expected_value = float(expected_value)
                if number.match(e).groups()[0] == a:
                    # identical value, but missing brackets
                    equal.append(True)
                    valid.append(True)
                    continue
                if tolerance == '**':
                    equal.append(True)
                    valid.append(True)
                    continue
                if tolerance == 'ignore':
                    equal.append(False)
                    valid.append(True)
                    continue
                if isinstance(tolerance, basestring) and '%' in tolerance:
                    # percentage
                    tolerance = expected_value * float(tolerance[:-1]) / 100
                else:
                    tolerance = float(tolerance)
                equal.append(False)
                valid.append(abs(expected_value - float(a)) <= tolerance)
            else:
                equal.append(False)
                valid.append(False)

        if all(equal):
            print >>compare, ' ' + actual
            continue

        expected_line = ''
        actual_line = ''
        for expected, actual, vld, eq in zip(expected_s, actual_s, valid, equal):
            template = '%%-%ds' % max(len(expected), len(actual))
            if eq:
                expected_line += template % expected
                actual_line += template % ''
            elif vld:
                expected_line += template % expected
                actual_line += template % actual
            else:
                expected_line += ' ' + template % expected + ' '
                actual_line += '*' + template % actual + '*'
                output_identical = False
        print >>compare, '-' + expected_line
        if not all(valid):
            print >>compare, '>' + actual_line
        else:
            print >>compare, '+' + actual_line

    print >>compare, '-' * 80

    for data_file in expected_data_files:
        if not os.path.exists(os.path.join('DataFiles', data_file)):
            print >>compare, "> expected file %s is missing" % data_file
            output_identical = False

    html_file = 'xia2.html'
    if not os.path.exists(html_file):
        print >>compare, "> xia2.html not present after execution"
        output_identical = False

    g = glob.glob('LogFiles/*_report.html')
    if len(g) == 0:
        print >>compare, "> xia2 report not present after execution"
        output_identical = False

    os.chdir(cwd)

    if not output_identical:
        from libtbx.utils import Sorry
        sys.stderr.write(compare.getvalue())
        raise Sorry("xia2 output failing tolerance checks")
    sys.stdout.write(compare.getvalue())
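# Illustrative sketch (not from the original source) of the tolerance notation the
# comparison above accepts in an expected-results template, as defined by the
# 'number_with_tolerance' regex and the '***' / '(ignore)' branches. Field names
# and values here are made up:
#
#   High resolution limit   1.25(0.05)       absolute tolerance of +/- 0.05
#   Rmerge                  0.064(10%)       relative tolerance of 10%
#   Total observations      54321(**)        any numeric value accepted
#   Wavelength              0.9795(ignore)   numeric value ignored
#   Space group             ***              whole token ignored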