def test_out_redirection(self):
    """Redirecting stdout to a file object leaves the command's captured
    output empty; adding _tee=True captures it as well as writing the file."""
    import tempfile

    py = create_tmp_test(
        """
import sys
import os

sys.stdout.write("stdout")
sys.stderr.write("stderr")
"""
    )
    file_obj = tempfile.TemporaryFile()
    out = python(py.name, _out=file_obj)

    # redirected away, so nothing is captured on the RunningCommand
    self.assertTrue(len(out) == 0)

    file_obj.seek(0)
    actual_out = file_obj.read()
    file_obj.close()

    self.assertTrue(len(actual_out) != 0)

    # test with tee
    file_obj = tempfile.TemporaryFile()
    out = python(py.name, _out=file_obj, _tee=True)

    # tee'd: output goes both to the file and to the captured result
    self.assertTrue(len(out) != 0)

    file_obj.seek(0)
    actual_out = file_obj.read()
    file_obj.close()

    self.assertTrue(len(actual_out) != 0)
def execute_locally(self): """Runs the equivalent command locally in a blocking way.""" # Make script file # self.make_script() # Do it # with open(self.kwargs['out_file'], 'w') as handle: sh.python(self.script_path, _out=handle, _err=handle)
def test_environment(self):
    """_env replaces (not augments) the child's environment entirely."""
    import os

    env = {"HERP": "DERP"}

    py = create_tmp_test(
        """
import os

osx_cruft = ["__CF_USER_TEXT_ENCODING", "__PYVENV_LAUNCHER__"]
for key in osx_cruft:
    try:
        del os.environ[key]
    except:
        pass
print(os.environ["HERP"] + " " + str(len(os.environ)))
"""
    )
    out = python(py.name, _env=env).strip()
    # exactly one variable survives: the one we injected
    self.assertEqual(out, "DERP 1")

    py = create_tmp_test(
        """
import os, sys
sys.path.insert(0, os.getcwd())
import sh
osx_cruft = ["__CF_USER_TEXT_ENCODING", "__PYVENV_LAUNCHER__"]
for key in osx_cruft:
    try:
        del os.environ[key]
    except:
        pass
print(sh.HERP + " " + str(len(os.environ)))
"""
    )
    # same check, but reading the variable through sh's attribute access
    out = python(py.name, _env=env, _cwd=THIS_DIR).strip()
    self.assertEqual(out, "DERP 1")
def test_no_err(self):
    """_no_err (or an _err callback) suppresses stderr capture; stdout is unaffected."""
    py = create_tmp_test(
        """
import sys

sys.stdout.write("stdout")
sys.stderr.write("stderr")
"""
    )
    p = python(py.name, _no_err=True)
    self.assertEqual(p.stderr, b"")
    self.assertEqual(p.stdout, b"stdout")
    self.assertFalse(p.process._pipe_queue.empty())

    # routing stderr to a callback also keeps it out of the capture buffer
    def callback(line):
        pass

    p = python(py.name, _err=callback)
    self.assertEqual(p.stderr, b"")
    self.assertEqual(p.stdout, b"stdout")
    self.assertFalse(p.process._pipe_queue.empty())

    # default behavior: stderr is captured
    p = python(py.name)
    self.assertEqual(p.stderr, b"stderr")
    self.assertEqual(p.stdout, b"stdout")
    self.assertFalse(p.process._pipe_queue.empty())
def test_binary_pipe(self):
    """Arbitrary binary data survives a pipe between two subprocesses unmodified."""
    binary = b"\xec;\xedr\xdbF\x92\xf9\x8d\xa7\x98\x02/\x15\xd2K\xc3\x94d\xc9"

    # writer: emit the raw bytes on an unbuffered binary stdout
    py1 = create_tmp_test(
        """
import sys
import os

sys.stdout = os.fdopen(sys.stdout.fileno(), "wb", 0)
sys.stdout.write(%r)
"""
        % binary
    )

    # echoer: copy binary stdin straight to binary stdout
    py2 = create_tmp_test(
        """
import sys
import os

sys.stdin = os.fdopen(sys.stdin.fileno(), "rb", 0)
sys.stdout = os.fdopen(sys.stdout.fileno(), "wb", 0)
sys.stdout.write(sys.stdin.read())
"""
    )
    out = python(python(py1.name), py2.name)
    self.assertEqual(out.stdout, binary)
def test_with_context_args(self):
    """A _with=True command taking options still prefixes commands run in its block."""
    from sh import whoami
    import getpass

    py = create_tmp_test("""
import sys
import os
import subprocess
from optparse import OptionParser

parser = OptionParser()
parser.add_option("-o", "--opt", action="store_true", default=False, dest="opt")
options, args = parser.parse_args()

if options.opt:
    subprocess.Popen(args[0], shell=False).wait()
""")
    # with the option set, the wrapper execs its argument, so whoami runs
    with python(py.name, opt=True, _with=True):
        out = whoami()
    self.assertTrue(getpass.getuser() == out.strip())

    # without the option, the wrapper ignores its argument -> no output
    with python(py.name, _with=True):
        out = whoami()
    self.assertTrue(out == "")
def test_err_redirection(self):
    """stderr redirected to a file is not captured unless _tee="err" is given."""
    import tempfile

    py = create_tmp_test(
        """
import sys
import os

sys.stdout.write("stdout")
sys.stderr.write("stderr")
"""
    )
    file_obj = tempfile.TemporaryFile()
    p = python(py.name, _err=file_obj)

    file_obj.seek(0)
    stderr = file_obj.read().decode()
    file_obj.close()

    self.assertTrue(p.stdout == b"stdout")
    self.assertTrue(stderr == "stderr")
    # redirected away, so nothing captured on the RunningCommand
    self.assertTrue(len(p.stderr) == 0)

    # now with tee
    file_obj = tempfile.TemporaryFile()
    p = python(py.name, _err=file_obj, _tee="err")

    file_obj.seek(0)
    stderr = file_obj.read().decode()
    file_obj.close()

    self.assertTrue(p.stdout == b"stdout")
    self.assertTrue(stderr == "stderr")
    # tee'd: stderr goes to both the file and the capture buffer
    self.assertTrue(len(p.stderr) != 0)
def run(self, clean):
    """Run self.mainfile from the working directory, streaming its output
    through self.print_output; optionally clean up afterwards.

    clean -- when truthy, call self.clean() after the run.
    """
    sh.cd(self.working_dir)
    try:
        sh.python(self.mainfile, _out=self.print_output, _err=self.print_output).wait()
    except Exception:
        # best-effort: failures of the child script are deliberately swallowed
        pass
    if clean:
        self.clean()
def test_make_migrations(cookies):
    """generated project should be able to generate migrations"""
    with bake_in_temp_dir(cookies, extra_context={}) as result:
        manage_py = result.project.join('manage.py')
        try:
            sh.python(manage_py, 'makemigrations')
        except sh.ErrorReturnCode as err:
            # surface the full command output as the test failure reason
            pytest.fail(str(err))
def test_run_tests(cookies):
    """generated project should run tests"""
    with bake_in_temp_dir(cookies, extra_context={}) as result:
        runtests_py = result.project.join('runtests.py')
        try:
            sh.python(runtests_py)
        except sh.ErrorReturnCode as err:
            # surface the full command output as the test failure reason
            pytest.fail(str(err))
def test_list(ts):
    """Time emo4.py on each dax workflow in the suite and log per-run cost.

    ts -- (name, sequence-of-dax-files) pair; results go to log/<name>.log.
    NOTE: uses the Python 2 print-chevron syntax; py2-only as written.
    """
    n, s = ts
    with open("log/%s.log" % n, "w") as f:
        for t in s:
            dax = "dax/%s" % t
            start = time.time()
            sh.python("emo4.py", dax)
            cost = time.time() - start
            print >>f, "%s\t%.2fs" % (dax, cost)
def test_screenly_should_exit_if_no_settings_file_found(self):
    """Both entry points must exit with code 1 when HOME holds no settings file."""
    env_without_settings = os.environ.copy()
    env_without_settings["HOME"] = "/tmp"
    base_dir = os.path.dirname(__file__)
    # viewer first, then server -- same order as before
    for script in ('/../viewer.py', '/../server.py'):
        with self.assertRaises(sh.ErrorReturnCode_1):
            sh.python(base_dir + script, _env=env_without_settings)
def test_long_option(self):
    """A keyword argument is translated into the child's --long-option flag."""
    py = create_tmp_test("""
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-l", "--long-option", action="store", default="", dest="long_option")
options, args = parser.parse_args()
print(options.long_option.upper())
""")
    with_value = python(py.name, long_option="testing").strip()
    self.assertTrue(with_value == "TESTING")
    # without the kwarg the child sees its default (empty string)
    without_value = python(py.name).strip()
    self.assertTrue(without_value == "")
def test_long_bool_option(self):
    """A True-valued kwarg becomes a bare --long-option flag in the child."""
    py = create_tmp_test("""
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-l", "--long-option", action="store_true", default=False, dest="long_option")
options, args = parser.parse_args()
print(options.long_option)
""")
    enabled = python(py.name, long_option=True).strip()
    self.assertTrue(enabled == "True")
    # omitting the kwarg leaves the child's default False
    disabled = python(py.name).strip()
    self.assertTrue(disabled == "False")
def upload():
    """GET renders the upload form; POST saves the file and triggers sending."""
    if request.method == 'GET':
        return render_template("upload.html")
    # POST: persist the uploaded file under /tmp/polly
    uploaded = request.files['file']
    safe_name = secure_filename(uploaded.filename)
    if not os.path.exists('/tmp/polly'):
        os.makedirs('/tmp/polly')
    uploaded.save(os.path.join('/tmp/polly', safe_name))
    # hand off to the client-side sender script
    sh.python("/srv/polly/python-client/send_file.py")
    return redirect('/upload')
def import_data(json_name, package_dir):
    """Load the JSON fixture `json_name` into the Django project in `package_dir`."""
    json_path = os.path.join(get_json_dir(), json_name)
    if not os.path.exists(json_path):
        log('%s does not exist' % json_path)
        return
    print('loading %s' % json_name)
    sh.cd(package_dir)
    # pick the interpreter wrapper matching the running Python major version
    loaddata = sh.python if IS_PY2 else sh.python3
    loaddata('manage.py', 'loaddata', json_path)
def test_exit_code_from_exception(self):
    """ErrorReturnCode exceptions carry the child's exit code as .exit_code."""
    from sh import ErrorReturnCode

    py = create_tmp_test("""
exit(3)
""")
    self.assertRaises(ErrorReturnCode, python, py.name)

    # the assertRaises above guarantees this except branch is reached
    try:
        python(py.name)
    except Exception as e:
        self.assertEqual(e.exit_code, 3)
def test_multiple_args_long_option(self):
    """A space-containing option value is passed as one argument, not split."""
    py = create_tmp_test("""
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-l", "--long-option", dest="long_option")
options, args = parser.parse_args()
print(len(options.long_option.split()))
""")
    # kwarg form
    num_args = int(python(py.name, long_option="one two three"))
    self.assertEqual(num_args, 3)

    # positional flag + value form
    num_args = int(python(py.name, "--long-option", "one's two's three's"))
    self.assertEqual(num_args, 3)
def gen_model_code(model_codegen_dir, platform, model_file_path,
                   weight_file_path, model_sha256_checksum,
                   weight_sha256_checksum, input_nodes, output_nodes,
                   runtime, model_tag, input_shapes, dsp_mode,
                   embed_model_data, winograd, quantize,
                   quantize_range_file, obfuscate, model_graph_format,
                   data_type, graph_optimize_options):
    """Build the MACE converter with bazel, then run it to generate model
    code for `model_tag` into a freshly-recreated `model_codegen_dir`.

    All remaining parameters are forwarded verbatim as converter CLI flags.
    """
    bazel_build_common("//mace/python/tools:converter")

    # always start from a clean output directory
    if os.path.exists(model_codegen_dir):
        sh.rm("-rf", model_codegen_dir)
    sh.mkdir("-p", model_codegen_dir)

    # _fg=True runs the converter in the foreground so its output is visible
    sh.python("bazel-bin/mace/python/tools/converter",
              "-u",
              "--platform=%s" % platform,
              "--model_file=%s" % model_file_path,
              "--weight_file=%s" % weight_file_path,
              "--model_checksum=%s" % model_sha256_checksum,
              "--weight_checksum=%s" % weight_sha256_checksum,
              "--input_node=%s" % input_nodes,
              "--output_node=%s" % output_nodes,
              "--runtime=%s" % runtime,
              "--template=%s" % "mace/python/tools",
              "--model_tag=%s" % model_tag,
              "--input_shape=%s" % input_shapes,
              "--dsp_mode=%s" % dsp_mode,
              "--embed_model_data=%s" % embed_model_data,
              "--winograd=%s" % winograd,
              "--quantize=%s" % quantize,
              "--quantize_range_file=%s" % quantize_range_file,
              "--obfuscate=%s" % obfuscate,
              "--output_dir=%s" % model_codegen_dir,
              "--model_graph_format=%s" % model_graph_format,
              "--data_type=%s" % data_type,
              "--graph_optimize_options=%s" % graph_optimize_options,
              _fg=True)
def build_sigar_python(target):
    """Build and install the SIGAR python bindings into target.prefix.

    NOTE: uses Python 2-only `except X, e` syntax and print statements;
    this module cannot run under Python 3 as written.
    """
    target.pushd(os.path.join(target.dirname, target.buildpath))
    target.pushd('bindings')
    target.pushd('python')
    tmpenv = os.environ.copy()
    if "Darwin" in sh.uname('-a'):
        # clang on OS X: silence unused-argument diagnostics
        tmpenv['CPPFLAGS'] = '-Qunused-arguments'
        tmpenv['CFLAGS'] = '-Qunused-arguments'
    try:
        tmpenv['LDFLAGS'] = '-L' + target.prefix + '/lib -L' + target.prefix + '/lib64'
        sh.python("setup.py", "--with-sigar="+target.prefix, "install",
                  "--prefix="+target.prefix, _env=tmpenv)
    except sh.ErrorReturnCode,ex:
        print "Unable to build SIGAR python extensions: %s" % ex.stderr
        raise ex
def test_files(self):
    """envtpl renders the .tpl file using env vars and removes the template."""
    tpl_filename = self.tempfile('test.txt.tpl')
    txt_filename = self.tempfile('test.txt')
    with open(tpl_filename, 'wt') as f:
        f.write('hello {{FOO}}')
    sh.python(
        self.envtpl(),
        tpl_filename,
        _env={'FOO': 'world'},
    )
    # the template is consumed and replaced by the rendered file
    self.assertFalse(os.path.exists(tpl_filename))
    self.assertTrue(os.path.exists(txt_filename))
    with open(txt_filename, 'r') as f:
        # assertEqual, not the deprecated assertEquals alias (removed in 3.12)
        self.assertEqual('hello world', f.read())
def extract_config():
    """Collect library versions, package git SHAs, and host info into a dict.

    Returns a dict with 'versions', 'hashes', and host keys
    ('uname', 'hostname', 'whoami', 'date').
    """
    def git_sha(base=''):
        # best-effort: returns 'NA' when `base` is not a git checkout
        try:
            return str(sh.git('rev-parse', 'HEAD', _cwd=base)).strip()
        except Exception:
            return 'NA'
    config = c = {}
    versions = v = {}
    v['bloscpack'] = bp.__version__
    v['blosc'] = blosc.__version__
    v['numpy'] = np.__version__
    v['joblib'] = jb.__version__
    v['tables'] = tables.__version__
    # _tty_in: these CLIs are queried as if attached to a terminal
    v['conda'] = str(sh.conda('--version', _tty_in=True)).strip()
    v['python'] = str(sh.python('--version', _tty_in=True)).strip()
    hashes = h = {}
    h['bloscpack'] = git_sha(os.path.dirname(bp.__file__))
    h['joblib'] = git_sha(jb.__path__[0])
    h['blosc'] = git_sha(blosc.__path__[0])
    h['numpy'] = git_sha(np.__path__[0])
    h['tables'] = git_sha(tables.__path__[0])
    # empty base = the benchmark repo itself (current directory)
    h['benchmark'] = git_sha()
    c['uname'] = str(sh.uname('-a')).strip()
    c['hostname'] = str(sh.hostname()).strip()
    c['whoami'] = str(sh.whoami()).strip()
    c['date'] = str(sh.date()).strip()
    c['versions'] = versions
    c['hashes'] = hashes
    return config
def CustomPythonCmakeArgs():
    """Return -DPYTHON_LIBRARY / -DPYTHON_INCLUDE_DIR CMake args.

    The CMake 'FindPythonLibs' Module does not work properly,
    so we are forced to do its job for it.
    """
    python_prefix = sh.python_config('--prefix').strip()

    # BUGFIX: os.path.join discards everything before an absolute component,
    # so joining with '/Python' previously ignored python_prefix entirely and
    # always tested the filesystem root. Use relative components instead.
    if p.isfile(p.join(python_prefix, 'Python')):
        # OS X framework layout
        python_library = p.join(python_prefix, 'Python')
        python_include = p.join(python_prefix, 'Headers')
    else:
        # conventional prefix/lib/libpythonX.Y.* layout
        which_python = sh.python(
            '-c',
            'import sys;i=sys.version_info;print "python%d.%d" % (i[0], i[1])'
        ).strip()
        lib_python = '{0}/lib/lib{1}'.format(python_prefix, which_python).strip()
        if p.isfile('{0}.a'.format(lib_python)):
            python_library = '{0}.a'.format(lib_python)
        # This check is for CYGWIN
        elif p.isfile('{0}.dll.a'.format(lib_python)):
            python_library = '{0}.dll.a'.format(lib_python)
        else:
            python_library = '{0}.dylib'.format(lib_python)
        python_include = '{0}/include/{1}'.format(python_prefix, which_python)

    return ['-DPYTHON_LIBRARY={0}'.format(python_library),
            '-DPYTHON_INCLUDE_DIR={0}'.format(python_include)]
def test_stdout_callback_no_wait(self):
    """Without waiting, the _out callback has not yet received every line."""
    import time

    py = create_tmp_test(
        """
import sys
import os
import time

for i in range(5):
    print(i)
    time.sleep(.5)
"""
    )

    stdout = []

    def agg(line):
        stdout.append(line)

    # u=True -> unbuffered child output so lines stream promptly
    p = python(py.name, _out=agg, u=True)

    # we give a little pause to make sure that the NamedTemporaryFile
    # exists when the python process actually starts
    time.sleep(0.5)

    # the child emits 5 lines over ~2.5s; we only waited 0.5s
    self.assertTrue(len(stdout) != 5)
def test_stdout_callback_kill(self):
    """A callback that kills the process stops output; exit code reflects SIGKILL."""
    import signal

    py = create_tmp_test("""
import sys
import os
import time

for i in range(5):
    print(i)
    time.sleep(.5)
""")

    stdout = []
    def agg(line, stdin, process):
        line = line.strip()
        stdout.append(line)
        if line == "3":
            process.kill()
            # returning True unregisters this callback
            return True

    p = python(py.name, _out=agg, u=True)
    p.wait()

    # killed processes report the negated signal number
    self.assertEqual(p.process.exit_code, -signal.SIGKILL)
    self.assertTrue("4" not in p)
    self.assertTrue("4" not in stdout)
def minify():
    """Collect js/css asset lists, write a production setup.cfg, and run
    the setup.py minify targets.

    NOTE: contains a Python 2 print statement alongside print() calls; the
    module is py2-only as written.
    """
    proj()
    print(". minifying ...")
    sources = dict(js=[], css=[])
    # gather asset names from ./frame/{scripts,styles}.txt and ./{scripts,styles}.txt
    for user in ["./frame", "."]:
        for min_id, asset_list in dict(js="scripts", css="styles").items():
            asset_list = os.path.join(user, asset_list+".txt")
            if os.path.exists(asset_list):
                # skip blanks, '#' comments, and duplicates
                [
                    sources[min_id].append(asset)
                    for asset in [x.strip() for x in open(asset_list).read().split("\n")]
                    if asset and not asset.startswith("#")
                    and asset not in sources[min_id]
                ]
    cfg = open("setup.cfg", "w")
    cfg.writelines([
        CFG_TEMPLATE % {"src": src, "assets": " ".join(assets)}
        for src, assets in sources.items()
    ])
    print ".. writing out production setup.cfg"
    cfg.close()
    # run minify_js / minify_css through setup.py and echo their output
    [
        pprint(sh.python("setup.py", "minify_" + src, verbose=True))
        for src in sources
    ]
def test_general_signal(self):
    """process.signal() delivers an arbitrary signal; the child's handler
    runs (printing 10) and exits cleanly."""
    import signal
    from signal import SIGINT

    py = create_tmp_test("""
import sys
import os
import time
import signal

def sig_handler(sig, frame):
    print(10)
    exit(0)

signal.signal(signal.SIGINT, sig_handler)

for i in range(5):
    print(i)
    sys.stdout.flush()
    time.sleep(0.5)
""")

    stdout = []
    def agg(line, stdin, process):
        line = line.strip()
        stdout.append(line)
        if line == "3":
            process.signal(SIGINT)
            # returning True unregisters this callback
            return True

    p = python(py.name, _out=agg)
    p.wait()

    # handler called exit(0), so this is a clean exit, not a signal death
    self.assertEqual(p.process.exit_code, 0)
    self.assertEqual(p, "0\n1\n2\n3\n10\n")
def test_mods_parameter_id_lines(self):
    """beast_checkpoint.py output must contain updated parameter lines taken
    from the last row of the log, and must omit rootHeight."""
    log = pd.read_csv(self.testlog, sep='\t', comment='#')
    cmd = '{script} {xml} {trees} {log}'.format(
        script=join(dirname(THIS), 'bio_pieces', 'beast_checkpoint.py'),
        xml=self.testxml, trees=self.testtrees, log=self.testlog
    )
    ac = log.tail(1)['ac'].values[0]
    freqs = log.tail(1)[
        ['frequencies1', 'frequencies2', 'frequencies3', 'frequencies4']
    ]
    freqs = [str(v.values[0]) for k, v in freqs.iteritems()]
    freqs = ' '.join(freqs)
    # These all need to exist in output
    # NOTE(review): the first format string has no {} placeholder, so
    # .format(freqs) is a no-op and `freqs` is effectively unused -- confirm
    # whether the frequencies value was meant to be interpolated here.
    find_in_lines = [
        '<parameter id="frequencies" value="0.225554'.format(freqs),
        '<parameter id="ac" value="{0}" lower="0.0" upper="Infinity"/>'.format(ac),
    ]
    not_find_in_lines = [
        '<parameter id="treeModel.rootHeight" value=',
    ]
    found = []
    def find_lines(line):
        ''' look for required lines in output '''
        for l in find_in_lines + not_find_in_lines:
            if l in line:
                found.append(l)
    out = sh.python(cmd.split(), _out=find_lines, _err=find_lines)
    print("Looking for lines {0}".format(find_in_lines))
    print("Found {0}".format(found))
    # every required line found, and none of the excluded ones
    self.assertEqual(sorted(find_in_lines), sorted(found))
def _():
    """xunit reporter: a suite with mixed passing/failing its reports
    tests=3, failures=1 and per-case elements."""
    cmd = sh.python('pocha/cli.py',
                    '--reporter', 'xunit',
                    'test/input/mixed_its.py',
                    _ok_code=[1],
                    _tty_out=False)
    stdout = cmd.stdout.decode('utf-8')
    expect(stdout).to.have.xpath('./testsuite[' +
                                 '@name="Pocha Tests" and ' +
                                 '@tests="3" and ' +
                                 '@errors="0" and ' +
                                 '@failures="1" and ' +
                                 '@skip="0"]')
    expect(stdout).to.have.xpath('./testsuite/testcase[' +
                                 '@name="can run a passing it" and ' +
                                 '@classname=""]')
    # XXX: should find a way to validate the stacktrace
    # NOTE(review): "runa" looks like a typo but presumably mirrors the
    # fixture's test name -- verify against test/input/mixed_its.py
    expect(stdout).to.have.xpath('./testsuite/testcase[' +
                                 '@name="can runa a failing it" and ' +
                                 '@classname=""]/error')
    expect(stdout).to.have.xpath('./testsuite/testcase[' +
                                 '@name="can run another passing it" and ' +
                                 '@classname=""]')
def _():
    """xunit reporter: skipped its are flagged with <skipped> and counted
    in the suite's skip attribute."""
    cmd = sh.python('pocha/cli.py',
                    'test/input/skip_its.py',
                    '--reporter', 'xunit',
                    _tty_out=False)
    stdout = cmd.stdout.decode('utf-8')
    expect(stdout).to.have.xpath('./testsuite[' +
                                 '@name="Pocha Tests" and ' +
                                 '@tests="3" and ' +
                                 '@errors="0" and ' +
                                 '@failures="0" and ' +
                                 '@skip="2"]')
    expect(stdout).to.have.xpath('./testsuite/testcase[' +
                                 '@name="first it" and ' +
                                 '@classname="" and ' +
                                 '@time="0.000"]/skipped')
    expect(stdout).to.have.xpath('./testsuite/testcase[' +
                                 '@name="second it" and ' +
                                 '@classname="" and ' +
                                 '@time="0.000"]')
    expect(stdout).to.have.xpath('./testsuite/testcase[' +
                                 '@name="third it" and ' +
                                 '@classname="" and ' +
                                 '@time="0.000"]/skipped')
def predict(sentences, gold_file, output_file):
    """Write parser predictions for `sentences` to `output_file`, then run
    the CoNLL-17 UD evaluation script against `gold_file` and echo its
    report (saved to output_file + '.txt')."""
    with open(output_file, "w") as f:
        result = parser.Predict(sentences)
        for i in result:
            f.write(i.to_string())
    eval_script = os.path.join(
        current_path, "utils/evaluation_script/conll17_ud_eval.py")
    weight_file = os.path.join(current_path,
                               "utils/evaluation_script/weights.clas")
    eval_process = sh.python(eval_script, "-v", "-w", weight_file, gold_file,
                             output_file, _out=output_file + '.txt')
    # block until the evaluation report is fully written
    eval_process.wait()
    sh.cat(output_file + '.txt', _out=sys.stdout)
    print('Finished predicting {}'.format(gold_file))
def test_dry_run(runner):
    """WANDB_MODE=dryrun: a finished run writes metadata plus all log,
    history, events, and summary files into the wandb/dry* run dir."""
    with runner.isolated_filesystem():
        with open("train.py", "w") as f:
            f.write(train_py)
        os.environ["WANDB_MODE"] = "dryrun"
        os.environ["WANDB_TEST"] = "true"
        try:
            res = sh.python("train.py")
            run_dir = glob.glob(os.path.join("wandb", "dry*"))[0]
            meta = json.loads(
                open(os.path.join(run_dir, "wandb-metadata.json")).read())
            assert meta["state"] == "finished"
            assert meta["program"] == "train.py"
            assert meta["exitcode"] == 0
            assert os.path.exists(os.path.join(run_dir, "output.log"))
            assert "loss:" in open(os.path.join(run_dir, "output.log")).read()
            assert os.path.exists(os.path.join(run_dir, "wandb-history.jsonl"))
            assert os.path.exists(os.path.join(run_dir, "wandb-events.jsonl"))
            assert os.path.exists(os.path.join(run_dir, "wandb-summary.json"))
        finally:
            # don't leak the env overrides into other tests
            del os.environ["WANDB_MODE"]
            del os.environ["WANDB_TEST"]
def test_dry_run_kill(runner):
    """A dry run that kills itself records state 'killed' and exit code 255."""
    with runner.isolated_filesystem():
        with open("train.py", "w") as f:
            # uncomment the os.kill line in the fixture script
            f.write(train_py.replace("#os.kill", "os.kill"))
        environ = {
            "WANDB_MODE": "dryrun",
            "WANDB_TEST": "true",
        }
        res = sh.python("train.py", epochs=10, _bg=True, _env=environ)
        try:
            res.wait()
            print(res)
        except sh.ErrorReturnCode:
            # the self-kill makes a nonzero exit expected
            pass
        dirs = glob.glob(os.path.join("wandb", "dry*"))
        print(dirs)
        run_dir = dirs[0]
        meta = json.loads(
            open(os.path.join(run_dir, "wandb-metadata.json")).read())
        assert meta["state"] == "killed"
        assert meta["exitcode"] == 255
        assert meta["args"] == ["--epochs=10"]
def test_multiple_pipes(self):
    """Chained _piped commands stream output incrementally rather than
    buffering the whole upstream output first."""
    from sh import tr, python
    import time

    py = create_tmp_test("""
import sys
import os
import time

for l in "andrew":
    print(l)
    time.sleep(.2)
""")

    class Derp(object):
        # records received lines and inter-arrival times
        def __init__(self):
            self.times = []
            self.stdout = []
            self.last_received = None

        def agg(self, line):
            self.stdout.append(line.strip())
            now = time.time()
            if self.last_received:
                self.times.append(now - self.last_received)
            self.last_received = now

    derp = Derp()

    # three chained tr stages transform "andrew" -> "werdna"
    p = tr(
        tr(
            tr(
                python(py.name, _piped=True),
                "aw", "wa", _piped=True),
            "ne", "en", _piped=True),
        "dr", "rd", _out=derp.agg)
    p.wait()

    self.assertEqual("".join(derp.stdout), "werdna")
    # lines must arrive at roughly the child's 0.2s output rate -> streaming
    self.assertTrue(all([t > .15 for t in derp.times]))
def _():
    """xunit reporter: two failing its inside one describe each get an
    <error> element, with failures=2 on the suite."""
    cmd = sh.python('pocha/cli.py',
                    '--reporter', 'xunit',
                    'test/input/describe_with_multiple_failing_it.py',
                    _ok_code=[1],
                    _tty_out=False)
    stdout = cmd.stdout.decode('utf-8')
    expect(stdout).to.have.xpath('./testsuite[' +
                                 '@name="Pocha Tests" and ' +
                                 '@tests="1" and ' +
                                 '@errors="0" and ' +
                                 '@failures="2" and ' +
                                 '@skip="0"]')
    expect(stdout).to.have.xpath('./testsuite/testcase[' +
                                 '@name="can run a failing it" and ' +
                                 '@classname=""]/error')
    expect(stdout).to.have.xpath(
        './testsuite/testcase[' +
        '@name="can run another failing it" and ' +
        '@classname=""]/error')
def _():
    """Spec reporter: an "after all" hook failure is reported once, after
    the passing its, with its exception and source location."""
    cmd = sh.python(
        'pocha/cli.py',
        'test/input/multiple_its_in_a_describe_with_after_failure.py',
        _ok_code=[1],
        _tty_out=False)
    stdout = cmd.stdout.decode('utf-8')
    # Backslashes are doubled: `\)` / `\d` in a non-raw string are invalid
    # escape sequences (DeprecationWarning since 3.6, error in 3.12); the
    # doubled form produces byte-identical regex text.
    expect(stdout).to.match(u'''
  a describe
    ✓ can run a passing it
    ✓ can run another passing it
    1\\) "after all" hook

  2 passing \\(\\d+ms\\)
  1 failing

  1\\) "after all" hook:
     Exception: doing it on purpose
       File "test/input/multiple_its_in_a_describe_with_after_failure.py", line 8, in teardown
         raise Exception\\('doing it on purpose'\\)
''')
def test_dry_run_kill(runner):
    """A dry run that kills itself records state 'killed' and exit code 255
    (variant that mutates os.environ directly)."""
    with runner.isolated_filesystem():
        with open("train.py", "w") as f:
            # uncomment the os.kill line in the fixture script
            f.write(train_py.replace("#os.kill", "os.kill"))
        os.environ["WANDB_MODE"] = "dryrun"
        os.environ["WANDB_TEST"] = "true"
        try:
            res = sh.python("train.py", epochs=10, _bg=True)
            try:
                res.wait()
                print(res)
            except sh.ErrorReturnCode:
                # the self-kill makes a nonzero exit expected
                pass
            dirs = glob.glob("wandb/dry*")
            print(dirs)
            run_dir = dirs[0]
            meta = json.loads(open(run_dir + "/wandb-metadata.json").read())
            assert meta["state"] == "killed"
            assert meta["exitcode"] == 255
            assert meta["args"] == ["--epochs=10"]
        finally:
            # don't leak the env overrides into other tests
            del os.environ["WANDB_MODE"]
            del os.environ["WANDB_TEST"]
def test_decode_error_handling(self):
    """_decode_errors controls what happens to bytes the chosen _encoding
    cannot decode: strict raises, "ignore" drops them."""
    from functools import partial

    py = create_tmp_test("""
# -*- coding: utf8 -*-
import sys
import os
sys.stdout = os.fdopen(sys.stdout.fileno(), 'wb')
IS_PY3 = sys.version_info[0] == 3
if IS_PY3:
    sys.stdout.write(bytes("te漢字st", "utf8"))
else:
    sys.stdout.write("te漢字st")
""")
    fn = partial(python, py.name, _encoding="ascii")
    def s(fn):
        str(fn())
    # strict (default) decoding of non-ascii bytes must raise
    self.assertRaises(UnicodeDecodeError, s, fn)

    # "ignore" silently drops the undecodable bytes
    p = python(py.name, _encoding="ascii", _decode_errors="ignore")
    self.assertEqual(p, "test")
def build(target_python, requirements):
    """
    Builds an APK given a target Python and a set of requirements.
    """
    if not requirements:
        return
    testapp = 'setup_testapp_python2.py'
    android_sdk_home = os.environ['ANDROID_SDK_HOME']
    android_ndk_home = os.environ['ANDROID_NDK_HOME']
    if target_python == TargetPython.python3:
        testapp = 'setup_testapp_python3.py'
    # the interpreter recipe itself must be part of the requirements
    requirements.add(target_python.name)
    requirements = ','.join(requirements)
    print('requirements:', requirements)
    with current_directory('testapps/'):
        try:
            # _iter streams the build output line by line so progress shows
            for line in sh.python(
                    testapp, 'apk', '--sdk-dir', android_sdk_home,
                    '--ndk-dir', android_ndk_home, '--bootstrap', 'sdl2',
                    '--requirements', requirements,
                    _err_to_out=True, _iter=True):
                print(line)
        # NOTE(review): `as e` is unused and the bare re-raise makes this
        # clause a no-op -- presumably kept as a hook point; confirm intent.
        except sh.ErrorReturnCode as e:
            raise
def test_stdout_callback_no_wait(self):
    """Returning without waiting means the callback hasn't seen every line yet."""
    import time

    py = create_tmp_test("""
import sys
import os
import time

for i in range(5):
    print(i)
    time.sleep(.5)
""")

    collected = []

    def collect(line):
        collected.append(line)

    # u=True keeps the child's output unbuffered so lines stream promptly
    p = python(py.name, _out=collect, u=True)

    # we give a little pause to make sure that the NamedTemporaryFile
    # exists when the python process actually starts
    time.sleep(0.5)

    # child needs ~2.5s to emit all 5 lines; we only waited 0.5s
    self.assertTrue(len(collected) != 5)
def sign_file(self, work_dir, from_, cert_type, signing_formats, to=None):
    """Sign `from_` via signtool.py, writing to `to` (defaults to in-place).

    work_dir        -- directory holding token/nonce files; also the cwd for signtool
    cert_type       -- certificate class used to pick signing servers
    signing_formats -- list of formats, each passed as a -f flag
    """
    if to is None:
        to = from_
    token = os.path.join(work_dir, "token")
    nonce = os.path.join(work_dir, "nonce")
    self.get_token(token, cert_type, signing_formats)
    signtool = os.path.join(self.tools_checkout, "release/signing/signtool.py")
    cmd = [signtool, "-n", nonce, "-t", token, "-c", self.cert]
    # one -H per eligible signing server
    for s in self.get_suitable_signing_servers(cert_type, signing_formats):
        cmd.extend(["-H", s.server])
    for f in signing_formats:
        cmd.extend(["-f", f])
    cmd.extend(["-o", to, from_])
    log.debug("Running python %s", " ".join(cmd))
    out = sh.python(*cmd, _err_to_out=True, _cwd=work_dir)
    log.debug("COMMAND OUTPUT: %s", out)
    abs_to = os.path.join(work_dir, to)
    # record checksums of the signed artifact for auditability
    log.info("SHA512SUM: %s SIGNED_FILE: %s", get_hash(abs_to, "sha512"), to)
    log.info("SHA1SUM: %s SIGNED_FILE: %s", get_hash(abs_to, "sha1"), to)
    log.debug("Finished signing")
def test_auto_change_buffering(self):
    """With _out_bufsize=1 (line buffering), both text lines and trailing
    binary data are captured intact."""
    binary = b'\xec;\xedr\xdbF\x92\xf9\x8d\xa7\x98\x02/\x15\xd2K\xc3\x94d\xc9'

    py1 = create_tmp_test("""
import sys
import os
import time

sys.stdout = os.fdopen(sys.stdout.fileno(), "wb", 0)
sys.stdout.write(b"testing")
sys.stdout.flush()
# to ensure that sh's select loop picks up the write before we write again
time.sleep(0.5)
sys.stdout.write(b"again\\n")
sys.stdout.flush()
time.sleep(0.5)
sys.stdout.write(%r)
sys.stdout.flush()
""" % binary)

    out = python(py1.name, _out_bufsize=1)
    self.assertTrue(
        out.stdout == b'testingagain\n\xec;\xedr\xdbF\x92\xf9\x8d\xa7\x98\x02/\x15\xd2K\xc3\x94d\xc9'
    )
def _():
    """Spec reporter: a failing "before each" hook fails every it in the
    describe, each with its own numbered failure entry."""
    cmd = sh.python(
        'pocha/cli.py',
        'test/input/multiple_its_with_before_each_failure.py',
        _ok_code=[1],
        _tty_out=False)
    stdout = cmd.stdout.decode('utf-8')
    # Backslashes are doubled: `\)` / `\d` in a non-raw string are invalid
    # escape sequences (DeprecationWarning since 3.6, error in 3.12); the
    # doubled form produces byte-identical regex text.
    expect(stdout).to.match(u'''
    1\\) "before each" hook for "can run a passing it"
    2\\) "before each" hook for "can run another passing it"

  0 passing \\(\\d+ms\\)
  2 failing

  1\\) "before each" hook for "can run a passing it":
     Exception: doing it on purpose
       File "test/input/multiple_its_with_before_each_failure.py", line 6, in setup
         raise Exception\\('doing it on purpose'\\)

  2\\) "before each" hook for "can run another passing it":
     Exception: doing it on purpose
       File "test/input/multiple_its_with_before_each_failure.py", line 6, in setup
         raise Exception\\('doing it on purpose'\\)
''')
def test_quote_escaping(self):
    """Arguments containing spaces and quotes reach the child unsplit and
    unmangled (no shell-style re-tokenization)."""
    py = create_tmp_test("""
from optparse import OptionParser
parser = OptionParser()
options, args = parser.parse_args()
print(args)
""")
    out = python(py.name, "one two three").strip()
    self.assertEqual(out, "['one two three']")

    # embedded (unbalanced) double quote survives
    out = python(py.name, "one \"two three").strip()
    self.assertEqual(out, "['one \"two three']")

    out = python(py.name, "one", "two three").strip()
    self.assertEqual(out, "['one', 'two three']")

    out = python(py.name, "one", "two \"haha\" three").strip()
    self.assertEqual(out, "['one', 'two \"haha\" three']")

    # single quotes survive too
    out = python(py.name, "one two's three").strip()
    self.assertEqual(out, "[\"one two's three\"]")

    out = python(py.name, 'one two\'s three').strip()
    self.assertEqual(out, "[\"one two's three\"]")
def main(args):
    """Rewrite service names across Data/Site (and ../Cloud control-plane
    and cloudConfig files) using the map produced by Scripts/serviceMap.py.

    Must be run from Data/Site; returns -1 when run elsewhere.
    """
    if not os.path.exists('services.json'):
        print('Must run this script from Data/Site')
        return -1

    # NOTE(review): tempfile.mktemp is race-prone and deprecated; mkstemp
    # would be safer -- left as-is here.
    tmpfile = tempfile.mktemp(suffix='.txt')
    print(tmpfile)
    sh.python('../../Scripts/serviceMap.py', '-s', tmpfile)
    service_map = get_service_map(tmpfile)

    # top-level services.{json,yml,yaml}
    exts = ['json', 'yml', 'yaml']
    for ext in exts:
        contents = get_contents('services.%s' % ext)
        if contents:
            contents = process_services(service_map, contents)
            if contents:
                store_contents('services.%s' % ext, contents)

    # every file under the services/ subtree
    if os.path.exists('services') and os.path.isdir('services'):
        for root, dirs, files in os.walk('services'):
            for f in files:
                file_name = os.path.join(root, f)
                contents = get_contents(file_name)
                if contents:
                    contents = process_services(service_map, contents)
                    if contents:
                        store_contents(file_name, contents)

    # control-plane files (ccp/rcp/scp prefixes) under ../Cloud
    if os.path.exists('../Cloud') and os.path.isdir('../Cloud'):
        file_types = ['ccp', 'rcp', 'scp']
        for root, dirs, files in os.walk('../Cloud'):
            for f in files:
                for ft in file_types:
                    if f.startswith(ft):
                        file_name = os.path.join(root, f)
                        contents = get_contents(file_name)
                        if contents:
                            contents = process_control_plane(
                                service_map, contents)
                            if contents:
                                store_contents(file_name, contents)

    # cloudConfig/CloudConfig files under ../Cloud
    if os.path.exists('../Cloud') and os.path.isdir('../Cloud'):
        for root, dirs, files in os.walk('../Cloud'):
            for f in files:
                if f.find('cloudConfig') != -1 or f.find('CloudConfig') != -1:
                    file_name = os.path.join(root, f)
                    contents = get_contents(file_name)
                    if contents:
                        contents = process_cloud_config(service_map, contents)
                        if contents:
                            store_contents(file_name, contents)

    # Disabled passes, preserved from the original as commented-out code:
    # identical walk-and-rewrite loops for networkConfig, environmentConfig,
    # and serverConfig files under ../Cloud (via process_network_config,
    # process_environment_config, process_server_config) and for
    # network_traffic.{json,yml,yaml} plus the network_traffic/ subtree
    # (via process_network_traffic). Restore from version control if needed.

    os.remove(tmpfile)
def grade_q2a(uniqname):
    """Grade question 2a (merge-conflict resolution) for one student.

    Clones the student's csprag-git-conflict1 repo, then checks that
    main.py/test.sh exist, contain no unresolved conflict markers, run, and
    produce output fuzzy-matching the expected merged text.

    Returns (grade, html_feedback_text).
    """
    try:
        repo, path = clone('q2a', uniqname, 'csprag-git-conflict1')
    except sh.ErrorReturnCode as e:
        # clone failed: zero points plus a full command transcript
        text = '''
<p><strong>Error! Failed to clone {}</strong></p>
<p>Ran command: <tt>{}</tt></p>
<p>stdout:</p>
<pre>
{}
</pre></p>
<p>stderr:</p>
<pre>
{}
</pre>
'''.format('csprag-git-conflict1', e.full_cmd, e.stdout.decode('utf8'),
           e.stderr.decode('utf8'))
        return 0, text

    with sh.pushd(path):
        # expected post-merge main.py output
        golden = '''\
Welcome to the simple test program
According to current estimates, the diag construction will be done:
Summer 2017.
'''
        # expected post-merge test.sh output
        test_golden = '''\
Success
'''
        text = ''
        grade = 0
        text += '<dd><strong>Merge content conflict [base 1.0]</strong></dd>\n'
        grade += 1.0
        if not os.path.exists('main.py'):
            text += '<dd>No <tt>main.py</tt> in repository [-1.0]</dd>'
            grade -= 1.0
        elif not os.path.exists('test.sh'):
            text += '<dd>No <tt>test.sh</tt> in repository [-1.0]</dd>'
            grade -= 1.0
        else:
            mainpy = open('main.py').read()
            testsh = open('test.sh').read()
            # leftover conflict markers mean the merge was never resolved
            if ('>>>>' in mainpy) or ('<<<<' in mainpy):
                text += '<dd>Unresolved conflict in <tt>main.py</tt>. Contents:<pre>{}</pre></dd>'.format(
                    mainpy)
                grade -= 1.0
            elif ('>>>>' in testsh) or ('<<<<' in testsh):
                text += '<dd>Unresolved conflict in <tt>test.sh</tt>. Contents:<pre>{}</pre></dd>'.format(
                    testsh)
                grade -= 1.0
            else:
                try:
                    out = sh.python('main.py')
                    test_out = sh.bash('test.sh')
                    if ('diag construction' not in out) or ('Summer 2017' not in out):
                        text += '<dd><tt>main.py</tt> content not merged, missing diag construction or completion date [-1.0]. Output of main.py:<pre>{}</pre></dd>'.format(
                            out)
                        grade -= 1.0
                    else:
                        # fuzz is a fuzzy string matching library that should
                        # allow for modest formatting or text differences
                        if fuzz.ratio(out, golden) < 75:
                            text += '<dd>Output of main.py seems to be a merge, but not quite correct [-0.4]. Expected <pre>{}</pre>, student main.py output <pre>{}</pre>'.format(
                                golden, out)
                            grade -= 0.4
                        if fuzz.ratio(test_out, test_golden) < 75:
                            text += '<dd><tt>test.sh</tt> does not report success [-0.4]. Output of test.sh:<pre>{}</pre></dd>'.format(
                                test_out)
                            grade -= 0.4
                except sh.ErrorReturnCode as e:
                    text += '<dd><tt>main.py</tt> or <tt>test.sh</tt> does not run [-1.0]. Output <pre>Ran {}\n\nStdout\n{}Stderr\n{}</pre></dd>'.format(
                        e.full_cmd, e.stdout.decode('utf8'), e.stderr.decode('utf8'))
                    grade -= 1.0
        if grade == 1.0:
            text += '<dd>All correct!</dd>'

    # NOTE(review): q2a_entry is built but never returned or used here --
    # presumably consumed elsewhere or leftover; confirm.
    q2a_entry = '''
<dt>Question 2a</dt>
<dd>{:1.1f}/1.0</dd>
<dl>
{}</dl>
'''.format(grade, text)
    return grade, text
# Release helper: derive MICRO_VERSION from the repo's total commit count,
# rewrite setup.py and whereami/__init__.py with the resulting version,
# then build/upload the distribution and reinstall the package.
commit_count = sh.git('rev-list', ['--all']).count('\n')

with open('setup.py') as f:
    setup = f.read()

# stamp the new micro version into setup.py's source text
setup = re.sub("MICRO_VERSION = '[0-9]+'",
               "MICRO_VERSION = '{}'".format(commit_count), setup)

major = re.search("MAJOR_VERSION = '([0-9]+)'", setup).groups()[0]
minor = re.search("MINOR_VERSION = '([0-9]+)'", setup).groups()[0]
micro = re.search("MICRO_VERSION = '([0-9]+)'", setup).groups()[0]
version = '{}.{}.{}'.format(major, minor, micro)

with open('setup.py', 'w') as f:
    f.write(setup)

# keep the package's __version__ in sync with setup.py
with open('whereami/__init__.py') as f:
    init = f.read()
with open('whereami/__init__.py', 'w') as f:
    f.write(
        re.sub('__version__ = "[0-9.]+"',
               '__version__ = "{}"'.format(version), init))

print(sh.python('setup.py', ['sdist', 'bdist_wheel', 'upload']))
sh.cd('../')
sh.pip3('install', ['-U', 'whereami'])
# For each burst-count setting: rewrite the config file in place, regenerate
# run.sh.bak with the right program name, run the simulation, and plot the
# horovod worker results.
for new_config in tests:
    with open(config_file, "r+") as f:
        d = f.readlines()
        f.seek(0)
        for i in d:
            print(f"{i}")
            # remember the link bandwidth for naming the log directory
            if i.find("link_data_rate_megabit_per_s") != -1:
                network_bw = i.split("=")[1].strip("\n") + "Mbits"
            # rewrite only the num_of_active_bursts line
            if i.find("num_of_active_bursts") == -1:
                f.write(i)
            else:
                f.write(f"num_of_active_bursts={new_config}")
        f.truncate()
    # work on a copy of run.sh so the template stays untouched
    sh.cp(
        "/mnt/raid10/hanjing/thesis/ns3/ns3-basic-sim/simulator/run.sh",
        "/mnt/raid10/hanjing/thesis/ns3/ns3-basic-sim/simulator/run.sh.bak"
    )
    # sh.sed("-i", f"s/program/{main_program}/g",
    #        ".../run.sh.bak", _err="sed_err_log", _out="sed_out_log")
    sh.sed(
        "-i",
        f"s/program/{main_program}/g",
        "/mnt/raid10/hanjing/thesis/ns3/ns3-basic-sim/simulator/run.sh.bak"
    )
    new_log_dir = f"logs_ns3_{network_bw}_ActiveBurstsCt_{new_config}"
    print(f"new_log_dir: {new_log_dir}")
    sh.bash(
        "/mnt/raid10/hanjing/thesis/ns3/ns3-basic-sim/simulator/run.sh.bak"
    )
    sh.python(
        "/mnt/raid10/hanjing/thesis/ns3/ns3-basic-sim/simulator/src/basic-apps/helper/horovod_worker_plot.py",
        f"{log_dir}/{horovod_prg}")
    # sh.cp("-r", f"{log_dir}", f"{run_dir}/{new_log_dir}")
# Testing shell commands from Python by Zwaan / refactor by Bragatte
# <https://blog.esciencecenter.nl/testing-shell-commands-from-python-2a2ec87ebf71>
import sh

# NOTE(review): this runs `python setup.py install` at import time; it looks
# like a leftover from the blog example rather than part of the test module —
# confirm whether it is intentional before removing.
sh.python(['setup.py', 'install'])

import pytest
import os
import sh


def test_install(cookies):
    """Bake a cookiecutter project and verify that `setup.py install` runs
    successfully from inside the generated project directory.

    :param cookies: pytest-cookies fixture used to render the template.
    """
    # generate a temporary project using the cookiecutter cookies fixture
    project = cookies.bake()

    # remember the directory where tests should be run from
    cwd = os.getcwd()

    # change directories to the generated project directory
    # (the installation command must be run from here)
    os.chdir(str(project.project))

    try:
        # run the shell command
        sh.python(['setup.py', 'install'])
    except sh.ErrorReturnCode as e:
        # print the error, so we know what went wrong
        print(e)
        # make sure the test fails; pytest.fail expects a string reason,
        # so stringify the exception (passing the exception object itself
        # is rejected / rendered uselessly by modern pytest)
        pytest.fail(str(e))
    finally:
        # always change directories to the test directory
        os.chdir(cwd)
def main(name):
    """Resolve the data paths for *name* and run forward_structure_base.py
    as a child process, passing each path via its matching CLI flag."""
    paths = get_paths(name)
    script = join(current_file_dir, "forward_structure_base.py")
    # Pair each flag with its path value, in the order get_paths returns them.
    flags = ("--base", "--cmtfiles", "--ref", "--output", "--database")
    argv = []
    for flag, value in zip(flags, paths):
        argv.append(flag)
        argv.append(value)
    sh.python(script, *argv)
# NOTE(review): the enclosing `def parse_args():` header appears to be
# truncated from this excerpt — the statements below build and return the
# CLI argument parser and are referenced as parse_args() in __main__.
argparser = ArgumentParser()
argparser.add_argument('logfile')
argparser.add_argument('--random', action='store_true')
argparser.add_argument('--metric', default='cosine')
argparser.add_argument('--type', default='similarity')
return argparser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    # Sweep over cluster-center counts; for each, try every top_centers value
    # from one increment up to num_centers (inclusive).
    for num_centers in [10, 20, 50, 100, 1000, 2000, 4000, 8000]:
        # Coarser steps (1/10th) for small runs, finer (1/100th) for large.
        if num_centers < 100:
            increment = int(num_centers / 10)
        else:
            increment = int(num_centers / 100)
        for top_centers in range(increment, num_centers + increment,
                                 increment):
            print("Running with parameters: %u / %u" % (top_centers,
                                                        num_centers))
            # Launch one experiment per parameter combination; keyword args
            # become `--key value` CLI options (_long_sep=" " makes sh emit
            # "--key value" instead of "--key=value").
            python(
                cmd, **{
                    "num_centers": num_centers,
                    "top_centers": top_centers,
                    "logfile": args.logfile,
                    "random": args.random,
                    "metric": args.metric,
                    "type": args.type,
                    "_long_sep": " "
                })
def python_job_func(job_file_name):
    """Echo and then execute the given Python job file as a subprocess.

    :param job_file_name: path of the Python script to run via ``sh.python``.

    Fix: the original used a Python-2-only ``print`` statement; the
    parenthesized single-argument form below produces identical output while
    being valid on both Python 2 and Python 3.
    """
    print("python %s " % job_file_name)
    sh.python(job_file_name)
def test_valid():
    """zuul_lint must accept a known-valid Zuul config file (exit code 0)."""
    try:
        sh.python(["-m", "zuul_lint", "tests/data/zuul-config-valid.yaml"])
    except sh.ErrorReturnCode as e:
        # pytest.fail expects a string reason; stringify the exception so the
        # failure message includes the command line and captured output
        # (passing the exception object itself is not accepted by pytest).
        pytest.fail(str(e))
def gen_model_code(model_codegen_dir,
                   platform,
                   model_file_path,
                   weight_file_path,
                   model_sha256_checksum,
                   weight_sha256_checksum,
                   input_nodes,
                   input_data_formats,
                   output_nodes,
                   output_data_formats,
                   check_nodes,
                   runtime,
                   model_tag,
                   input_shapes,
                   input_ranges,
                   output_shapes,
                   check_shapes,
                   dsp_mode,
                   embed_model_data,
                   winograd,
                   quantize,
                   quantize_range_file,
                   change_concat_ranges,
                   obfuscate,
                   model_graph_format,
                   data_type,
                   cl_mem_type,
                   graph_optimize_options):
    """Build the MACE converter with bazel and run it to generate model code
    into a freshly recreated *model_codegen_dir*."""
    bazel_build_common("//mace/python/tools:converter")

    # Start from an empty output directory so stale files from a previous
    # conversion cannot leak into this run.
    if os.path.exists(model_codegen_dir):
        sh.rm("-rf", model_codegen_dir)
    sh.mkdir("-p", model_codegen_dir)

    # (flag, value) pairs, in exactly the order the converter expects.
    options = [
        ("platform", platform),
        ("model_file", model_file_path),
        ("weight_file", weight_file_path),
        ("model_checksum", model_sha256_checksum),
        ("weight_checksum", weight_sha256_checksum),
        ("input_node", input_nodes),
        ("input_data_formats", input_data_formats),
        ("output_node", output_nodes),
        ("output_data_formats", output_data_formats),
        ("check_node", check_nodes),
        ("runtime", runtime),
        ("template", "mace/python/tools"),
        ("model_tag", model_tag),
        ("input_shape", input_shapes),
        ("input_range", input_ranges),
        ("output_shape", output_shapes),
        ("check_shape", check_shapes),
        ("dsp_mode", dsp_mode),
        ("embed_model_data", embed_model_data),
        ("winograd", winograd),
        ("quantize", quantize),
        ("quantize_range_file", quantize_range_file),
        ("change_concat_ranges", change_concat_ranges),
        ("obfuscate", obfuscate),
        ("output_dir", model_codegen_dir),
        ("model_graph_format", model_graph_format),
        ("data_type", data_type),
        ("graph_optimize_options", graph_optimize_options),
        ("cl_mem_type", cl_mem_type),
    ]
    argv = ["-u"]
    argv.extend("--%s=%s" % (key, value) for key, value in options)

    # _fg=True runs the converter in the foreground, inheriting stdio.
    sh.python("bazel-bin/mace/python/tools/converter", *argv, _fg=True)
# Driver script: run the nlp_ml spam-classification pipeline on the mini
# SMSSpamCollection dataset as a subprocess and store results in a SQLite db.
import os
import sys
import glob
import pdb
import nlp_ml
from nlp_ml import CustomPipeline, spacy_tokenizer, cleaner
import numpy as np
from sqlitedict import SqliteDict

try:
    import sh
except ImportError:
    # fallback: emulate the sh API with pbs
    import pbs

    class Sh(object):
        # Mimic sh's dynamic command lookup: any attribute access resolves
        # to a pbs.Command for the executable of the same name.
        def __getattr__(self, attr):
            return pbs.Command(attr)

    sh = Sh()

# Dataset paths, training-iteration count and output database location.
train = './spam_data/SMSSpamCollectionMini_train.csv'
test = './spam_data/SMSSpamCollectionMini_test.csv'
iters = 2
path_to_db = './output.db'

# Run nlp_ml.py as a child process; sh stringifies the int `iters` argument.
sh.python('nlp_ml.py', train, test, iters, path_to_db)
def main():
    """CLI entry point for pypi-up.

    Parses the command line, optionally scaffolds the config (``--setup``),
    bumps the package version (patch/minor/major/explicit), then optionally
    git-tags/pushes the release and uploads it to PyPI.  Python 2 code
    (``raw_input``, ``ConfigParser``, ``ex.message``).
    """
    try:
        prog = "pypi-up"
        desc = "%s %s" % (__title__, __version__)
        desc += " - pypi-up a simple command line tool to increase version number of package" \
                "and release on Pypi. Also Git Tag/Push the release"
        parser = argparse.ArgumentParser(prog=prog, description=desc)
        parser.add_argument("--setup",
                            help="Setup PYPI-REL",
                            action="store_true")
        parser.add_argument("-v", "--version",
                            help="Show the current version",
                            action="store_true")
        parser.add_argument("-p", "--patch",
                            help="Increment PATCH version",
                            action="store_true")
        parser.add_argument("-m", "--minor",
                            help="Increment MINOR version and reset patch",
                            action="store_true")
        parser.add_argument(
            "-j", "--major",
            help="Increment MAJOR version and reset minor and patch",
            action="store_true")
        parser.add_argument("-e", "--edit",
                            help="Manually enter the version number to bump",
                            action="store")
        parser.add_argument(
            "--dry",
            help=
            "DRY RUN. To test the release, but it will not make any changes",
            action="store_true")
        parser.add_argument("-x", "--skip-prompt",
                            help="Skip prompt",
                            action="store_true")
        arg = parser.parse_args()
        config = ConfigParser.ConfigParser()

        print("-" * 80)
        print("=== PYPI Up ===")
        print("")

        # --setup: create __about__.py and the setup.cfg section, then exit.
        if arg.setup:
            print("Setting up...")
            if not os.path.isfile(about_file):
                with open(about_file, "w+") as f:
                    f.write(about_file_content)
            if not os.path.isfile(setup_cfg):
                config.add_section(conf_section_name)
                config.set(conf_section_name, "version-file", "__about__.py")
                config.set(conf_section_name, "auto-increment", "patch")
                with open(setup_cfg, "w+") as f:
                    config.write(f)
            print("Done!")
            print("-" * 80)
            exit()

        # All release work happens inside the project directory.
        with sh.pushd(CWD):
            # Refuse to release with uncommitted changes present.
            if sh.git("status", "--porcelain").strip():
                raise Exception("Repository is UNCLEAN. \nCommit your changes")

            config.read(setup_cfg)
            version_file = config.get(conf_section_name, "version-file")
            version_file = os.path.join(CWD, version_file)
            if not os.path.isfile(version_file):
                raise Exception("version-file '%s' is required" % version_file)
            auto_inc = ""
            if config.has_option(conf_section_name, "auto-increment"):
                auto_inc = config.get(conf_section_name,
                                      "auto-increment").strip()

            rvnup = Reversionup(file=setup_cfg)
            old_version = rvnup.version

            def test_auto_inc(auto_inc_):
                # True when auto-increment mode applies: configured level
                # matches and no explicit bump flag was given on the CLI.
                return auto_inc == auto_inc_ and (
                    not arg.patch and not arg.minor and not arg.major
                    and not arg.version and not arg.edit)

            # Pick the version bump: explicit edit wins, then CLI flags,
            # then the configured auto-increment level.
            if arg.edit:
                rvnup.version = arg.edit
            elif arg.patch or test_auto_inc("patch"):
                rvnup.inc_patch()
            elif arg.minor or test_auto_inc("minor"):
                rvnup.inc_minor()
            elif arg.major or test_auto_inc("major"):
                rvnup.inc_major()
            elif arg.version:
                # -v only reports the current version; no release.
                print("Current version: %s" % rvnup.version)
                print("-" * 80)
                exit()

            if arg.dry:
                print("** DRY RUNNING **")
                print("")
            # NOTE(review): statement grouping under `if arg.dry` inferred
            # from the flattened excerpt — confirm against upstream source.
            print("* New version: %s " % rvnup.version)
            print("Old version: %s" % old_version)
            print("")

            # Interactive confirmations, unless -x/--skip-prompt was given.
            if not arg.skip_prompt \
                    and raw_input("Continue with the release? (y | n) ").strip() == "n":
                print("** Release Aborted")
                print("-" * 80)
                exit()

            skip_tag = not arg.skip_prompt \
                and raw_input("Git Tag/Push release version? (y | n) ").strip().lower() == "n"
            skip_pypi = not arg.skip_prompt \
                and raw_input("Release to PYPI? \n(y | n) ").strip().lower() == "n"
            print("")

            # Persist the bumped version unless dry-running.
            if not arg.dry:
                rvnup.write()
                replace_file_version(version_file, rvnup.version)

            if arg.dry or skip_tag:
                print("- Git Tag/Push release: skipped")
            else:
                tag_name = "v%s" % rvnup.version
                print("+ Git Tag release version: %s " % tag_name)
                sh.git("add", ".")
                sh.git("commit", "-m", "Tagged release: %s" % tag_name)
                sh.git("tag", "-a", tag_name, "-m", tag_name)
                print("+ Git Push release to repo ...")
                sh.git("push", "origin", "master")
                sh.git("push", "--tags")

            if arg.dry or skip_pypi:
                print("- Release to Pypi: skipped")
            else:
                print("+ Releasing to PYPI ...")
                sh.python("setup.py", "register", "-r", "pypi")
                sh.python("setup.py", "sdist", "upload", "-r", "pypi")

        print("-" * 80)
        print("")
    except Exception as ex:
        # ex.message is Python-2-only; prints the error and exits non-zero.
        print("Error: %s" % ex.message)
        exit(1)
arg_list = [ filename_arg, path, ss_symex_opt, PC, rep_opt, rep, struct_opt, struct ] else: run_description = '-- running ss-concrete --\n' arg_list = [filename_arg, path, ss_opt] print run_description f.append_data(run_summary_log, run_description) while (not done) and (not exceeded_num_runs): print 'RUN', test_ctr, ':', #sh.python(prog_name, benchmark_path, _out=output_log, _err=process_stderr) # ./secam.py --filename ./examples/heater/heater.tst --ss-symex klee sh.python(prog_name, *arg_list, _out=process_stdout, _err=process_stderr) f.append_data(output_log, STDOUT_DATA) STDOUT_DATA = '' test_ctr = test_ctr + 1 if LAST_RESULT else test_ctr done = NUM_TESTS == test_ctr exceeded_num_runs = test_ctr > MAX_TESTS avg_time_str = 'average time = {}\n'.format(T / NUM_TESTS) success_str = 'successfull runs = {}\n'.format(SUCC_CTR) fail_str = 'failures = {}\n'.format(FAIL_CTR) # time_summary = ''.join(time_spent_list) \ time_summary = \ 'total_time_taken = {}\n'.format(T) \ + avg_time_str \
def render_paraview_scene(pvd_file,
                          outfile=None,
                          field_name='m',
                          timesteps=None,
                          camera_position=[0, -200, +200],
                          camera_focal_point=[0, 0, 0],
                          camera_view_up=[0, 0, 1],
                          view_size=(800, 600),
                          magnification=1,
                          fit_view_to_scene=True,
                          color_by_axis=0,
                          colormap='coolwarm',
                          rescale_colormap_to_data_range=True,
                          show_colorbar=True,
                          colorbar_label_format='%-#5.2g',
                          add_glyphs=True,
                          glyph_type='cones',
                          glyph_scale_factor=1.0,
                          glyph_random_mode=True,
                          glyph_mask_points=True,
                          glyph_max_number_of_points=10000,
                          show_orientation_axes=False,
                          show_center_axes=False,
                          representation="Surface With Edges",
                          palette='screen',
                          use_parallel_projection=False,
                          trim_border=True,
                          rescale=None,
                          diffuse_color=None,
                          debug=False,
                          use_display=None):
    # NOTE(review): the mutable list default arguments (camera_*) are shared
    # across calls; harmless here only because they are never mutated.
    """Render a Paraview scene from *pvd_file* to *outfile* (a temporary PNG
    if None) by generating a small driver script in a temp directory and
    executing it in a separate Python process, optionally on an xpra-backed
    X display.  Returns an IPython Image of the result, or None on failure.
    """
    # Convert color_by_axis to integer and store the name separately.
    # AttributeError fires when color_by_axis is already an int (no .lower()).
    try:
        color_by_axis = _axes[color_by_axis.lower()]
    except AttributeError:
        if not color_by_axis in [0, 1, 2, -1]:
            raise ValueError("color_by_axis must have one of the values "
                             "[0, 1, 2, -1] or ['x', 'y', 'z', 'magnitude']. "
                             "Got: {}".format(color_by_axis))

    # Use absolute path for filenames because the script will be
    # written to a temporary directory in a different location.
    pvd_file = os.path.abspath(pvd_file)
    if outfile is None:
        _, outfile = tempfile.mkstemp(suffix='.png')
        outfile_is_temporary = True
    else:
        outfile_is_temporary = False
    outfile = os.path.abspath(outfile)
    outdir = os.path.dirname(outfile)
    if not os.path.exists(outdir):
        logger.debug(
            "Creating non-existing directory component '{}' of output filename."
            .format(outdir))
        os.makedirs(outdir)
        logger.debug("Done.")

    #
    # Create the temporary script. The string 'script_string' will
    # contain a call to the function in 'visualization_impl.py' which
    # has all the parameter values filled in correctly.
    #
    tmpdir = tempfile.mkdtemp()
    scriptfile = os.path.join(tmpdir, 'render_scene.py')
    # The generated script ensures a DISPLAY is available before importing
    # the Paraview-dependent implementation (os.environ.has_key => Python 2).
    script_string = textwrap.dedent("""
        from visualization_impl import render_paraview_scene, find_valid_X_display
        import os

        if not os.environ.has_key('DISPLAY'):
            display = find_valid_X_display()
            if display is None:
                raise RuntimeError("Could not render Paraview scene as no valid X display was found.")
            else:
                os.environ['DISPLAY'] = ':' + str(display)

        render_paraview_scene(
            '{}', '{}', {}, {}, {}, {},
            {}, {}, {}, {}, {}, '{}',
            {}, {}, '{}', {}, '{}', {},
            {}, {}, {}, {}, {},
            '{}', '{}', {}, {}, {}, {})
        """.format(
        pvd_file, outfile, repr(field_name),
        re.sub('\n', '', repr(timesteps)), camera_position,
        camera_focal_point, camera_view_up, view_size, magnification,
        fit_view_to_scene, color_by_axis, colormap,
        rescale_colormap_to_data_range, show_colorbar,
        colorbar_label_format, add_glyphs, glyph_type,
        glyph_scale_factor, glyph_random_mode, glyph_mask_points,
        glyph_max_number_of_points, show_orientation_axes,
        show_center_axes, representation, palette,
        use_parallel_projection, trim_border, rescale, diffuse_color))

    with open(scriptfile, 'w') as f:
        f.write(script_string)

    # Locate the implementation module (source or compiled) and copy it next
    # to the generated script so the child process can import it.
    vis_impl_script = os.path.join(os.path.dirname(__file__),
                                   './visualization_impl.py')
    if not os.path.exists(vis_impl_script):
        vis_impl_script = os.path.join(os.path.dirname(__file__),
                                       './visualization_impl.so')
        if not os.path.exists(vis_impl_script):
            raise RuntimeError(
                "Cannot use Paraview visualisation. \nThis should not happen.")
    shutil.copy(vis_impl_script, tmpdir)

    # Execute the script in a separate process
    curdir_bak = os.getcwd()
    xpra_display = None
    use_xpra = configuration.get_config_option("visualization", "use_xpra",
                                               "True")
    # Remember the original DISPLAY so it can be restored afterwards.
    try:
        display_bak = os.environ['DISPLAY']
    except KeyError:
        display_bak = None
    try:
        os.chdir(tmpdir)
        if use_display is None:
            use_display = configuration.get_config_option(
                "visualization", "use_display", None)
        if use_display == 'None':
            use_display = None
        if use_display is None and use_xpra.lower() != "false":
            # Try to create a display using 'xpra'
            try:
                # Check whether 'xpra' is installed
                sh.xpra('--version')
                xpra_display = find_unused_X_display(xrange(10, 100))
                sh.xpra('start', ':{}'.format(xpra_display))
                use_display = xpra_display
                logger.debug(
                    "Rendering Paraview scene on display :{} using xpra.".
                    format(xpra_display))
            except sh.CommandNotFound:
                logger.warning(
                    "Could not find the 'xpra' executable. You may want to "
                    "install it to avoid annoying pop-up windows from "
                    "Paraview. Under Debian/Ubuntu you can install it via "
                    "'sudo apt-get install xpra'.")
                xpra_display = None
        if use_display is not None:
            os.environ['DISPLAY'] = ':{}'.format(use_display)
        script_stdout = StringIO.StringIO()
        script_stderr = StringIO.StringIO()
        sh.python('render_scene.py', _out=script_stdout, _err=script_stderr)
    except sh.ErrorReturnCode as ex:
        logger.error(
            "Could not render Paraview scene. Stdout and stderr of the script: "
            "'{}', '{}'".format(script_stdout.getvalue(),
                                script_stderr.getvalue()))
        # raise
    finally:
        if debug == True:
            logger.debug("Temporary directory '{}' kept for debugging. You "
                         "can try to run 'render_script.py' manually "
                         "there.".format(tmpdir))
        else:
            shutil.rmtree(tmpdir)
        # NOTE(review): placement of the cleanup below relative to the
        # debug if/else was inferred from the flattened excerpt — the chdir
        # and DISPLAY restore logically must run in every case.
        os.chdir(curdir_bak)  # change back into the original directory
        if xpra_display is not None:
            # XXX TODO: It may be nice to keep the xpra display open
            #           until Finmag exits, because we are likely to
            #           render more than one snapshot.
            sh.xpra('stop', ':{}'.format(xpra_display))
        if display_bak is not None:
            os.environ['DISPLAY'] = display_bak
        else:
            os.environ.pop('DISPLAY', None)

    try:
        image = IPython.core.display.Image(filename=outfile)
    except IOError:
        # Something went wrong (missing X display?); let's not choke but return
        # None instead.
        image = None

    if outfile_is_temporary:
        # Clean up temporary file
        os.remove(outfile)

    return image
def release():
    """Release/publish the documentation to the webpage.

    Stashes current work, switches to (or creates) the gh-pages branch,
    copies the freshly built HTML/assets in with GitHub-compatible folder
    names, regenerates the sitemap, commits, optionally rebases/pushes,
    and finally restores the original branch and stash.
    """
    # Save the current state.
    branch = git('rev-parse', '--abbrev-ref', 'HEAD').stdout.rstrip()
    git.stash('save', "Work in progress while updating gh-pages branch")

    # Check out the gh-pages branch.
    try:
        git.checkout('gh-pages')
    except ErrorReturnCode_128:
        # Create the branch if necessary.
        git.checkout('-b', 'gh-pages')

    # Remove the existing files in the base folder.
    extensions = ['*.html', '*.inv']
    fnames = util.multiglob('..', extensions)
    for fname in fnames:
        os.remove(fname)

    # Copy the new files to the base folder.
    fnames = util.multiglob(BUILD_DIR, extensions)
    for fname in fnames:
        shutil.copy(fname, '..')

    # Track the new files.
    fnames = util.multiglob('..', extensions)
    git.add(*fnames)

    # Copy but rename the folders referenced in the HTML files.
    # Github only recognizes images, stylesheets, and javascripts as folders.
    folders = [
        ('_images', 'images'),
        ('_static', 'javascripts'),
    ]
    for (src, dst) in folders:
        dst = os.path.join('..', dst)
        # Remove the existing folder.
        shutil.rmtree(dst, ignore_errors=True)
        # Copy the new folder.
        shutil.copytree(os.path.join(BUILD_DIR, src), dst)
        # Track the new folder.
        git.add(dst)

    # Update the HTML files to reference the new folder names.
    html_fnames = glob(os.path.join('..', '*.html'))
    util.replace(html_fnames, folders)

    # Copy and rename the examples folder.
    src = os.path.join(BUILD_DIR, 'examples')
    dst = '../examples2'
    # Remove the existing folder.
    shutil.rmtree(dst, ignore_errors=True)
    # Copy the new files.
    os.mkdir(dst)
    for fname in os.listdir(src):
        shutil.copy(os.path.join(src, fname), os.path.join(dst, fname))
    # Track the new folder.
    git.add(dst)

    # Update the HTML files to reference the new folder names.
    util.replace(html_fnames, [(r'"\./examples/', r'"./examples2/')])

    # Update the sitemap.
    print((python('sitemap_gen.py', config="sitemap_conf.xml")))

    # Commit the changes.
    try:
        git.commit('-a', m="Rebuilt documentation")
    except ErrorReturnCode_1:
        pass  # No changes to commit

    # If desired, rebase and push the changes to origin.
    print("The gh-pages branch has been updated and is currently checked out.")
    if util.yes("Do you want to rebase it and push the changes to "
                "origin (y/n)?"):
        git.rebase('-i', 'origin/gh-pages')
        git.push.origin('gh-pages')

    # Return to the original state.
    git.checkout(branch)
    try:
        git.stash.pop()
    except ErrorReturnCode_1:
        pass  # No stash was necessary in the first place.
    print(("Now back on " + branch))