def run(self):
    import benchmarks
    import os
    import platform

    # Windows has no os.fork(), so run the single supported backend
    # in-process and exit with its status.
    if os.name == "nt":
        exit(benchmarks.run(False, "ctypes"))

    set_test_environ()

    is_cpython = platform.python_implementation() == "CPython"
    runs = [(False, "ctypes"), (False, "cffi"), (True, None)]
    for is_gi, backend in runs:
        # The gi run is skipped when benchmarking pgi alone or when not
        # running under CPython.
        if is_gi and (self.pgi_only or not is_cpython):
            continue
        # Run each configuration in its own child process.
        pid = os.fork()
        if pid != 0:
            pid, status = os.waitpid(pid, 0)
            if status:
                # waitpid() returns an encoded wait status; decode it so a
                # child's nonzero exit code is not lost to 8-bit truncation.
                exit(os.WEXITSTATUS(status) or 1)
        else:
            exit(benchmarks.run(is_gi, backend))
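# A minimal, standalone sketch (not part of the project; POSIX-only, and
# run_in_child is a hypothetical name) of the fork-and-wait pattern the
# runner above depends on: execute a callable in a child process and
# translate the encoded wait status back into a plain exit code.
import os
import sys

def run_in_child(func):
    pid = os.fork()
    if pid == 0:
        # Child: the callable's return value becomes the exit status.
        os._exit(int(func() or 0))
    _, status = os.waitpid(pid, 0)
    if os.WIFSIGNALED(status):
        # The child died on a signal; follow the 128 + signum convention.
        return 128 + os.WTERMSIG(status)
    return os.WEXITSTATUS(status)

if __name__ == "__main__":
    sys.exit(run_in_child(lambda: 0))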
def benchmark(self, submit, native, modes):
    benchmarks.run(submit, native, modes)
                slaves.append(RemoteSlave(name))
            else:
                slaves.append(Slave(name))
    if not slaves:
        slaves = [Slave("main")]
    return slaves


if __name__ == "__main__":
    Mode = namedtuple('Mode', ['shell', 'args', 'env', 'name', 'cset'])
    state = sys.argv[1]
    fd = open(state, "rb")

    # pull out the global configuration
    utils.config = pickle.load(fd)
    utils.RepoPath = pickle.load(fd)
    utils.BenchmarkPath = pickle.load(fd)
    utils.DriverPath = pickle.load(fd)
    utils.Timeout = pickle.load(fd)
    utils.PythonName = pickle.load(fd)

    # pull out the pickled arguments
    submit = pickle.load(fd)
    native = pickle.load(fd)
    modes = pickle.load(fd)
    fd.close()

    # call the one true function
    benchmarks.run(submit, native, modes)
import benchmarks

benchmarks.generate()
benchmarks.configure()
benchmarks.build()
benchmarks.run()
    slaveNames = utils.config.getDefault('main', 'slaves', None)
    if slaveNames:
        slaveNames = slaveNames.split(",")
        for name in slaveNames:
            remote = utils.config.getDefault(name, 'remote', 1)
            if remote == 1:
                slaves.append(RemoteSlave(name))
            else:
                slaves.append(Slave(name))
    if not slaves:
        slaves = [Slave("main")]
    return slaves


if __name__ == "__main__":
    Mode = namedtuple('Mode', ['shell', 'args', 'env', 'name', 'cset'])
    state = sys.argv[1]
    fd = open(state, "rb")

    # pull out the global configuration
    utils.config = pickle.load(fd)

    # pull out the pickled arguments
    submit = pickle.load(fd)
    native = pickle.load(fd)
    modes = pickle.load(fd)
    fd.close()

    # call the one true function
    benchmarks.run(submit, native, modes)
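# The __main__ block above is the reading half of a pickle hand-off: it
# loads the state file named in argv[1] in a fixed order. A sketch of the
# hypothetical writer side follows (the spawn_runner name, the default
# script name, and the use of a temp file are assumptions; only the dump
# order, which must mirror the load order exactly, comes from the code
# above).
import pickle
import subprocess
import sys
import tempfile

def spawn_runner(config, submit, native, modes, script="runner.py"):
    # Dump state in exactly the order the reader pickle.load()s it back.
    with tempfile.NamedTemporaryFile(suffix=".pickle", delete=False) as fd:
        pickle.dump(config, fd)
        pickle.dump(submit, fd)
        pickle.dump(native, fd)
        pickle.dump(modes, fd)
        state = fd.name
    # Hand the state file to the runner script as argv[1].
    return subprocess.call([sys.executable, script, state])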