def check_if_installed(self):
    """Return True if the ApacheBenchmark binary responds to '-h'."""
    self.log.debug('Checking ApacheBenchmark: %s' % self.tool_path)
    try:
        shell_exec([self.tool_path, '-h'])
    except (CalledProcessError, OSError):
        return False
    else:
        return True
def check_if_installed(self):
    """Return True if the siege binary can be launched with '-h'."""
    self.log.debug('Check Siege: %s' % self.tool_path)
    try:
        shell_exec([self.tool_path, '-h'])
    except OSError:
        return False
    else:
        return True
def check_if_installed(self):
    """Return True if tsung answers '-v'; log install docs otherwise."""
    self.log.debug('Checking Tsung at %s' % self.tool_path)
    try:
        shell_exec([self.tool_path, '-v'])
    except OSError:
        # point the user to installation instructions when the binary is missing
        self.log.warning("Info for tsung installation: %s", self.INSTALLATION_DOCS)
        return False
    return True
def run_tests(self):
    """Launch the nose plugin subprocess that runs the python tests."""
    executable = self.settings.get("interpreter", sys.executable)
    cmdline = [executable, self.plugin_path,
               '-k', self.settings.get("report-file"),
               '-e', self.settings.get("err-file")]
    if self.load.iterations:
        cmdline += ['-i', str(self.load.iterations)]
    if self.load.hold:
        cmdline += ['-d', str(self.load.hold)]
    cmdline += [self.working_dir]

    # keep handles registered so they can be closed on shutdown
    std_out = open(self.settings.get("stdout"), "wt")
    self.opened_descriptors.append(std_out)
    std_err = open(self.settings.get("stderr"), "wt")
    self.opened_descriptors.append(std_err)

    env = BetterDict()
    env.merge(dict(os.environ))
    env.merge(self.env)

    self.process = shell_exec(cmdline, cwd=self.artifacts_dir,
                              stdout=std_out, stderr=std_err, env=env)
def execute(self, args, cwd=None, stdout=PIPE, stderr=PIPE, stdin=PIPE, shell=False, env=None):
    """
    Run a subprocess with the engine-prepared environment.

    Writes a HOSTALIASES file when aliases are configured, merges `env`
    over os.environ (case-insensitively on Windows), injects
    TAURUS_ARTIFACTS_DIR and drops None-valued variables.

    :return: the process handle from shell_exec
    """
    if cwd is None:
        cwd = self.engine.default_cwd

    aliases = self.get_hostaliases()
    hosts_file = None
    if aliases:
        hosts_file = self.engine.create_artifact("hostaliases", "")
        with open(hosts_file, 'w') as fds:
            for key, value in iteritems(aliases):
                fds.write("%s %s\n" % (key, value))

    environ = BetterDict()
    environ.merge(dict(os.environ))
    if aliases:
        environ["HOSTALIASES"] = hosts_file
    if env is not None:
        # FIX: call the predicate — the bare name `is_windows` is a function
        # object and was always truthy (sibling execute() at L12 calls it)
        if is_windows():
            # as variables in windows are case insensitive we should provide correct merging
            cur_env = {name.upper(): environ[name] for name in environ}
            old_keys = set(env.keys())
            env = {name.upper(): env[name] for name in env}
            new_keys = set(env.keys())
            if old_keys != new_keys:
                # FIX: grammar of the warning message
                msg = 'Some taurus environment variables have been lost: %s'
                self.log.warning(msg, list(old_keys - new_keys))
            environ = BetterDict()
            environ.merge(cur_env)
        environ.merge(env)
    environ.merge({"TAURUS_ARTIFACTS_DIR": self.engine.artifacts_dir})
    # None values would crash subprocess; strip them
    environ = {key: environ[key] for key in environ.keys() if environ[key] is not None}
    return shell_exec(args, cwd=cwd, stdout=stdout, stderr=stderr, stdin=stdin, shell=shell, env=environ)
def execute(self, args, cwd=None, stdout=PIPE, stderr=PIPE, stdin=PIPE, shell=False, env=None):
    """Run a subprocess with os.environ plus optional HOSTALIASES and TAURUS_ARTIFACTS_DIR."""
    if cwd is None:
        cwd = self.engine.default_cwd

    aliases = self.get_hostaliases()
    hosts_file = None
    if aliases:
        # dump aliases in the "name address" format understood by the resolver
        hosts_file = self.engine.create_artifact("hostaliases", "")
        with open(hosts_file, 'w') as fds:
            lines = ["%s %s\n" % (name, addr) for name, addr in iteritems(aliases)]
            fds.writelines(lines)

    environ = BetterDict()
    environ.merge(dict(os.environ))
    if aliases:
        environ["HOSTALIASES"] = hosts_file
    if env is not None:
        environ.merge(env)
    environ.merge({"TAURUS_ARTIFACTS_DIR": self.engine.artifacts_dir})

    return shell_exec(args, cwd=cwd, stdout=stdout, stderr=stderr, stdin=stdin, shell=shell, env=environ)
def startup(self):
    """Assemble the siege command line and start the load process."""
    args = [self.tool_path]
    load = self.get_load()

    if load.iterations:
        args += ['--reps', str(load.iterations)]
    elif load.hold:
        hold_for = ceil(dehumanize_time(load.hold))
        args += ['--time', '%sS' % hold_for]
    else:
        raise ValueError("You must specify either 'hold-for' or 'iterations' for siege")

    if self.scenario.get('think-time'):
        think_time = dehumanize_time(self.scenario.get('think-time'))
        args += ['--delay', str(think_time)]
    else:
        args += ['--benchmark']

    load_concurrency = load.concurrency
    args += ['--concurrent', str(load_concurrency)]
    self.reader.concurrency = load_concurrency
    args += ['--file', self.__url_name]

    for header, value in iteritems(self.scenario.get_headers()):
        args += ['--header', "%s: %s" % (header, value)]

    # siege reads extra settings from the rc file pointed to by SIEGERC
    env = BetterDict()
    env.merge(dict(environ))
    env.merge({"SIEGERC": self.__rc_name})

    self.start_time = time.time()
    self.process = shell_exec(args, stdout=self.__out, stderr=self.__err, env=env)
def make_jar(self):
    """Package compiled .class files into a jar (default name: compiled.jar)."""
    self.log.debug("Making .jar started")
    out_path = os.path.join(self.artifacts_dir, "jar.out")
    err_path = os.path.join(self.artifacts_dir, "jar.err")
    with open(out_path, 'ab') as jar_out:
        with open(err_path, 'ab') as jar_err:
            class_files = [item for item in os.listdir(self.working_dir) if item.endswith(".class")]
            jar_name = self.settings.get("jar-name", "compiled.jar")
            if class_files:
                cmdline = ["jar", "-cf", jar_name] + class_files
            else:
                # no loose classes: jar the first (package) directory instead
                package_dir = os.listdir(self.working_dir)[0]
                cmdline = ["jar", "-cf", jar_name, "-C", package_dir, "."]
            self.log.debug("running jar: %s", cmdline)
            self.process = shell_exec(cmdline, cwd=self.working_dir, stdout=jar_out, stderr=jar_err)

            # busy-wait until the jar tool finishes
            ret_code = self.process.poll()
            while ret_code is None:
                self.log.debug("Making jar file...")
                time.sleep(1)
                ret_code = self.process.poll()

            if ret_code != 0:
                with open(jar_err.name) as err_file:
                    out = err_file.read()
                self.log.info("Making jar failed with code %s", ret_code)
                self.log.info("jar output: %s", out)
                raise RuntimeError("Jar exited with non-zero code")

    self.log.info("Making .jar file completed")
def make_jar(self):
    """Package compiled .class files into a jar (default name: compiled.jar)."""
    self.log.debug("Making .jar started")
    with open(join(self.engine.artifacts_dir, "jar.out"), 'ab') as jar_out:
        with open(join(self.engine.artifacts_dir, "jar.err"), 'ab') as jar_err:
            class_files = [item for item in listdir(self.working_dir) if item.endswith(".class")]
            jar_name = self.settings.get("jar-name", "compiled.jar")
            if class_files:
                cmdline = ["jar", "-cf", jar_name] + class_files
            else:
                # nothing at top level: jar the whole working dir
                cmdline = ["jar", "-cf", jar_name, "."]
            self.process = shell_exec(cmdline, cwd=self.working_dir, stdout=jar_out, stderr=jar_err)

            # busy-wait until the jar tool finishes
            ret_code = self.process.poll()
            while ret_code is None:
                self.log.debug("Making jar file...")
                time.sleep(1)
                ret_code = self.process.poll()

            if ret_code != 0:
                with open(jar_err.name) as err_file:
                    out = err_file.read()
                raise ToolError("Jar exited with code %s\n%s" % (ret_code, out.strip()))

    self.log.info("Making .jar file completed")
def compile_scripts(self):
    """Compile every .java file under the working dir, then build the jar."""
    self.log.debug("Compiling .java files started")

    java_files = []
    for folder, _, files in os.walk(self.working_dir):
        for candidate in files:
            if os.path.splitext(candidate)[1].lower() == ".java":
                java_files.append(os.path.join(folder, candidate))

    cmdline = ["javac", "-cp", os.pathsep.join(self.base_class_path)]
    cmdline.extend(java_files)

    with open(os.path.join(self.artifacts_dir, "javac.out"), 'ab') as javac_out:
        with open(os.path.join(self.artifacts_dir, "javac.err"), 'ab') as javac_err:
            self.process = shell_exec(cmdline, cwd=self.working_dir, stdout=javac_out, stderr=javac_err)

            # busy-wait until javac finishes
            ret_code = self.process.poll()
            while ret_code is None:
                self.log.debug("Compiling .java files...")
                time.sleep(1)
                ret_code = self.process.poll()

            if ret_code != 0:
                self.log.debug("javac exit code: %s", ret_code)
                with open(javac_err.name) as err_file:
                    out = err_file.read()
                raise RuntimeError("Javac exited with error:\n %s" % out.strip())

    self.log.info("Compiling .java files completed")
    self.make_jar()
def make_jar(self):
    """Package compiled .class files into a jar (default name: compiled.jar)."""
    self.log.debug("Making .jar started")
    with open(os.path.join(self.artifacts_dir, "jar.out"), 'ab') as jar_out:
        with open(os.path.join(self.artifacts_dir, "jar.err"), 'ab') as jar_err:
            class_files = [item for item in os.listdir(self.working_dir) if item.endswith(".class")]
            jar_name = self.settings.get("jar-name", "compiled.jar")
            if class_files:
                cmdline = ["jar", "-cf", jar_name] + class_files
            else:
                # no loose classes: jar the first (package) directory instead
                package_dir = os.listdir(self.working_dir)[0]
                cmdline = ["jar", "-cf", jar_name, "-C", package_dir, "."]
            self.process = shell_exec(cmdline, cwd=self.working_dir, stdout=jar_out, stderr=jar_err)

            # busy-wait until the jar tool finishes
            ret_code = self.process.poll()
            while ret_code is None:
                self.log.debug("Making jar file...")
                time.sleep(1)
                ret_code = self.process.poll()

            if ret_code != 0:
                with open(jar_err.name) as err_file:
                    out = err_file.read()
                self.log.info("Making jar failed with code %s", ret_code)
                self.log.info("jar output: %s", out)
                raise RuntimeError("Jar exited with non-zero code")

    self.log.info("Making .jar file completed")
def execute(self, args, cwd=None, stdout=PIPE, stderr=PIPE, stdin=PIPE, shell=False, env=None):
    """
    Run a subprocess with os.environ merged with `env` (case-insensitively on
    Windows), plus TAURUS_ARTIFACTS_DIR; None-valued variables are dropped.

    :return: the process handle from shell_exec
    """
    if cwd is None:
        cwd = self.engine.default_cwd

    environ = BetterDict()
    environ.merge(dict(os.environ))
    if env is not None:
        if is_windows():
            # as variables in windows are case insensitive we should provide correct merging
            cur_env = {name.upper(): environ[name] for name in environ}
            old_keys = set(env.keys())
            env = {name.upper(): env[name] for name in env}
            new_keys = set(env.keys())
            if old_keys != new_keys:
                # FIX: was the garbled "might be been lost"
                msg = 'Some taurus environment variables might have been lost: %s'
                self.log.debug(msg, list(old_keys - new_keys))
            environ = BetterDict()
            environ.merge(cur_env)
        environ.merge(env)
    environ.merge({"TAURUS_ARTIFACTS_DIR": self.engine.artifacts_dir})
    # None values would crash subprocess; strip them
    environ = {key: environ[key] for key in environ.keys() if environ[key] is not None}
    self.log.debug("Executing shell from %s: %s", cwd, args)
    return shell_exec(args, cwd=cwd, stdout=stdout, stderr=stderr, stdin=stdin, shell=shell, env=environ)
def startup(self):
    """
    Launch Gatling as fast as possible, logging to artifact files.

    :raises NotImplementedError: when no simulation is configured.
    """
    simulation = self.get_scenario().get("simulation", "")
    if not simulation:
        # TODO: guess simulation from script file
        raise NotImplementedError()

    datadir = os.path.realpath(self.engine.artifacts_dir)

    # NOTE: exe_suffix already in "path"
    cmdline = [self.settings["path"]]
    # FIX: was " -rf " (padded with spaces) — subprocess passes argv items
    # verbatim, so Gatling never recognized the results-folder flag
    cmdline += ["-sf", datadir, "-df", datadir, "-rf", datadir]
    cmdline += ["-on", "gatling-bzt", "-m", "-s", simulation]

    self.start_time = time.time()
    out = self.engine.create_artifact("gatling-stdout", ".log")
    err = self.engine.create_artifact("gatling-stderr", ".log")
    self.stdout_file = open(out, "w")
    self.stderr_file = open(err, "w")
    self.process = shell_exec(
        cmdline, cwd=self.engine.artifacts_dir, stdout=self.stdout_file, stderr=self.stderr_file
    )
def run_tests(self):
    # java -cp junit.jar:selenium-test-small.jar:
    # selenium-2.46.0/selenium-java-2.46.0.jar:./../selenium-server.jar
    # org.junit.runner.JUnitCore TestBlazemeterPass
    """Run the custom JUnit runner over all jars in the working dir."""
    jar_list = [os.path.join(self.working_dir, jar)
                for jar in os.listdir(self.working_dir) if jar.endswith(".jar")]
    self.base_class_path.extend(jar_list)

    cmdline = ["java", "-cp", os.pathsep.join(self.base_class_path), "taurus_junit_listener.CustomRunner"]
    cmdline.extend(jar_list)
    cmdline.append(self.report_file)

    # register handles so they can be closed on shutdown
    junit_out = open(os.path.join(self.artifacts_dir, "junit_out"), 'ab')
    junit_err = open(os.path.join(self.artifacts_dir, "junit_err"), 'ab')
    self.opened_descriptors["std_out"] = junit_out
    self.opened_descriptors["std_err"] = junit_err

    self.process = shell_exec(cmdline, cwd=self.artifacts_dir, stdout=junit_out, stderr=junit_err)
def run_tests(self):
    # java -cp junit.jar:selenium-test-small.jar:
    # selenium-2.46.0/selenium-java-2.46.0.jar:./../selenium-server.jar
    # org.junit.runner.JUnitCore TestBlazemeterPass
    """Write the runner props file and launch the custom JUnit runner."""
    jar_list = [os.path.join(self.working_dir, jar)
                for jar in os.listdir(self.working_dir) if jar.endswith(".jar")]
    self.base_class_path.extend(jar_list)

    # runner configuration goes through a java properties file
    with open(self.props_file, 'wt') as props:
        props.write("kpi_log=%s\n" % self.settings.get("report-file").replace(os.path.sep, '/'))
        props.write("error_log=%s\n" % self.settings.get("err-file").replace(os.path.sep, '/'))
        if self.load.iterations:
            props.write("iterations=%s\n" % self.load.iterations)
        if self.load.hold:
            props.write("hold_for=%s\n" % self.load.hold)
        for index, item in enumerate(jar_list):
            props.write("target_%s=%s\n" % (index, item.replace(os.path.sep, '/')))

    # register handles so they can be closed on shutdown
    std_out = open(self.settings.get("stdout"), "wt")
    self.opened_descriptors.append(std_out)
    std_err = open(self.settings.get("stderr"), "wt")
    self.opened_descriptors.append(std_err)

    env = BetterDict()
    env.merge(dict(os.environ))
    env.merge(self.env)

    cmdline = ["java", "-cp", os.pathsep.join(self.base_class_path),
               "taurusjunit.CustomRunner", self.props_file]
    self.process = shell_exec(cmdline, cwd=self.artifacts_dir,
                              stdout=std_out, stderr=std_err, env=env)
def startup(self):
    """
    Launch Gatling as fast as possible, logging to artifact files.

    :raises NotImplementedError: when no simulation is configured.
    """
    simulation = self.get_scenario().get("simulation", "")
    if not simulation:
        # TODO: guess simulation from script file
        raise NotImplementedError()

    datadir = os.path.realpath(self.engine.artifacts_dir)

    # NOTE: exe_suffix already in "path"
    cmdline = [self.settings["path"]]
    # FIX: was " -rf " (padded with spaces) — subprocess passes argv items
    # verbatim, so Gatling never recognized the results-folder flag
    cmdline += ["-sf", datadir, "-df", datadir, "-rf", datadir]
    cmdline += ["-on", "gatling-bzt", "-m", "-s", simulation]

    self.start_time = time.time()
    out = self.engine.create_artifact("gatling-stdout", ".log")
    err = self.engine.create_artifact("gatling-stderr", ".log")
    self.stdout_file = open(out, "w")
    self.stderr_file = open(err, "w")
    self.process = shell_exec(cmdline, cwd=self.engine.artifacts_dir,
                              stdout=self.stdout_file, stderr=self.stderr_file)
def run_tests(self):
    # java -cp junit.jar:selenium-test-small.jar:
    # selenium-2.46.0/selenium-java-2.46.0.jar:./../selenium-server.jar
    # org.junit.runner.JUnitCore TestBlazemeterPass
    """Run the custom JUnit runner, passing report/err files and target jars."""
    jar_list = [os.path.join(self.working_dir, jar)
                for jar in os.listdir(self.working_dir) if jar.endswith(".jar")]
    self.base_class_path.extend(jar_list)

    cmdline = ["java", "-cp", os.pathsep.join(self.base_class_path), "taurusjunit.CustomRunner"]
    cmdline.append(self.settings.get("report-file"))
    cmdline.append(self.settings.get("err-file"))
    cmdline.extend(jar_list)

    # register handles so they can be closed on shutdown
    std_out = open(self.settings.get("stdout"), "wt")
    self.opened_descriptors.append(std_out)
    std_err = open(self.settings.get("stderr"), "wt")
    self.opened_descriptors.append(std_err)

    self.process = shell_exec(cmdline, cwd=self.artifacts_dir, stdout=std_out, stderr=std_err)
def _make_jar(self):
    """Package compiled .class files into a jar (default name: compiled.jar)."""
    self.log.debug("Making .jar started")
    with open(join(self.engine.artifacts_dir, "jar.out"), 'ab') as jar_out:
        with open(join(self.engine.artifacts_dir, "jar.err"), 'ab') as jar_err:
            class_files = [item for item in listdir(self.working_dir) if item.endswith(".class")]
            jar_name = self.settings.get("jar-name", "compiled.jar")
            if class_files:
                cmdline = ["jar", "-cf", jar_name] + class_files
            else:
                # nothing at top level: jar the whole working dir
                cmdline = ["jar", "-cf", jar_name, "."]
            self.process = shell_exec(cmdline, cwd=self.working_dir, stdout=jar_out, stderr=jar_err)

            # busy-wait until the jar tool finishes
            ret_code = self.process.poll()
            while ret_code is None:
                self.log.debug("Making jar file...")
                time.sleep(1)
                ret_code = self.process.poll()

            if ret_code != 0:
                with open(jar_err.name) as err_file:
                    out = err_file.read()
                raise ToolError("Jar exited with code %s\n%s" % (ret_code, out.strip()))

    self.log.info("Making .jar file completed")
def startup(self):
    """Build env + command line for the locust wrapper and launch it."""
    self.start_time = time.time()
    load = self.get_load()
    hatch = load.concurrency / load.ramp_up if load.ramp_up else load.concurrency
    wrapper = os.path.join(os.path.dirname(__file__), os.pardir,
                           "resources", "locustio-taurus-wrapper.py")

    env = BetterDict()
    env.merge({k: os.environ.get(k) for k in os.environ.keys()})
    env.merge({"PYTHONPATH": self.engine.artifacts_dir + os.pathsep + os.getcwd()})
    if os.getenv("PYTHONPATH"):
        # keep the caller's PYTHONPATH ahead of ours
        env['PYTHONPATH'] = os.getenv("PYTHONPATH") + os.pathsep + env['PYTHONPATH']

    args = [sys.executable, os.path.realpath(wrapper), '-f', os.path.realpath(self.locustfile)]
    args.append('--logfile=%s' % self.engine.create_artifact("locust", ".log"))
    args.extend(["--no-web", "--only-summary"])
    args.extend(["--clients=%d" % load.concurrency, "--hatch-rate=%d" % math.ceil(hatch)])
    if load.iterations:
        args.append("--num-request=%d" % load.iterations)

    if self.is_master:
        args.extend(["--master", '--expect-slaves=%s' % self.expected_slaves])
        env["SLAVES_LDJSON"] = self.slaves_ldjson
    else:
        env["JTL"] = self.kpi_jtl

    host = self.get_scenario().get("default-address", None)
    if host:
        args.append("--host=%s" % host)

    self.__out = open(self.engine.create_artifact("locust", ".out"), 'w')
    self.process = shell_exec(args, stderr=STDOUT, stdout=self.__out, env=env)
def start_subprocess(self, args, cwd, stdout, stderr, stdin, shell, env):
    """Launch a subprocess with the shared environment layered over `env`."""
    if cwd is None:
        cwd = self.default_cwd
    merged = Environment(self.log, env.get())
    merged.set(self.shared_env.get())
    return shell_exec(args, cwd=cwd, stdout=stdout, stderr=stderr,
                      stdin=stdin, shell=shell, env=merged.get())
def check_if_installed(self):
    """Return True if locust is runnable ('--version' succeeds)."""
    self.log.debug('Checking LocustIO: %s' % self.tool_path)
    try:
        out, err = communicate(shell_exec([self.tool_path, '--version']))
        self.log.debug("Locustio check stdout/stderr: %s, %s", out, err)
    except (CalledProcessError, OSError, AttributeError):
        return False
    return True
def startup(self):
    """
    Launch Gatling as fast as possible, passing load parameters via JAVA_OPTS
    system properties and logging to artifact files.

    :raises ValueError: when no simulation is configured.
    """
    simulation = self.get_scenario().get("simulation", "")
    if not simulation:
        # TODO: guess simulation from script file
        raise ValueError("No simulation set")

    datadir = os.path.realpath(self.engine.artifacts_dir)

    cmdline = [self.settings["path"]]
    # FIX: was "-rf " (trailing space) — subprocess passes argv items
    # verbatim, so Gatling never recognized the results-folder flag
    cmdline += ["-sf", datadir, "-df", datadir, "-rf", datadir]
    cmdline += ["-on", "gatling-bzt", "-m", "-s", simulation]

    self.start_time = time.time()
    out = self.engine.create_artifact("gatling-stdout", ".log")
    err = self.engine.create_artifact("gatling-stderr", ".log")
    self.stdout_file = open(out, "w")
    self.stderr_file = open(err, "w")

    # collect scenario/load settings to forward as -D system properties
    params_for_scala = {}
    load = self.get_load()
    scenario = self.get_scenario()
    if scenario.get('timeout', None) is not None:
        params_for_scala['gatling.http.ahc.requestTimeout'] = int(
            dehumanize_time(scenario.get('timeout')) * 1000)
    if scenario.get('keepalive', None) is not None:
        params_for_scala['gatling.http.ahc.keepAlive'] = scenario.get('keepalive').lower()
    if load.concurrency is not None:
        params_for_scala['concurrency'] = load.concurrency
    if load.ramp_up is not None:
        params_for_scala['ramp-up'] = int(load.ramp_up)
    if load.hold is not None:
        params_for_scala['hold-for'] = int(load.hold)
    if load.iterations is not None and load.iterations != 0:
        params_for_scala['iterations'] = int(load.iterations)

    env = BetterDict()
    env.merge(dict(os.environ))
    java_opts = "".join([" -D%s=%s" % (key, params_for_scala[key]) for key in params_for_scala])
    # preserve any JAVA_OPTS already present, plus config-level java_opts
    java_opts += " " + env.get("JAVA_OPTS", "") + " " + self.engine.config.get("java_opts", "")
    env.merge({"JAVA_OPTS": java_opts})

    self.process = shell_exec(cmdline, cwd=self.engine.artifacts_dir,
                              stdout=self.stdout_file, stderr=self.stderr_file, env=env)
def start_subprocess(self, args, env, cwd=None, **kwargs):
    """Spawn a subprocess, exposing a GRACEFUL flag-file path via its environment."""
    if cwd is None:
        cwd = self.default_cwd
    self.graceful_tmp = self.create_artifact(prefix="GRACEFUL", suffix="")
    environ = env.get()
    environ['GRACEFUL'] = self.graceful_tmp
    return shell_exec(args, cwd=cwd, env=environ, **kwargs)
def run_command(self, cmdline, stream_name, cwd):
    """
    Run `cmdline` to completion, capturing stdout/stderr into artifacts
    and echoing both to the debug log.
    """
    out = open(self.obj.engine.create_artifact(stream_name, ".out"), "wt")
    err = open(self.obj.engine.create_artifact(stream_name, ".err"), "wt")
    process = shell_exec(args=cmdline, stdout=out, stderr=err, cwd=cwd)
    while process.poll() is None:
        time.sleep(0.5)
    out.close()
    err.close()
    # FIX: the read handles were opened inline and never closed (leak);
    # use context managers so they are released deterministically
    with open(out.name, 'r') as out_file:
        self.obj.log.debug("%s out: %s", cmdline, out_file.read())
    with open(err.name, 'r') as err_file:
        self.obj.log.debug("%s err: %s", cmdline, err_file.read())
def start(self, config_file):
    """Launch phantom with `config_file`, surfacing startup failures loudly."""
    cmdline = [self.path, 'run', config_file]
    # when stdout/stderr are captured (StringIO), let the child inherit nothing
    stdout = None if isinstance(sys.stdout, StringIO) else sys.stdout
    stderr = None if isinstance(sys.stderr, StringIO) else sys.stderr
    try:
        self.process = shell_exec(cmdline, cwd=self.engine.artifacts_dir,
                                  stdout=stdout, stderr=stderr)
    except OSError as exc:
        self.log.error("Failed to start phantom-benchmark utility: %s", traceback.format_exc())
        self.log.error("Failed command: %s", cmdline)
        raise RuntimeError("Failed to start phantom-benchmark utility: %s" % exc)
def check_if_installed(self):
    """Return True if gatling responds to '--help'."""
    self.log.debug("Trying Gatling: %s", self.tool_path)
    try:
        proc = shell_exec([self.tool_path, '--help'],
                          stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        output = proc.communicate()
        self.log.debug("Gatling check: %s", output)
        return True
    except OSError:
        self.log.debug("Gatling check failed.")
        return False
def check_if_installed(self):
    """Return True if gatling responds to '--help'."""
    self.log.debug("Trying Gatling: %s", self.tool_path)
    try:
        proc = shell_exec([self.tool_path, '--help'],
                          stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        output = proc.communicate()
        self.log.debug("Gatling check is successful: %s", output)
        return True
    except OSError:
        self.log.info("Gatling check failed.")
        return False
def check_if_installed(self):
    """Return True if the python interpreter can import robot successfully."""
    self.log.debug('Checking Robot Framework: %s' % self.tool_path)
    try:
        checker = shell_exec([self.python_executable, '-c',
                              'import robot; print(robot.__version__)'])
        output = checker.communicate()
        self.log.debug("Robot output: %s", output)
        return checker.returncode == 0
    except (CalledProcessError, OSError):
        return False
def check_if_installed(self):
    """Return True (and remember it) if the selenium server jar answers '-help'."""
    self.log.debug("%s path: %s", self.tool_name, self.tool_path)
    cmdline = ["java", "-jar", self.tool_path, "-help"]
    proc = shell_exec(cmdline, stderr=subprocess.STDOUT)
    output = proc.communicate()
    self.log.debug("%s output: %s", self.tool_name, output)
    if proc.returncode != 0:
        return False
    self.already_installed = True
    return True
def check_if_installed(self):
    """Return True (and remember it) if grinder's main class is runnable."""
    self.log.debug("Trying grinder: %s", self.tool_path)
    cmdline = ["java", "-classpath", self.tool_path, "net.grinder.Grinder"]
    proc = shell_exec(cmdline, stderr=subprocess.STDOUT)
    output = proc.communicate()
    self.log.debug("%s output: %s", self.tool_name, output)
    if proc.returncode != 0:
        return False
    self.already_installed = True
    return True
def check_if_installed(self):
    """Return True if molotov is runnable; enforce a minimum version of 1.4."""
    self.log.debug('Checking Molotov: %s' % self.tool_path)
    try:
        stdout, stderr = communicate(shell_exec([self.tool_path, '--version']))
        self.log.debug("Molotov stdout/stderr: %s, %s", stdout, stderr)
        detected = LooseVersion(stdout.strip())
        if detected < LooseVersion("1.4"):
            # deliberately escapes the except below (not an OSError family)
            raise ToolError("You must install molotov>=1.4 to use this executor (version %s detected)" % detected)
    except (CalledProcessError, OSError, AttributeError):
        return False
    return True
def startup(self):
    """Build env + command line for the locust wrapper and launch it."""
    self.start_time = time.time()
    load = self.get_load()
    hatch = load.concurrency / load.ramp_up if load.ramp_up else load.concurrency
    wrapper = os.path.join(os.path.dirname(__file__), os.pardir,
                           "resources", "locustio-taurus-wrapper.py")

    env = BetterDict()
    env.merge({k: os.environ.get(k) for k in os.environ.keys()})
    env.merge({"PYTHONPATH": self.engine.artifacts_dir + os.pathsep + os.getcwd()})
    if os.getenv("PYTHONPATH"):
        # keep the caller's PYTHONPATH ahead of ours
        env['PYTHONPATH'] = os.getenv("PYTHONPATH") + os.pathsep + env['PYTHONPATH']

    args = [sys.executable, os.path.realpath(wrapper), '-f', os.path.realpath(self.locustfile)]
    args.append('--logfile=%s' % self.engine.create_artifact("locust", ".log"))
    args.extend(["--no-web", "--only-summary"])
    args.extend(["--clients=%d" % load.concurrency, "--hatch-rate=%d" % math.ceil(hatch)])
    if load.iterations:
        args.append("--num-request=%d" % load.iterations)

    if self.is_master:
        args.extend(["--master", '--expect-slaves=%s' % self.expected_slaves])
        env["SLAVES_LDJSON"] = self.slaves_ldjson
    else:
        env["JTL"] = self.kpi_jtl

    host = self.get_scenario().get("default-address", None)
    if host:
        args.append("--host=%s" % host)

    self.__out = open(self.engine.create_artifact("locust", ".out"), 'w')
    self.process = shell_exec(args, stderr=STDOUT, stdout=self.__out, env=env)
def startup(self):
    """
    Launch Gatling as fast as possible, passing load parameters via JAVA_OPTS
    system properties and logging to artifact files.

    :raises ValueError: when no simulation is configured.
    """
    simulation = self.get_scenario().get("simulation", "")
    if not simulation:
        # TODO: guess simulation from script file
        raise ValueError("No simulation set")

    datadir = os.path.realpath(self.engine.artifacts_dir)

    # simulations dir: parent of the script file, or the script dir itself
    if os.path.isfile(self.script):
        script_path = os.path.dirname(get_full_path(self.script))
    else:
        script_path = self.script

    cmdline = [self.settings["path"]]
    # FIX: was "-rf " (trailing space) — subprocess passes argv items
    # verbatim, so Gatling never recognized the results-folder flag
    cmdline += ["-sf", script_path, "-df", datadir, "-rf", datadir]
    cmdline += ["-on", self.dir_prefix, "-m", "-s", simulation]

    self.start_time = time.time()
    out = self.engine.create_artifact("gatling-stdout", ".log")
    err = self.engine.create_artifact("gatling-stderr", ".log")
    self.stdout_file = open(out, "w")
    self.stderr_file = open(err, "w")

    # collect scenario/load settings to forward as -D system properties
    params_for_scala = self.settings.get('properties')
    load = self.get_load()
    scenario = self.get_scenario()
    if scenario.get('timeout', None) is not None:
        params_for_scala['gatling.http.ahc.requestTimeout'] = int(dehumanize_time(scenario.get('timeout')) * 1000)
    if scenario.get('keepalive', None) is not None:
        params_for_scala['gatling.http.ahc.keepAlive'] = scenario.get('keepalive').lower()
    if load.concurrency is not None:
        params_for_scala['concurrency'] = load.concurrency
    if load.ramp_up is not None:
        params_for_scala['ramp-up'] = int(load.ramp_up)
    if load.hold is not None:
        params_for_scala['hold-for'] = int(load.hold)
    if load.iterations is not None and load.iterations != 0:
        params_for_scala['iterations'] = int(load.iterations)

    env = BetterDict()
    env.merge(dict(os.environ))
    java_opts = ''.join([" -D%s=%s" % (key, params_for_scala[key]) for key in params_for_scala])
    # preserve any JAVA_OPTS already present, plus settings-level java-opts
    java_opts += ' ' + env.get('JAVA_OPTS', '') + ' ' + self.settings.get('java-opts', '')
    env.merge({"JAVA_OPTS": java_opts})

    self.process = shell_exec(cmdline, stdout=self.stdout_file, stderr=self.stderr_file, env=env)
def _compile_scripts(self):
    """Compile collected .java scripts (skipping when a jar already exists), then build the jar."""
    if not self._java_scripts:
        return

    self.log.debug("Compiling .java files started")

    jar_path = join(self.engine.artifacts_dir, self.working_dir,
                    self.settings.get("jar-name", "compiled.jar"))
    if os.path.exists(jar_path):
        # previous run already produced the jar
        self.log.debug(".java files are already compiled, skipping")
        return

    cmdline = [
        "javac",
        "-source", self.target_java,
        "-target", self.target_java,
        "-d", self.working_dir,
    ]
    cmdline.extend(["-cp", os.pathsep.join(self.class_path)])
    cmdline.extend(self._java_scripts)

    with open(self.engine.create_artifact("javac", ".out"), 'ab') as javac_out:
        with open(self.engine.create_artifact("javac", ".err"), 'ab') as javac_err:
            self.log.debug("running javac: %s", cmdline)
            self.process = shell_exec(cmdline, stdout=javac_out, stderr=javac_err)

            # busy-wait until javac finishes
            ret_code = self.process.poll()
            while ret_code is None:
                self.log.debug("Compiling .java files...")
                time.sleep(1)
                ret_code = self.process.poll()

            if ret_code != 0:
                self.log.debug("javac exit code: %s", ret_code)
                with open(javac_err.name) as err_file:
                    out = err_file.read()
                raise ToolError("Javac exited with code: %s\n %s" % (ret_code, out.strip()))

    self.log.info("Compiling .java files completed")
    self._make_jar()
def startup(self):
    """Launch grinder as fast as possible, logging to artifact files."""
    self.start_time = time.time()
    out = self.engine.create_artifact("grinder-stdout", ".log")
    err = self.engine.create_artifact("grinder-stderr", ".log")
    self.stdout_file = open(out, "w")
    self.stderr_file = open(err, "w")
    self.process = shell_exec(self.cmd_line, cwd=self.engine.artifacts_dir,
                              stdout=self.stdout_file, stderr=self.stderr_file)
def startup(self):
    """Assemble the ApacheBenchmark command line and start the load process."""
    args = [self.tool_path]
    load = self.get_load()

    if load.hold:
        hold = int(ceil(dehumanize_time(load.hold)))
        args += ['-t', str(hold)]
    elif load.iterations:
        args += ['-n', str(load.iterations)]
    else:
        args += ['-n', '1']  # 1 iteration by default

    load_concurrency = load.concurrency if load.concurrency is not None else 1
    args += ['-c', str(load_concurrency)]

    args += ['-d']  # do not print 'Processed *00 requests' every 100 requests or so
    args += ['-g', str(self.__tsv_file_name)]  # dump stats to TSV file

    # add global scenario headers
    for key, val in iteritems(self.scenario.get_headers()):
        args += ['-H', "%s: %s" % (key, val)]

    requests = list(self.scenario.get_requests())
    if not requests:
        raise ValueError("You must specify at least one request for ab")
    if len(requests) > 1:
        self.log.warning("ab doesn't support multiple requests. Only first one will be used.")
    request = requests[0]

    # add request-specific headers
    for header in request.headers:
        for key, val in iteritems(header):
            args += ['-H', "%s: %s" % (key, val)]

    if request.method != 'GET':
        raise ValueError("ab supports only GET requests")

    # request-level keepalive wins over scenario-level; default is on
    keepalive = True
    if request.config.get('keepalive') is not None:
        keepalive = request.config.get('keepalive')
    elif self.scenario.get('keepalive') is not None:
        keepalive = self.scenario.get('keepalive')
    if keepalive:
        args += ['-k']

    args += [request.url]

    self.reader.setup(load_concurrency, request.label)
    self.start_time = time.time()
    self.process = shell_exec(args, stdout=self.__out, stderr=self.__err)
def check_if_installed(self):
    """Return True if the phantom binary can be launched at all."""
    self.log.debug("Trying phantom: %s", self.tool_path)
    try:
        pbench = shell_exec([self.tool_path], stderr=subprocess.STDOUT)
        pbench_out, pbench_err = pbench.communicate()
        self.log.debug("PBench check stdout: %s", pbench_out)
        if pbench_err:
            self.log.warning("PBench check stderr: %s", pbench_err)
        return True
    except (CalledProcessError, OSError):
        self.log.info("Phantom check failed")
        return False
def tool_is_started(self):
    """
    Return True when the android emulator reports boot completion via adb.

    Falls back to an external 'adb' when the SDK-bundled one is missing.

    :raises ToolError: when the adb query itself cannot be executed.
    """
    adb_path = os.path.join(get_full_path(self.tool_path, step_up=2), 'platform-tools', 'adb')
    if not os.path.isfile(adb_path):
        self.log.debug('adb is not found in sdk, trying to use an external one..')
        adb_path = 'adb'

    cmd = [adb_path, "shell", "getprop", "sys.boot_completed"]
    self.log.debug("Trying: %s", cmd)
    try:
        proc = shell_exec(cmd)
        out, _ = communicate(proc)
        return out.strip() == '1'
    except BaseException as exc:
        # FIX: ToolError('%s', exc) passed %-style args to the constructor,
        # which never interpolates them — format the message explicitly
        raise ToolError('Checking if android emulator starts is impossible: %s' % exc)
def startup(self):
    """Launch appium and block until it reports started or the timeout expires."""
    self.log.debug('Starting appium...')
    self.stdout = open(os.path.join(self.engine.artifacts_dir, 'appium.out'), 'ab')
    self.stderr = open(os.path.join(self.engine.artifacts_dir, 'appium.err'), 'ab')
    self.appium_process = shell_exec([self.tool_path], stdout=self.stdout, stderr=self.stderr)

    start_time = time.time()
    while not self.tool_is_started():
        time.sleep(1)
        if time.time() - start_time > self.startup_timeout:
            raise ToolError("Appium cannot be loaded")

    self.log.info('Appium was started successfully')
def check_if_installed(self):
    """Return True if the phantom binary can be launched at all."""
    self.log.debug("Trying phantom: %s", self.tool_path)
    try:
        pbench = shell_exec([self.tool_path], stderr=subprocess.STDOUT)
        pbench_out, pbench_err = pbench.communicate()
        self.log.debug("PBench check: %s", pbench_out)
        if pbench_err:
            self.log.warning("PBench check stderr: %s", pbench_err)
        return True
    except (CalledProcessError, OSError):
        self.log.debug("Check failed: %s", traceback.format_exc())
        self.log.error("Phantom check failed. Consider installing it")
        return False
def startup(self):
    """Launch the configured AVD and block until it boots or the timeout expires."""
    self.log.debug('Starting android emulator...')
    exc = TaurusConfigError('You must choose an emulator with modules.android-emulator.avd config parameter')
    self.avd = self.settings.get('avd', exc)
    self.stdout = open(os.path.join(self.engine.artifacts_dir, 'emulator-%s.out' % self.avd), 'ab')
    self.stderr = open(os.path.join(self.engine.artifacts_dir, 'emulator-%s.err' % self.avd), 'ab')
    self.emulator_process = shell_exec([self.tool_path, '-avd', self.avd],
                                       stdout=self.stdout, stderr=self.stderr)

    start_time = time.time()
    while not self.tool_is_started():
        time.sleep(1)
        if time.time() - start_time > self.startup_timeout:
            raise ToolError("Android emulator %s cannot be loaded" % self.avd)

    self.log.info('Android emulator %s was started successfully', self.avd)
def execute(self, args, cwd=None, stdout=PIPE, stderr=PIPE, stdin=PIPE, shell=False, env=None):
    """
    Run a subprocess with os.environ plus optional HOSTALIASES and merged `env`.

    :return: the process handle from shell_exec
    """
    aliases = self.get_hostaliases()

    environ = BetterDict()
    environ.merge(dict(os.environ))

    # FIX: only create the hostaliases artifact and set HOSTALIASES when
    # aliases are actually configured — previously an empty file was created
    # and pointed to unconditionally (cf. the guarded variants of execute())
    if aliases:
        hosts_file = self.engine.create_artifact("hostaliases", "")
        with open(hosts_file, 'w') as fds:
            for key, value in iteritems(aliases):
                fds.write("%s %s\n" % (key, value))
        environ["HOSTALIASES"] = hosts_file

    if env is not None:
        environ.merge(env)

    return shell_exec(args, cwd=cwd, stdout=stdout, stderr=stderr, stdin=stdin, shell=shell, env=environ)