def _clone_repo(self):
    """Shallow-clone the repo at ``self._git_url`` into ``self._repo_path``.

    Uses ``--depth 1`` so only the most recent revision is fetched.
    """
    cmd = ['git', 'clone', '--depth', '1', self._git_url, self._repo_path]
    # Bug fix: print() does not do logger-style %-interpolation, so the
    # original printed the literal "%r" followed by the command string.
    # Format explicitly instead.
    print('Executing command: %r' % ' '.join(cmd))
    proc = Popen(cmd)
    # communicate() blocks until the clone finishes; both values are None
    # here because neither stream was redirected to PIPE.
    stdout, stderr = proc.communicate()
def run():
    """Flask view: run posted code under the ``bbm`` interpreter, return JSON.

    Reads ``code`` and ``stdin`` from the request form, writes the code to a
    unique temp file, executes it with a 15 second timeout, and returns
    stdout, stderr and a timeout flag as JSON.
    """
    is_timeout = False
    code = request.form.get('code')
    stdin = request.form.get('stdin')
    # uuid4 gives each request its own scratch file under /tmp.
    code_filename = "/tmp/" + str(uuid4())
    try:
        with open(code_filename, "w") as code_file:
            code_file.write(code)
        p = Popen(
            ['bbm', code_filename],
            stdout=PIPE, stdin=PIPE, stderr=PIPE
        )
        stdout, stderr = p.communicate(input=stdin.encode('utf-8'), timeout=15)
    except TimeoutExpired:
        is_timeout = True
        p.kill()
        # A second communicate() drains whatever output was produced and
        # reaps the killed child.
        stdout, stderr = p.communicate()
    finally:
        remove(code_filename)
    # NOTE(review): if Popen/communicate fails with anything other than
    # TimeoutExpired, ``stdout`` is unbound here and this raises NameError
    # — confirm that path cannot occur in practice.
    stdout = stdout.decode('utf-8')
    stderr = stderr.decode('utf-8')
    return jsonify({
        'stdout': stdout,
        'stderr': stderr,
        'is_timeout': is_timeout
    })
def run_timed(cmd, stdin=None, timeout=None):
    """Run *cmd* (optionally feeding it *stdin*) and time the execution.

    Returns a dict with 'stdout', 'stderr', 'killed', 'time' (seconds) and,
    when a timeout was supplied, 'timeout'. If the command exceeds *timeout*
    its entire process group is sent SIGTERM and 'killed' is True (with the
    streams set to None).
    """
    began = default_timer()
    # setsid detaches the child into its own process group so that, on
    # timeout, we can kill it together with any grandchildren.
    child = Popen(cmd,
                  stdin=PIPE if stdin else None,
                  stdout=PIPE,
                  stderr=PIPE,
                  preexec_fn=setsid)
    try:
        out, err = child.communicate(input=stdin, timeout=timeout)
        result = {'stdout': out, 'stderr': err, 'killed': False}
    except TimeoutExpired:
        # Kill the whole group, children included.
        killpg(getpgid(child.pid), SIGTERM)
        result = {'stdout': None, 'stderr': None, 'killed': True}
    child.wait()  # Reaps zombies
    result['time'] = default_timer() - began
    if timeout is not None:
        result['timeout'] = timeout
    return result
def process(self, json_data, namedblobfile=None):
    """Render this sablon template with *json_data*.

    When *namedblobfile* is given, its data is written out and used as the
    template instead of ``self.template``. On success ``self.file_data``
    holds the produced docx; on failure SablonProcessingFailed is raised
    with the process stderr. Returns self for chaining.
    """
    workdir = tempfile.mkdtemp(prefix='opengever.core.sablon_')
    output_path = join(workdir, 'sablon_output.docx')
    if namedblobfile is not None:
        template_path = join(workdir, namedblobfile.filename)
        with open(template_path, 'wb') as fh:
            fh.write(namedblobfile.data)
    else:
        template_path = self.template.as_file(workdir)
    try:
        sablon = Popen(
            [environ.get('SABLON_BIN', 'sablon'), template_path, output_path],
            stdin=PIPE, stdout=PIPE, stderr=PIPE)
        self.stdout, self.stderr = sablon.communicate(input=json_data)
        self.returncode = sablon.returncode
        if not self.is_processed_successfully():
            raise SablonProcessingFailed(self.stderr)
        with open(output_path, 'rb') as produced:
            self.file_data = produced.read()
    finally:
        # Always clean up the scratch directory, even on failure.
        shutil.rmtree(workdir)
    return self
def run_playbook(playbook, inventory, *args, **kwargs):
    """Run ansible-playbook on *inventory*/*playbook*, streaming its output.

    Extra positional args are passed through to ansible-playbook; remaining
    kwargs go to Popen. The ``timestamp`` kwarg (popped before the Popen
    call) switches the line printer to the timestamping variant. Returns
    the process exit code.
    """
    env = ansible_env(os.environ.copy())
    cmd = ['ansible-playbook', '-i', inventory, playbook] + list(args)
    if verbosity():
        cmd += ['-' + ('v' * verbosity())]
    # pop() replaces the original in/del dance and must happen before the
    # Popen call so the key never reaches Popen.
    show_timestamp = kwargs.pop('timestamp', False)
    output = timestamp if show_timestamp else print
    logger.info('running %s', ' '.join(cmd))
    logger.debug('env: %r', env)
    process = Popen(cmd, env=env, stdout=PIPE, bufsize=1, **kwargs)
    for line in iter(process.stdout.readline, b''):
        output(line[:-1])
    # Bug fix: poll() samples the state only once and can return before the
    # child has fully exited (returncode would then be None). wait() blocks
    # until the process is reaped and guarantees a real exit code.
    process.wait()
    return process.returncode
def run_judge_client(self):
    """Run judge-client.py on the attacker/defender pair and record the result.

    Parses the judge client's stdout protocol: line 0 is the winner
    ("attacker"/"defender"), line 1 the reason, and the remaining lines the
    replay. Raises JudgeClientException on crash or unrecognised output.
    """
    self.copy_assets()
    args = ["python", os.path.realpath('judge-client.py'),
            './attacker', './defender']
    self.log += ['Running: ' + ' '.join(args)]
    proc = Popen(args, cwd=self.base_dir, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    output = proc.communicate()
    # output[1] is stderr; keep it in the log for debugging.
    self.log += [str(output[1])]
    if proc.returncode:
        self.log += ["Judge client crashed with return code %d."
                     % proc.returncode]
        raise JudgeClientException("judge client crashed.")
    result = output[0].split('\n')
    winner = result[0]
    if winner == "attacker":
        self.record.attacker_wins()
    elif winner == "defender":
        self.record.defender_wins()
    else:
        self.log += ["Judge client return unknown winner %s." % winner]
        raise JudgeClientException("unknown winner.")
    reason = result[1]
    # Map the judge's reason string onto the execution-record status codes.
    if reason == "Finished":
        self.record.status = ExecutionRecord.STATUS_FINISHED
    elif reason == "IllegalMovement":
        self.record.status = ExecutionRecord.STATUS_ILLEGAL_MOVE
    elif reason == "IllegalOutput":
        self.record.status = ExecutionRecord.STATUS_BAD_FORMAT
    elif reason == "TLE":
        self.record.status = ExecutionRecord.STATUS_TLE
    elif reason == "Crashed":
        self.record.status = ExecutionRecord.STATUS_RUNTIME_ERROR
    else:
        self.log += ["Judge client return unknown reason %s." % reason]
        raise JudgeClientException("unknown reason.")
    # Everything after the first two lines is the replay data.
    self.record.replay = result[2:]
def process(self, json_data, namedblobfile=None):
    """Render the sablon template with *json_data* into ``self.file_data``.

    A *namedblobfile*, when supplied, overrides ``self.template`` as the
    template source. stdout/stderr/returncode of the sablon call are kept
    on self; the output file is only read on success. Returns self.
    """
    scratch = tempfile.mkdtemp(prefix='opengever.core.sablon_')
    output_path = join(scratch, 'sablon_output.docx')
    if namedblobfile is not None:
        template_path = join(scratch, namedblobfile.filename)
        with open(template_path, 'wb') as template_file:
            template_file.write(namedblobfile.data)
    else:
        template_path = self.template.as_file(scratch)
    try:
        binary = environ.get('SABLON_BIN', 'sablon')
        worker = Popen([binary, template_path, output_path],
                       stdin=PIPE, stdout=PIPE, stderr=PIPE)
        self.stdout, self.stderr = worker.communicate(input=json_data)
        self.returncode = worker.returncode
        if self.is_processed_successfully():
            with open(output_path, 'rb') as outfile:
                self.file_data = outfile.read()
    finally:
        # The scratch directory is removed no matter what happened above.
        shutil.rmtree(scratch)
    return self
def check_container_status_rkt():
    """
    Checks and prints the calico/node container status when running in rkt.

    Exits the process with status 1 when `rkt list` fails or no calico-node
    container is found. (Python 2 module: uses print statements.)
    """
    list_cmd = ["sudo", "rkt", "list"]
    p = Popen(list_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    stdout, stderr = p.communicate()
    # RKT_CONTAINER_RE picks the calico/node rows out of the `rkt list`
    # output; each match is a tuple whose third field is the status.
    containers = RKT_CONTAINER_RE.findall(stdout)
    if p.returncode:
        print "Unable to list rkt containers: '%s'" % stderr.strip()
        sys.exit(1)
    if len(containers) == 0:
        print "calico-node container not running"
        sys.exit(1)
    else:
        # Get statuses for all calico/node containers, and determine
        # if any are running.
        statuses = [c[2] for c in containers]
        running = "running" in statuses
        # If one is running, status is "running". Else, use the status of
        # the first container.
        status = "running" if running else statuses[0]
        # Print status. If it at least one is running, this will display
        # "running" status.
        print "calico-node container status: %s" % status
def install_dropbox_helper(package=None, fullpath=None, tmp=False):
    """Run the helper installer for *package* (or an explicit *fullpath*).

    With tmp=True an 'install-tmp' is performed and the helpers directory
    inside the installer-reported <path> element is returned; otherwise the
    function returns None. Raises InstallError when the installer binary is
    missing, exits non-zero, or (tmp mode) reports no path.
    """
    if not os.path.exists(HELPER_INSTALLER_PATH):
        raise InstallError('no installer')
    assert package or fullpath
    if not fullpath:
        fullpath = get_package_location(package)
    TRACE('Installing %s', package or fullpath)
    verb = 'install-tmp' if tmp else 'install'
    proc = Popen([HELPER_INSTALLER_PATH, verb, BUILD_KEY, fullpath],
                 stdout=PIPE, stderr=PIPE)
    out, err = proc.communicate()
    if proc.returncode != 0:
        if out:
            TRACE('stdout: %s', out)
        if err:
            TRACE('stderr: %s', err)
        raise InstallError('Installer returned %d' % proc.returncode)
    if not tmp:
        return
    # tmp mode: scan the installer output for a <path>...</path> element;
    # the last match wins, mirroring the original loop.
    path = None
    for line in out.split('\n'):
        match = re.match('\\<path\\>(?P<path>.+)\\</path\\>', line)
        if match:
            path = match.group('path')
    if path:
        return os.path.join(path, REL_HELPERS_DIR)
    raise InstallError('no path')
def handle_eval(self, record):
    """Evaluate ./sumfun_ext on the record's params, streaming its output.

    Intermediate stdout lines are parsed as floats; the evaluation is cut
    short (success, capped at 350) when the value grows too large, and
    cancelled on unparseable output or a crashed process.
    """
    self.process = Popen(
        ['./sumfun_ext', array2str(record.params[0])], stdout=PIPE)
    val = np.nan
    # Continuously check for new outputs from the subprocess
    while True:
        output = self.process.stdout.readline()
        # NOTE(review): with stdout=PIPE and no universal_newlines,
        # readline() returns bytes on Python 3, so the == '' EOF test only
        # terminates on Python 2 — confirm the intended interpreter.
        if output == '' and self.process.poll() is not None:  # No new output
            break
        if output:  # New intermediate output
            try:
                val = float(output.strip())  # Try to parse output
                if val > 350:  # Terminate if too large
                    self.process.terminate()
                    self.finish_success(record, 350)
                    return
            except ValueError:  # If the output is nonsense we terminate
                logging.warning("Incorrect output")
                self.process.terminate()
                self.finish_cancelled(record)
                return
    rc = self.process.poll()  # Check the return code
    # A negative rc means the child was killed by a signal.
    if rc < 0 or np.isnan(val):
        logging.info("WARNING: Incorrect output or crashed evaluation")
        self.finish_cancelled(record)
    else:
        self.finish_success(record, val)
def git_get_tracked(dir="."):
    """Return the upstream tracking ref (e.g. ``origin/master``) of *dir*.

    Output of ``git rev-parse --abbrev-ref --symbolic-full-name @{u}``,
    stripped of surrounding whitespace.
    """
    cmd = ["git", "rev-parse", "--abbrev-ref", "--symbolic-full-name", "@{u}"]
    git = Popen(cmd, cwd=dir, stdout=PIPE)
    out, _ = git.communicate()
    return out.strip()
def handle_eval(self, record):
    """Evaluate ./sumfun_ext on the record's params, reading output lines.

    Values above 350 short-circuit the run as a (capped) success; nonsense
    output cancels it. After EOF a crashed process (negative return code)
    or a missing value also cancels the record.
    """
    val = np.nan
    # universal_newlines + line buffering lets us iterate text lines as the
    # subprocess produces them.
    self.process = Popen(['./sumfun_ext', array2str(record.params[0])],
                         stdout=PIPE, bufsize=1, universal_newlines=True)
    for line in self.process.stdout:
        try:
            val = float(line.strip())
        except ValueError:
            # Nonsense output: stop the subprocess and cancel the record.
            logging.warning("Incorrect output")
            self.process.terminate()
            self.finish_cancelled(record)
            return
        if val > 350:
            # Value exploded: stop early and report the capped maximum.
            self.process.terminate()
            self.finish_success(record, 350)
            return
    self.process.wait()
    rc = self.process.poll()
    if rc < 0 or np.isnan(val):
        logging.info("WARNING: Incorrect output or crashed evaluation")
        self.finish_cancelled(record)
    else:
        self.finish_success(record, val)
def pipe(cmd, stdin=None, timeout=None, env=None):
    '''Runs 'stdin' through 'cmd' and returns stdout, stderr and whether we
    timed out.

    :param cmd: argv list for the subprocess
    :param stdin: optional data piped to the child's stdin
    :param timeout: optional seconds before the child's process group is
                    killed with SIGTERM
    :param env: optional mapping of extra environment variables, layered on
                top of the current environment
    :returns: dict with 'stdout', 'stderr' (both None when killed) and
              'killed'
    '''
    # Extend the current environment, if requested
    extra_env = None
    if env:
        extra_env = environ.copy()
        for var in env:
            extra_env[var] = env[var]

    # setsid puts the subprocess in its own process group, rather than the
    # group containing this Python process, so a timeout can kill the whole
    # tree.
    # (Fix: dropped the unused `killed = None` local from the original.)
    proc = Popen(cmd,
                 stdin=PIPE if stdin else None,
                 stdout=PIPE,
                 stderr=PIPE,
                 preexec_fn=setsid,
                 env=extra_env)
    try:
        (stdout, stderr) = proc.communicate(input=stdin, timeout=timeout)
        result = {'stdout': stdout, 'stderr': stderr, 'killed': False}
    except TimeoutExpired:
        # Kill the process group, which will include all children
        killpg(getpgid(proc.pid), SIGTERM)
        result = {'stdout': None, 'stderr': None, 'killed': True}
    proc.wait()  # Reaps zombies
    return result
def invoke_side_effects(argv):
    """Run each static-analysis checker on every C source in *argv*.

    For each source file and each checker script, the checker is invoked in
    a subprocess with a temp path for its XML report; missing output,
    checker failure and timeouts are all captured as failure analyses so
    nothing is silently lost.
    """
    log("invoke_side_effects: %s" % ' '.join(sys.argv))
    gccinv = GccInvocation(argv)
    # Try to run each side effect in a subprocess, passing in a path
    # for the XML results to be written to.
    # Cover a multitude of possible failures by detecting if no output
    # was written, and capturing *that* as a failure
    for sourcefile in gccinv.sources:
        if sourcefile.endswith('.c'):  # FIXME: other extensions?
            for script, genname in [('invoke-cppcheck', 'cppcheck'),
                                    ('invoke-clang-analyzer',
                                     'clang-analyzer'),
                                    ('invoke-cpychecker', 'cpychecker'),
                                    # Uncomment the following to test a
                                    # checker that fails to write any XML:
                                    # ('echo', 'failing-checker'),
                                    ]:
                with tempfile.NamedTemporaryFile() as f:
                    dstxmlpath = f.name
                # The NamedTemporaryFile is deleted on leaving the `with`,
                # so only its (now unused) name is borrowed below.
                assert not os.path.exists(dstxmlpath)
                # Restrict the invocation to just one source file at a
                # time:
                singleinv = gccinv.restrict_to_one_source(sourcefile)
                singleargv = singleinv.argv
                TIMEOUT=60
                t = Timer()
                args = [script, dstxmlpath] + singleargv
                log('invoking args: %r' % args)
                p = Popen(args, stdout=PIPE, stderr=PIPE)
                try:
                    out, err = p.communicate(timeout=TIMEOUT)
                    write_streams(script, out, err)
                    if os.path.exists(dstxmlpath):
                        with open(dstxmlpath) as f:
                            analysis = Analysis.from_xml(f)
                    else:
                        # The checker ran but wrote no XML: record that as
                        # a failure analysis rather than losing it.
                        analysis = make_failed_analysis(
                            genname, sourcefile, t,
                            msgtext=('Unable to locate XML output from %s'
                                     % script),
                            failureid='no-output-found')
                    analysis.set_custom_field('stdout', out)
                    analysis.set_custom_field('stderr', err)
                    analysis.set_custom_field('returncode', p.returncode)
                except TimeoutExpired:
                    analysis = make_failed_analysis(
                        genname, sourcefile, t,
                        msgtext='Timeout running %s' % genname,
                        failureid='timeout')
                    analysis.set_custom_field('timeout', TIMEOUT)
                analysis.set_custom_field('gcc-invocation', ' '.join(argv))
                write_analysis_as_xml(analysis)
def get_docker_git_revs(docker_image, paths):
    """Read the git-revision files at *paths* from inside *docker_image*.

    Returns one revision string per path. If the container output does not
    yield exactly one line per path, a list of empty strings is returned
    instead.
    """
    cat = Popen(["docker", "run", "--rm", docker_image, "cat"] + paths,
                stdout=PIPE)
    out, _ = cat.communicate()
    revisions = out.strip().split('\n')
    if len(revisions) == len(paths):
        return revisions
    return [""] * len(paths)
def run(pro, *args, **kwargs):
    """
    Run vagrant within a project

    Builds a vagrant-specific environment (dotfile/cwd/disks paths,
    organization and basebox settings) on top of the ansible environment,
    then either calls vagrant interactively (destroy) or streams its output
    through timestamp().

    :param pro: .project.Project
    :param args: list[string]
    :param kwargs: dict[string,string]
    :return: the vagrant exit code (or call()'s return value for destroy)
    """
    with cd(pro.folder()):
        # fix invalid exports for vagrant
        NFS().fix_anomalies()
        new_env = ansible_env(os.environ.copy())
        # Make the bundled venv binaries reachable for vagrant plugins.
        new_env['PATH'] = os.pathsep.join([
            new_env['PATH'],
            os.path.join(aeriscloud_path, 'venv/bin')
        ])
        new_env['VAGRANT_DOTFILE_PATH'] = pro.vagrant_dir()
        new_env['VAGRANT_CWD'] = pro.vagrant_working_dir()
        new_env['VAGRANT_DISKS_PATH'] = os.path.join(data_dir(), 'disks')
        # We might want to remove that or bump the verbosity level even more
        if verbosity() >= 4:
            new_env['VAGRANT_LOG'] = 'info'
        new_env['AERISCLOUD_PATH'] = aeriscloud_path
        new_env['AERISCLOUD_ORGANIZATIONS_DIR'] = os.path.join(
            data_dir(), 'organizations')
        org = default_organization()
        if org:
            new_env['AERISCLOUD_DEFAULT_ORGANIZATION'] = org
        # Fall back to the default organization when the project has none.
        organization_name = pro.organization()
        if organization_name:
            organization = Organization(organization_name)
        else:
            organization = Organization(org)
        basebox_url = organization.basebox_url()
        if basebox_url:
            new_env['VAGRANT_SERVER_URL'] = basebox_url
        args = ['vagrant'] + list(args)
        logger.debug('running: %s\nenv: %r', ' '.join(args), new_env)
        # support for the vagrant prompt
        if args[1] == 'destroy':
            return call(args, env=new_env, **kwargs)
        else:
            process = Popen(args, env=new_env, stdout=PIPE, bufsize=1,
                            **kwargs)
            for line in iter(process.stdout.readline, b''):
                timestamp(line[:-1])
            # empty output buffers
            process.poll()
            # NOTE(review): poll() may still return None if the child has
            # not fully exited when its stdout hits EOF; a wait() would
            # guarantee a real return code — confirm.
            return process.returncode
def eval(self, record_id, params, extra_args=None):
    """Launch ./sphere_ext on *params* and report the float it prints.

    Finishes the record as success with the parsed value, or cancelled when
    the output is not a float (or the evaluation crashed).
    """
    try:
        self.process = Popen(['./sphere_ext', array2str(params[0])],
                             stdout=PIPE)
        out = self.process.communicate()[0]
        self.finish_success(record_id, float(out))
    except ValueError:
        logging.info("WARNING: Incorrect output or crashed evaluation")
        self.finish_cancelled(record_id)
def get_relationship(refname, other_refname):
    """Return (ahead, behind) commit counts between two refs.

    Wraps ``git rev-list --left-right --count refname...other_refname``,
    whose output is two whitespace-separated integers.
    """
    spec = refname + "..." + other_refname
    git = Popen(["git", "rev-list", "--left-right", "--count", spec],
                stdout=PIPE)
    out, _ = git.communicate()
    ahead, behind = [int(field) for field in out.strip().split()]
    return (ahead, behind)
def do_interview(self, hosts=None, num_of_segments=1, directory_pairs=None,
                 has_mirrors=False):
    """
    hosts: list of hosts to expand to
    num_of_segments: number of segments to expand
    directory_pairs: list of tuple directory pairs where first element is
    the primary directory and the 2nd is the mirror.
    Note: this code is done with the assumption of primary only cluster.
    There is an assumption that the user knows the kind of cluster to
    expand.
    Returns: string output, int returncode
    """
    if directory_pairs is None:
        directory_pairs = ('/tmp/foo', '')
    if num_of_segments != len(directory_pairs):
        raise Exception(
            "Amount of directory_pairs needs to be the same amount as the segments."
        )
    # If working_directory is None, then Popen will use the directory where
    # the python code is being ran.
    p1 = Popen(["gpexpand"], stdout=PIPE, stdin=PIPE,
               cwd=self.working_directory)
    # Very raw form of doing the interview part of gpexpand.
    # May be pexpect is the way to do this if we really need it to be more
    # complex. Cannot guarantee that this is not flaky either.
    # The writes below answer gpexpand's prompts IN ORDER — do not reorder.
    # Would you like to initiate a new System Expansion Yy|Nn (default=N):
    p1.stdin.write("y\n")
    # **Enter a blank line to only add segments to existing hosts**[]:
    p1.stdin.write("%s\n" % (",".join(hosts) if hosts else ""))
    if has_mirrors:
        # What type of mirroring strategy would you like?
        # spread|grouped (default=grouped):
        p1.stdin.write("\n")
    # How many new primary segments per host do you want to add? (default=0):
    p1.stdin.write("%s\n" % num_of_segments)
    # Enter new primary data directory #<number primary segment>
    for directory in directory_pairs:
        primary, mirror = directory
        p1.stdin.write("%s\n" % primary)
        if mirror:
            p1.stdin.write("%s\n" % mirror)
    output, err = p1.communicate()
    return output, p1.wait()
def execute(command, env_file, name, profile, region):
    """Inject env. variables into an executable via <name> and/or <env_file>"""
    # command is a tuple
    if len(command) == 0:
        click.echo("nothing to execute")
        return
    env_dict = {}
    if env_file:
        env_vars = []
        # Bug fix: the file handle was opened and never closed; a context
        # manager guarantees closure.
        with open(env_file, 'r') as f:
            for line in f:
                if line.startswith('#'):
                    continue
                key, value = line.strip().split('=', 1)
                env_vars.append({'name': key, 'value': value})
        # Resolve 'ssm:' references through the parameter store.
        for env_var in env_vars:
            value = env_var['value']
            if value.startswith('ssm:'):
                secretKey = value[4:]
                out = get_param(secretKey, profile, region)
                env_var['value'] = out['Value']
        for env_var in env_vars:
            env_dict[env_var['name']] = env_var['value']
            click.echo("injected %s" % env_var['name'])
    if name:
        # (Cleanup: dropped the unused `env_vars = []` and the dead
        # `formatKey(key)` call whose result was discarded.)
        path = name if name[0] == '/' else '/' + name
        params = get_parameters_by_path(path, profile, region)
        for param in params:
            key = formatKey(param['Name'])
            env_dict[key] = param['Value']
            click.echo("injected %s" % key)
    # Layer the injected variables over the current environment and run the
    # target command with them.
    cmd_env = environ.copy()
    cmd_env.update(env_dict)
    p = Popen(command, universal_newlines=True, bufsize=0,
              shell=False, env=cmd_env)
    p.communicate()
    return p.returncode
def terminate(self): """Terminates the process""" # Don't terminate a process that we know has already died. if self.returncode is not None: return if self._job: winprocess.TerminateJobObject(self._job, 127) self.returncode = 127 else: Popen.terminate(self)
def handle_eval(self, record):
    """Run ./sphere_ext on the record's params and parse its float output.

    Marks the record failed when the output cannot be parsed as a float.
    """
    self.process = Popen(['./sphere_ext', array2str(record.params[0])],
                         stdout=PIPE)
    raw = self.process.communicate()[0]
    try:
        # float() raises ValueError when the evaluator printed junk.
        self.finish_success(record, float(raw))
    except ValueError:
        logging.warning("Function evaluation crashed/failed")
        self.finish_failure(record)
def _stop_node(self, container_id):
    """Kill the docker container *container_id* and log whether it worked.

    Success means `docker kill` echoed back the same container id.
    """
    killer = Popen(["docker", "kill", container_id], stdout=PIPE)
    out, _ = killer.communicate()
    reported_id = self._get_container_id(out)
    self.logger.log(
        "stop_node", {
            "container_id": container_id,
            "is_head": container_id == self.head_container_id,
            "success": container_id == reported_id
        })
def crawl_and_yield(crawlCommand):
    """Run *crawlCommand* from this file's directory, echoing stdout lines.

    Raises CalledProcessError when the crawler exits non-zero.
    (Python 2 module: uses print statements and a str EOF sentinel.)
    """
    ps = Popen(crawlCommand, stdout=PIPE, stderr=PIPE,
               cwd=os.path.dirname(os.path.abspath(__file__)))
    # Stream the crawler's stdout line by line until EOF.
    for stdout_line in iter(ps.stdout.readline, ""):
        print stdout_line
    ps.stdout.close()
    return_code = ps.wait()
    if return_code:
        raise CalledProcessError(return_code, crawlCommand)
def objfunction(self, x):
    """Evaluate hyperparameter vector *x* by training via torch.

    Builds the `th eval_mnist_GPU.lua` command line from the hyper_map
    indices, runs it, appends a result row to a per-run CSV log and returns
    the validation result. Raises ValueError on dimension mismatch or a
    failed evaluation. (Python 2 module: uses a print statement.)
    """
    if len(x) != self.dim:
        raise ValueError('Dimension mismatch')
    self.f_eval_count = self.f_eval_count + 1
    # Ids encode dimension, evaluation count, seed and server so that
    # concurrent runs do not collide on filenames.
    experimentId = 'p-'+str(len(x))+'-'+str(self.f_eval_count)+'-'+self.seed+'-'+self.server
    fileId = 'p-'+str(len(x))+'-'+self.seed+'-'+self.server
    m = self.hyper_map
    # Assemble the torch command line; each hyperparameter is looked up by
    # its index in hyper_map.
    exp_arg = []
    exp_arg.append('th'),
    exp_arg.append('eval_mnist_GPU.lua')
    exp_arg.append('--mean')
    exp_arg.append(str(x[m['mean']]))
    exp_arg.append('--std')
    exp_arg.append(str(x[m['std']]))
    exp_arg.append('--learnRate')
    exp_arg.append(str(x[m['learnRate']]))
    exp_arg.append('--momentum')
    exp_arg.append(str(x[m['momentum']]))
    exp_arg.append('--epochs')
    exp_arg.append(str(x[m['epochs']]))
    exp_arg.append('--hiddenNodes')
    exp_arg.append(str(x[m['hiddenNodes']]))
    exp_arg.append('--experimentId')
    exp_arg.append(experimentId)
    exp_arg.append('--seed')
    exp_arg.append(self.seed)
    millis_start = int(round(time.time() * 1000))
    proc = Popen(exp_arg, stdout=PIPE)
    out, err = proc.communicate()
    if proc.returncode == 0:
        # stdout carries "<validation>###<test>" on success.
        results = out.split('###')
        result = float(results[0])
        testResult = float(results[1])
        millis = int(round(time.time() * 1000))
        f_eval_time = millis - millis_start
        if self.bestResult > result:
            self.bestResult = result
        # One CSV row: best-so-far, timing, results, count, timestamp,
        # then the raw hyperparameter vector.
        row = [self.bestResult, f_eval_time, result, testResult,
               self.f_eval_count, millis]
        for xi in range(0, len(x)):
            row.append(x[xi])
        with open('logs/'+fileId+'-output.csv', 'a') as f:
            writer = csv.writer(f)
            writer.writerow(row)
        return result
    else:
        print err
        raise ValueError('Function evaluation error')
def crawl_and_capture(crawlCommand):
    """Run *crawlCommand*, print its stripped stdout, re-raise on failure.

    NOTE(review): the bare ``except`` converts any error into a generic
    Exception carrying stderr (which is still None if communicate() itself
    failed) and loses the original traceback — confirm this is intended.
    (Python 2 module: uses a print statement.)
    """
    ps = Popen(crawlCommand, stdout=PIPE, stderr=PIPE,
               cwd=os.path.dirname(os.path.abspath(__file__)))
    stdout, stderr = None, None
    try:
        stdout, stderr = ps.communicate()
        stdout = stdout.strip()
        print stdout
    except:
        raise Exception(stderr)
def actuate(interval=3600):  # amount of time between shots
    """Take a still photo, upload it to storage, then sleep *interval* secs."""
    image_path = '/home/pi/Desktop/output.jpg'
    # shell=True so the shell expands ~ in the raspistill output path.
    still = Popen('sudo raspistill -o ~/Desktop/output.jpg',
                  stdout=None, shell=True)
    still.wait()  # block until the camera has written the file
    storage.child("images/lastpic.jpg").put(image_path)
    time.sleep(float(interval))
def execute_command(command, env=None, timeout=None):
    """Run *command* through the shell and capture its output.

    :param command: shell command string
    :param env: optional environment mapping for the child process
    :param timeout: optional seconds passed to communicate()
    :returns: (exit_code, stdout, stderr) with streams decoded; on any
              failure (e.g. timeout) returns (255, '', traceback_text)
    """
    try:
        p = Popen(command, env=env, shell=True, stdout=PIPE, stderr=PIPE)
        output, error = p.communicate(timeout=timeout)
        code = p.returncode
        output = output.decode(errors='replace')
        error = error.decode(errors='replace')
    except Exception:
        import traceback  # local import keeps the fix self-contained
        output = ''
        # Bug fix: the module name was misspelled ('traeback'), so this
        # error path raised NameError instead of reporting the failure.
        error = traceback.format_exc()
        code = 255
    return code, output, error
def handle_eval(self, record):
    """Run ./sphere_ext in text mode and finish the record with its float.

    Cancels the record when the output cannot be parsed as a float (or the
    evaluation crashed).
    """
    try:
        self.process = Popen(['./sphere_ext', array2str(record.params[0])],
                             stdout=PIPE, bufsize=1, universal_newlines=True)
        result = self.process.communicate()[0]
        self.finish_success(record, float(result))
    except ValueError:
        self.finish_cancelled(record)
        logging.info("WARNING: Incorrect output or crashed evaluation")
def run_benchmark(self, workload_script, benchmark_iteration, log_start_fn,
                  waited_time_limit=None):
    """Run *workload_script* inside the head container and collect stats.

    Waits on communicate() in 10 s slices so the wait can be abandoned (and
    the benchmark killed) once *waited_time_limit* is exceeded. Stats are
    scraped from a "BENCHMARK_STATS: {...}" stdout line. Returns a dict
    with success flag, return code and parsed stats.
    (Python 2 module: uses print statements and subprocess32.)
    """
    proc = Popen([
        "docker", "exec", self.head_container_id, "/bin/bash", "-c",
        "RAY_BENCHMARK_ENVIRONMENT=stress RAY_BENCHMARK_ITERATION={} RAY_REDIS_ADDRESS={}:6379 RAY_NUM_WORKERS={} python {}"
        .format(benchmark_iteration, self.head_container_ip,
                self.num_workers, workload_script)
    ], stdout=PIPE, stderr=PIPE)
    log_start_fn(proc.pid)
    start_time = time.time()
    done = False
    while not done:
        try:
            # Wait in short slices so the time limit can be enforced even
            # though communicate() itself would block indefinitely.
            (stdoutdata, stderrdata) = proc.communicate(
                timeout=min(10, waited_time_limit))
            done = True
        except (subprocess32.TimeoutExpired):
            waited_time = time.time() - start_time
            if waited_time_limit and waited_time > waited_time_limit:
                self.logger.log(
                    "killed", {
                        "pid": proc.pid,
                        "waited_time": waited_time,
                        "waited_time_limit": waited_time_limit
                    })
                proc.kill()
                return {"success": False, "return_code": None, "stats": {}}
            else:
                self.logger.log(
                    "waiting", {
                        "pid": proc.pid,
                        "time_waited": waited_time,
                        "waited_time_limit": waited_time_limit
                    })
    # The workload prints one JSON stats line we can scrape.
    m = re.search('^BENCHMARK_STATS: ({.*})$', stdoutdata, re.MULTILINE)
    if m:
        output_stats = json.loads(m.group(1))
    else:
        output_stats = {}
    print stdoutdata
    print stderrdata
    return {
        "success": proc.returncode == 0,
        "return_code": proc.returncode,
        "stats": output_stats
    }
def run(self):
    '''
    Execute a module as a bash command. Open handles file object as input.
    Log output and/or errors.

    When stdin is piped, the handles YAML is rewritten so the module sees
    the temporary hdf5 filename. stderr lines matching the ignore list
    (INFO:) are filtered before printing. Raises JteratorError on module
    errors or if the command could not be run.
    '''
    command = self.bake_command()
    try:
        process = Popen(command,
                        stdin=self.streams['input'],
                        stdout=self.streams['output'],
                        stderr=self.streams['error'])
        # Prepare handles input.
        input_data = None
        if self.streams['input'] == PIPE:
            input_data = open(self.handles).readlines()
            # We have to provide the temporary filename to the modules.
            i = -1
            for line in input_data:
                i = i + 1
                # Replace the value of the 'hdf5_filename' key.
                # Doing this via YAML should be saver.
                if re.match('hdf5_filename', line):
                    hdf5_key = yaml.load(line)
                    hdf5_key['hdf5_filename'] = self.tmp_filename
                    input_data[i] = yaml.dump(hdf5_key,
                                              default_flow_style=False)
            # Create the new handles string.
            input_data = ''.join(input_data)
        # Execute sub-process.
        (stdoutdata, stderrdata) = process.communicate(input=input_data)
        # Write output and errors if 'logging' is requested by user
        if self.logging_level is not None:
            self.write_output_and_errors(stdoutdata, stderrdata)
        # Modify for nicer output to command line.
        ignore_list = ['INFO:']
        if any([re.search(x, stderrdata) for x in ignore_list]):
            # Rebuild stderr without the ignored lines.
            newstderrdata = str()
            for line in stderrdata.split('\n'):
                if not any([re.search(x, line) for x in ignore_list]):
                    newstderrdata = newstderrdata + line
            stderrdata = newstderrdata
        print stdoutdata
        print stderrdata
        # Close STDIN file descriptor.
        # NOTE(review): missing call parentheses — this references the
        # bound method without invoking it; confirm `.close()` was meant.
        process.stdin.close
        # Take care of any errors during the execution.
        if process.returncode > 0 or re.search('Error', stderrdata):
            raise JteratorError(self.get_error_message(process, input_data,
                                                       stdoutdata,
                                                       stderrdata))
    except ValueError as error:
        raise JteratorError('Failed running \'%s\'. Reason: \'%s\'' %
                            (command, str(error)))
def invoke_real_executable(argv):
    """Run the real compiler with *argv*'s arguments, mirroring its stderr.

    stderr is fed to the GCC-diagnostic parser together with timing stats;
    the compiler's exit code is returned. A KeyboardInterrupt merely stops
    the capture.
    """
    args = [get_real_executable(argv)] + argv[1:]
    if 0:  # debug aid: flip to log the exact command line
        log(' '.join(args))
    proc = Popen(args, stderr=PIPE)
    try:
        timer = Timer()
        stdout_data, stderr_data = proc.communicate()
        sys.stderr.write(stderr_data)
        parse_gcc_stderr(stderr_data, stats=make_stats(timer))
    except KeyboardInterrupt:
        pass
    return proc.returncode
def process(self, json_data):
    """Render ``self.template`` with *json_data* via the sablon binary.

    Keeps stdout/stderr/returncode of the sablon call on self and, on
    success, reads the produced docx into ``self.file_data``. The scratch
    directory is always removed.
    """
    scratch = tempfile.mkdtemp(prefix="opengever.core.sablon_")
    target = join(scratch, "sablon_output.docx")
    template_path = self.template.as_file(scratch)
    try:
        worker = Popen([environ.get("SABLON_BIN", "sablon"),
                        template_path, target],
                       stdin=PIPE, stdout=PIPE, stderr=PIPE)
        self.stdout, self.stderr = worker.communicate(input=json_data)
        self.returncode = worker.returncode
        if self.is_processed_successfully():
            with open(target, "rb") as produced:
                self.file_data = produced.read()
    finally:
        shutil.rmtree(scratch)
def _get_container_ip(self, container_id):
    """Return the bridge-network IPv4 address of *container_id*.

    Raises RuntimeError when the `docker inspect` output does not start
    with a dotted-quad address.
    """
    proc = Popen([
        "docker", "inspect",
        "--format={{.NetworkSettings.Networks.bridge.IPAddress}}",
        container_id
    ], stdout=PIPE, stderr=PIPE)
    (stdoutdata, _) = proc.communicate()
    # Fix: use a raw string for the regex — "\." in a plain string literal
    # is an invalid escape sequence (DeprecationWarning on Python 3.6+).
    p = re.compile(r"([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})")
    m = p.match(stdoutdata)
    if not m:
        raise RuntimeError("Container IP not found")
    else:
        return m.group(1)
class CppSim(MPIProcessWorker):
    """MPI worker that evaluates a delft3d simulation for a parameter set."""

    def eval(self, record_id, params, extra_args=None):
        """Run run_simulation.py with *params* and report its float result.

        params[0] is the parameter vector; params[1] and params[2] are
        forwarded as extra command-line arguments (presumably a case name
        and an index — TODO confirm against the caller).
        """
        try:
            # Debug tracing: append what this rank was asked to evaluate.
            fp = open("TEST_AGAIN.txt", "a")
            fp.write("this is try to launch process%s\n" % (rank))
            fp.write("this is try to launch process%s\n" % (str(record_id)))
            fp.write("this is try to launch process%s\n" % (str(params[0])))
            fp.write("this is try to launch process%s\n" % (array2str(params[0])))
            fp.write("this is try to launch process%s\n" % (str(params[1])))
            fp.write("this is try to launch process%s\n" % (str(params[2])))
            fp.write("this is try to launch process%s\n" % (type(params)))
            fp.close()
            # # self.process = Popen(['python', "object_func.py", array2str(params[0]), "%s" % rank], stdout=PIPE)
            # workingdir = "/home/users/nus/e0022672/delft3d/examples/" + str(params[1]) + "/"
            workingdir = "/home/users/nus/e0022672/delft3d/examples/"
            # os.chdir(workingdir)
            # os.system("./run_flow2d3d.sh")
            self.process = Popen(["python", workingdir + "run_simulation.py",
                                  array2str(params[0]), str(params[1]),
                                  str(params[2])],
                                 stdout=PIPE)
            out = self.process.communicate()[0]
            # val = 2
            self.finish_success(record_id, float(out))
        except ValueError:
            logging.info("WARNING: Incorrect output or crashed evaluation")
            # NOTE(review): other workers in this codebase call
            # finish_cancelled(); confirm finish_cancel exists on
            # MPIProcessWorker.
            self.finish_cancel(record_id)
def run(self, *args, **kwargs):
    """Execute *args* as a command and return its stdout.

    cwd defaults to self.path when that is set (it is only None while
    __init__ runs). The child's environment is emptied, which among other
    things drops GIT_WORK_TREE. Raises CommandError on a non-zero exit.
    """
    if self.path is not None:  # only None when called in the __init__ function
        kwargs.setdefault("cwd", self.path)
    # NOTE if we do want to make a copy of environmental variables,
    # we must remove GIT_WORK_TREE
    kwargs["env"] = {}
    kwargs["stdout"] = PIPE
    kwargs["stderr"] = PIPE
    child = Popen(args, **kwargs)
    out, err = child.communicate()
    if child.returncode != 0:
        raise CommandError(args[0], child.returncode, out, err)
    return out
def handle_eval(self, record):
    """Evaluate ./sumfun_ext, streaming and parsing intermediate outputs.

    Each stdout line is parsed as a float; values above 350 end the run
    early as a capped success, unparseable output fails the record, and a
    crashed process (negative return code) or missing value also fails it.
    """
    self.process = Popen(['./sumfun_ext', array2str(record.params[0])],
                         stdout=PIPE)
    val = np.nan
    # Continuously check for new outputs from the subprocess
    while True:
        output = self.process.stdout.readline()
        # NOTE(review): with stdout=PIPE and no universal_newlines,
        # readline() returns bytes on Python 3, so the == '' EOF test only
        # terminates on Python 2 — confirm the intended interpreter.
        if output == '' and self.process.poll() is not None:  # No new output
            break
        if output:  # New intermediate output
            try:
                val = float(output.strip())  # Try to parse output
                if val > 350:  # Terminate if too large
                    self.process.terminate()
                    self.finish_success(record, 350)
                    return
            except ValueError:  # If the output is nonsense we terminate
                logging.warning("Incorrect output")
                self.process.terminate()
                self.finish_failure(record)
                return
    rc = self.process.poll()  # Check the return code
    # A negative rc means the child was killed by a signal.
    if rc < 0 or np.isnan(val):
        logging.warning("Incorrect output or crashed evaluation")
        self.finish_failure(record)
    else:
        self.finish_success(record, val)
def set_values(self, video):
    """Probe *video*'s source URL with avprobe and set its duration.

    The probe runs in its own process group and is sent SIGINT after a
    10 second timeout; all failures are logged rather than raised.
    """
    cmd = """avprobe -v error -show_format -show_streams "{}" 2>&1 """.format(
        self.url)
    try:
        with Popen(cmd, shell=True, stdout=PIPE,
                   preexec_fn=os.setsid) as process:
            try:
                streams = process.communicate(timeout=10)[0]
            except TimeoutExpired:
                # setsid above made the child its own group leader, so its
                # pid doubles as the process-group id here.
                os.killpg(
                    process.pid,
                    signal.SIGINT)  # send signal to the process group
                raise
        duration = getDurationFromStreams(streams)
        video.duration = duration
    except CalledProcessError as e:
        logger.error(
            "CalledProcessError error({}) when running command {}".format(
                e.returncode, cmd))
    except TimeoutExpired as e:
        logger.error(
            "TimeoutExpired error when running command {}".format(cmd))
    except:
        # Catch-all keeps probing failures from crashing the caller; the
        # exception type is logged for diagnosis.
        logger.error("Unexpected error({}) when running command {}".format(
            sys.exc_info()[0], cmd))
class DummySim(ProcessWorkerThread):
    """Worker that evaluates sphere_ext_files via a per-thread input file."""

    def handle_eval(self, record):
        """Write params to a unique file, run the evaluator, parse a float."""
        # my_gen hands out a name no other thread can be using concurrently.
        input_name = str(my_gen.next_filename()) + ".txt"
        # Print the parameter vector to the input file.
        with open(input_name, 'w') as fh:
            fh.write(array2str(record.params[0]))
        # Run the objective function, passing the input file's name.
        self.process = Popen(['./sphere_ext_files', input_name], stdout=PIPE)
        raw = self.process.communicate()[0]
        # Parse the output; float() raises ValueError on junk.
        try:
            value = float(raw)
            self.finish_success(record, value)
            os.remove(input_name)  # Remove input file
        except ValueError:
            logging.warning("Function evaluation crashed/failed")
            self.finish_failure(record)
            os.remove(input_name)  # Remove input file
class Voice:
    """Text-to-speech wrapper around the espeak command-line tool.

    At most one utterance plays at a time: say() stops any current speech
    before starting a new espeak process.
    """

    def __init__(self):
        # Handle of the currently-speaking espeak process, or None.
        self.subprocess = None

    def say(self, quote):
        """Speak *quote* aloud (at 140 wpm), interrupting current speech."""
        self.stop()
        self.subprocess = Popen(["espeak",
                                 "-s", "140",
                                 quote])

    def stop(self):
        """Kill the current espeak process, if any, and reap it."""
        if self.subprocess is not None:
            self.subprocess.kill()
            # Fix for the old FIXME ("how do we know the process really
            # terminated?"): wait() blocks until the killed process is
            # gone and reaps it, avoiding a zombie.
            self.subprocess.wait()
            self.subprocess = None
def setUpClass(cls):
    # In Python 3 Click makes a call to locale to determine how the
    # terminal wants to handle unicode. Because we mock Popen to avoid
    # calling the real verifier, we need to get the actual result of
    # locale to provide it to Click during the test run.
    locale_proc = Popen(['locale', '-a'], stdout=PIPE, stderr=PIPE)
    cls.locale = locale_proc.communicate()[0]
def run_compiler(self, language, filename, executable_name):
    """Compile *filename* (g++ when *language* is truthy, else gcc).

    Logs the command, the compiler's stderr and the exit status to
    self.log. Raises CompileErrorException when compilation fails,
    including when the compiler exceeds COMPILE_TIMEOUT.
    """
    from subprocess import TimeoutExpired  # local: the fix needs this name
    args = ["g++" if language else "gcc", "-static", "-w", "-O2",
            filename, "-o", executable_name]
    self.log += ['Running: ' + ' '.join(args)]
    proc = Popen(args, cwd=self.base_dir,
                 stdin=PIPE, stdout=PIPE, stderr=PIPE)
    try:
        output = proc.communicate(timeout=self.COMPILE_TIMEOUT)
        self.log += [str(output[1])]
    except TimeoutExpired:
        # Bug fix: communicate(timeout=...) raises rather than returning,
        # so the original's `if proc.poll() is None` check was dead code
        # and the timeout escaped without the compiler being killed.
        self.log += ['Compile timeout.']
        proc.kill()
        proc.communicate()  # drain pipes and reap the killed compiler
    self.log += ["Compiler returns %d." % proc.returncode]
    if proc.returncode:
        raise CompileErrorException()
def clone(cls, remote_url, path):
    """Clone the remote and return a GitVcs object pointed at the new repo.

    :param str remote_url: the URL to clone from
    :param str path: path to clone to
    :rtype: GitVcs
    :returns: a GitVcs object for the new cloned repo
    :raises tigerhost.vcs.base.CommandError:
    """
    cmd = ['git', 'clone', '--recursive', remote_url, path]
    cloning = Popen(cmd, stdout=PIPE, stderr=PIPE)
    out, err = cloning.communicate()
    if cloning.returncode != 0:
        raise CommandError(cmd[0], cloning.returncode, out, err)
    return cls(path=path)
def stop(self, job, msg, exit_code):
    """ Stop running job and remove it from PBS queue.

    :param job: :py:class:`Job` instance
    :param msg: Message that will be passed to the user
    :param exit_code: Exit code recorded on the job when marking it
    :raises CisError: when qdel reports the job is in the wrong state (170)
    :raises OSError: when qdel exits with any other non-zero code
    """
    _pbs_id = ''
    # Get Job PBS ID
    try:
        _pbs_id = str(job.scheduler.id)
    except:
        job.die('@PBS - Unable to read PBS job ID', exc_info=True)
        return
    # Run qdel
    logger.debug("@PBS - Killing job")
    # @TODO Seperate users for each sevice
    # Run qdel with proper user permissions
    # _user = self.jm.services[job.service].config['username']
    # _comm = "/usr/bin/qdel %s" % _pbs_id
    # _sudo = "/usr/bin/sudo /bin/su -c \"%s\" %s" % (_comm, _user)
    # _opts = ['/usr/bin/ssh', '-t', '-t', 'local', _sudo]
    _opts = ['/usr/bin/qdel', '-b', str(conf.pbs_timeout), _pbs_id]
    logger.log(VERBOSE, "@PBS - Running command: %s", _opts)
    # stderr is folded into stdout (STDOUT) so one read captures both.
    _proc = Popen(_opts, stdout=PIPE, stderr=STDOUT)
    _output = _proc.communicate()[0]
    logger.log(VERBOSE, _output)
    # Check return code. If qdel was not killed by signal Popen will
    # not rise an exception
    # @TODO handle temporary communication timeouts with pbs server
    if _proc.returncode == 170:  # Job in wrong state (e.g. exiting)
        logger.debug("@PBS - Wait with job kill: /usr/bin/qdel "
                     "returned 170 exit code (%s)", _output)
        raise CisError("PBS qdel wrong job state")
    if _proc.returncode != 0:
        raise OSError((
            _proc.returncode,
            "/usr/bin/qdel returned non zero exit code.\n%s" %
            str(_output)
        ))
    # Mark as killed by user
    job.mark(msg, exit_code)
def invoke(self, argv):
    """Run the configured side effects for each C source file named in
    *argv*, then invoke the real compiler driver with the original
    arguments.

    :param argv: full compiler command line (argv[0] is the driver name)
    Side effects' analyses are written as XML; the real driver's stdout
    and stderr are forwarded to self.ctxt, and self.returncode is set to
    the real driver's exit status.
    """
    # BUG FIX: the original logged sys.argv (this wrapper's own command
    # line) instead of the argv it was asked to invoke.
    self.log("Driver.invoke: %s" % ' '.join(argv))
    gccinv = GccInvocation(argv)
    self.log(' gccinv.sources: %r' % gccinv.sources)

    # Run the side effects on each source file:
    for sourcefile in gccinv.sources:
        self.log(' sourcefile: %r' % sourcefile)
        if sourcefile.endswith('.c'):  # FIXME: other extensions?
            single_source_gccinv = gccinv.restrict_to_one_source(sourcefile)
            # Avoid linker errors due to splitting up the build into
            # multiple gcc invocations:
            single_source_gccinv.argv += ['-c']
            self.log(' single_source_gccinv: %r' % single_source_gccinv)
            for side_effect in self.side_effects:
                analysis = self.invoke_tool(side_effect,
                                            single_source_gccinv,
                                            sourcefile)
                #analysis.set_custom_field('gcc-invocation', ' '.join(argv))
                self.write_analysis_as_xml(analysis)

    # Now run the real driver.
    # Note that we already ran the real gcc earlier as a
    # side-effect per source-file, capturing warnings there.
    # We have to do it separately from here since the invocation
    # might cover multiple source files.
    argv = [self.real_driver] + gccinv.argv[1:]
    env = os.environ.copy()
    # FIXME: this probably shouldn't be hardcoded
    env['LANG'] = 'C'
    p = Popen(argv, stdout=PIPE, stderr=PIPE, env=env)
    out, err = p.communicate()
    self.ctxt.stdout.write(out)
    self.ctxt.stderr.write(err)
    self.returncode = p.returncode
def handle_eval(self, record):
    """Launch the external ./sphere_ext binary on the record's first
    parameter vector and report success (parsed float) or failure on
    the record.
    """
    cmd = ['./sphere_ext', array2str(record.params[0])]
    self.process = Popen(cmd, stdout=PIPE)
    raw_output = self.process.communicate()[0]
    try:
        # float() raises ValueError when the output is not numeric
        self.finish_success(record, float(raw_output))
    except ValueError:
        logging.warning("Function evaluation crashed/failed")
        self.finish_failure(record)
def check_output(*popenargs, **kwargs):
    """
    Re-implement check_output from subprocess32, but with a timeout
    that kills child processes.

    The child is started in its own session (os.setsid) so the whole
    process group can be signalled on timeout.

    :raises TimeoutExpired: if the command runs longer than ``timeout``
    :raises CalledProcessError: if the command exits non-zero

    See https://github.com/google/python-subprocess32/blob/master/subprocess32.py#L606
    """
    timeout = kwargs.pop('timeout', None)
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    process = Popen(stdout=PIPE, preexec_fn=os.setsid, *popenargs, **kwargs)
    try:
        output = process.communicate(timeout=timeout)[0]
    except TimeoutExpired as error:
        # preexec_fn=os.setsid made the child a session/group leader, so
        # its pid doubles as the process-group id for killpg.
        os.killpg(process.pid, signal.SIGINT)
        # BUG FIX: reap the killed child; the original raised without
        # waiting, leaving a zombie process behind.
        process.wait()
        raise error
    retcode = process.poll()
    if retcode:
        raise CalledProcessError(retcode, process.args, output=output)
    return output
def execute_command_locally(self, command_obj, directory, job_log):
    """Run command_obj.command through the shell in *directory*.

    Unless command_obj.dont_log_output is set, the child's stdout/stderr
    are captured in a temporary log file which is streamed into job_log
    while the job runs, then deleted.

    :raises Exception: if the command exits with a non-zero return code.
    """
    # create a temporary log file
    temp_log_file = None
    temp_log_filename = None
    if not command_obj.dont_log_output:
        # 11-digit random suffix to avoid collisions.
        # BUG FIX: the lower bound was "10*10" (i.e. 100), almost
        # certainly a typo for 10**10 — it allowed much shorter suffixes.
        temp_log_filename = get_absolute_path(
            "tmp.pipelinelog." + command_obj.command_id + "." +
            str(random.randint(10**10, 10**11 - 1)),
            directory, allow_compressed_version=False)
        temp_log_file = open(temp_log_filename, "w+")
        #temp_log_file = tempfile.NamedTemporaryFile(bufsize=0)

    spawned_process = Popen(command_obj.command, bufsize=0, shell=True,
                            cwd=directory, stdout=temp_log_file,
                            stderr=temp_log_file, executable=self.shell)
    if temp_log_file:
        # while the job is running, continually read from the
        # temp_log_file and copy this to the job_log
        try:
            self.copy_command_output_to_log(command_obj, spawned_process,
                                            temp_log_file, job_log)
        finally:
            # close/remove even if streaming the output fails
            temp_log_file.close()
            os.remove(temp_log_filename)
    else:
        spawned_process.wait()

    if spawned_process.returncode is not None and spawned_process.returncode != 0:
        raise Exception("Non-zero return code: " +
                        str(spawned_process.returncode))
def wait(self, timeout=None):
    """Wait for the started process to complete.

    "timeout" is a floating point number of seconds after
        which to timeout.  Default is None, which is to never timeout.

    If the wait time's out it will raise a ProcessError. Otherwise it
    will return the child's exit value. Note that in the case of a timeout,
    the process is still running. Use kill() to forcibly stop the process.
    """
    if timeout is None or timeout < 0:
        # Use the parent call.
        try:
            return Popen.wait(self)
        except OSError as ex:
            # If the process has already ended, that is fine. This is
            # possible when wait is called from a different thread.
            # NOTE(review): errno 10 is ECHILD on this platform — confirm;
            # on most Unixes ECHILD is errno 10 only on Windows-style tables.
            if ex.errno != 10:  # No child process
                raise
        return self.returncode

    # We poll for the retval, as we cannot rely on self.__hasTerminated
    # to be called, as there are some code paths that do not trigger it.
    # The accuracy of this wait call is between 0.1 and 1 second.
    time_now = time.time()
    time_end = time_now + timeout
    # These values will be used to incrementally increase the wait period
    # of the polling check, starting from the end of the list and working
    # towards the front. This is to avoid waiting for a long period on
    # processes that finish quickly, see bug 80794.
    time_wait_values = [1.0, 0.5, 0.2, 0.1]
    while time_now < time_end:
        result = self.poll()
        if result is not None:
            return result
        # We use hasTerminated here to get a faster notification.
        # (Condition acquired around the timed wait; once the short wait
        # values are exhausted we keep waiting 1.0s per iteration.)
        self.__hasTerminated.acquire()
        if time_wait_values:
            wait_period = time_wait_values.pop()
        self.__hasTerminated.wait(wait_period)
        self.__hasTerminated.release()
        time_now = time.time()
    # last chance
    result = self.poll()
    if result is not None:
        return result

    raise ProcessError("Process timeout: waited %d seconds, "
                       "process not yet finished." % (timeout,),
                       WAIT_TIMEOUT)
def kill(self, exitCode=-1, gracePeriod=None, sig=None):
    """Kill process.

    "exitCode" this sets what the process return value will be.
    "gracePeriod" [deprecated, not supported]
    "sig" (Unix only) is the signal to use to kill the process. Defaults
        to signal.SIGKILL. See os.kill() for more information.
    """
    if gracePeriod is not None:
        import warnings
        warnings.warn("process.kill() gracePeriod is no longer used",
                      DeprecationWarning)

    # Need to ensure stdin is closed, makes it easier to end the process.
    if self.stdin is not None:
        self.stdin.close()

    if sys.platform.startswith("win"):
        # TODO: 1) It would be nice if we could give the process(es) a
        #          chance to exit gracefully first, rather than having to
        #          resort to a hard kill.
        #       2) May need to send a WM_CLOSE event in the case of a GUI
        #          application, like the older process.py was doing.
        Popen.kill(self)
    else:
        if sig is None:
            sig = signal.SIGKILL
        try:
            if self.__use_killpg:
                os.killpg(self.pid, sig)
            else:
                os.kill(self.pid, sig)
        # BUG FIX: "except OSError, ex:" is Python-2-only syntax; the
        # sibling wait() already uses the "as" form.
        except OSError as ex:
            # errno 3 == ESRCH: process already gone, which is fine.
            if ex.errno != 3:
                # Ignore:   OSError: [Errno 3] No such process
                raise
    self.returncode = exitCode
def run_popen_with_timeout(command_string, timeout):
    """
    Run a sub-program in subprocess.Popen and kill it if the specified
    timeout has passed.

    :param command_string: shell command line (run with shell=True)
    :param timeout: seconds before the watchdog SIGTERMs the child
    :returns: a tuple of (success, stdout, stderr); success is False
        when the watchdog had to kill the process.
    """
    kill_check = threading.Event()

    def _kill_process_after_a_timeout(pid):
        os.kill(pid, signal.SIGTERM)
        kill_check.set()  # tell the main routine that we had to kill
        # use SIGKILL if hard to kill...
        return

    p = Popen(command_string, bufsize=1, shell=True,
              stdin=PIPE, stdout=PIPE, stderr=PIPE)
    watchdog = threading.Timer(timeout, _kill_process_after_a_timeout,
                               args=(p.pid,))
    watchdog.start()
    stdout, stderr = p.communicate()
    watchdog.cancel()  # if it's still waiting to run
    # is_set() replaces the deprecated camelCase isSet() alias.
    success = not kill_check.is_set()
    kill_check.clear()
    return (success, stdout, stderr)
def _run_subprocess(self, sourcefile, argv, env=None):
    """
    Support for running the bulk of the side effect in a subprocess,
    with timeout support.

    Returns the analysis produced by handle_output(), or a failed
    analysis (failureid='timeout') if the tool exceeded self.timeout.
    """
    self.log('%s: _run_subprocess(%r, %r)'
             % (self.name, sourcefile, argv))
    if 0:
        self.log('env: %s' % env)
    p = Popen(argv, stdout=PIPE, stderr=PIPE, env=env)
    try:
        t = Timer()
        out, err = p.communicate(timeout=self.timeout)
        self.ctxt.write_streams(argv[0], out, err)
        result = SubprocessResult(sourcefile, argv, p.returncode, out, err, t)
        analysis = self.handle_output(result)
        return analysis
    except TimeoutExpired:
        # BUG FIX: the original left the timed-out child running; kill
        # it and drain its pipes so the process gets reaped.
        p.kill()
        p.communicate()
        analysis = self._make_failed_analysis(
            sourcefile, t,
            msgtext='Timeout running %s' % self.name,
            failureid='timeout')
        analysis.set_custom_field('timeout', self.timeout)
        analysis.set_custom_field('command-line', ' '.join(argv))
        return analysis
class UserProgram:
    """Runs a user-supplied program with piped stdin/stdout/stderr and an
    optional wall-clock time limit enforced by a threading.Timer.
    """

    def __init__(self, path):
        self.path = path          # executable path/command passed to Popen
        self.process = None       # Popen handle, set by execute()
        self.timeout = 1.0        # seconds before time_limit_exceeded fires
        self.timer = None         # active threading.Timer, if any
        self.tle = False          # True once the time limit was exceeded

    def execute(self):
        """Start the program; raises ValueError on an empty path."""
        if not self.path:
            raise ValueError("Path cannot be empty.")
        self.process = Popen(self.path, stdin=PIPE, stdout=PIPE, stderr=PIPE)

    def start_timer(self):
        """Arm the time-limit watchdog."""
        self.timer = Timer(self.timeout, self.time_limit_exceeded)
        self.timer.start()

    def stop_timer(self):
        """Disarm the watchdog."""
        self.timer.cancel()
        self.timer = None

    def read_line(self):
        """Read one line from the program's stdout (bytes)."""
        return self.process.stdout.readline()

    def write_line(self, line):
        """Send one line (bytes) to the program's stdin.

        BUG FIX: the original wrote to self.process.stdout — the read-end
        pipe of the child's output — instead of its stdin; the write is
        flushed so the child sees it immediately.
        """
        self.process.stdin.write(line)
        self.process.stdin.flush()

    def kill_process(self):
        """Kill the child, ignoring errors (e.g. already exited)."""
        try:
            self.process.kill()
        except Exception:
            pass

    def time_limit_exceeded(self):
        """Watchdog callback: mark TLE and kill the program."""
        self.tle = True
        self.kill_process()
def wait(self, timeout=None):
    """Wait for the started process to complete.

    "timeout" is a floating point number of seconds after
        which to timeout.  Default is None, which is to never timeout.

    If the wait time's out it will raise a ProcessError. Otherwise it
    will return the child's exit value. Note that in the case of a timeout,
    the process is still running. Use kill() to forcibly stop the process.
    """
    # NOTE(review): despite the docstring, this variant only implements
    # the no-timeout path; a positive timeout falls through and returns
    # None. Compare with the polling wait() elsewhere in this file —
    # confirm whether the timeout branch was truncated here.
    if timeout is None or timeout < 0:
        # Use the parent call.
        try:
            return Popen.wait(self)
        # BUG FIX: "except OSError, ex:" is Python-2-only syntax; use the
        # "as" form, consistent with the other wait() implementation.
        except OSError as ex:
            # If the process has already ended, that is fine. This is
            # possible when wait is called from a different thread.
            if ex.errno != 10:  # No child process
                raise
        return self.returncode
def run2(command, check=True, timeout=None, *args, **kwargs):
    ''' Run a command.

        If check=True (the default), then if return code is not zero or
        there is stderr output, raise CalledProcessError. Return any
        output in the exception.

        If timeout (in seconds) is set and command times out, raise
        TimeoutError. '''

    ''' Parts from subprocess32.check_output(). '''

    # Deprecated: this raise is unconditional, so everything below it is
    # unreachable dead code kept for reference only.
    raise Exception('Deprecated. Use the sh module.')

    # use subprocess32 for timeout
    from subprocess32 import Popen, CalledProcessError, TimeoutExpired
    # NOTE(review): `stdout` and `stderr` are not defined in this scope;
    # if the raise above were removed, the next line would NameError —
    # presumably they were module globals. Verify before resurrecting.
    process = Popen(command, stdout=stdout, stderr=stderr, *args, **kwargs)
    try:
        process.wait(timeout=timeout)
    except TimeoutExpired:
        print('TimeoutExpired') #DEBUG
        #print('stdout: %s, (%d)' % (str(stdout), len(str(stdout)))) #DEBUG
        #print('stderr: %s, (%d)' % (str(stderr), len(str(stderr)))) #DEBUG
        try:
            # kill the timed-out child, then reap it
            process.kill()
            process.wait()
        finally:
            print('after kill/wait') #DEBUG
            #print('stdout: %s, (%d)' % (str(stdout), len(str(stdout)))) #DEBUG
            #print('stderr: %s, (%d)' % (str(stderr), len(str(stderr)))) #DEBUG
            raise TimeoutExpired(process.args, timeout)
    if check:
        retcode = process.poll()
        if retcode:
            raise CalledProcessError(retcode, process.args)
def check_output(*popenargs, **kwargs):
    r"""Run command with arguments and return its output as a byte string.

    If the exit code was non-zero it raises a CalledProcessError.  The
    CalledProcessError object will have the return code in the returncode
    attribute and output in the output attribute.

    The arguments are the same as for the Popen constructor.  Example:

    >>> check_output(["ls", "-l", "/dev/null"])
    'crw-rw-rw- 1 root root 1, 3 Oct 18  2007 /dev/null\n'

    The stdout argument is not allowed as it is used internally.
    To capture standard error in the result, use stderr=STDOUT.

    >>> check_output(["/bin/sh", "-c",
    ...               "ls -l non_existent_file ; exit 0"],
    ...              stderr=STDOUT)
    'ls: non_existent_file: No such file or directory\n'
    """
    timeout = kwargs.pop('timeout', None)
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    if _kill_processes.is_set():
        raise TerminateSignaled()

    process = Popen(stdout=PIPE, *popenargs, **kwargs)
    # Track the process so an external terminate request can kill it.
    _processes.append(process)
    try:
        output, unused_err = process.communicate(timeout=timeout)
    except TimeoutExpired:
        process.kill()
        output, unused_err = process.communicate()
        raise TimeoutExpired(process.args, timeout, output=output)
    finally:
        # BUG FIX: the original removed the entry only on success or
        # TimeoutExpired; any other exception from communicate() leaked
        # the process in _processes. The finally clause covers all paths.
        _processes.remove(process)
    retcode = process.poll()
    if retcode:
        raise CalledProcessError(retcode, process.args, output=output)
    return output