def autocomplete(
    self,
    arguments: List[str],
    *,
    verbosity: int = 0,
    network_timeout: float = NETWORK_TIMEOUT,
    timeout: float = 30,
) -> str:
    """Run ``neuro`` in zsh shell-completion mode and return its raw output.

    The completion machinery is driven entirely through environment
    variables (_NEURO_COMPLETE / COMP_WORDS / COMP_CWORD), so the binary is
    invoked without command-line arguments.
    """
    __tracebackhide__ = True
    log.info("Run 'neuro %s'", " ".join(arguments))
    words = self._default_args(verbosity, network_timeout) + arguments
    env = dict(os.environ)
    env["_NEURO_COMPLETE"] = "complete_zsh"
    env["COMP_WORDS"] = " ".join(shlex.quote(word) for word in words)
    env["COMP_CWORD"] = str(len(words) - 1)
    proc = subprocess.run(
        "neuro",
        encoding="utf8",
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        env=env,
        timeout=timeout,
    )
    # Click's completion mode exits with status 1 and writes candidates to
    # stdout only.
    assert proc.returncode == 1
    assert not proc.stderr
    return proc.stdout
def kill_processes_and_all_descendants(proc_set):
    """Kill a set of processes and all their children.

    proc_set: iterable of objects exposing a ``pid`` attribute (e.g.
        psutil/subprocess process objects).

    Descendants are discovered with ``pstree -pal``, whose output is cut
    down to one PID per line, then everything is killed with ``kill -9``.
    """
    pids_to_kill = set()
    for proc in proc_set:
        # pstree prints "name,PID args" lines; the cut pipeline isolates the
        # PID column.
        cmd = "pstree {} -pal | cut -d',' -f2 | cut -d' ' -f1 | cut -d')' -f1".format(
            proc.pid)
        p = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
        for pid_str in p.stdout.decode('utf-8').split():
            # Bug fix: the original did int() on every whitespace-split token,
            # which raised ValueError on empty pstree output (int('')). Only
            # accept purely numeric tokens.
            if pid_str.isdigit():
                pids_to_kill.add(int(pid_str))
    if not pids_to_kill:
        # Bug fix: nothing to kill (dead processes, empty input) — the
        # original still executed a bare "kill -9 " in this case.
        return
    # Safety net: never kill the current process.
    assert (os.getpid() not in pids_to_kill)
    logger.error("Killing remaining processes {}".format(pids_to_kill))
    cmd = "kill -9 {}".format(' '.join(str(pid) for pid in pids_to_kill))
    subprocess.run(cmd, shell=True)
def display_process_output_on_error(process_name, stdout_file, stderr_file, max_lines=42):
    """Display a failed process's captured output via the logger.

    process_name: label used in the log messages.
    stdout_file / stderr_file: file objects whose ``.name`` points at the
        capture files; either may be None.
    max_lines: files longer than this only get their first and last
        ``max_lines // 2`` lines displayed.
    """
    if (stdout_file is None) and (stderr_file is None):
        logger.error("Cannot retrieve any log about the failed process")
        return
    # Bug fix: the original dereferenced .name on BOTH file objects before
    # checking for None, raising AttributeError whenever exactly one of them
    # was None. Check each file object itself instead.
    for capture_file, fname in [(stdout_file, "stdout"), (stderr_file, "stderr")]:
        if capture_file is None:
            continue
        filename = capture_file.name
        # Let's retrieve the file length (ugly.)
        cmd_wc = "wc -l {}".format(filename)
        p = subprocess.run(cmd_wc, shell=True, stdout=subprocess.PIPE)
        assert (p.returncode == 0)
        nb_lines = int(str(p.stdout.decode('utf-8')).split(' ')[0])
        if nb_lines > 0:
            if nb_lines <= max_lines:
                # Small file: dump it whole.
                with open(filename, 'r') as f:
                    logger.error('{} {}:\n{}'.format(process_name, fname, f.read()))
            else:
                # Large file: show only the head and the tail.
                cmd_head = "head -n {} {}".format(max_lines // 2, filename)
                cmd_tail = "tail -n {} {}".format(max_lines // 2, filename)
                p_head = subprocess.run(cmd_head, shell=True,
                                        stdout=subprocess.PIPE)
                p_tail = subprocess.run(cmd_tail, shell=True,
                                        stdout=subprocess.PIPE)
                assert (p_head.returncode == 0)
                assert (p_tail.returncode == 0)
                logger.error(
                    '{} {}:\n{}\n...\n...\n... (truncated... whole log in {})\n...\n...\n{}'
                    .format(process_name, fname,
                            str(p_head.stdout.decode('utf-8')), filename,
                            str(p_tail.stdout.decode('utf-8'))))
def retrieve_dirs_from_instances(variables, variables_declaration_order,
                                 working_directory):
    """Retrieve the directories from instances.

    Renders the variables into a temporary bash script, appends directives
    that echo ${base_working_directory} and ${base_output_directory} into
    temporary files, runs the script, then reads the two files back.

    Returns a (base_working_dir, base_output_dir) tuple.
    """
    # Pick a random prefix until none of the three temp filenames exist.
    while True:
        r = random_string()
        script_filename = '{wd}/{rand}_script.sh'.format(
            wd=working_directory, rand=r)
        output_dir_filename = '{wd}/{rand}_out_dir'.format(
            wd=working_directory, rand=r)
        working_dir_filename = '{wd}/{rand}_working_dir'.format(
            wd=working_directory, rand=r)
        if not any(os.path.exists(f) for f in (script_filename,
                                               output_dir_filename,
                                               working_dir_filename)):
            break

    put_variables_in_file(variables, variables_declaration_order,
                          script_filename)

    # Let's add some directives to prepare the instance!
    text_to_add = "# Preparation\n"
    text_to_add += 'echo {v} > {f}\n'.format(v="${base_output_directory}",
                                             f=output_dir_filename)
    text_to_add += 'echo {v} > {f}\n'.format(v="${base_working_directory}",
                                             f=working_dir_filename)

    # Idiom fix: use context managers instead of manual open()/close() so the
    # handles are released even if a write or read raises.
    with open(script_filename, 'a') as f:
        f.write(text_to_add)

    # Let's execute the script
    p = subprocess.run('bash {f}'.format(f=script_filename),
                       shell=True, stdout=subprocess.PIPE)
    assert (p.returncode == 0)

    # Let's get the working directory
    with open(working_dir_filename, 'r') as f:
        base_working_dir = f.read().strip()

    # Let's get the output directory
    with open(output_dir_filename, 'r') as f:
        base_output_dir = f.read().strip()

    # Let's remove temporary files
    delete_file_if_exists(script_filename)
    delete_file_if_exists(working_dir_filename)
    delete_file_if_exists(output_dir_filename)

    return (base_working_dir, base_output_dir)
def socket_in_use(sock):
    """Return whether the given socket is being used."""
    # Only port-based socket descriptions can be checked; anything that does
    # not match the port pattern is reported as unused.
    match = g_port_regex.match(sock)
    if not match:
        return False
    port = int(match.group(1))
    # Ask ss for listening sockets and grep for the port.
    listing = subprocess.run("ss -ln | grep ':{port}'".format(port=port),
                             shell=True,
                             stdout=subprocess.PIPE)
    return len(listing.stdout.decode('utf-8')) > 0
def execute_command(command, working_directory, variables_filename,
                    output_script_filename, output_subscript_filename,
                    output_script_output_dir, command_name, timeout=None):
    """Execute a command synchronously.

    Returns True on success (exit code 0), False otherwise; on failure the
    captured output is displayed via display_process_output_on_error.

    NOTE(review): the ``timeout`` parameter is accepted but never forwarded
    to subprocess.run — kept as-is to preserve the existing contract; confirm
    whether it should be honored.
    """
    # If the command is composed of different lines,
    # a corresponding subscript file will be created.
    # Otherwise, only one script will be created.
    if '\n' in command:
        write_string_into_file(command, output_subscript_filename)
        make_file_executable(output_subscript_filename)
        fake_command = os.path.abspath(output_subscript_filename)
        create_file_from_command(
            fake_command,
            output_filename=output_script_filename,
            variables_definition_filename=variables_filename)
    else:
        create_file_from_command(
            command=command,
            output_filename=output_script_filename,
            variables_definition_filename=variables_filename)

    create_dir_if_not_exists(output_script_output_dir)

    # Let's prepare the real command call
    cmd = 'bash {f}'.format(f=output_script_filename)

    # Bug fix: the capture files were opened and never closed (descriptor
    # leak, possibly unflushed buffers). A with-block guarantees both are
    # flushed and closed even if subprocess.run raises.
    with open('{out}/{name}.stdout'.format(out=output_script_output_dir,
                                           name=command_name), 'wb') as stdout_file, \
         open('{out}/{name}.stderr'.format(out=output_script_output_dir,
                                           name=command_name), 'wb') as stderr_file:
        # Let's run the command
        logger.info("Executing command '{}'".format(command_name))
        p = subprocess.run(cmd, shell=True,
                           stdout=stdout_file, stderr=stderr_file)

    if p.returncode == 0:
        logger.info("{} finished".format(command_name))
        return True
    logger.error("Command '{name}' failed.\n--- begin of {name} ---\n"
                 "{content}\n--- end of {name} ---".format(
                     name=command_name, content=command))
    # The file objects are closed here, but the display helper only reads
    # their .name attribute (still valid after close) to reopen the files.
    display_process_output_on_error(command_name, stdout_file, stderr_file)
    return False
def validate(self):
    """Check that ``self.exe_path`` points at a runnable socat binary.

    Runs ``<exe_path> -V`` and verifies the output mentions socat.
    Raises ValueError listing every problem found; returns None otherwise.
    """
    problems = []
    try:
        result = subprocess.run([self.exe_path, "-V"],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
    except Exception as e:
        problems.append(
            f"Unable to start socat process ({self.exe_path}): {e}")
    else:
        # A real socat prints its name in the version banner.
        if b"socat" not in result.stdout:
            problems.append(
                f"Unable to check socat process ({self.exe_path}) version: \n"
                f"{result.stdout.decode()}")
    if problems:
        raise ValueError("\n".join(problems))
def run_cli(
    self,
    arguments: List[str],
    *,
    verbosity: int = 0,
    network_timeout: float = NETWORK_TIMEOUT,
    input: Optional[str] = None,
) -> SysCap:
    """Run the neuro CLI with *arguments* and capture its output.

    Raises subprocess.CalledProcessError (after logging both streams) on a
    non-zero exit. Job-spawning invocations get their job id recorded in
    self._executed_jobs for later cleanup. Returns SysCap(stdout, stderr),
    both stripped.
    """
    __tracebackhide__ = True
    log.info("Run 'neuro %s'", " ".join(arguments))
    cli_argv = ["neuro"] + self._default_args(verbosity, network_timeout) + arguments
    # 5 min timeout is overkill
    proc = subprocess.run(
        cli_argv,
        timeout=300,
        encoding="utf8",
        errors="replace",
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        input=input,
    )
    try:
        proc.check_returncode()
    except subprocess.CalledProcessError:
        log.error(f"Last stdout: '{proc.stdout}'")
        log.error(f"Last stderr: '{proc.stderr}'")
        raise
    out, err = proc.stdout, proc.stderr
    joined_args = " ".join(arguments)
    if any(marker in joined_args
           for marker in ("submit", "job submit", "run", "job run")):
        # Track spawned jobs so they can be cleaned up afterwards.
        job_id = self.find_job_id(out)
        if job_id:
            self._executed_jobs.append(job_id)
    out = out.strip()
    err = err.strip()
    if verbosity > 0:
        print(f"neuro stdout: {out}")
        print(f"neuro stderr: {err}")
    return SysCap(out, err)
def cgclassify(cgroup : str, pid : int):
    """Move process *pid* into the cpu cgroup *cgroup*; log on failure."""
    result = subprocess.run("cgclassify -g cpu:%s %s" % (cgroup, pid),
                            shell=True)
    # A non-zero exit is only logged, never raised.
    if result.returncode != 0:
        logging.error("can not move pid = %s to cgroup cpu:%s" % (pid, cgroup))
def cgdelete(cgroup : str):
    """Delete the cpu cgroup *cgroup*; log on failure."""
    result = subprocess.run("cgdelete cpu:%s" % cgroup, shell=True)
    # A non-zero exit is only logged, never raised.
    if result.returncode != 0:
        logging.error("can not delete cgroup cpu:%s" % cgroup)
def cgset_shareds(cgroup: str, value : int):
    """Set cpu.shares of the cpu cgroup *cgroup* to *value*; log on failure."""
    result = subprocess.run("cgset -r cpu.shares=%s %s" % (value, cgroup),
                            shell=True)
    # A non-zero exit is only logged, never raised.
    if result.returncode != 0:
        logging.error("can not set cgroup cpu:%s" % cgroup)
def cgcreate(cgroup: str):
    """Create a cpu cgroup named *cgroup*; log an error on failure.

    Fix: the annotation was ``[str, int]`` — a list literal, not a valid
    type hint. The name is interpolated into a shell command with %s, and
    all sibling cg* helpers take ``str``, so ``str`` is the intended type.
    Annotations are not enforced at runtime, so callers passing an int keep
    working unchanged.
    """
    result = subprocess.run("cgcreate -g cpu:%s" % cgroup, shell=True)
    # A non-zero exit is only logged, never raised (consistent with the
    # other cg* helpers).
    if result.returncode != 0:
        logging.error("can not create cgroup cpu:%s" % cgroup)