def _get_char_loop(self):
    """
    The input 'event loop'.
    This should return the next characters to process.
    """
    timeout = self.input_timeout

    while True:
        r, w, x = _select([self.stdin, self._schedule_pipe[0]], [], [], timeout)

        if self.stdin in r:
            return self._read_from_stdin()

        # If we receive something on our "call_from_executor" pipe, process
        # these callbacks in a thread safe way.
        elif self._schedule_pipe[0] in r:
            # Flush all the pipe content.
            os.read(self._schedule_pipe[0], 1024)

            # Process calls from executor.
            calls_from_executor, self._calls_from_executor = self._calls_from_executor, []
            for c in calls_from_executor:
                c()
        else:
            # Fire input timeout event.
            self.onInputTimeout.fire()
            timeout = None
def read(self, timeout=1):
    """ Read from subprocess and return new output """
    output = ''
    read_timeout = float(timeout) / 1000
    read_ct = 0
    try:
        # read from fd until no more output
        while 1:
            s_read, s_write, s_error = select.select([self.fd], [], [], read_timeout)
            lines = b''
            for s_fd in s_read:
                try:
                    # increase read buffer so huge reads don't slow down
                    if read_ct < 10:
                        lines = os.read(self.fd, 32)
                    elif read_ct < 50:
                        lines = os.read(self.fd, 512)
                    else:
                        lines = os.read(self.fd, 2048)
                    read_ct += 1
                except OSError:
                    pass
            output = output + lines.decode('utf-8')
            # os.read() returns b'' at EOF; also stop after ~100 reads
            if lines == b'' or read_ct > 100:
                break
    except Exception:
        pass
    return output
def handle(self):
    dst = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    dst.connect(self.server.proxy_dest)

    readers = [self.request, dst]
    while readers:
        rlist, wlist, xlist = select.select(readers, [], [], TIMEOUT)

        # If the server generation has been incremented, close the
        # connection.
        if self._generation != self.server._generation:
            return

        if self.request in rlist:
            chunk = os.read(self.request.fileno(), 1024)
            dst.send(chunk)
            if chunk == "":
                readers.remove(self.request)
                dst.shutdown(socket.SHUT_WR)

        if dst in rlist:
            chunk = os.read(dst.fileno(), 1024)
            self.request.send(chunk)
            if chunk == "":
                readers.remove(dst)
                self.request.shutdown(socket.SHUT_WR)
def read(self, timeout=None, bufsize=4096): """ Read from the process' stdout and stderr. Blocks until data is available on either stdout or stderr, or until the timeout expires. Once data is available, a tuple is returned consisting of the data read on stdout, and the data read on stderr. If stdout is not available (e.g., it was not opened as a pipe when the process was started), or if the write end of the pipe has been closed by the child process, None will be returned in the first entry of tuple. If data becomes available on stderr before any data is ready on stdout, '' will be returned in the first entry of the tuple. The return value for stderr behaves the same way, respective to the stderr pipe. If the timeout expires before any data becomes available, a TimeoutError will be raised. """ read_fds = [] if self.stdout is not None and not self.stdoutEOF: read_fds.append(self.stdout.fileno()) if self.stderr is not None and not self.stderrEOF: read_fds.append(self.stderr.fileno()) if not read_fds: return (None, None) (read_ready, ignore, ignore) = select.select(read_fds, [], [], timeout) if not read_ready: raise TimeoutError(self) if self.stdoutEOF: stdout_buf = None elif self.stdout.fileno() in read_ready: stdout_buf = os.read(self.stdout.fileno(), bufsize) if not stdout_buf: self.stdoutEOF = True stdout_buf = None # Close stdout now, so our pipe will get closed # even if we don't get garbage collected for a while self.stdout.close() self.stdout = None else: stdout_buf = '' if self.stderrEOF: stderr_buf = None elif self.stderr.fileno() in read_ready: stderr_buf = os.read(self.stderr.fileno(), bufsize) if not stderr_buf: self.stderrEOF = True stderr_buf = None # Close stderr now, so our pipe will get closed # even if we don't get garbage collected for a while self.stderr.close() self.stderr = None else: stderr_buf = '' return (stdout_buf, stderr_buf)
def print_outputs(self):
    read_set = []
    stdout = None   # Return
    stderr = None   # Return

    if self.stdout:
        read_set.append(self.stdout)
    if self.stderr:
        read_set.append(self.stderr)

    while read_set:
        try:
            rlist, wlist, xlist = select.select(read_set, [], [])
        except select.error, e:
            if e.args[0] == errno.EINTR:
                continue
            raise

        if self.stdout in rlist:
            data = os.read(self.stdout.fileno(), 1024)
            if data == "":
                self.stdout.close()
                read_set.remove(self.stdout)
            else:
                sys.stdout.write(data)
                sys.stdout.flush()

        if self.stderr in rlist:
            data = os.read(self.stderr.fileno(), 1024)
            if data == "":
                self.stderr.close()
                read_set.remove(self.stderr)
            else:
                sys.stderr.write(data)
                sys.stderr.flush()
def _runReaperThread(self):
    while True:
        try:
            os.read(self._signalPipeReadFD, 1)
        except IOError as e:
            if e.errno != errno.EINTR:
                raise
            # System call was interrupted by signal - restart it
            continue
        else:
            self._sigchldPending = False
            while True:
                try:
                    r = os.waitpid(0, os.WNOHANG)
                except OSError as e:
                    if e.errno != errno.ECHILD:
                        raise
                    break
                else:
                    pid, exitStatus = r
                    if os.WIFSIGNALED(exitStatus):
                        returnCode = -os.WTERMSIG(exitStatus)
                    elif os.WIFEXITED(exitStatus):
                        returnCode = os.WEXITSTATUS(exitStatus)
                    else:
                        # Should never happen
                        raise RuntimeError("Unexpected child exit status: %r" % (exitStatus,))
                    with self.mutex:
                        for cb in self._callbacks:
                            cb(pid, returnCode)
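# Hedged companion sketch (not from the original source): the reaper loop above
# blocks on os.read() of a "self-pipe", so something must write to the pipe's
# write end whenever SIGCHLD arrives.  The handler name and the
# _signalPipeWriteFD / _sigchldPending attributes used here are assumptions
# about the surrounding class, not the project's actual API.
import errno
import os
import signal

def _installSigchldHandler(self):
    # Non-blocking write end so the handler can never stall inside a signal context.
    os.set_blocking(self._signalPipeWriteFD, False)

    def _sigchldHandler(signum, frame):
        self._sigchldPending = True
        try:
            os.write(self._signalPipeWriteFD, b'\0')  # wake the reaper thread
        except OSError as e:
            if e.errno != errno.EAGAIN:  # pipe full: a wakeup is already pending
                raise

    signal.signal(signal.SIGCHLD, _sigchldHandler)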
def hash_file(path, file_type):
    ret = ""

    # some files you can't hash
    if file_type in ('inode/chardevice', 'inode/symlink', 'inode/socket',
                     'inode/blockdevice', 'inode/x-empty',
                     'application/x-coredump', 'inode/directory'):
        ret = "0"
        return ret

    fd = None
    try:
        h = hashlib.sha1()
        fd = os.open(path, os.O_RDONLY | os.O_NONBLOCK)
        data = os.read(fd, BUFFER)
        while len(data) > 0:
            h.update(data)
            data = os.read(fd, BUFFER)
        ret = h.hexdigest()
    except Exception, err:
        # print "Hash Error: {} on file {} with type {}".format(err, path,
        #                                                       file_type)
        pass
    finally:
        # fd starts as None, so it is safe to close it unconditionally here
        if fd is not None:
            os.close(fd)
    return ret
def read(self, timeout=None): """return 0 when finished else return 1 every timeout seconds data will be in outdata and errdata""" currtime=time.time() while 1: tocheck=[] if not self._outeof: tocheck.append(self.outr) if not self._erreof: tocheck.append(self.errr) ready = select.select(tocheck,[],[],timeout) if len(ready[0]) == 0: #no data timeout return 1 else: if self.outr in ready[0]: #~ os.fsync(self.outr) outchunk = os.read(self.outr,self.BUFSIZ) if outchunk == '': self._outeof = 1 self.outdata += outchunk self.outchunk = outchunk if self.errr in ready[0]: #~ os.fsync(self.errr) errchunk = os.read(self.errr,self.BUFSIZ) if errchunk == '': self._erreof = 1 self.errdata += errchunk self.errchunk = errchunk if self._outeof and self._erreof: return 0 elif timeout: if (time.time()-currtime) > timeout: return 1 #may be more data but time to go
def __init__(self, reactor, proc, name, fileno, forceReadHack=False): """ Initialize, specifying a Process instance to connect to. """ abstract.FileDescriptor.__init__(self, reactor) fdesc.setNonBlocking(fileno) self.proc = proc self.name = name self.fd = fileno if not stat.S_ISFIFO(os.fstat(self.fileno()).st_mode): # If the fd is not a pipe, then the read hack is never # applicable. This case arises when ProcessWriter is used by # StandardIO and stdout is redirected to a normal file. self.enableReadHack = False elif forceReadHack: self.enableReadHack = True else: # Detect if this fd is actually a write-only fd. If it's # valid to read, don't try to detect closing via read. # This really only means that we cannot detect a TTY's write # pipe being closed. try: os.read(self.fileno(), 0) except OSError: # It's a write-only pipe end, enable hack self.enableReadHack = True if self.enableReadHack: self.startReading()
def create(netns, libc=None):
    '''
    Create a network namespace.
    '''
    rctl, wctl = os.pipe()
    pid = os.fork()
    if pid == 0:
        # child
        error = None
        try:
            _create(netns, libc)
        except Exception as e:
            error = e
            error.tb = traceback.format_exc()
        msg = pickle.dumps(error)
        os.write(wctl, struct.pack('I', len(msg)))
        os.write(wctl, msg)
        os._exit(0)
    else:
        # parent
        msglen = struct.unpack('I', os.read(rctl, 4))[0]
        error = pickle.loads(os.read(rctl, msglen))
        os.close(rctl)
        os.close(wctl)
        os.waitpid(pid, 0)
        if error is not None:
            raise error
def runProg(prog, argv=[]):
    args = [prog] + argv
    (rfd, wfd) = os.pipe()
    pid = os.fork()
    if pid == 0:
        try:
            fd = os.open("/dev/null", os.O_RDONLY)
            if fd != 0:
                os.dup2(fd, 0)
                os.close(fd)
            if wfd != 1:
                os.dup2(wfd, 1)
                os.close(wfd)
            os.dup2(1, 2)
            e = {"LANG": "C"}
            os.execve(args[0], args, e)
        finally:
            os._exit(255)

    os.close(wfd)
    cret = b''
    cout = os.read(rfd, 8192)
    while cout:
        cret += cout
        cout = os.read(rfd, 8192)
    os.close(rfd)
    (cpid, status) = os.waitpid(pid, 0)
    cret = cret.rstrip().decode('utf-8', 'replace')
    return (status, cret)
def __init__(self, reactor, proc, name, fileno, forceReadHack=False): """ Initialize, specifying a Process instance to connect to. """ abstract.FileDescriptor.__init__(self, reactor) fdesc.setNonBlocking(fileno) self.proc = proc self.name = name self.fd = fileno if forceReadHack: self.enableReadHack = True else: # Detect if this fd is actually a write-only fd. If it's # valid to read, don't try to detect closing via read. # This really only means that we cannot detect a TTY's write # pipe being closed. try: os.read(self.fileno(), 0) except OSError: # It's a write-only pipe end, enable hack self.enableReadHack = True if self.enableReadHack: self.startReading()
def _run_tee(proc, result, stdout=sys.stdout, stderr=sys.stderr): """Run test collecting and passing through stdout, stderr:""" log_stdout, log_stderr = [], [] fd_stdout = proc.stdout.fileno() fd_stderr = proc.stderr.fileno() read_set = [fd_stdout, fd_stderr] while read_set: try: rlist, _wlist, _elist = select.select(read_set, [], []) except select.error, ex: if ex.args[0] == errno.EINTR: continue raise if fd_stdout in rlist: data = os.read(fd_stdout, 1024) if data == '': read_set.remove(fd_stdout) proc.stdout.close() else: stdout.write(data) log_stdout.append(data) if fd_stderr in rlist: data = os.read(fd_stderr, 1024) if data == '': read_set.remove(fd_stderr) proc.stderr.close() else: stderr.write(data) log_stderr.append(data)
def startCoq(name): global currentState global coqtop global script script = "" currentState = 1 if (coqtop!=None): coqtop.terminate() coqtop = Popen(['coqtop','-emacs','-R','.',name], stdin = PIPE, stdout = PIPE, stderr = PIPE, shell = False) # set the O_NONBLOCK flag of p.stdout file descriptor: flags = fcntl(coqtop.stdout, F_GETFL) # get current p.stdout flags fcntl(coqtop.stdout, F_SETFL, flags | O_NONBLOCK) flags = fcntl(coqtop.stderr, F_GETFL) # get current p.stderr flags fcntl(coqtop.stderr, F_SETFL, flags | O_NONBLOCK) sleep(0.1) r = "" c= True while (c): try: l = read(coqtop.stdout.fileno(), 4096) r += l except OSError: try: x = read(coqtop.stderr.fileno(), 4096) c = False except OSError: sleep(0.1)
def getchar():
    ''' Equivalent to C's getchar() '''
    fd = sys.stdin.fileno()
    if os.isatty(fd):
        old = termios.tcgetattr(fd)
        new = termios.tcgetattr(fd)
        new[3] = new[3] & ~termios.ICANON & ~termios.ECHO
        new[6][termios.VMIN] = 1
        new[6][termios.VTIME] = 0
        try:
            termios.tcsetattr(fd, termios.TCSANOW, new)
            termios.tcsendbreak(fd, 0)
            ch = os.read(fd, 7)
        finally:
            termios.tcsetattr(fd, termios.TCSAFLUSH, old)
    else:
        ch = os.read(fd, 7)
    return(ch)
def getResults(results, pid, inF, csFile):
    (gotResult, status) = os.waitpid(pid, os.WNOHANG)
    if not gotResult:
        return None

    if os.WIFSIGNALED(status):
        results.setExitSignal(os.WTERMSIG(status))
    else:
        assert(os.WIFEXITED(status))
        results.setExitStatus(os.WEXITSTATUS(status))

    if results.isBuildSuccess():
        results.setChangeSetFile(csFile)
    elif results.getExitSignal():
        results.setFailureReason(
            BuildFailed('Build exited with signal %s' % results.getExitSignal()))
    else:
        errReason = []
        buffer = os.read(inF, 1024)
        while buffer:
            errReason.append(buffer)
            buffer = os.read(inF, 1024)
        errReason = ''.join(errReason)
        errTag, data = errReason.split('\002', 1)
        results.setFailureReason(thaw('FailureReason', (errTag, data)))
    os.close(inF)
    return results
def empty_resize_pipe():
    # clean out the pipe used to signal external event loops
    # that a resize has occurred
    try:
        while True:
            os.read(self._resize_pipe_rd, 1)
    except OSError:
        pass
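# Hedged companion sketch (not from the original source): empty_resize_pipe()
# only makes sense if a SIGWINCH handler pushes a byte into the write end of the
# same pipe.  The _resize_pipe_wr attribute and the installer function name are
# assumptions about the surrounding code.
import os
import signal

def _install_resize_handler(self):
    # both ends non-blocking so neither the handler nor the drain loop can stall
    os.set_blocking(self._resize_pipe_rd, False)
    os.set_blocking(self._resize_pipe_wr, False)

    def _sigwinch_handler(signum, frame):
        try:
            os.write(self._resize_pipe_wr, b'R')  # wake any external event loop
        except OSError:
            pass  # pipe already full: a resize notification is pending anyway

    signal.signal(signal.SIGWINCH, _sigwinch_handler)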
def read(fd):
    '''
    Returns: (serial_number, unpickled_object) or raises a FileCorrupted exception
    '''
    os.lseek(fd, 0, os.SEEK_SET)

    md5hash = os.read(fd, 16)
    data1 = os.read(fd, 8)
    data2 = os.read(fd, 8)

    if ((not md5hash or len(md5hash) != 16) or
            (not data1 or len(data1) != 8) or
            (not data2 or len(data2) != 8)):
        raise FileTruncated()

    serial_number = struct.unpack('>Q', data1)[0]
    pickle_length = struct.unpack('>Q', data2)[0]

    data3 = os.read(fd, pickle_length)
    if not data3 or len(data3) != pickle_length:
        raise FileTruncated()

    m = hashlib.md5()
    m.update(data1)
    m.update(data2)
    m.update(data3)
    if not m.digest() == md5hash:
        raise HashMismatch()

    return serial_number, pickle.loads(data3)
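# Hedged companion sketch (not from the original source): a writer for the record
# layout the read() above expects -- a 16-byte MD5 digest, two big-endian 8-byte
# fields (serial number, pickle length), then the pickle payload, with the digest
# computed over the three data sections.  The function name is an assumption.
import hashlib
import os
import pickle
import struct

def write(fd, serial_number, obj):
    data1 = struct.pack('>Q', serial_number)   # serial number
    data3 = pickle.dumps(obj)                  # payload
    data2 = struct.pack('>Q', len(data3))      # payload length

    m = hashlib.md5()
    m.update(data1)
    m.update(data2)
    m.update(data3)

    os.lseek(fd, 0, os.SEEK_SET)
    os.write(fd, m.digest() + data1 + data2 + data3)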
def __init__(self, num_bits, filename, max_bytes_in_memory): self.num_bits = num_bits num_chars = (self.num_bits + 7) // 8 self.filename = filename self.max_bytes_in_memory = max_bytes_in_memory self.bits_in_memory = min(num_bits, self.max_bytes_in_memory * 8) self.bits_in_file = max(self.num_bits - self.bits_in_memory, 0) self.bytes_in_memory = (self.bits_in_memory + 7) // 8 self.bytes_in_file = (self.bits_in_file + 7) // 8 self.array_ = array.array('B', [0]) * self.bytes_in_memory flags = os.O_RDWR | os.O_CREAT if hasattr(os, 'O_BINARY'): flags |= getattr(os, 'O_BINARY') self.file_ = os.open(filename, flags) os.lseek(self.file_, num_chars + 1, os.SEEK_SET) os.write(self.file_, python2x3.null_byte) os.lseek(self.file_, 0, os.SEEK_SET) offset = 0 intended_block_len = 2 ** 17 while True: if offset + intended_block_len < self.bytes_in_memory: block = os.read(self.file_, intended_block_len) elif offset < self.bytes_in_memory: block = os.read(self.file_, self.bytes_in_memory - offset) else: break for index_in_block, character in enumerate(block): self.array_[offset + index_in_block] = ord(character) offset += intended_block_len
def test_basic_pty(): try: debug("Calling master_open()") master_fd, slave_name = pty.master_open() debug("Got master_fd '%d', slave_name '%s'"%(master_fd, slave_name)) debug("Calling slave_open(%r)"%(slave_name,)) slave_fd = pty.slave_open(slave_name) debug("Got slave_fd '%d'"%slave_fd) except OSError: # " An optional feature could not be imported " ... ? raise TestSkipped, "Pseudo-terminals (seemingly) not functional." if not os.isatty(slave_fd): raise TestFailed, "slave_fd is not a tty" debug("Writing to slave_fd") os.write(slave_fd, TEST_STRING_1) s1 = os.read(master_fd, 1024) sys.stdout.write(normalize_output(s1)) debug("Writing chunked output") os.write(slave_fd, TEST_STRING_2[:5]) os.write(slave_fd, TEST_STRING_2[5:]) s2 = os.read(master_fd, 1024) sys.stdout.write(normalize_output(s2)) os.close(slave_fd) os.close(master_fd)
def handle_queue(self, source, condition):
    global event_queue
    os.read(source, 1)
    items = event_queue.get()
    func = items[0]
    args = items[1:]
    func(*args)
def get_output(self, fo, fe):
    # The command has finished.  Read output and write stdout.
    # We don't know when output has stopped so just keep trying
    # until it is all gone.
    empty_reads = 0
    stderr = ""
    stdout = ""
    while True:
        so = os.read(fo, 1024 * 1024)
        se = os.read(fe, 1024 * 1024)
        if so == "" and se == "":
            empty_reads += 1
        else:
            stdout += so
            stderr += se
            empty_reads = 0
        if empty_reads > 10:
            break
        time.sleep(2)
    return (stdout, stderr)
def __iter__(self): if self._type == 'pf_packet': yield self.socket.recv(1024) elif self._type == 'bpf': buf, idx, idxp = None, 0, 0 if not buf: buf = os.read(self.socket.fileno(), self.bufsz) ctr = 0 while idx+18 < len(buf): (secs,usecs, caplen, datalen, hdrlen) = struct.unpack('IIIIH', buf[idx:idx+18]) idxp = wordalign(idx+hdrlen+caplen) print(ctr, idx, idxp, idxp-idx, "\n\t", secs,usecs, caplen, datalen, hdrlen, "\n\t ", buf[idx:idx+18]) if idxp >= len(buf): buf = buf[idx:] + os.read(self.socket.fileno(), self.bufsz) idx = 0 idxp = idx+hdrlen+caplen print("!!!", idx, idxp) yield buf[idx:idxp] idx = idxp ctr += 1 print("EEK") print(idx, len(buf), buf[idx:])
def update(self, timeout=None):
    """
    return whether the program wrote anything. If this is the case,
    the callbacks are called accordingly.
    `timeout` determines the frequency of polling the results. If it
    is not given, the value set at instantiation is used.
    """
    if timeout is None:
        timeout = self.timeout
    ready, _, _ = select.select([self._pipe_out_r, self._pipe_err_r],
                                [], [], timeout)
    if ready:
        if self._pipe_out_r in ready:
            output = os.read(self._pipe_out_r, self.bufsize)
            self.handle_stdout(output.decode())
        if self._pipe_err_r in ready:
            output = os.read(self._pipe_err_r, self.bufsize)
            self.handle_stderr(output.decode())
        return True
    else:
        return False
def fetch_fork_result(r, pid):
    """
    Used with fork clients

    @type r: pipe
    @param r: Input pipe

    @type pid: int
    @param pid: pid of the child

    @rtype: Object
    @return: Unpickled object
    """
    try:
        rin = ""
        s = os.read(r, 1024 * 1024)
        while (s != ""):  # "" means EOF
            rin += s
            s = os.read(r, 1024 * 1024)
    finally:
        os.close(r)
        os.waitpid(pid, 0)
    out = cPickle.loads(rin)
    return out
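# Hedged companion sketch (not from the original source, Python 2 style to match
# the snippet above): the writer side that fetch_fork_result() expects -- fork,
# run some work in the child, pickle the result into the write end of a pipe,
# and hand (read_fd, pid) back to the caller.  fork_in_bg and do_work are
# hypothetical names.
import cPickle
import os

def fork_in_bg(do_work, *args):
    r, w = os.pipe()
    pid = os.fork()
    if pid == 0:
        # child: only the write end is needed
        os.close(r)
        try:
            out = do_work(*args)
            os.write(w, cPickle.dumps(out))
        finally:
            os.close(w)
            os._exit(0)
    # parent: only the read end is needed
    os.close(w)
    return r, pid

# usage: result = fetch_fork_result(*fork_in_bg(some_function))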
def find_ext_cap(self, cap): path = find_sysfs_mnt()+SYSFS_PCI_DEVS_PATH+'/'+ \ self.name+SYSFS_PCI_DEV_CONFIG_PATH ttl = 480; # 3840 bytes, minimum 8 bytes per capability pos = 0x100 try: fd = os.open(path, os.O_RDONLY) os.lseek(fd, pos, 0) h = os.read(fd, 4) if len(h) == 0: # MMCONF is not enabled? return 0 header = struct.unpack('I', h)[0] if header == 0 or header == -1: return 0 while ttl > 0: if (header & 0x0000ffff) == cap: return pos pos = (header >> 20) & 0xffc if pos < 0x100: break os.lseek(fd, pos, 0) header = struct.unpack('I', os.read(fd, 4))[0] ttl = ttl - 1 os.close(fd) except OSError, (errno, strerr): raise PciDeviceParseError(('Error when accessing sysfs: %s (%d)' % (strerr, errno)))
def getchar(prompt, hidden=False, end='\n'):
    '''Read a single character'''
    import termios
    sys.stdout.write(prompt)
    sys.stdout.flush()
    fd = sys.stdin.fileno()

    if os.isatty(fd):
        old = termios.tcgetattr(fd)
        new = termios.tcgetattr(fd)
        if hidden:
            new[3] = new[3] & ~termios.ICANON & ~termios.ECHO
        else:
            new[3] = new[3] & ~termios.ICANON
        new[6][termios.VMIN] = 1
        new[6][termios.VTIME] = 0
        try:
            termios.tcsetattr(fd, termios.TCSANOW, new)
            termios.tcsendbreak(fd, 0)
            ch = os.read(fd, 7)
        finally:
            termios.tcsetattr(fd, termios.TCSAFLUSH, old)
    else:
        ch = os.read(fd, 7)

    sys.stdout.write(end)
    return(ch.decode())
def wait_for_test_process(proc, timeout): """Waits for a test process with a timeout, and reads stderr. Reading stderr while polling is important, or a deadlock can occur if the pipe's internal buffer fills up. """ endtime = time.time() + timeout err_chunks = [] while proc.returncode is None and time.time() < endtime: time.sleep(0.1) err_chunks.append(os.read(proc.stderr.fileno(), 4096)) proc.poll() # Kill the child if it hasn't stopped yet, and wait for it. timed_out = False if proc.returncode is None: proc.kill() proc.wait() timed_out = True print 'Test process timed out after 30s...' # Read the rest of stderr. chunk = True while chunk: chunk = os.read(proc.stderr.fileno(), 4096) err_chunks.append(chunk) lines = ''.join(err_chunks).split('\n') return (timed_out, lines)
def test__copy_to_each(self): """Test the normal data case on both master_fd and stdin.""" read_from_stdout_fd, mock_stdout_fd = self._pipe() pty.STDOUT_FILENO = mock_stdout_fd mock_stdin_fd, write_to_stdin_fd = self._pipe() pty.STDIN_FILENO = mock_stdin_fd socketpair = self._socketpair() masters = [s.fileno() for s in socketpair] # Feed data. Smaller than PIPEBUF. These writes will not block. os.write(masters[1], b"from master") os.write(write_to_stdin_fd, b"from stdin") # Expect two select calls, the last one will cause IndexError pty.select = self._mock_select self.select_rfds_lengths.append(2) self.select_rfds_results.append([mock_stdin_fd, masters[0]]) self.select_rfds_lengths.append(2) with self.assertRaises(IndexError): pty._copy(masters[0]) # Test that the right data went to the right places. rfds = select.select([read_from_stdout_fd, masters[1]], [], [], 0)[0] self.assertEqual([read_from_stdout_fd, masters[1]], rfds) self.assertEqual(os.read(read_from_stdout_fd, 20), b"from master") self.assertEqual(os.read(masters[1], 20), b"from stdin")
def _copy():
    """Main select loop.

    Passes control to _master_read() or _stdin_read() when new data arrives.
    """
    while 1:
        try:
            rfds, wfds, xfds = select.select([master_fd, pty.STDIN_FILENO], [], [])
        except select.error as e:
            if e[0] == 4:   # Interrupted system call.
                continue
            raise

        if master_fd in rfds:
            data = os.read(master_fd, 1024)
            if len(data) == 0:
                break
            _handle_master_read(data)

        if pty.STDIN_FILENO in rfds:
            data = os.read(pty.STDIN_FILENO, 1024)
            _handle_stdin_read(data)
def test_stdin(self, tmpfile):
    cap = capture.FDCapture(0)
    cap.start()
    x = os.read(0, 100).strip()
    cap.done()
    assert x == b""
def cb():
    data = os.read(pipe_rd, PIPE_BUFFER_READ_SIZE)
    rval = callback(data)
    if rval is False:
        self.event_loop.remove_watch_file(watch_handle)
        os.close(pipe_rd)
#!/usr/bin/python3
import os
import sys

# Read the pid of the motor.py that was launched at start; it is killed
# every time you launch dcmotor.py and is then launched again with the new
# values.  (motor.py is the program that controls the DC motor: it is stopped
# and relaunched with the new parameters.)
directorio = '/tmp/miriadax/'
archivo = directorio + 'motor.pid'
file = os.open(archivo, os.O_RDONLY)
matar = os.read(file, 30)
os.close(file)

# Folder of the program
basedir = os.path.dirname(os.path.abspath(__file__))
matar = int(matar)

# Kills the script
os.kill(matar, 9)

# Launch the script again with the new values
# first parameter = modo (0->stop, 1->forward, 2->backward)
# second parameter = velocidad (between 0->min and 1->max)
uno = sys.argv[1]
if uno != '0':
    dos = sys.argv[2]
    comando = basedir + '/inicio/motor.py ' + uno + ' ' + dos + ' &'
else:
    comando = basedir + '/inicio/motor.py 0 &'
def processCommandLines(self): self.app._debugApp('CommandLineHandlerPosix') fifo_name = os.environ.get('BEMACS_FIFO', '.bemacs8/.emacs_command') if fifo_name.startswith('/'): server_fifo = fifo_name else: e = pwd.getpwuid(os.geteuid()) server_fifo = os.path.join(tempfile.gettempdir(), e.pw_name, fifo_name) client_fifo = '%s_response' % (server_fifo, ) if self.opt_name is not None: server_fifo += '_' + self.opt_name client_fifo += '_' + self.opt_name fifo_dir = os.path.dirname(server_fifo) if not os.path.exists(fifo_dir): os.makedirs(fifo_dir) self.__makeFifo(server_fifo) self.__makeFifo(client_fifo) try: emacs_server_read_fd = os.open(server_fifo, os.O_RDONLY | os.O_NONBLOCK) except OSError: self.log.error('Failed to open %s for read' % (server_fifo, )) return try: emacs_server_write_fd = os.open(server_fifo, os.O_WRONLY | os.O_NONBLOCK) except OSError: self.log.error('Failed to open %s for write' % (server_fifo, )) return self.app._debugApp('CommandLineHandlerPosix before read loop') while True: r, w, x = select.select([emacs_server_read_fd], [], [], 1.0) reply = b' ' if emacs_server_read_fd in r: reply = b'R' b'Unknown client command' client_command = os.read(emacs_server_read_fd, 16384) self.app._debugApp('CommandLineHandlerPosix command %r' % (client_command, )) if len(client_command) > 0: if client_command[0] == ord('C'): new_argv = [ b.decode('utf-8') for b in client_command[1:].split(b'\x00') ] self.app.handleClientCommand(new_argv) reply = b' ' elif client_command[0] == ord('W'): if self.app.release_waiting_client_reply is not None: reply = b'w' + self.app.release_waiting_client_reply.encode( 'utf-8') self.app.release_waiting_client_reply = None else: reply = b' ' emacs_client_write_fd = os.open(client_fifo, os.O_WRONLY | os.O_NONBLOCK) if emacs_client_write_fd < 0: return self.app._debugApp('CommandLineHandlerPosix response %r' % (reply, )) os.write(emacs_client_write_fd, reply) os.close(emacs_client_write_fd)
def safe_communicate(proc, input, outlimit=None, errlimit=None): if outlimit is None: outlimit = 10485760 if errlimit is None: errlimit = outlimit if proc.stdin: # Flush stdio buffer. This might block, if the user has # been writing to .stdin in an uncontrolled fashion. proc.stdin.flush() if not input: proc.stdin.close() stdout = None # Return stderr = None # Return fd2file = {} fd2output = {} fd2length = {} fd2limit = {} poller = select.poll() def register_and_append(file_obj, eventmask): poller.register(file_obj.fileno(), eventmask) fd2file[file_obj.fileno()] = file_obj def close_unregister_and_remove(fd): poller.unregister(fd) fd2file[fd].close() fd2file.pop(fd) if proc.stdin and input: register_and_append(proc.stdin, select.POLLOUT) select_POLLIN_POLLPRI = select.POLLIN | select.POLLPRI if proc.stdout: register_and_append(proc.stdout, select_POLLIN_POLLPRI) fd2output[proc.stdout.fileno()] = stdout = [] fd2length[proc.stdout.fileno()] = 0 fd2limit[proc.stdout.fileno()] = outlimit if proc.stderr: register_and_append(proc.stderr, select_POLLIN_POLLPRI) fd2output[proc.stderr.fileno()] = stderr = [] fd2length[proc.stderr.fileno()] = 0 fd2limit[proc.stderr.fileno()] = errlimit input_offset = 0 while fd2file: try: ready = poller.poll() except select.error, e: if e.args[0] == errno.EINTR: continue raise for fd, mode in ready: if mode & select.POLLOUT: chunk = input[input_offset:input_offset + _PIPE_BUF] try: input_offset += os.write(fd, chunk) except OSError as e: if e.errno == errno.EPIPE: close_unregister_and_remove(fd) else: raise else: if input_offset >= len(input): close_unregister_and_remove(fd) elif mode & select_POLLIN_POLLPRI: data = os.read(fd, 4096) if not data: close_unregister_and_remove(fd) fd2output[fd].append(data) fd2length[fd] += len(data) if fd2length[fd] > fd2limit[fd]: if stdout is not None: stdout = ''.join(stdout) if stderr is not None: stderr = ''.join(stderr) raise OutputLimitExceeded( ['stderr', 'stdout'][proc.stdout.fileno() == fd], stdout, stderr) else: # Ignore hang up or errors. close_unregister_and_remove(fd)
def getOutput(self, o):
    import fcntl
    fl = fcntl.fcntl(o, fcntl.F_GETFL)
    fcntl.fcntl(o, fcntl.F_SETFL, fl | os.O_NONBLOCK)
    return os.read(o.fileno(), 1000000).decode("utf-8")
def reader(fd):
    c = os.read(fd, 1024)
    while c:
        output.append(c)
        c = os.read(fd, 1024)
def CheckCallAndFilter(args, print_stdout=False, filter_fn=None, show_header=False, always_show_header=False, retry=False, **kwargs): """Runs a command and calls back a filter function if needed. Accepts all subprocess2.Popen() parameters plus: print_stdout: If True, the command's stdout is forwarded to stdout. filter_fn: A function taking a single string argument called with each line of the subprocess2's output. Each line has the trailing newline character trimmed. show_header: Whether to display a header before the command output. always_show_header: Show header even when the command produced no output. retry: If the process exits non-zero, sleep for a brief interval and try again, up to RETRY_MAX times. stderr is always redirected to stdout. Returns the output of the command as a binary string. """ def show_header_if_necessary(needs_header, attempt): """Show the header at most once.""" if not needs_header[0]: return needs_header[0] = False # Automatically generated header. We only prepend a newline if # always_show_header is false, since it usually indicates there's an # external progress display, and it's better not to clobber it in that case. header = '' if always_show_header else '\n' header += '________ running \'%s\' in \'%s\'' % ( ' '.join(args), kwargs.get('cwd', '.')) if attempt: header += ' attempt %s / %s' % (attempt + 1, RETRY_MAX + 1) header += '\n' if print_stdout: stdout_write = getattr(sys.stdout, 'buffer', sys.stdout).write stdout_write(header.encode()) if filter_fn: filter_fn(header) def filter_line(command_output, line_start): """Extract the last line from command output and filter it.""" if not filter_fn or line_start is None: return command_output.seek(line_start) filter_fn(command_output.read().decode('utf-8')) # Initialize stdout writer if needed. On Python 3, sys.stdout does not accept # byte inputs and sys.stdout.buffer must be used instead. if print_stdout: sys.stdout.flush() stdout_write = getattr(sys.stdout, 'buffer', sys.stdout).write else: stdout_write = lambda _: None sleep_interval = RETRY_INITIAL_SLEEP run_cwd = kwargs.get('cwd', os.getcwd()) for attempt in range(RETRY_MAX + 1): # If our stdout is a terminal, then pass in a psuedo-tty pipe to our # subprocess when filtering its output. This makes the subproc believe # it was launched from a terminal, which will preserve ANSI color codes. if sys.stdout.isatty(): pipe_reader, pipe_writer = os.openpty() else: pipe_reader, pipe_writer = os.pipe() kid = subprocess2.Popen(args, bufsize=0, stdout=pipe_writer, stderr=subprocess2.STDOUT, **kwargs) # Close the write end of the pipe once we hand it off to the child proc. os.close(pipe_writer) GClientChildren.add(kid) # Store the output of the command regardless of the value of print_stdout or # filter_fn. command_output = io.BytesIO() # Passed as a list for "by ref" semantics. needs_header = [show_header] if always_show_header: show_header_if_necessary(needs_header, attempt) # Also, we need to forward stdout to prevent weird re-ordering of output. # This has to be done on a per byte basis to make sure it is not buffered: # normally buffering is done for each line, but if the process requests # input, no end-of-line character is output after the prompt and it would # not show up. try: line_start = None while True: try: in_byte = os.read(pipe_reader, 1) except (IOError, OSError) as e: if e.errno == errno.EIO: # An errno.EIO means EOF? 
in_byte = None else: raise e is_newline = in_byte in (b'\n', b'\r') if not in_byte: break show_header_if_necessary(needs_header, attempt) if is_newline: filter_line(command_output, line_start) line_start = None elif line_start is None: line_start = command_output.tell() stdout_write(in_byte) command_output.write(in_byte) # Flush the rest of buffered output. sys.stdout.flush() if line_start is not None: filter_line(command_output, line_start) os.close(pipe_reader) rv = kid.wait() # Don't put this in a 'finally,' since the child may still run if we get # an exception. GClientChildren.remove(kid) except KeyboardInterrupt: print('Failed while running "%s"' % ' '.join(args), file=sys.stderr) raise if rv == 0: return command_output.getvalue() if not retry: break print( "WARNING: subprocess '%s' in %s failed; will retry after a short " 'nap...' % (' '.join('"%s"' % x for x in args), run_cwd)) time.sleep(sleep_interval) sleep_interval *= 2 raise subprocess2.CalledProcessError(rv, args, kwargs.get('cwd', None), None, None)
        i_right = doors_right[i][1]

        top_top = 1 + abs(i_right - i_1_right)
        right_top = 1 + abs(corner - i_1_up) + abs(corner - i_right)
        top_right = 1 + abs(corner - i_1_right) + abs(corner - i_up)
        right_right = 1 + abs(i_1_up - i_up)

        topdoor[i] = min(topdoor[i - 1] + top_top, rightdoor[i - 1] + right_top)
        rightdoor[i] = min(rightdoor[i - 1] + right_right, topdoor[i - 1] + top_right)

    for q in queries:
        ax, ay, bx, by = q
        kstart = max(ax, ay)


import os
import io

if __name__ == "__main__":
    input = io.BytesIO(os.read(0, os.fstat(0).st_size)).readline
    n = int(input().decode().strip())
    doors_top = []
    doors_right = []
    for i in range(n - 1):
        d1x, d1y, d2x, d2y = [int(x) for x in input().decode().strip().split()]
        doors_top.append((d1x, d1y))
        assert d1x == i + 1
        doors_right.append((d2x, d2y))
        assert d2y == i + 1
    q = int(input().decode().strip())
    queries = []
    for i in range(q):
        ax, ay, bx, by = [int(x) for x in input().decode().strip().split()]
        queries.append((ax, ay, bx, by))
    res1 = solve(n, doors_top, doors_right, queries)
def posix_shell(self): """ Use paramiko channel connect server interactive. 使用paramiko模块的channel,连接后端,进入交互式 """ log_file_f, log_time_f, log = self.get_log() termlog = TermLogRecorder(User.objects.get(id=self.user.id)) termlog.setid(log.id) old_tty = termios.tcgetattr(sys.stdin) pre_timestamp = time.time() data = '' input_mode = False try: tty.setraw(sys.stdin.fileno()) tty.setcbreak(sys.stdin.fileno()) self.channel.settimeout(0.0) while True: try: r, w, e = select.select([self.channel, sys.stdin], [], []) flag = fcntl.fcntl(sys.stdin, fcntl.F_GETFL, 0) fcntl.fcntl(sys.stdin.fileno(), fcntl.F_SETFL, flag | os.O_NONBLOCK) except Exception: pass if self.channel in r: try: x = self.channel.recv(10240) if len(x) == 0: break index = 0 len_x = len(x) while index < len_x: try: n = os.write(sys.stdout.fileno(), x[index:]) sys.stdout.flush() index += n except OSError as msg: if msg.errno == errno.EAGAIN: continue now_timestamp = time.time() termlog.write(x) termlog.recoder = False log_time_f.write( '%s %s\n' % (round(now_timestamp - pre_timestamp, 4), len(x))) log_time_f.flush() log_file_f.write(x) log_file_f.flush() pre_timestamp = now_timestamp log_file_f.flush() self.vim_data += x if input_mode: data += x except socket.timeout: pass if sys.stdin in r: try: x = os.read(sys.stdin.fileno(), 4096) except OSError: pass termlog.recoder = True input_mode = True if self.is_output(str(x)): # 如果len(str(x)) > 1 说明是复制输入的 if len(str(x)) > 1: data = x match = self.vim_end_pattern.findall(self.vim_data) if match: if self.vim_flag or len(match) == 2: self.vim_flag = False else: self.vim_flag = True elif not self.vim_flag: self.vim_flag = False data = self.deal_command(data)[0:200] if data is not None: TtyLog(log=log, datetime=datetime.datetime.now(), cmd=data).save() data = '' self.vim_data = '' input_mode = False if len(x) == 0: break self.channel.send(x) finally: termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty) log_file_f.write('End time is %s' % datetime.datetime.now()) log_file_f.close() log_time_f.close() termlog.save() log.filename = termlog.filename log.is_finished = True log.end_time = datetime.datetime.now() log.save()
def read(self):
    if self.buffer.tell():
        return self.buffer.read().decode("ascii")
    return os.read(self._fd, os.fstat(self._fd).st_size).decode("ascii")
def get_message(fifo: int) -> str:
    msg_size_bytes = os.read(fifo, 4)
    msg_size = decode_msg_size(msg_size_bytes)
    msg_content = os.read(fifo, msg_size).decode("utf8")
    return msg_content
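# Hedged companion sketch (not from the original source): get_message() above
# reads a 4-byte length header and then the payload, so the sender has to write
# the same framing.  The byte order ("<I") is an assumption and must match
# whatever decode_msg_size() actually unpacks; create_msg and send_message are
# hypothetical names.
import os
import struct

def create_msg(content: str) -> bytes:
    payload = content.encode("utf8")
    return struct.pack("<I", len(payload)) + payload  # 4-byte size header + body

def send_message(fifo: int, content: str) -> None:
    os.write(fifo, create_msg(content))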
def read(self, fd, offset, length):
    os.lseek(fd, offset, os.SEEK_SET)
    return os.read(fd, length)
def read(self, begin, index, length):
    pos = index * self.torrent.info.piece_length
    os.lseek(self.fd, pos, os.SEEK_SET)
    return os.read(self.fd, length)
def read(space, fd, buffersize):
    """Read data from a file descriptor."""
    try:
        s = os.read(fd, buffersize)
    except OSError, e:
        raise wrap_oserror(space, e)
def recv(self, *args):
    return os.read(self.fd, *args)
def test_fork(self): debug("calling pty.fork()") pid, master_fd = pty.fork() if pid == pty.CHILD: # stdout should be connected to a tty. if not os.isatty(1): debug("Child's fd 1 is not a tty?!") os._exit(3) # After pty.fork(), the child should already be a session leader. # (on those systems that have that concept.) debug("In child, calling os.setsid()") try: os.setsid() except OSError: # Good, we already were session leader debug("Good: OSError was raised.") pass except AttributeError: # Have pty, but not setsid()? debug("No setsid() available?") pass except: # We don't want this error to propagate, escaping the call to # os._exit() and causing very peculiar behavior in the calling # regrtest.py ! # Note: could add traceback printing here. debug("An unexpected error was raised.") os._exit(1) else: debug("os.setsid() succeeded! (bad!)") os._exit(2) os._exit(4) else: debug("Waiting for child (%d) to finish." % pid) # In verbose mode, we have to consume the debug output from the # child or the child will block, causing this test to hang in the # parent's waitpid() call. The child blocks after a # platform-dependent amount of data is written to its fd. On # Linux 2.6, it's 4000 bytes and the child won't block, but on OS # X even the small writes in the child above will block it. Also # on Linux, the read() will raise an OSError (input/output error) # when it tries to read past the end of the buffer but the child's # already exited, so catch and discard those exceptions. It's not # worth checking for EIO. while True: try: data = os.read(master_fd, 80) except OSError: break if not data: break sys.stdout.write(str(data.replace(b'\r\n', b'\n'), encoding='ascii')) ##line = os.read(master_fd, 80) ##lines = line.replace('\r\n', '\n').split('\n') ##if False and lines != ['In child, calling os.setsid()', ## 'Good: OSError was raised.', '']: ## raise TestFailed("Unexpected output from child: %r" % line) (pid, status) = os.waitpid(pid, 0) res = status >> 8 debug("Child (%d) exited with status %d (%d)." % (pid, res, status)) if res == 1: self.fail("Child raised an unexpected exception in os.setsid()") elif res == 2: self.fail("pty.fork() failed to make child a session leader.") elif res == 3: self.fail("Child spawned by pty.fork() did not have a tty as stdout") elif res != 4: self.fail("pty.fork() failed for unknown reasons.") ##debug("Reading from master_fd now that the child has exited") ##try: ## s1 = os.read(master_fd, 1024) ##except OSError: ## pass ##else: ## raise TestFailed("Read from master_fd did not raise exception") os.close(master_fd)
def spawn( cls, argv, cwd=None, env=None, echo=True, preexec_fn=None, dimensions=(24, 80)): '''Start the given command in a child process in a pseudo terminal. This does all the fork/exec type of stuff for a pty, and returns an instance of PtyProcess. If preexec_fn is supplied, it will be called with no arguments in the child process before exec-ing the specified command. It may, for instance, set signal handlers to SIG_DFL or SIG_IGN. Dimensions of the psuedoterminal used for the subprocess can be specified as a tuple (rows, cols), or the default (24, 80) will be used. ''' # Note that it is difficult for this method to fail. # You cannot detect if the child process cannot start. # So the only way you can tell if the child process started # or not is to try to read from the file descriptor. If you get # EOF immediately then it means that the child is already dead. # That may not necessarily be bad because you may have spawned a child # that performs some task; creates no stdout output; and then dies. if not isinstance(argv, (list, tuple)): raise TypeError("Expected a list or tuple for argv, got %r" % argv) # Shallow copy of argv so we can modify it argv = argv[:] command = argv[0] command_with_path = which(command) if command_with_path is None: raise FileNotFoundError('The command was not found or was not ' + 'executable: %s.' % command) command = command_with_path argv[0] = command # [issue #119] To prevent the case where exec fails and the user is # stuck interacting with a python child process instead of whatever # was expected, we implement the solution from # http://stackoverflow.com/a/3703179 to pass the exception to the # parent process # [issue #119] 1. Before forking, open a pipe in the parent process. exec_err_pipe_read, exec_err_pipe_write = os.pipe() if use_native_pty_fork: pid, fd = pty.fork() else: # Use internal fork_pty, for Solaris pid, fd = _fork_pty.fork_pty() # Some platforms must call setwinsize() and setecho() from the # child process, and others from the master process. We do both, # allowing IOError for either. if pid == CHILD: # set window size try: _setwinsize(STDIN_FILENO, *dimensions) except IOError as err: if err.args[0] not in (errno.EINVAL, errno.ENOTTY): raise # disable echo if spawn argument echo was unset if not echo: try: _setecho(STDIN_FILENO, False) except (IOError, termios.error) as err: if err.args[0] not in (errno.EINVAL, errno.ENOTTY): raise # [issue #119] 3. The child closes the reading end and sets the # close-on-exec flag for the writing end. os.close(exec_err_pipe_read) fcntl.fcntl(exec_err_pipe_write, fcntl.F_SETFD, fcntl.FD_CLOEXEC) # Do not allow child to inherit open file descriptors from parent, # with the exception of the exec_err_pipe_write of the pipe # Impose ceiling on max_fd: AIX bugfix for users with unlimited # nofiles where resource.RLIMIT_NOFILE is 2^63-1 and os.closerange() # occasionally raises out of range error max_fd = min(1048576, resource.getrlimit(resource.RLIMIT_NOFILE)[0]) os.closerange(3, exec_err_pipe_write) os.closerange(exec_err_pipe_write+1, max_fd) if cwd is not None: os.chdir(cwd) if preexec_fn is not None: try: preexec_fn() except Exception as e: ename = type(e).__name__ tosend = '{}:0:{}'.format(ename, str(e)) if PY3: tosend = tosend.encode('utf-8') os.write(exec_err_pipe_write, tosend) os.close(exec_err_pipe_write) os._exit(1) try: if env is None: os.execv(command, argv) else: os.execvpe(command, argv, env) except OSError as err: # [issue #119] 5. 
If exec fails, the child writes the error # code back to the parent using the pipe, then exits. tosend = 'OSError:{}:{}'.format(err.errno, str(err)) if PY3: tosend = tosend.encode('utf-8') os.write(exec_err_pipe_write, tosend) os.close(exec_err_pipe_write) os._exit(os.EX_OSERR) # Parent inst = cls(pid, fd) # Set some informational attributes inst.argv = argv if env is not None: inst.env = env if cwd is not None: inst.launch_dir = cwd # [issue #119] 2. After forking, the parent closes the writing end # of the pipe and reads from the reading end. os.close(exec_err_pipe_write) exec_err_data = os.read(exec_err_pipe_read, 4096) os.close(exec_err_pipe_read) # [issue #119] 6. The parent reads eof (a zero-length read) if the # child successfully performed exec, since close-on-exec made # successful exec close the writing end of the pipe. Or, if exec # failed, the parent reads the error code and can proceed # accordingly. Either way, the parent blocks until the child calls # exec. if len(exec_err_data) != 0: try: errclass, errno_s, errmsg = exec_err_data.split(b':', 2) exctype = getattr(builtins, errclass.decode('ascii'), Exception) exception = exctype(errmsg.decode('utf-8', 'replace')) if exctype is OSError: exception.errno = int(errno_s) except: raise Exception('Subprocess failed, got bad error data: %r' % exec_err_data) else: raise exception try: inst.setwinsize(*dimensions) except IOError as err: if err.args[0] not in (errno.EINVAL, errno.ENOTTY, errno.ENXIO): raise return inst
if s is None:
    print('could not open socket\n')
    sys.exit(1)

framedSocket.sendMessage(s, sys.argv[0].encode())  # "Send" input
framedSocket.sendMessage(s, serverFile.encode())   # file name to be saved in server
response = framedSocket.receiveMessage(s)          # gets response from the server

if (response == "OK"):
    fd = os.open("./client/" + clientFile, os.O_RDONLY)
    next = 0
    limit = 0
    buf = ""
    message = ""
    while 1:
        buf = os.read(fd, 100).decode()
        limit = len(buf)
        if limit == 0:
            break
        message += buf  # string buffer adds to real message to be sent
    framedSocket.sendMessage(s, message.encode())  # message sent through a framed socket
    result = framedSocket.receiveMessage(s)        # receives a result of the transfer
    print(result + "\n")                           # prints success result
elif (response == "NO"):
    print("File name already taken on server file\n")
else:
    print("File currently being written into\n")

s.close()
def read(self, path, length, offset, fh):
    print "read called : ", path
    os.lseek(fh, offset, os.SEEK_SET)
    return os.read(fh, length)
timeBase = 0
iterWait = 0
anchors_ind = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
zs = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]

try:
    os.mkfifo(IPC_FIFO_NAME)
except OSError:
    print("File Exists")

try:
    while True:
        # Reading from Pipe
        fifor = os.open(IPC_FIFO_NAME, os.O_RDONLY)
        line = os.read(fifor, 500).decode('utf8')
        line = line.split(';')
        line = line[0]
        time = int(line[6:10])
        print("\n******************************\nAt " + str(time) +
              "\nReceived encoded data: " + line)
        os.close(fifor)

        if timeBase != time:
            timeBase = time
            anchors_ind = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            zs = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            iterWait = 1
        else:
            iterWait += 1

        dist = calculate_dist(int(line[2:5]))
def _recv(self, maxsize): rfds = [] if self.child_fd: rfds.append(self.child_fd) if self.child_fde: rfds.append(self.child_fde) if not self.isalive(): if not rfds: return None, None rlist, _, _ = select.select(rfds, [], [], 0) if not rlist: self.flag_eof_stdout = self.flag_eof_stderr = True log.debug('End of file(EOL). Brain-dead platform.') return None, None elif self.__irix_hack: # Irix takes a long time before it realizes a child was # terminated. # FIXME So does this mean Irix systems are forced to always # have a 2 second delay when calling read_nonblocking? # That sucks. rlist, _, _ = select.select(rfds, [], [], 2) if not rlist: self.flag_eof_stdout = self.flag_eof_stderr = True log.debug('End of file(EOL). Slow platform.') return None, None stderr = '' stdout = '' # ----- Store FD Flags ------------------------------------------> if self.child_fd: fd_flags = fcntl.fcntl(self.child_fd, fcntl.F_GETFL) if self.child_fde: fde_flags = fcntl.fcntl(self.child_fde, fcntl.F_GETFL) # <---- Store FD Flags ------------------------------------------- # ----- Non blocking Reads --------------------------------------> if self.child_fd: fcntl.fcntl(self.child_fd, fcntl.F_SETFL, fd_flags | os.O_NONBLOCK) if self.child_fde: fcntl.fcntl(self.child_fde, fcntl.F_SETFL, fde_flags | os.O_NONBLOCK) # <---- Non blocking Reads --------------------------------------- # ----- Check for any incoming data -----------------------------> rlist, _, _ = select.select(rfds, [], [], 0) # <---- Check for any incoming data ------------------------------ # ----- Nothing to Process!? ------------------------------------> if not rlist: if not self.isalive(): self.flag_eof_stdout = self.flag_eof_stderr = True log.debug('End of file(EOL). Very slow platform.') return None, None # <---- Nothing to Process!? 
------------------------------------- # ----- Process STDERR ------------------------------------------> if self.child_fde in rlist: try: stderr = self._translate_newlines( salt.utils.stringutils.to_unicode( os.read(self.child_fde, maxsize))) if not stderr: self.flag_eof_stderr = True stderr = None else: if self.stream_stderr: self.stream_stderr.write(stderr) self.stream_stderr.flush() if self.stderr_logger: stripped = stderr.rstrip() if stripped.startswith(os.linesep): stripped = stripped[len(os.linesep):] if stripped: self.stderr_logger.log( self.stderr_logger_level, stripped) except OSError: os.close(self.child_fde) self.child_fde = None self.flag_eof_stderr = True stderr = None finally: if self.child_fde is not None: fcntl.fcntl(self.child_fde, fcntl.F_SETFL, fde_flags) # <---- Process STDERR ------------------------------------------- # ----- Process STDOUT ------------------------------------------> if self.child_fd in rlist: try: stdout = self._translate_newlines( salt.utils.stringutils.to_unicode( os.read(self.child_fd, maxsize))) if not stdout: self.flag_eof_stdout = True stdout = None else: if self.stream_stdout: self.stream_stdout.write( salt.utils.data.encode(stdout)) self.stream_stdout.flush() if self.stdout_logger: stripped = stdout.rstrip() if stripped.startswith(os.linesep): stripped = stripped[len(os.linesep):] if stripped: self.stdout_logger.log( self.stdout_logger_level, stripped) except OSError: os.close(self.child_fd) self.child_fd = None self.flag_eof_stdout = True stdout = None finally: if self.child_fd is not None: fcntl.fcntl(self.child_fd, fcntl.F_SETFL, fd_flags) # <---- Process STDOUT ------------------------------------------- return salt.utils.data.encode(stdout), salt.utils.data.encode( stderr)
def exec_command(self, cmd, tmp_path, sudo_user, sudoable=False, executable='/bin/sh'): ''' run a command on the remote host ''' ssh_cmd = self._password_cmd() ssh_cmd += ["ssh", "-tt", "-q"] + self.common_args if self.ipv6: ssh_cmd += ['-6'] ssh_cmd += [self.host] if not self.runner.sudo or not sudoable: if executable: ssh_cmd.append(executable + ' -c ' + pipes.quote(cmd)) else: ssh_cmd.append(cmd) else: sudocmd, prompt = utils.make_sudo_cmd(sudo_user, executable, cmd) ssh_cmd.append(sudocmd) vvv("EXEC %s" % ssh_cmd, host=self.host) not_in_host_file = self.not_in_host_file(self.host) if C.HOST_KEY_CHECKING and not_in_host_file: # lock around the initial SSH connectivity so the user prompt about whether to add # the host to known hosts is not intermingled with multiprocess output. fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX) fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX) try: # Make sure stdin is a proper (pseudo) pty to avoid: tcgetattr errors import pty master, slave = pty.openpty() p = subprocess.Popen(ssh_cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdin = os.fdopen(master, 'w', 0) except: p = subprocess.Popen(ssh_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdin = p.stdin self._send_password() if self.runner.sudo and sudoable and self.runner.sudo_pass: fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK) sudo_output = '' while not sudo_output.endswith(prompt): rfd, wfd, efd = select.select([p.stdout], [], [p.stdout], self.runner.timeout) if p.stdout in rfd: chunk = p.stdout.read() if not chunk: raise errors.AnsibleError( 'ssh connection closed waiting for sudo password prompt' ) sudo_output += chunk else: stdout = p.communicate() raise errors.AnsibleError( 'ssh connection error waiting for sudo password prompt' ) stdin.write(self.runner.sudo_pass + '\n') fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK) # We can't use p.communicate here because the ControlMaster may have stdout open as well stdout = '' stderr = '' while True: rfd, wfd, efd = select.select([p.stdout, p.stderr], [], [p.stdout, p.stderr], 1) # fail early if the sudo password is wrong if self.runner.sudo and sudoable and self.runner.sudo_pass: incorrect_password = gettext.dgettext("sudo", "Sorry, try again.") if stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)): raise errors.AnsibleError('Incorrect sudo password') if p.stdout in rfd: dat = os.read(p.stdout.fileno(), 9000) stdout += dat if dat == '': p.wait() break elif p.stderr in rfd: dat = os.read(p.stderr.fileno(), 9000) stderr += dat if dat == '': p.wait() break elif p.poll() is not None: break stdin.close( ) # close stdin after we read from stdout (see also issue #848) if C.HOST_KEY_CHECKING and not_in_host_file: # lock around the initial SSH connectivity so the user prompt about whether to add # the host to known hosts is not intermingled with multiprocess output. fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN) fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN) if p.returncode != 0 and stderr.find( 'Bad configuration option: ControlPersist') != -1: raise errors.AnsibleError( 'using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" (or ansible_ssh_args in the config file) before running again' ) return (p.returncode, '', stdout, stderr)
def __exit__(self, *args):
    os.dup2(self.copy_fd, self.capture_fd)
    os.close(self.copy_fd)
    os.close(self.write_fd)
    self._value = os.read(self.read_fd, 512)
    os.close(self.read_fd)
def _communicate(self, p, stdin, indata, su=False, sudoable=False, prompt=None): fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK) fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK) # We can't use p.communicate here because the ControlMaster may have stdout open as well stdout = '' stderr = '' rpipes = [p.stdout, p.stderr] if indata: try: stdin.write(indata) stdin.close() except: raise AnsibleConnectionFailure( 'SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh' ) # Read stdout/stderr from process while True: rfd, wfd, efd = select.select(rpipes, [], rpipes, 1) # FIXME: su/sudo stuff # fail early if the sudo/su password is wrong #if self.runner.sudo and sudoable: # if self.runner.sudo_pass: # incorrect_password = gettext.dgettext( # "sudo", "Sorry, try again.") # if stdout.endswith("%s\r\n%s" % (incorrect_password, # prompt)): # raise AnsibleError('Incorrect sudo password') # # if stdout.endswith(prompt): # raise AnsibleError('Missing sudo password') # #if self.runner.su and su and self.runner.su_pass: # incorrect_password = gettext.dgettext( # "su", "Sorry") # if stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)): # raise AnsibleError('Incorrect su password') if p.stdout in rfd: dat = os.read(p.stdout.fileno(), 9000) stdout += dat if dat == '': rpipes.remove(p.stdout) if p.stderr in rfd: dat = os.read(p.stderr.fileno(), 9000) stderr += dat if dat == '': rpipes.remove(p.stderr) # only break out if no pipes are left to read or # the pipes are completely read and # the process is terminated if (not rpipes or not rfd) and p.poll() is not None: break # No pipes are left to read but process is not yet terminated # Only then it is safe to wait for the process to be finished # NOTE: Actually p.poll() is always None here if rpipes is empty elif not rpipes and p.poll() == None: p.wait() # The process is terminated. Since no pipes to read from are # left, there is no need to call select() again. break # close stdin after process is terminated and stdout/stderr are read # completely (see also issue #848) stdin.close() return (p.returncode, stdout, stderr)
def read(self, length, offset):
    os.lseek(self.fd, offset, 0)
    return os.read(self.fd, length)
def communicate(self, input=None): """Interact with process: Send data to stdin. Read data from stdout and stderr, until end-of-file is reached. Wait for process to terminate. The optional input argument should be a string to be sent to the child process, or None, if no data should be sent to the child. communicate() returns a tuple (stdout, stderr).""" read_set = [] write_set = [] stdout = None # Return stderr = None # Return if self.stdin: # Flush stdio buffer. This might block, if the user has # been writing to .stdin in an uncontrolled fashion. self.stdin.flush() if input: write_set.append(self.stdin) else: self.stdin.close() if self.stdout: read_set.append(self.stdout) stdout = [] if self.stderr: read_set.append(self.stderr) stderr = [] while read_set or write_set: rlist, wlist, xlist = select.select(read_set, write_set, []) if self.stdin in wlist: # When select has indicated that the file is writable, # we can write up to PIPE_BUF bytes without risk # blocking. POSIX defines PIPE_BUF >= 512 bytes_written = os.write(self.stdin.fileno(), input[:512]) input = input[bytes_written:] if not input: self.stdin.close() write_set.remove(self.stdin) if self.stdout in rlist: data = os.read(self.stdout.fileno(), 1024) if data == "": self.stdout.close() read_set.remove(self.stdout) stdout.append(data) if self.stderr in rlist: data = os.read(self.stderr.fileno(), 1024) if data == "": self.stderr.close() read_set.remove(self.stderr) stderr.append(data) # All data exchanged. Translate lists into strings. if stdout != None: stdout = ''.join(stdout) if stderr != None: stderr = ''.join(stderr) # Translate newlines, if requested. We cannot let the file # object do the translation: It is based on stdio, which is # impossible to combine with select (unless forcing no # buffering). if self.universal_newlines and hasattr(open, 'newlines'): if stdout: stdout = self._translate_newlines(stdout) if stderr: stderr = self._translate_newlines(stderr) self.wait() return (stdout, stderr)
TUNSETIFF = 0x400454ca
IFF_TUN = 0x0001
IFF_TAP = 0x0002
IFF_NO_PI = 0x1000

SERVER_IP = "10.0.2.13"
SERVER_PORT = 9090

# Create the tun interface
tun = os.open("/dev/net/tun", os.O_RDWR)
ifr = struct.pack('16sH', b'tun%d', IFF_TUN | IFF_NO_PI)
ifname_bytes = fcntl.ioctl(tun, TUNSETIFF, ifr)

# Get the interface name
ifname = ifname_bytes.decode('UTF-8')[:16].strip("\x00")
print("Interface Name: {}".format(ifname))

# Assign IP address to the interface
os.system("ip addr add 192.168.53.99/24 dev {}".format(ifname))
os.system("ip link set dev {} up".format(ifname))

# Create UDP socket --> VPN Server's UDP Server
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

while True:
    # Get a packet from the tun interface
    packet = os.read(tun, 2048)
    if True:
        # send the packet via the tunnel
        sock.sendto(packet, (SERVER_IP, SERVER_PORT))
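# Hedged companion sketch (not from the original source): the loop above only
# forwards tun -> UDP.  A bidirectional variant would multiplex both file
# descriptors with select() and write packets coming back from the server into
# the tun device.  tunnel_loop is a hypothetical name; it reuses SERVER_IP and
# SERVER_PORT from the snippet above.
import os
import select

def tunnel_loop(tun, sock):
    while True:
        ready, _, _ = select.select([tun, sock], [], [])
        if tun in ready:
            packet = os.read(tun, 2048)        # outbound IP packet from the kernel
            sock.sendto(packet, (SERVER_IP, SERVER_PORT))
        if sock in ready:
            data, addr = sock.recvfrom(2048)   # packet returned by the VPN server
            os.write(tun, data)                # hand it back to the kernel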
sys.exit(1)

# hashlib is only available in python 2.5 or higher, but the 'sha' module
# produces a DeprecationWarning in python 2.6 or higher.  We want to support
# python 2.4 and above without any stupid warnings, so let's try using hashlib
# first, and downgrade if it fails.
try:
    import hashlib
except ImportError:
    import sha
    sh = sha.sha()
else:
    sh = hashlib.sha1()

while 1:
    b = os.read(0, 4096)
    sh.update(b)
    if not b:
        break
csum = sh.hexdigest()

if not vars.TARGET:
    sys.exit(0)

me = os.path.join(vars.STARTDIR, os.path.join(vars.PWD, vars.TARGET))
f = state.File(name=me)
changed = (csum != f.csum)
debug2('%s: old = %s\n' % (f.name, f.csum))
debug2('%s: sum = %s (%s)\n' % (f.name, csum,
                                changed and 'changed' or 'unchanged'))
f.is_generated = True
def _execute_child(self, args, executable, preexec_fn, close_fds, cwd, env, universal_newlines, startupinfo, creationflags, shell, p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite): """Execute program (POSIX version)""" if isinstance(args, str): args = [args] if shell: args = ["/bin/sh", "-c"] + args if executable == None: executable = args[0] # For transferring possible exec failure from child to parent # The first char specifies the exception type: 0 means # OSError, 1 means some other error. errpipe_read, errpipe_write = os.pipe() self._set_cloexec_flag(errpipe_write) self.pid = os.fork() if self.pid == 0: # Child try: # Close parent's pipe ends if p2cwrite: os.close(p2cwrite) if c2pread: os.close(c2pread) if errread: os.close(errread) os.close(errpipe_read) # Dup fds for child if p2cread: os.dup2(p2cread, 0) if c2pwrite: os.dup2(c2pwrite, 1) if errwrite: os.dup2(errwrite, 2) # Close pipe fds. Make sure we doesn't close the same # fd more than once. if p2cread: os.close(p2cread) if c2pwrite and c2pwrite not in (p2cread, ): os.close(c2pwrite) if errwrite and errwrite not in (p2cread, c2pwrite): os.close(errwrite) # Close all other fds, if asked for if close_fds: self._close_fds(but=errpipe_write) if cwd != None: os.chdir(cwd) if preexec_fn: apply(preexec_fn) if env == None: os.execvp(executable, args) else: os.execvpe(executable, args, env) except: exc_type, exc_value, tb = sys.exc_info() # Save the traceback and attach it to the exception object exc_lines = traceback.format_exception( exc_type, exc_value, tb) exc_value.child_traceback = ''.join(exc_lines) os.write(errpipe_write, pickle.dumps(exc_value)) # This exitcode won't be reported to applications, so it # really doesn't matter what we return. os._exit(255) # Parent os.close(errpipe_write) if p2cread and p2cwrite: os.close(p2cread) if c2pwrite and c2pread: os.close(c2pwrite) if errwrite and errread: os.close(errwrite) # Wait for exec to fail or succeed; possibly raising exception data = os.read(errpipe_read, 1048576) # Exceptions limited to 1 MB os.close(errpipe_read) if data != "": os.waitpid(self.pid, 0) child_exception = pickle.loads(data) raise child_exception