def __init__(self, nonblock=False, cloexec=True, mtu=150):
    self.fd = socket.socket(PF_SYSTEM, socket.SOCK_DGRAM, SYSPROTO_CONTROL)
    info = ctl_info(0, UTUN_CONTROL_NAME)
    fcntl.ioctl(self.fd, CTLIOCGINFO, info)
    self.fd.connect((info.ctl_id, 0))
    self.iface = self.fd.getsockopt(SYSPROTO_CONTROL, UTUN_OPT_IFNAME, 256)[:-1]
    if nonblock:
        fcntl.fcntl(self.fd, fcntl.F_SETFL, os.O_NONBLOCK)
    if cloexec:
        fcntl.fcntl(self.fd, fcntl.F_SETFD, fcntl.FD_CLOEXEC)
    self.mtu = mtu
    # From ifconfig.8:
    ## Basic IPv6 node operation requires a link-local address on each
    ## interface configured for IPv6.  Normally, such an address is
    ## automatically configured by the kernel on each interface added to
    ## the system; this behaviour may be disabled by setting the sysctl MIB
    ## variable net.inet6.ip6.auto_linklocal to 0.
    ## If you delete such an address using ifconfig, the kernel may act
    ## very odd.  Do this at your own risk.
    # force generation of link-local address and routes
    # (the source runs this add/delete/reroute sequence twice; the loop
    # preserves that behavior)
    for _ in range(2):
        subprocess.Popen(['ifconfig', self.iface, 'inet6', 'fe80::1/64',
                          'mtu', str(self.mtu)]).wait()
        subprocess.Popen(['ifconfig', self.iface, 'inet6', 'delete', 'fe80::1']).wait()
        time.sleep(.5)
        subprocess.Popen(['route', '-q', 'delete', '-inet6', '-net',
                          'fe80::%%%s/64' % self.iface]).wait()
        subprocess.Popen(['route', '-q', 'add', '-inet6', '-net',
                          'fe80::%%%s/64' % self.iface,
                          '-interface', self.iface]).wait()
        time.sleep(.5)
    self.fileno = self.fd.fileno
def startMitlm(self):
    """Start MITLM estimate-ngram in 0MQ entropy query mode, unless already running."""
    if self.mitlmSocket is not None:
        if ucParanoid:
            assert not self.mitlmSocket.closed
            assert self.mitlmProc.poll() is None
        # Already running
        self.checkMitlm()
        return
    assert os.path.exists(self.readCorpus), "No such corpus."
    assert not allWhitespace.match(slurp(self.readCorpus)), "Corpus is full of whitespace!"
    assert os.path.exists(self.estimateNgramPath), "No such estimate-ngram."
    self.mitlmProc = subprocess.Popen(
        [self.estimateNgramPath, "-t", self.readCorpus,
         "-o", str(self.order), "-s", "ModKN", "-u",
         "-live-prob", self.mitlmSocketPath],
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    debug("Started MITLM as PID %i." % self.mitlmProc.pid)
    fd = self.mitlmProc.stdout.fileno()
    fl = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
    time.sleep(1)
    self.checkMitlm()
    self.mitlmSocket = self.zctx.socket(zmq.REQ)
    self.mitlmSocket.connect(self.mitlmSocketPath)
    self.checkMitlm()
    self.mitlmSocket.send("for ( i =")
    self.checkMitlm()
    r = float(self.mitlmSocket.recv())
    debug("MITLM said %f" % r)
    self.checkMitlm()
def __init__(
    self,
    addr,
    requestHandler=SimpleJSONRPCRequestHandler,
    logRequests=True,
    encoding=None,
    bind_and_activate=True,
    address_family=socket.AF_INET,
):
    self.logRequests = logRequests
    SimpleJSONRPCDispatcher.__init__(self, encoding)
    # TCPServer.__init__ has an extra parameter on 2.6+, so
    # check Python version and decide on how to call it
    vi = sys.version_info
    self.address_family = address_family
    if USE_UNIX_SOCKETS and address_family == socket.AF_UNIX:
        # Unix sockets can't be bound if they already exist in the
        # filesystem. The convention of e.g. X11 is to unlink
        # before binding again.
        if os.path.exists(addr):
            try:
                os.unlink(addr)
            except OSError:
                logging.warning("Could not unlink socket %s", addr)
    # if python 2.5 and lower
    if vi[0] < 3 and vi[1] < 6:
        SocketServer.TCPServer.__init__(self, addr, requestHandler)
    else:
        SocketServer.TCPServer.__init__(self, addr, requestHandler,
                                        bind_and_activate)
    if fcntl is not None and hasattr(fcntl, "FD_CLOEXEC"):
        flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
        flags |= fcntl.FD_CLOEXEC
        fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)
def shell_call(cmd):
    # subprocess.call(cmd)
    ON_POSIX = 'posix' in sys.builtin_module_names
    hpipe = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             bufsize=1, close_fds=ON_POSIX, shell=True)
    # Change to non-blocking mode, so read returns even if there is no data.
    fcntl.fcntl(hpipe.stdout, fcntl.F_SETFL, os.O_NONBLOCK)
    fcntl.fcntl(hpipe.stderr, fcntl.F_SETFL, os.O_NONBLOCK)
    total_output_stdout = ''
    total_output_stderr = ''
    while True:
        # wait for data to become available
        select.select([hpipe.stdout, hpipe.stderr], [], [])
        # Try reading some data from each
        output_stdout = read_async(hpipe.stdout)
        output_stderr = read_async(hpipe.stderr)
        if output_stdout:
            stdout_write(output_stdout)
            total_output_stdout += output_stdout
        if output_stderr:
            stdout_write(output_stderr)
            total_output_stderr += output_stderr
        rc = hpipe.poll()
        if rc is not None:
            return total_output_stdout + total_output_stderr
def open_dialog(self, command, *args):
    self.inf = open(infifo, 'w')
    fcntl.fcntl(self.inf.fileno(), fcntl.F_SETFD, fcntl.FD_CLOEXEC)
    self.write_line(command, self.current_disk, *args)
    self.outf = open(outfifo, 'r')
    fcntl.fcntl(self.outf.fileno(), fcntl.F_SETFD, fcntl.FD_CLOEXEC)
    self.error_handler()
def set_cloexec(filelike):
    """Sets the underlying filedescriptor to automatically close on exec.

    If set_cloexec is called for all open files, then subprocess.Popen does
    not require the close_fds option.
    """
    # FD_CLOEXEC is a flag value, not a fcntl command: it has to be applied
    # via F_SETFD.  (The original call passed FD_CLOEXEC as the command
    # argument, which did not set the flag at all.)
    flags = fcntl.fcntl(filelike.fileno(), fcntl.F_GETFD)
    fcntl.fcntl(filelike.fileno(), fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
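# --- usage sketch (not from the source): mark a file close-on-exec and verify
# the flag afterwards via F_GETFD; the path here is a hypothetical example.
import fcntl

with open("/tmp/example.log", "w") as logf:
    set_cloexec(logf)
    assert fcntl.fcntl(logf.fileno(), fcntl.F_GETFD) & fcntl.FD_CLOEXEC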
def _CaptureOutput(self):
    p = self.process
    s_in = [_sfd(p.stdout, sys.stdout, 'stdout'),
            _sfd(p.stderr, sys.stderr, 'stderr')]
    self.stdout = ''
    self.stderr = ''
    for s in s_in:
        flags = fcntl.fcntl(s.fd, fcntl.F_GETFL)
        fcntl.fcntl(s.fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
    while s_in:
        in_ready, _, _ = select.select(s_in, [], [])
        for s in in_ready:
            buf = s.fd.read(4096)
            if not buf:
                s_in.remove(s)
                continue
            if s.std_name == 'stdout':
                self.stdout += buf
            else:
                self.stderr += buf
            if self.tee[s.std_name]:
                s.dest.write(buf)
                s.dest.flush()
    return p.wait()
def getChar():
    import termios, fcntl, sys, os, select
    fd = sys.stdin.fileno()
    oldterm = termios.tcgetattr(fd)
    newattr = oldterm[:]
    newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
    termios.tcsetattr(fd, termios.TCSANOW, newattr)
    oldflags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)
    try:
        # while 1:
        r, w, e = select.select([fd], [], [])
        if r:
            c = sys.stdin.read(1)
            print "Got character", repr(c)
            if c == "q":
                return
    finally:
        termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
        fcntl.fcntl(fd, fcntl.F_SETFL, oldflags)
    return c
def nonblock(fd):
    # if O_NDELAY is set read() returns 0 (ambiguous with EOF).
    # if O_NONBLOCK is set read() returns -1 and sets errno to EAGAIN
    original_flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
    flags = original_flags | os.O_NONBLOCK
    fcntl.fcntl(fd, fcntl.F_SETFL, flags)
    return original_flags
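# --- usage sketch (not from the source): the returned flags make it easy to
# restore the descriptor afterwards; demonstrated on a fresh pipe.
import errno, fcntl, os

r, w = os.pipe()
saved = nonblock(r)
try:
    os.read(r, 1)                  # empty pipe: raises EAGAIN instead of blocking
except OSError as e:
    assert e.errno == errno.EAGAIN
finally:
    fcntl.fcntl(r, fcntl.F_SETFL, saved)   # restore the original flags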
def _open_nonblocking(self):
    """Open a FIFO as read-only and non-blocking."""
    fifo = open(self.path, "r")
    fd = fifo.fileno()
    flag = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, flag | os.O_NONBLOCK)
    return fifo
def connect(self):
    while True:
        try:
            for self.audit_socket_path in self.audit_socket_paths:
                if self.audit_socket_path is not None:
                    try:
                        record_format = derive_record_format(self.audit_socket_path)
                        self.record_reader = AuditRecordReader(record_format)
                        self.audit_socket = Socket.socket(Socket.AF_UNIX,
                                                          Socket.SOCK_STREAM)
                        fcntl.fcntl(self.audit_socket.fileno(),
                                    fcntl.F_SETFD, fcntl.FD_CLOEXEC)
                        self.audit_socket.connect(self.audit_socket_path)
                        self.audit_socket_fd = self.audit_socket.makefile()
                        log_debug("audit socket (%s) connected"
                                  % self.audit_socket_path)
                        return
                    except Socket.error, e:
                        errno, strerror = get_error_from_socket_exception(e)
                        log_debug("attempt to open audit socket (%s) failed, error='%s'"
                                  % (self.audit_socket_path, strerror))
            log_debug("could not open any audit sockets (%s), retry in %d seconds"
                      % (", ".join(self.audit_socket_paths), self.retry_interval))
        except Socket.error, e:
            errno, strerror = get_error_from_socket_exception(e)
            log_debug("audit socket (%s) failed, error='%s', retry in %d seconds"
                      % (self.audit_socket_path, strerror, self.retry_interval))
def worker(wait_timeout, match_threshold, add_arguments):
    cmd = "./fingermatch --match %s --quiet --fp-file ../nmap-os-db %s" % (
        str(match_threshold), add_arguments)
    p = subprocess.Popen(cmd, shell=True,
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    # Switch the process's stderr to non-blocking mode
    # (might not work on Windows)
    fd = p.stderr.fileno()
    fl = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
    try:
        while True:
            line = q.get(timeout=wait_timeout)
            try:
                process_line(line, p)
            finally:
                q.task_done()
    except Queue.Empty:
        pass
    except IOError:
        # broken pipe due to CTRL+C
        pass
    finally:
        p.stdin.close()
        p.terminate()
def test_basic(self):
    try:
        debug("Calling master_open()")
        master_fd, slave_name = pty.master_open()
        debug("Got master_fd '%d', slave_name '%s'" % (master_fd, slave_name))
        debug("Calling slave_open(%r)" % (slave_name,))
        slave_fd = pty.slave_open(slave_name)
        debug("Got slave_fd '%d'" % slave_fd)
    except OSError:
        # " An optional feature could not be imported " ... ?
        raise TestSkipped, "Pseudo-terminals (seemingly) not functional."
    self.assertTrue(os.isatty(slave_fd), 'slave_fd is not a tty')
    # Solaris requires reading the fd before anything is returned.
    # My guess is that since we open and close the slave fd
    # in master_open(), we need to read the EOF.
    # Ensure the fd is non-blocking in case there's nothing to read.
    orig_flags = fcntl.fcntl(master_fd, fcntl.F_GETFL)
    fcntl.fcntl(master_fd, fcntl.F_SETFL, orig_flags | os.O_NONBLOCK)
    try:
        s1 = os.read(master_fd, 1024)
        self.assertEquals('', s1)
    except OSError, e:
        if e.errno != errno.EAGAIN:
            raise
def createInternetSocket(self):
    s = socket.socket(self.addressFamily, self.socketType)
    s.setblocking(0)
    if fcntl and hasattr(fcntl, 'FD_CLOEXEC'):
        old = fcntl.fcntl(s.fileno(), fcntl.F_GETFD)
        fcntl.fcntl(s.fileno(), fcntl.F_SETFD, old | fcntl.FD_CLOEXEC)
    return s
def stream_output(proc: "subprocess.Popen"):
    """
    Take a subprocess.Popen object and generate its output, line by line,
    annotated with "stdout" or "stderr". At process termination it generates
    one last element: ("result", return_code) with the return code of the
    process.
    """
    fds = [proc.stdout, proc.stderr]
    bufs = [b"", b""]
    types = ["stdout", "stderr"]
    # Set both pipes as non-blocking
    for fd in fds:
        fcntl.fcntl(fd, fcntl.F_SETFL, os.O_NONBLOCK)
    # Multiplex stdout and stderr with different prefixes
    while len(fds) > 0:
        s = select.select(fds, (), ())
        for fd in s[0]:
            idx = fds.index(fd)
            buf = fd.read()
            if len(buf) == 0:
                fds.pop(idx)
                if len(bufs[idx]) != 0:
                    yield types[idx], bufs[idx].decode("utf-8")
                # pop bufs unconditionally so the three lists stay aligned
                # (the original popped it only when non-empty, which could
                # misalign buffers after one stream closed)
                bufs.pop(idx)
                types.pop(idx)
            else:
                bufs[idx] += buf
                lines = bufs[idx].split(b"\n")
                bufs[idx] = lines.pop()
                for l in lines:
                    yield types[idx], l.decode("utf-8")
    res = proc.wait()
    yield "result", res
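# --- usage sketch (not from the source, assumes fcntl/os/select/subprocess are
# imported for stream_output above): drive the generator with a trivial shell
# command. The relative order of stdout/stderr lines is timing-dependent; only
# the final ("result", rc) element is guaranteed to come last.
import subprocess

proc = subprocess.Popen(["sh", "-c", "echo out; echo err 1>&2"],
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for kind, value in stream_output(proc):
    print(kind, value)   # e.g. ("stdout", "out"), ("stderr", "err"), ("result", 0)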
def _open(self):
    _stream = logging.handlers.WatchedFileHandler._open(self)
    fd = _stream.fileno()
    r = fcntl.fcntl(fd, fcntl.F_GETFD, 0)
    r = fcntl.fcntl(fd, fcntl.F_SETFD, r | fcntl.FD_CLOEXEC)
    return _stream
def get_users(max_wait=90):
    t0 = time.time()
    users = {}
    p = subprocess.Popen(['/usr/bin/getent', 'passwd'], shell=False,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    fcntl.fcntl(p.stdout.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
    alive = True
    user_data = ''
    while (alive):
        try:
            if (p.poll() is not None):
                alive = False
            user_data += p.stdout.read()
        except IOError:
            if (time.time() - t0 < max_wait):
                continue
        except Exception, e:
            logger.exception(e)
            p.terminate()
        uf = user_data.split('\n')
        #If the feed ends in \n, the last element will be '', if not, it will
        #be a partial line to be processed next time around.
        user_data = uf[-1]
        for u in uf[:-1]:
            ufields = u.split(':')
            if (len(ufields) > 3):
                charset = chardet.detect(ufields[0])
                uname = ufields[0].decode(charset['encoding'])
                users[uname] = (int(ufields[2]), int(ufields[3]), str(ufields[6]))
        if (time.time() - t0 > max_wait):
            p.terminate()
            break
def set_nonblocking(fd):
    """
    Sets the descriptor to be nonblocking. Works on many file-like objects
    as well as sockets. Only sockets can be nonblocking on Windows, however.
    """
    try:
        setblocking = fd.setblocking
    except AttributeError:
        # fd has no setblocking() method. It could be that this version of
        # Python predates socket.setblocking(). In that case, we can still set
        # the flag "by hand" on the underlying OS fileno using the fcntl
        # module.
        try:
            import fcntl
        except ImportError:
            # Whoops, Windows has no fcntl module. This might not be a socket
            # at all, but rather a file-like object with no setblocking()
            # method. In particular, on Windows, pipes don't support
            # non-blocking I/O and therefore don't have that method. Which
            # means fcntl wouldn't help even if we could load it.
            raise NotImplementedError("set_nonblocking() on a file object "
                                      "with no setblocking() method "
                                      "(Windows pipes don't support non-blocking I/O)")
        # We managed to import fcntl.
        fileno = fd.fileno()
        flags = fcntl.fcntl(fileno, fcntl.F_GETFL)
        fcntl.fcntl(fileno, fcntl.F_SETFL, flags | os.O_NONBLOCK)
    else:
        # socket supports setblocking()
        setblocking(0)
def __init__(self, args, shell=False, executable=None, env=None,
             timeout='default', log_level=log_levels.INFO):
    super(process, self).__init__(timeout, log_level)
    if executable:
        self.program = executable
    elif isinstance(args, (str, unicode)):
        self.program = args
    elif isinstance(args, (list, tuple)):
        self.program = args[0]
    else:
        log.error("process(): Do not understand the arguments %r" % args)
    self.proc = subprocess.Popen(
        args,
        shell=shell,
        executable=executable,
        env=env,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT)
    self.stop_noticed = False
    # Set in non-blocking mode so that a call to recv(1000) will
    # return as soon as the first byte is available
    fd = self.proc.stdout.fileno()
    fl = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
    log.success("Started program %r" % self.program, log_level=self.log_level)
def __init__(self, path='/usr/bin/gdb', *extra_args):
    self.path = path
    args = [self.path]
    args += self.REQUIRED_ARGS
    args += extra_args
    self.process = subprocess.Popen(args,
                                    stdin=subprocess.PIPE,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    close_fds=True)
    fcntl.fcntl(self.process.stdout.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
    self.read_until_break()
    # If this instance is connected to another target. If so, what
    # tcp port it's connected to
    self.connected_to = None
    # any GDB MI async messages
    self.async_messages = []
    self.commands_history = []
    # whatever comes from the app that is not a GDB MI message
    self.output_messages = []
    self.output_messages_queue = []
def _opensock(self):
    if self.debug & 0x01:
        print("_opensock ", self.authtuple[0])
    # self.event_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_address = (self.authtuple[0], 80)
    self.event_sock = socket.create_connection(server_address, 10)
    #sn = sock.getsockname()
    #self.myip = sn[0]
    #print "P ", self.myip
    #self.myurl = "http://{0}:{1}/".format(sn[0], self.server_address[1])
    #print "myurl ", self.myurl
    if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'):
        flags = fcntl.fcntl(self.event_sock.fileno(), fcntl.F_GETFD)
        flags |= fcntl.FD_CLOEXEC
        fcntl.fcntl(self.event_sock.fileno(), fcntl.F_SETFD, flags)
    self.event_rf = self.event_sock.makefile("rb")
    self.event_wf = self.event_sock.makefile("wb")
    return self.event_sock
def open(self, device_id):
    """
    Open the joystick device. The device_id is given by available_devices
    """
    if self.opened:
        raise Exception("A joystick is already opened")
    self.device_id = device_id
    self.jsfile = open("/dev/input/js{}".format(self.device_id), "r")
    fcntl.fcntl(self.jsfile.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
    val = ctypes.c_int()
    if fcntl.ioctl(self.jsfile.fileno(), JSIOCGAXES, val) != 0:
        self.jsfile.close()
        raise Exception("Failed to read number of axes")
    self.axes = list(0 for i in range(val.value))
    if fcntl.ioctl(self.jsfile.fileno(), JSIOCGBUTTONS, val) != 0:
        self.jsfile.close()
        raise Exception("Failed to read number of buttons")
    self.buttons = list(0 for i in range(val.value))
    self.__initvalues()
    self.opened = True
    print self.devices[device_id]
def monitor_log(log_file, threshold):
    f = open(log_file)
    fd = f.fileno()
    oldflags = fcntl.fcntl(fd, fcntl.F_GETFL)
    #print bin(oldflags), bin(os.O_NONBLOCK)
    #print bin(oldflags | os.O_NONBLOCK)
    fcntl.fcntl(fd, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)
    files = [f]
    # process old data
    f.read()
    p_func = log_reader(time.time(), 60, [])
    while True:
        #print rs
        rs, ws, es = select.select(files, [], [])
        if f in rs:
            data = ''
            while True:
                try:
                    output = f.readline()
                    if not output:
                        break
                    data += output
                except IOError, e:
                    #print e
                    break
            #yield data
            if len(data):
                #process_lines(data, threshold)
                p_func(data, threshold)
        time.sleep(1)
def connectionMade(self):
    self._tapBuff = ""
    self._mbDataToWrite = ""
    self._mbBuffLock = threading.Lock()
    self._tapBuffLock = threading.Lock()
    self._tapLock = threading.Lock()
    self._mbParseLock = threading.Lock()
    self._decoder = modbus.ModbusDecoder()
    print "sending login"
    self.sendMessage("login secret")
    #print "starting command thread"
    #self._commandThread = Thread(target = self.commandLoop, args = [])
    #self._commandThread.start()
    print "starting query thread"
    self._queryThread = Thread(target = self.pollLoop)  # adjust accordingly
    self._queryThread.start()
    print "opening tap"
    self._tap = open('/dev/net/tun', 'w+b')
    self._ifr = struct.pack('16sH', 'tap1', IFF_TAP | IFF_NO_PI)
    fcntl.ioctl(self._tap, TUNSETIFF, self._ifr)
    # need to make the tap device nonblocking
    tapfd = self._tap.fileno()
    tapfl = fcntl.fcntl(tapfd, fcntl.F_GETFL)
    fcntl.fcntl(tapfd, fcntl.F_SETFL, tapfl | os.O_NONBLOCK)
    # Optionally, we want it be accessed by the normal user.
    fcntl.ioctl(self._tap, TUNSETOWNER, 1000)
    # subprocess.check_call('ifconfig tun0 192.168.7.1 pointopoint 192.168.7.2 up',
    subprocess.check_call('ifconfig tap1 192.168.7.2 netmask 255.255.255.0',
                          shell=True)
    print "starting tap thread"
    self._tapThread = Thread(target = self.handle_tap, args = [])
    self._tapThread.start()
def burn(button, filename):
    global GBLprocess
    global GBLline
    global GBLoutput
    global GBLtimeStartedBurn
    print 'burning ' + filename
    showProgressWindow()
    # close the 'ready to burn' window
    parentWindow = button.get_parent_window()
    parentWindow.destroy()
    #command = globals.BURNINGPROGRAM, 'dev=' + globals.DEVICE, "gracetime=0", "blank=fast"
    command = (globals.BURNINGPROGRAM, 'dev=' + globals.DEVICE, 'gracetime=0',
               '-tao', '-v', '-eject', filename)
    # positional Popen arguments: bufsize=0, executable="wodim",
    # stdin=PIPE, stdout=PIPE, stderr=STDOUT
    GBLprocess = subprocess.Popen(command, 0, "wodim", subprocess.PIPE,
                                  subprocess.PIPE, subprocess.STDOUT)
    flags = fcntl.fcntl(GBLprocess.stdout, fcntl.F_GETFL)
    fcntl.fcntl(GBLprocess.stdout, fcntl.F_SETFL, flags | os.O_NONBLOCK)
    GBLline = ''
    GBLoutput = []
    GBLtimeStartedBurn = datetime.datetime.now()
    # have gtk call updateProgress every second
    gobject.timeout_add(1000, updateProgress)
def lock_file(pidfile):
    """Actually the code below is needless..."""
    import fcntl
    try:
        fp = open(pidfile, "r+" if os.path.isfile(pidfile) else "w+")
    except IOError:
        raise MirrorError("Can't open or create %s", pidfile)
    try:
        fcntl.flock(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        try:
            pid = int(fp.read().strip())
        except:
            raise MirrorError("Can't lock %s", pidfile)
        raise MirrorError("Can't lock %s, maybe another mirrord with pid %d is running",
                          pidfile, pid)
    fcntl.fcntl(fp, fcntl.F_SETFD, 1)  # 1 == FD_CLOEXEC
    fp.seek(0)
    fp.write("%d\n" % os.getpid())
    fp.truncate()
    fp.flush()
    # We need to return fp to keep a reference on it
    return fp
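# --- usage sketch (not from the source): hold the pidfile lock for the
# daemon's lifetime. The path is hypothetical and MirrorError comes from the
# surrounding project; closing the file (or process exit) releases the flock.
fp = lock_file("/var/run/mirrord.pid")
try:
    pass  # ... daemon work while the lock is held ...
finally:
    fp.close()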
def _recv(self, which, maxsize):
    conn, maxsize = self.get_conn_maxsize(which, maxsize)
    if conn is None:
        return None
    flags = fcntl.fcntl(conn, fcntl.F_GETFL)
    if not conn.closed:
        fcntl.fcntl(conn, fcntl.F_SETFL, flags | os.O_NONBLOCK)
    try:
        if not select.select([conn], [], [], 0)[0]:
            return ''
        buff = conn.read(maxsize)
        if not buff:
            return self._close(which)
        if self.universal_newlines:
            buff = self._translate_newlines(buff)
        getattr(self, '{0}_buff'.format(which)).write(buff)
        getattr(self, '_{0}_logger'.format(which)).debug(buff.rstrip())
        if self.stream_stds:
            getattr(sys, which).write(buff)
        return buff
    finally:
        if not conn.closed:
            fcntl.fcntl(conn, fcntl.F_SETFL, flags)
def twitch_connect(self):
    print("Connecting to twitch.tv")
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    #s.setblocking(False)
    #s.settimeout(1.0)
    connect_host = "irc.twitch.tv"
    connect_port = 6667
    try:
        s.connect((connect_host, connect_port))
    except:
        pass  # expected, because is non-blocking socket
        sys.exit()
    print("Connected to twitch")
    print("Sending our details to twitch...")
    #s.send('USER %s\r\n' % self.user)
    s.send('PASS %s\r\n' % self.oauth)
    s.send('NICK %s\r\n' % self.user)
    if not TwitchChatStream.twitch_login_status(s.recv(1024)):
        print("... and they didn't accept our details")
        sys.exit()
    else:
        print("... they accepted our details")
        print("Connected to twitch.tv!")
        fcntl.fcntl(s, fcntl.F_SETFL, os.O_NONBLOCK)
        if self.s is not None:
            self.s.close()
        self.s = s
        s.send('JOIN #%s\r\n' % self.user)
def _watch(self):
    pipes = []
    stdout = self._worker.stdout
    stderr = self._worker.stderr
    output = self._output
    if self.out:
        pipes.append(stdout)
    if self.err and self.err != 'stdout':
        pipes.append(stderr)
    if pipes:
        for pipe in pipes:
            fd = pipe.fileno()
            fl = fcntl.fcntl(fd, fcntl.F_GETFL)
            fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
        data = True
        while data and not self._auto_close:
            out = select.select(pipes, [], [])[0]
            for p in out:
                data = p.read()
                if data:
                    output.add(OutputQueue.STDOUT if p == stdout
                               else OutputQueue.STDERR, data)
    if stdout:
        stdout.close()
    if stderr:
        stderr.close()
    out = self._worker.wait()
    self.running = False
    if self._auto_close:
        instance_globals = self.instance['globals']
        with instance_globals['lock']:
            log.info('Removed process %s', self)
            instance_globals['processes'].pop(self.name, None)
    else:
        output.add(OutputQueue.CLOSED, out)
def _piped_execute(self, cmd1, cmd2):
    """Pipe output of cmd1 into cmd2."""
    LOG.debug("Piping cmd1='%s' into..." % ' '.join(cmd1))
    LOG.debug("cmd2='%s'" % ' '.join(cmd2))
    try:
        p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE)
    except OSError as e:
        LOG.error(_("Pipe1 failed - %s ") % unicode(e))
        raise
    # NOTE(dosaboy): ensure that the pipe is blocking. This is to work
    # around the case where eventlet.green.subprocess is used which seems
    # to use a non-blocking pipe.
    flags = fcntl.fcntl(p1.stdout, fcntl.F_GETFL) & (~os.O_NONBLOCK)
    fcntl.fcntl(p1.stdout, fcntl.F_SETFL, flags)
    try:
        p2 = subprocess.Popen(cmd2, stdin=p1.stdout,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE)
    except OSError as e:
        LOG.error(_("Pipe2 failed - %s ") % unicode(e))
        raise
    p1.stdout.close()
    stdout, stderr = p2.communicate()
    return p2.returncode, stderr
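# --- the flag-clearing idiom used above, in isolation (a sketch, names are
# assumptions): AND-ing with the complement drops O_NONBLOCK while leaving
# every other status flag untouched.
import fcntl, os

def make_blocking(fd):
    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)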
def setup_subprocess_pipe(self, pipe):
    descriptor = pipe.fileno()
    flags = fcntl.fcntl(descriptor, fcntl.F_GETFL)
    fcntl.fcntl(descriptor, fcntl.F_SETFL, flags | os.O_NONBLOCK)
def _set_nonblocking(fd):
    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
def set_close_exec(fd):
    flags = fcntl.fcntl(fd, fcntl.F_GETFD)
    fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
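# --- usage sketch for the two helpers above (not from the source): a pipe
# read end that is both non-blocking and close-on-exec.
import os

rfd, wfd = os.pipe()
_set_nonblocking(rfd)
set_close_exec(rfd)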
def assert_not_inheritable(f):
    if not fcntl.fcntl(f, fcntl.F_GETFD) & fcntl.FD_CLOEXEC:
        raise SystemExit('File handle is inheritable!')
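# --- usage sketch (not from the source): assert_not_inheritable() accepts
# anything fcntl.fcntl does, including a raw descriptor.
import fcntl, os

fd = os.open(os.devnull, os.O_RDONLY)
fcntl.fcntl(fd, fcntl.F_SETFD, fcntl.FD_CLOEXEC)
assert_not_inheritable(fd)   # a descriptor without FD_CLOEXEC would SystemExit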
def setCloseOnExec(self):
    flags = fcntl.fcntl(self.stream.fileno(), fcntl.F_GETFD)
    flags |= fcntl.FD_CLOEXEC
    fcntl.fcntl(self.stream.fileno(), fcntl.F_SETFD, flags)
import sys
import socket
import fcntl, os
import errno
from time import sleep

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('localhost', 10101))
fcntl.fcntl(s, fcntl.F_SETFL, os.O_NONBLOCK)

while True:
    try:
        msg = s.recv(16383)
    except socket.error as e:
        err = e.args[0]
        if err == errno.EAGAIN or err == errno.EWOULDBLOCK:
            sleep(1)
            print('No data available')
            continue
        else:
            # a "real" error occurred
            print(e)
            sys.exit(1)
    else:
        # got a message, do something :)
        data = msg.decode('utf-8')
        #print(type(data))
        print(data)
        print('sending ack...')
        #ACcmd1_str = "5.0 Point SC[0].B[0] Primary Vector [0.0 0.0 1.0] at MOON\nEOF"
def cmd_gather(cmd, set_env=None, realtime=False):
    """
    Runs a command and returns rc,stdout,stderr as a tuple.

    If called while the `Dir` context manager is in effect, guarantees that the
    process is executed in that directory, even if it is no longer the current
    directory of the process (i.e. it is thread-safe).

    :param cmd: The command and arguments to execute
    :param set_env: Dict of env vars to set for command (overriding existing)
    :param realtime: If True, output stdout and stderr in realtime instead of all at once.
    :return: (rc,stdout,stderr)
    """
    if not isinstance(cmd, list):
        cmd_list = shlex.split(cmd)
    else:
        cmd_list = cmd
    cwd = pushd.Dir.getcwd()
    cmd_info = '[cwd={}]: {}'.format(cwd, cmd_list)
    env = os.environ.copy()
    if set_env:
        cmd_info = '[env={}] {}'.format(set_env, cmd_info)
        env.update(set_env)
    # Make sure output of launched commands is utf-8
    env['LC_ALL'] = 'en_US.UTF-8'
    logger.debug("Executing:cmd_gather {}".format(cmd_info))
    try:
        proc = subprocess.Popen(cmd_list, cwd=cwd, env=env,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except OSError as exc:
        logger.error(
            "Subprocess errored running:\n{}\nWith error:\n{}\nIs {} installed?"
            .format(cmd_info, exc, cmd_list[0]))
        return exc.errno, "", "See previous error description."
    if not realtime:
        out, err = proc.communicate()
        rc = proc.returncode
    else:
        out = ""
        err = ""
        # Many thanks to http://eyalarubas.com/python-subproc-nonblock.html
        # setup non-blocking read
        # set the O_NONBLOCK flag of proc.stdout file descriptor:
        flags = fcntl(proc.stdout, F_GETFL)  # get current proc.stdout flags
        fcntl(proc.stdout, F_SETFL, flags | O_NONBLOCK)
        # set the O_NONBLOCK flag of proc.stderr file descriptor:
        flags = fcntl(proc.stderr, F_GETFL)  # get current proc.stderr flags
        fcntl(proc.stderr, F_SETFL, flags | O_NONBLOCK)
        rc = None
        while rc is None:
            output = None
            try:
                output = read(proc.stdout.fileno(), 256)
                green_print(output.rstrip())
                out += output
            except OSError:
                pass
            error = None
            try:
                error = read(proc.stderr.fileno(), 256)
                yellow_print(error.rstrip())
                # accumulate into err (the original appended stderr to out,
                # which duplicated it into stdout and left err empty)
                err += error
            except OSError:
                pass
            rc = proc.poll()
            time.sleep(0.0001)  # reduce busy-wait
    # We read in bytes representing utf-8 output; decode so that python
    # recognizes them as unicode strings
    out = out.decode('utf-8')
    err = err.decode('utf-8')
    logger.debug(
        "Process {}: exited with: {}\nstdout>>{}<<\nstderr>>{}<<\n".format(
            cmd_info, rc, out, err))
    return rc, out, err
def __enter__(self):
    self.orig_fl = fcntl.fcntl(self.fd, fcntl.F_GETFL)
    fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl | os.O_NONBLOCK)
def _run(self, command, cwd, shell, env, log,
         join=False, interactive=False, timeout=None):
    """
    Run command, capture the output

    By default stdout and stderr are captured separately.
    Use join=True to merge stderr into stdout.
    Use timeout=<seconds> to finish process after given time
    """
    # By default command output is logged using debug
    if not log:
        log = self.debug
    # Prepare the environment
    if env:
        if not isinstance(env, dict):
            raise GeneralError(f"Invalid environment '{env}'.")
        # Do not modify current process environment
        environment = os.environ.copy()
        environment.update(env)
    else:
        environment = None
    self.debug('environment', pprint.pformat(environment), level=4)
    # Run the command in interactive mode if requested
    if interactive:
        try:
            subprocess.run(
                command, cwd=cwd, shell=shell, env=environment, check=True)
        except subprocess.CalledProcessError as error:
            # Interactive mode can return non-zero if the last command
            # failed, ignore errors here
            pass
        finally:
            return None if join else (None, None)
    # Create the process
    process = subprocess.Popen(
        command, cwd=cwd, shell=shell, env=environment,
        stdin=subprocess.DEVNULL, stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT if join else subprocess.PIPE)
    if join:
        descriptors = [process.stdout.fileno()]
    else:
        descriptors = [process.stdout.fileno(), process.stderr.fileno()]
    stdout = ''
    stderr = ''

    # Prepare kill function for the timer
    def kill():
        """ Kill the process and adjust the return code """
        process.kill()
        process.returncode = PROCESS_TIMEOUT

    try:
        # Start the timer
        timer = Timer(timeout, kill)
        timer.start()
        # Make sure that the read operation on the file descriptors
        # never blocks
        for fd in descriptors:
            fcntl.fcntl(fd, fcntl.F_SETFL, os.O_NONBLOCK)
        # Capture the output
        while process.poll() is None:
            # Check which file descriptors are ready for read
            selected = select.select(
                descriptors, [], [], DEFAULT_SELECT_TIMEOUT)
            for descriptor in selected[0]:
                # Handle stdout
                if descriptor == process.stdout.fileno():
                    line = process.stdout.readline().decode(
                        'utf-8', errors='replace')
                    stdout += line
                    if line != '':
                        log('out', line.rstrip('\n'), 'yellow', level=3)
                # Handle stderr
                if not join and descriptor == process.stderr.fileno():
                    line = process.stderr.readline().decode(
                        'utf-8', errors='replace')
                    stderr += line
                    if line != '':
                        log('err', line.rstrip('\n'), 'yellow', level=3)
    finally:
        # Cancel the timer
        timer.cancel()
    # Check for possible additional output
    selected = select.select(descriptors, [], [], DEFAULT_SELECT_TIMEOUT)
    for descriptor in selected[0]:
        if descriptor == process.stdout.fileno():
            for line in process.stdout.readlines():
                line = line.decode('utf-8', errors='replace')
                stdout += line
                log('out', line.rstrip('\n'), 'yellow', level=3)
        if not join and descriptor == process.stderr.fileno():
            for line in process.stderr.readlines():
                line = line.decode('utf-8', errors='replace')
                stderr += line
                log('err', line.rstrip('\n'), 'yellow', level=3)
    # Handle the exit code, return output
    if process.returncode != 0:
        if isinstance(command, (list, tuple)):
            command = ' '.join(command)
        raise RunError(
            message=f"Command returned '{process.returncode}'.",
            command=command,
            returncode=process.returncode,
            stdout=stdout,
            stderr=stderr)
    return stdout if join else (stdout, stderr)
def __exit__(self, *args):
    fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl)
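# --- sketch of the containing class for the __enter__/__exit__ pair above
# (the class name and body are assumptions; only the two methods come from
# the source). Inside the with-block the descriptor is non-blocking, and the
# original flags are restored on exit.
import fcntl, os

class nonblocking_fd:
    def __init__(self, fd):
        self.fd = fd
    def __enter__(self):
        self.orig_fl = fcntl.fcntl(self.fd, fcntl.F_GETFL)
        fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl | os.O_NONBLOCK)
    def __exit__(self, *args):
        fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl)

r, w = os.pipe()
with nonblocking_fd(r):
    pass  # reads on r raise EAGAIN here instead of blocking
# blocking mode is restored once the with-block exits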
def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False,
                 executable='/bin/sh', in_data=None):
    ''' run a command on the remote host '''

    if sudoable and self.runner.become and \
            self.runner.become_method not in self.become_methods_supported:
        raise errors.AnsibleError(
            "Internal Error: this module does not support running commands via %s"
            % self.runner.become_method)

    ssh_cmd = self._password_cmd()
    ssh_cmd += ["ssh", "-C"]
    if not in_data:
        # we can only use tty when we are not pipelining the modules. piping
        # data into /usr/bin/python inside a tty automatically invokes the
        # python interactive-mode but the modules are not compatible with the
        # interactive-mode ("unexpected indent" mainly because of empty lines)
        ssh_cmd += ["-tt"]
    if utils.VERBOSITY > 3:
        ssh_cmd += ["-vvv"]
    else:
        if self.runner.module_name == 'raw':
            ssh_cmd += ["-q"]
        else:
            ssh_cmd += ["-v"]
    ssh_cmd += self.common_args

    if self.ipv6:
        ssh_cmd += ['-6']
    ssh_cmd += [self.host]

    if self.runner.become and sudoable:
        becomecmd, prompt, success_key = utils.make_become_cmd(
            cmd, become_user, executable, self.runner.become_method, '',
            self.runner.become_exe)
        ssh_cmd.append(becomecmd)
    else:
        prompt = None
        if executable:
            ssh_cmd.append(executable + ' -c ' + pipes.quote(cmd))
        else:
            ssh_cmd.append(cmd)

    vvv("EXEC %s" % ' '.join(ssh_cmd), host=self.host)

    # Only read hosts file if checking is turned on
    if C.HOST_KEY_CHECKING:
        not_in_host_file = self.not_in_host_file(self.host)

    if C.HOST_KEY_CHECKING and not_in_host_file:
        # lock around the initial SSH connectivity so the user prompt about
        # whether to add the host to known hosts is not intermingled with
        # multiprocess output.
        fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
        fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX)

    # create process
    (p, stdin) = self._run(ssh_cmd, in_data)

    self._send_password()

    no_prompt_out = ''
    no_prompt_err = ''
    if sudoable and self.runner.become and self.runner.become_pass:
        # several cases are handled for escalated privileges with password
        # * NOPASSWD (tty & no-tty): detect success_key on stdout
        # * without NOPASSWD:
        #   * detect prompt on stdout (tty)
        #   * detect prompt on stderr (no-tty)
        fcntl.fcntl(p.stdout, fcntl.F_SETFL,
                    fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
        fcntl.fcntl(p.stderr, fcntl.F_SETFL,
                    fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
        become_output = ''
        become_errput = ''

        while True:
            if success_key in become_output or \
                    (prompt and become_output.endswith(prompt)) or \
                    utils.su_prompts.check_su_prompt(become_output):
                break

            rfd, wfd, efd = select.select([p.stdout, p.stderr], [],
                                          [p.stdout], self.runner.timeout)
            if p.stderr in rfd:
                chunk = p.stderr.read()
                if not chunk:
                    raise errors.AnsibleError(
                        'ssh connection closed waiting for a privilege escalation password prompt')
                become_errput += chunk
                incorrect_password = gettext.dgettext(
                    "become", "Sorry, try again.")
                if become_errput.strip().endswith(
                        "%s%s" % (prompt, incorrect_password)):
                    raise errors.AnsibleError('Incorrect become password')
                elif prompt and become_errput.endswith(prompt):
                    stdin.write(self.runner.become_pass + '\n')

            if p.stdout in rfd:
                chunk = p.stdout.read()
                if not chunk:
                    raise errors.AnsibleError(
                        'ssh connection closed waiting for %s password prompt'
                        % self.runner.become_method)
                become_output += chunk

            if not rfd:
                # timeout. wrap up process communication
                stdout = p.communicate()
                raise errors.AnsibleError(
                    'ssh connection error while waiting for %s password prompt'
                    % self.runner.become_method)

        if success_key in become_output:
            no_prompt_out += become_output
            no_prompt_err += become_errput
        elif sudoable:
            stdin.write(self.runner.become_pass + '\n')

    (returncode, stdout, stderr) = self._communicate(
        p, stdin, in_data, sudoable=sudoable, prompt=prompt)

    if C.HOST_KEY_CHECKING and not_in_host_file:
        # lock around the initial SSH connectivity so the user prompt about
        # whether to add the host to known hosts is not intermingled with
        # multiprocess output.
        fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN)
        fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)

    controlpersisterror = 'Bad configuration option: ControlPersist' in stderr or \
                          'unknown configuration option: ControlPersist' in stderr

    if C.HOST_KEY_CHECKING:
        if ssh_cmd[0] == "sshpass" and p.returncode == 6:
            raise errors.AnsibleError(
                'Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support this.  Please add this host\'s fingerprint to your known_hosts file to manage this host.')

    if p.returncode != 0 and controlpersisterror:
        raise errors.AnsibleError(
            'using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" (or ssh_args in [ssh_connection] section of the config file) before running again')
    if p.returncode == 255 and (in_data or self.runner.module_name == 'raw'):
        raise errors.AnsibleError(
            'SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
    if p.returncode == 255:
        ip = None
        port = None
        for line in stderr.splitlines():
            match = re.search(
                'Connecting to .*\[(\d+\.\d+\.\d+\.\d+)\] port (\d+)', line)
            if match:
                ip = match.group(1)
                port = match.group(2)
        if 'UNPROTECTED PRIVATE KEY FILE' in stderr:
            lines = [line for line in stderr.splitlines()
                     if 'ignore key:' in line]
        else:
            lines = stderr.splitlines()[-1:]
        if ip and port:
            lines.append('    while connecting to %s:%s' % (ip, port))
        lines.append(
            'It is sometimes useful to re-run the command using -vvvv, '
            'which prints SSH debug output to help diagnose the issue.')
        raise errors.AnsibleError('SSH Error: %s' % '\n'.join(lines))

    return (p.returncode, '', no_prompt_out + stdout, no_prompt_err + stderr)
def main():
    # initialize the class
    usb1408FS = usb_1408FS()

    # print out the calibration tables
    for chan in range(4):
        for gain in range(8):
            print('Calibration Table (Differential): Chan =', chan, ' Range = ', gain,
                  'Slope = ', format(usb1408FS.CalDF[chan][gain].slope, '.5f'),
                  'Intercept = ', format(usb1408FS.CalDF[chan][gain].intercept, '.5f'))
    gain = 1
    for chan in range(7):
        print('Calibration Table (Single Ended): Chan =', chan, ' Range = ', gain,
              'Slope = ', format(usb1408FS.CalSE[chan].slope, '.5f'),
              ' Intercept = ', format(usb1408FS.CalSE[chan].intercept, '.5f'))

    print('wMaxPacketSize =', usb1408FS.wMaxPacketSize)

    usb1408FS.DConfig(usb1408FS.DIO_PORTA, usb1408FS.DIO_DIR_OUT)
    usb1408FS.DConfig(usb1408FS.DIO_PORTB, usb1408FS.DIO_DIR_IN)
    usb1408FS.DOut(usb1408FS.DIO_PORTA, 0x0)
    usb1408FS.MemRead(0x200, 4)

    while True:
        print("\nUSB-1408FS Testing")
        print("----------------")
        print("Hit 'a' to test analog output scan.")
        print("Hit 'A' for continuous output scan.")
        print("Hit 'b' to blink LED.")
        print("Hit 'c' to test counter. ")
        print("Hit 'd' to test digital I/O.")
        print("Hit 'e' to exit.")
        print("Hit 'f' to get all values")
        print("Hit 'g' to test analog input scan.")
        print("Hit 'i' to test analog input. (differential)")
        print("Hit 'j' to test analog input. (single ended)")
        print("Hit 'I' for information.")
        print("Hit 'o' to test analog output.")
        print("Hit 'r' to reset the device.")
        print("Hit 'S' to get status")
        print("Hit 's' to get serial number.")
        ch = input('\n')

        if ch == 'b':
            usb1408FS.Blink()
        elif ch == 'c':
            usb1408FS.CInit()  # initialize the counter
            print('Connect pin 20 and 21')
            for i in range(20):
                usb1408FS.DOut(usb1408FS.DIO_PORTA, 1)
                time.sleep(.01)
                usb1408FS.DOut(usb1408FS.DIO_PORTA, 0)
            print('Counter =', usb1408FS.CIn())  # read the current count
        elif ch == 'd':
            print('Testing Digital I/O ...')
            print('Connect pins 21 through 28 <--> 32 through 39 (Port A to Port B)')
            usb1408FS.DConfig(usb1408FS.DIO_PORTA, 0x0)   # Port A output
            usb1408FS.DConfig(usb1408FS.DIO_PORTB, 0xff)  # Port B input
            usb1408FS.DOut(usb1408FS.DIO_PORTA, 0x0)
            while (True):
                try:
                    num = int(input('Enter a byte number [0x0-0xff]: '), 16)
                    usb1408FS.DOut(usb1408FS.DIO_PORTA, num)
                    value = usb1408FS.DIn(usb1408FS.DIO_PORTB)
                    print('PortB: The number you entered =', hex(value))
                    for i in range(8):
                        value = usb1408FS.DBitIn(usb1408FS.DIO_PORTB, i)
                        print('Port B Bit', i, ' =', hex(value))
                except:
                    pass
                if (toContinue() != True):
                    break
        elif ch == 'e':
            usb1408FS.udev.close()
            exit(0)
        elif ch == 'f':
            print('Get all values')
            value = usb1408FS.GetAll()
            for i in range(4):
                print('Differential Reference Low channel[', i, '] = ', hex(value[i]))
            for i in range(4):
                print('Differential Reference High channel[', i, '] = ', hex(value[i+4]))
            for i in range(8):
                print('Single Ended Input channel[', i, '] = ', hex(value[i+8]))
            print('DIO Port A = ', hex(value[16]))
            print('DIO Port B = ', hex(value[17]))
        elif ch == 'i':
            print('Connect pin 1 <-> pin 21 and pin 2 <-> pin 3')
            chan = int(input('Select channel [0-3]: '))
            print("\t\t1. +/- 20.V")
            print("\t\t2. +/- 10.V")
            print("\t\t3. +/- 5.V")
            print("\t\t4. +/- 4.V")
            print("\t\t5. +/- 2.5V")
            print("\t\t6. +/- 2.0V")
            print("\t\t7. +/- 1.25V")
            print("\t\t8. +/- 1.0V")
            gain = int(input("Select gain [1-8]: "))
            if gain == 1:
                gain = usb1408FS.BP_20_00V
            elif gain == 2:
                gain = usb1408FS.BP_10_00V
            elif gain == 3:
                gain = usb1408FS.BP_5_00V
            elif gain == 4:
                gain = usb1408FS.BP_4_00V
            elif gain == 5:
                gain = usb1408FS.BP_2_50V
            elif gain == 6:
                gain = usb1408FS.BP_2_00V
            elif gain == 7:
                gain = usb1408FS.BP_1_25V
            elif gain == 8:
                gain = usb1408FS.BP_1_00V
            for i in range(20):
                usb1408FS.DOut(usb1408FS.DIO_PORTA, 0)
                time.sleep(0.01)
                value = usb1408FS.AIn(chan, gain)
                print('Channel: ', chan, ' value =', hex(value), '\t',
                      format(usb1408FS.volts(gain, value), '.3f'), 'V')
                usb1408FS.DOut(usb1408FS.DIO_PORTA, 1)
                time.sleep(0.01)
                value = usb1408FS.AIn(chan, gain)
                print('Channel: ', chan, ' value =', hex(value), '\t',
                      format(usb1408FS.volts(gain, value), '.3f'), 'V')
        elif ch == 'g':
            print('Testing Analog input scan')
            freq = int(input('Enter desired frequency [Hz]: '))
            count = int(input('Enter number of samples [1-1024]: '))
            chan = int(input('Enter channel [0-3]: '))
            print("\t\t1. +/- 20.V")
            print("\t\t2. +/- 10.V")
            print("\t\t3. +/- 5.V")
            print("\t\t4. +/- 4.V")
            print("\t\t5. +/- 2.5V")
            print("\t\t6. +/- 2.0V")
            print("\t\t7. +/- 1.25V")
            print("\t\t8. +/- 1.0V")
            print("\t\t9. Single Ended +/- 10V")
            gain = int(input("Select gain [1-9]: "))
            if gain == 1:
                gain = usb1408FS.BP_20_00V
            elif gain == 2:
                gain = usb1408FS.BP_10_00V
            elif gain == 3:
                gain = usb1408FS.BP_5_00V
            elif gain == 4:
                gain = usb1408FS.BP_4_00V
            elif gain == 5:
                gain = usb1408FS.BP_2_50V
            elif gain == 6:
                gain = usb1408FS.BP_2_00V
            elif gain == 7:
                gain = usb1408FS.BP_1_25V
            elif gain == 8:
                gain = usb1408FS.BP_1_00V
            elif gain == 9:
                gain = usb1408FS.SE_10_00V
            gains = [0]*8
            for i in range(8):
                gains[i] = gain
            options = usb1408FS.AIN_EXECUTION | usb1408FS.AIN_GAIN_QUEUE
            raw_data = usb1408FS.AInScan(chan, chan, gains, count, freq, options)
            data = [0]*len(raw_data)
            for i in range(count):
                # Apply correction
                if gain == usb1408FS.SE_10_00V:
                    data[i] = int(usb1408FS.CalSE[chan].slope*raw_data[i] +
                                  usb1408FS.CalSE[chan].intercept)
                else:
                    data[i] = int(usb1408FS.CalDF[chan][gain].slope*raw_data[i] +
                                  usb1408FS.CalDF[chan][gain].intercept)
                print('raw_data[', i, '] = ', hex(raw_data[i]), '\t',
                      format(usb1408FS.volts(gain, raw_data[i]), '.3f'), 'V\t',
                      'data[', i, '] = ', hex(data[i]), '\t',
                      format(usb1408FS.volts(gain, data[i]), '.3f'), 'V')
            usb1408FS.AInStop()
        elif ch == 'j':
            print('Testing Analog Input Single Ended Mode')
            chan = int(input('Select channel [0-7]: '))
            gain = usb1408FS.SE_10_00V
            value = usb1408FS.AIn(chan, gain)
            print('Channel: ', chan, ' value = ', hex(value), '\t',
                  format(usb1408FS.volts(gain, value), '.3f'), 'V')
        elif ch == 'o':
            print('Testing the analog output for the USB-1408FS')
            chan = int(input('Enter channel [0-1]: '))
            value = int(input('Enter value [0 - 0xfff]: '), 16)
            usb1408FS.AOut(chan, value)
        elif ch == 'a':
            out_data = [0]*512
            print('Testing Analog Output Scan')
            frequency = int(input('Enter desired frequency [Hz]: '))
            for i in range(512):
                if i % 2 == 0:
                    out_data[i] = 0
                else:
                    out_data[i] = 0xfff
            for j in range(5):
                usb1408FS.AOutScan(0, 0, frequency, out_data, 1)
            usb1408FS.AOutStop()
        elif ch == 'A':
            print('Analog output scan continuous. Hit <space> <CR> to Stop')
            frequency = 1000
            nSamples = 32*32
            out_data = [0]*1024
            for i in range(512):
                value = int(0.5*(math.sin(2*math.pi*i/128) + 1.)*0xffff)
                out_data[2*i] = value & 0xff
                out_data[2*i+1] = (value >> 8) & 0xff
            flag = fcntl.fcntl(sys.stdin, fcntl.F_GETFL)
            fcntl.fcntl(sys.stdin, fcntl.F_SETFL, flag | os.O_NONBLOCK)
            usb1408FS.AOutScan(0, 0, frequency, out_data, 0)
            while True:
                usb1408FS.AOutWrite(out_data, 1000)
                c = sys.stdin.readlines()
                if (len(c) != 0):
                    break
            fcntl.fcntl(sys.stdin, fcntl.F_SETFL, flag)
            usb1408FS.AOutStop()
            try:
                # clear out the input buffer
                ret = usb1408FS.udev.interruptRead(libusb1.LIBUSB_ENDPOINT_IN | 1, 10, 100)
            except:
                pass
        elif ch == 's':
            print("Serial No: %s" % usb1408FS.getSerialNumber())
        elif ch == 'I':
            print("Manufacturer: %s" % usb1408FS.getManufacturer())
            print("Product: %s" % usb1408FS.getProduct())
            print("Serial No: %s" % usb1408FS.getSerialNumber())
        elif ch == 'S':
            usb1408FS.printStatus()
def nonblocking(self):
    """internal - not portable!"""
    if not self.is_open:
        raise portNotOpenError
    fcntl.fcntl(self.fd, fcntl.F_SETFL, os.O_NONBLOCK)
def __init__(self, fd):
    self.fd = fd
    fcntl.fcntl(self.fd, fcntl.F_SETFL, os.O_NONBLOCK)
    self.w_pending = ""
    self.r_pending = ""
def set_nonblocking(pipe):
    fd = pipe.fileno()
    fl = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, fl | O_NONBLOCK)
def _communicate(self, p, stdin, indata, sudoable=False, prompt=None):
    fcntl.fcntl(p.stdout, fcntl.F_SETFL,
                fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
    fcntl.fcntl(p.stderr, fcntl.F_SETFL,
                fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
    # We can't use p.communicate here because the ControlMaster may have
    # stdout open as well
    stdout = ''
    stderr = ''
    rpipes = [p.stdout, p.stderr]
    if indata:
        try:
            stdin.write(indata)
            stdin.close()
        except:
            raise errors.AnsibleError(
                'SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
    # Read stdout/stderr from process
    while True:
        rfd, wfd, efd = select.select(rpipes, [], rpipes, 1)
        # fail early if the become password is wrong
        if self.runner.become and sudoable:
            incorrect_password = gettext.dgettext(
                self.runner.become_method,
                C.BECOME_ERROR_STRINGS[self.runner.become_method])
            if prompt:
                if self.runner.become_pass:
                    if stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)):
                        raise errors.AnsibleError('Incorrect become password')
                if stdout.endswith(prompt):
                    raise errors.AnsibleError('Missing become password')
            elif stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)):
                raise errors.AnsibleError('Incorrect become password')
        if p.stdout in rfd:
            dat = os.read(p.stdout.fileno(), 9000)
            stdout += dat
            if dat == '':
                rpipes.remove(p.stdout)
        if p.stderr in rfd:
            dat = os.read(p.stderr.fileno(), 9000)
            stderr += dat
            if dat == '':
                rpipes.remove(p.stderr)
        # only break out if no pipes are left to read or
        # the pipes are completely read and
        # the process is terminated
        if (not rpipes or not rfd) and p.poll() is not None:
            break
        # No pipes are left to read but process is not yet terminated
        # Only then it is safe to wait for the process to be finished
        # NOTE: Actually p.poll() is always None here if rpipes is empty
        elif not rpipes and p.poll() is None:
            p.wait()
            # The process is terminated. Since no pipes to read from are
            # left, there is no need to call select() again.
            break
    # close stdin after process is terminated and stdout/stderr are read
    # completely (see also issue #848)
    stdin.close()
    return (p.returncode, stdout, stderr)
def assimilate(archives, total_size=None, dir_to_archive=".",
               passphrases=None, verb="create"):
    """
    Run and manage multiple `borg create` commands.

    Args:
        archives: A list containing Location objects for the archives to create.
        total_size: The total size of all files being backed up. As borg
            normally only makes one pass over the data, it can't calculate
            percentages on its own. Setting this to None disables progress
            calculation.
        dir_to_archive: The directory to archive. Defaults to the current
            directory.

    Returns:
        A boolean indicating if any borg processes failed (True = failed).
    """
    if dir_to_archive is None:
        dir_to_archive = []
    else:
        dir_to_archive = [dir_to_archive]
    if passphrases is None:
        passphrases = get_passphrases(archives) if sys.stdout.isatty() else {}
    if get_borg_version() < LooseVersion("1.1.0"):
        # borg <1.1 doesn't support --log-json for the progress display
        print("You are using an old version of borg, progress indication is disabled",
              file=sys.stderr)
        recent_borg = False
        progress = False
    else:
        recent_borg = True
        progress = total_size is not None
    borg_processes = []
    borg_failed = False
    try:
        with selectors.DefaultSelector() as sel:
            for idx, archive in enumerate(archives):
                if progress:
                    archive.extra_args.append("--progress")
                if recent_borg:
                    archive.extra_args.append("--log-json")
                env = os.environ.copy()
                passphrase = passphrases.get(archive, os.environ.get("BORG_PASSPHRASE"))
                if passphrase is not None:
                    env["BORG_PASSPHRASE"] = passphrase
                master, slave = openpty()
                settings = termios.tcgetattr(master)
                settings[3] &= ~termios.ECHO
                termios.tcsetattr(master, termios.TCSADRAIN, settings)
                proc = subprocess.Popen(
                    ["borg", verb, str(archive), *dir_to_archive, *archive.extra_args],
                    env=env, stdout=slave, stderr=slave, stdin=slave,
                    close_fds=True, start_new_session=True)
                fl = fcntl.fcntl(master, fcntl.F_GETFL)
                fcntl.fcntl(master, fcntl.F_SETFL, fl | os.O_NONBLOCK)
                proc.stdin = os.fdopen(master, "w")
                proc.stdout = os.fdopen(master, "r")
                proc.archive = archive
                proc.json_buf = []
                proc.progress = 0
                borg_processes.append(proc)
                sel.register(proc.stdout, selectors.EVENT_READ, data=proc)
            if progress:
                print("backup progress: 0%".ljust(25), end="\u001b[25D", flush=True)
            else:
                # give the user some feedback so the program doesn't look frozen
                print("starting backup", flush=True)
            while len(sel.get_map()) > 0:
                for key, mask in sel.select(1):
                    for line in iter(key.fileobj.readline, ""):
                        process_line(key.data, line.rstrip("\n"), total_size)
                for key in [*sel.get_map().values()]:
                    if key.data.poll() is not None:
                        key.data.wait()
                        key.data.progress = 1
                        if key.data.returncode != 0:
                            borg_failed = True
                        sel.unregister(key.fileobj)
                if progress:
                    total_progress = sum(p.progress for p in borg_processes)
                    print("backup progress: {}%".format(
                        int(total_progress / len(borg_processes) * 100)).ljust(25),
                        end="\u001b[25D")
            if progress:
                print()
    finally:
        for p in borg_processes:
            # kill processes that are still running (the original test was
            # inverted, killing only processes that had already exited)
            if p.poll() is None:
                p.kill()
            try:
                p.communicate()
            except (ValueError, OSError):
                p.wait()
    return borg_failed
def _set_non_blocking(self, fd):
    flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
    flags = flags | os.O_NONBLOCK
    fcntl.fcntl(fd, fcntl.F_SETFL, flags)
def main_i(): """internal main routine parse command line, decide what action will be taken; we can either: - query/manipulate configuration - format gsyncd urls using gsyncd's url parsing engine - start service in following modes, in given stages: - agent: startup(), ChangelogAgent() - monitor: startup(), monitor() - master: startup(), connect_remote(), connect(), service_loop() - slave: startup(), connect(), service_loop() """ rconf = {'go_daemon': 'should'} def store_abs(opt, optstr, val, parser): if val and val != '-': val = os.path.abspath(val) setattr(parser.values, opt.dest, val) def store_local(opt, optstr, val, parser): rconf[opt.dest] = val def store_local_curry(val): return lambda o, oo, vx, p: store_local(o, oo, val, p) def store_local_obj(op, dmake): return lambda o, oo, vx, p: store_local( o, oo, FreeObject(op=op, **dmake(vx)), p) op = OptionParser(usage="%prog [options...] <master> <slave>", version="%prog 0.0.1") op.add_option('--gluster-command-dir', metavar='DIR', default='') op.add_option('--gluster-log-file', metavar='LOGF', default=os.devnull, type=str, action='callback', callback=store_abs) op.add_option('--gluster-log-level', metavar='LVL') op.add_option('--changelog-log-level', metavar='LVL', default="INFO") op.add_option('--gluster-params', metavar='PRMS', default='') op.add_option('--glusterd-uuid', metavar='UUID', type=str, default='', help=SUPPRESS_HELP) op.add_option('--gluster-cli-options', metavar='OPTS', default='--log-file=-') op.add_option('--mountbroker', metavar='LABEL') op.add_option('-p', '--pid-file', metavar='PIDF', type=str, action='callback', callback=store_abs) op.add_option('-l', '--log-file', metavar='LOGF', type=str, action='callback', callback=store_abs) op.add_option('--iprefix', metavar='LOGD', type=str, action='callback', callback=store_abs) op.add_option('--changelog-log-file', metavar='LOGF', type=str, action='callback', callback=store_abs) op.add_option('--log-file-mbr', metavar='LOGF', type=str, action='callback', callback=store_abs) op.add_option('--state-file', metavar='STATF', type=str, action='callback', callback=store_abs) op.add_option('--state-detail-file', metavar='STATF', type=str, action='callback', callback=store_abs) op.add_option('--georep-session-working-dir', metavar='STATF', type=str, action='callback', callback=store_abs) op.add_option('--access-mount', default=False, action='store_true') op.add_option('--ignore-deletes', default=False, action='store_true') op.add_option('--isolated-slave', default=False, action='store_true') op.add_option('--use-rsync-xattrs', default=False, action='store_true') op.add_option('--sync-xattrs', default=True, action='store_true') op.add_option('--sync-acls', default=True, action='store_true') op.add_option('--log-rsync-performance', default=False, action='store_true') op.add_option('--max-rsync-retries', type=int, default=10) op.add_option('--pause-on-start', default=False, action='store_true') op.add_option('-L', '--log-level', metavar='LVL') op.add_option('-r', '--remote-gsyncd', metavar='CMD', default=os.path.abspath(sys.argv[0])) op.add_option('--volume-id', metavar='UUID') op.add_option('--slave-id', metavar='ID') op.add_option('--session-owner', metavar='ID') op.add_option('--local-id', metavar='ID', help=SUPPRESS_HELP, default='') op.add_option('--local-node', metavar='NODE', help=SUPPRESS_HELP, default='') op.add_option('--local-node-id', metavar='NODEID', help=SUPPRESS_HELP, default='') op.add_option('--local-path', metavar='PATH', help=SUPPRESS_HELP, default='') 
op.add_option('-s', '--ssh-command', metavar='CMD', default='ssh') op.add_option('--ssh-port', metavar='PORT', type=int, default=22) op.add_option('--ssh-command-tar', metavar='CMD', default='ssh') op.add_option('--rsync-command', metavar='CMD', default='rsync') op.add_option('--rsync-options', metavar='OPTS', default='') op.add_option('--rsync-ssh-options', metavar='OPTS', default='--compress') op.add_option('--timeout', metavar='SEC', type=int, default=120) op.add_option('--connection-timeout', metavar='SEC', type=int, default=60, help=SUPPRESS_HELP) op.add_option('--sync-jobs', metavar='N', type=int, default=3) op.add_option('--replica-failover-interval', metavar='N', type=int, default=1) op.add_option('--changelog-archive-format', metavar='N', type=str, default="%Y%m") op.add_option('--use-meta-volume', default=False, action='store_true') op.add_option('--meta-volume-mnt', metavar='N', type=str, default="/var/run/gluster/shared_storage") op.add_option('--turns', metavar='N', type=int, default=0, help=SUPPRESS_HELP) op.add_option('--allow-network', metavar='IPS', default='') op.add_option('--socketdir', metavar='DIR') op.add_option('--state-socket-unencoded', metavar='SOCKF', type=str, action='callback', callback=store_abs) op.add_option('--checkpoint', metavar='LABEL', default='0') # tunables for failover/failback mechanism: # None - gsyncd behaves as normal # blind - gsyncd works with xtime pairs to identify # candidates for synchronization # wrapup - same as normal mode but does not assign # xtimes to orphaned files # see crawl() for usage of the above tunables op.add_option('--special-sync-mode', type=str, help=SUPPRESS_HELP) # changelog or xtime? (TODO: Change the default) op.add_option('--change-detector', metavar='MODE', type=str, default='xtime') # sleep interval for change detection (xtime crawl uses a hardcoded 1 # second sleep time) op.add_option('--change-interval', metavar='SEC', type=int, default=3) # working directory for changelog based mechanism op.add_option('--working-dir', metavar='DIR', type=str, action='callback', callback=store_abs) op.add_option('--use-tarssh', default=False, action='store_true') op.add_option('-c', '--config-file', metavar='CONF', type=str, action='callback', callback=store_local) # duh. 
need to specify dest or value will be mapped to None :S op.add_option('--monitor', dest='monitor', action='callback', callback=store_local_curry(True)) op.add_option('--agent', dest='agent', action='callback', callback=store_local_curry(True)) op.add_option('--resource-local', dest='resource_local', type=str, action='callback', callback=store_local) op.add_option('--resource-remote', dest='resource_remote', type=str, action='callback', callback=store_local) op.add_option('--feedback-fd', dest='feedback_fd', type=int, help=SUPPRESS_HELP, action='callback', callback=store_local) op.add_option('--rpc-fd', dest='rpc_fd', type=str, help=SUPPRESS_HELP) op.add_option('--subvol-num', dest='subvol_num', type=str, help=SUPPRESS_HELP) op.add_option('--listen', dest='listen', help=SUPPRESS_HELP, action='callback', callback=store_local_curry(True)) op.add_option('-N', '--no-daemon', dest="go_daemon", action='callback', callback=store_local_curry('dont')) op.add_option('--verify', type=str, dest="verify", action='callback', callback=store_local) op.add_option('--slavevoluuid-get', type=str, dest="slavevoluuid_get", action='callback', callback=store_local) op.add_option('--create', type=str, dest="create", action='callback', callback=store_local) op.add_option('--delete', dest='delete', action='callback', callback=store_local_curry(True)) op.add_option('--path-list', dest='path_list', action='callback', type=str, callback=store_local) op.add_option('--reset-sync-time', default=False, action='store_true') op.add_option('--status-get', dest='status_get', action='callback', callback=store_local_curry(True)) op.add_option('--debug', dest="go_daemon", action='callback', callback=lambda *a: (store_local_curry('dont') (*a), setattr(a[-1].values, 'log_file', '-'), setattr(a[-1].values, 'log_level', 'DEBUG'), setattr(a[-1].values, 'changelog_log_file', '-'))) op.add_option('--path', type=str, action='append') for a in ('check', 'get'): op.add_option('--config-' + a, metavar='OPT', type=str, dest='config', action='callback', callback=store_local_obj(a, lambda vx: {'opt': vx})) op.add_option('--config-get-all', dest='config', action='callback', callback=store_local_obj('get', lambda vx: {'opt': None})) for m in ('', '-rx', '-glob'): # call this code 'Pythonic' eh? 
    # have to define a one-shot local function to be able
    # to inject (a value depending on the)
    # iteration variable into the inner lambda
    def conf_mod_opt_regex_variant(rx):
        op.add_option('--config-set' + m, metavar='OPT VAL', type=str,
                      nargs=2, dest='config', action='callback',
                      callback=store_local_obj('set', lambda vx: {
                          'opt': vx[0], 'val': vx[1], 'rx': rx}))
        op.add_option('--config-del' + m, metavar='OPT', type=str,
                      dest='config', action='callback',
                      callback=store_local_obj('del', lambda vx: {
                          'opt': vx, 'rx': rx}))
    conf_mod_opt_regex_variant(m and m[1:] or False)

op.add_option('--normalize-url', dest='url_print', action='callback',
              callback=store_local_curry('normal'))
op.add_option('--canonicalize-url', dest='url_print', action='callback',
              callback=store_local_curry('canon'))
op.add_option('--canonicalize-escape-url', dest='url_print',
              action='callback', callback=store_local_curry('canon_esc'))
op.add_option('--is-hottier', default=False, action='store_true')

tunables = [norm(o.get_opt_string()[2:])
            for o in op.option_list
            if (o.callback in (store_abs, 'store_true', None) and
                o.get_opt_string() not in ('--version', '--help'))]
remote_tunables = ['listen', 'go_daemon', 'timeout', 'session_owner',
                   'config_file', 'use_rsync_xattrs', 'local_id',
                   'local_node', 'access_mount']
rq_remote_tunables = {'listen': True}

# precedence for sources of values: 1) commandline, 2) cfg file,
# 3) defaults. For this to work out we need to tell apart defaults from
# explicitly set options... so churn out the defaults here and call
# the parser with a virgin values container.
defaults = op.get_default_values()
opts, args = op.parse_args(values=optparse.Values())

# slave url cleanup, if input comes with a vol uuid as follows:
# 'ssh://fvm1::gv2:07dfddca-94bb-4841-a051-a7e582811467'
temp_args = []
for arg in args:
    # split based on ::
    data = arg.split("::")
    if len(data) > 1:
        slavevol_name = data[1].split(":")[0]
        temp_args.append("%s::%s" % (data[0], slavevol_name))
    else:
        temp_args.append(data[0])
args = temp_args
args_orig = args[:]

voluuid_get = rconf.get('slavevoluuid_get')
if voluuid_get:
    slave_host, slave_vol = voluuid_get.split("::")
    svol_uuid = slave_vol_uuid_get(slave_host, slave_vol)
    print svol_uuid
    return

r = rconf.get('resource_local')
if r:
    if len(args) == 0:
        args.append(None)
    args[0] = r
r = rconf.get('resource_remote')
if r:
    if len(args) == 0:
        raise GsyncdError('local resource unspecified')
    elif len(args) == 1:
        args.append(None)
    args[1] = r

confdata = rconf.get('config')
if not (len(args) == 2 or
        (len(args) == 1 and rconf.get('listen')) or
        (len(args) <= 2 and confdata) or
        rconf.get('url_print')):
    sys.stderr.write("error: incorrect number of arguments\n\n")
    sys.stderr.write(op.get_usage() + "\n")
    sys.exit(1)

verify = rconf.get('verify')
if verify:
    logging.info(verify)
    logging.info("Able to spawn gsyncd.py")
    return

restricted = os.getenv('_GSYNCD_RESTRICTED_')
if restricted:
    allopts = {}
    allopts.update(opts.__dict__)
    allopts.update(rconf)
    bannedtuns = set(allopts.keys()) - set(remote_tunables)
    if bannedtuns:
        raise GsyncdError('following tunables cannot be set with '
                          'restricted SSH invocation: ' +
                          ', '.join(bannedtuns))
    for k, v in rq_remote_tunables.items():
        if k not in allopts or allopts[k] != v:
            raise GsyncdError('tunable %s is not set to value %s required '
                              'for restricted SSH invocation' % (k, v))

confrx = getattr(confdata, 'rx', None)

def makersc(aa, check=True):
    if not aa:
        return ([], None, None)
    ra = [resource.parse_url(u) for u in aa]
    local = ra[0]
    remote = None
    if len(ra) > 1:
        remote = ra[1]
    if check and not local.can_connect_to(remote):
        raise GsyncdError("%s cannot work with %s" %
                          (local.path, remote and remote.path))
    return (ra, local, remote)

if confrx:
    # peers are regexen, don't try to parse them
    if confrx == 'glob':
        args = ['\A' + fnmatch.translate(a) for a in args]
    canon_peers = args
    namedict = {}
else:
    dc = rconf.get('url_print')
    rscs, local, remote = makersc(args_orig, not dc)
    if dc:
        for r in rscs:
            print(r.get_url(**{'normal': {},
                               'canon': {'canonical': True},
                               'canon_esc': {'canonical': True,
                                             'escaped': True}}[dc]))
        return
    pa = ([], [], [])
    urlprms = ({}, {'canonical': True},
               {'canonical': True, 'escaped': True})
    for x in rscs:
        for i in range(len(pa)):
            pa[i].append(x.get_url(**urlprms[i]))
    _, canon_peers, canon_esc_peers = pa
    # creating the namedict, a dict representing various ways of
    # referring to / representing peers, to be fillable in config
    # templates
    mods = (lambda x: x,
            lambda x: x[0].upper() + x[1:],
            lambda x: 'e' + x[0].upper() + x[1:])
    if remote:
        rmap = {local: ('local', 'master'), remote: ('remote', 'slave')}
    else:
        rmap = {local: ('local', 'slave')}
    namedict = {}
    for i in range(len(rscs)):
        x = rscs[i]
        for name in rmap[x]:
            for j in range(3):
                namedict[mods[j](name)] = pa[j][i]
            namedict[name + 'vol'] = x.volume
            if name == 'remote':
                namedict['remotehost'] = x.remotehost

if 'config_file' not in rconf:
    rconf['config_file'] = TMPL_CONFIG_FILE

# upgrade the config file only if it is a session conf file
if rconf['config_file'] != TMPL_CONFIG_FILE:
    upgrade_config_file(rconf['config_file'], confdata)

gcnf = GConffile(rconf['config_file'], canon_peers, confdata,
                 defaults.__dict__, opts.__dict__, namedict)

conf_change = False
if confdata:
    opt_ok = norm(confdata.opt) in tunables + [None]
    if confdata.op == 'check':
        if opt_ok:
            sys.exit(0)
        else:
            sys.exit(1)
    elif not opt_ok:
        raise GsyncdError("not a valid option: " + confdata.opt)
    if confdata.op == 'get':
        gcnf.get(confdata.opt)
    elif confdata.op == 'set':
        gcnf.set(confdata.opt, confdata.val, confdata.rx)
    elif confdata.op == 'del':
        gcnf.delete(confdata.opt, confdata.rx)
    # when modifying the checkpoint, it's important to make a log
    # of that, so in that case we go on to set up logging even
    # if it's just a config invocation
    if confdata.op in ('set', 'del') and not confdata.rx:
        conf_change = True
    if not conf_change:
        return

gconf.__dict__.update(defaults.__dict__)
gcnf.update_to(gconf.__dict__)
gconf.__dict__.update(opts.__dict__)
gconf.configinterface = gcnf

delete = rconf.get('delete')
if delete:
    logging.info('geo-replication delete')
    # remove the stime xattr from all the brick paths so that
    # a re-create of a session will start sync all over again
    stime_xattr_name = getattr(gconf, 'master.stime_xattr_name', None)
    # delete pid file, status file, socket file
    cleanup_paths = []
    if getattr(gconf, 'pid_file', None):
        cleanup_paths.append(gconf.pid_file)
    if getattr(gconf, 'state_file', None):
        cleanup_paths.append(gconf.state_file)
    if getattr(gconf, 'state_detail_file', None):
        cleanup_paths.append(gconf.state_detail_file)
    if getattr(gconf, 'state_socket_unencoded', None):
        cleanup_paths.append(gconf.state_socket_unencoded)
    cleanup_paths.append(rconf['config_file'][:-11] + "*")
    # cleanup changelog working dirs
    if getattr(gconf, 'working_dir', None):
        try:
            shutil.rmtree(gconf.working_dir)
        except (IOError, OSError):
            if sys.exc_info()[1].errno == ENOENT:
                pass
            else:
                raise GsyncdError('Error while removing working dir: %s' %
                                  gconf.working_dir)
    for path in cleanup_paths:
        # to delete temp files
        for f in glob.glob(path + "*"):
            _unlink(f)
    reset_sync_time = boolify(gconf.reset_sync_time)
    if reset_sync_time and stime_xattr_name:
        path_list = rconf.get('path_list')
        paths = []
        for p in path_list.split('--path='):
            stripped_path = p.strip()
            if stripped_path != "":
                # set stime to (0,0) to trigger a full volume content
                # resync to the slave on session recreation
                # (see master.py::Xcrawl; hint: zero_zero)
                Xattr.lsetxattr(stripped_path, stime_xattr_name,
                                struct.pack("!II", 0, 0))
    return

if restricted and gconf.allow_network:
    ssh_conn = os.getenv('SSH_CONNECTION')
    if not ssh_conn:
        # legacy env var
        ssh_conn = os.getenv('SSH_CLIENT')
    if ssh_conn:
        allowed_networks = [IPNetwork(a)
                            for a in gconf.allow_network.split(',')]
        client_ip = IPAddress(ssh_conn.split()[0])
        allowed = False
        for nw in allowed_networks:
            if client_ip in nw:
                allowed = True
                break
        if not allowed:
            raise GsyncdError("client IP address is not allowed")

ffd = rconf.get('feedback_fd')
if ffd:
    fcntl.fcntl(ffd, fcntl.F_SETFD, fcntl.FD_CLOEXEC)

# normalize loglevel
lvl0 = gconf.log_level
if isinstance(lvl0, str):
    lvl1 = lvl0.upper()
    lvl2 = logging.getLevelName(lvl1)
    # I have _never_ _ever_ seen such an utterly braindead
    # error condition: getLevelName() signals an unknown level
    # name by returning the string "Level <name>"
    if lvl2 == "Level " + lvl1:
        raise GsyncdError('cannot recognize log level "%s"' % lvl0)
    gconf.log_level = lvl2

if not privileged() and gconf.log_file_mbr:
    gconf.log_file = gconf.log_file_mbr

if conf_change:
    try:
        GLogger._gsyncd_loginit(log_file=gconf.log_file, label='conf')
        gconf.log_exit = False
        if confdata.op == 'set':
            if confdata.opt == 'checkpoint':
                logging.info("Checkpoint Set: %s" %
                             human_time_utc(confdata.val))
            else:
                logging.info("Config Set: %s = %s" %
                             (confdata.opt, confdata.val))
        elif confdata.op == 'del':
            if confdata.opt == 'checkpoint':
                logging.info("Checkpoint Reset")
            else:
                logging.info("Config Reset: %s" % confdata.opt)
    except IOError:
        if sys.exc_info()[1].errno == ENOENT:
            # directory of log path is not present,
            # which happens if we get here from
            # a peer-multiplexed "config-set checkpoint"
            # (as that directory is created only on the
            # original node)
            pass
        else:
            raise
    return

create = rconf.get('create')
if create:
    if getattr(gconf, 'state_file', None):
        set_monitor_status(gconf.state_file, create)
    try:
        GLogger._gsyncd_loginit(log_file=gconf.log_file, label='monitor')
        gconf.log_exit = False
        logging.info("Monitor Status: %s" % create)
    except IOError:
        if sys.exc_info()[1].errno == ENOENT:
            # if the log dir is not present
            pass
        else:
            raise
    return

go_daemon = rconf['go_daemon']
be_monitor = rconf.get('monitor')
be_agent = rconf.get('agent')

rscs, local, remote = makersc(args)

status_get = rconf.get('status_get')
if status_get:
    master_name, slave_data = get_master_and_slave_data_from_args(args)
    for brick in gconf.path:
        brick_status = GeorepStatus(gconf.state_file, gconf.local_node,
                                    brick, gconf.local_node_id,
                                    master_name, slave_data,
                                    getattr(gconf, "pid_file", None))
        checkpoint_time = int(getattr(gconf, "checkpoint", "0"))
        brick_status.print_status(checkpoint_time=checkpoint_time)
    return

if not be_monitor and isinstance(remote, resource.SSH) and \
   go_daemon == 'should':
    go_daemon = 'postconn'
    log_file = None
else:
    log_file = gconf.log_file
if be_monitor:
    label = 'monitor'
elif be_agent:
    label = gconf.local_path
elif remote:
    # master
    label = gconf.local_path
else:
    label = 'slave'
startup(go_daemon=go_daemon, log_file=log_file, label=label)
resource.Popen.init_errhandler()

if be_agent:
    os.setsid()
    logging.debug('rpc_fd: %s' % repr(gconf.rpc_fd))
    return agent(Changelog(), gconf.rpc_fd)

if be_monitor:
    return monitor(*rscs)

if remote:
    go_daemon = remote.connect_remote(go_daemon=go_daemon)
    if go_daemon:
        startup(go_daemon=go_daemon, log_file=gconf.log_file)
        # complete remote connection in child
        remote.connect_remote(go_daemon='done')
local.connect()
if ffd:
    os.close(ffd)
local.service_loop(*[r for r in [remote] if r])
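
# The "one-shot local function" comment above works around Python's
# late-binding closures: a lambda defined in a loop captures the loop
# variable itself, not its value at definition time. A minimal sketch of
# the pitfall and the fix (illustrative only, not part of gsyncd):

make = [lambda: m for m in ('', '-rx', '-glob')]
print [f() for f in make]        # ['-glob', '-glob', '-glob'] - late-bound

def pin(m):
    return lambda: m             # m is now a parameter, fixed per call

make = [pin(m) for m in ('', '-rx', '-glob')]
print [f() for f in make]        # ['', '-rx', '-glob'] - as intended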
def manage_createBackup(self, includeEvents=None, includeMysqlLogin=None,
                        timeout=120, REQUEST=None, writeMethod=None):
    """
    Create a new backup file using zenbackup and the options
    specified in the request.

    This method makes use of the fact that DataRoot is a Commandable
    in order to use Commandable.write
    """
    import popen2
    import fcntl
    import time
    import select

    def write(s):
        if writeMethod:
            writeMethod(s)
        elif REQUEST:
            self.write(REQUEST.RESPONSE, s)

    footer = None
    if REQUEST and not writeMethod:
        header, footer = self.commandOutputTemplate().split('OUTPUT_TOKEN')
        REQUEST.RESPONSE.write(str(header))
    write('')
    try:
        cmd = binPath('zenbackup') + ' -v10'
        if not includeEvents:
            cmd += ' --no-eventsdb'
        if not includeMysqlLogin:
            cmd += ' --no-save-mysql-access'
        try:
            timeout = int(timeout)
        except ValueError:
            timeout = 120
        timeout = max(timeout, 1)
        child = popen2.Popen4(cmd)
        flags = fcntl.fcntl(child.fromchild, fcntl.F_GETFL)
        fcntl.fcntl(child.fromchild, fcntl.F_SETFL, flags | os.O_NDELAY)
        endtime = time.time() + timeout
        write('%s' % cmd)
        write('')
        pollPeriod = 1
        firstPass = True
        # popen2.Popen4.poll() returns -1 while the child is running
        while time.time() < endtime and (firstPass or child.poll() == -1):
            firstPass = False
            r, w, e = select.select([child.fromchild], [], [], pollPeriod)
            if r:
                t = child.fromchild.read()
                # We are sometimes getting to this point without any data
                # from child.fromchild. I don't think that should happen,
                # but the conditional below seems to be necessary.
                if t:
                    write(t)
        if child.poll() == -1:
            write('Backup timed out after %s seconds.' % timeout)
            import signal
            os.kill(child.pid, signal.SIGKILL)
        write('DONE')
    except Exception:
        write('Exception while performing backup.')
        write('type: %s value: %s' % tuple(sys.exc_info()[:2]))
    else:
        if REQUEST or writeMethod:
            audit('UI.Backup.Create')
    write('')
    if REQUEST and footer:
        REQUEST.RESPONSE.write(footer)
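
# popen2 has been deprecated since Python 2.6; the same poll-with-timeout
# pattern can be written against subprocess. A minimal sketch under that
# assumption (run_with_timeout and its arguments are hypothetical, not
# part of the Zenoss code above):

import os
import select
import signal
import subprocess
import time

def run_with_timeout(cmd, timeout, write):
    # stream the child's combined stdout/stderr, killing it on timeout
    child = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
    endtime = time.time() + timeout
    while time.time() < endtime and child.poll() is None:
        r, _, _ = select.select([child.stdout], [], [], 1)
        if r:
            data = os.read(child.stdout.fileno(), 4096)
            if data:
                write(data)
    if child.poll() is None:
        write('timed out after %s seconds, killing child' % timeout)
        os.kill(child.pid, signal.SIGKILL)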
    'G3': 196.00, 'B3': 246.94, 'E4': 329.63}

GUITAR_STRINGS_K = {82.41: 'E2', 110.00: 'A2', 146.83: 'D3',
                    196.00: 'G3', 246.94: 'B3', 329.63: 'E4'}
GFREQ = [82.41, 110.00, 146.83, 196.00, 246.94, 329.63]
THRESH = 2

''' UNIX STDIN SETUP '''
fl = fcntl.fcntl(sys.stdin.fileno(), fcntl.F_GETFL)
fcntl.fcntl(sys.stdin.fileno(), fcntl.F_SETFL, fl | os.O_NONBLOCK)

''' SETTING UP PYAUDIO '''
pyaud = pyaudio.PyAudio()
stream = pyaud.open(format=pyaudio.paInt16, channels=1,
                    rate=44100, input=True)

''' THE MAIN LOOP '''
while True:
    # for i in xrange(10):
    for x in xrange(num_samples):
        try:
            rawsamps = stream.read(1024)
            samps = np.fromstring(rawsamps, dtype=np.int16)
        except IOError:
            # an overflowed input buffer raises IOError; note it, move on
            print "failed"
        crossings = 0
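
# With stdin switched to O_NONBLOCK as above, a read returns whatever has
# been typed so far instead of blocking the audio loop; when nothing is
# buffered it raises EAGAIN. A sketch of how the loop could poll for
# keypresses (illustrative; the fragment above ends before any such read):

import errno

try:
    keys = os.read(sys.stdin.fileno(), 16)
except OSError as e:
    if e.errno != errno.EAGAIN:
        raise
    keys = ''  # nothing typed; carry on sampling audio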
odir = "%s/%s" % (ORI, dstr2)
rdir = "%s/%s/linkedrawdata" % (ORI, dstr2)
F_ft_lock_link = "%s/%s/linkedrawdata.flock" % (ORI, dstr2)
lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 0, 0, 0)
lockdatalink = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 0, 0, 0)

# create output directories
if not os.path.exists(odir):
    os.makedirs(odir)
if not os.path.exists(rdir):
    os.makedirs(rdir)

with open(F_ft_lock_link, "a+") as lockdata1:
    try:
        fcntl.fcntl(lockdata1.fileno(), fcntl.F_SETLKW, lockdatalink)
    except IOError:
        # note: passing here means we proceed without holding the lock
        pass
    create_file_links(orirdir, rdir, dstr2)

# fetch list of zipped raw data
os.chdir(rdir)
obt_list = []
zipfiles = glob.glob("S*RBT*zip")
for zzip in zipfiles:
    obt = zzip.split("_")[-7]
    if obt not in obt_list:
        obt_list.append(obt)
os.chdir(odir)
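
# The bare `pass` above means the links are still created when F_SETLKW
# fails, i.e. without holding the lock. A sketch of a stricter variant
# that treats lock failure as fatal (`lockfile` is hypothetical):

import fcntl
import struct
import sys

lock = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 0, 0, 0)
try:
    fcntl.fcntl(lockfile.fileno(), fcntl.F_SETLKW, lock)
except IOError as e:
    sys.exit('could not take link lock: %s' % e)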
    lockdata = struct.pack(off_t + off_t + pid_t + 'hh', 0, 0, 0,
                           fcntl.F_WRLCK, 0)
elif sys.platform in ['aix3', 'aix4', 'hp-uxB', 'unixware7']:
    lockdata = struct.pack('hhlllii', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
elif sys.platform in ['os2emx']:
    lockdata = None
else:
    lockdata = struct.pack('hh' + start_len + 'hh',
                           fcntl.F_WRLCK, 0, 0, 0, 0, 0)
if lockdata:
    if verbose:
        print 'struct.pack: ', repr(lockdata)

# the example from the library docs
f = open(filename, 'w')
rv = fcntl.fcntl(f.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
if verbose:
    print 'Status from fcntl with O_NONBLOCK: ', rv

if sys.platform not in ['os2emx']:
    rv = fcntl.fcntl(f.fileno(), fcntl.F_SETLKW, lockdata)
    if verbose:
        print 'String from fcntl with F_SETLKW: ', repr(rv)

f.close()
os.unlink(filename)

# Again, but pass the file rather than numeric descriptor:
f = open(filename, 'w')
rv = fcntl.fcntl(f, fcntl.F_SETFL, os.O_NONBLOCK)
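
# fcntl.lockf() wraps the same fcntl() record locks and hides the
# platform-specific struct flock layouts enumerated above; it is usually
# the portable spelling. A minimal sketch, reusing `filename` from the
# test above:

import fcntl

f = open(filename, 'w')
fcntl.lockf(f, fcntl.LOCK_EX)    # write-lock the whole file, blocking
f.write('locked region\n')
fcntl.lockf(f, fcntl.LOCK_UN)
f.close()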
def set_close_on_exec(fd):
    s = fcntl.fcntl(fd, fcntl.F_GETFD)
    fcntl.fcntl(fd, fcntl.F_SETFD, s | fcntl.FD_CLOEXEC)
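
# Typical use of the helper above: mark both ends of a pipe
# close-on-exec so children exec'd later do not inherit them
# (a minimal sketch, not from the original source):

import os

r, w = os.pipe()
set_close_on_exec(r)
set_close_on_exec(w)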
def open(self, index=0):
    """ Opens the file, creates the pipe. """
    logger.debug('Open video `%s`' % self.filename)

    # close video if it was opened
    if not self.closed:
        self.close()

    # wait some time until we reopen the video
    reopen_delay = self.parameters['reopen_delay']
    if reopen_delay > 0:
        logger.debug('Wait %g seconds before reopening video',
                     reopen_delay)
        time.sleep(reopen_delay)

    if index > 0:
        # we have to seek to another index/time

        # determine the time that we have to seek to;
        # the -0.1 is necessary to prevent rounding errors
        starttime = (index - 0.1) / self.fps

        # determine which method to use for seeking
        seek_method = self.parameters['seek_method']
        if seek_method == 'auto':
            if FFMPEG_VERSION > (2, 1):
                seek_method = 'exact'
            else:
                seek_method = 'keyframe'

        if seek_method == 'exact':
            # newer ffmpeg version, which supports accurate seeking
            i_arg = ['-ss', "%.03f" % starttime,
                     '-i', self.filename]
        elif seek_method == 'keyframe':
            # older ffmpeg version, which does not support accurate
            # seeking
            if index < self.parameters['seek_max_frames']:
                # we only have to seek a little bit
                i_arg = ['-i', self.filename,
                         '-ss', "%.03f" % starttime]
            else:
                # we first seek to a keyframe and then proceed from there
                seek_offset = self.parameters['seek_offset']
                i_arg = ['-ss', "%.03f" % (starttime - seek_offset),
                         '-i', self.filename,
                         '-ss', "%.03f" % seek_offset]
        else:
            raise ValueError('Unknown seek method `%s`' % seek_method)

        logger.debug('Seek video to frame %d (=%.03fs)', index, starttime)

    else:
        # we can just open the video at the first frame
        i_arg = ['-i', self.filename]

    # build ffmpeg command line
    cmd = ([FFMPEG_BINARY] + i_arg +
           ['-loglevel', 'error',
            '-f', 'image2pipe',
            '-pix_fmt', self.pix_fmt,
            '-vcodec', 'rawvideo', '-'])

    self.proc = subprocess.Popen(cmd, bufsize=self.bufsize,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)

    # set the stderr to non-blocking; used the idea from
    # http://stackoverflow.com/a/8980466/932593
    # this only works on UNIX!
    fcntl.fcntl(self.proc.stderr.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)

    self._frame_pos = index
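
# Once proc.stderr is non-blocking (as set at the end of open() above),
# a read returns only what FFmpeg has written so far instead of stalling
# the caller. A sketch of a tolerant drain helper (hypothetical, not a
# method of the class above); on Python 2 an empty non-blocking read
# raises IOError with EAGAIN, on Python 3 the buffered read returns None:

import errno

def drain_stderr(proc):
    try:
        return proc.stderr.read() or b''
    except (IOError, OSError) as e:
        if e.errno != errno.EAGAIN:
            raise
        return b''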
def _spawn(self, args, fd_pipes, **kwargs):
    """
    Fork a subprocess, apply local settings, and call dblink.merge().
    """

    elog_reader_fd, elog_writer_fd = os.pipe()
    fcntl.fcntl(elog_reader_fd, fcntl.F_SETFL,
                fcntl.fcntl(elog_reader_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
    blockers = None
    if self.blockers is not None:
        # Query blockers in the main process, since closing
        # of file descriptors in the subprocess can prevent
        # access to open database connections such as that
        # used by the sqlite metadata cache module.
        blockers = self.blockers()
    mylink = portage.dblink(self.mycat, self.mypkg,
                            settings=self.settings,
                            treetype=self.treetype, vartree=self.vartree,
                            blockers=blockers, scheduler=self.scheduler,
                            pipe=elog_writer_fd)
    fd_pipes[elog_writer_fd] = elog_writer_fd
    self._elog_reg_id = self.scheduler.register(
        elog_reader_fd, self._registered_events,
        self._elog_output_handler)

    # If a concurrent emerge process tries to install a package
    # in the same SLOT as this one at the same time, there is an
    # extremely unlikely chance that the COUNTER values will not be
    # ordered correctly unless we lock the vdb here.
    # FEATURES=parallel-install skips this lock in order to
    # improve performance, and the risk is practically negligible.
    self._lock_vdb()
    counter = None
    if not self.unmerge:
        counter = self.vartree.dbapi.counter_tick()

    pid = os.fork()
    if pid != 0:
        if not isinstance(pid, int):
            raise AssertionError(
                "fork returned non-integer: %s" % (repr(pid),))

        os.close(elog_writer_fd)
        self._elog_reader_fd = elog_reader_fd
        self._buf = ""
        self._elog_keys = set()

        # invalidate relevant vardbapi caches
        if self.vartree.dbapi._categories is not None:
            self.vartree.dbapi._categories = None
        self.vartree.dbapi._pkgs_changed = True
        self.vartree.dbapi._clear_pkg_cache(mylink)

        portage.process.spawned_pids.append(pid)
        return [pid]

    os.close(elog_reader_fd)
    portage.locks._close_fds()
    # Disable close_fds since we don't exec (see _setup_pipes docstring).
    portage.process._setup_pipes(fd_pipes, close_fds=False)

    # Use default signal handlers since the ones inherited
    # from the parent process are irrelevant here.
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    signal.signal(signal.SIGTERM, signal.SIG_DFL)

    portage.output.havecolor = self.settings.get('NOCOLOR') \
        not in ('yes', 'true')

    # In this subprocess we want mylink._display_merge() to use
    # stdout/stderr directly since they are pipes. This behavior
    # is triggered when mylink._scheduler is None.
    mylink._scheduler = None

    # Avoid wasteful updates of the vdb cache.
    self.vartree.dbapi._flush_cache_enabled = False

    # In this subprocess we don't want PORTAGE_BACKGROUND to
    # suppress stdout/stderr output since they are pipes. We
    # also don't want to open PORTAGE_LOG_FILE, since it will
    # already be opened by the parent process, so we set the
    # "subprocess" value for use in conditional logging code
    # involving PORTAGE_LOG_FILE.
    if not self.unmerge:
        # unmerge phases have separate logs
        if self.settings.get("PORTAGE_BACKGROUND") == "1":
            self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "1"
        else:
            self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "0"
        self.settings.backup_changes("PORTAGE_BACKGROUND_UNMERGE")
    self.settings["PORTAGE_BACKGROUND"] = "subprocess"
    self.settings.backup_changes("PORTAGE_BACKGROUND")

    rval = 1
    try:
        if self.unmerge:
            if not mylink.exists():
                rval = os.EX_OK
            elif mylink.unmerge(
                    ldpath_mtimes=self.prev_mtimes) == os.EX_OK:
                mylink.lockdb()
                try:
                    mylink.delete()
                finally:
                    mylink.unlockdb()
                rval = os.EX_OK
        else:
            rval = mylink.merge(self.pkgloc, self.infloc,
                                myebuild=self.myebuild,
                                mydbapi=self.mydbapi,
                                prev_mtimes=self.prev_mtimes,
                                counter=counter)
    except SystemExit:
        raise
    except:
        traceback.print_exc()
    finally:
        # Call os._exit() from finally block, in order to suppress any
        # finally blocks from earlier in the call stack. See bug #345289.
        os._exit(rval)
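
# The skeleton of the elog plumbing above: a pipe whose read end is made
# non-blocking in the parent so a scheduler can poll it, while the forked
# child writes into the other end. A self-contained sketch (UNIX only,
# illustrative names, not portage code):

import fcntl
import os

r, w = os.pipe()
fcntl.fcntl(r, fcntl.F_SETFL,
            fcntl.fcntl(r, fcntl.F_GETFL) | os.O_NONBLOCK)
pid = os.fork()
if pid == 0:
    os.close(r)
    os.write(w, b'child says hello\n')
    os._exit(0)
os.close(w)
os.waitpid(pid, 0)
print os.read(r, 4096)  # data is buffered; EAGAIN would mean "try later"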
from subprocess import Popen, PIPE
from BaseHTTPServer import BaseHTTPRequestHandler
from fcntl import fcntl, F_GETFL, F_SETFL
from os import O_NONBLOCK, read, fdopen
import pty

host_name = '0.0.0.0'  # Change this to your Raspberry Pi IP address
host_port = 80

master, slave = pty.openpty()
p = Popen(['./printOutput'], stdin=PIPE, stdout=slave, stderr=slave,
          shell=True, bufsize=1)
stdout = fdopen(master)
flags = fcntl(stdout, F_GETFL)            # get current p.stdout flags
fcntl(stdout, F_SETFL, flags | O_NONBLOCK)


class MyServer(BaseHTTPRequestHandler):
    """
    A special implementation of BaseHTTPRequestHandler for reading data
    from and controlling the GPIO of a Raspberry Pi
    """

    def do_HEAD(self):
        """
        do_HEAD() can be tested using the curl command
        'curl -I http://server-ip-address:port'
        """
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
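
# A pty is used above instead of a plain pipe because stdio switches the
# child to line buffering when its stdout is a terminal, so output
# arrives promptly. With the master non-blocking, a handler can poll for
# output without stalling the HTTP server; a sketch of such a helper
# (hypothetical, not part of the server above):

import errno

def read_available(master_fd):
    try:
        return read(master_fd, 4096)   # `read` imported from os above
    except OSError as e:
        if e.errno != errno.EAGAIN:
            raise
        return ''                      # child has produced nothing yet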
def __init__(self, filename, size, fps, is_color=True, codec="libx264",
             bitrate=None):
    """
    Initializes the video writer.

    `filename` is the name of the video
    `size` is a tuple determining the width and height of the video
    `fps` determines the frame rate in 1/seconds
    `is_color` is a flag indicating whether the video is in color
    `codec` selects a codec supported by FFmpeg
    `bitrate` determines the associated bitrate
    """
    self.filename = os.path.expanduser(filename)
    self.codec = codec
    self.ext = self.filename.split(".")[-1]
    self.size = size
    self.is_color = is_color
    self.frames_written = 0

    if size[0] % 2 != 0 or size[1] % 2 != 0:
        raise ValueError('Both dimensions of the video must be even for '
                         'the video codec to work properly')

    # determine whether we are in debug mode (note: `<=` because DEBUG
    # is the lowest standard level)
    debug = (logger.getEffectiveLevel() <= logging.DEBUG)

    # FIXME: consider adding the flags
    # "-f ismv" "-movflags frag_keyframe"
    # to avoid corrupted mov files, if movie writing is interrupted

    # build the FFmpeg command
    cmd = ([FFMPEG_BINARY, '-y',
            '-loglevel', 'verbose' if debug else 'error',
            '-threads', '1',  # single threaded encoding for safety
            '-f', 'rawvideo',
            '-vcodec', 'rawvideo',
            '-s', "%dx%d" % tuple(size),
            '-pix_fmt', 'rgb24' if is_color else 'gray',
            '-r', '%.02f' % fps,
            '-i', '-',
            '-an']  # no audio
           + ([] if (codec is None) else ['-c:v', codec])
           + ([] if (bitrate is None) else ['-b:v', bitrate])
           # http://trac.FFmpeg.org/ticket/658
           + (['-pix_fmt', 'yuv420p']
              if ((codec == 'libx264') and
                  (size[0] % 2 == 0) and
                  (size[1] % 2 == 0))
              else [])
           + ['-r', "%.02f" % fps, filename])

    # estimate the buffer size with some safety margins
    depth = 3 if is_color else 1
    bufsize = 2 * depth * size[0] * size[1] + 100

    # start FFmpeg, which should wait for input
    self.proc = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                                 stdout=DEVNULL,
                                 stderr=subprocess.PIPE,
                                 bufsize=bufsize)

    # set the stderr to non-blocking; used the idea from
    # http://stackoverflow.com/a/8980466/932593
    # this only works on UNIX!
    fcntl.fcntl(self.proc.stderr.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)

    logger.info('Start writing video `%s` with codec `%s` using FFmpeg.',
                filename, codec)
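
# The matching write side is just raw bytes on stdin: with '-pix_fmt
# rgb24' each frame is height*width*3 bytes, row-major. A sketch of a
# minimal write_frame (hypothetical; the real class defines its own,
# and numpy is assumed):

import numpy as np

def write_frame(writer, frame):
    # frame: uint8 array of shape (height, width, 3) matching `size`
    writer.proc.stdin.write(frame.astype(np.uint8).tostring())
    writer.frames_written += 1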
def _set_fd_cloexec(fd):
    if os.name != 'nt':
        import fcntl
        flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
def remove_cloexec(fd):
    fcntl.fcntl(fd, fcntl.F_SETFD,
                fcntl.fcntl(fd, fcntl.F_GETFD) & ~fcntl.FD_CLOEXEC)
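
# remove_cloexec undoes the flag that _set_fd_cloexec above establishes;
# a quick round trip (a minimal sketch, UNIX only):

import fcntl
import os

r, w = os.pipe()
fcntl.fcntl(r, fcntl.F_SETFD,
            fcntl.fcntl(r, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
remove_cloexec(r)
assert not fcntl.fcntl(r, fcntl.F_GETFD) & fcntl.FD_CLOEXEC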