def __init__(self, reactor, proc, name, fileno, forceReadHack=False):
    """Set up the descriptor for a Process instance.

    Puts the fd into non-blocking mode and, when the fd is a write-only
    pipe end (or the caller forces it), enables the read-to-detect-close
    hack and starts reading.
    """
    abstract.FileDescriptor.__init__(self, reactor)
    fdesc.setNonBlocking(fileno)
    self.proc, self.name, self.fd = proc, name, fileno
    if not forceReadHack:
        # Probe with a zero-byte read: if the fd is readable we must not
        # use read() to detect closing.  An OSError means this is the
        # write-only end of a pipe, so the hack is safe to enable.
        # (This only means we cannot detect a TTY's write pipe closing.)
        try:
            os.read(self.fileno(), 0)
        except OSError:
            self.enableReadHack = True
    else:
        self.enableReadHack = True
    if self.enableReadHack:
        self.startReading()
def _dump_file(self, file, submission_gus, file_gus): result = {} result['file_gus'] = file_gus result['name'] = file['filename'] result['type'] = file['content_type'] result['size'] = len(file['body']) # XXX verify this token what's is it result['token'] = submission_gus if not os.path.isdir(config.advanced.submissions_dir): print "%s does not exist. Creating it." % config.advanced.submissions_dir os.mkdir(config.advanced.submissions_dir) the_submission_dir = config.advanced.submissions_dir # this happen only at the first execution if not os.path.isdir(the_submission_dir): os.mkdir(the_submission_dir) filelocation = os.path.join(the_submission_dir, file_gus) print "Saving file \"%s\" of %d byte [%s] type, to %s" % \ (result['name'], result['size'], result['type'], filelocation ) with open(filelocation, 'w+') as fd: fdesc.setNonBlocking(fd.fileno()) fdesc.writeToFD(fd.fileno(), file['body']) return result
def startService(self):
    """Rotate old output files, open a fresh one and start a periodic
    five-second fsync loop."""
    log.msg("Starting %s" % self)
    filename = self.getFileName()

    def checkAndMove(filename, offset=0):
        # Recursively shift existing rotated files up by one suffix
        # index, keeping at most self.keepFiles of them.
        if os.path.isfile(self._outfileName) and offset <= self.keepFiles:
            offset += 1
            newFileName = self.outdir + os.path.sep + self.filename[:-(
                len(self.suffix))] + '.%s%s' % (offset, self.suffix)
            checkAndMove(newFileName, offset)
            if os.path.isfile(filename):
                shutil.move(filename, newFileName)

    checkAndMove(self._outfileName)
    self.outfile = os.open(self._outfileName,
                           os.O_WRONLY | os.O_NONBLOCK | os.O_CREAT)
    if self.outfile < 0:
        # os.open() returns a non-negative fd on success (0 is valid,
        # so `if not self.outfile` was wrong) and raising a bare string
        # is a TypeError; raise a real exception instead.
        raise IOError("Could not open %s" % self._outfileName)
    else:
        log.msg("Opened %s" % self._outfileName)
    fdesc.setNonBlocking(self.outfile)
    self.running = 1

    def syncToDisc():
        """Flush kernel buffers to disk and reschedule while running."""
        os.fsync(self.outfile)
        if self.running:
            reactor.callLater(5, syncToDisc)

    syncToDisc()
def receiveUpload(self, upload):
    """Parse an upload-control chunk and choose the next parser state.

    Returns a (handler, remaining_bytes) tuple.  `self.beginning`
    encodes the chunk position: 0 = first, 1 = middle, 2 = last.
    """
    # Marker of the previous chunk; defaults to 2 ("last") before any
    # upload has been seen, so 2-after-2 means a brand-new file.
    previous = getattr(self, 'beginning', 2)
    type, self.beginning, self.sequence = unpack('<BBL', upload)
    self.remaining -= DATA_UPLOAD_LEN
    log.msg('Upload control %s prev: %s' % (self.beginning, previous))
    if self.beginning == 1:
        log.msg('A middle part of a file received')
        return self.receiveContent, self.remaining
    elif self.beginning == 2 and previous != 2 :
        log.msg('Last chunk of file received')
        return self.receiveContent, self.remaining
    elif self.beginning == 0 or (self.beginning == 2 and previous == 2):
        log.msg('First chunk of new file received')
        self.stamp = str(time.time())
        self.path = os.path.join(self.factory.storage, self.certificate, self.identity, self.stamp)
        self.generateUpdate(self.stamp)
        self.file = os.open(self.path, os.O_RDWR | os.O_CREAT)
        fdesc.setNonBlocking(self.file)
        return self.receiveContent, self.remaining
        # NOTE(review): the block below is unreachable (it follows a
        # return) -- presumably it was meant to run in the "last chunk"
        # branch to acknowledge the completed upload; confirm intent.
        if self.confirm:
            self.confirmToggle()
            self.sendConfirmation()
    else:
        log.msg('Something strange happend invalid las_chunk field!')
        return self.uselessReceived, self.remaining
def __init__(self, trigger, fname, mode):
    """Open the backing file and register this descriptor non-blockingly.

    :param trigger: object notified by this descriptor's I/O events.
    :param fname: path of the file to open.
    :param mode: file open mode passed through to _init_fp.
    """
    super(FD, self).__init__()
    self.trigger = trigger
    # _init_fp must run first: setNonBlocking resolves the fd through us.
    self._init_fp(fname, mode)
    fdesc.setNonBlocking(self)
    self.connected = True # Required by FileDescriptor class
    # Buffered bytes awaiting processing.
    self.data = b""
def _write_file(self, file_path, data):
    """Write *data* to *file_path* through a non-blocking descriptor."""
    with open(file_path, 'w') as out:
        fileno = out.fileno()
        setNonBlocking(fileno)
        writeToFD(fileno, data)
def startReading(self):
    """Begin reading, lazily opening the path on first use."""
    fd = self._fd
    if fd is None or fd.closed:
        # (Re)open the file before handing control to the base class.
        self._fd = self._path.open('r')
        setNonBlocking(self._fd)
    FileDescriptor.startReading(self)
def getFile(filename):
    """Read *filename* asynchronously.

    Returns a Deferred that fires with the file's contents.
    """
    deferred = Deferred()
    with open(filename) as source:
        descriptor = source.fileno()
        setNonBlocking(descriptor)
        readFromFD(descriptor, deferred.callback)
    return deferred
def run(self, cd_path=None, action_name=None, parameters=None): """ Run main event loop. """ # Set stdin non blocking and raw fdesc.setNonBlocking(sys.stdin) tcattr = termios.tcgetattr(sys.stdin.fileno()) tty.setraw(sys.stdin.fileno()) # Report size self._send_size() self.socket.sendall(pickle.dumps(('_start-interaction', { 'cd_path': cd_path, 'action_name': action_name, 'parameters': parameters, }))) self._read_loop() # Reset terminal state termios.tcsetattr(sys.stdin, termios.TCSAFLUSH, tcattr) # Put the cursor again at the left margin. sys.stdout.write('\r\n') # Set exit status sys.exit(self.exit_status)
def __init__(self, reactor, proc, name, fileno, forceReadHack=False):
    """
    Initialize, specifying a Process instance to connect to.

    Decides whether the "read hack" (reading the write end of a pipe to
    detect the other side closing) applies to this fd, and starts
    reading if so.
    """
    abstract.FileDescriptor.__init__(self, reactor)
    fdesc.setNonBlocking(fileno)
    self.proc = proc
    self.name = name
    self.fd = fileno
    if not stat.S_ISFIFO(os.fstat(self.fileno()).st_mode):
        # If the fd is not a pipe, then the read hack is never
        # applicable.  This case arises when ProcessWriter is used by
        # StandardIO and stdout is redirected to a normal file.
        self.enableReadHack = False
    elif forceReadHack:
        self.enableReadHack = True
    else:
        # Detect if this fd is actually a write-only fd. If it's
        # valid to read, don't try to detect closing via read.
        # This really only means that we cannot detect a TTY's write
        # pipe being closed.
        # NOTE(review): presumably enableReadHack defaults to False on
        # the class when the zero-byte read succeeds -- confirm.
        try:
            os.read(self.fileno(), 0)
        except OSError:
            # It's a write-only pipe end, enable hack
            self.enableReadHack = True
    if self.enableReadHack:
        self.startReading()
def __init__(self, fp, callback):
    """Wrap an open file object as a non-blocking reader.

    *callback* is invoked with data as it becomes available.
    """
    self.fp = fp
    self.callback = callback
    # Delegate fileno() to the wrapped file so the reactor can poll it.
    self.fileno = self.fp.fileno
    fdesc.setNonBlocking(fp)
    abstract.FileDescriptor.__init__(self, reactor)
def _create_ipfile(self, username, ip_addr): """ Create configuration file for static IP allocation. :param username (str): account username :param ip_addr (IPv4Address): IP address allocated for the account. """ log.info("ACCOUNTS:: Creating IP file for client.") # OpenVPN will look for files inside this directory. Its filename must # be the same as the username ip_filename = os.path.join(self.path['client-ips'], username) log.debug("ACCOUNTS: Creating {} with {}.".format( ip_filename, str(ip_addr))) # From `Configuring client-specific rules and access policies` in # https://openvpn.net/index.php/open-source/documentation/howto.html virtual_client = ip_addr server_endpoint = ip_addr + 1 log.debug("ACCOUNTS:: ifconfig-push {} {}".format( virtual_client, server_endpoint)) data = "ifconfig-push {} {}\n".format(str(virtual_client), str(server_endpoint)) with open(ip_filename, 'w+') as f: fd = f.fileno() setNonBlocking(fd) writeToFD(fd, data) fp = FilePath(ip_filename) fp.chmod(0644)
def open(self):
    """Open an nflog handle and expose its file descriptor.

    When non-blocking operation was requested, the fd is switched to
    non-blocking mode so nfnl_catch can drive the processing.
    """
    self.nflog_handle = call_nf(nflog_open, error=0)
    self.nfnl_handle = call_nf(nflog_nfnlh, self.nflog_handle, error=0)
    self.fd = nflog_fd(self.nflog_handle)
    if self.non_blocking:
        # set the fd non blocking so we can use nfnl_catch to do the processing
        fdesc.setNonBlocking(self.fd)
def updateReport(self, report_filename, data):
    """Append *data* to an existing report file.

    Raises web.HTTPError(404) when the report file cannot be opened.
    """
    try:
        with open(report_filename, 'a+') as fd:
            fdesc.setNonBlocking(fd.fileno())
            fdesc.writeToFD(fd.fileno(), data)
    except IOError as e:
        # The original constructed the HTTPError without raising it, so
        # a missing report was silently ignored.
        raise web.HTTPError(404, "Report not found")
def startService(self):
    """Rotate any existing output files, open a fresh one and start a
    five-second fsync loop."""
    log.msg("Starting %s" % self)
    self.filename = self.getFileName()

    def checkAndMove(filename, offset=0):
        # Shift rotated files up one suffix index, keeping at most
        # self.keepFiles copies.
        if os.path.isfile(self._outfileName) and offset <= self.keepFiles:
            offset += 1
            newFileName = self.outdir + os.path.sep + os.path.basename(
                self.filename[:-(len(self.suffix))]) + '.%s%s' % (offset, self.suffix)
            checkAndMove(newFileName, offset)
            if os.path.isfile(filename):
                shutil.move(filename, newFileName)

    checkAndMove(self._outfileName)
    self.outfile = os.open(self._outfileName,
                           os.O_WRONLY | os.O_NONBLOCK | os.O_CREAT)
    if self.outfile < 0:
        # os.open() returns a non-negative fd on success (0 is valid,
        # so `if not self.outfile` was wrong) and raising a bare string
        # is a TypeError; raise a real exception instead.
        raise IOError("Could not open %s" % self._outfileName)
    else:
        log.msg("Opened %s" % self._outfileName)
    fdesc.setNonBlocking(self.outfile)
    self.running = 1

    def syncToDisc():
        """Flush the output fd to disk and reschedule while running."""
        os.fsync(self.outfile)
        if self.running:
            reactor.callLater(5, syncToDisc)

    syncToDisc()
def __init__(self, factory, fd, reactor=None):
    """Adopt an already-open descriptor for non-blocking reads."""
    self.factory = factory
    self._fd = fd
    # The descriptor must never block the reactor thread.
    fdesc.setNonBlocking(fd)
    # Hand the descriptor to the base class, which opens it for reading.
    super(AdoptReadDescriptor, self).__init__(fd, reactor)
def setUp(self):
    """
    Create a non-blocking pipe that can be used in tests.
    """
    reader, writer = os.pipe()
    for end in (reader, writer):
        fdesc.setNonBlocking(end)
    self.r, self.w = reader, writer
def updateReport(self, report_filename, data):
    """Append *data* to an existing report file.

    Raises OONIBError(404) when the report file cannot be opened.
    """
    try:
        with open(report_filename, 'a+') as fd:
            fdesc.setNonBlocking(fd.fileno())
            fdesc.writeToFD(fd.fileno(), data)
    except IOError as e:
        # `e.OONIBError(...)` was an AttributeError on the caught
        # IOError and nothing was ever raised; raise the intended type.
        raise OONIBError(404, "Report not found")
def __init__(self, reactor):
    """Create a non-blocking pipe; fileno() exposes the read end."""
    self.reactor = reactor
    read_end, write_end = os.pipe()
    fdesc.setNonBlocking(read_end)
    fdesc.setNonBlocking(write_end)
    self.i = read_end
    self.o = write_end
    # The reactor polls the read side of the pipe.
    self.fileno = lambda: self.i
def start(level=None, name="twisted", to_stdout=None, to_file=None,
          log_file_name=None):
    """
    Starts the logging for a single module.

    Each module should import this logging module and decide its level.

    The first time this is called, don't give any argument. It will log
    everything with the name "twisted".

    The programmer can choose the level from which to log, discarding any
    message with a lower level.  Example : If level is INFO, the DEBUG
    messages (lower level) will not be displayed but the CRITICAL ones will.

    @param level: debug, info, error, warning or critical
    @type level: str
    @param to_stdout: Whether it should be printed to stdout. (False to disable)
    @param to_file: Whether it should be printed to file. (True to enable)
    @param name: What string to prefix with.
    @rtype: L{twisted.python.logging.PythonLoggingObserver}
    """
    global SYSTEMWIDE_TO_STDOUT
    global SYSTEMWIDE_TO_FILE
    global SYSTEMWIDE_LOG_FILE_NAME
    global SYSTEMWIDE_LEVEL
    if log_file_name is not None:
        SYSTEMWIDE_LOG_FILE_NAME = log_file_name
    if level is not None:
        SYSTEMWIDE_LEVEL = level
    logger = logging.getLogger(name)
    formatter = logging.Formatter(
        '%(asctime)s %(name)-13s %(levelname)-8s %(message)s')
    set_level(SYSTEMWIDE_LEVEL, name)
    # Only an explicit True/False overrides the system-wide settings;
    # None means "keep the current value".  (The earlier standalone
    # `if to_file is True:` assignment was subsumed by this check and
    # has been removed.)
    if to_stdout is True or to_stdout is False:
        SYSTEMWIDE_TO_STDOUT = to_stdout
    if to_file is True or to_file is False:
        SYSTEMWIDE_TO_FILE = to_file
    if SYSTEMWIDE_TO_STDOUT:
        so_handler = logging.StreamHandler(sys.stdout)
        if ENABLE_NON_BLOCKING_OUTPUT:
            fdesc.setNonBlocking(so_handler.stream)  # NON-BLOCKING OUTPUT
        so_handler.setFormatter(formatter)
        logger.addHandler(so_handler)
    if SYSTEMWIDE_TO_FILE:
        if SYSTEMWIDE_LOG_FILE_NAME is None:
            raise RuntimeError("The log file name has not been set.")
        # FIXME: not catching IOError that could occur.
        file_handler = logging.FileHandler(SYSTEMWIDE_LOG_FILE_NAME)
        if ENABLE_NON_BLOCKING_OUTPUT:
            fdesc.setNonBlocking(file_handler.stream)  # NON-BLOCKING OUTPUT
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    if name == 'twisted':
        observer = twisted_log.PythonLoggingObserver(name)
        observer.start()
    # Both branches of the original returned the identical expression.
    return logging.getLogger(name)
def process_image(self, payload, **kwargs):
    """
    Writes images to the cache.

    Caches the (possibly resized) image and, when a resize happened,
    also the full-size original.  Fires (via returnValue) with the
    web-cache path of the served image.
    """
    filecache_loc = settings.CACHE_LOCATION
    webcache_loc = settings.WEB_CACHE_LOCATION
    cache_filename_parts = payload['image_path'].split('.')
    filefront = cache_filename_parts[0]
    fileend = cache_filename_parts[1]
    original_filename = '%s.%s' % (
        filefront,
        fileend,
    )
    cache_filename = '%s_%sx%s_%s.%s' % (
        filefront,
        payload['width'],
        payload['height'],
        payload['mode'],
        fileend,
    )
    file_cache = os.path.join(filecache_loc, cache_filename)
    web_cache = os.path.join(webcache_loc, cache_filename)
    # Files are normally binned in subdir, create them in cache
    dirs = os.path.dirname(file_cache)
    try:
        os.makedirs(dirs)
    except os.error:
        pass
    if 'skip_resize' in payload:
        # Just save/serve the original image as there is no resized image
        file_cache = os.path.join(filecache_loc, original_filename)
        web_cache = os.path.join(webcache_loc, original_filename)
    # Save image to be served.  `with` guarantees the fd is closed even
    # if the write fails (the original leaked it on error).
    with open(file_cache, 'w') as fd:
        fdesc.setNonBlocking(fd.fileno())
        yield fdesc.writeToFD(fd.fileno(), payload['image'])
    if 'skip_resize' not in payload:
        # The served image was resized; also cache the full-size image.
        file_cache = os.path.join(filecache_loc, original_filename)
        with open(file_cache, 'w') as fd:
            fdesc.setNonBlocking(fd.fileno())
            yield fdesc.writeToFD(fd.fileno(), payload['original_image'])
    if settings.DEBUG:
        log.msg(
            "[%s] Cached image location: %s"
            % (datetime.now().isoformat(), file_cache),
            logLevel=logging.DEBUG
        )
    defer.returnValue(web_cache)
def __init__(self, fid, offset, length, bufLen=8):
    """Position the descriptor at *offset* and set up read bookkeeping.

    :param fid: open file descriptor to read from.
    :param offset: byte offset at which reading starts.
    :param length: maximum number of bytes to read in total.
    :param bufLen: size of each individual read.
    """
    self._fd = fid
    # Every read session starts from the requested offset.
    os.lseek(fid, offset, os.SEEK_SET)
    setNonBlocking(fid)
    self._readBufLen = bufLen
    self._readMaxLen = length
    self._readLen = 0
    self._allData = ''
    # NOTE(review): attribute name keeps the original misspelling for
    # compatibility with existing callers.
    self.dataRecieved = AsyncFileReader.read_finish
def getConf(self): try: logger.debug("Getting config file") with open(self.configPath) as f: fd = f.fileno() setNonBlocking(fd) readFromFD(fd, self.runConf) except IOError, e: self.d.errback(e)
def __init__(self, reactor, executable, args, environment, path, proto,
             uid=None, gid=None, usePTY=None):
    """
    Spawn an operating-system process.

    This is where the hard work of disconnecting all currently open
    files / forking / executing the new process happens.  (This is
    executed automatically when a Process is instantiated.)

    This will also run the subprocess as a given user ID and group ID, if
    specified.  (Implementation Note: this doesn't support all the arcane
    nuances of setXXuid on UNIX: it will assume that either your effective
    or real UID is 0.)
    """
    if pty is None and not isinstance(usePTY, (tuple, list)):
        # no pty module and we didn't get a pty to use
        raise NotImplementedError(
            "cannot use PTYProcess on platforms without the pty module.")
    abstract.FileDescriptor.__init__(self, reactor)
    if isinstance(usePTY, (tuple, list)):
        # The caller supplied pre-opened pty descriptors.
        masterfd, slavefd, ttyname = usePTY
    else:
        masterfd, slavefd = pty.openpty()
        ttyname = os.ttyname(slavefd)
    self._fork(path, uid, gid, executable, args, environment,
               masterfd=masterfd, slavefd=slavefd)
    # we are now in parent process:
    os.close(slavefd)
    fdesc.setNonBlocking(masterfd)
    self.fd = masterfd
    self.startReading()
    self.connected = 1
    self.proto = proto
    self.lostProcess = 0
    self.status = -1
    try:
        self.proto.makeConnection(self)
    except:
        log.err()
    # NOTE(review): assumes self._fork() set self.pid -- confirm in the
    # base process implementation.
    registerReapProcessHandler(self.pid, self)
def process_image(self, payload, **kwargs):
    """
    Writes images to the cache.

    Stores the (possibly resized) image under a size/mode-keyed name
    and, when a resize happened, the full-size original too.  Fires
    (via returnValue) with the web-cache path of the served image.
    """
    filecache_loc = settings.CACHE_LOCATION
    webcache_loc = settings.WEB_CACHE_LOCATION
    cache_filename_parts = payload['image_path'].split('.')
    filefront = cache_filename_parts[0]
    fileend = cache_filename_parts[1]
    original_filename = '%s.%s' % (filefront, fileend,)
    cache_filename = '%s_%sx%s_%s.%s' % (
        filefront, payload['width'], payload['height'],
        payload['mode'], fileend,
    )
    file_cache = os.path.join(filecache_loc, cache_filename)
    web_cache = os.path.join(webcache_loc, cache_filename)
    # Files are normally binned in subdir, create them in cache
    dirs = os.path.dirname(file_cache)
    try:
        os.makedirs(dirs)
    except os.error:
        pass
    if 'skip_resize' in payload:
        # Just save/serve the original image as there is no resized image
        file_cache = os.path.join(filecache_loc, original_filename)
        web_cache = os.path.join(webcache_loc, original_filename)
    # Save image to be served; `with` closes the fd even on a failed
    # write (the original leaked the handle in that case).
    with open(file_cache, 'w') as fd:
        fdesc.setNonBlocking(fd.fileno())
        yield fdesc.writeToFD(fd.fileno(), payload['image'])
    if 'skip_resize' not in payload:
        # The served image was resized; also cache the full-size image.
        file_cache = os.path.join(filecache_loc, original_filename)
        with open(file_cache, 'w') as fd:
            fdesc.setNonBlocking(fd.fileno())
            yield fdesc.writeToFD(fd.fileno(), payload['original_image'])
    if settings.DEBUG:
        log.msg("[%s] Cached image location: %s"
                % (datetime.now().isoformat(), file_cache),
                logLevel=logging.DEBUG)
    defer.returnValue(web_cache)
def openOutputFile(self, path): if path is not None: try: # Open the file descriptor self.outputFile = open(path + '.h264') # Make it nonblocking fdesc.setNonBlocking(self.outputFile.fileno()) except: print "Warning! Incoming video stream will not save to local file!" self.outputFile = None
def __init__(self, file_name, reactor=None):
    """Create (if necessary) and open a FIFO at *file_name*.

    The FIFO is opened append/read ('a+') and switched to non-blocking
    mode; fileno() is delegated to the underlying file object.
    """
    abstract.FileDescriptor.__init__(self, reactor=reactor)
    self.file_name = file_name
    try:
        os.mkfifo(file_name)
    except OSError:
        # The FIFO already exists -- reuse it.
        pass
    self.fp = open(self.file_name, 'a+')
    fdesc.setNonBlocking(self.fp)
    self.fileno = self.fp.fileno
def test_setBlocking(self):
    """
    L{fdesc.setBlocking} sets a file description to blocking.
    """
    reader, writer = os.pipe()
    self.addCleanup(os.close, reader)
    self.addCleanup(os.close, writer)
    # Flip the flag on, then off again, and verify it really cleared.
    fdesc.setNonBlocking(reader)
    fdesc.setBlocking(reader)
    flags = fcntl.fcntl(reader, fcntl.F_GETFL)
    self.assertFalse(flags & os.O_NONBLOCK)
def __init__(self, reactor, proc, name, fileno):
    """
    Initialize, specifying a process to connect to.

    The fd is made non-blocking and reading starts immediately.
    """
    abstract.FileDescriptor.__init__(self, reactor)
    fdesc.setNonBlocking(fileno)
    self.proc, self.name, self.fd = proc, name, fileno
    self.startReading()
def pipe(self):
    """
    Create a non-blocking pipe which will be closed after the
    currently running test.
    """
    ends = os.pipe()
    for fd in ends:
        self.addCleanup(os.close, fd)
        setNonBlocking(fd)
    return ends
def _read_from_cache(self, url, post, file_name):
    """Read a cached response file.

    Returns a Deferred firing with the cached bytes.
    """
    key = file_name or self.scraper.cache.make_key(url=url, post=post)
    path = os.path.join(self.scraper.cache.location, key)
    result = defer.Deferred()
    with open(path, 'rb') as cached:
        descriptor = cached.fileno()
        setNonBlocking(descriptor)
        _readFromFD(descriptor, result.callback)
    return result
def __new__(cls, *args, **kwargs):
    """Return the process-wide INotify singleton, creating it on first use.

    Prefers the libc inotify entry points; falls back to raw syscall
    IDs per machine architecture when libc lacks them.
    """
    obj = getattr(cls, '_instance_', None)
    if obj is not None:
        return obj
    obj = super(INotify, cls).__new__(cls, *args, **kwargs)
    # Check inotify support by checking for the required functions
    obj.libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library('c'))
    required = "inotify_add_watch inotify_init inotify_rm_watch".split()
    if all(hasattr(obj.libc, function) for function in required):
        obj.inotify_init = obj.libc.inotify_init
        # BUG FIX: the original read `obj.libc_inotify_add_watch` /
        # `obj.libc_inotify_rm_watch` (nonexistent attributes, note the
        # underscore) instead of the libc functions themselves.
        obj.inotify_add_watch = obj.libc.inotify_add_watch
        obj.inotify_rm_watch = obj.libc.inotify_rm_watch
    else:
        print("inotify.py - can't use libc6, 2.4 or higher needed")
        import platform
        if platform.system() != 'Linux':
            raise SystemError(
                "unknown system '%r', INotify support disabled"
                % platform.uname())
        machine = platform.machine()
        try:
            obj._init_syscall_id = _inotify_syscalls[machine][0]
            obj._add_watch_syscall_id = _inotify_syscalls[machine][1]
            obj._rm_watch_syscall_id = _inotify_syscalls[machine][2]
            obj.inotify_init = obj._inotify_init
            obj.inotify_add_watch = obj._inotify_add_watch
            obj.inotify_rm_watch = obj._inotify_rm_watch
        except Exception:
            # Narrowed from a bare `except:`; an unknown machine raises
            # KeyError on the syscall table lookup.
            raise SystemError(
                "unknown system '%s', INotify support disabled" % machine)
    FileDescriptor.__init__(obj)
    obj._fd = obj.inotify_init()
    if obj._fd < 0:
        raise SystemError("INotify initialization error.")
    fdesc.setNonBlocking(obj._fd)
    reactor.addReader(obj)
    obj._buffer = ''
    # Mapping from wds to Watch objects
    obj._watchpoints = {}
    # Mapping from paths to wds
    obj._watchpaths = {}
    cls._instance_ = obj
    return obj
def listen(self, factory):
    """Adopt the stored fd as a listening port (one-shot).

    Returns a Deferred firing with the started port, or failing with
    AlreadyListened on reuse / the underlying failure otherwise.
    """
    if self._used:
        return defer.fail(AlreadyListened())
    self._used = True
    try:
        fdesc.setNonBlocking(self.fileno)
        port = Port._fromListeningDescriptor(
            self.reactor, self.fileno, factory)
        port.startListening()
        # NOTE(review): closing our copy right after startListening()
        # assumes the port duplicated the descriptor during adoption --
        # confirm against Port._fromListeningDescriptor.
        os.close(self.fileno)
    except:
        return defer.fail()
    return defer.succeed(port)
def updateReport(self, report_id, parsed_request):
    """Append the submitted content to the report file for *report_id*.

    Raises OONIBError(404) when the report file cannot be opened.
    """
    log.debug("Got this request %s" % parsed_request)
    report_filename = os.path.join(config.main.report_dir, report_id)
    config.reports[report_id].refresh()
    try:
        with open(report_filename, 'a+') as fd:
            fdesc.setNonBlocking(fd.fileno())
            fdesc.writeToFD(fd.fileno(), parsed_request['content'])
    except IOError as exc:
        # The original referenced an undefined name `e` (the exception
        # was bound as `exc`) and never raised the constructed error;
        # raise the intended OONIBError instead.
        raise OONIBError(404, "Report not found")
    self.write({})
def __init__(self, reactor, executable, args, environment, path, proto,
             uid=None, gid=None, usePTY=None):
    """
    Spawn an operating-system process.

    This is where the hard work of disconnecting all currently open
    files / forking / executing the new process happens.  (This is
    executed automatically when a Process is instantiated.)

    This will also run the subprocess as a given user ID and group ID, if
    specified.  (Implementation Note: this doesn't support all the arcane
    nuances of setXXuid on UNIX: it will assume that either your effective
    or real UID is 0.)
    """
    if pty is None and not isinstance(usePTY, (tuple, list)):
        # no pty module and we didn't get a pty to use
        raise NotImplementedError(
            "cannot use PTYProcess on platforms without the pty module.")
    abstract.FileDescriptor.__init__(self, reactor)
    _BaseProcess.__init__(self, proto)
    if isinstance(usePTY, (tuple, list)):
        # The caller supplied pre-opened pty descriptors.
        masterfd, slavefd, ttyname = usePTY
    else:
        masterfd, slavefd = pty.openpty()
        ttyname = os.ttyname(slavefd)
    try:
        self._fork(path, uid, gid, executable, args, environment,
                   masterfd=masterfd, slavefd=slavefd)
    except:
        # Only close descriptors we opened ourselves; caller-supplied
        # ones remain the caller's responsibility.
        if not isinstance(usePTY, (tuple, list)):
            os.close(masterfd)
            os.close(slavefd)
        raise
    # we are now in parent process:
    os.close(slavefd)
    fdesc.setNonBlocking(masterfd)
    self.fd = masterfd
    self.startReading()
    self.connected = 1
    self.status = -1
    try:
        self.proto.makeConnection(self)
    except:
        log.err()
    # NOTE(review): assumes _fork()/_BaseProcess set self.pid -- confirm.
    registerReapProcessHandler(self.pid, self)
def get_file_checksum(filepath):
    """Return the SHA-256 hex digest of the file at *filepath*.

    The file is read in 8 KiB chunks so arbitrarily large files can be
    hashed without loading them into memory.
    """
    # Stdlib hashlib replaces the third-party Crypto.Hash.SHA256;
    # the digest produced is identical.
    import hashlib
    sha = hashlib.sha256()
    chunk_size = 8192
    with open(filepath, 'rb') as fd:
        # The original set O_NONBLOCK on this fd, which has no effect on
        # regular-file reads; the flag has been dropped.
        while True:
            chunk = fd.read(chunk_size)
            if len(chunk) == 0:
                break
            sha.update(chunk)
    return sha.hexdigest()
def __init__(self, trigger, fname, mode):
    """Open *fname* with 2 KiB buffering and start I/O per *mode*."""
    super(FD, self).__init__()
    self.trigger = trigger
    self.fp = open(fname, mode, buffering=2048)
    fdesc.setNonBlocking(self)
    self.connected = True  # Required by FileDescriptor class
    self.data = b""
    writable = "w" in mode
    readable = "r" in mode
    if writable:
        self.startWriting()
    if readable:
        # Track the total size up front so we can detect EOF ourselves.
        self.filelen = os.path.getsize(fname)
        self.totalread = 0
        self.startReading()
def _read_loop(self):
    """Pump data between the server socket and the local terminal.

    Blocks in select() on the socket and stdin until the connection
    closes or (when wait_for_closing is set) any key is pressed.
    """
    while True:
        try:
            # I/O routing
            r, w, e = select.select([self.socket, sys.stdin], [], [])
            if self.socket in r:
                data = self.socket.recv(1024)
                if data:
                    self._receive(data)
                else:
                    # Nothing received? End of stream.
                    break
            if sys.stdin in r:
                # Non blocking read. (Write works better in blocking mode.
                # Especially on OS X.)
                fdesc.setNonBlocking(sys.stdin)
                data = sys.stdin.read(1024)
                # We read larger chuncks (more than just one byte) in
                # one go. This is important for meta and arrow keys
                # which consist of multiple bytes. Many applications
                # rely on this that they'll receive them together.
                fdesc.setBlocking(sys.stdin)
                # If we're finish and 'wait_for_closing' was set. Any key
                # press will terminate the client.
                if self.wait_for_closing:
                    break
                if chr(14) in data:  # Ctrl-N
                    # Tell the server to open a new window.
                    self.socket.sendall(
                        pickle.dumps(('open-new-window', '')))
                else:
                    self.socket.sendall(pickle.dumps(('_input', data)))
        except socket.error:
            print '\nConnection closed...'
            break
        except Exception as e:
            # SIGWINCH will abort select() call. Just ignore this error
            if e.args and e.args[0] == errno.EINTR:
                continue
            else:
                raise
def __init__(self, reactor, executable, args, environment, path, protocol,
             name):
    """Spawn *executable* under a fresh pty and start reading from it.

    The spawned process is tracked for reaping and described by an
    info.Info record carrying its pid and *name*.
    """
    abstract.FileDescriptor.__init__(self, reactor)
    process._BaseProcess.__init__(self, protocol)
    masterfd, slavefd = pty.openpty()
    ttyname = os.ttyname(slavefd)
    # Always runs as the current user: no uid/gid switching.
    uid=None
    gid=None
    try:
        self._fork(path, uid, gid, executable, args, environment,
                   masterfd=masterfd, slavefd=slavefd)
    except:
        # Fork failed; release both pty ends before propagating.
        os.close(masterfd)
        os.close(slavefd)
        raise
    self._masterfd = masterfd
    self._slavefd = slavefd
    self._args = args
    self._environment = environment
    self._path = path
    self._protocol = protocol
    self._ttyname = ttyname
    self._executable = executable
    # we are now in parent process:
    os.close(slavefd)
    fdesc.setNonBlocking(masterfd)
    self.fd = masterfd
    self.startReading()
    self.connected = 1
    self.status = -1
    self._info = info.Info(pid=self.pid, name=name, type='proc',
                           executable=executable)
    try:
        self.proto.makeConnection(self)
    except:
        log.err()
    process.registerReapProcessHandler(self.pid, self)
def __new__(cls, *args, **kwargs):
    """Return the process-wide INotify singleton, creating it on first use.

    Uses libc's inotify entry points when available, otherwise falls
    back to per-architecture raw syscall IDs.
    """
    obj = getattr(cls, '_instance_', None)
    if obj is not None:
        return obj
    obj = super(INotify, cls).__new__(cls, *args, **kwargs)
    # Check inotify support by checking for the required functions
    obj.libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library('c'))
    needed = "inotify_add_watch inotify_init inotify_rm_watch".split()
    if all(hasattr(obj.libc, function) for function in needed):
        obj.inotify_init = obj.libc.inotify_init
        # BUG FIX: the original read `obj.libc_inotify_add_watch` /
        # `obj.libc_inotify_rm_watch` -- nonexistent attributes (note
        # the underscore) -- instead of the libc functions.
        obj.inotify_add_watch = obj.libc.inotify_add_watch
        obj.inotify_rm_watch = obj.libc.inotify_rm_watch
    else:
        print("inotify.py - can't use libc6, 2.4 or higher needed")
        import platform
        if platform.system() != 'Linux':
            raise SystemError("unknown system '%r', INotify support disabled"
                              % platform.uname())
        machine = platform.machine()
        try:
            obj._init_syscall_id = _inotify_syscalls[machine][0]
            obj._add_watch_syscall_id = _inotify_syscalls[machine][1]
            obj._rm_watch_syscall_id = _inotify_syscalls[machine][2]
            obj.inotify_init = obj._inotify_init
            obj.inotify_add_watch = obj._inotify_add_watch
            obj.inotify_rm_watch = obj._inotify_rm_watch
        except Exception:
            # Narrowed from a bare `except:`; unknown machine raises
            # KeyError on the syscall table lookup.
            raise SystemError("unknown system '%s', INotify support disabled"
                              % machine)
    FileDescriptor.__init__(obj)
    obj._fd = obj.inotify_init()
    if obj._fd < 0:
        raise SystemError("INotify initialization error.")
    fdesc.setNonBlocking(obj._fd)
    reactor.addReader(obj)
    obj._buffer = ''
    # Mapping from wds to Watch objects
    obj._watchpoints = {}
    # Mapping from paths to wds
    obj._watchpaths = {}
    cls._instance_ = obj
    return obj
def _read_loop(self):
    """Route bytes between the server socket and the local terminal.

    Loops in select() over the socket and stdin until the connection
    closes or (when wait_for_closing is set) any key is pressed.
    """
    while True:
        try:
            # I/O routing
            r, w, e = select.select([ self.socket, sys.stdin ], [], [])
            if self.socket in r:
                data = self.socket.recv(1024)
                if data:
                    self._receive(data)
                else:
                    # Nothing received? End of stream.
                    break
            if sys.stdin in r:
                # Non blocking read. (Write works better in blocking mode.
                # Especially on OS X.)
                fdesc.setNonBlocking(sys.stdin)
                data = sys.stdin.read(1024)
                # We read larger chuncks (more than just one byte) in
                # one go. This is important for meta and arrow keys
                # which consist of multiple bytes. Many applications
                # rely on this that they'll receive them together.
                fdesc.setBlocking(sys.stdin)
                # If we're finish and 'wait_for_closing' was set. Any key
                # press will terminate the client.
                if self.wait_for_closing:
                    break
                if chr(14) in data:  # Ctrl-N
                    # Tell the server to open a new window.
                    self.socket.sendall(pickle.dumps(('open-new-window', '')))
                else:
                    self.socket.sendall(pickle.dumps(('_input', data)))
        except socket.error:
            print '\nConnection closed...'
            break
        except Exception as e:
            # SIGWINCH will abort select() call. Just ignore this error
            if e.args and e.args[0] == errno.EINTR:
                continue
            else:
                raise
def downloadReceived(self, download):
    """Handle the metadata chunk of a file download.

    Opens a timestamp-named target file when a new transfer begins and
    returns the (handler, remaining_bytes) pair for the content phase.
    """
    log.msg('Download info received')
    metadata = unpack('!BBLLLLLL19s', download)
    (data_type, self.beginning, source, resolutionx, resolutiony,
     latitude, longitude, heigth, timestamp) = metadata
    self.remaining -= DATA_DOWNLOAD_LEN
    log.msg('Metadata:', metadata)
    if self.beginning == 0:
        log.msg('File transfer beginning')
        filename = str(time.time())
        self.file = os.open(filename, os.O_RDWR | os.O_CREAT)
        fdesc.setNonBlocking(self.file)
        return self.contentReceived, self.remaining
    elif self.beginning in (1, 2):
        return self.contentReceived, self.remaining
    else:
        log.msg('Something strange happend!')
def __init__(self, protocol, filename, reactor=None, mode='r+'):
    """
    Creates an instance of FileTransport.

    :param protocol: Twisted protocol instance. Must implement the
        {IProtocol} interface
    :param filename: name of file to open (e.g. '/dev/ttyUSB0')
    :param reactor: Twisted reactor instance
    :param mode: file read/write mode (e.g. 'r', 'w', 'r+', etc...)
    """
    FileDescriptor.__init__(self, reactor)
    self.filename = filename
    self.protocol = protocol
    self._f = open(filename, mode=mode)
    fdesc.setNonBlocking(self._f)
    # Drop anything already pending in either direction.
    self.flushInput()
    self.flushOutput()
    self.protocol.makeConnection(self)
    self.startReading()
    self.connected = 1
def __init__(self, reactor=None):
    """Create the inotify fd, non-blocking and close-on-exec."""
    FileDescriptor.__init__(self, reactor=reactor)
    # _inotify is kept parameterizable so tests can override it and
    # simulate system errors.
    self._fd = self._inotify.init()
    fdesc.setNonBlocking(self._fd)
    fdesc._setCloseOnExec(self._fd)
    # Mark ourselves connected (write side disabled) so that
    # loseConnection() calls connectionLost() on us even before we
    # have started reading.
    self.connected = 1
    self._writeDisconnected = True
    self._buffer = ''
    self._watchpoints = {}  # wd -> Watch object
    self._watchpaths = {}   # path -> wd
def logTask(self, packet, data):
    """ Log the data from the packet """
    fileDate = time.strftime('%Y-%m-%d')
    # I disagree with ^%&*@# PM but I had to keep it for compatibility
    logDate = time.strftime('%Y.%m.%d %I:%M:%S %p')
    # TODO
    # Map of file descriptors
    fileName = '%s/%s-%s' % (outdir, data['busNum'], fileDate)
    try:
        # Refuse paths escaping outdir (e.g. a busNum containing "..").
        if (os.path.realpath(fileName).startswith(outdir)):
            with open(fileName, 'a') as output:
                fdesc.setNonBlocking(output)
                output.write('%s:%s, %s\n'
                             % (logDate, packet.latitude, packet.longitude))
        else:
            log.err("Escalation attempt")
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; logging remains best-effort.
        log.err("Error logging packets")
def run(self, cd_path=None): """ Run main event loop. """ # Set stdin non blocking and raw fdesc.setNonBlocking(sys.stdin) tcattr = termios.tcgetattr(sys.stdin.fileno()) tty.setraw(sys.stdin.fileno()) # Report size self._send_size() self.socket.sendall( pickle.dumps(('_start-interaction', { 'cd_path': cd_path, }))) self._read_loop() sys.stdout.write('\n') # Reset terminal state termios.tcsetattr(sys.stdin, termios.TCSAFLUSH, tcattr)
def __init__(self, pty, rootHandler):
    """Interactive shell state bound to a pty.

    Sets up line-editing state (history, cursor, vi-mode flag) and an
    internal self-pipe through which messages can be pushed to appear
    while the shell is waiting for input.
    """
    self.pty = pty
    self.stdin = pty.stdin
    self.stdout = pty.stdout
    self.root = rootHandler
    self.lines_history = []
    self.history_position = 0  # 0 is behind the history, -1 is browsing one backwards
    # Saved terminal attributes, restored when leaving raw mode.
    self.tcattr = termios.tcgetattr(self.stdin.fileno())
    self.line = []  # Character array
    self.insert_pos = 0
    self.scroll_pos = 0  # Horizontal scrolling
    self.vi_navigation = False  # In vi-navigation mode (instead of insert mode.)
    # Additional pipe through which this shell can receive messages while
    # waiting for IO.
    r, w = os.pipe()
    # Buffer size 0 = unbuffered (Python 2 fdopen signature).
    self._extra_stdout = os.fdopen(w, 'w', 0)
    self._extra_stdin = os.fdopen(r, 'r', 0)
    fdesc.setNonBlocking(self._extra_stdin)
    self.currently_running = None
class TuntapPort(base.BasePort): """A Port that reads and writes packets from/to a TUN/TAP-device. TODO: Share general start/stop etc implementation details with twisted.internet.udp.Port. """ maxThroughput = 256 * 1024 # max bytes we read in one eventloop iteration def __init__(self, interface, proto, maxPacketSize=8192, reactor=None): if components.implements(proto, ethernet.IEthernetProtocol): self.ethernet = 1 else: self.ethernet = 0 assert components.implements(proto, ip.IIPProtocol) # XXX: fix me base.BasePort.__init__(self, reactor) self.interface = interface self.protocol = proto self.maxPacketSize = maxPacketSize self.setLogStr() def __repr__(self): return "<%s on %s>" % (self.protocol.__class__, self.interface) def startListening(self): """Create and bind my socket, and begin listening on it. This is called on unserialization, and must be called after creating a server to begin listening on the specified port. """ self._bindSocket() self._connectToProtocol() def _bindSocket(self): log.msg("%s starting on %s" % (self.protocol.__class__, self.interface)) try: fd, name = opentuntap(name=self.interface, ethernet=self.ethernet, packetinfo=0) except OSError, e: raise error.CannotListenError, (None, self.interface, e) fdesc.setNonBlocking(fd) self.interface = name self.connected = 1 self.fd = fd