def handle(self): debugger = self.getDebugger() if debugger: uu = debugger.handle_start(self.peerName, PyRQTimeUtils.getTime()) quiet = self.getQuiet() response = None if not quiet: self.logger.debug(">> HANDLE start @%(T)s\r\n"%{"T":PyRQTimeUtils.getTime()}) try: marshaller = self.getMarshaller() if self.abort: response = Errors.TooManyClientsError() self._doResponse(self.request, marshaller.package(response)) return target = self.getTarget() if not quiet: self.logger.info("Serving new client [%(C)s]\r\n"%{"C":self.client}) while True: try: self._work(target, marshaller) except FinishedWithSocket, _e: if not quiet: self.logger.debug("Finished with socket for client: %(C)s\r\n"%{"C":self.client}) return except Exception, _e: # This is fatal, so just return. self.logger.error("Error in work for client: %(C)s\r\n%(T)s"%{"C":self.client, "T":traceback.format_exc()}) return
def _create(self, marshaller, pp, clientData, quiet):
    r"""
    @summary: Handle a CREATE message: allocate a unique namespace, build a
    RQueueWrapper for it, register it in clientData["queues"] under the client
    lock, and ACK the new namespace back to the client.
    @param marshaller: packages responses for the wire.
    @param pp: the CREATE message; supplies queueType, maxsize and pollInterval.
    @param clientData: shared per-server client state, guarded by the client lock.
    @param quiet: suppress logging when True.
    """
    if not quiet:
        self.logger.debug("CREATE - START @%(T)s"%{"T":PyRQTimeUtils.getTime()})
    try:
        debugger = self.getDebugger()
        socketDetails = self.client_address
        queueType = pp.queueType
        maxsize = pp.maxsize
        pollInterval = pp.pollInterval
        if debugger:
            uu = debugger.create_start(self.peerName, PyRQTimeUtils.getTime(), queueType, maxsize, pollInterval)
        namespace = uuid.uuid4().hex
        with self.getClientLock():
            # There is a remote chance that the uuids will be identical so:
            # (fix: test membership on the dict directly instead of
            # materialising .keys() into a new list on every CREATE).
            if namespace in clientData["queues"]:
                namespace += namespace
            q = RQueueWrapper(queueType, self.getNewLogger, namespace, maxsize=maxsize, pollInterval=pollInterval, quiet=quiet)
            clientData["queues"][namespace] = q
        if debugger:
            uu = debugger.create_end(self.peerName, PyRQTimeUtils.getTime(), namespace, uu=uu)
        if not quiet:
            self.logger.info("CREATE - namespace: %(R)s from %(C)s\r\n"%{"R":namespace, "C":socketDetails})
        self._doResponse(self.request, marshaller.package(Messages.ACK(namespace)))
    finally:
        if not quiet:
            self.logger.debug("CREATE - END @%(T)s"%{"T":PyRQTimeUtils.getTime()})
def closeClient(c):
    # Best-effort teardown of a single client connection: tell the peer its
    # namespace is closed, then shut the socket down.  All failures ignored.
    try:
        sock = c.request
        self._doResponse(sock, marshaller.package(Messages.CLOSED(namespace=namespace)))
        # Give the CLOSED response a moment to reach the peer before shutdown.
        PyRQTimeUtils.delayTime(1)
        sock.shutdown(socket.SHUT_WR)
        sock.close()
        del c
    except Exception as _e:
        # The socket may already be gone; closing is best-effort by design.
        pass
def _addClient(self, who, namespace):
    r"""
    @summary: Register 'who' as a client of 'namespace'.  The per-namespace
    client list is created on first use; mutation happens under the client lock.
    """
    dbg = self.getDebugger()
    if dbg:
        uu = dbg.addClient_start(self.peerName, PyRQTimeUtils.getTime(), namespace)
    with self.getClientLock():
        # setdefault creates the list the first time this namespace is seen.
        self._clients.setdefault(namespace, []).append(who)
    if dbg:
        dbg.addClient_end(self.peerName, PyRQTimeUtils.getTime(), namespace, uu=uu)
def _work(self, target, marshaller):
    r"""
    @summary: Main per-connection request loop: receive raw bytes from the
    socket, feed them to the marshaller, and dispatch each decoded RRQPackage
    message (CREATE/CLOSE/PUT/GET/QSIZE/MAXQSIZE/DEBUG) to its handler.
    Runs until getTerminate() becomes True or FinishedWithSocket is raised.
    """
    debugger = self.getDebugger()
    tOut = self.getReadTimeout()
#    tOut = 1
    self.request.setblocking(True)
    self.request.settimeout(tOut)
    clientData = self.getClientData()
    while self.getTerminate()==False:
        # Receive the data from the socket:
        quiet = self.getQuiet()
        try:
            data = self.request.recv(self.getRecvChunkSize())
        except socket.timeout, _e:
            # NOTE(review): looks like debug residue left in; confirm whether
            # this stdout write is intentional.
            print ">"
        else:
            if data=='':
                # Zero-byte recv means the peer disconnected cleanly.
                raise RRQHandler.FinishedWithSocket()
            # Pump the data into the marshaller, piping the packages onto the target:
            for p in marshaller.receive(data):
                if not quiet:
                    self.logger.debug("WORK %(P)s for: %(C)s\r\n"%{"P":p, "C":self.client_address})
                if isinstance(p, RRQPackage):
                    pp = p.data
                    namespace = p.namespace
                    if debugger:
                        uu = debugger.work_start(self.peerName, PyRQTimeUtils.getTime(), pp)
                    if not quiet:
                        self.logger.debug("WORK data: %(P)s for: %(C)s\r\n"%{"P":pp, "C":self.client_address})
                    # Each operation registers this handler as a client of the
                    # namespace, applies any configured stall, then executes.
                    if isinstance(pp, Messages.CREATE):
                        self._addClient(self, namespace)
                        self._stall("CREATE", clientData, quiet)
                        self._create(marshaller, pp, clientData, quiet)
                    elif isinstance(pp, Messages.CLOSE):
                        self._addClient(self, namespace)
                        self._stall("CLOSE", clientData, quiet)
                        self._close(marshaller, namespace, clientData, quiet)
                    elif isinstance(pp, Messages.PUT):
                        self._addClient(self, namespace)
                        self._stall("PUT", clientData, quiet)
                        self._put(namespace, pp, clientData, marshaller, quiet)
                    elif isinstance(pp, Messages.GET):
                        self._addClient(self, namespace)
                        self._stall("GET", clientData, quiet)
                        self._get(namespace, pp, clientData, marshaller, quiet)
                    elif isinstance(pp, Messages.QSIZE):
                        self._addClient(self, namespace)
                        self._stall("QSIZE", clientData, quiet)
                        self._qSize(namespace, clientData, marshaller, quiet)
                    elif isinstance(pp, Messages.MAXQSIZE):
                        self._addClient(self, namespace)
                        self._stall("MAXQSIZE", clientData, quiet)
                        self._maxQSize(namespace, clientData, marshaller, quiet)
                    elif isinstance(pp, Messages.DEBUG):
                        self._debug(pp, marshaller, clientData, quiet)
                    if debugger:
                        # 'uu' was set under the same 'if debugger:' guard above.
                        debugger.work_end(self.peerName, PyRQTimeUtils.getTime(), uu=uu)
def _stall(self, where, clientData, quiet):
    r"""
    @summary: Optionally delay handling of operation 'where' by the stall time
    configured in clientData["stall"][where].  Missing or invalid stall
    configuration means "no stall" (any exception is deliberately swallowed).
    @param where: operation label, e.g. "CREATE", "PUT".
    @param clientData: shared client state holding the optional "stall" map.
    @param quiet: suppress logging when True.
    """
    if not quiet:
        self.logger.debug("STALL %(W)s.\r\n"%{"W":where})
    try:
        t = clientData["stall"][where]
        if not quiet:
            self.logger.info("STALL time: %(W)s\r\n"%{"W":t})
        debugger = self.getDebugger()
        # Fix: the original tested "(t!=None) and (t>0)" twice in a row; 't'
        # cannot change between delay_start and delay_end, so one guard suffices.
        if (t is not None) and (t > 0):
            if debugger:
                uu = debugger.delay_start(self.peerName, PyRQTimeUtils.getTime(), t, where)
            PyRQTimeUtils.delayTime(t)
            if debugger:
                uu = debugger.delay_end(self.peerName, PyRQTimeUtils.getTime(), uu=uu)
    except Exception as _e:
        # Stall configuration is optional; e.g. a missing key simply means no delay.
        pass
def _maxQSize(self, namespace, clientData, marshaller):
    r"""
    @summary: Respond to the client with the maximum size of the queue for
    'namespace', or an error when the queue is unknown or already closed.
    """
    dbg = self.getDebugger()
    if dbg:
        uu = dbg.maxqsize_start(self.peerName, PyRQTimeUtils.getTime(), namespace)
    with self.getClientLock():
        if namespace in clientData["queues"]:
            result = Messages.MAXQSIZE(clientData["queues"][namespace].maxQSize())
        elif namespace in clientData["stale-queues"]:
            # The queue existed once but has been shut down.
            result = Errors.ClosedError()
        else:
            result = Errors.NoSuchQueue(namespace)
    if dbg:
        dbg.maxqsize_end(self.peerName, PyRQTimeUtils.getTime(), result, uu=uu)
    self._doResponse(self.request, marshaller.package(result))
def setup(self):
    r"""
    @summary: Per-connection initialisation: resolve the peer name, create the
    per-peer logger and register this handler as a new client.  When the
    server refuses further clients, self.abort is set instead.
    """
    self.peerName = self._getPeerName()
    dbg = self.getDebugger(inst=RRQDebugger())
    if dbg:
        uu = dbg.setup_start(self.peerName, PyRQTimeUtils.getTime())
    quiet = self.getQuiet()
    self.logger = self.getNewLogger(self.peerName)
    if not quiet:
        self.logger.debug(">> SETUP start @ %(T)s"%{"T":PyRQTimeUtils.getTime()})
    def _initClientData(getter, setter):
        # Install default client data the first time this connection is seen.
        if getter() == None:
            setter(self._getDefaultClientData())
    try:
        self.client = self.newClient(self, setClientData=_initClientData)
    except Errors.TooManyClientsError as _e:
        # NOTE(review): self.abort appears to be assigned only on this path,
        # while handle() reads it unconditionally — confirm it is initialised
        # elsewhere (e.g. as a class attribute).
        self.abort = True
def closeClients(self, namespace, clientData, marshaller, quiet):
    r"""
    @summary: Notify and disconnect every client registered against
    'namespace', farming the socket teardown out to daemon Timer threads in
    batches of at most RRQHandler.MAX_CLOSE_THREADS.
    NOTE(review): this chunk appears truncated — 'numClients' and 'qData' are
    computed but unused in the code visible here; presumably consumed by the
    remainder of the function outside this view.
    """
    debugger = self.getDebugger()
    if debugger:
        uu = debugger.closeClients_start(self.peerName, PyRQTimeUtils.getTime(), namespace)
    with self.getClientLock():
        numClients = len(self._clients)
        if namespace in self._clients:
            # Close clients in parallel:
            def closeClient(clients):
                # Drain one batch of clients, closing each best-effort.
                while len(clients)>0:
                    c = clients.pop()
                    try:
                        self._doResponse(c.request, marshaller.package(Messages.CLOSED(namespace=namespace)))
                        PyRQTimeUtils.delayTime(1)
                        c.request.shutdown(socket.SHUT_WR)
                        c.request.close()
                        del c
                    except Exception, _e:
                        pass
            maxPackages = RRQHandler.MAX_CLOSE_THREADS
            if len(self._clients[namespace])>0:
                packages = self._packageClients(self._clients[namespace], maxPackages)
                # Now farm out the work packages:
                for index, clients in enumerate(packages):
                    if not quiet:
                        self.logger.debug("Closing client for namespace: %(Q)s\r\n"%{"Q":namespace})
                    # Timer(0, ...) == run-immediately worker thread.
                    t = threading.Timer(0, closeClient, args=[clients])
                    t.setDaemon(True)
                    t.setName("ClientCloser_%(U)s_%(I)s"%{"U":namespace, "I":index})
                    t.start()
            del self._clients[namespace]
        # Now discard the buffer and queue:
        try:
            qData = clientData["queues"][namespace]
        except Exception, _e:
            pass
def closeClients(self, namespace, clientData, marshaller, quiet):
    r"""
    @summary: Variant of closeClients that spawns one daemon Timer thread per
    registered client of 'namespace' to deliver a CLOSED response and tear the
    socket down.
    NOTE(review): this chunk appears truncated — 'numClients' and 'qData' are
    computed but unused in the code visible here; presumably consumed by the
    remainder of the function outside this view.
    """
    debugger = self.getDebugger()
    if debugger:
        uu = debugger.closeClients_start(self.peerName, PyRQTimeUtils.getTime(), namespace)
    with self.getClientLock():
        numClients = len(self._clients)
        if namespace in self._clients:
            # Close clients in parallel:
            while len(self._clients[namespace])>0:
                client = self._clients[namespace].pop()
                if not quiet:
                    self.logger.debug("Closing client for namespace: %(Q)s\r\n"%{"Q":namespace})
                def closeClient(c):
                    # Best-effort teardown of one client connection.
                    try:
                        self._doResponse(c.request, marshaller.package(Messages.CLOSED(namespace=namespace)))
                        PyRQTimeUtils.delayTime(1)
                        c.request.shutdown(socket.SHUT_WR)
                        c.request.close()
                        del c
                    except Exception, _e:
                        pass
                # Timer(0, ...) == run-immediately worker thread.
                t = threading.Timer(0, closeClient, args=[client])
                t.setDaemon(True)
                t.setName("ClientCloser_%(U)s_%(C)s"%{"U":namespace, "C":client})
                t.start()
            del self._clients[namespace]
        # Now discard the buffer and queue:
        try:
            qData = clientData["queues"][namespace]
        except Exception, _e:
            pass
def finish(self):
    r"""
    @summary: Tear-down hook: notify the server that this client has finished.
    Failures are deliberately ignored — the connection is going away regardless.
    """
    dbg = self.getDebugger()
    if dbg:
        uu = dbg.finish_start(self.peerName, PyRQTimeUtils.getTime())
    try:
        self.clientFinished(self.client)
    except Exception as _e:
        # Best-effort notification only.
        pass
def get(self, block=True, timeout=None): data = None # Calculate the maxTime: pollInterval = self._pollInterval timeStart = PyRQTimeUtils.getTime() maxTime = None if block==True: try: maxTime = timeStart + timeout except: pass if not self._quiet: self._logger.debug("GET blocking, maxTime: %(MT)s, timeStart: %(TS)s"%{"MT":maxTime, "TS":timeStart, "TO":timeout}) else: if not self._quiet: self._logger.debug("GET non-blocking") try: while True: timeDelay = None with self._lock: if self._closed==True: break if block==False: if len(self._data)==0: raise Empty() data = self._data.pop(0) break else: if len(self._data)==0: timeNow = PyRQTimeUtils.getTime() # Calculate the maxTime: if maxTime==None: remainingTime = pollInterval else: remainingTime = maxTime-timeNow if not self._quiet: self._logger.debug("GET blocking, maxTime: %(MT)s, timeStart: %(TS)s, timeNow: %(TN)s, remainingTime: %(RT)s"%{"TN":timeNow, "MT":maxTime, "TS":timeStart, "TO":timeout, "RT":remainingTime}) if remainingTime<=0: raise Empty() else: # Wait for minPeriod and try again: timeDelay = min(pollInterval, min(pollInterval, remainingTime)) else: data = self._data.pop(0) break if timeDelay!=None: PyRQTimeUtils.delayTime(timeDelay) except Exception, _e: raise
def _debug(self, cmd, marshaller, clientData, quiet):
    r"""
    @summary: Configure the debug mechanism from a client DEBUG command.
    Only DEBUG_START is handled by this variant; other commands are ignored.
    """
    dbg = self.getDebugger()
    if not isinstance(cmd, Messages.DEBUG_START):
        return
    try:
        result = dbg.start(self.peerName, PyRQTimeUtils.getTime(), cmd.filename, cmd.server)
    except Exception as e:
        self.logger.error("DEBUG_START filename: <%(F)s>, server: <%(S)s>...\r\n%(E)s.\r\n"%{"E":e, "S":cmd.server, "F":cmd.filename})
def _close(self, marshaller, namespace, clientData, quiet):
    r"""
    @summary: Handle a CLOSE message for 'namespace'.  A repeat close of an
    already-stale queue is answered with CLOSED(result=False); otherwise all
    clients of the namespace are closed and FinishedWithSocket is raised to
    terminate this handler's work loop.
    """
    if not quiet:
        self.logger.debug("CLOSE - START @%(T)s"%{"T":PyRQTimeUtils.getTime()})
    try:
        debugger = self.getDebugger()
        if debugger:
            uu = debugger.close_start(self.peerName, PyRQTimeUtils.getTime(), namespace)
        try:
            with self.getClientLock():
                alreadyClosed = (namespace in clientData["stale-queues"])
            # NOTE(review): the staleness check is snapshotted under the lock
            # and acted on after release — closeClients() re-acquires the same
            # lock, so this ordering avoids a self-deadlock; confirm against
            # the lock type returned by getClientLock().
            if alreadyClosed==True:
                # Subsequent close already!
                self._doResponse(self.request, marshaller.package(Messages.CLOSED(result=False, namespace=namespace)))
                return
            self.closeClients(namespace, clientData, marshaller, quiet)
            # Successful close ends this connection's processing loop.
            raise RRQHandler.FinishedWithSocket()
        finally:
            if debugger:
                debugger.close_end(self.peerName, PyRQTimeUtils.getTime(), namespace, uu=uu)
    finally:
        if not quiet:
            self.logger.debug("CLOSE - END @%(T)s"%{"T":PyRQTimeUtils.getTime()})
def get(self, block=True, timeout=None): data = None # Calculate the maxTime: pollInterval = self._pollInterval timeStart = PyRQTimeUtils.getTime() maxTime = None if block==True: try: maxTime = timeStart + timeout except: pass try: while True: timeDelay = None with self._lock: if self._closed==True: break if block==False: if len(self._data)==0: raise Empty() data = self._data.pop(0) break else: if len(self._data)==0: timeNow = PyRQTimeUtils.getTime() # Calculate the maxTime: if maxTime==None: remainingTime = pollInterval else: remainingTime = maxTime-timeNow if remainingTime<=0: raise Empty() else: # Wait for minPeriod and try again: timeDelay = min(pollInterval, min(pollInterval, remainingTime)) else: data = self._data.pop(0) break if timeDelay!=None: PyRQTimeUtils.delayTime(timeDelay) except Exception, _e: # self._logger.error("EEEEEEEERRRRRRRRROOOOOOOOORRRRRRRR.total: %(T)s.\r\n%(NS)s\r\n"%{"T":self._totalGot, "NS":traceback.format_exc()}) raise
def _maxQSize(self, namespace, clientData, marshaller, quiet):
    r"""
    @summary: Report the maximum size of the queue for 'namespace' back to the
    client, or an error when the queue is unknown or already closed.
    """
    if not quiet:
        self.logger.debug("MAXQSIZE - START @%(T)s"%{"T":PyRQTimeUtils.getTime()})
    try:
        dbg = self.getDebugger()
        if dbg:
            uu = dbg.maxqsize_start(self.peerName, PyRQTimeUtils.getTime(), namespace)
        if not quiet:
            self.logger.debug("MAXQSIZE - ns = %(D)s"%{"D":namespace})
        with self.getClientLock():
            if namespace in clientData["queues"]:
                result = Messages.MAXQSIZE(clientData["queues"][namespace].maxQSize())
            elif namespace in clientData["stale-queues"]:
                # The queue existed once but has been shut down.
                result = Errors.ClosedError()
            else:
                result = Errors.NoSuchQueue(namespace)
        if dbg:
            dbg.maxqsize_end(self.peerName, PyRQTimeUtils.getTime(), namespace, result, uu=uu)
        if not quiet:
            self.logger.info("MAXQSIZE result for %(NS)s - %(D)s"%{"D":result, "NS":namespace})
        self._doResponse(self.request, marshaller.package(result))
    finally:
        if not quiet:
            self.logger.debug("MAXQSIZE - END @%(T)s"%{"T":PyRQTimeUtils.getTime()})
def _get(self, namespace, pp, clientData, marshaller, quiet): if not quiet: self.logger.debug("GET - START @%(T)s"%{"T":PyRQTimeUtils.getTime()}) try: # Get the data from the queue: debugger = self.getDebugger() q = self._getQ(namespace, clientData, marshaller) if q!=None: block=pp.block timeout=pp.timeout if debugger: uu = debugger.get_start(self.peerName, PyRQTimeUtils.getTime(), namespace, block, timeout) result = None r""" FYI: if block=True, make the timeout=1 and loop --> allows us to abort when another client closes this 'Queue'. """ try: data = q.get(block=block, timeout=timeout) except Empty, result: if not quiet: self.logger.debug("Queue empty for [%(NS)s].\r\n"%{"NS":namespace}) except Errors.ClosedError, result: pass
def _put(self, namespace, pp, clientData, marshaller, quiet, pollDelay=0.1): if not quiet: self.logger.debug("PUT - START @%(T)s"%{"T":PyRQTimeUtils.getTime()}) try: # Add the data onto the queue for namespace: debugger = self.getDebugger() socketDetails = self.client_address q = self._getQ(namespace, clientData, marshaller) if q!=None: # Now loop every 'pollDelay' seconds, checking if we're closed. block = pp.block timeout = pp.timeout data = pp.data if debugger: uu = debugger.put_start(self.peerName, PyRQTimeUtils.getTime(), namespace, block, timeout, data) result = Messages.ACK(namespace) try: if not quiet: self.logger.debug("PUT - %(D)s"%{"D":data}) if block==False: if not quiet: self.logger.debug("PUT start (non-blocking) @%(T)s - %(D)s"%{"D":data, "T":namespace}) q.put(data, block=False) else: if not quiet: self.logger.debug("PUT start (blocking) @%(T)s - %(D)s"%{"D":data, "T":namespace}) maxTime=None if timeout!=None: maxTime = PyRQTimeUtils.getTime()+timeout while True: with self.getClientLock(): try: q = clientData["queues"][namespace].q() except Exception, _e: raise Errors.ClosedError() else: try: q.put(data, block=False) except Full, _e: pass else: break
def _debug(self, cmd, marshaller, clientData, quiet): r""" @summary: Configure the debug mechanism. """ debugger = self.getDebugger() try: if isinstance(cmd, Messages.DEBUG_START): if not quiet: self.logger.info("DEBUG_START filename: <%(F)s>, server: <%(S)s>...\r\n"%{"S":cmd.server, "F":cmd.filename}) try: result = debugger.start(self.peerName, PyRQTimeUtils.getTime(), cmd.filename, cmd.server) except Exception, result: if not quiet: self.logger.error("Unable to start debugger!") elif isinstance(cmd, Messages.DEBUG_STOP): result = debugger.stop()
self.logger.debug("PUT %(R)s OK from: %(W)s. "%{"R":ref, "W":iface.getLastSockDetails()}) with plock: ePuts.append(eData) self.logger.info("PUT %(R)s now: %(RR)s."%{"R":ref, "RR":len(ePuts)}) self.logger.error("PUT %(R)s COMPLETE."%{"R":ref}) putters.release() # Now create the data: for k in xrange(0, numData): J = k+1 delay = self.random.random() eData = "hello.world...%(C)s"%{"C":J} allPutData.append(eData) dataQ.put((delay, eData)) self.logger.debug("DATA contains %(D)s items...\r\n"%{"D":numData}) while dataQ.qsize()!=numData: PyRQTimeUtils.delayTime(1) putterThreads = [] for i in xrange(0, numPutters): k = i+1 t = threading.Thread(target=doPut, args=[initialDelay, k, putTimeout, dataQ]) t.setDaemon(True) t.setName("Parallel_PUT_%(C)s"%{"C":k}) self._timers.append(t) t.start() putterThreads.append(t) for k in xrange(0, numPutters): putters.acquire() # Now wait for all the data to be received: maxPutDelay = (1+(count*numGetters*1))*2 self.logger.info("Now waiting for all data to be put and got for MAX: %(T)s seconds..."%{"T":maxPutDelay}) gotData = []
def _work(self, target, marshaller):
    r"""
    @summary: Request-processing loop (mock/test variant): receive bytes from
    the socket, decode packages via the marshaller and dispatch each message
    type inline.  CREATE is handled locally with a MockQObject rather than by
    a _create() helper; CLOSE embeds the stale-queue check inline.
    """
    debugger = self.getDebugger()
    tOut = self.getReadTimeout()
#    tOut = 1
    self.request.setblocking(True)
    self.request.settimeout(tOut)
    clientData = self.getClientData()
    while self.getTerminate()==False:
        # Receive the data from the socket:
        quiet = self.getQuiet()
        try:
            data = self.request.recv(self.getRecvChunkSize())
        except socket.timeout, _e:
            # NOTE(review): looks like debug residue left in; confirm whether
            # this stdout write is intentional.
            print ">"
        else:
            if data=='':
                # Zero-byte recv means the peer disconnected cleanly.
                raise FinishedWithSocket()
            # Pump the data into the marshaller, piping the packages onto the target:
            for p in marshaller.receive(data):
                if not quiet:
                    self.logger.debug("WORK %(P)s for: %(C)s\r\n"%{"P":p, "C":self.client_address})
                if isinstance(p, RRQPackage):
                    pp = p.data
                    namespace = p.namespace
                    if debugger:
                        uu = debugger.work_start(self.peerName, PyRQTimeUtils.getTime(), pp)
                    if not quiet:
                        self.logger.info("WORK data: %(P)s for: %(C)s\r\n"%{"P":pp, "C":self.client_address})
                    if isinstance(pp, Messages.CREATE):
                        # Allocate a fresh namespace and register a mock queue.
                        namespace = uuid.uuid4().hex
                        with self.getClientLock():
                            # There is a remote chance that the uuids will be identical so:
                            if namespace in clientData["queues"].keys():
                                namespace += namespace
                            queueType = pp.queueType
                            maxsize = pp.maxsize
                            pollInterval = pp.pollInterval
                            q = MockQObject(queueType, self.getNewLogger, namespace, maxsize=maxsize, pollInterval=pollInterval, quiet=quiet)
                            clientData["queues"][namespace] = q
                        self._addClient(self, namespace)
                        self._stall("Messages.CREATE", clientData, quiet)
                        self._doResponse(self.request, marshaller.package(Messages.ACK(namespace)))
                    elif isinstance(pp, Messages.CLOSE):
                        with self.getClientLock():
                            alreadyClosed = (namespace in clientData["stale-queues"])
                        if alreadyClosed==True:
                            # Subsequent close already!
                            self._doResponse(self.request, marshaller.package(Messages.CLOSED(result=False, namespace=namespace)))
                            return
                        self._addClient(self, namespace)
                        self._stall("Messages.CLOSE", clientData, quiet)
                        self.closeClients(namespace, clientData, marshaller, quiet)
                        raise FinishedWithSocket()
                    elif isinstance(pp, Messages.PUT):
                        self._addClient(self, namespace)
                        self._stall("PUT", clientData, quiet)
                        self._put(namespace, pp, clientData, marshaller, quiet)
                    elif isinstance(pp, Messages.GET):
                        self._addClient(self, namespace)
                        self._stall("Messages.GET", clientData, quiet)
                        self._get(namespace, pp, clientData, marshaller, quiet)
                    elif isinstance(pp, Messages.QSIZE):
                        self._addClient(self, namespace)
                        self._qSize(namespace, clientData, marshaller)
                    elif isinstance(pp, Messages.MAXQSIZE):
                        self._addClient(self, namespace)
                        self._maxQSize(namespace, clientData, marshaller)
                    elif isinstance(pp, Messages.DEBUG):
                        self._debug(pp, marshaller, clientData, quiet)
                    if debugger:
                        # 'uu' was set under the same 'if debugger:' guard above.
                        debugger.work_end(self.peerName, PyRQTimeUtils.getTime(), uu=uu)
def _setClosed(self, namespace):
    r"""
    @summary: Mark this queue closed: record the close time for 'namespace'
    in the shared PyRQIface.closedQueues registry, then raise the local flag.
    """
    # Remember when this namespace was shut down (shared registry).
    PyRQIface.closedQueues[namespace] = PyRQTimeUtils.getTime()
    # NOTE(review): error level for a normal close looks deliberate-but-odd;
    # confirm the intended severity.
    self._logger.error("Q now closed.")
    # Flag last, so readers observing _closed see the registry entry too.
    self._closed = True
t.start() del self._clients[namespace] # Now discard the buffer and queue: try: qData = clientData["queues"][namespace] except Exception, _e: pass else: # Tell each GET client to close: q = qData.q() for _ in xrange(numClients): try: q.put(_CLOSE(), block=False) except Exception, _e: pass PyRQTimeUtils.delayTime(1) qData.close() del clientData["queues"][namespace] clientData["stale-queues"].append(namespace) if debugger: debugger.closeClients_end(self.peerName, PyRQTimeUtils.getTime(), uu=uu) def _getPeerName(self): name = self.request.getpeername() return ":".join([str(name[0]), str(name[1])]) def setup(self): self.peerName = self._getPeerName() debugger = self.getDebugger(inst=RRQDebugger()) if debugger: uu = debugger.setup_start(self.peerName, PyRQTimeUtils.getTime()) quiet = self.getQuiet() self.logger = self.getNewLogger(self.peerName)