def bind(self, local_endpoint):
    """binds the socket to the given local endpoint"""
    if self._is_bound:
        raise AlreadyBoundError()
    try:
        self._sock.bind(local_endpoint)
    except _socket.timeout:
        raise TimeoutError()
    except _socket.error, (errno, info):
        if errno in timeout_errnos:
            raise TimeoutError()
        else:
            raise BindError(errno, info)
    # record the state that accept()/bind() test above (assumes
    # _is_bound is a plain attribute)
    self._is_bound = True

def accept(self):
    """accepts a connection -- returns a real-socket that should be
    wrap()ped by a subclass of ClientSocket"""
    if not self._is_bound:
        raise NotBoundError()
    try:
        newsock, addrinfo = self._sock.accept()
    except _socket.timeout:
        raise TimeoutError()
    except _socket.error, (errno, info):
        if errno in timeout_errnos:
            raise TimeoutError()
        else:
            raise AcceptError(errno, info)
    # return the accepted socket, per the docstring (addrinfo is
    # available here if callers also need the peer address)
    return newsock

def connect(self, endpoint):
    """connects this socket to a remote endpoint. if the socket is not
    already bound, it is automatically bound to a free local endpoint"""
    if self._is_connected:
        raise AlreadyConnectedError()
    try:
        self._sock.connect(endpoint)
    except _socket.timeout:
        raise TimeoutError()
    except _socket.error, (errno, info):
        if errno in timeout_errnos:
            raise TimeoutError()
        else:
            raise ConnectError(errno, info)
    # record the state that send()/connect() test above (assumes
    # _is_connected is a plain attribute)
    self._is_connected = True

def send(self, data):
    """sends the given data over the socket, returns the number of bytes
    actually transmitted"""
    if not self._is_connected:
        raise NotConnectedError()
    try:
        return self._sock.send(data)
    except _socket.timeout:
        raise TimeoutError()
    except _socket.error, (errno, info):
        if errno in timeout_errnos:
            raise TimeoutError()
        else:
            raise SocketError(errno, info)

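# The four methods above all translate low-level socket failures into the
# module's own exception hierarchy. Below is a minimal, self-contained
# sketch of that errno-translation pattern, written for Python 2 to match
# the except-comma syntax used above. The exception classes and the
# `timeout_errnos` set are assumptions mirroring the names in this module,
# not the module's actual definitions.
import errno
import socket as _socket

timeout_errnos = frozenset([errno.EAGAIN, errno.EWOULDBLOCK, errno.ETIMEDOUT])

class SocketError(Exception): pass
class TimeoutError(SocketError): pass
class ConnectError(SocketError): pass

def connect_with_translation(endpoint, timeout=5.0):
    """Connect a raw TCP socket, translating low-level errors the same
    way the wrapper methods above do."""
    sock = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
    sock.settimeout(timeout)
    try:
        sock.connect(endpoint)
    except _socket.timeout:
        raise TimeoutError()
    except _socket.error, (err, info):
        if err in timeout_errnos:
            raise TimeoutError()
        raise ConnectError(err, info)
    return sock
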
def run(self):
    with open(self._path, 'rb') as r:
        last_match = self.start_from
        started = time.time()
        while not self.stopped():
            r.seek(last_match)
            stdout = r.read()
            if not stdout:
                time.sleep(0.1)
            else:
                match = self.compiled[0].search(stdout)
                if match:
                    logging.debug('Pattern %r found in runner STDOUT.'
                                  % match.re.pattern)
                    self.compiled.pop(0)
                    last_match = self.start_from
                    if not self.compiled:
                        self.stop()
                        break
            # Check the deadline on every iteration, including the ones
            # where no output has appeared yet.
            if time.time() - started > self.timeout:
                r.seek(self.start_from)
                stdout = r.read()
                self.error = TimeoutError(
                    'Runner {!r} did not have patterns {!r}'
                    ' after {} seconds.'
                    .format(self.runner_name,
                            [rx.pattern for rx in self.compiled],
                            self.timeout))
                self.stop()
                break

def run(self):
    started = time.time()
    while not self.stopped():
        msgs = len(self.sink.data)
        if msgs and msgs > self.position:
            while self.position < msgs:
                sink_data = self.func(self.sink.data[self.position])
                for val in list(self.values):
                    if sink_data == val:
                        self.values.discard(val)
                        logging.log(1, "{} matched on value {!r}.".format(
                            self.name, val))
                        if not self.values:
                            self.stop()
                            break
                self.position += 1
        if time.time() - started > self.timeout:
            self.error = TimeoutError('{}: has timed out after {} seconds'
                                      ', with {} messages, before '
                                      'receiving the awaited values '
                                      '{!r}.'.format(self.name,
                                                     self.timeout,
                                                     msgs,
                                                     self.values))
            self.stop()
            break
        time.sleep(0.1)

def send(self, data, addr):
    try:
        return self._sock.sendto(data, addr)
    except _socket.timeout:
        raise TimeoutError()
    except _socket.error, (errno, info):
        raise SocketError(errno, info)

def wait_for_cluster_to_resume_processing(runners, timeout=30):
    # Wait until all workers have resumed processing
    waiting = set()
    for r in runners:
        if not r.is_alive():
            continue
        obs = ObservabilityNotifier(cluster_status_query,
                                    r.external,
                                    tests=is_processing,
                                    timeout=timeout)
        waiting.add(obs)
        obs.start()
    # Cycle through waiting until it's empty or an error occurs
    t0 = time.time()
    while True:
        for obs in list(waiting):
            # short join
            obs.join(0.05)
            if obs.error:
                raise obs.error
            if obs.is_alive():
                continue
            else:
                waiting.remove(obs)
        # check completion
        if waiting:
            # check timeout!
            if time.time() - t0 > timeout:
                raise TimeoutError(
                    "Timed out after {} seconds while waiting "
                    "for cluster to resume processing.".format(timeout))
        else:
            return  # done!

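# A self-contained sketch of the short-join supervision loop used above:
# briefly join() each worker thread so errors surface quickly while a
# wall-clock deadline still applies. The `workers` here are plain threads
# carrying an optional `error` attribute; this is a stand-in for the real
# ObservabilityNotifier, not its implementation.
import time

def supervise(workers, timeout=30):
    waiting = set(workers)
    t0 = time.time()
    while waiting:
        for w in list(waiting):
            w.join(0.05)  # short join keeps the loop responsive
            if getattr(w, 'error', None):
                raise w.error
            if not w.is_alive():
                waiting.remove(w)
        if time.time() - t0 > timeout:
            raise TimeoutError("Timed out after {} seconds while waiting "
                               "for {} workers.".format(timeout,
                                                        len(waiting)))
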
def timeout_handler():
    p.stop()
    error = ("Message handler {} in {} for message {} exceeded timeout: {}"
             .format(self.message_handler,
                     self.message_handler.__module__,
                     message.id,
                     message.body))
    self.logger.error(error)
    if self.exception_handler is not None:
        self.exception_handler(message, TimeoutError(error))

def wait_for_sender(self, sender=-1, timeout=30):
    logging.debug("wait_for_sender(sender={}, timeout={})"
                  .format(sender, timeout))
    if not isinstance(sender, Sender):
        sender = self.senders[sender]
    sender.join(timeout)
    if sender.error:
        raise sender.error
    if sender.is_alive():
        raise TimeoutError('Sender did not complete in the expected '
                           'period')

def _doLookup(self, req, timeout, callback=None, **kwargs):
    """
    Performs the actual lookup(s), handling all the socket connections
    """
    ret = None
    timeout = float(timeout)
    # poll() objects take the POLL* constants, not the epoll EPOLL* ones
    mask = select.POLLIN | select.POLLPRI
    fdMap = {}
    p = select.poll()
    for resolver in self.resolvers:
        # Send the request to all resolvers
        sock = self._getSock(resolver, timeout)
        sock.sendall(req.getBuf())
        fd = sock.fileno()
        p.register(fd, mask)
        fdMap[fd] = sock
        if self.useFirst:
            break
    # Close the buffer and free the memory used, now that the request
    # has been sent to every resolver
    req.close()
    # poll() takes its timeout in milliseconds
    res = p.poll(int(timeout * 1000))
    if res:
        # Get the first result in the list
        fd, event = res[0]
        sock = fdMap[fd]
        packet = sock.recv(65535)
        ret = drr.DnsResult(packet)
    # Close all the sockets
    for s in fdMap.itervalues():
        s.close()
    if res:
        if ret.id != req.id:
            # Make sure the ids match
            raise ResError('Result id, %d, does not match ' % ret.id +
                           'request id, %d. Possible forgery' % req.id)
    else:
        # Timed out
        if self.useFirst:
            tried = [self.resolvers[0]]
        else:
            tried = self.resolvers
        raise TimeoutError('Hit timeout of %f when querying %r' %
                           (timeout, tried))
    return ret

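# A self-contained illustration of the poll()-with-timeout idiom that
# _doLookup relies on (Unix only; select.poll is unavailable on Windows).
# The names here are local to the example, not part of the resolver module.
import select
import socket

def wait_readable(sock, timeout_s):
    """Return True if `sock` becomes readable within timeout_s seconds."""
    p = select.poll()
    p.register(sock.fileno(), select.POLLIN | select.POLLPRI)
    return bool(p.poll(int(timeout_s * 1000)))  # poll() wants milliseconds

if __name__ == '__main__':
    rx = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    rx.bind(('127.0.0.1', 0))
    tx = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    print(wait_readable(rx, 0.1))   # False: nothing has been sent yet
    tx.sendto(b'ping', rx.getsockname())
    print(wait_readable(rx, 0.1))   # True: a datagram is waiting
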
def run(self):
    with open(self._path, 'rb') as r:
        started = time.time()
        while not self.stopped():
            r.seek(0)
            stdout = r.read()
            if not stdout:
                time.sleep(0.1)
            elif self.pattern.search(stdout):
                logging.debug('Application reports it is ready.')
                self.stop()
                break
            # Check the deadline even while no output has appeared yet.
            if time.time() - started > self.timeout:
                outputs = runners_output_format(self.runners)
                self.error = TimeoutError(
                    'Application did not report as ready after {} '
                    'seconds. It had the following outputs:\n===\n{}'
                    .format(self.timeout, outputs))
                self.stop()
                break

def run(self):
    waiting = set()
    for r in self.runners:
        if not r.is_alive():
            continue
        obs = ObservabilityNotifier(cluster_status_query,
                                    r.external,
                                    tests=is_processing,
                                    timeout=self.timeout)
        waiting.add(obs)
        obs.start()
    # Cycle through waiting until it's empty or an error occurs
    t0 = time.time()
    while not self.stopped():
        for obs in list(waiting):
            # short join
            obs.join(self.interval)
            if obs.error:
                self.stop(obs.error)
                break
            if obs.is_alive():
                continue
            else:
                logging.log(1, "ObservabilityNotifier completed: {}"
                            .format(obs))
                waiting.remove(obs)
        # check completion
        if waiting:
            # check timeout!
            if time.time() - t0 > self.timeout:
                self.stop(TimeoutError(
                    "Timed out after {} seconds while waiting "
                    "for cluster to resume processing.".format(
                        self.timeout)))
                break
        else:
            break  # done!

def run(self):
    started = time.time()
    while not self.stopped():
        msgs = len(self.sink.data)
        if msgs > self.expected:
            if not self.allow_more:
                self.error = ExpectationError(
                    '{}: has received too many messages. '
                    'Expected {} but got {}.'.format(
                        self.name, self.expected, msgs))
            self.stop()
            break
        if msgs == self.expected:
            self.stop()
            break
        if time.time() - started > self.timeout:
            self.error = TimeoutError('{}: has timed out after {} seconds'
                                      ', with {} messages. Expected {} '
                                      'messages.'.format(
                                          self.name, self.timeout, msgs,
                                          self.expected))
            self.stop()
            break
        time.sleep(0.1)

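# The run() methods above share one shape: poll a condition, sleep
# briefly, and record a TimeoutError before stopping once a deadline
# passes. Below is a minimal, self-contained sketch of that shape; this
# StoppableThread is a stand-in for whatever base class these notifiers
# actually extend, not the real one.
import threading
import time

class StoppableThread(threading.Thread):
    def __init__(self):
        super(StoppableThread, self).__init__()
        self._stop_event = threading.Event()
        self.error = None

    def stop(self):
        self._stop_event.set()

    def stopped(self):
        return self._stop_event.is_set()

class ConditionWaiter(StoppableThread):
    """Stop when `condition()` is true; record a TimeoutError otherwise."""
    def __init__(self, condition, timeout=30, interval=0.1):
        super(ConditionWaiter, self).__init__()
        self.condition = condition
        self.timeout = timeout
        self.interval = interval

    def run(self):
        started = time.time()
        while not self.stopped():
            if self.condition():
                self.stop()
                break
            if time.time() - started > self.timeout:
                self.error = TimeoutError(
                    'Condition not met after {} seconds'.format(
                        self.timeout))
                self.stop()
                break
            time.sleep(self.interval)
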
def run_multi(self, queries, cached=False):
    """
    Function: run_multi
    -------------------
    Runs multiple SQL statements at once.
    """
    # Consume old results if needed.
    [row for row in self.cursor]
    sql_list = split(queries)

    # Consume any additional result-sets that might have been left
    # on the connection.
    # try:
    #     while self.cursor.nextset():
    #         pass
    # except Error:
    #     pass

    result = Result()
    for sql in sql_list:
        sql = sql.rstrip().rstrip(";")
        if len(sql) == 0:
            continue
        query_results = Cache.get(sql)

        # Run the query if caching is disabled or the results are not
        # yet in the cache.
        if not query_results or not cached:
            try:
                self.clear_cursor()
                self.cursor.execute(sql)
            # except DatabaseError as e:
            #     if 'already exists' in str(e):
            #         print("[warning: %s]" % str(e))
            #     else:
            #         # Reraise the exception
            #         raise e

            # If the query times out.
            except mysql.connector.errors.OperationalError as e:
                raise TimeoutError(e)
            # If something is wrong with their query.
            except mysql.connector.errors.ProgrammingError as e:
                if 'already exists' in str(e):
                    log("[warning: %s]" % str(e))
                else:
                    raise DatabaseError(e)
            # If the query can't be run as a single query, attempt to do
            # it with a multi-line query.
            except mysql.connector.errors.Error as e:
                print("ERROR while executing SQL: %s" % sql)
                print(str(e))
                raise DatabaseError(e)
            query_results = self.get_results()
            if cached:
                Cache.put(sql, query_results)
        result = query_results

    # If no longer in a transaction, remove all savepoints.
    if not self.db.in_transaction:
        self.savepoints = []
    return result

def _on_timeout(signum, frame):
    raise TimeoutError(err_msg)

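# _on_timeout has the (signum, frame) signature of a signal handler. A
# plausible wiring (assumed here; the registration code is not shown in
# the source) uses SIGALRM to bound a blocking call, Unix only:
import signal

def run_with_alarm(func, seconds, err_msg):
    """Run func(), raising TimeoutError if it exceeds `seconds` (an int)."""
    def _on_timeout(signum, frame):
        raise TimeoutError(err_msg)
    old = signal.signal(signal.SIGALRM, _on_timeout)
    signal.alarm(seconds)
    try:
        return func()
    finally:
        signal.alarm(0)                      # cancel any pending alarm
        signal.signal(signal.SIGALRM, old)   # restore the previous handler
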
def get_connection_info(self, timeout=10):
    is_connected = self.event.wait(timeout)
    if not is_connected:
        raise TimeoutError("{} Couldn't get connection info after {}"
                           " seconds".format(self.__base_name__, timeout))
    return self.sock.getsockname()