def handle_error(self):
    """Log the currently active uncaught exception, then close the channel.

    A bare ``raise`` re-raises the in-flight exception so that
    ``traceback.format_exc()`` has a full traceback to render.
    """
    try:
        raise
    except Exception:
        tb = traceback.format_exc()
        logger.error(tb)
    self.close()
def handle_accepted(self, sock, addr):
    """Called when remote client initiates a connection.

    Builds a protocol handler for the new socket, enforces the global
    connection limit and the per-source-IP limit, and returns the
    handler on success (None otherwise).
    """
    handler = None
    ip = None
    try:
        handler = self.handler(sock, self, ioloop=self.ioloop)
        if not handler.connected:
            return
        ip = addr[0]
        self.ip_map.append(ip)
        # For performance and security reasons we should always set a
        # limit for the number of file descriptors that socket_map
        # should contain.  When we're running out of such limit we'll
        # use the last available channel for sending a 421 response
        # to the client before disconnecting it.
        if not self._accept_new_cons():
            handler.handle_max_cons()
            return
        # accept only a limited number of connections from the same
        # source address.
        if self.max_cons_per_ip:
            if self.ip_map.count(ip) > self.max_cons_per_ip:
                handler.handle_max_cons_per_ip()
                return
        try:
            handler.handle()
        except Exception:
            # FIX: was a bare ``except:``; that also trapped
            # SystemExit/KeyboardInterrupt, preventing a clean shutdown.
            handler.handle_error()
        else:
            return handler
    except Exception:
        # This is supposed to be an application bug that should
        # be fixed.  We do not want to tear down the server though
        # (DoS).  We just log the exception, hoping that someone
        # will eventually file a bug.  References:
        # - https://github.com/giampaolo/pyftpdlib/issues/143
        # - https://github.com/giampaolo/pyftpdlib/issues/166
        # - https://groups.google.com/forum/#!topic/pyftpdlib/h7pPybzAx14
        logger.error(traceback.format_exc())
        if handler is not None:
            handler.close()
        else:
            # Handler construction failed after the ip was registered:
            # undo the registration so the per-IP count stays accurate.
            if ip is not None and ip in self.ip_map:
                self.ip_map.remove(ip)
def _validate_with_url(self, username, password, url):
    """Return True if *username*/*password* pass HTTP Basic auth at *url*.

    Issues a GET with an ``Authorization: Basic`` header and treats any
    2xx status as success.  Any error (network, parsing, ...) is logged
    and reported as a failed validation (False).
    """
    try:
        parsed_url, connection = http_connection(url)
        if parsed_url.query:
            url_path = parsed_url.path + '?' + parsed_url.query
        else:
            url_path = parsed_url.path
        connection.putrequest('GET', url_path)
        # FIX: ``b'%s:%s' % (str, str)`` raises TypeError on Python 3,
        # and interpolating the *bytes* result into 'Basic %s' would
        # produce the literal "b'...'" repr, i.e. a malformed header.
        # Encode explicitly, then decode the base64 output back to str.
        pair = ('%s:%s' % (username, password)).encode('utf-8')
        credentials = base64.b64encode(pair).decode('ascii')
        connection.putheader('Authorization', 'Basic %s' % credentials)
        connection.endheaders()
        response = connection.getresponse()
        # Any 2xx status counts as authenticated.
        return response.status // 100 == 2
    except Exception as error:
        logger.error(error)
        return False
def get_teacher_msgs():
    """Fetch all (userName, password, name) rows from the ``teacher`` table.

    Returns the tuple of rows on success, or None if the connection or
    the query fails.  Connection parameters come from the module-level
    ``db_*`` settings.
    """
    try:
        db = pymysql.connect(host=db_host, user=db_user,
                             password=db_password, db=db_name,
                             port=db_port)
    except Exception as error:
        logger.error(error)
        return None
    sql = "select userName, password, name from teacher"
    results = None
    try:
        # FIX: the cursor was previously acquired outside any guarded
        # region and never closed; close it explicitly and keep the
        # connection teardown in ``finally``.
        cur = db.cursor()
        try:
            cur.execute(sql)
            results = cur.fetchall()
        finally:
            cur.close()
    except Exception as error:
        logger.error(error)
        results = None
    finally:
        db.close()
    return results
def close(self):
    """Closes the IOLoop, freeing any resources used.

    Drops the singleton reference, closes every registered channel
    (lowest fd first, for determinism), then cancels all scheduled
    calls.  Errors during teardown are logged, never raised.
    """
    self.__class__._instance = None
    # free connections
    instances = sorted(self.socket_map.values(), key=lambda x: x._fileno)
    for inst in instances:
        try:
            inst.close()
        except OSError as err:
            # FIX: modern ``except ... as`` instead of the legacy
            # ``sys.exc_info()[1]`` retrieval.  EBADF just means the fd
            # was already closed; anything else is worth logging.
            if err.args[0] != errno.EBADF:
                logger.error(traceback.format_exc())
        except Exception:
            logger.error(traceback.format_exc())
    self.socket_map.clear()
    # free scheduled functions
    for x in self.sched._tasks:
        try:
            if not x.cancelled:
                x.cancel()
        except Exception:
            logger.error(traceback.format_exc())
    # empty the task list in place so shared references stay valid
    del self.sched._tasks[:]
def poll(self):
    """Run the scheduled functions due to expire soonest and
    return the timeout of the next one (if any, else None).
    """
    now = time.time()
    calls = []
    # Pop every task whose deadline has passed; cancelled tasks are
    # discarded (and the cancellation counter balanced), the rest are
    # collected for execution.
    while self._tasks:
        if now < self._tasks[0].timeout:
            break
        call = heapq.heappop(self._tasks)
        if call.cancelled:
            self._cancellations -= 1
        else:
            calls.append(call)
    for call in calls:
        # A task flagged ``_repush`` is put straight back on the heap
        # without running (presumably rescheduled by a periodic timer —
        # its timeout is expected to have been updated elsewhere).
        if call._repush:
            heapq.heappush(self._tasks, call)
            call._repush = False
            continue
        try:
            call.call()
        except Exception:
            # A failing callback must not kill the loop; log and go on.
            logger.error(traceback.format_exc())
    # remove cancelled tasks and re-heapify the queue if the
    # number of cancelled tasks is more than the half of the
    # entire queue
    if self._cancellations > 512 \
            and self._cancellations > (len(self._tasks) >> 1):
        self._cancellations = 0
        self._tasks = [x for x in self._tasks if not x.cancelled]
        self.reheapify()
    # Time until the next deadline (clamped at 0); IndexError means the
    # heap is empty, so fall through and implicitly return None.
    try:
        return max(0, self._tasks[0].timeout - now)
    except IndexError:
        pass
def logerror(msg):
    """Deprecated module-level helper: log *msg* at ERROR level."""
    # FIX: the deprecation warning previously named ``logline()``;
    # this function is ``logerror()``.
    _depwarn("pyftpdlib.ftpserver.logerror() is deprecated")
    logger.error(msg)
def logerror(self, msg):
    """Log *msg* at ERROR level with a timestamp and session-identifying
    prefix built from this instance's attributes."""
    stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
    session = self.log_prefix % self.__dict__
    logger.error("[%s] %s %s" % (stamp, session, msg))