Example #1
    def run_in_executor(
        self,
        executor: Optional[concurrent.futures.Executor],
        func: Callable[..., _T],
        *args: Any
    ) -> Awaitable[_T]:
        """Runs a function in a ``concurrent.futures.Executor``. If
        ``executor`` is ``None``, the IO loop's default executor will be used.

        Use `functools.partial` to pass keyword arguments to ``func``.

        .. versionadded:: 5.0
        """
        if executor is None:
            if not hasattr(self, "_executor"):
                from tornado.process import cpu_count

                self._executor = concurrent.futures.ThreadPoolExecutor(
                    max_workers=(cpu_count() * 5)
                )  # type: concurrent.futures.Executor
            executor = self._executor
        c_future = executor.submit(func, *args)
        # Concurrent Futures are not usable with await. Wrap this in a
        # Tornado Future instead, using self.add_future for thread-safety.
        t_future = Future()  # type: Future[_T]
        self.add_future(c_future, lambda f: chain_future(f, t_future))
        return t_future
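As the docstring says, ``run_in_executor`` only forwards positional arguments, so keyword arguments go through ``functools.partial``. A minimal usage sketch under that contract (``blocking_task`` is an illustrative stand-in for real blocking work):

import functools
import time

from tornado.ioloop import IOLoop


def blocking_task(seconds, label='work'):
    # stands in for any blocking call you want off the IOLoop thread
    time.sleep(seconds)
    return label


async def main():
    loop = IOLoop.current()
    # positional arguments can be passed directly...
    await loop.run_in_executor(None, blocking_task, 1)
    # ...but keyword arguments must be bound with functools.partial
    result = await loop.run_in_executor(
        None, functools.partial(blocking_task, 1, label='demo'))
    print(result)


IOLoop.current().run_sync(main)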
Example #2
    def __init__(self):

        self._is_linux = (platform.system() == r'Linux')  # determine the current platform

        self._process_id = 0
        self._process_num = cpu_count()  # CPU count

        self._sockets = bind_sockets(Config.Port)  # create socket objects and bind the port

        if self._is_linux:  # if running on Linux, fork worker processes

            self._process_id = fork_processes(self._process_num)

        self._init_options()

        AsyncIOMainLoop().install()

        self._event_loop = asyncio.get_event_loop()
        self._server = HTTPServer(Application(**self._settings))

        signal.signal(signal.SIGINT, self.stop)
        signal.signal(signal.SIGTERM, self.stop)

        from model.base import initialize as model_initialize

        IOLoop.instance().run_sync(model_initialize)
Example #3
def main():
    
    '''
    Read the configuration and start the service.
    '''
    config = ConfigParser()
    config.read('config.cfg')
    log_path = config.get("default", "logpath")
    num_processes = int(config.get("default", "num_processes"))
    port = int(config.get("default", "port"))
    
    # initialize the logging service
    initLog(log_path)
    
    # the number of server processes must not exceed the CPU core count
    cpu_count = process.cpu_count()
    if num_processes > cpu_count:
        print("This server only has " + str(cpu_count) + " cores; the configured value is too large.")
        num_processes = 0  # 0 makes HTTPServer.start fork one process per core
    
    # start the service
    app = App(config)
    http_server = httpserver.HTTPServer(app)
    http_server.bind(port)
    http_server.start(num_processes)
    tornado.ioloop.IOLoop.instance().start()
Example #4
    def __init__(self, *args, **kwargs):
        super(WebHandler, self).__init__(*args, **kwargs)
        self.io_loop = IOLoop.current()

        self.executor = ThreadPoolExecutor(cpu_count(),
                                           thread_name_prefix='WEBSERVER-' +
                                           self.__class__.__name__.upper())
Example #5
def run(custom_server_settings=None):

    # redefine tornado's default log level (used if it is not set elsewhere)
    options.logging = None if settings.RUN_IN_BACKGROUND else settings.LOG_LEVEL

    # get options from cmd
    parse_command_line()

    # create log file if needed
    if settings.RUN_IN_BACKGROUND:
        Application.setup_logging()

    # create http server
    Application.SERVER_INSTANCE = httpserver.HTTPServer(Application(custom_server_settings))
    if settings.RUN_IN_BACKGROUND:
        Application.create_pid_file()
        Application.SERVER_INSTANCE.bind(options.port, options.host)
        cpu_count = process.cpu_count()
        Application.SERVER_INSTANCE.start(4 if cpu_count < 4 else cpu_count)  # forking
    else:
        Application.SERVER_INSTANCE.listen(options.port, options.host)

    # graceful shutdown
    signal.signal(signal.SIGTERM, lambda sig, frame: ioloop.IOLoop.instance().add_callback(Application.shutdown))
    signal.signal(signal.SIGINT, lambda sig, frame: ioloop.IOLoop.instance().add_callback(Application.shutdown))

    ioloop.IOLoop.instance().start()
Example #6
    def run(self):
        start_url = target_url + blog_path
        yield self._fetch_blog_list_page(start_url)
        for _ in xrange(cpu_count()):
            self._fetch_essay_content()

        yield self._q.join()
Example #8
def start_stats(root_topic="snowpear"):
    context = zmq.Context(cpu_count())
    pub = context.socket(zmq.PUB)
    pub.bind(options.stats_endpoint)
    handler = PUBHandler(pub)
    handler.root_topic = root_topic
    logger = logging.getLogger()
    logger.addHandler(handler)
Example #9
 def __init__(self, **kwargs):
     kwargs["cfg"] = { "mysql": { 'host':'localhost', 'database':'mysql', 
                                  'user':'******', 'password':'******' },
                       "redis": { 'max_connections': cpu_count() } }
     kwargs["redis"] = tornadoredis.ConnectionPool(wait_for_available = True,
         max_connections = kwargs["cfg"]["redis"]["max_connections"])
     handlers = [(r"/", IndexHandler, kwargs), (r"/ls", CmdHandler)]
     tornado.web.Application.__init__(self, handlers)
Example #10
class WebHandler(BaseHandler):
    def __init__(self, *args, **kwargs):
        super(WebHandler, self).__init__(*args, **kwargs)
        self.io_loop = IOLoop.current()

    executor = ThreadPoolExecutor(cpu_count())

    @authenticated
    @coroutine
    def get(self, route, *args, **kwargs):
        try:
            # route -> method obj
            route = route.strip('/').replace('.', '_').replace('-',
                                                               '_') or 'index'
            method = getattr(self, route)

            results = yield self.async_call(method)

            self.finish(results)

        except AttributeError:
            logger.log(
                'Failed doing webui request "{0}": {1}'.format(
                    route, traceback.format_exc()), logger.DEBUG)
            raise HTTPError(404)

    @run_on_executor
    def async_call(self, function):
        try:
            kwargs = self.request.arguments
            for arg, value in six.iteritems(kwargs):
                if len(value) == 1:
                    kwargs[arg] = xhtml_escape(value[0])
                elif isinstance(value, six.string_types):
                    kwargs[arg] = xhtml_escape(value)
                elif isinstance(value, list):
                    kwargs[arg] = [xhtml_escape(v) for v in value]
                else:
                    raise Exception

            result = function(**kwargs)
            return result
        except OSError as e:
            return Template(
                "Looks like we do not have enough disk space to render the page! {error}"
            ).render_unicode(data=e.message)
        except Exception:
            logger.log(
                'Failed doing webui callback: {0}'.format(
                    (traceback.format_exc())), logger.ERROR)
            raise

    # post uses get method
    post = get
Example #11
    def _initialize(self, **kwargs):

        Utils.log.info(
            f'{package_slogan}\nhagworm version {package_version}\npython version {sys.version}'
        )

        self._process_num = kwargs.get(r'process_num', 1)
        self._async_initialize = kwargs.get(r'async_initialize', None)

        self._background_service = kwargs.get(r'background_service', None)
        self._background_process = kwargs.get(r'background_process', None)

        self._process_id = 0
        self._process_num = self._process_num if self._process_num > 0 else cpu_count()

        if self._background_service is None:
            pass
        elif not isinstance(self._background_service, TaskInterface):
            raise TypeError(
                r'Background Service Does Not Implement Task Interface')

        if self._background_process is None:
            pass
        elif isinstance(self._background_process, TaskInterface):
            self._process_num += 1
        else:
            raise TypeError(
                r'Background Process Does Not Implement Task Interface')

        log_level = kwargs.get(r'log_level', r'info').upper()
        log_file_path = kwargs.get(r'log_file_path', None)

        if log_file_path:

            Utils.log.remove()

            log_file_path = Utils.path.join(kwargs[r'log_file_path'],
                                            r'runtime_{time}.log')

            Utils.log.add(sink=log_file_path,
                          level=log_level,
                          enqueue=True,
                          backtrace=kwargs.get(r'debug', False),
                          rotation=r'00:00',
                          retention=kwargs.get(r'log_file_num_backups', 7))

        else:

            Utils.log.level(log_level)

        logging.getLogger(None).addHandler(_InterceptHandler())
Example #12
class Config(object):
    cfg_path = '/etc/ansible/api.cfg'
    host = '127.0.0.1'
    port = 8765
    sign_key = 'YOUR_SIGNATURE_KEY_HERE'
    log_path = '/var/log/ansible-api.log'
    allow_ip = []
    thread_pool_size = cpu_count() * 2  # adapt the thread pool size to the number of CPU cores

    dir_script = ''
    dir_playbook = ''

    def __init__(self):
        cf = ConfigParser.ConfigParser()
        cf.read(Config.cfg_path)
        try:
            cf.options('default')
        except:
            pass
        else:
            if cf.has_option('default', 'host'):
                self.host = cf.get('default', 'host')
            if cf.has_option('default', 'port'):
                self.port = int(cf.get('default', 'port'))  # cf.get returns a str
            if cf.has_option('default', 'sign_key'):
                self.sign_key = cf.get('default', 'sign_key')
            if cf.has_option('default', 'log_path'):
                self.log_path = cf.get('default', 'log_path')
            if cf.has_option('default', 'allow_ip'):
                self.allow_ip = cf.get('default', 'allow_ip').split()
            if cf.has_option('default', 'thread_pool_size'):
                self.thread_pool_size = int(cf.get('default', 'thread_pool_size'))

        try:
            cf.options('directory')
        except:
            pass
        else:
            if cf.has_option('directory', 'script'):
                self.dir_script = cf.get('directory', 'script')
            if cf.has_option('directory', 'playbook'):
                self.dir_playbook = cf.get('directory', 'playbook')

    @staticmethod
    def Get(attr):
        cfg = Config()
        return getattr(cfg, attr, '')
Example #13
    def __init__(self, **kwargs):

        self._debug = kwargs.get(r'debug', False)

        self._process_num = kwargs.get(r'process_num', 1)
        self._async_initialize = kwargs.get(r'async_initialize', None)

        self._background_service = kwargs.get(r'background_service', None)
        self._background_process = kwargs.get(r'background_process', None)

        self._process_id = 0
        self._process_num = self._process_num if self._process_num > 0 else cpu_count()

        # background service task object
        if self._background_service is None:
            pass
        elif not isinstance(self._background_service, TaskInterface):
            raise TypeError(
                r'Background Service Does Not Implement Task Interface')

        # service process task object; the service process does not listen on a port
        if self._background_process is None:
            pass
        elif isinstance(self._background_process, TaskInterface):
            self._process_num += 1
        else:
            raise TypeError(
                r'Background Process Does Not Implement Task Interface')

        self._init_logger(
            kwargs.get(r'log_level', r'info').upper(),
            kwargs.get(r'log_handler', None),
            kwargs.get(r'log_file_path', None),
            kwargs.get(r'log_file_rotation', DEFAULT_LOG_FILE_ROTATOR),
            kwargs.get(r'log_file_retention', 0xff))

        environment = Utils.environment()

        Utils.log.info(f'{package_slogan}'
                       f'hagworm {package_version}\n'
                       f'python {environment["python"]}\n'
                       f'system {" ".join(environment["system"])}')

        install_uvloop()

        self._event_loop = None
Example #14
 def __init__(self, **kwargs):
     kwargs["cfg"] = {
         "mysql": {
             'host': 'localhost',
             'database': 'mysql',
             'user': '******',
             'password': '******'
         },
         "redis": {
             'max_connections': cpu_count()
         }
     }
     kwargs["redis"] = tornadoredis.ConnectionPool(
         wait_for_available=True,
         max_connections=kwargs["cfg"]["redis"]["max_connections"])
     handlers = [(r"/", IndexHandler, kwargs), (r"/ls", CmdHandler)]
     tornado.web.Application.__init__(self, handlers)
Example #15
class WebHandler(BaseHandler):
    """
    Base Handler for the web server
    """
    def __init__(self, *args, **kwargs):
        super(WebHandler, self).__init__(*args, **kwargs)
        self.io_loop = IOLoop.current()

    executor = ThreadPoolExecutor(cpu_count())

    @authenticated
    @coroutine
    def get(self, route, *args, **kwargs):
        try:
            # route -> method obj
            route = route.strip('/').replace('.', '_') or 'index'
            method = getattr(self, route)

            results = yield self.async_call(method)
            self.finish(results)

        except Exception:
            logger.log(
                u'Failed doing web ui request {route!r}: {error}'.format(
                    route=route, error=traceback.format_exc()), logger.DEBUG)
            raise HTTPError(404)

    @run_on_executor
    def async_call(self, function):
        try:
            kwargs = self.request.arguments
            for arg, value in iteritems(kwargs):
                if len(value) == 1:
                    kwargs[arg] = value[0]

            result = function(**kwargs)
            return result
        except Exception:
            logger.log(
                u'Failed doing web ui callback: {error}'.format(
                    error=traceback.format_exc()), logger.ERROR)
            raise

    # post uses get method
    post = get
Example #16
    def __init__(self, entries, timeout, max_clients):
        assert entries

        task_id = process.task_id()

        if options.multi_processes == -1:
            process_num = 1
        elif options.multi_processes == 0:
            process_num = process.cpu_count()
        else:
            process_num = options.multi_processes

        self._io_loop = ioloop.IOLoop()
        self._client = httpclient.AsyncHTTPClient(self._io_loop, max_clients=max_clients)

        self.timeout = timeout
        self.max_clients = max_clients
        self.requests = dict((self.get_request(e), e) for e in entries)
        self.partial_requests = list(self.requests.keys())[task_id::process_num]
        self.count = len(self.partial_requests)
Example #17
def output_sys_info():
    """
    输出服务器相关配置信息
    :param kwargs:
    :return:
    """
    tbl_out = tabulate([
        ['sys_time', timetool.get_current_time_string()],
        ['dtlib', dtlib.VERSION],
        ['py_ver', platform.python_version()],
        ['server_ver', tornado.version],
        ['cpu_count', process.cpu_count()],
    ],
        tablefmt='grid')

    print(tbl_out)

    # print system info before startup so it shows up in the logs (Ubuntu)
    os.system('uname -a')
    os.system('ifconfig|grep addr')
Example #18
    def __init__(self, *args, **kwargs):
        kwargs["async"] = True

        if "thread_pool" in kwargs:
            self.__thread_pool = kwargs.pop("thread_pool")
        else:
            self.__thread_pool = futures.ThreadPoolExecutor(cpu_count())

        self.__connection = connect(*args, **kwargs)

        self.__io_loop = IOLoop.current()
        self.__connected = False

        log.debug("Trying to connect to postgresql")
        f = self.__wait()
        self.__io_loop.add_future(f, self.__on_connect)
        self.__queue = Queue()
        self.__has_active_cursor = False

        for method in ("get_backend_pid", "get_parameter_status"):
            setattr(self, method, self.__futurize(method))
Example #19
    def __init__(self, *args, **kwargs):
        kwargs['async'] = True

        if "thread_pool" in kwargs:
            self.__thread_pool = kwargs.pop('thread_pool')
        else:
            self.__thread_pool = futures.ThreadPoolExecutor(cpu_count())

        self.__connection = connect(*args, **kwargs)

        self.__io_loop = IOLoop.current()
        self.__connected = False

        log.debug("Trying to connect to postgresql")
        f = self.__wait()
        self.__io_loop.add_future(f, self.__on_connect)
        self.__queue = Queue()
        self.__has_active_cursor = False

        for method in ('get_backend_pid', 'get_parameter_status'):
            setattr(self, method, self.__futurize(method))
Example #20
def main():
    if not len(log_files):
        print 'please specify log files.'
        sys.exit(0)

    process_num = {
        -1: 1,
        0: process.cpu_count(),
    }.get(options.multi_processes, options.multi_processes)

    la = LogAnalyzer(options.max_clients, process_num)

    for log_file in log_files:
        with open(log_file) as f:
            for line in f.read().splitlines():
                la.append(LogEntry.make(line))

    la.run()
    la.print_stat()
    if plt:
        la.draw_figure(options.figure_file)
Example #22
def startup():
    
    define(r'port', 80, int, r'Server listen port')
    define(r'service', False, bool, r'Open Scheduled Tasks')
    
    options.parse_command_line()
    
    if(config.Static.Debug):
        options.parse_command_line([__file__, r'--service=true', r'--logging=debug'])
    
    settings = {
                r'handlers': router.urls,
                r'static_path': r'static',
                r'template_loader': Jinja2Loader(r'view'),
                r'debug': config.Static.Debug,
                r'gzip': config.Static.GZip,
                r'cookie_secret': config.Static.Secret,
                }
    
    sockets = bind_sockets(options.port)
    
    task_id = 0
    
    if(platform.system() == r'Linux'):
        task_id = fork_processes(cpu_count())
    
    server = HTTPServer(Application(**settings))
    
    server.add_sockets(sockets)
    
    if(task_id == 0 and options.service):
        service.start()
    
    signal.signal(signal.SIGINT, lambda *args : shutdown(server))
    signal.signal(signal.SIGTERM, lambda *args : shutdown(server))
    
    app_log.info(r'Startup http server No.{0}'.format(task_id))
    
    IOLoop.instance().start()
Example #23
    def run_in_executor(self, executor, func, *args):
        """Runs a function in a ``concurrent.futures.Executor``. If
        ``executor`` is ``None``, the IO loop's default executor will be used.

        Use `functools.partial` to pass keyword arguments to `func`.

        """
        if ThreadPoolExecutor is None:
            raise RuntimeError(
                "concurrent.futures is required to use IOLoop.run_in_executor")

        if executor is None:
            if not hasattr(self, '_executor'):
                from tornado.process import cpu_count
                self._executor = ThreadPoolExecutor(max_workers=(cpu_count() * 5))
            executor = self._executor
        c_future = executor.submit(func, *args)
        # Concurrent Futures are not usable with await. Wrap this in a
        # Tornado Future instead, using self.add_future for thread-safety.
        t_future = TracebackFuture()
        self.add_future(c_future, lambda f: chain_future(f, t_future))
        return t_future
Example #24
def start(settings):
    
    application = get_application(settings)
    
    http_server = HTTPServer(application)
    
    unix_socket_enabled = settings.get("unix_socket_enabled")
    sockets = []
    if unix_socket_enabled:
        server_unix_socket_file = settings.get("unix_socket_file")
        server_backlog = settings.get("backlog")
        # Use unix socket
        _logger.info('Bind unix socket file %s', server_unix_socket_file)
        socket = netutil.bind_unix_socket(server_unix_socket_file, 0o600, server_backlog)
        sockets.append(socket)
    else:
        server_port = settings.get("port")
        # Normal way to enable a port for listening the request
        _logger.info('Listen on port %d', server_port)
        sockets.extend(netutil.bind_sockets(server_port))
    
    process_count = settings.get("process_count")
    if not settings.get("debug") and process_count != 1:
        if process_count <= 0:
            process_count = process.cpu_count()
        elif process_count > 1:
            _logger.info('Start %d processes', process_count) 
        process.fork_processes(process_count)
        
    http_server.add_sockets(sockets)
    
    # Start Service
    _logger.info('Start tornado server')
    try:
        IOLoop.instance().start()
    except:
        _logger.fatal('Start tornado server failed', exc_info = True)
        raise
Example #25
    def run_in_executor(self, executor, func, *args):
        """Runs a function in a ``concurrent.futures.Executor``. If
        ``executor`` is ``None``, the IO loop's default executor will be used.

        Use `functools.partial` to pass keyword arguments to ``func``.

        .. versionadded:: 5.0
        """
        if ThreadPoolExecutor is None:
            raise RuntimeError(
                "concurrent.futures is required to use IOLoop.run_in_executor")

        if executor is None:
            if not hasattr(self, '_executor'):
                from tornado.process import cpu_count
                self._executor = ThreadPoolExecutor(max_workers=(cpu_count() * 5))
            executor = self._executor
        c_future = executor.submit(func, *args)
        # Concurrent Futures are not usable with await. Wrap this in a
        # Tornado Future instead, using self.add_future for thread-safety.
        t_future = Future()
        self.add_future(c_future, lambda f: chain_future(f, t_future))
        return t_future
Example #26
def fork_processes(num_processes, max_restarts=100):
    """Starts multiple worker processes.

    If ``num_processes`` is None or <= 0, we detect the number of cores
    available on this machine and fork that number of child
    processes. If ``num_processes`` is given and > 0, we fork that
    specific number of sub-processes.

    Since we use processes and not threads, there is no shared memory
    between any server code.

    Note that multiple processes are not compatible with the autoreload
    module (or the ``autoreload=True`` option to `tornado.web.Application`
    which defaults to True when ``debug=True``).
    When using multiple processes, no IOLoops can be created or
    referenced until after the call to ``fork_processes``.

    In each child process, ``fork_processes`` returns its *task id*, a
    number between 0 and ``num_processes``.  Processes that exit
    abnormally (due to a signal or non-zero exit status) are restarted
    with the same id (up to ``max_restarts`` times).  In the parent
    process, ``fork_processes`` returns None if all child processes
    have exited normally, but will otherwise only exit by throwing an
    exception.
    """
    global _task_id
    assert _task_id is None
    if num_processes is None or num_processes <= 0:
        num_processes = cpu_count()
    if ioloop.IOLoop.initialized():
        raise RuntimeError(
            "Cannot run in multiple processes: IOLoop instance "
            "has already been initialized. You cannot call "
            "IOLoop.instance() before calling start_processes()")
    logger.info("Starting %d processes", num_processes)
    children = {}

    def start_child(i):
        pid = os.fork()
        if pid == 0:
            # child process
            _reseed_random()
            global _task_id
            _task_id = i
            return i
        else:
            children[pid] = i
            return None

    for i in range(num_processes):
        id = start_child(i)
        if id is not None:
            return id
    global exiting
    exiting = False

    def receive_signal(sig, frame):
        logger.debug('Received signal')
        global exiting
        exiting = True
        for pid, taskid in children.items():
            os.kill(pid, signal.SIGTERM)

    signal.signal(signal.SIGTERM, receive_signal)
    signal.signal(signal.SIGINT, receive_signal)
    num_restarts = 0
    while children and not exiting:
        logger.debug('Exiting : %s' % exiting)
        try:
            pid, status = os.wait()
        except OSError as e:
            if errno_from_exception(e) == errno.EINTR:
                continue
            raise
        if pid not in children:
            continue
        id = children.pop(pid)
        if os.WIFSIGNALED(status):
            logger.warning("child %d (pid %d) killed by signal %d, restarting",
                           id, pid, os.WTERMSIG(status))
        elif os.WEXITSTATUS(status) != 0:
            logger.warning(
                "child %d (pid %d) exited with status %d, restarting", id, pid,
                os.WEXITSTATUS(status))
        else:
            logger.info("child %d (pid %d) exited normally", id, pid)
            continue
        num_restarts += 1
        if num_restarts > max_restarts:
            raise RuntimeError("Too many child restarts, giving up")
        new_id = start_child(id)
        if new_id is not None:
            return new_id
    # All child processes exited cleanly, so exit the master process
    # instead of just returning to right after the call to
    # fork_processes (which will probably just start up another IOLoop
    # unless the caller checks the return value).
    sys.exit(0)
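A condensed sketch of the contract described in the docstring: bind sockets in the parent, fork, and only then create the server and IOLoop (the handler and port are illustrative; Example #28 below follows the same pattern):

from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.netutil import bind_sockets
from tornado.process import fork_processes
from tornado.web import Application, RequestHandler


class PingHandler(RequestHandler):
    def get(self):
        self.write('pong')


sockets = bind_sockets(8000)  # bind in the parent, before forking
fork_processes(0)             # None or <= 0 forks one child per CPU core
server = HTTPServer(Application([(r'/ping', PingHandler)]))
server.add_sockets(sockets)   # each child serves the shared sockets
IOLoop.current().start()      # the IOLoop is created only after the fork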
Example #27
import signal

def task_id(tid):
    return "#%s" % tid if tid != None else "MAIN"

def sigterm_handler(signum, frame):
    print >> sys.stderr, "%s: SIGTERM received. Exiting..." % \
                         task_id(process.task_id())
    sys.exit(0)

signal.signal(signal.SIGTERM, sigterm_handler)

if __name__ == "__main__":
    sockets = [bind_unix_socket(sys.argv[1] if len(sys.argv)>1 else None)]

    if "fork" in os.__dict__:
        print "%s: %s cpu(s) detected, spawning..." % \
              (task_id(process.task_id()), process.cpu_count())
        child_id = task_id(process.fork_processes(0))
        if child_id != "MAIN":
            print "%s: listen on %s" % (child_id, sockets)
        else:
            print "%s: All child processes have exited normally. Exiting..." % \
                  child_id
            sys.exit(0)
    else: print "forking not available, use single-process."

    http_server = HTTPServer(WSGIContainer(app))
    http_server.add_sockets(sockets)
    IOLoop.instance().start()
Example #28
from tornado.ioloop import IOLoop
from tornado.process import fork_processes,cpu_count
from tornado.options import define,parse_config_file,options
from tornado.tcpserver import bind_sockets
from tornado.httpserver import HTTPServer

from Fujiwara import Application

if __name__ == '__main__':
    define('port',type=int,default=8080)
    define('key',type=str)
    define('recaptcha_pubkey',type=str)
    define('recaptcha_privkey',type=str)
    parse_config_file('config')

    socks = bind_sockets(options.port)

    fork_processes(cpu_count()+1)

    app = Application(
        key=options.key,
        recaptcha_privkey=options.recaptcha_privkey,
        recaptcha_pubkey=options.recaptcha_pubkey
    )
        
    server = HTTPServer(app)
    server.add_sockets(socks)
    
    IOLoop.instance().start()
Example #29
class RequestGetWebSshClientHandler(MixinRequestHandler):

    executor = ThreadPoolExecutor(max_workers=cpu_count() * 5)

    def initialize(self):
        self.result = dict(id=None, status=None, encoding=None)
        self.loop = IOLoop.current()

    def ssh_connect(self, args):
        ssh = SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        LOG.debug("ssh connect args: {}".format(args))
        dst_addr = args[:2]
        LOG.info("Connecting to {}:{}".format(*dst_addr))
        try:
            ssh.connect(*args, timeout=6)
        except socket.error:
            raise ValueError('Unable to connect to {}:{}'.format(*dst_addr))
        except paramiko.BadAuthenticationType:
            raise ValueError('Bad authentication type.')
        except paramiko.AuthenticationException:
            raise ValueError('Authentication failed.')
        except paramiko.BadHostKeyException:
            raise ValueError('Bad host key.')

        chan = ssh.invoke_shell(term='xterm')
        chan.setblocking(0)
        worker = Worker(self.loop, ssh, chan, dst_addr)
        worker.encoding = self.get_default_encoding(ssh)
        return worker

    def get_default_encoding(self, ssh):
        try:
            _, stdout, _ = ssh.exec_command('locale charmap')
        except paramiko.SSHException:
            result = None
        else:
            result = to_str(stdout.read().strip())

        return result if result else 'utf-8'

    @coroutine
    def get(self):
        self.render("webssh.html", debug=False)

    @coroutine
    def post(self):
        LOG.debug("Connection ID: {}".format(id))

        address_ip = self.get_client_ip()
        port = 13123
        workers = clients.get(address_ip, {})

        future = self.executor.submit(
            self.ssh_connect,
            ("192.168.2.38", "22", "root", "teeqee@123", None))
        try:
            worker = yield future
        except (ValueError, paramiko.SSHException) as exc:
            import traceback
            LOG.error(traceback.format_exc())
            self.send_fail_json(msg=str(exc))
            return
        else:
            if not workers:
                clients[address_ip] = workers
            worker.src_addr = (address_ip, port)
            workers[worker.id] = worker
            LOG.info(workers)
            self.loop.call_later(DELAY, recycle_worker, worker)
            self.result.update(id=worker.id, encoding=worker.encoding)
        LOG.info(self.result)
        self.write(self.result)
Example #30
class IndexHandler(MixinHandler, tornado.web.RequestHandler):

    executor = ThreadPoolExecutor(max_workers=cpu_count() * 5)

    def initialize(self, loop, policy, host_keys_settings):
        super(IndexHandler, self).initialize(loop)
        self.policy = policy
        self.host_keys_settings = host_keys_settings
        self.ssh_client = self.get_ssh_client()
        self.privatekey_filename = None
        self.debug = self.settings.get('debug', False)
        self.result = dict(id=None, status=None, encoding=None)

    def write_error(self, status_code, **kwargs):
        if swallow_http_errors and self.request.method == 'POST':
            exc_info = kwargs.get('exc_info')
            if exc_info:
                reason = getattr(exc_info[1], 'log_message', None)
                if reason:
                    self._reason = reason
            self.result.update(status=self._reason)
            self.set_status(200)
            self.finish(self.result)
        else:
            super(IndexHandler, self).write_error(status_code, **kwargs)

    def get_ssh_client(self):
        ssh = paramiko.SSHClient()
        ssh._system_host_keys = self.host_keys_settings['system_host_keys']
        ssh._host_keys = self.host_keys_settings['host_keys']
        ssh._host_keys_filename = self.host_keys_settings['host_keys_filename']
        ssh.set_missing_host_key_policy(self.policy)
        return ssh

    def get_privatekey(self):
        """
        name = 'privatekey'
        lst = self.request.files.get(name)
        logging.info("printing lst") 
        logging.info(lst)
        if lst:
            # multipart form
            self.privatekey_filename = lst[0]['filename']
            data = lst[0]['body']
            logging.info("printing data") 
            logging.info(data)
            value = self.decode_argument(data, name=name).strip()
        else:
            # urlencoded form
            value = self.get_argument(name, u'')

        logging.info("printing value") 
        logging.info(value)
        """
        with open("ssh/id_rsa", "r") as keyfile:
            value = keyfile.read()

        if len(value) > KEY_MAX_SIZE:
            raise InvalidValueError('Invalid private key: {}'.format(
                self.privatekey_filename))

        return value

    @classmethod
    def get_specific_pkey(cls, pkeycls, privatekey, password):
        logging.info('Trying {}'.format(pkeycls.__name__))
        try:
            pkey = pkeycls.from_private_key(io.StringIO(privatekey),
                                            password=password)
        except paramiko.PasswordRequiredException:
            raise InvalidValueError(
                'Need a password to decrypt the private key.')
        except paramiko.SSHException:
            pass
        else:
            return pkey

    @classmethod
    def get_pkey_obj(cls, privatekey, password, filename):
        bpass = to_bytes(password) if password else None

        pkey = cls.get_specific_pkey(paramiko.RSAKey, privatekey, bpass)\
            or cls.get_specific_pkey(paramiko.DSSKey, privatekey, bpass)\
            or cls.get_specific_pkey(paramiko.ECDSAKey, privatekey, bpass)\
            or cls.get_specific_pkey(paramiko.Ed25519Key, privatekey, bpass)

        if not pkey:
            if not password:
                error = 'Invalid private key: {}'.format(filename)
            else:
                error = ('Wrong password {!r} for decrypting the private key.'
                         ).format(password)
            raise InvalidValueError(error)

        return pkey

    def get_hostname(self):
        value = self.get_value('hostname')
        if not (is_valid_hostname(value) or is_valid_ip_address(value)):
            raise InvalidValueError('Invalid hostname: {}'.format(value))
        return value

    def get_port(self):
        value = self.get_argument('port', u'')
        if not value:
            return DEFAULT_PORT

        port = to_int(value)
        if port is None or not is_valid_port(port):
            raise InvalidValueError('Invalid port: {}'.format(value))
        return port

    def lookup_hostname(self, hostname, port):
        key = hostname if port == 22 else '[{}]:{}'.format(hostname, port)

        if self.ssh_client._system_host_keys.lookup(key) is None:
            if self.ssh_client._host_keys.lookup(key) is None:
                raise tornado.web.HTTPError(
                    403, 'Connection to {}:{} is not allowed.'.format(
                        hostname, port))

    def get_args(self):
        hostname = self.get_hostname()
        port = self.get_port()
        if isinstance(self.policy, paramiko.RejectPolicy):
            self.lookup_hostname(hostname, port)
        username = self.get_value('username')
        password = self.get_argument('password', u'')
        privatekey = self.get_privatekey()
        if privatekey:
            pkey = self.get_pkey_obj(privatekey, password,
                                     self.privatekey_filename)
            password = None
        else:
            pkey = None
        args = (hostname, port, username, password, pkey)
        logging.debug(args)
        return args

    def get_default_encoding(self, ssh):
        try:
            _, stdout, _ = ssh.exec_command('locale charmap')
        except paramiko.SSHException:
            result = None
        else:
            result = to_str(stdout.read().strip())

        return result if result else 'utf-8'

    def ssh_connect(self, args):
        ssh = self.ssh_client
        dst_addr = args[:2]
        logging.info('Connecting to {}:{}'.format(*dst_addr))

        try:
            ssh.connect(*args, timeout=6)
        except socket.error:
            raise ValueError('Unable to connect to {}:{}'.format(*dst_addr))
        except paramiko.BadAuthenticationType:
            raise ValueError('Bad authentication type.')
        except paramiko.AuthenticationException:
            raise ValueError('Authentication failed.')
        except paramiko.BadHostKeyException:
            raise ValueError('Bad host key.')

        chan = ssh.invoke_shell(term='xterm')
        chan.setblocking(0)
        worker = Worker(self.loop, ssh, chan, dst_addr, self.src_addr)
        worker.encoding = self.get_default_encoding(ssh)
        return worker

    def check_origin(self):
        event_origin = self.get_argument('_origin', u'')
        header_origin = self.request.headers.get('Origin')
        origin = event_origin or header_origin

        if origin:
            if not super(IndexHandler, self).check_origin(origin):
                raise tornado.web.HTTPError(
                    403, 'Cross origin operation is not allowed.')

            if not event_origin and self.origin_policy != 'same':
                self.set_header('Access-Control-Allow-Origin', origin)

    def head(self):
        pass

    def get(self):
        self.render('index.html', debug=self.debug)

    @tornado.gen.coroutine
    def post(self):
        if self.debug and self.get_argument('error', u''):
            # for testing purpose only
            raise ValueError('Uncaught exception')

        self.src_addr = self.get_client_addr()
        if len(clients.get(self.src_addr[0], {})) >= options.maxconn:
            raise tornado.web.HTTPError(403, 'Too many live connections.')

        self.check_origin()

        try:
            args = self.get_args()
        except InvalidValueError as exc:
            raise tornado.web.HTTPError(400, str(exc))

        future = self.executor.submit(self.ssh_connect, args)

        try:
            worker = yield future
        except (ValueError, paramiko.SSHException) as exc:
            logging.error(traceback.format_exc())
            self.result.update(status=str(exc))
        else:
            workers = clients.setdefault(worker.src_addr[0], {})
            workers[worker.id] = worker
            self.loop.call_later(DELAY, recycle_worker, worker)
            self.result.update(id=worker.id, encoding=worker.encoding)

        self.write(self.result)
Example #31
class BaseModel(Serializer):
    cache_prefix = "SBS"
    _CACHE_ONLY = False
    _loc = LRUCache(10240)
    io_executor = ProcessPoolExecutor(cpu_count())

    def __init__(self):
        super(BaseModel, self).__init__()
        self.need_insert = True
        self.pkey = None

    @classmethod
    def generate_cache_key(cls, pkey):
        return "%s|%s.%s|%s" % (cls.cache_prefix, cls.__module__, cls.__name__,
                                str(pkey))

    def get_cache_key(self):
        pkey = str(self.pkey)
        return self.__class__.generate_cache_key(pkey)

    @classmethod
    @gen.coroutine
    def execute_master(cls, sql):
        result = yield cls.io_executor.submit(execute_master, options.config,
                                              sql)
        raise gen.Return(result)

    @classmethod
    @gen.coroutine
    def select_master(cls, sql):
        result = yield cls.io_executor.submit(select_master, options.config,
                                              sql)
        raise gen.Return(result)

    @classmethod
    @gen.coroutine
    def cache_get(cls, key, default=None):
        result = yield cls.io_executor.submit(cache_get, options.config, key,
                                              default)
        raise gen.Return(result)

    @classmethod
    @gen.coroutine
    def cache_set(cls, key, value, timeout=0):
        result = yield cls.io_executor.submit(cache_set, options.config, key,
                                              value, timeout)
        raise gen.Return(result)

    @classmethod
    @gen.coroutine
    def cache_delete(cls, key):
        result = yield cls.io_executor.submit(cache_delete, options.config,
                                              key)
        raise gen.Return(result)

    @classmethod
    @gen.coroutine
    def get(cls, pkey, local_first=False):
        cache_key = cls.generate_cache_key(pkey)

        result = None
        if local_first:
            result = cls._loc.get(cache_key)

        if not result:
            result = yield cls.io_executor.submit(get_data, options.config,
                                                  cache_key, cls._CACHE_ONLY)
            if local_first:
                cls._loc.set(cache_key, result)

        obj = None
        if result:
            obj = cls.loads(result)
            obj.pkey = str(pkey)
            obj.need_insert = False
        raise gen.Return(obj)

    @gen.coroutine
    def put(self):
        cls = self.__class__
        cache_key = self.get_cache_key()
        data = self.dumps()
        yield cls.io_executor.submit(put_data, options.config, cache_key, data,
                                     cls._CACHE_ONLY)
        self.need_insert = False

    @gen.coroutine
    def delete(self):
        cls = self.__class__
        cache_key = self.get_cache_key()
        yield cls.io_executor.submit(delete_data, options.config, cache_key,
                                     cls._CACHE_ONLY)
Example #32
class LoginHandler(SessionMixin, BaseHandler):
    executor = ThreadPoolExecutor(max_workers=cpu_count() * 5)

    def initialize(self, loop=None):
        self.loop = loop
        self.result = dict(id=None, status=None)

    def get(self):
        id = self.get_argument("id")
        res = {}
        with self.make_session() as session:
            connection = session.query(SSHConnection).filter_by(id=id).first()
            if connection:
                res["hostname"] = connection.hostname
                res["port"] = connection.port
                res["username"] = connection.username
                res["password"] = connection.password
        self.write(json.dumps(res))

    def get_args(self):
        hostname = self.get_argument("hostname")
        port = self.get_argument("port")
        username = self.get_argument("username")
        password = self.get_argument("password")
        args = (hostname, port, username, password)
        logging.debug(args)
        return args

    def get_ssh_client(self):
        sshclient = paramiko.SSHClient()
        sshclient.load_system_host_keys()
        sshclient.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        return sshclient

    def ssh_connect(self, args):
        ssh = self.get_ssh_client()
        dst_addr = args[:2]
        logging.info('Connecting to {}:{}'.format(*dst_addr))

        try:
            ssh.connect(*args, timeout=10)
        except socket.error:
            raise ValueError('Unable to connect to {}:{}'.format(*dst_addr))
        except paramiko.BadAuthenticationType:
            raise ValueError('Bad authentication type.')
        except paramiko.AuthenticationException:
            raise ValueError('Authentication failed.')
        except paramiko.BadHostKeyException:
            raise ValueError('Bad host key.')

        term = self.get_argument('term', u'') or u'xterm'
        chan = ssh.invoke_shell(term=term)
        chan.setblocking(0)
        worker = Worker(self.loop, ssh, chan, dst_addr)
        worker.encoding = "utf-8"
        return worker

    @tornado.gen.coroutine
    def post(self):
        ip, port = self.request.connection.context.address[:2]
        workers = clients.get(ip, {})
        if workers and len(workers) >= 10:
            raise tornado.web.HTTPError(403, 'Too many live connections.')
        args = self.get_args()
        future = self.executor.submit(self.ssh_connect, args)

        try:
            worker = yield future
        except (ValueError, paramiko.SSHException) as exc:
            logging.error(traceback.format_exc())
            self.result.update(status=str(exc))
        else:
            if not workers:
                clients[ip] = workers
            worker.src_addr = (ip, port)
            workers[worker.id] = worker
            self.loop.call_later(3, recycle_worker, worker)
            self.result.update(id=worker.id, hostname=args[0])

        self.write(self.result)
Example #33
def workers():
    if options.workers is None:
        return process.cpu_count() - 2
    return options.workers
Example #34
"""

import datetime

import concurrent.futures
import momoko
import psycopg2.extras
from tornado import process
from tornado.ioloop import IOLoop

from foundation import const
from foundation.parseConfig import load_conf

load_conf('postgresql')

executor = concurrent.futures.ThreadPoolExecutor(max_workers=int(process.cpu_count()))  # create a thread pool

ioloop = IOLoop.instance()
# create a database connection pool
dbpool = momoko.Pool(
    dsn='dbname={dbname} user={user} password={pwd} host={host} port={port}'.format(
    dbname=const.postgresql.get("dbname"),
    user=const.postgresql.get("user"),
    pwd=const.postgresql.get("password"),
    host=const.postgresql.get("host"),
    port=const.postgresql.get("port")),
    cursor_factory=psycopg2.extras.RealDictCursor,
    size=int(const.postgresql.get("size")),
    max_size=int(const.postgresql.get("max_size")),
    raise_connect_errors=True if const.postgresql.get("raise_connect_errors") == 'True' else False,
    reconnect_interval=int(const.postgresql.get("reconnect_interval")),
Example #35
 def start_pusher(self, push_url):
     self.context = zmq.Context(cpu_count())
     self.ventilator = self.context.socket(zmq.PUSH)
     self.ventilator.bind(push_url)
Example #36
 def __init__(self, **kwargs):
     kwargs["cfg"] = { "redis": { 'max_connections': cpu_count() } }
     kwargs["redis"] = tornadoredis.ConnectionPool(wait_for_available = True,
         max_connections = kwargs["cfg"]["redis"]["max_connections"])
     handlers = [(r"/", IndexHandler, kwargs), (r"/ls", CmdHandler)]
     tornado.web.Application.__init__(self, handlers)
Example #37
       default=os.getenv('ADDRESS', "127.0.0.1"))

define("port", help="Listen port (default 8080) [ENV:PORT]",
       type=int, default=int(os.getenv('PORT', '8080')))

define("debug", help="Use for attach a debugger",
       default=bool(os.getenv("DEBUG")), type=bool)

define("gzip", help="Compress responses (default False) [ENV:GZIP]",
       default=bool(os.getenv("GZIP", '0')), type=bool)

define("proxy-mode", help="Process X-headers on requests (default True) [ENV:PROXY_MODE]",
       default=bool(os.getenv('PROXY_MODE', '1')), type=bool)

define("pool-size", help="Thread pool size (default cou_count * 2) [ENV:POOL_SIZE]",
       type=int, default=int(os.getenv('POOL_SIZE', cpu_count() * 2)))

define("secret", help="Cookie secret (default random) [ENV:SECRET]",
       default=os.getenv("SECRET", uuid.uuid4().bytes))

define("user", help="Change UID of current process (not change by default)", default=None)

define(
    "storage", help="Packages storage (default $CWD/packages) [ENV:STORAGE]", type=str,
    default=os.path.abspath(
        os.getenv(
            "STORAGE",
            os.path.join(os.path.abspath(os.path.curdir), 'packages')
        )
    )
)
Example #38
def start_pyrox(config):
    if config is None:
        raise ConfigurationError('No configuration object passed in')

    # Log some important things
    if config.routing.upstream_hosts is not None:
        _LOG.info('Upstream targets are: {}'.format(
            [dst for dst in config.routing.upstream_hosts]))

    # Set bind host
    bind_host = config.core.bind_host.split(':')
    if len(bind_host) != 2:
        raise ConfigurationError('bind_host must have a port specified')

    # Bind the sockets in the main process
    sockets = None

    try:
        sockets = bind_sockets(port=bind_host[1], address=bind_host[0])
    except Exception as ex:
        _LOG.exception(ex)
        return

    # Bind the server port(s)
    _LOG.info('Pyrox listening on: http://{0}:{1}'.format(
        bind_host[0], bind_host[1]))

    # Are we trying to profile Pyrox?
    if config.core.enable_profiling:
        _LOG.warning("""
**************************************************************************
Notice: Pyrox Profiling Mode Enabled

You have enabled Pyrox with profiling enabled in the Pyrox config. This
will restrict Pyrox to one resident process. It is not recommended that
you run Pyrox in production with this feature enabled.
**************************************************************************
""")
        start_proxy(sockets, config)
        return

    # Number of processess to spin
    num_processes = config.core.processes

    if num_processes <= 0:
        num_processes = cpu_count()

    global _active_children_pids

    for i in range(num_processes):
        pid = os.fork()
        if pid == 0:
            _LOG.info('Starting process {}'.format(i))
            start_proxy(sockets, config)
            sys.exit(0)
        else:
            _active_children_pids.append(pid)

    # Take over SIGTERM and SIGINT
    signal.signal(signal.SIGTERM, stop_parent)
    signal.signal(signal.SIGINT, signal.SIG_IGN)

    while len(_active_children_pids):
        try:
            pid, status = os.wait()
        except OSError as oserr:
            if oserr.errno != errno.EINTR:
                _LOG.exception(oserr)
            continue
        except Exception as ex:
            _LOG.exception(ex)
            continue

        _LOG.info('Child process {} exited with status {}'.format(pid, status))
        _active_children_pids.remove(pid)
Example #39
    (r'/add', runs.AddRunHandler),
    (r'/remove', runs.RemoveRunHandler),
    (r'/data/([A-Za-z0-9]{24})/this_week', data.ThisWeekHandler),
    (r'/data/([A-Za-z0-9]{24})/mileage/weekly', data.WeeklyMileageHandler),
    (r'/data/([A-Za-z0-9]{24})/runs/weekday', data.WeekdayRunsHandler),
    (r'/data/([A-Za-z0-9]{24})/runs/year', data.DailyRunsHandler),
    (r'/data/([A-Za-z0-9]{24})/runs/month', data.MonthRunsHandler),
], **settings)

application.config = config
application.thread_pool = futures.ThreadPoolExecutor(max_workers=3)
application.tf = tornadotinyfeedback.Client('openrunlog')
application.redis = tornadoredis.Client()

application.redis.connect()


if __name__ == '__main__':
    define('port', default=11000, help='TCP port to listen on')
    parse_command_line()

    mongoengine.connect(
            config['db_name'], 
            host=config['db_uri'])

    if not config['debug']:
        process.fork_processes(process.cpu_count()*2 + 1)
    application.listen(options.port)
    ioloop.IOLoop.instance().start()

Example #40
 def test_workers_options_default(self):
     w = opts.workers()
     self.assertEquals(w, process.cpu_count() - 2)
Example #41
class ConnectionUploadHandler(SessionMixin, BaseHandler):
    executor = ThreadPoolExecutor(max_workers=cpu_count() * 5)

    def update_upload_progress(self,
                               filename,
                               value,
                               task_id,
                               rel_filename=None,
                               total=None,
                               create=True):
        up = UploadProgress(filename=filename,
                            cur_value=value,
                            total=total,
                            rel_filename=rel_filename,
                            task_id=task_id)
        with self.make_session() as session:
            if create:
                session.add(up)
            else:
                data = {"cur_value": value, "task_id": task_id}
                if total:
                    data.update({"total": total})
                if rel_filename:
                    data.update({"rel_filename": rel_filename})
                session.query(UploadProgress).filter_by(
                    filename=filename).update(data)
            session.commit()

    def initialize(self):
        self.chunk_bytes = 0

    def prepare(self):
        if self.request.method.lower() == "post":
            self.request.connection.set_max_body_size(MAX_STREAM_SIZE)
        try:
            total = int(self.request.headers.get("Content-Length", "0"))
        except ValueError:
            # headers.get never raises KeyError here; int() raises ValueError
            total = 0
        # create a temporary directory per request and remove it when the upload
        # completes, so that parallel uploads (and other requests) stay isolated
        uuid_str = str(uuid.uuid4())
        tmp_dir = os.path.join(TMP_DIR, uuid_str)
        if not os.path.exists(tmp_dir):
            os.mkdir(tmp_dir)
        self.ps = MultiPartStreamer(total, tmp_dir=tmp_dir)
        self.update_upload_progress(filename=uuid_str,
                                    value=0,
                                    total=total,
                                    task_id=1)

    @run_on_executor
    def data_received(self, chunk):
        self.chunk_bytes += len(chunk)
        self.ps.data_received(chunk)
        if self.ps.get_parts_by_name("upload_file"):
            rel_filename = self.ps.get_parts_by_name(
                "upload_file")[0].get_filename()
        self.update_upload_progress(filename=os.path.basename(self.ps.tmp_dir),
                                    value=self.chunk_bytes,
                                    task_id=1,
                                    rel_filename=rel_filename,
                                    create=False)

    @tornado.gen.coroutine
    def post(self):
        try:
            self.ps.data_complete()
            id = filedir = filepath = None
            # three parts: the first is the file, the second is filepath, the third is id
            for part in self.ps.parts:
                if part.get_filename():
                    filepath = os.path.join(os.path.dirname(part.f_out.name),
                                            part.get_filename())
                    part.move(filepath)
                if part.get_name() == "id":
                    id = int(part.get_payload())
                if part.get_name() == "filepath":
                    filedir = part.get_payload()
            self.ps.release_parts()
            if filepath or filedir:
                if id:
                    with self.make_session() as session:
                        connection = session.query(SSHConnection).filter_by(
                            id=id).first()
                        future = self.executor.submit(self.ftp_upload,
                                                      connection, filepath,
                                                      filedir)
                        yield future
                else:
                    self.write({"status": 500, "result": "id is None"})
            else:
                self.write({"status": 500, "result": "file path is None"})
        except Exception as e:
            self.write({"status": 500, "result": str(e)})
            raise e
        self.write({"status": 200, "result": ""})

    def ftp_upload(self, connection, filepath, filedir):
        def callback(cur, total):
            uuid_str = os.path.basename(os.path.dirname(filepath))
            self.update_upload_progress(filename=uuid_str,
                                        value=cur,
                                        total=total,
                                        task_id=2,
                                        create=False)

        ftp_transport = paramiko.Transport(connection.hostname,
                                           connection.port)
        ftp_transport.connect(username=connection.username,
                              password=connection.password)
        sftp_client = paramiko.SFTPClient.from_transport(ftp_transport)
        filename = os.path.basename(filepath)
        if filedir:
            target_file = "%s/%s" % (bytes.decode(filedir), filename)
        elif connection.username != "root":
            target_file = "/home/%s/%s" % (connection.username, filename)
        else:
            target_file = "/root/%s" % filename
        sftp_client.put(filepath, target_file, callback=callback)
        self.rm_file(filepath)

    def rm_file(self, filepath):
        work_dir = os.path.dirname(filepath)
        files = os.listdir(work_dir)
        for file in files:
            os.remove(os.path.join(work_dir, file))
        os.rmdir(work_dir)
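The handler above leans on tornado's run_on_executor decorator, which by
default submits the decorated method to an ``executor`` attribute on the
instance. A minimal self-contained sketch of that mechanism (the handler
name and the simulated work are hypothetical, not part of the source):

import time
from concurrent.futures import ThreadPoolExecutor

import tornado.gen
import tornado.web
from tornado.concurrent import run_on_executor
from tornado.process import cpu_count


class SlowHandler(tornado.web.RequestHandler):
    # run_on_executor looks for this attribute by default
    executor = ThreadPoolExecutor(max_workers=cpu_count())

    @run_on_executor
    def blocking_work(self):
        time.sleep(1)  # stands in for blocking work such as parsing a chunk
        return "done"

    @tornado.gen.coroutine
    def get(self):
        # the decorated call returns a future; yielding it keeps the
        # IOLoop free while a pool thread does the work
        result = yield self.blocking_work()
        self.write(result)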
Example #42
0
        super().__init__(app_handlers, **app_settings)

    def log_request(self, request):
        """Override default to avoid logging requests"""
        pass


if __name__ == "__main__":
    boat = BoatPi(app_settings["boatpi_ws"], 5)
    boatLogger = MongoLogger()

    if app_settings["env"] == "dev":
        boatLogger.clear_logs()

        log.info("Starting applications", mode="single")
        Application().listen(app_settings["port"])

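        # autoreload only works in this single-process dev path; tornado's
        # autoreload is not compatible with the forked multi-process mode
        # used in the else branch below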
        autoreload.start()
        autoreload.watch(r'assets/')
        autoreload.watch(r'templates/')
        autoreload.watch(r'templates/modules/')
    else:
        log.info("Starting applications", mode="forked", cpu_count=cpu_count())
        server = HTTPServer(Application())
        server.bind(app_settings["port"])
        server.start(0)  # multi process mode (one process per cpu)

    ioloop = IOLoop.instance()
    ioloop.start()
Example #43
0
def main():
    reverse_proxy_server = ReverseProxyServer()
    reverse_proxy_server.bind(7777)
    reverse_proxy_server.start(cpu_count())
    IOLoop.instance().start()
Example #44
0
# coding: utf-8

from tornado.process import cpu_count

MYSQL_HOST = '127.0.0.1'
MYSQL_PORT = 3306
MYSQL_USER = '******'
MYSQL_PASSWORD = '******'

MYSQL_PUBLIC = 'test'

DEBUG = True
ECHOSQL = False

PUBLIC_STRING = 'mysql+mysqldb://%s:%s@%s:%s/%s?charset=utf8' % (
    MYSQL_USER, MYSQL_PASSWORD, MYSQL_HOST, MYSQL_PORT, MYSQL_PUBLIC)

THREAD_COUNT = cpu_count()
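A short sketch of how a settings module like this is typically consumed,
assuming SQLAlchemy and that the module is importable as ``settings`` (both
are assumptions, not shown in the source):

# hypothetical consumer of the config above
from concurrent.futures import ThreadPoolExecutor

from sqlalchemy import create_engine

import settings

# ECHOSQL toggles SQLAlchemy statement logging; PUBLIC_STRING already
# encodes user, password, host, port and database
engine = create_engine(settings.PUBLIC_STRING, echo=settings.ECHOSQL)

# size a worker pool from the core count captured at import time
pool = ThreadPoolExecutor(settings.THREAD_COUNT)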
Example #45
0
from concurrent.futures import ThreadPoolExecutor

import bcrypt
import tornado.gen
from tornado.process import cpu_count

pool = ThreadPoolExecutor(cpu_count())


@tornado.gen.coroutine
def make_password(password: str):
    """
    Generate password hash.
    
    :param password: str Password raw
    :return: str Hashed password
    """
    return (
        yield pool.submit(
            bcrypt.hashpw,
            password.encode(),
            bcrypt.gensalt()
        )
    )


@tornado.gen.coroutine
def check_password(password: str, hashed_password: str) -> bool:
    """
    Compare raw password with hashed password.

    :param password: str Password raw
    :param hashed_password: Hashed password (str or bytes)
    :return: bool True when the password matches
    """
    # body reconstructed to match the signature above (the original snippet
    # is cut off here); bcrypt.checkpw expects bytes on both sides
    if isinstance(hashed_password, str):
        hashed_password = hashed_password.encode()
    return (yield pool.submit(bcrypt.checkpw, password.encode(),
                              hashed_password))
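A quick usage sketch for the two coroutines (hypothetical driver code, not
part of the source):

import tornado.gen
from tornado.ioloop import IOLoop


@tornado.gen.coroutine
def demo():
    hashed = yield make_password("s3cret")
    ok = yield check_password("s3cret", hashed)
    print(ok)  # True


IOLoop.current().run_sync(demo)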
Example #46
0
define("address", help="Listen address (default 127.0.0.1) [ENV:ADDRESS]",
       default=os.getenv('ADDRESS', "127.0.0.1"))

define("port", help="Listen port (default 8080) [ENV:PORT]",
       type=int, default=int(os.getenv('PORT', '8080')))

define("debug", help="Use for attach a debugger",
       default=bool(os.getenv("DEBUG")), type=bool)

define("gzip", help="Compress responses (default False) [ENV:GZIP]",
       default=bool(os.getenv("GZIP")), type=bool)

define("proxy-mode", help="Process X-headers on requests (default True) [ENV:PROXY_MODE]",
       default=bool(os.getenv('PROXY_MODE', '1')), type=bool)

define("pool-size", help="Thread pool size (default cou_count * 2) [ENV:POOL_SIZE]",
       type=int, default=int(os.getenv('POOL_SIZE', cpu_count() * 2)))

define("secret", help="Cookie secret (default random) [ENV:SECRET]",
       default=os.getenv("SECRET", uuid.uuid4().bytes))

define("user", help="Change UID of current process (not change by default)", default=None)

default_storage = os.path.abspath(
    os.getenv(
        "STORAGE",
        os.path.join(os.path.abspath(os.path.curdir), 'packages')
    )
)
define(
    "storage", help="Packages storage (default $CWD/packages) [ENV:STORAGE]", type=str,
    default=default_storage
)
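A short sketch of how these options are typically read back after parsing;
tornado normalizes dashes in option names, so ``pool-size`` is reachable as
``options.pool_size`` (the startup snippet itself is hypothetical):

# hypothetical startup code for the option definitions above
from concurrent.futures import ThreadPoolExecutor

from tornado.options import options, parse_command_line

parse_command_line()

# dashed names are normalized: "pool-size" -> options.pool_size
executor = ThreadPoolExecutor(options.pool_size)
print("listening on %s:%s, storage=%s"
      % (options.address, options.port, options.storage))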
Example #47
0
class IndexHandler(MixinHandler, tornado.web.RequestHandler):

    executor = ThreadPoolExecutor(max_workers=cpu_count()*5)

    def initialize(self, loop, policy, host_keys_settings):
        super(IndexHandler, self).initialize(loop)
        self.policy = policy
        self.host_keys_settings = host_keys_settings
        self.ssh_client = self.get_ssh_client()
        self.debug = self.settings.get('debug', False)
        self.result = dict(id=None, status=None, encoding=None)

    def write_error(self, status_code, **kwargs):
        if swallow_http_errors and self.request.method == 'POST':
            exc_info = kwargs.get('exc_info')
            if exc_info:
                reason = getattr(exc_info[1], 'log_message', None)
                if reason:
                    self._reason = reason
            self.result.update(status=self._reason)
            self.set_status(200)
            self.finish(self.result)
        else:
            super(IndexHandler, self).write_error(status_code, **kwargs)

    def get_ssh_client(self):
        ssh = SSHClient()
        ssh._system_host_keys = self.host_keys_settings['system_host_keys']
        ssh._host_keys = self.host_keys_settings['host_keys']
        ssh._host_keys_filename = self.host_keys_settings['host_keys_filename']
        ssh.set_missing_host_key_policy(self.policy)
        return ssh

    def get_privatekey(self):
        name = 'privatekey'
        lst = self.request.files.get(name)
        if lst:
            # multipart form
            filename = lst[0]['filename']
            data = lst[0]['body']
            value = self.decode_argument(data, name=name).strip()
        else:
            # urlencoded form
            value = self.get_argument(name, u'')
            filename = ''

        return value, filename

    def get_hostname(self):
        value = self.get_json_argument('hostname')
        if not (is_valid_hostname(value) or is_valid_ip_address(value)):
            raise InvalidValueError('Invalid hostname: {}'.format(value))
        return value

    def get_port(self):
        value = self.get_json_argument('port')
        if not value:
            return DEFAULT_PORT

        port = to_int(value)
        if port is None or not is_valid_port(port):
            raise InvalidValueError('Invalid port: {}'.format(value))
        return port

    def lookup_hostname(self, hostname, port):
        key = hostname if port == 22 else '[{}]:{}'.format(hostname, port)

        if self.ssh_client._system_host_keys.lookup(key) is None:
            if self.ssh_client._host_keys.lookup(key) is None:
                raise tornado.web.HTTPError(
                        403, 'Connection to {}:{} is not allowed.'.format(
                            hostname, port)
                    )

    def get_args(self):
        hostname = self.get_hostname()
        port = self.get_port()
        username = self.get_json_argument('username')
        password = self.get_json_argument('password')
        privatekey, filename = self.get_privatekey()
        passphrase = self.get_argument('passphrase', u'')
        totp = self.get_argument('totp', u'')

        if isinstance(self.policy, paramiko.RejectPolicy):
            self.lookup_hostname(hostname, port)

        if privatekey:
            pkey = PrivateKey(privatekey, passphrase, filename).get_pkey_obj()
        else:
            pkey = None

        self.ssh_client.totp = totp
        args = (hostname, port, username, password, pkey)
        logging.debug(args)

        return args

    def get_default_encoding(self, ssh):
        try:
            _, stdout, _ = ssh.exec_command('locale charmap')
        except paramiko.SSHException:
            result = None
        else:
            result = to_str(stdout.read().strip())

        return result if result else 'utf-8'

    def ssh_connect(self, args):
        ssh = self.ssh_client
        dst_addr = args[:2]
        logging.info('Connecting to {}:{}'.format(*dst_addr))

        try:
            ssh.connect(*args, timeout=6)
        except socket.error:
            raise ValueError('Unable to connect to {}:{}'.format(*dst_addr))
        except paramiko.BadAuthenticationType:
            raise ValueError('Bad authentication type.')
        except paramiko.AuthenticationException:
            raise ValueError('Authentication failed.')
        except paramiko.BadHostKeyException:
            raise ValueError('Bad host key.')

        chan = ssh.invoke_shell(term='xterm')
        chan.setblocking(0)
        worker = Worker(self.loop, ssh, chan, dst_addr)
        worker.encoding = self.get_default_encoding(ssh)
        return worker

    def check_origin(self):
        event_origin = self.get_argument('_origin', u'')
        header_origin = self.request.headers.get('Origin')
        origin = event_origin or header_origin

        if origin:
            if not super(IndexHandler, self).check_origin(origin):
                raise tornado.web.HTTPError(
                    403, 'Cross origin operation is not allowed.'
                )

            if not event_origin and self.origin_policy != 'same':
                self.set_header('Access-Control-Allow-Origin', origin)

    def get_json_argument(self, name, default=None):
        args = json_decode(self.request.body)
        name = to_unicode(name)
        if name in args:
            return args[name]
        elif default is not None:
            return default
        else:
            raise tornado.web.MissingArgumentError(name)

    def head(self):
        pass

    def get(self):
        self.render('index.html', debug=self.debug)

    @tornado.gen.coroutine
    def post(self):
        if self.debug and self.get_argument('error', u''):
            # for testing purpose only
            raise ValueError('Uncaught exception')

        ip, port = self.get_client_addr()
        workers = clients.get(ip, {})
        if workers and len(workers) >= options.maxconn:
            raise tornado.web.HTTPError(403, 'Too many live connections.')

        self.check_origin()

        try:
            args = self.get_args()
        except InvalidValueError as exc:
            raise tornado.web.HTTPError(400, str(exc))

        future = self.executor.submit(self.ssh_connect, args)

        try:
            worker = yield future
        except (ValueError, paramiko.SSHException) as exc:
            logging.error(traceback.format_exc())
            self.result.update(status=str(exc))
        else:
            if not workers:
                clients[ip] = workers
            worker.src_addr = (ip, port)
            workers[worker.id] = worker
            self.loop.call_later(DELAY, recycle_worker, worker)
            self.result.update(id=worker.id, encoding=worker.encoding)

        self.write(self.result)
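recycle_worker is referenced above but not shown in this snippet. A
plausible reconstruction, assuming (as the call_later(DELAY, ...) usage
suggests) that a worker not claimed by any WebSocket within DELAY seconds
should be torn down:

# hypothetical sketch; the real implementation lives elsewhere in the project
def recycle_worker(worker):
    if worker.handler:
        # a WebSocket handler picked the worker up in time; keep it
        return
    logging.warning('Recycling worker {}'.format(worker.id))
    worker.close(reason='worker recycled')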
Example #48
0
class IndexHandler(MixinHandler, tornado.web.RequestHandler):

    executor = ThreadPoolExecutor(max_workers=cpu_count() * 5)

    def initialize(self, loop, policy, host_keys_settings):
        super(IndexHandler, self).initialize(loop)
        self.policy = policy
        self.host_keys_settings = host_keys_settings
        self.ssh_client = self.get_ssh_client()
        self.debug = self.settings.get('debug', False)
        self.result = dict(id=None, status=None, encoding=None)

    def write_error(self, status_code, **kwargs):
        if swallow_http_errors and self.request.method == 'POST':
            exc_info = kwargs.get('exc_info')
            if exc_info:
                reason = getattr(exc_info[1], 'log_message', None)
                if reason:
                    self._reason = reason
            self.result.update(status=self._reason)
            self.set_status(200)
            self.finish(self.result)
        else:
            super(IndexHandler, self).write_error(status_code, **kwargs)

    def get_ssh_client(self):
        ssh = SSHClient()
        ssh._system_host_keys = self.host_keys_settings['system_host_keys']
        ssh._host_keys = self.host_keys_settings['host_keys']
        ssh._host_keys_filename = self.host_keys_settings['host_keys_filename']
        ssh.set_missing_host_key_policy(self.policy)
        return ssh

    def get_privatekey(self):
        name = 'privatekey'
        lst = self.request.files.get(name)
        if lst:
            # multipart form
            filename = lst[0]['filename']
            data = lst[0]['body']
            value = self.decode_argument(data, name=name).strip()
        else:
            # urlencoded form
            value = self.get_argument(name, u'')
            filename = ''

        return value, filename

    def get_hostname(self):
        value = self.get_value('hostname')
        if not (is_valid_hostname(value) or is_valid_ip_address(value)):
            raise InvalidValueError('Invalid hostname: {}'.format(value))
        return value

    def get_port(self):
        value = self.get_argument('port', u'')
        if not value:
            return DEFAULT_PORT

        port = to_int(value)
        if port is None or not is_valid_port(port):
            raise InvalidValueError('Invalid port: {}'.format(value))
        return port

    def lookup_hostname(self, hostname, port):
        key = hostname if port == 22 else '[{}]:{}'.format(hostname, port)

        if self.ssh_client._system_host_keys.lookup(key) is None:
            if self.ssh_client._host_keys.lookup(key) is None:
                raise tornado.web.HTTPError(
                    403, 'Connection to {}:{} is not allowed.'.format(
                        hostname, port))

    def check_remote_ip_trusted(self, remote_ip):
        # membership test instead of an index loop with a bare except;
        # a missing or empty list simply means "not trusted"
        return remote_ip in (self.host_keys_settings.get("ip_trusted_list") or [])

    def check_remote_ip_blacklist(self, remote_ip):
        return remote_ip in (self.host_keys_settings.get("ip_black_list") or [])

    def get_args(self):
        hostname = self.get_hostname()
        port = self.get_port()
        username = self.get_value('username')
        rsakey = self.get_value("rsakey")

        password = self.get_argument('password', u'')

        privatekey, filename = self.get_privatekey()
        passphrase = self.get_argument('passphrase', u'')
        totp = self.get_argument('totp', u'')

        if isinstance(self.policy, paramiko.RejectPolicy):
            self.lookup_hostname(hostname, port)

        if privatekey:
            pkey = PrivateKey(privatekey, passphrase, filename).get_pkey_obj()
        else:
            if rsakey:
                with open(
                        os.path.join(self.host_keys_settings["certs_home"],
                                     rsakey)) as file_obj:
                    privatekey = file_obj.read()
                if privatekey:
                    pkey = PrivateKey(privatekey, passphrase,
                                      rsakey).get_pkey_obj()
                else:
                    pkey = None
            else:
                pkey = None

        self.ssh_client.totp = totp
        args = (hostname, port, username, password, pkey)
        logging.debug(args)

        return args

    def parse_encoding(self, data):
        try:
            encoding = to_str(data.strip(), 'ascii')
        except UnicodeDecodeError:
            return

        if is_valid_encoding(encoding):
            return encoding

    def get_default_encoding(self, ssh):
        commands = [
            '$SHELL -ilc "locale charmap"', '$SHELL -ic "locale charmap"'
        ]

        for command in commands:
            _, stdout, _ = ssh.exec_command(command, get_pty=True)
            data = stdout.read()
            logging.debug('{!r} => {!r}'.format(command, data))
            result = self.parse_encoding(data)
            if result:
                return result

        logging.warning('Could not detect the default encoding.')
        return 'utf-8'

    def ssh_connect(self, args):
        ssh = self.ssh_client
        dst_addr = args[:2]
        logging.info('Connecting to {}:{}'.format(*dst_addr))

        try:
            ssh.connect(*args, timeout=6)
        except socket.error:
            raise ValueError('Unable to connect to {}:{}'.format(*dst_addr))
        except paramiko.BadAuthenticationType:
            raise ValueError('Bad authentication type.')
        except paramiko.AuthenticationException:
            raise ValueError('Authentication failed.')
        except paramiko.BadHostKeyException:
            raise ValueError('Bad host key.')

        term = self.get_argument('term', u'') or u'xterm'
        chan = ssh.invoke_shell(term=term)
        chan.setblocking(0)
        worker = Worker(self.loop, ssh, chan, dst_addr)
        worker.encoding = self.get_default_encoding(ssh)
        return worker

    def check_origin(self):
        event_origin = self.get_argument('_origin', u'')
        header_origin = self.request.headers.get('Origin')
        origin = event_origin or header_origin

        if origin:
            if not super(IndexHandler, self).check_origin(origin):
                raise tornado.web.HTTPError(
                    403, 'Cross origin operation is not allowed.')

            if not event_origin and self.origin_policy != 'same':
                self.set_header('Access-Control-Allow-Origin', origin)

    def head(self):
        pass

    def get(self):
        self.render('index.html', debug=self.debug)

    @tornado.gen.coroutine
    def post(self):
        if self.debug and self.get_argument('error', u''):
            # for testing purpose only
            raise ValueError('Uncaught exception')

        now = int(time.time())

        ip, port = self.get_client_addr()
        workers = clients.get(ip, {})

        x_real_ip = self.get_x_real_ip()
        client_ip = self.get_value("ip")

        logging.info('Client on {}:{}; now time {}'.format(
            x_real_ip, port, now))

        if self.check_remote_ip_trusted(x_real_ip):
            logging.info('Client {}:{} in trusted list; now time {}'.format(
                x_real_ip, port, now))
        else:
            if self.check_remote_ip_blacklist(x_real_ip):
                raise tornado.web.HTTPError(403, 'IP in blacklist.')

            if client_ip != x_real_ip:
                raise tornado.web.HTTPError(403, 'IP fake.')

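            # the request carries a signed timestamp "tm"; reject links
            # older than 30 minutes or stamped in the future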
            tm = int(self.get_value("tm"))
            if now - tm > 30 * 60:
                raise tornado.web.HTTPError(403, 'connect timeout.')
            if now < tm:
                raise tornado.web.HTTPError(403, 'invalid time.')

        if workers and len(workers) >= options.maxconn:
            raise tornado.web.HTTPError(403, 'Too many live connections.')

        self.check_origin()

        try:
            args = self.get_args()
        except InvalidValueError as exc:
            raise tornado.web.HTTPError(400, str(exc))

        future = self.executor.submit(self.ssh_connect, args)

        try:
            worker = yield future
        except (ValueError, paramiko.SSHException) as exc:
            logging.error(traceback.format_exc())
            self.result.update(status=str(exc))
        else:
            if not workers:
                clients[ip] = workers
            worker.src_addr = (ip, port)
            workers[worker.id] = worker
            self.loop.call_later(DELAY, recycle_worker, worker)
            self.result.update(id=worker.id, encoding=worker.encoding)

        self.write(self.result)
Example #49
0
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.executor = ThreadPoolExecutor(cpu_count(),
                                           thread_name_prefix='WEBSERVER-' +
                                           self.__class__.__name__.upper())
Example #50
0
def start_pyrox(other_cfg=None):
    config = load_pyrox_config(other_cfg) if other_cfg else load_pyrox_config()

    # Init logging
    logging_manager = get_log_manager()
    logging_manager.configure(config)

    _LOG.info('Upstream targets are: {}'.format(
        ['https://{0}:{1}'.format(dst[0], dst[1])
            for dst in config.routing.upstream_hosts]))

    # Set bind host
    bind_host = config.core.bind_host.split(':')
    if len(bind_host) != 2:
        raise ConfigurationError('bind_host must have a port specified')

    # Bind the sockets in the main process
    sockets = bind_sockets(port=bind_host[1], address=bind_host[0])

    # Bind the server port(s)
    _LOG.info('Pyrox listening on: http://{0}:{1}'.format(
        bind_host[0], bind_host[1]))

    # Start Tornado
    num_processes = config.core.processes

    if num_processes <= 0:
        num_processes = cpu_count()

    global _active_children_pids

    for i in range(num_processes):
        pid = os.fork()
        if pid == 0:
            print('Starting process {}'.format(i))
            start_proxy(sockets, config)
            sys.exit(0)
        else:
            _active_children_pids.append(pid)

    # Take over SIGTERM and SIGINT
    signal.signal(signal.SIGTERM, stop_parent)
    signal.signal(signal.SIGINT, signal.SIG_IGN)

    while len(_active_children_pids):
        try:
            pid, status = os.wait()
        except OSError as oserr:
            if oserr.errno != errno.EINTR:
                _LOG.exception(oserr)
            continue
        except Exception as ex:
            _LOG.exception(ex)
            continue

        _LOG.info('Child process {} exited with status {}'.format(
            pid, status))
        _active_children_pids.remove(pid)
Example #51
0
import os
import os.path
import logging
import time
import json
import binascii
import bcrypt
import re

# imports used below but missing from the snippet as scraped
from concurrent.futures import ThreadPoolExecutor

from tornado import gen
from tornado.process import cpu_count
from tornado.web import RequestHandler

SESSION_AUTH_LENGTH_BYTES = 16
SESSION_MAX_AGE_DAYS = 1

lg = logging.getLogger(__name__)

pool = ThreadPoolExecutor(cpu_count())

APPDIR = os.path.dirname(__file__)

class BaseHandler(RequestHandler):
    def initialize(self, database):
        self.db = database

    # RequestHandler.get_current_user cannot be a coroutine so we use
    # prepare to set self.current_user
    # self.current_user is a dictionary with the same fields
    # as the user:<user_id> hash in the database.
    # It is non-None if the user is logged in.
    # Session expiration is handled via expiring Tornado secure cookies.
    @gen.coroutine
    def prepare(self):
Example #52
0
def start_pyrox(config):
    if config is None:
        raise ConfigurationError('No configuration object passed in')

    # Log some important things
    if config.routing.upstream_hosts is not None:
        _LOG.info('Upstream targets are: {}'.format(
            [dst for dst in config.routing.upstream_hosts]))

    # Set bind host
    bind_host = config.core.bind_host.split(':')
    if len(bind_host) != 2:
        raise ConfigurationError('bind_host must have a port specified')

    # Bind the sockets in the main process
    sockets = None

    try:
        sockets = bind_sockets(port=bind_host[1], address=bind_host[0])
    except Exception as ex:
        _LOG.exception(ex)
        return

    # Bind the server port(s)
    _LOG.info('Pyrox listening on: http://{0}:{1}'.format(
        bind_host[0], bind_host[1]))

    # Are we trying to profile Pyrox?
    if config.core.enable_profiling:
        _LOG.warning("""
**************************************************************************
Notice: Pyrox Profiling Mode Enabled

You have enabled Pyrox with profiling enabled in the Pyrox config. This
will restrict Pyrox to one resident process. It is not recommended that
you run Pyrox in production with this feature enabled.
**************************************************************************
""")
        start_proxy(sockets, config)
        return

    # Number of processes to spin up
    num_processes = config.core.processes

    if num_processes <= 0:
        num_processes = cpu_count()

    global _active_children_pids

    for i in range(num_processes):
        pid = os.fork()
        if pid == 0:
            _LOG.info('Starting process {}'.format(i))
            start_proxy(sockets, config)
            sys.exit(0)
        else:
            _active_children_pids.append(pid)

    # Take over SIGTERM and SIGINT
    signal.signal(signal.SIGTERM, stop_parent)
    signal.signal(signal.SIGINT, signal.SIG_IGN)

    while len(_active_children_pids):
        try:
            pid, status = os.wait()
        except OSError as oserr:
            if oserr.errno != errno.EINTR:
                _LOG.exception(oserr)
            continue
        except Exception as ex:
            _LOG.exception(ex)
            continue

        _LOG.info('Child process {} exited with status {}'.format(
            pid, status))
        _active_children_pids.remove(pid)
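stop_parent is registered for SIGTERM above but not shown. A plausible
hedged sketch, under the assumption that the parent's only job is to
forward the signal to the recorded children and let the os.wait() loop
reap them:

# hypothetical reconstruction of the SIGTERM handler referenced above
import os
import signal


def stop_parent(signum, frame):
    # forward the termination signal to every live child; the wait()
    # loop in start_pyrox then reaps them as they exit
    for pid in list(_active_children_pids):
        try:
            os.kill(pid, signal.SIGTERM)
        except OSError:
            pass  # the child may already have exited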