Example #1
    def _open(self, host, port, **kwargs):
        # Note: does not get called if host is False. That way we can
        # run Flexx in e.g. JLab's application.
        
        # Hook Tornado up with asyncio. Flexx' BaseServer makes sure
        # that the correct asyncio event loop is current (for this thread).
        # http://www.tornadoweb.org/en/stable/asyncio.html
        # todo: since Tornado 5.0 asyncio is used automatically, deprecating AsyncIOMainLoop
        self._io_loop = AsyncIOMainLoop()
        # I am sorry for this hack, but Tornado won't work otherwise :(
        # I wonder how long it will take before this will bite me back. I guess
        # we will be alright as long as there is no other Tornado stuff going on.
        IOLoop._current.instance = None
        self._io_loop.make_current()
        
        # Handle SSL, whether from configuration or given args
        if config.ssl_certfile:
            if 'ssl_options' not in kwargs:
                kwargs['ssl_options'] = {}
            if 'certfile' not in kwargs['ssl_options']:
                kwargs['ssl_options']['certfile'] = config.ssl_certfile

        if config.ssl_keyfile:
            if 'ssl_options' not in kwargs:
                kwargs['ssl_options'] = {}
            if 'keyfile' not in kwargs['ssl_options']:
                kwargs['ssl_options']['keyfile'] = config.ssl_keyfile

        if config.tornado_debug:
            app_kwargs = dict(debug=True)
        else:
            app_kwargs = dict()
        # Create tornado application
        self._app = Application([(r"/flexx/ws/(.*)", WSHandler),
                                 (r"/flexx/(.*)", MainHandler),
                                 (r"/(.*)", AppHandler), ], **app_kwargs)
        self._app._io_loop = self._io_loop
        # Create tornado server, bound to our own ioloop
        if tornado.version_info < (5, ):
            kwargs['io_loop'] = self._io_loop
        self._server = HTTPServer(self._app, **kwargs)
        
        # Start server (find free port number if port not given)
        if port:
            # Turn port into int, use hashed port number if a string was given
            try:
                port = int(port)
            except ValueError:
                port = port_hash(port)
            self._server.listen(port, host)
        else:
            # Try N ports in a repeatable range (easier to remember, nicer
            # for browser history, etc.)
            preferred_port = port_hash('Flexx')
            for i in range(8):
                port = preferred_port + i
                try:
                    self._server.listen(port, host)
                    break
                except (OSError, IOError):
                    pass  # address already in use
            else:
                # Ok, let Tornado figure out a port
                [sock] = netutil.bind_sockets(None, host, family=socket.AF_INET)
                self._server.add_sockets([sock])
                port = sock.getsockname()[1]

        # Notify the address, so it's easy to e.g. copy-paste it into the browser
        self._serving = self._app._flexx_serving = host, port
        proto = 'http'
        if 'ssl_options' in kwargs:
            proto = 'https'
        logger.info('Serving apps at %s://%s:%i/' % (proto, host, port))
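
The todo comment above is right: on Tornado 5+ the asyncio event loop is picked up automatically, so the AsyncIOMainLoop/make_current gymnastics are no longer needed. A minimal sketch of the modern pattern (Tornado 6+, Python 3.7+; the handler and port here are illustrative and not part of Flexx):

import asyncio
from tornado.httpserver import HTTPServer
from tornado.web import Application, RequestHandler

class PingHandler(RequestHandler):
    def get(self):
        self.write("pong")

async def main():
    app = Application([(r"/ping", PingHandler)])
    server = HTTPServer(app)
    server.listen(8080)            # binds on the running asyncio loop
    await asyncio.Event().wait()   # serve until the task is cancelled

if __name__ == "__main__":
    asyncio.run(main())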
Example #2
def main(argv=None):  # pragma: no cover
    'Main function'
    parser = argparse.ArgumentParser(
        description='ngshare, a REST API nbgrader exchange')
    parser.add_argument(
        '--vngshare',
        help='Use vngshare (stand-alone mode)',
        action='store_true',
    )
    parser.add_argument('--jupyterhub_api_url',
                        help='override $JUPYTERHUB_API_URL')
    parser.add_argument(
        '--prefix',
        help='URL prefix (default: $JUPYTERHUB_SERVICE_PREFIX or "/api/")',
    )
    parser.add_argument('--debug', action='store_true', help='enable debug')
    parser.add_argument(
        '--database',
        help='database url',
        default='sqlite:////srv/ngshare/ngshare.db',
    )
    parser.add_argument('--storage',
                        help='path to store files',
                        default='/srv/ngshare/files/')
    parser.add_argument('--admins',
                        help='admin user ids (comma separated)',
                        default='')
    parser.add_argument('--host',
                        help='bind hostname (vngshare only)',
                        default='127.0.0.1')
    parser.add_argument('--port',
                        help='bind port (vngshare only)',
                        type=int,
                        default=12121)
    parser.add_argument(
        '--no-upgrade-db',
        action='store_true',
        help='do not automatically upgrade database',
    )
    args = parser.parse_args(argv)
    if args.jupyterhub_api_url is not None:
        os.environ['JUPYTERHUB_API_URL'] = args.jupyterhub_api_url

    prefix = args.prefix or os.environ.get('JUPYTERHUB_SERVICE_PREFIX',
                                           '/api/')

    if not prefix.startswith('/') or not prefix.endswith('/'):
        raise ValueError('API prefix should start and end with /')

    if prefix.startswith('/healthz/'):
        raise ValueError("API prefix may not start with /healthz/")

    if not args.no_upgrade_db:
        CmdOpts = namedtuple('CmdOpts', ['x'])
        cmd_opts = CmdOpts(['data=true', 'storage=' + args.storage])
        dbutil.upgrade(args.database, cmd_opts=cmd_opts)

    if args.vngshare:
        MyRequestHandler.__bases__ = (MockAuth, RequestHandler, MyHelpers)

    app = MyApplication(
        prefix,
        args.database,
        args.storage,
        admin=args.admins.split(','),
        debug=args.debug,
    )

    http_server = HTTPServer(app)

    if args.vngshare:
        http_server.listen(args.port, args.host)
        app.vngshare = True
        print()
        print('Starting vngshare (Vserver-like Notebook Grader Share)')
        print('DO NOT USE IN PRODUCTION')
        print('Database file is %s' % repr(args.database))
        print('Storage directory is %s' % repr(args.storage))
        print('admin users are %s' % repr(app.admin))
        print('Please go to http://%s:%d/api/' % (args.host, args.port))
    else:
        url = urlparse(os.environ['JUPYTERHUB_SERVICE_URL'])

        # Must listen on all interfaces for proxy
        http_server.listen(url.port, '0.0.0.0')

    IOLoop.current().start()
Example #3
    while True:
        diskused = 0
        disktotal = 0
        for i in psutil.disk_partitions():
            try:
                x = psutil.disk_usage(i.mountpoint)
                diskused += x.used
                disktotal += x.total
            except OSError:
                pass
        stats['uptime'] = time.time() - psutil.boot_time()
        stats['fqdn'] = socket.gethostname()
        stats['cpuusage'] = psutil.cpu_percent(0)
        stats['ramusage'] = psutil.virtual_memory()
        stats['diskio'] = psutil.disk_io_counters()
        stats['diskusage'] = [diskused, disktotal]
        stats['netio'] = psutil.net_io_counters()
        stats['swapusage'] = psutil.swap_memory()
        time.sleep(1)


if __name__ == '__main__':
    stats = Manager().dict()
    Process(target=update, args=(stats, )).start()
    if len(sys.argv) > 1:
        PORT = int(sys.argv[1])
    server = HTTPServer(WSGIContainer(app))
    print('Now listening on port ' + str(PORT))
    server.listen(PORT)
    IOLoop.instance().start()
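
The WSGI app wrapped above is not shown; a hypothetical Flask route that exposes the shared stats dict could look like this (names assumed, not from the original source):

from flask import Flask, jsonify

app = Flask(__name__)
stats = {}  # in the original this is the shared Manager().dict()

@app.route('/stats')
def get_stats():
    # Copy the proxy into a plain dict before serializing;
    # psutil named tuples come out as JSON arrays.
    return jsonify(dict(stats))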
Example #4
class IndexHandler(RequestHandler):
    def get(self, *args, **kwargs):
        # self.set_secure_cookie("aa", "bb")
        # self.write("ok")
        # 1. XSRF protection; the token is used in the template
        self.render("xsrftest.html")

    def post(self, *args, **kwargs):
        print(self.get_argument("uname"))
        self.write(self.get_argument("uname"))


class AjaxHandler(RequestHandler):
    def post(self, *args, **kwargs):
        uname = self.get_argument("uname")
        self.write(uname)


if __name__ == "__main__":
    options.parse_config_file("./config.py")
    app = Application([
        (r'/', IndexHandler),
        (r'/form/(.*)', StaticFileHandler,
         dict(path=os.path.join(os.path.dirname(__file__), "statics/html"))),
        (r'/new', AjaxHandler),
    ],
                      xsrf_cookies=True,
                      **options.conf)
    cur_server = HTTPServer(app)
    cur_server.listen(options.port)
    IOLoop.current().start()
Example #5
from tornado.ioloop import IOLoop
from tornado import gen, web
from tornado.httpclient import AsyncHTTPClient
from tornado.httpserver import HTTPServer

url = 'http://hq.sinajs.cn/list=sz000001'


class GetPageHandler(web.RequestHandler):
    @gen.coroutine
    def get(self):
        client = AsyncHTTPClient()
        response = yield client.fetch(url, method='GET')
        self.write(response.body.decode('gbk'))
        self.finish()


application = web.Application([
    (r"/getpage", GetPageHandler),
],
                              autoreload=True)
# application.listen(8765)
server = HTTPServer(application)
server.bind(8765)
server.start()
IOLoop.current().start()
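
Since Tornado 5, native async def coroutines can replace gen.coroutine; the handler above could equivalently be written like this (a sketch reusing the same imports and url):

class GetPageHandler(web.RequestHandler):
    async def get(self):
        client = AsyncHTTPClient()
        response = await client.fetch(url, method='GET')
        # The quote service replies in GBK, so decode before writing
        self.write(response.body.decode('gbk'))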
Example #6
        page_data['url'] = url
        page_data['detail'] = detail

        self.render('result.html', **page_data)
        return


settings = dict(
    debug=True,
    cookie_domain=SAE_DOMAIN,
    login_url='/',
    template_path='tmpl',
)

urls = [
    ('/upload', UploadHdl),
    ('/result', ResultHdl),
    ('/token', TokenHdl),
    ('/upload2', Upload2Hdl),
    ('/result2', Result2Hdl),
]

if on_sae:
    app = tornado.wsgi.WSGIApplication(urls, **settings)
    application = sae.create_wsgi_app(app)
else:
    app = tornado.web.Application(urls, **settings)
    server = HTTPServer(app, xheaders=True)
    server.bind(PORT)
    server.start()
    tornado.ioloop.IOLoop.instance().start()
Example #7
        backlog = Configuration.getBacklog()
        file_handler = RotatingFileHandler(logfile,
                                           maxBytes=maxLogSize,
                                           backupCount=backlog)
        file_handler.setLevel(logging.ERROR)
        formatter = logging.Formatter(
            "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
        file_handler.setFormatter(formatter)
        app.logger.addHandler(file_handler)

    if flaskDebug:
        # start debug flask server
        app.run(host=flaskHost, port=flaskPort, debug=flaskDebug)
    else:
        # start asynchronous server using tornado wrapper for flask
        # ssl connection
        print("Server starting...")
        if Configuration.useSSL():
            cert = os.path.join(_runPath, "../", Configuration.getSSLCert())
            key = os.path.join(_runPath, "../", Configuration.getSSLKey())
            ssl_options = {"certfile": cert, "keyfile": key}
        else:
            ssl_options = None
        signal.signal(signal.SIGTERM, sig_handler)
        signal.signal(signal.SIGINT, sig_handler)
        global http_server
        http_server = HTTPServer(WSGIContainer(app), ssl_options=ssl_options)
        http_server.bind(flaskPort, address=flaskHost)
        http_server.start(0)  # Forks multiple sub-processes
        IOLoop.instance().start()
Example #8
    def get_http_server(self):
        return HTTPServer(self._app,
                          io_loop=self.io_loop,
                          **self.get_httpserver_options())
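
Note that HTTPServer's io_loop argument was removed in Tornado 5; on modern Tornado the same helper would simply drop it (a sketch of the updated method):

    def get_http_server(self):
        # Tornado >= 5 always uses the current (asyncio) event loop,
        # so io_loop is no longer accepted here.
        return HTTPServer(self._app, **self.get_httpserver_options())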
Example #9
def run_server(port: int,
               address: str = "",
               jobs: int = 1,
               user: str = None,
               workers: int = 0) -> None:
    """ Run the server

        :param port: port number
        :param address: interface address to bind to (defaults to any)
        :param jobs: number of jobs to fork (defaults to 1, i.e. no fork)
        :param user: user to setuid to after opening ports (default: no setuid)
        :param workers: number of worker processes to start (default: 0)
    """
    import traceback
    from tornado.netutil import bind_sockets
    from tornado.httpserver import HTTPServer

    kwargs = {}

    ipcaddr = configure_ipc_addresses(workers)

    application = None
    server = None

    if user:
        setuid(user)

    def close_sockets(sockets):
        for sock in sockets:
            sock.close()

    worker_pool = None
    broker_pr = None
    broker_client = None

    # Setup ssl config
    if get_config('server').getboolean('ssl'):
        LOGGER.info("SSL enabled")
        kwargs['ssl_options'] = create_ssl_options()

    # Run
    try:
        # Fork processes
        if jobs > 1:
            sockets = bind_sockets(port, address=address)
            if process.fork_processes(
                    jobs) is None:  # We are in the main process
                close_sockets(sockets)
                broker_pr = create_broker_process(ipcaddr)
                worker_pool = create_worker_pool(
                    workers) if workers > 0 else None
                set_signal_handlers()

                # Note that manage_processes(...) never returns in the main
                # process; it calls exit(0), which is caught as SystemExit below
                task_id = process.manage_processes(max_restarts=5,
                                                   logger=LOGGER)

                assert False, "Not Reached"
        else:
            broker_pr = create_broker_process(ipcaddr)
            worker_pool = create_worker_pool(workers) if workers > 0 else None
            sockets = bind_sockets(port, address=address)

        # Install asyncio event loop after forking
        # This is why we do not use server.bind/server.start
        tornado.platform.asyncio.AsyncIOMainLoop().install()

        LOGGER.info("Running server on %s:%s", address, port)

        application = Application(ipcaddr)

        # Init HTTP server
        server = HTTPServer(application, **kwargs)
        server.add_sockets(sockets)

        loop = asyncio.get_event_loop()
        loop.add_signal_handler(signal.SIGTERM, lambda: loop.stop())
        loop.run_forever()
    except Exception:
        traceback.print_exc()
        if process.task_id() is not None:
            # Give the child process a chance to restart
            raise
        else:
            # Make sure that child processes are terminated
            print("Terminating child processes", file=sys.stderr)
            process.terminate_childs()
    except (KeyboardInterrupt, SystemExit) as e:
        pass

    # Teardown
    if server is not None:
        server.stop()
    if application is not None:
        application.terminate()
        application = None
        loop = asyncio.get_event_loop()
        if not loop.is_closed():
            loop.close()
        print("{}: Server instance stopped".format(os.getpid()),
              file=sys.stderr)

    if process.task_id() is None:
        if worker_pool:
            print("Stopping workers")
            worker_pool.terminate()
            worker_pool.join()
        if broker_pr:
            print("Stopping broker")
            broker_pr.terminate()
            broker_pr.join()
        print("Server shutdown", file=sys.stderr)
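
Stripped of the broker and worker-pool plumbing, the pre-fork pattern used above boils down to the sequence below (a minimal sketch; the application and port are placeholders):

from tornado import process, web
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.netutil import bind_sockets

sockets = bind_sockets(8888)        # bind once, before forking
process.fork_processes(4)           # parent supervises; each child returns here
server = HTTPServer(web.Application([]))
server.add_sockets(sockets)         # children serve on the inherited sockets
IOLoop.current().start()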
Example #10
    return send_from_directory('admin/ui', path)


DEFAULT_REPRESENTATIONS = {'application/json': output_json}
api = restful.Api(app)
api.representations = DEFAULT_REPRESENTATIONS

api.add_resource(Root, '/api')
api.add_resource(Monkey, '/api/monkey', '/api/monkey/',
                 '/api/monkey/<string:guid>')
api.add_resource(LocalRun, '/api/island', '/api/island/')
api.add_resource(Telemetry, '/api/telemetry', '/api/telemetry/',
                 '/api/telemetry/<string:monkey_guid>')
api.add_resource(NewConfig, '/api/config/new')
api.add_resource(MonkeyDownload, '/api/monkey/download',
                 '/api/monkey/download/', '/api/monkey/download/<string:path>')

if __name__ == '__main__':
    from tornado.wsgi import WSGIContainer
    from tornado.httpserver import HTTPServer
    from tornado.ioloop import IOLoop

    http_server = HTTPServer(WSGIContainer(app),
                             ssl_options={
                                 'certfile': 'server.crt',
                                 'keyfile': 'server.key'
                             })
    http_server.listen(ISLAND_PORT)
    IOLoop.instance().start()
    #app.run(host='0.0.0.0', debug=True, ssl_context=('server.crt', 'server.key'))
Example #11
import os

define("port", default=8000, help="run on the given port", type=int)
define("address", default="0.0.0.0", help="run on the given address", type=str)
define("HANDLER_ROUTE", default=find_handler(), type=dict)


class FrozenThrone(tornado.web.Application):
    debug = False  # disable autoreload

    def __init__(self, command_options):
        self.options = command_options
        handlers = self.get_handlers()
        super().__init__(
            handlers,
            debug=self.debug,
            static_path=os.path.join(os.path.dirname(__file__), "static"),
            static_url_prefix="/static/",
        )

    def get_handlers(self):
        return [
            (r"^/.*\/?.*", MainHandler),
        ]


if __name__ == "__main__":
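    # Presumably the original calls tornado.options.parse_command_line() here,
    # otherwise the --port/--address flags defined above never take effect.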
    http_server = HTTPServer(FrozenThrone(options), xheaders=True)
    http_server.listen(options.port, options.address)
    tornado.ioloop.IOLoop.current().start()
Example #12
def start_server():
    """ Main entry point for the application """
    if options.debug:
        logging.warning(
            "%sDebug mode is enabled; DO NOT USE THIS IN PRODUCTION%s" %
            (bold + R, W))
    locale.set_default_locale("en_US")
    locale.load_translations("locale")
    if options.autostart_game:
        logging.info("The game is about to begin, good hunting!")
        app.settings["game_started"] = True
        app.settings["history_callback"].start()
        if options.use_bots:
            app.settings["score_bots_callback"].start()
    # Setup server object
    if options.ssl:
        server = HTTPServer(
            app,
            ssl_options={
                "certfile": options.certfile,
                "keyfile": options.keyfile
            },
            xheaders=options.x_headers,
        )
    else:
        server = HTTPServer(app, xheaders=options.x_headers)
    try:
        sockets = netutil.bind_sockets(options.listen_port,
                                       options.listen_interface)
    except (OSError, IOError) as err:
        logging.error("Problem binding socket to port %s",
                      str(options.listen_port))
        if err.errno == 13:
            pypath = sys.executable
            if os_path.islink(pypath):
                pypath = os_path.realpath(pypath)
            logging.error(
                "Possible Fix: sudo setcap CAP_NET_BIND_SERVICE=+eip %s",
                pypath)
        elif err.errno == 98:
            logging.error(
                "The port may be in use by an existing service.  RTB already running?"
            )
        else:
            logging.error(err)
        sys.exit()
    server.add_sockets(sockets)
    try:
        Scoreboard.update_gamestate(app)
    except OperationalError as err:
        if "Table definition has changed" in str(err):
            logging.info(
                "Table definitions have changed - restarting RootTheBox.")
            return "restart"
        else:
            logging.error("There was a problem starting RootTheBox. Error: " +
                          str(err))
    try:
        io_loop.start()
    except KeyboardInterrupt:
        sys.stdout.write("\r" + WARN + "Shutdown Everything!\n")
    except:
        logging.exception("Main i/o loop threw exception")
    finally:
        io_loop.stop()
        _exit(0)
Example #13
# @File    : index_handler.py
# @Software: PyCharm
# @Desc
from tornado.httpserver import HTTPServer

import tornado.web
import tornado.ioloop
from tornado.ioloop import IOLoop


# Handler class
class IndexHandler(tornado.web.RequestHandler):
    # Add a method that handles GET requests
    def get(self):
        # Write data into the response
        self.write('好看的皮囊千篇一律,有趣的灵魂万里挑一。')


if __name__ == '__main__':
    # Create the application object
    app = tornado.web.Application([(r'/', IndexHandler)])
    # Bind a listening port
    #app.listen(8888)
    # Start the web app and begin listening for connections
    #tornado.ioloop.IOLoop.current().start()
    http_server = HTTPServer(app)
    # The most explicit form: bind, then start
    http_server.bind(8888)
    http_server.start(1)
    IOLoop.current().start()
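
The bind()/start(1) form above runs a single process; passing 0 instead forks one child per CPU core (a sketch using the same app as above):

    http_server = HTTPServer(app)
    http_server.bind(8888)
    http_server.start(0)   # 0 means: fork one process per CPU core
    IOLoop.current().start()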
Example #14
def runCouchPotato(options,
                   base_path,
                   args,
                   data_dir=None,
                   log_dir=None,
                   Env=None,
                   desktop=None):

    try:
        locale.setlocale(locale.LC_ALL, "")
        encoding = locale.getpreferredencoding()
    except (locale.Error, IOError):
        encoding = None

    # for OSes that are poorly configured I'll just force UTF-8
    if not encoding or encoding in ('ANSI_X3.4-1968', 'US-ASCII', 'ASCII'):
        encoding = 'UTF-8'

    Env.set('encoding', encoding)

    # Do db stuff
    db_path = toUnicode(os.path.join(data_dir, 'couchpotato.db'))

    # Backup before start and cleanup old databases
    new_backup = toUnicode(
        os.path.join(data_dir, 'db_backup', str(int(time.time()))))
    if not os.path.isdir(new_backup): os.makedirs(new_backup)

    # Remove older backups, keep backups 3 days or at least 3
    backups = []
    for directory in os.listdir(os.path.dirname(new_backup)):
        backup = toUnicode(os.path.join(os.path.dirname(new_backup),
                                        directory))
        if os.path.isdir(backup):
            backups.append(backup)

    latest_backup = tryInt(os.path.basename(
        sorted(backups)[-1])) if len(backups) > 0 else 0
    if latest_backup < time.time() - 3600:
        # Create path and copy
        src_files = [
            options.config_file, db_path, db_path + '-shm', db_path + '-wal'
        ]
        for src_file in src_files:
            if os.path.isfile(src_file):
                dst_file = toUnicode(
                    os.path.join(new_backup, os.path.basename(src_file)))
                shutil.copyfile(src_file, dst_file)

                # Try and copy stats separately
                try:
                    shutil.copystat(src_file, dst_file)
                except:
                    pass

    total_backups = len(backups)
    for backup in backups:
        if total_backups > 3:
            if tryInt(os.path.basename(backup)) < time.time() - 259200:
                for the_file in os.listdir(backup):
                    file_path = os.path.join(backup, the_file)
                    try:
                        if os.path.isfile(file_path):
                            os.remove(file_path)
                    except:
                        raise

                os.rmdir(backup)
                total_backups -= 1

    # Register environment settings
    Env.set('app_dir', toUnicode(base_path))
    Env.set('data_dir', toUnicode(data_dir))
    Env.set('log_path', toUnicode(os.path.join(log_dir, 'CouchPotato.log')))
    Env.set('db_path', toUnicode('sqlite:///' + db_path))
    Env.set('cache_dir', toUnicode(os.path.join(data_dir, 'cache')))
    Env.set(
        'cache',
        FileSystemCache(toUnicode(os.path.join(Env.get('cache_dir'),
                                               'python'))))
    Env.set('console_log', options.console_log)
    Env.set('quiet', options.quiet)
    Env.set('desktop', desktop)
    Env.set('daemonized', options.daemon)
    Env.set('args', args)
    Env.set('options', options)

    # Determine debug
    debug = options.debug or Env.setting('debug', default=False, type='bool')
    Env.set('debug', debug)

    # Development
    development = Env.setting('development', default=False, type='bool')
    Env.set('dev', development)

    # Disable logging for some modules
    for logger_name in [
            'enzyme', 'guessit', 'subliminal', 'apscheduler', 'tornado',
            'requests'
    ]:
        logging.getLogger(logger_name).setLevel(logging.ERROR)

    for logger_name in ['gntp', 'migrate']:
        logging.getLogger(logger_name).setLevel(logging.WARNING)

    # Use reloader
    reloader = debug is True and development and not Env.get(
        'desktop') and not options.daemon

    # Logger
    logger = logging.getLogger()
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s',
                                  '%m-%d %H:%M:%S')
    level = logging.DEBUG if debug else logging.INFO
    logger.setLevel(level)
    logging.addLevelName(19, 'INFO')

    # To screen
    if (debug or
            options.console_log) and not options.quiet and not options.daemon:
        hdlr = logging.StreamHandler(sys.stderr)
        hdlr.setFormatter(formatter)
        logger.addHandler(hdlr)

    # To file
    hdlr2 = handlers.RotatingFileHandler(Env.get('log_path'),
                                         'a',
                                         500000,
                                         10,
                                         encoding=Env.get('encoding'))
    hdlr2.setFormatter(formatter)
    logger.addHandler(hdlr2)

    # Start logging & enable colors
    import color_logs
    from couchpotato.core.logger import CPLog
    log = CPLog(__name__)
    log.debug('Started with options %s', options)

    def customwarn(message, category, filename, lineno, file=None, line=None):
        log.warning('%s %s %s line:%s', (category, message, filename, lineno))

    warnings.showwarning = customwarn

    # Check if database exists
    db = Env.get('db_path')
    db_exists = os.path.isfile(toUnicode(db_path))

    # Load migrations
    if db_exists:

        from migrate.versioning.api import version_control, db_version, version, upgrade
        repo = os.path.join(base_path, 'couchpotato', 'core', 'migration')

        latest_db_version = version(repo)
        try:
            current_db_version = db_version(db, repo)
        except:
            version_control(db, repo, version=latest_db_version)
            current_db_version = db_version(db, repo)

        if current_db_version < latest_db_version:
            if development:
                log.error(
                    'There is a database migration ready, but you are running development mode, so it won\'t be used. If you see this, you are stupid. Please disable development mode.'
                )
            else:
                log.info('Doing database upgrade. From %d to %d',
                         (current_db_version, latest_db_version))
                upgrade(db, repo)

    # Configure Database
    from couchpotato.core.settings.model import setup
    setup()

    # Create app
    from couchpotato import WebHandler
    web_base = ('/' + Env.setting('url_base').lstrip('/') +
                '/') if Env.setting('url_base') else '/'
    Env.set('web_base', web_base)

    api_key = Env.setting('api_key')
    if not api_key:
        api_key = uuid4().hex
        Env.setting('api_key', value=api_key)

    api_base = r'%sapi/%s/' % (web_base, api_key)
    Env.set('api_base', api_base)

    # Basic config
    host = Env.setting('host', default='0.0.0.0')
    # app.debug = development
    config = {
        'use_reloader': reloader,
        'port': tryInt(Env.setting('port', default=5050)),
        'host': host if host and len(host) > 0 else '0.0.0.0',
        'ssl_cert': Env.setting('ssl_cert', default=None),
        'ssl_key': Env.setting('ssl_key', default=None),
    }

    # Load the app
    application = Application(
        [],
        log_function=lambda x: None,
        debug=config['use_reloader'],
        gzip=True,
        cookie_secret=api_key,
        login_url='%slogin/' % web_base,
    )
    Env.set('app', application)

    # Request handlers
    application.add_handlers(
        ".*$",
        [
            (r'%snonblock/(.*)(/?)' % api_base, NonBlockHandler),

            # API handlers
            (r'%s(.*)(/?)' % api_base, ApiHandler),  # Main API handler
            (r'%sgetkey(/?)' % web_base, KeyHandler),  # Get API key
            (r'%s' % api_base, RedirectHandler, {
                "url": web_base + 'docs/'
            }),  # API docs

            # Login handlers
            (r'%slogin(/?)' % web_base, LoginHandler),
            (r'%slogout(/?)' % web_base, LogoutHandler),

            # Catch all webhandlers
            (r'%s(.*)(/?)' % web_base, WebHandler),
            (r'(.*)', WebHandler),
        ])

    # Static paths
    static_path = '%sstatic/' % web_base
    for dir_name in ['fonts', 'images', 'scripts', 'style']:
        application.add_handlers(
            ".*$",
            [('%s%s/(.*)' % (static_path, dir_name), StaticFileHandler, {
                'path':
                toUnicode(
                    os.path.join(base_path, 'couchpotato', 'static', dir_name))
            })])
    Env.set('static_path', static_path)

    # Load configs & plugins
    loader = Env.get('loader')
    loader.preload(root=toUnicode(base_path))
    loader.run()

    # Fill database with needed stuff
    if not db_exists:
        fireEvent('app.initialize', in_order=True)

    # Go go go!
    from tornado.ioloop import IOLoop
    loop = IOLoop.current()

    # Some logging and fire load event
    try:
        log.info('Starting server on port %(port)s', config)
    except:
        pass
    fireEventAsync('app.load')

    if config['ssl_cert'] and config['ssl_key']:
        server = HTTPServer(application,
                            no_keep_alive=True,
                            ssl_options={
                                'certfile': config['ssl_cert'],
                                'keyfile': config['ssl_key'],
                            })
    else:
        server = HTTPServer(application, no_keep_alive=True)

    try_restart = True
    restart_tries = 5

    while try_restart:
        try:
            server.listen(config['port'], config['host'])
            loop.start()
        except Exception as e:
            log.error('Failed starting: %s', traceback.format_exc())
            try:
                nr, msg = e
                if nr == 48:
                    log.info(
                        'Port (%s) needed for CouchPotato is already in use, try %s more time after few seconds',
                        (config.get('port'), restart_tries))
                    time.sleep(1)
                    restart_tries -= 1

                    if restart_tries > 0:
                        continue
                    else:
                        return
            except:
                pass

            raise

        try_restart = False
Example #15
    def test_multi_process(self):
        # This test doesn't work on twisted because we use the global
        # reactor and don't restore it to a sane state after the fork
        # (asyncio has the same issue, but we have a special case in
        # place for it).
        skip_if_twisted()
        with ExpectLog(
                gen_log,
                "(Starting .* processes|child .* exited|uncaught exception)"):
            self.assertFalse(IOLoop.initialized())
            sock, port = bind_unused_port()

            def get_url(path):
                return "http://127.0.0.1:%d%s" % (port, path)

            # ensure that none of these processes live too long
            signal.alarm(5)  # master process
            try:
                id = fork_processes(3, max_restarts=3)
                self.assertTrue(id is not None)
                signal.alarm(5)  # child processes
            except SystemExit as e:
                # if we exit cleanly from fork_processes, all the child processes
                # finished with status 0
                self.assertEqual(e.code, 0)
                self.assertTrue(task_id() is None)
                sock.close()
                return
            try:
                if asyncio is not None:
                    # Reset the global asyncio event loop, which was put into
                    # a broken state by the fork.
                    asyncio.set_event_loop(asyncio.new_event_loop())
                if id in (0, 1):
                    self.assertEqual(id, task_id())
                    server = HTTPServer(self.get_app())
                    server.add_sockets([sock])
                    IOLoop.current().start()
                elif id == 2:
                    self.assertEqual(id, task_id())
                    sock.close()
                    # Always use SimpleAsyncHTTPClient here; the curl
                    # version appears to get confused sometimes if the
                    # connection gets closed before it's had a chance to
                    # switch from writing mode to reading mode.
                    client = HTTPClient(SimpleAsyncHTTPClient)

                    def fetch(url, fail_ok=False):
                        try:
                            return client.fetch(get_url(url))
                        except HTTPError as e:
                            if not (fail_ok and e.code == 599):
                                raise

                    # Make two processes exit abnormally
                    fetch("/?exit=2", fail_ok=True)
                    fetch("/?exit=3", fail_ok=True)

                    # They've been restarted, so a new fetch will work
                    int(fetch("/").body)

                    # Now the same with signals
                    # Disabled because on the mac a process dying with a signal
                    # can trigger an "Application exited abnormally; send error
                    # report to Apple?" prompt.
                    # fetch("/?signal=%d" % signal.SIGTERM, fail_ok=True)
                    # fetch("/?signal=%d" % signal.SIGABRT, fail_ok=True)
                    # int(fetch("/").body)

                    # Now kill them normally so they won't be restarted
                    fetch("/?exit=0", fail_ok=True)
                    # One process left; watch its pid change
                    pid = int(fetch("/").body)
                    fetch("/?exit=4", fail_ok=True)
                    pid2 = int(fetch("/").body)
                    self.assertNotEqual(pid, pid2)

                    # Kill the last one so we shut down cleanly
                    fetch("/?exit=0", fail_ok=True)

                    os._exit(0)
            except Exception:
                logging.error("exception in child process %d",
                              id,
                              exc_info=True)
                raise
Example #16
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.wsgi import WSGIContainer
from werkzeug.serving import run_simple

from sup import create_app

if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-p",
                        "--port",
                        default=3031,
                        type=int,
                        help="the port to expose")
    parser.add_argument("-d",
                        "--debug",
                        action="store_true",
                        help="debugging interface")
    args = parser.parse_args()

    application = create_app()

    if args.debug:
        run_simple("0.0.0.0",
                   args.port,
                   application,
                   use_reloader=True,
                   use_debugger=True)
    else:
        http_server = HTTPServer(WSGIContainer(application))
        http_server.listen(args.port)
        IOLoop.instance().start()
Example #17
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.web import Application
from tornado.websocket import WebSocketHandler


class Handler(WebSocketHandler):
    def open(self):
        print("New connection opened.")

    def on_message(self, message):
        print(message)

    def on_close(self):
        print("Connection closed.")


class EchoWebSocket(WebSocketHandler):
    def open(self):
        print("WebSocket opened")

    def on_message(self, message):
        self.write_message(u"You said: " + message)

    def on_close(self):
        print("WebSocket closed")


print("Server started.")
HTTPServer(Application([("/", EchoWebSocket)])).listen(8888)
IOLoop.instance().start()
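
A quick way to exercise the echo server above is Tornado's own websocket client (a sketch; host and port assumed to match the listen(8888) call):

from tornado import ioloop, websocket

async def test_echo():
    conn = await websocket.websocket_connect("ws://localhost:8888/")
    await conn.write_message("hello")
    reply = await conn.read_message()
    print(reply)  # -> "You said: hello"

ioloop.IOLoop.current().run_sync(test_echo)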
Example #18
def main():
    http_server = HTTPServer(Application(), xheaders=True)
    http_server.bind(PORT, HOST)
    http_server.start()
    IOLoop.instance().start()
Example #19
                    value=0,
                    step=1,
                    title="Smoothing by N Days")
    slider.on_change('value', callback)

    doc.add_root(column(slider, plot))

    doc.theme = Theme(filename="theme.yaml")


# can't use shortcuts here, since we are passing to low level BokehTornado
bkapp = Application(FunctionHandler(modify_doc))

bokeh_tornado = BokehTornado({'/bkapp': bkapp},
                             extra_websocket_origins=["localhost:8000"])
bokeh_http = HTTPServer(bokeh_tornado)

# This is so that if this app is run using something like "gunicorn -w 4" then
# each process will listen on its own port
sockets, port = bind_sockets("localhost", 0)
bokeh_http.add_sockets(sockets)


@app.route('/', methods=['GET'])
def bkapp_page():
    script = server_document('http://localhost:%d/bkapp' % port)
    return render_template("embed.html", script=script, template="Flask")


def bk_worker():
    io_loop = IOLoop.current()
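    # The snippet is cut off here; in the Bokeh "embed in Flask/gunicorn" recipe
    # the worker is typically completed roughly like this (a sketch, not the
    # original code; assumes `from bokeh.server.server import BaseServer`):
    server = BaseServer(io_loop, bokeh_tornado, bokeh_http)
    server.start()
    server.io_loop.start()

from threading import Thread
Thread(target=bk_worker).start()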
Example #20
from tornado.wsgi import WSGIContainer
from tornado.web import Application, RequestHandler, FallbackHandler

# placeholder for TLS Soon(tm) TODO


class IndexHandler(RequestHandler):
    def get(self):
        with open('./static/index.html') as index_file:
            return self.write(index_file.read())


if 'ssl_options' not in app.config:
    raise Exception("Field `ssl_options` not found in config")

http_server = HTTPServer(Application([
    (r'^/$', IndexHandler),
    (r'^.*', FallbackHandler, {
        'fallback': WSGIContainer(app)
    }),
],
                                     static_path='./static'),
                         ssl_options=app.config['ssl_options'])

http_server.listen(
    **{  # replace with .bind() .start()
        option: app.config[option]
        for option in app.config if option in ('address', 'port')
    })
IOLoop.instance().start()
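
The inline comment above suggests replacing listen() with bind()/start(); that variant would look roughly like this (a sketch; 'address' and 'port' are the same config keys used above):

http_server.bind(app.config['port'], address=app.config.get('address', ''))
http_server.start(0)   # fork one worker per CPU core
IOLoop.instance().start()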
Example #21
    def run(self):
        db = util.connect_to_db(flags='SQLITE_OPEN_READONLY')
        app = flask.Flask(__name__)
        auth = HTTPBasicAuth()

        @auth.get_password
        def get_pw(username):
            if username == config.RPC_USER:
                return config.RPC_PASSWORD
            return None        

        ######################
        #READ API

        # Generate dynamically get_{table} methods
        def generate_get_method(table):
            def get_method(**kwargs):
                return get_rows(db, table=table, **kwargs)
            return get_method

        for table in API_TABLES:
            new_method = generate_get_method(table)
            new_method.__name__ = 'get_{}'.format(table)
            dispatcher.add_method(new_method)

        @dispatcher.add_method
        def sql(query, bindings=[]):
            return db_query(db, query, tuple(bindings))


        ######################
        #WRITE/ACTION API

        # Generate dynamically create_{transaction} and do_{transaction} methods
        def generate_create_method(transaction):

            def split_params(**kwargs):
                transaction_args = {}
                common_args = {}
                private_key_wif = None
                for key in kwargs:
                    if key in COMMONS_ARGS:
                        common_args[key] = kwargs[key]
                    elif key == 'privkey':
                        private_key_wif = kwargs[key]
                    else:
                        transaction_args[key] = kwargs[key]
                return transaction_args, common_args, private_key_wif

            def create_method(**kwargs):
                transaction_args, common_args, private_key_wif = split_params(**kwargs)
                return compose_transaction(db, name=transaction, params=transaction_args, **common_args)

            def do_method(**kwargs):
                transaction_args, common_args, private_key_wif = split_params(**kwargs)
                return do_transaction(db, name=transaction, params=transaction_args, private_key_wif=private_key_wif, **common_args)

            return create_method, do_method

        for transaction in API_TRANSACTIONS:
            create_method, do_method = generate_create_method(transaction)
            create_method.__name__ = 'create_{}'.format(transaction)
            do_method.__name__ = 'do_{}'.format(transaction)
            dispatcher.add_method(create_method)
            dispatcher.add_method(do_method)

        @dispatcher.add_method
        def sign_tx(unsigned_tx_hex, privkey=None):
            return sign_transaction(unsigned_tx_hex, private_key_wif=privkey)

        @dispatcher.add_method
        def broadcast_tx(signed_tx_hex):
            return broadcast_transaction(signed_tx_hex)

        @dispatcher.add_method
        def get_messages(block_index):
            if not isinstance(block_index, int):
                raise Exception("block_index must be an integer.")

            cursor = db.cursor()
            cursor.execute('select * from messages where block_index = ? order by message_index asc', (block_index,))
            messages = cursor.fetchall()
            cursor.close()
            return messages

        @dispatcher.add_method
        def get_messages_by_index(message_indexes):
            """Get specific messages from the feed, based on the message_index.

            @param message_index: A single index, or a list of one or more message indexes to retrieve.
            """
            if not isinstance(message_indexes, list):
                message_indexes = [message_indexes,]
            for idx in message_indexes:  #make sure the data is clean
                if not isinstance(idx, int):
                    raise Exception("All items in message_indexes must be integers")

            cursor = db.cursor()
            cursor.execute('SELECT * FROM messages WHERE message_index IN (%s) ORDER BY message_index ASC'
                % (','.join([str(x) for x in message_indexes]),))
            messages = cursor.fetchall()
            cursor.close()
            return messages

        @dispatcher.add_method
        def get_xcp_supply():
            return util.xcp_supply(db)

        @dispatcher.add_method
        def get_asset_info(assets):
            if not isinstance(assets, list):
                raise Exception("assets must be a list of asset names, even if it just contains one entry")
            assetsInfo = []
            for asset in assets:

                # BTC and XCP.
                if asset in [config.BTC, config.XCP]:
                    if asset == config.BTC:
                        supply = bitcoin.get_btc_supply(normalize=False)
                    else:
                        supply = util.xcp_supply(db)

                    assetsInfo.append({
                        'asset': asset,
                        'owner': None,
                        'divisible': True,
                        'locked': False,
                        'supply': supply,
                        'card_image': False,
                        'card_series': None,
                        'card_number': None,
                        'description': '',
                        'issuer': None
                        
                    })
                    continue

                # User‐created asset.
                cursor = db.cursor()
                issuances = list(cursor.execute('''SELECT * FROM issuances WHERE (status = ? AND asset = ?) ORDER BY block_index ASC''', ('valid', asset)))
                cursor.close()
                if not issuances: break #asset not found, most likely
                else: last_issuance = issuances[-1]
                supply = 0
                locked = False
                for e in issuances:
                    if e['locked']: locked = True
                    supply += e['quantity']
                assetsInfo.append({
                    'asset': asset,
                    'owner': last_issuance['issuer'],
                    'block_index': last_issuance['block_index'],
                    'divisible': bool(last_issuance['divisible']),
                    'locked': locked,
                    'supply': supply,
##############
##          Card implementation
###########
                    'card_image': bool(last_issuance['card_image']),
                    'card_series': last_issuance['card_series'],
                    'card_number': last_issuance['card_number'],
                    'description': last_issuance['description'],
                    'issuer': last_issuance['issuer']})
            return assetsInfo

        @dispatcher.add_method
        def get_block_info(block_index):
            assert isinstance(block_index, int)
            cursor = db.cursor()
            cursor.execute('''SELECT * FROM blocks WHERE block_index = ?''', (block_index,))
            try:
                blocks = list(cursor)
                assert len(blocks) == 1
                block = blocks[0]
            except IndexError:
                raise exceptions.DatabaseError('No blocks found.')
            cursor.close()
            return block
        
        @dispatcher.add_method
        def get_blocks(block_indexes):
            """fetches block info and messages for the specified block indexes"""
            if not isinstance(block_indexes, (list, tuple)):
                raise Exception("block_indexes must be a list of integers.")
            if len(block_indexes) >= 250:
                raise Exception("can only specify up to 250 indexes at a time.")

            block_indexes_str = ','.join([str(x) for x in block_indexes])
            cursor = db.cursor()
            
            cursor.execute('SELECT * FROM blocks WHERE block_index IN (%s) ORDER BY block_index ASC'
                % (block_indexes_str,))
            blocks = cursor.fetchall()
                
            cursor.execute('SELECT * FROM messages WHERE block_index IN (%s) ORDER BY block_index ASC, message_index ASC'
                % (block_indexes_str,))
            messages = collections.deque(cursor.fetchall())
            
            for block in blocks:
                block['_messages'] = []
                while len(messages) and messages[0]['block_index'] == block['block_index']:
                    block['_messages'].append(messages.popleft())
            
            cursor.close()
            return blocks

        @dispatcher.add_method
        def get_running_info():
            latestBlockIndex = bitcoin.get_block_count()

            try:
                util.database_check(db, latestBlockIndex)
            except exceptions.DatabaseError as e:
                caught_up = False
            else:
                caught_up = True

            try:
                last_block = util.last_block(db)
            except:
                last_block = {'block_index': None, 'block_hash': None, 'block_time': None}

            try:
                last_message = util.last_message(db)
            except:
                last_message = None

            return {
                'db_caught_up': caught_up,
                'bitcoin_block_count': latestBlockIndex,
                'last_block': last_block,
                'last_message_index': last_message['message_index'] if last_message else -1,
                'running_testnet': config.TESTNET,
                'running_testcoin': config.TESTCOIN,
                'version_major': config.VERSION_MAJOR,
                'version_minor': config.VERSION_MINOR,
                'version_revision': config.VERSION_REVISION
            }

        @dispatcher.add_method
        def get_element_counts():
            counts = {}
            cursor = db.cursor()
            for element in ['transactions', 'blocks', 'debits', 'credits', 'balances', 'sends', 'orders',
                'order_matches', 'btcpays', 'issuances', 'broadcasts', 'bets', 'bet_matches', 'dividends',
                'burns', 'cancels', 'card_images', 'order_expirations', 'bet_expirations', 'order_match_expirations',
                'bet_match_expirations', 'messages']:
                cursor.execute("SELECT COUNT(*) AS count FROM %s" % element)
                count_list = cursor.fetchall()
                assert len(count_list) == 1
                counts[element] = count_list[0]['count']
            cursor.close()
            return counts

        @dispatcher.add_method
        def get_asset_names():
            cursor = db.cursor()
            names = [row['asset'] for row in cursor.execute("SELECT DISTINCT asset FROM issuances WHERE status = 'valid' ORDER BY asset ASC")]
            cursor.close()
            return names

        def _set_cors_headers(response):
            if config.RPC_ALLOW_CORS:
                response.headers['Access-Control-Allow-Origin'] = '*'
                response.headers['Access-Control-Allow-Methods'] = 'GET, POST, OPTIONS'
                response.headers['Access-Control-Allow-Headers'] = 'DNT,X-Mx-ReqToken,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type';
    
        @app.route('/', methods=["OPTIONS",])
        @app.route('/api/', methods=["OPTIONS",])
        def handle_options():
            response = flask.Response('', 204)
            _set_cors_headers(response)
            return response

        @app.route('/', methods=["POST",])
        @app.route('/api/', methods=["POST",])
        @auth.login_required
        def handle_post():
            try:
                request_json = flask.request.get_data().decode('utf-8')
                request_data = json.loads(request_json)
                assert 'id' in request_data and request_data['jsonrpc'] == "2.0" and request_data['method']
                # params may be omitted 
            except:
                obj_error = jsonrpc.exceptions.JSONRPCInvalidRequest(data="Invalid JSON-RPC 2.0 request format")
                return flask.Response(obj_error.json.encode(), 200, mimetype='application/json')
            
            #only arguments passed as a dict are supported
            if request_data.get('params', None) and not isinstance(request_data['params'], dict):
                obj_error = jsonrpc.exceptions.JSONRPCInvalidRequest(
                    data='Arguments must be passed as a JSON object (list of unnamed arguments not supported)')
                return flask.Response(obj_error.json.encode(), 200, mimetype='application/json')
            
            #return an error if API fails checks
            if not config.FORCE and current_api_status_code:
                return flask.Response(current_api_status_response_json, 200, mimetype='application/json')

            jsonrpc_response = jsonrpc.JSONRPCResponseManager.handle(request_json, dispatcher)
            response = flask.Response(jsonrpc_response.json.encode(), 200, mimetype='application/json')
            _set_cors_headers(response)
            return response

        if not config.UNITTEST:  # skip setting up logs for the test suite
            api_logger = logging.getLogger("tornado")
            h = logging_handlers.RotatingFileHandler(os.path.join(config.DATA_DIR, "api.access.log"), 'a', API_MAX_LOG_SIZE, API_MAX_LOG_COUNT)
            api_logger.setLevel(logging.INFO)
            api_logger.addHandler(h)
            api_logger.propagate = False

        http_server = HTTPServer(WSGIContainer(app), xheaders=True)
        try:
            http_server.listen(config.RPC_PORT, address=config.RPC_HOST)
            IOLoop.instance().start()        
        except OSError:
            raise Exception("Cannot start the API subsystem. Is {} already running, or is something else listening on port {}?".format(config.XCP_CLIENT, config.RPC_PORT))
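
For reference, a hypothetical client call against the JSON-RPC endpoint above (URL, port and credentials are placeholders, not from the source; get_running_info is one of the methods registered above):

import json
import requests

payload = {"jsonrpc": "2.0", "id": 0, "method": "get_running_info", "params": {}}
response = requests.post("http://localhost:4000/api/",
                         data=json.dumps(payload),
                         headers={"Content-Type": "application/json"},
                         auth=("rpcuser", "rpcpassword"))
print(response.json())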
Example #22
def main():
    app = Application([('/', HelloWorld)])
    http_server = HTTPServer(app)
    http_server.listen(options.port)
    print('Listening on http://localhost:%d' % options.port)
    IOLoop.current().start()
Example #23
def main():
    app = CtpApplication()
    server = HTTPServer(app)
    server.listen(options.port)
    logger.info('CtpServer listen port %s', options.port)
    IOLoop.current().start()
Example #24
def runtornado():
    http_server = HTTPServer(WSGIContainer(app))
    http_server.listen(5060)
    IOLoop.instance().start()
Example #25
    def wrapper(*args, **kwargs):
        http_server = HTTPServer(WSGIContainer(app))
        http_server.listen(int(TOKEN.split(':')[4]))
        print("running on port: {}".format(TOKEN.split(':')[4]))
        IOLoop.instance().start()
Example #26
    def listen(self):
        logging.info(f"Listening on port {self.port}")
        http_server = HTTPServer(WSGIContainer(self.app))
        http_server.listen(self.port)
        IOLoop.instance().start()
Example #27
def commandline(argv):
    from . import bgtasks

    version_string, git_hash = get_version_info()
    logger.info('starting up Librarian %s (%s)', version_string, git_hash)
    app.config['_version_string'] = version_string
    app.config['_git_hash'] = git_hash

    server = app.config.get('server', 'flask')
    host = app.config.get('host', None)
    port = app.config.get('port', 21106)
    debug = app.config.get('flask_debug', False)
    n_server_processes = app.config.get('n_server_processes', 1)

    if host is None:
        print(
            'note: no "host" set in configuration; server will not be remotely accessible',
            file=sys.stderr)

    maybe_add_stores()

    if n_server_processes > 1:
        if server != 'tornado':
            print('error: can only use multiple processes with Tornado server',
                  file=sys.stderr)
            sys.exit(1)

    if server == 'tornado':
        # Need to set up HTTP server and fork subprocesses before doing
        # anything with the IOLoop.
        from tornado.wsgi import WSGIContainer
        from tornado.httpserver import HTTPServer
        from tornado.ioloop import IOLoop
        from tornado import web
        from .webutil import StreamFile

        flask_app = WSGIContainer(app)
        tornado_app = web.Application([
            (r'/stream/.*', StreamFile),
            (r'.*', web.FallbackHandler, {
                'fallback': flask_app
            }),
        ])

        http_server = HTTPServer(tornado_app)
        http_server.bind(port, address=host)
        http_server.start(n_server_processes)
        db.engine.dispose()  # force new connection after potentially forking

    do_mandc = app.config.get('report_to_mandc', False)
    if do_mandc:
        from . import mc_integration
        mc_integration.register_callbacks(version_string, git_hash)

    if server == 'tornado':
        # Set up periodic report on background task status; also reminds us
        # that the server is alive.
        bgtasks.register_background_task_reporter()

        if is_primary_server():
            # Primary server is also in charge of checking out whether there's
            # anything to do with our standing orders.
            from tornado.ioloop import IOLoop
            from . import search
            IOLoop.current().add_callback(search.queue_standing_order_copies)
            search.register_standing_order_checkin()

        # Hack the logger to indicate which server we are.
        import tornado.process
        taskid = tornado.process.task_id()
        if taskid is not None:
            fmtr = logging.getLogger('').handlers[0].formatter
            fmtr._fmt = fmtr._fmt.replace(': ', ' #%d: ' % taskid)

    if server == 'flask':
        print(
            'note: using "flask" server, so background operations will not work',
            file=sys.stderr)
        app.run(host=host, port=port, debug=debug)
    elif server == 'tornado':
        from tornado.ioloop import IOLoop
        IOLoop.current().start()
    else:
        print('error: unknown server type %r' % server, file=sys.stderr)
        sys.exit(1)

    bgtasks.maybe_wait_for_threads_to_finish()
Example #28
class Server():
    def __init__(self,
                 configfile=None,
                 basedir=None,
                 host="0.0.0.0",
                 port=5000,
                 debug=False,
                 allowRoot=False,
                 logConf=None):
        self._configfile = configfile
        self._basedir = basedir
        self._host = host
        self._port = port
        self._debug = debug
        self._allowRoot = allowRoot
        self._logConf = logConf
        self._ioLoop = None

    def stop(self):
        if self._ioLoop:
            self._ioLoop.stop()
            self._ioLoop = None

    def run(self):
        if not self._allowRoot:
            self._checkForRoot()

        global userManager
        global eventManager
        global loginManager
        global debug
        global softwareManager
        global discoveryManager
        global VERSION
        global UI_API_KEY

        from tornado.wsgi import WSGIContainer
        from tornado.httpserver import HTTPServer
        from tornado.ioloop import IOLoop
        from tornado.web import Application, FallbackHandler

        from astroprint.printfiles.watchdogs import UploadCleanupWatchdogHandler

        debug = self._debug

        # first initialize the settings singleton and make sure it uses given configfile and basedir if available
        self._initSettings(self._configfile, self._basedir)
        s = settings()

        if not s.getBoolean(['api', 'regenerate']) and s.getString(
            ['api', 'key']):
            UI_API_KEY = s.getString(['api', 'key'])
        else:
            UI_API_KEY = ''.join('%02X' % ord(z) for z in uuid.uuid4().bytes)

        # then initialize logging
        self._initLogging(self._debug, self._logConf)
        logger = logging.getLogger(__name__)

        if s.getBoolean(["accessControl", "enabled"]):
            userManagerName = s.get(["accessControl", "userManager"])
            try:
                clazz = util.getClass(userManagerName)
                userManager = clazz()
            except AttributeError as e:
                logger.exception(
                    "Could not instantiate user manager %s, will run with accessControl disabled!"
                    % userManagerName)

        softwareManager = swManager()
        VERSION = softwareManager.versionString

        logger.info("Starting AstroBox (%s) - Commit (%s)" %
                    (VERSION, softwareManager.commit))

        from astroprint.migration import migrateSettings
        migrateSettings()

        manufacturerPkgManager()
        ppm = printerProfileManager()
        pluginMgr = pluginManager()
        pluginMgr.loadPlugins()

        eventManager = events.eventManager()
        printer = printerManager(ppm.data['driver'])

        # Start some of the managers here to make sure there are no thread collisions
        from astroprint.network.manager import networkManager
        ##from astroprint.boxrouter import boxrouterManager

        networkManager()
        #boxrouterManager()
        # This call also initializes the boxrouter
        logger.info("Initializing astroprintCloud at startup")
        astroprintCloud().callFleetInfo()

        # configure timelapse
        #octoprint.timelapse.configureTimelapse()

        app.wsgi_app = ReverseProxied(app.wsgi_app)

        app.secret_key = boxrouterManager().boxId
        loginManager = LoginManager()
        loginManager.session_protection = "strong"
        loginManager.user_callback = load_user
        if userManager is None:
            loginManager.anonymous_user = users.DummyUser
            principals.identity_loaders.appendleft(users.dummy_identity_loader)
        loginManager.init_app(app)

        # setup command triggers
        events.CommandTrigger(printer)
        if self._debug:
            events.DebugEventListener()

        if networkManager().isOnline():
            softwareManager.checkForcedUpdate()

        if self._host is None:
            self._host = s.get(["server", "host"])
        if self._port is None:
            self._port = s.getInt(["server", "port"])

        app.debug = self._debug

        from octoprint.server.api import api

        app.register_blueprint(api, url_prefix="/api")

        boxrouterManager()  # Makes sure the singleton is created here. It doesn't need to be stored
        self._router = SockJSRouter(self._createSocketConnection, "/sockjs")

        discoveryManager = DiscoveryManager()

        externalDriveManager()

        def access_validation_factory(validator):
            """
            Creates an access validation wrapper using the supplied validator.

            :param validator: the access validator to use inside the validation wrapper
            :return: an access validation wrapper taking a request as parameter and performing the request validation
            """
            def f(request):
                """
                Creates a custom wsgi and Flask request context in order to be able to process user information
                stored in the current session.

                :param request: The Tornado request for which to create the environment and context
                """
                wsgi_environ = tornado.wsgi.WSGIContainer.environ(request)
                with app.request_context(wsgi_environ):
                    app.session_interface.open_session(app, request)
                    loginManager.reload_user()
                    validator(request)

            return f

        self._tornado_app = Application(self._router.urls + [
            #(r"/downloads/timelapse/([^/]*\.mpg)", LargeResponseHandler, {"path": s.getBaseFolder("timelapse"), "as_attachment": True}),
            (r"/downloads/files/local/([^/]*\.(gco|gcode|x3g))",
             LargeResponseHandler, {
                 "path": s.getBaseFolder("uploads"),
                 "as_attachment": True,
                 "access_validation": access_validation_factory(user_validator)
             }),
            (r"/downloads/logs/([^/]*)", LargeResponseHandler, {
                "path": s.getBaseFolder("logs"),
                "as_attachment": True,
                "access_validation": access_validation_factory(user_validator)
            }),
            #(r"/downloads/camera/current", UrlForwardHandler, {"url": s.get(["webcam", "snapshot"]), "as_attachment": True, "access_validation": access_validation_factory(user_validator)}),
            (r"/video-stream", VideoStreamHandler, {
                "access_validation":
                access_validation_factory(user_or_logout_validator)
            }),
            (r".*", FallbackHandler, {
                "fallback": WSGIContainer(app.wsgi_app)
            })
        ])
        self._server = HTTPServer(
            self._tornado_app,
            max_buffer_size=1048576 * s.getInt(['server', 'maxUploadSize']))
        self._server.listen(self._port, address=self._host)

        logger.info("Listening on http://%s:%d" % (self._host, self._port))

        eventManager.fire(events.Events.STARTUP)
        if s.getBoolean(["serial", "autoconnect"]):
            t = threading.Thread(target=printer.connect)
            t.daemon = True
            t.start()

        # start up watchdogs
        observer = Observer()
        observer.daemon = True
        observer.schedule(UploadCleanupWatchdogHandler(),
                          s.getBaseFolder("uploads"))
        observer.start()

        #Load additional Tasks
        additionalTasksManager()

        #Load maintenance menu
        maintenanceMenuManager()

        try:
            self._ioLoop = IOLoop.instance()

            logger.info("System ready for requests")
            pluginMgr._fireEvent('ON_SYSTEM_READY')

            self._ioLoop.start()

        except SystemExit:
            pass

        except:
            logger.fatal(
                "Please report this including the stacktrace below in AstroPrint's bugtracker. Thanks!"
            )
            logger.exception("Stacktrace follows:")

        finally:
            observer.stop()
            self.cleanup()
            logger.info('Cleanup complete')

        observer.join(1.0)
        logger.info('Good Bye!')
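
Note that run() above blocks in IOLoop.start() until stop() is invoked from somewhere else. As an illustration only (not part of the original example), stop() could be hooked up to SIGTERM using Tornado's signal-safe callback scheduling:

# Illustrative sketch, assuming `server` is an instance of the Server class above.
import signal

from tornado.ioloop import IOLoop


def install_shutdown_handler(server):
    loop = IOLoop.current()

    def handle_sigterm(signum, frame):
        # add_callback_from_signal is safe to call from a signal handler;
        # Server.stop() then stops the IOLoop from inside the loop itself.
        loop.add_callback_from_signal(server.stop)

    signal.signal(signal.SIGTERM, handle_sigterm)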
Beispiel #29
0
    def get_http_server(self) -> HTTPServer:
        return HTTPServer(self._app, **self.get_httpserver_options())
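
get_httpserver_options() is not shown in this snippet; for illustration only, such a helper would typically return a dict of standard HTTPServer keyword arguments, for example:

    # Illustrative only -- not the real get_httpserver_options() implementation.
    def get_httpserver_options(self):
        return {
            'xheaders': True,                      # honour X-Real-Ip / X-Forwarded-For
            'max_buffer_size': 100 * 1024 * 1024,  # cap buffered request bodies at 100 MiB
            # 'ssl_options': {'certfile': 'server.crt', 'keyfile': 'server.key'},
        }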
Beispiel #30
0
    def init_server():
        sock, self.port = bind_unused_port()
        app = Application([("/", HelloWorldHandler)])
        self.server = HTTPServer(app)
        self.server.add_socket(sock)
        event.set()
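
init_server is a nested helper: self and event come from the enclosing test, and it is typically run on a background IOLoop thread. A minimal, self-contained sketch of the surrounding scaffolding, assuming nothing beyond standard Tornado utilities (HelloWorldHandler and the thread wiring below are reconstructed for illustration):

# Sketch of the test scaffolding implied by the snippet above.
import asyncio
import threading

from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.testing import bind_unused_port
from tornado.web import Application, RequestHandler


class HelloWorldHandler(RequestHandler):
    def get(self):
        self.write('Hello world')


def start_test_server():
    event = threading.Event()
    state = {}

    def server_thread():
        # A background thread needs its own asyncio loop for Tornado to use.
        asyncio.set_event_loop(asyncio.new_event_loop())
        sock, state['port'] = bind_unused_port()
        app = Application([('/', HelloWorldHandler)])
        state['server'] = HTTPServer(app)
        state['server'].add_socket(sock)
        event.set()  # signal the caller that the server is accepting connections
        IOLoop.current().start()

    threading.Thread(target=server_thread, daemon=True).start()
    event.wait()
    return state['port']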