                self.broadcast(self.username + ' logged in')
                self.cl.append(self)

    def broadcast(self, message):
        for cl in self.cl:
            cl.write_message(message)

    def on_message(self, msg):
        if not self.authenticated:
            return
        self.broadcast(self.username + ': ' + msg)

    def on_close(self):
        if self in self.cl:
            self.cl.remove(self)
        self.broadcast(self.username + ' left room')

    def authenticate(self):
        self.authenticated = True
        return self.authenticated


if __name__ == "__main__":
    args = parser.parse_args()
    server = Application([(r'/ws', WS),
                          (r'/.*', FallbackHandler,
                           dict(fallback=WSGIContainer(app)))])
    server.listen(args.port, address=args.host)
    print(args.host, args.port)
    IOLoop.instance().start()
Example #2
#! /usr/bin/env python

from tornado.wsgi import WSGIContainer
from tornado.ioloop import IOLoop
from tornado.web import FallbackHandler, RequestHandler, Application
from app import app
import logging

class MainHandler(RequestHandler):
    def get(self):
        self.write("This message comes from Tornado ^_^")

tr = WSGIContainer(app)
logging.getLogger('tornado.access').disabled = True
application = Application([
    (r"/tornado", MainHandler),
    (r".*", FallbackHandler, dict(fallback=tr)),
])

if __name__ == "__main__":
    application.listen(80)
    IOLoop.instance().start()
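Several of these examples import a Flask application with "from app import app"; that module is not shown here. A minimal sketch of such an app.py (module name, route, and message are assumptions, not part of the original examples):

# app.py -- hypothetical minimal Flask application served through the WSGI fallback
from flask import Flask

app = Flask(__name__)


@app.route("/")
def index():
    # Any route not matched by a Tornado handler ends up here via FallbackHandler/WSGIContainer
    return "This message comes from Flask"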
Example #3
class Server():
    def __init__(self,
                 configfile=None,
                 basedir=None,
                 host="0.0.0.0",
                 port=5000,
                 debug=False):
        self._configfile = configfile
        self._basedir = basedir
        self._host = host
        self._port = port
        self._debug = debug

    def run(self):
        # Global as I can't work out a way to get it into PrinterStateConnection
        global printer
        global gcodeManager
        global userManager

        from tornado.wsgi import WSGIContainer
        from tornado.httpserver import HTTPServer
        from tornado.ioloop import IOLoop
        from tornado.web import Application, FallbackHandler

        # first initialize the settings singleton and make sure it uses given configfile and basedir if available
        self._initSettings(self._configfile, self._basedir)

        # then initialize logging
        self._initLogging(self._debug)
        logger = logging.getLogger(__name__)

        gcodeManager = gcodefiles.GcodeManager()
        printer = Printer(gcodeManager)

        if settings().getBoolean(["accessControl", "enabled"]):
            userManagerName = settings().get(["accessControl", "userManager"])
            try:
                clazz = util.getClass(userManagerName)
                userManager = clazz()
            except AttributeError:
                logger.exception(
                    "Could not instantiate user manager %s, will run with accessControl disabled!"
                    % userManagerName)

        app.secret_key = "k3PuVYgtxNm8DXKKTw2nWmFQQun9qceV"
        login_manager = LoginManager()
        login_manager.session_protection = "strong"
        login_manager.user_callback = load_user
        if userManager is None:
            login_manager.anonymous_user = users.DummyUser
            principals.identity_loaders.appendleft(users.dummy_identity_loader)
        login_manager.init_app(app)

        if self._host is None:
            self._host = settings().get(["server", "host"])
        if self._port is None:
            self._port = settings().getInt(["server", "port"])

        logger.info("Listening on http://%s:%d" % (self._host, self._port))
        app.debug = self._debug

        self._router = tornadio2.TornadioRouter(self._createSocketConnection)

        self._tornado_app = Application(self._router.urls +
                                        [(".*", FallbackHandler, {
                                            "fallback": WSGIContainer(app)
                                        })])
        self._server = HTTPServer(self._tornado_app)
        self._server.listen(self._port, address=self._host)
        IOLoop.instance().start()
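Example #4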
    def run(self):
        self.ioloop = IOLoop.instance()
        self.alive = True
        self.server_alive = False
        if IOLOOP_PARAMETER_REMOVED:
            PeriodicCallback(self.watchdog, 1000).start()
            PeriodicCallback(self.heartbeat, 1000).start()
        else:
            PeriodicCallback(self.watchdog, 1000, io_loop=self.ioloop).start()
            PeriodicCallback(self.heartbeat, 1000, io_loop=self.ioloop).start()

        # Assume the app is a WSGI callable if it's not an
        # instance of tornado.web.Application or is an
        # instance of tornado.wsgi.WSGIApplication
        app = self.wsgi

        if tornado.version_info[0] < 6:
            if not isinstance(app, tornado.web.Application) or \
            isinstance(app, tornado.wsgi.WSGIApplication):
                app = WSGIContainer(app)

        # Monkey-patching HTTPConnection.finish to count the
        # number of requests being handled by Tornado. This
        # will help gunicorn shut down the worker if max_requests
        # is exceeded.
        httpserver = sys.modules["tornado.httpserver"]
        if hasattr(httpserver, 'HTTPConnection'):
            old_connection_finish = httpserver.HTTPConnection.finish

            def finish(other):
                self.handle_request()
                old_connection_finish(other)

            httpserver.HTTPConnection.finish = finish
            sys.modules["tornado.httpserver"] = httpserver

            server_class = tornado.httpserver.HTTPServer
        else:

            class _HTTPServer(tornado.httpserver.HTTPServer):
                def on_close(instance, server_conn):
                    self.handle_request()
                    super(_HTTPServer, instance).on_close(server_conn)

            server_class = _HTTPServer

        if self.cfg.is_ssl:
            _ssl_opt = copy.deepcopy(self.cfg.ssl_options)
            # Tornado refuses to initialize if ssl_options contains the
            # following options
            del _ssl_opt["do_handshake_on_connect"]
            del _ssl_opt["suppress_ragged_eofs"]
            if IOLOOP_PARAMETER_REMOVED:
                server = server_class(app, ssl_options=_ssl_opt)
            else:
                server = server_class(app,
                                      io_loop=self.ioloop,
                                      ssl_options=_ssl_opt)
        else:
            if IOLOOP_PARAMETER_REMOVED:
                server = server_class(app)
            else:
                server = server_class(app, io_loop=self.ioloop)

        self.server = server
        self.server_alive = True

        for s in self.sockets:
            s.setblocking(0)
            if hasattr(server, "add_socket"):  # tornado > 2.0
                server.add_socket(s)
            elif hasattr(server, "_sockets"):  # tornado 2.0
                server._sockets[s.fileno()] = s

        server.no_keep_alive = self.cfg.keepalive <= 0
        server.start(num_processes=1)

        self.ioloop.start()
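The IOLOOP_PARAMETER_REMOVED flag used above is not defined in this fragment. A plausible definition, assuming the flag tracks Tornado 5.0's removal of the io_loop keyword argument from PeriodicCallback and HTTPServer, might be:

import tornado

# Assumption: the flag encodes whether this Tornado release still accepts io_loop arguments.
# Tornado 5.0 removed the io_loop parameter, so it should only be passed on older releases.
IOLOOP_PARAMETER_REMOVED = tornado.version_info[0] >= 5

Example #5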
@mock_agent.route("/get-plugin-data/")
def get_plugin_data():
    plugin_id = request.args.get("plugin-id")
    host = request.headers["host"]

    print("Received request for {} on {}".format(plugin_id, host), end="")

    with open("data.json", "r") as data_file:
        data = json.load(data_file)

    if host in data and plugin_id in data[host]:
        print(" Responded!")
        return jsonify({"success": True,
            "value": data[host][plugin_id]["value"],
            "message": data[host][plugin_id]["message"]})

    print(" No Data!")
    return jsonify({"success": False,
        "message": "No data for this host/plugin combination"})

if __name__ == "__main__":
    http_server = HTTPServer(WSGIContainer(mock_agent),
        ssl_options={
            "certfile": "keys/server.crt",
            "keyfile": "keys/server.key"
        })
    http_server.listen(4048)
    print("Running...")
    IOLoop.instance().start()
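A hedged sketch of how a client might exercise this mock agent; the plugin id and host value are made up, and certificate verification is disabled only because the example serves a self-signed certificate:

import requests

# Query the mock agent over HTTPS (self-signed cert, hence verify=False).
response = requests.get(
    "https://localhost:4048/get-plugin-data/",
    params={"plugin-id": "example-plugin"},   # hypothetical plugin id
    headers={"Host": "example-host"},         # the handler looks the data up by Host header
    verify=False,
)
print(response.json())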
Example #6
def wms_process(app, port):
    """Main wms boss supervisor thingy"""
    http_server = HTTPServer(WSGIContainer(app))
    http_server.listen(port)
    IOLoop.instance().start()
Example #7

@APP.before_request
def make_session_permanent() -> None:
    session.permanent = True
    if flask_login.current_user.is_authenticated and flask_login.current_user.is_admin():
        APP.permanent_session_lifetime = timedelta(minutes=60)
    else:
        APP.permanent_session_lifetime = timedelta(minutes=15)


@APP.errorhandler(401)
def not_authorized(error: Exception) -> Response:
    flash('請重新登入', 'info')  # "Please log in again"
    print(error)
    return redirect(url_for('test_mod.login'))


if __name__ == '__main__':
    if APP.config.get('RUNNING_MODE') == 'debug':
        APP.run(host=APP.config.get('HOST', 'localhost'),
                port=APP.config.get('PORT', 8080))
    else:
        from tornado.wsgi import WSGIContainer
        from tornado.httpserver import HTTPServer
        from tornado.ioloop import IOLoop
        HTTP_SERVER = HTTPServer(WSGIContainer(APP))
        HTTP_SERVER.listen(APP.config.get('PORT', 8080))
        IOLoop.instance().start()
Example #8
    def run(self):
        db = util.connect_to_db(flags='SQLITE_OPEN_READONLY')
        app = flask.Flask(__name__)
        auth = HTTPBasicAuth()

        @auth.get_password
        def get_pw(username):
            if username == config.RPC_USER:
                return config.RPC_PASSWORD
            return None        

        ######################
        #READ API

        # Generate dynamically get_{table} methods
        def generate_get_method(table):
            def get_method(**kwargs):
                return get_rows(db, table=table, **kwargs)
            return get_method

        for table in API_TABLES:
            new_method = generate_get_method(table)
            new_method.__name__ = 'get_{}'.format(table)
            dispatcher.add_method(new_method)

        @dispatcher.add_method
        def sql(query, bindings=[]):
            return db_query(db, query, tuple(bindings))


        ######################
        #WRITE/ACTION API

        # Generate dynamically create_{transaction} and do_{transaction} methods
        def generate_create_method(transaction):

            def split_params(**kwargs):
                transaction_args = {}
                common_args = {}
                private_key_wif = None
                for key in kwargs:
                    if key in COMMONS_ARGS:
                        common_args[key] = kwargs[key]
                    elif key == 'privkey':
                        private_key_wif = kwargs[key]
                    else:
                        transaction_args[key] = kwargs[key]
                return transaction_args, common_args, private_key_wif

            def create_method(**kwargs):
                transaction_args, common_args, private_key_wif = split_params(**kwargs)
                return compose_transaction(db, name=transaction, params=transaction_args, **common_args)

            def do_method(**kwargs):
                transaction_args, common_args, private_key_wif = split_params(**kwargs)
                return do_transaction(db, name=transaction, params=transaction_args, private_key_wif=private_key_wif, **common_args)

            return create_method, do_method

        for transaction in API_TRANSACTIONS:
            create_method, do_method = generate_create_method(transaction)
            create_method.__name__ = 'create_{}'.format(transaction)
            do_method.__name__ = 'do_{}'.format(transaction)
            dispatcher.add_method(create_method)
            dispatcher.add_method(do_method)

        @dispatcher.add_method
        def sign_tx(unsigned_tx_hex, privkey=None):
            return sign_transaction(unsigned_tx_hex, private_key_wif=privkey)

        @dispatcher.add_method
        def broadcast_tx(signed_tx_hex):
            return broadcast_transaction(signed_tx_hex)

        @dispatcher.add_method
        def get_messages(block_index):
            if not isinstance(block_index, int):
                raise Exception("block_index must be an integer.")

            cursor = db.cursor()
            cursor.execute('select * from messages where block_index = ? order by message_index asc', (block_index,))
            messages = cursor.fetchall()
            cursor.close()
            return messages

        @dispatcher.add_method
        def get_messages_by_index(message_indexes):
            """Get specific messages from the feed, based on the message_index.

            @param message_index: A single index, or a list of one or more message indexes to retrieve.
            """
            if not isinstance(message_indexes, list):
                message_indexes = [message_indexes,]
            for idx in message_indexes:  #make sure the data is clean
                if not isinstance(idx, int):
                    raise Exception("All items in message_indexes are not integers")

            cursor = db.cursor()
            cursor.execute('SELECT * FROM messages WHERE message_index IN (%s) ORDER BY message_index ASC'
                % (','.join([str(x) for x in message_indexes]),))
            messages = cursor.fetchall()
            cursor.close()
            return messages

        @dispatcher.add_method
        def get_xcp_supply():
            return util.xcp_supply(db)

        @dispatcher.add_method
        def get_asset_info(assets):
            if not isinstance(assets, list):
                raise Exception("assets must be a list of asset names, even if it just contains one entry")
            assetsInfo = []
            for asset in assets:

                # BTC and XCP.
                if asset in [config.BTC, config.XCP]:
                    if asset == config.BTC:
                        supply = bitcoin.get_btc_supply(normalize=False)
                    else:
                        supply = util.xcp_supply(db)

                    assetsInfo.append({
                        'asset': asset,
                        'owner': None,
                        'divisible': True,
                        'locked': False,
                        'supply': supply,
                        'card_image': False,
                        'card_series': None,
                        'card_number': None,
                        'description': '',
                        'issuer': None
                        
                    })
                    continue

                # User‐created asset.
                cursor = db.cursor()
                issuances = list(cursor.execute('''SELECT * FROM issuances WHERE (status = ? AND asset = ?) ORDER BY block_index ASC''', ('valid', asset)))
                cursor.close()
                if not issuances:
                    continue  # asset not found, most likely; skip to the next requested asset
                last_issuance = issuances[-1]
                supply = 0
                locked = False
                for e in issuances:
                    if e['locked']: locked = True
                    supply += e['quantity']
                assetsInfo.append({
                    'asset': asset,
                    'owner': last_issuance['issuer'],
                    'block_index': last_issuance['block_index'],
                    'divisible': bool(last_issuance['divisible']),
                    'locked': locked,
                    'supply': supply,
                    # Card implementation fields
                    'card_image': bool(last_issuance['card_image']),
                    'card_series': last_issuance['card_series'],
                    'card_number': last_issuance['card_number'],
                    'description': last_issuance['description'],
                    'issuer': last_issuance['issuer']})
            return assetsInfo

        @dispatcher.add_method
        def get_block_info(block_index):
            assert isinstance(block_index, int)
            cursor = db.cursor()
            cursor.execute('''SELECT * FROM blocks WHERE block_index = ?''', (block_index,))
            try:
                blocks = list(cursor)
                assert len(blocks) == 1
                block = blocks[0]
            except (AssertionError, IndexError):
                raise exceptions.DatabaseError('No blocks found.')
            cursor.close()
            return block
        
        @dispatcher.add_method
        def get_blocks(block_indexes):
            """fetches block info and messages for the specified block indexes"""
            if not isinstance(block_indexes, (list, tuple)):
                raise Exception("block_indexes must be a list of integers.")
            if len(block_indexes) >= 250:
                raise Exception("can only specify up to 250 indexes at a time.")

            block_indexes_str = ','.join([str(x) for x in block_indexes])
            cursor = db.cursor()
            
            cursor.execute('SELECT * FROM blocks WHERE block_index IN (%s) ORDER BY block_index ASC'
                % (block_indexes_str,))
            blocks = cursor.fetchall()
                
            cursor.execute('SELECT * FROM messages WHERE block_index IN (%s) ORDER BY block_index ASC, message_index ASC'
                % (block_indexes_str,))
            messages = collections.deque(cursor.fetchall())
            
            for block in blocks:
                block['_messages'] = []
                while len(messages) and messages[0]['block_index'] == block['block_index']:
                    block['_messages'].append(messages.popleft())
            
            cursor.close()
            return blocks

        @dispatcher.add_method
        def get_running_info():
            latestBlockIndex = bitcoin.get_block_count()

            try:
                util.database_check(db, latestBlockIndex)
            except exceptions.DatabaseError as e:
                caught_up = False
            else:
                caught_up = True

            try:
                last_block = util.last_block(db)
            except:
                last_block = {'block_index': None, 'block_hash': None, 'block_time': None}

            try:
                last_message = util.last_message(db)
            except:
                last_message = None

            return {
                'db_caught_up': caught_up,
                'bitcoin_block_count': latestBlockIndex,
                'last_block': last_block,
                'last_message_index': last_message['message_index'] if last_message else -1,
                'running_testnet': config.TESTNET,
                'running_testcoin': config.TESTCOIN,
                'version_major': config.VERSION_MAJOR,
                'version_minor': config.VERSION_MINOR,
                'version_revision': config.VERSION_REVISION
            }

        @dispatcher.add_method
        def get_element_counts():
            counts = {}
            cursor = db.cursor()
            for element in ['transactions', 'blocks', 'debits', 'credits', 'balances', 'sends', 'orders',
                'order_matches', 'btcpays', 'issuances', 'broadcasts', 'bets', 'bet_matches', 'dividends',
                'burns', 'cancels', 'card_images', 'order_expirations', 'bet_expirations', 'order_match_expirations',
                'bet_match_expirations', 'messages']:
                cursor.execute("SELECT COUNT(*) AS count FROM %s" % element)
                count_list = cursor.fetchall()
                assert len(count_list) == 1
                counts[element] = count_list[0]['count']
            cursor.close()
            return counts

        @dispatcher.add_method
        def get_asset_names():
            cursor = db.cursor()
            names = [row['asset'] for row in cursor.execute("SELECT DISTINCT asset FROM issuances WHERE status = 'valid' ORDER BY asset ASC")]
            cursor.close()
            return names

        def _set_cors_headers(response):
            if config.RPC_ALLOW_CORS:
                response.headers['Access-Control-Allow-Origin'] = '*'
                response.headers['Access-Control-Allow-Methods'] = 'GET, POST, OPTIONS'
                response.headers['Access-Control-Allow-Headers'] = 'DNT,X-Mx-ReqToken,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type'
    
        @app.route('/', methods=["OPTIONS",])
        @app.route('/api/', methods=["OPTIONS",])
        def handle_options():
            response = flask.Response('', 204)
            _set_cors_headers(response)
            return response

        @app.route('/', methods=["POST",])
        @app.route('/api/', methods=["POST",])
        @auth.login_required
        def handle_post():
            try:
                request_json = flask.request.get_data().decode('utf-8')
                request_data = json.loads(request_json)
                assert 'id' in request_data and request_data['jsonrpc'] == "2.0" and request_data['method']
                # params may be omitted 
            except:
                obj_error = jsonrpc.exceptions.JSONRPCInvalidRequest(data="Invalid JSON-RPC 2.0 request format")
                return flask.Response(obj_error.json.encode(), 200, mimetype='application/json')
            
            #only arguments passed as a dict are supported
            if request_data.get('params', None) and not isinstance(request_data['params'], dict):
                obj_error = jsonrpc.exceptions.JSONRPCInvalidRequest(
                    data='Arguments must be passed as a JSON object (list of unnamed arguments not supported)')
                return flask.Response(obj_error.json.encode(), 200, mimetype='application/json')
            
            #return an error if API fails checks
            if not config.FORCE and current_api_status_code:
                return flask.Response(current_api_status_response_json, 200, mimetype='application/json')

            jsonrpc_response = jsonrpc.JSONRPCResponseManager.handle(request_json, dispatcher)
            response = flask.Response(jsonrpc_response.json.encode(), 200, mimetype='application/json')
            _set_cors_headers(response)
            return response

        if not config.UNITTEST:  # skip setting up logs for the test suite
            api_logger = logging.getLogger("tornado")
            h = logging_handlers.RotatingFileHandler(os.path.join(config.DATA_DIR, "api.access.log"), 'a', API_MAX_LOG_SIZE, API_MAX_LOG_COUNT)
            api_logger.setLevel(logging.INFO)
            api_logger.addHandler(h)
            api_logger.propagate = False

        http_server = HTTPServer(WSGIContainer(app), xheaders=True)
        try:
            http_server.listen(config.RPC_PORT, address=config.RPC_HOST)
            IOLoop.instance().start()        
        except OSError:
            raise Exception("Cannot start the API subsystem. Is {} already running, or is something else listening on port {}?".format(config.XCP_CLIENT, config.RPC_PORT))
Example #9
    def wrapper(*args, **kwargs):
        http_server = HTTPServer(WSGIContainer(app))
        http_server.listen(int(TOKEN.split(':')[4]))
        print("running on port: {}".format(TOKEN.split(':')[4]))
        IOLoop.instance().start()
Example #10
class Server():
    def __init__(self,
                 configfile=None,
                 basedir=None,
                 host="0.0.0.0",
                 port=5000,
                 debug=False,
                 allowRoot=False,
                 logConf=None):
        self._configfile = configfile
        self._basedir = basedir
        self._host = host
        self._port = port
        self._debug = debug
        self._allowRoot = allowRoot
        self._logConf = logConf
        self._ioLoop = None

    def stop(self):
        if self._ioLoop:
            self._ioLoop.stop()
            self._ioLoop = None

    def run(self):
        if not self._allowRoot:
            self._checkForRoot()

        global userManager
        global eventManager
        global loginManager
        global debug
        global softwareManager
        global discoveryManager
        global VERSION
        global UI_API_KEY

        from tornado.wsgi import WSGIContainer
        from tornado.httpserver import HTTPServer
        from tornado.ioloop import IOLoop
        from tornado.web import Application, FallbackHandler

        from astroprint.printfiles.watchdogs import UploadCleanupWatchdogHandler

        debug = self._debug

        # first initialize the settings singleton and make sure it uses given configfile and basedir if available
        self._initSettings(self._configfile, self._basedir)
        s = settings()

        if not s.getBoolean(['api', 'regenerate']) and s.getString(
            ['api', 'key']):
            UI_API_KEY = s.getString(['api', 'key'])
        else:
            UI_API_KEY = ''.join('%02X' % ord(z) for z in uuid.uuid4().bytes)

        # then initialize logging
        self._initLogging(self._debug, self._logConf)
        logger = logging.getLogger(__name__)

        if s.getBoolean(["accessControl", "enabled"]):
            userManagerName = s.get(["accessControl", "userManager"])
            try:
                clazz = util.getClass(userManagerName)
                userManager = clazz()
            except AttributeError:
                logger.exception(
                    "Could not instantiate user manager %s, will run with accessControl disabled!"
                    % userManagerName)

        softwareManager = swManager()
        VERSION = softwareManager.versionString

        logger.info("Starting AstroBox (%s) - Commit (%s)" %
                    (VERSION, softwareManager.commit))

        from astroprint.migration import migrateSettings
        migrateSettings()

        manufacturerPkgManager()
        ppm = printerProfileManager()
        pluginMgr = pluginManager()
        pluginMgr.loadPlugins()

        eventManager = events.eventManager()
        printer = printerManager(ppm.data['driver'])

        #Start some of the managers here to make sure there are no thread collisions
        from astroprint.network.manager import networkManager
        ##from astroprint.boxrouter import boxrouterManager

        networkManager()
        #boxrouterManager()
        #This call also initialize boxrouter
        logger.info("Initializing  astroprintCloud on starting")
        astroprintCloud().callFleetInfo()

        # configure timelapse
        #octoprint.timelapse.configureTimelapse()

        app.wsgi_app = ReverseProxied(app.wsgi_app)

        app.secret_key = boxrouterManager().boxId
        loginManager = LoginManager()
        loginManager.session_protection = "strong"
        loginManager.user_callback = load_user
        if userManager is None:
            loginManager.anonymous_user = users.DummyUser
            principals.identity_loaders.appendleft(users.dummy_identity_loader)
        loginManager.init_app(app)

        # setup command triggers
        events.CommandTrigger(printer)
        if self._debug:
            events.DebugEventListener()

        if networkManager().isOnline():
            softwareManager.checkForcedUpdate()

        if self._host is None:
            self._host = s.get(["server", "host"])
        if self._port is None:
            self._port = s.getInt(["server", "port"])

        app.debug = self._debug

        from octoprint.server.api import api

        app.register_blueprint(api, url_prefix="/api")

        boxrouterManager()  # Makes sure the singleton is created here. It doesn't need to be stored
        self._router = SockJSRouter(self._createSocketConnection, "/sockjs")

        discoveryManager = DiscoveryManager()

        externalDriveManager()

        def access_validation_factory(validator):
            """
			Creates an access validation wrapper using the supplied validator.

			:param validator: the access validator to use inside the validation wrapper
			:return: an access validation wrapper taking a request as parameter and performing the request validation
			"""
            def f(request):
                """
				Creates a custom wsgi and Flask request context in order to be able to process user information
				stored in the current session.

				:param request: The Tornado request for which to create the environment and context
				"""
                wsgi_environ = tornado.wsgi.WSGIContainer.environ(request)
                with app.request_context(wsgi_environ):
                    app.session_interface.open_session(app, request)
                    loginManager.reload_user()
                    validator(request)

            return f

        self._tornado_app = Application(self._router.urls + [
            #(r"/downloads/timelapse/([^/]*\.mpg)", LargeResponseHandler, {"path": s.getBaseFolder("timelapse"), "as_attachment": True}),
            (r"/downloads/files/local/([^/]*\.(gco|gcode|x3g))",
             LargeResponseHandler, {
                 "path": s.getBaseFolder("uploads"),
                 "as_attachment": True,
                 "access_validation": access_validation_factory(user_validator)
             }),
            (r"/downloads/logs/([^/]*)", LargeResponseHandler, {
                "path": s.getBaseFolder("logs"),
                "as_attachment": True,
                "access_validation": access_validation_factory(user_validator)
            }),
            #(r"/downloads/camera/current", UrlForwardHandler, {"url": s.get(["webcam", "snapshot"]), "as_attachment": True, "access_validation": access_validation_factory(user_validator)}),
            (r"/video-stream", VideoStreamHandler, {
                "access_validation":
                access_validation_factory(user_or_logout_validator)
            }),
            (r".*", FallbackHandler, {
                "fallback": WSGIContainer(app.wsgi_app)
            })
        ])
        self._server = HTTPServer(self._tornado_app,
                                  max_buffer_size=1048576 *
                                  s.getInt(['server', 'maxUploadSize']))
        self._server.listen(self._port, address=self._host)

        logger.info("Listening on http://%s:%d" % (self._host, self._port))

        eventManager.fire(events.Events.STARTUP)
        if s.getBoolean(["serial", "autoconnect"]):
            t = threading.Thread(target=printer.connect)
            t.daemon = True
            t.start()

        # start up watchdogs
        observer = Observer()
        observer.daemon = True
        observer.schedule(UploadCleanupWatchdogHandler(),
                          s.getBaseFolder("uploads"))
        observer.start()

        #Load additional Tasks
        additionalTasksManager()

        #Load maintenance menu
        maintenanceMenuManager()

        try:
            self._ioLoop = IOLoop.instance()

            logger.info("System ready for requests")
            pluginMgr._fireEvent('ON_SYSTEM_READY')

            self._ioLoop.start()

        except SystemExit:
            pass

        except:
            logger.fatal(
                "Please report this including the stacktrace below in AstroPrint's bugtracker. Thanks!"
            )
            logger.exception("Stacktrace follows:")

        finally:
            observer.stop()
            self.cleanup()
            logger.info('Cleanup complete')

        observer.join(1.0)
        logger.info('Good Bye!')
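The user_validator and admin_validator callables passed to access_validation_factory are not included in this fragment. A minimal sketch of what such a validator might look like, assuming flask_login's current_user is available inside the Flask request context created by the factory (hypothetical, not the project's actual implementation):

import tornado.web
from flask_login import current_user

def user_validator(request):
    # Runs inside the Flask request context built by access_validation_factory;
    # reject the Tornado request when nobody is logged in.
    if not current_user.is_authenticated:
        raise tornado.web.HTTPError(403)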
Example #11
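        # Note: building SQL with string interpolation is vulnerable to injection;
        # a parameterized query (placeholders plus a bound tuple) would be safer here.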
        cursor.execute("INSERT INTO entries(Title,Text) VALUES('%s','%s')" %
                       ('Vote', message))
        db.commit()
        self.broadcast(self.participants, json.dumps(json.loads(message)))

    def on_close(self):
        # Remove client from the clients list and broadcast leave message
        self.participants.remove(self)
        self.broadcast(self.participants, "Someone left.")


if __name__ == "__main__":
    import logging
    logging.getLogger().setLevel(logging.DEBUG)

    # 1. Create chat router
    ChatRouter = sockjs.tornado.SockJSRouter(ChatConnection, '/chat')

    wsgi_app = WSGIContainer(flaskr.app)

    # 2. Create Tornado application
    application = tornado.web.Application(ChatRouter.urls +
                                          [(r'.*', FallbackHandler,
                                            dict(fallback=wsgi_app))])

    # 3. Make Tornado app listen on port 8080
    application.listen(8080)

    # 4. Start IOLoop
    tornado.ioloop.IOLoop.instance().start()
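The upper half of ChatConnection is not part of this fragment. A hedged sketch of the sockjs-tornado connection class it appears to belong to; the participant bookkeeping is an assumption based on the on_close method above:

import sockjs.tornado

class ChatConnection(sockjs.tornado.SockJSConnection):
    # Shared across connections; the fragment above references self.participants
    participants = set()

    def on_open(self, info):
        self.broadcast(self.participants, "Someone joined.")
        self.participants.add(self)

    def on_message(self, message):
        # The fragment above also persists the message to SQLite before broadcasting
        self.broadcast(self.participants, message)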
Example #12
# -*- coding: utf-8 -*-
from tornado.wsgi import WSGIContainer
from app import create_app

app = WSGIContainer(create_app("config.config.ProdConfig"))
'''
uwsgi --socket 127.0.0.1:8080 \
      --wsgi-file wsgi.py \
      --callable app \
      --processes 4 \
      --threads 2
'''
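Note that WSGIContainer adapts a WSGI application so Tornado's HTTP server can run it; it is not itself a WSGI callable, so handing it to uwsgi as --callable app is unlikely to work as intended. If the goal is to serve the Flask app through uwsgi, exposing the result of create_app("config.config.ProdConfig") directly as app would be the usual approach.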
Example #13
    def listen(self):
        logging.info(f"Listening on port {self.port}")
        http_server = HTTPServer(WSGIContainer(self.app))
        http_server.listen(self.port)
        IOLoop.instance().start()
Example #14
def runtornado():
    http_server = HTTPServer(WSGIContainer(app))
    http_server.listen(5060)
    IOLoop.instance().start()
Example #15
def server_start():
    global exclude_ids
    try:
        f = open(settings_file, 'r')
        try:
            allsettings = json.load(f)
        except ValueError as e:
            print('[-] Error: The settings file is not in a valid format, {}'.
                  format(e))
            f.close()
            sys.exit()
        f.close()
    finally:
        if 'f' in vars() and not f.closed:
            f.close()

    exclude_ids = allsettings['exclude_ids']
    port = allsettings['port']
    if allsettings['icon_set'] == 'standard':
        icon_set = 'icons_gen1_standard.png'
    elif allsettings['icon_set'] == 'shuffle':
        icon_set = 'icons_gen1_shuffle.png'
    elif allsettings['icon_set'] == 'alt':
        icon_set = 'icons_gen1_alt.png'
    elif allsettings['icon_set'] == 'toon':
        icon_set = 'icons_gen1_toon.png'
    else:
        print(
            '[-] Error: Icon set in settings file is invalid, possible sets are: "standard", "shuffle", "toon", "alt".'
        )
        sys.exit()
    list_profiles = []
    list_lats = []
    list_lngs = []
    for i in range(0, len(allsettings['profiles'])):
        if allsettings['profiles'][i]['id'] not in list_profiles:
            list_profiles.append(allsettings['profiles'][i]['id'])
            list_lats.append(allsettings['profiles'][i]['coordinates']['lat'])
            list_lngs.append(allsettings['profiles'][i]['coordinates']['lng'])

    if len(list_profiles) == 0:
        print('[-] Error: No profiles in settings file.')
        sys.exit()
    else:
        main_ind = 0

    db_data = sqlite3.connect(data_file, check_same_thread=False)
    db_data.create_function("isnotExcluded", 1, isnotExcluded)

    # def patched_finish(self):
    #     print('still')
    #     try:
    #         if not self.wfile.closed:
    #             self.wfile.close()
    #     except socket.error as e:
    #         sys.stdout.write('socket error: {}\n'.format(e))
    #     self.rfile.close()
    # SocketServer.StreamRequestHandler.finish = patched_finish
    # BaseHTTPServer.HTTPServer.allow_reuse_address = False

    compress = Compress()
    app = Flask(__name__,
                template_folder=workdir + '/' + 'webres',
                static_url_path='/static',
                static_folder=workdir + '/webres/static')
    app.config['COMPRESS_MIN_SIZE'] = 0
    app.config['COMPRESS_LEVEL'] = 6
    app.config['COMPRESS_MIMETYPES'] = [
        'text/html', 'text/css', 'text/xml', 'application/json',
        'application/javascript', 'application/octet-stream', 'image/svg+xml'
    ]
    compress.init_app(app)

    @app.teardown_appcontext
    def close_connection(exception):
        db = getattr(g, '_database', None)
        if db is not None:
            db.close()

    @app.after_request
    def add_header(response):
        if response.headers['Content-Type'] == "image/png":
            response.headers[
                'Cache-Control'] = 'must-revalidate, public, max-age=86400'
        else:
            response.headers[
                'Cache-Control'] = 'must-revalidate, public, max-age=-1'
        return response

    @app.route('/_getdata')
    def add_numbers():
        datatill = request.args.get('data_till', 0, type=int)
        profile = request.args.get('profile', -1, type=int)

        timenow = int(round(time.time(), 0))

        cursor_data = db_data.cursor()

        while True:
            try:
                if profile == -1:
                    results = cursor_data.execute(
                        'SELECT spawnid, latitude, longitude, spawntype, pokeid, expiretime FROM spawns WHERE isnotExcluded(pokeid) AND (expiretime > ?) AND (fromtime >= ?)',
                        (timenow, datatill))
                else:
                    results = cursor_data.execute(
                        'SELECT spawnid, latitude, longitude, spawntype, pokeid, expiretime FROM spawns WHERE isnotExcluded(pokeid) AND (profile == ?) AND (expiretime > ?) AND (fromtime >= ?)',
                        (profile, timenow, datatill))
                return jsonify([timenow, results.fetchall()])
            except sqlite3.OperationalError:
                pass

    @app.route("/")
    def mainapp():
        return render_template(
            'index.html',
            api_key=allsettings['api_key'],
            icon_scalefactor=allsettings['icon_scalefactor'],
            mobile_scale=allsettings['mobile_scalefactor'],
            lat=list_lats[main_ind],
            lng=list_lngs[main_ind],
            language=allsettings['language'],
            icon_set=icon_set,
            profile=-1)

    @app.route("/id<int:profile>")
    def subapp(profile):
        if profile in list_profiles:
            sub_ind = list_profiles.index(profile)
            return render_template(
                'index.html',
                api_key=allsettings['api_key'],
                icon_scalefactor=allsettings['icon_scalefactor'],
                mobile_scale=allsettings['mobile_scalefactor'],
                lat=list_lats[sub_ind],
                lng=list_lngs[sub_ind],
                language=allsettings['language'],
                icon_set=icon_set,
                profile=profile)

    http_server = HTTPServer(WSGIContainer(app))

    try:
        http_server.listen(port=port, address='0.0.0.0')
        IOLoop.instance().start()
    except socket.error as e:
        if e.errno == 10048:
            print('[-] Error: The specified port {} is already in use.'.format(
                port))
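The errno check above (10048) is the Windows-specific WSAEADDRINUSE code. A hedged, more portable variant of that test might look like:

import errno

def port_in_use(exc):
    # 10048 is WSAEADDRINUSE on Windows; errno.EADDRINUSE covers other platforms.
    return isinstance(exc, OSError) and exc.errno in (errno.EADDRINUSE, 10048)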
Example #16
def commandline(argv):
    from . import bgtasks

    version_string, git_hash = get_version_info()
    logger.info('starting up Librarian %s (%s)', version_string, git_hash)
    app.config['_version_string'] = version_string
    app.config['_git_hash'] = git_hash

    server = app.config.get('server', 'flask')
    host = app.config.get('host', None)
    port = app.config.get('port', 21106)
    debug = app.config.get('flask_debug', False)
    n_server_processes = app.config.get('n_server_processes', 1)

    if host is None:
        print(
            'note: no "host" set in configuration; server will not be remotely accessible',
            file=sys.stderr)

    maybe_add_stores()

    if n_server_processes > 1:
        if server != 'tornado':
            print('error: can only use multiple processes with Tornado server',
                  file=sys.stderr)
            sys.exit(1)

    if server == 'tornado':
        # Need to set up HTTP server and fork subprocesses before doing
        # anything with the IOLoop.
        from tornado.wsgi import WSGIContainer
        from tornado.httpserver import HTTPServer
        from tornado.ioloop import IOLoop
        from tornado import web
        from .webutil import StreamFile

        flask_app = WSGIContainer(app)
        tornado_app = web.Application([
            (r'/stream/.*', StreamFile),
            (r'.*', web.FallbackHandler, {
                'fallback': flask_app
            }),
        ])

        http_server = HTTPServer(tornado_app)
        http_server.bind(port, address=host)
        http_server.start(n_server_processes)
        db.engine.dispose()  # force new connection after potentially forking

    do_mandc = app.config.get('report_to_mandc', False)
    if do_mandc:
        from . import mc_integration
        mc_integration.register_callbacks(version_string, git_hash)

    if server == 'tornado':
        # Set up periodic report on background task status; also reminds us
        # that the server is alive.
        bgtasks.register_background_task_reporter()

        if is_primary_server():
            # Primary server is also in charge of checking out whether there's
            # anything to do with our standing orders.
            from tornado.ioloop import IOLoop
            from . import search
            IOLoop.current().add_callback(search.queue_standing_order_copies)
            search.register_standing_order_checkin()

        # Hack the logger to indicate which server we are.
        import tornado.process
        taskid = tornado.process.task_id()
        if taskid is not None:
            fmtr = logging.getLogger('').handlers[0].formatter
            fmtr._fmt = fmtr._fmt.replace(': ', ' #%d: ' % taskid)

    if server == 'flask':
        print(
            'note: using "flask" server, so background operations will not work',
            file=sys.stderr)
        app.run(host=host, port=port, debug=debug)
    elif server == 'tornado':
        from tornado.ioloop import IOLoop
        IOLoop.current().start()
    else:
        print('error: unknown server type %r' % server, file=sys.stderr)
        sys.exit(1)

    bgtasks.maybe_wait_for_threads_to_finish()
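The bind()/start(n_server_processes) pair above forks the requested number of worker processes before the IOLoop runs, whereas listen(port) is the single-process shorthand. A minimal sketch of the single-process equivalent, assuming a ready tornado_app:

from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop

def serve_single_process(tornado_app, port, host=None):
    # listen() is roughly bind() followed by start(1); start(n) with n > 1 forks first.
    server = HTTPServer(tornado_app)
    server.bind(port, address=host)
    server.start(1)
    IOLoop.current().start()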
Example #17
class Server():
    def __init__(self,
                 configfile=None,
                 basedir=None,
                 host="0.0.0.0",
                 port=5000,
                 debug=False,
                 allowRoot=False,
                 logConf=None):
        self._configfile = configfile
        self._basedir = basedir
        self._host = host
        self._port = port
        self._debug = debug
        self._allowRoot = allowRoot
        self._logConf = logConf

    def run(self):
        if not self._allowRoot:
            self._checkForRoot()

        global userManager
        global eventManager
        global loginManager
        global debug
        global softwareManager
        global VERSION

        from tornado.wsgi import WSGIContainer
        from tornado.httpserver import HTTPServer
        from tornado.ioloop import IOLoop
        from tornado.web import Application, FallbackHandler

        from astroprint.printfiles.watchdogs import UploadCleanupWatchdogHandler

        debug = self._debug

        # first initialize the settings singleton and make sure it uses given configfile and basedir if available
        self._initSettings(self._configfile, self._basedir)
        s = settings()

        # then initialize logging
        self._initLogging(self._debug, self._logConf)
        logger = logging.getLogger(__name__)

        if s.getBoolean(["accessControl", "enabled"]):
            userManagerName = settings().get(["accessControl", "userManager"])
            try:
                clazz = util.getClass(userManagerName)
                userManager = clazz()
            except AttributeError:
                logger.exception(
                    "Could not instantiate user manager %s, will run with accessControl disabled!"
                    % userManagerName)

        softwareManager = swManager()
        VERSION = softwareManager.versionString

        logger.info("Starting AstroBox (%s)" % VERSION)

        eventManager = events.eventManager()
        printer = printerManager(printerProfileManager().data['driver'])

        # configure timelapse
        #octoprint.timelapse.configureTimelapse()

        app.wsgi_app = ReverseProxied(app.wsgi_app)

        app.secret_key = boxrouterManager().boxId
        loginManager = LoginManager()
        loginManager.session_protection = "strong"
        loginManager.user_callback = load_user
        if userManager is None:
            loginManager.anonymous_user = users.DummyUser
            principals.identity_loaders.appendleft(users.dummy_identity_loader)
        loginManager.init_app(app)

        # setup command triggers
        events.CommandTrigger(printer)
        if self._debug:
            events.DebugEventListener()

        if networkManager().isOnline():
            softwareManager.checkForcedUpdate()

        if self._host is None:
            self._host = s.get(["server", "host"])
        if self._port is None:
            self._port = s.getInt(["server", "port"])

        logger.info("Listening on http://%s:%d" % (self._host, self._port))
        app.debug = self._debug

        from octoprint.server.api import api

        app.register_blueprint(api, url_prefix="/api")

        self._boxrouter = boxrouterManager()
        self._router = SockJSRouter(self._createSocketConnection, "/sockjs")

        self._discovery = discoveryManager()

        def access_validation_factory(validator):
            """
			Creates an access validation wrapper using the supplied validator.

			:param validator: the access validator to use inside the validation wrapper
			:return: an access validation wrapper taking a request as parameter and performing the request validation
			"""
            def f(request):
                """
				Creates a custom wsgi and Flask request context in order to be able to process user information
				stored in the current session.

				:param request: The Tornado request for which to create the environment and context
				"""
                wsgi_environ = tornado.wsgi.WSGIContainer.environ(request)
                with app.request_context(wsgi_environ):
                    app.session_interface.open_session(app, flask.request)
                    loginManager.reload_user()
                    validator(flask.request)

            return f

        self._tornado_app = Application(self._router.urls + [
            (r"/downloads/timelapse/([^/]*\.mpg)", LargeResponseHandler, {
                "path": s.getBaseFolder("timelapse"),
                "as_attachment": True
            }),
            (r"/downloads/files/local/([^/]*\.(gco|gcode))",
             LargeResponseHandler, {
                 "path": s.getBaseFolder("uploads"),
                 "as_attachment": True
             }),
            (r"/downloads/logs/([^/]*)", LargeResponseHandler, {
                "path": s.getBaseFolder("logs"),
                "as_attachment": True,
                "access_validation": access_validation_factory(admin_validator)
            }),
            (r"/downloads/camera/current", UrlForwardHandler, {
                "url": s.get(["webcam", "snapshot"]),
                "as_attachment": True,
                "access_validation": access_validation_factory(user_validator)
            }),
            (r".*", FallbackHandler, {
                "fallback": WSGIContainer(app.wsgi_app)
            })
        ])
        self._server = HTTPServer(self._tornado_app)
        self._server.listen(self._port, address=self._host)

        eventManager.fire(events.Events.STARTUP)
        if s.getBoolean(["serial", "autoconnect"]):
            (port, baudrate) = s.get(["serial", "port"]), s.getInt(["serial", "baudrate"])
            connectionOptions = printer.getConnectionOptions()
            if port in connectionOptions["ports"]:
                printer.connect(port, baudrate)

        # start up watchdogs
        observer = Observer()
        observer.schedule(UploadCleanupWatchdogHandler(),
                          s.getBaseFolder("uploads"))
        observer.start()

        try:
            IOLoop.instance().start()
        except KeyboardInterrupt:
            logger.info("Goodbye!")
        except:
            logger.fatal(
                "Now that is embarrassing... Something really really went wrong here. Please report this including the stacktrace below in OctoPrint's bugtracker. Thanks!"
            )
            logger.exception("Stacktrace follows:")
        finally:
            observer.stop()
        observer.join()
Example #18
"""Entry point for the reposcan component"""
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.wsgi import WSGIContainer

from common.config import Config
from reposcan import create_app, DEFAULT_PATH, DEFAULT_PATH_API

# pylint: disable=invalid-name
application = create_app({
    DEFAULT_PATH + "/v1": "reposcan.spec.yaml",
    DEFAULT_PATH_API + "/v1": "reposcan.spec.yaml",
    "": "reposcan.healthz.spec.yaml"
})

if __name__ == '__main__':
    cfg = Config()
    server = HTTPServer(WSGIContainer(application))
    server.listen(cfg.public_port or cfg.reposcan_port)
    IOLoop.instance().start()
Example #19
    return render_template('index.html', script=script)


if __name__ == '__main__':
    # Create bokeh app and IOLoop
    bokeh_app = Application(
        FunctionHandler(main_doc)
    )  # Application is "a factory for Document instances" and FunctionHandler "runs a function which modifies a document"
    io_loop = IOLoop.current()  # creates an IOLoop for the current thread
    # Create the Bokeh server, which "instantiates Application instances as clients connect".  We tell it the bokeh app and the ioloop to use
    server = Server({'/bkapp': bokeh_app},
                    io_loop=io_loop,
                    allow_websocket_origin=["localhost:8080"])
    server.start(
    )  # Start the Bokeh Server and its background tasks. non-blocking and does not affect the state of the IOLoop
    # Create the web server using tornado (separate from Bokeh server)
    print(
        'Opening Flask app with embedded Bokeh application on http://localhost:8080/'
    )
    http_server = HTTPServer(
        WSGIContainer(flask_app)
    )  # A non-blocking, single-threaded HTTP server. serves the WSGI app that flask provides. WSGI was created as a low-level interface between web servers and web applications or frameworks to promote common ground for portable web application development
    http_server.listen(
        8080
    )  # this is the single-process version, there are multi-process ones as well
    # Open browser to main page
    io_loop.add_callback(
        view, "http://localhost:8080/"
    )  # calls the given callback (Opens browser to specified location) on the next I/O loop iteration. provides thread-safety
    io_loop.start()  # starts ioloop, and is blocking
Example #20
        # Remove client from the clients list and broadcast leave message
        self.participants.remove(self)
        self.broadcast(self.participants, "Someone left.")


if __name__ == "__main__":
    import logging

    logging.getLogger().setLevel(logging.DEBUG)
    app.debug = True
    flask_app = WSGIContainer(app)

    AsyncRouter = SockJSRouter(
        AsyncConnection,
        '/async',
    )

    # pass off to Flask if we're not using tornado for anything other than comet/async
    application = Application([
        (r".*", FallbackHandler, dict(fallback=flask_app)),
    ],
                              debug=True)

    # separate app for async stuff
    async_app = Application(AsyncRouter.urls, debug=True)
Example #21
def main():
    http_server = HTTPServer(WSGIContainer(app))
    http_server.listen(1224)
    IOLoop.instance().start()
Example #22
#!/usr/bin/env python
# coding: utf-8
from tornado.wsgi import WSGIContainer
from tornado.web import Application, FallbackHandler
from tornado.ioloop import IOLoop
from app.websocket import WSHandler
from app import app

if __name__ == '__main__':
    wsgi_app = WSGIContainer(app)

    application = Application([(r'/websocket', WSHandler),
                               (r'.*', FallbackHandler,
                                dict(fallback=wsgi_app))])

    application.listen(5000)
    IOLoop.instance().start()
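The WSHandler imported from app.websocket is not shown. A minimal sketch of such a Tornado WebSocket handler; the broadcast behaviour is an assumption about what the imported class does:

import tornado.websocket

class WSHandler(tornado.websocket.WebSocketHandler):
    clients = set()

    def open(self):
        # Register the new connection
        WSHandler.clients.add(self)

    def on_message(self, message):
        # Relay the message to every connected client
        for client in WSHandler.clients:
            client.write_message(message)

    def on_close(self):
        WSHandler.clients.discard(self)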
Example #23
from app import app
from tornado.httpserver import HTTPServer
from tornado.wsgi import WSGIContainer
from tornado.ioloop import IOLoop

s = HTTPServer(WSGIContainer(app))
s.listen(8011)  # listen on port 8011
IOLoop.current().start()
# if __name__ == '__main__':
#     app.run(host = '127.0.0.1', port = 8011, debug = True)
Example #24
io_loop = IOLoop.current()

server = Server({'/bkapp': bokeh_app},
                io_loop=io_loop,
                allow_websocket_origin=["localhost:8080"])
server.start()


@flask_app.route('/', methods=['GET'])
def bkapp_page():
    script = autoload_server(model=None, url='http://localhost:5006/bkapp')
    return render_template("embed.html", script=script)


if __name__ == '__main__':
    from tornado.httpserver import HTTPServer
    from tornado.wsgi import WSGIContainer
    from bokeh.util.browser import view

    print(
        'Opening Flask app with embedded Bokeh application on http://localhost:8080/'
    )

    # This uses Tornado to serve the WSGI app that flask provides. Presumably the IOLoop
    # could also be started in a thread, and Flask could serve its own app directly
    http_server = HTTPServer(WSGIContainer(flask_app))
    http_server.listen(8080)

    io_loop.add_callback(view, "http://localhost:8080/")
    io_loop.start()
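autoload_server was deprecated in later Bokeh releases in favour of server_document; with a newer Bokeh the route above would typically be written along these lines (a sketch, assuming the same local Bokeh server URL):

from flask import render_template
from bokeh.embed import server_document

@flask_app.route('/', methods=['GET'])
def bkapp_page():
    # server_document returns a script tag that loads the Bokeh app from the Bokeh server
    script = server_document('http://localhost:5006/bkapp')
    return render_template("embed.html", script=script)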
Example #25
            <p>You can view detailed analytics here: <a href='{}'>{}</a></p>\
            <p>Keep crushing it!</p>\
            <p>WPCI Admin</p>"


app = Flask(__name__)
sslify = SSLify(app)
app.debug = True
app.secret_key = conf.SECRET
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
oauth = OAuth(app)

DEFAULT_LOGO_PATH = "static/images/default_logo.base64"
TIMEZONE = conf.TIMEZONE

oauth_app = WSGIContainer(app)

github = oauth.remote_app(
    'github',
    consumer_key=conf.CONSUMER_KEY,
    consumer_secret=conf.CONSUMER_SECRET,
    request_token_params={'scope': 'repo'},
    base_url=conf.GITHUB_API_URL,
    request_token_url=None,
    access_token_method='POST',
    access_token_url= conf.GITHUB_OAUTH_URI +'access_token',
    authorize_url= conf.GITHUB_OAUTH_URI +'authorize'
)


@app.template_filter()
Example #26
def update_dead_monkeys():
    mongo.db.monkey.update(
        {'keepalive': {'$lte': datetime.now() - timedelta(minutes=10)}, 'dead': {'$ne': True}},
        {'$set': {'dead': True, 'modifytime': datetime.now()}}, upsert=False, multi=True)


@app.route('/admin/<path:path>')
def send_admin(path):
    return send_from_directory('admin/ui', path)


DEFAULT_REPRESENTATIONS = {'application/json': output_json}
api = restful.Api(app)
api.representations = DEFAULT_REPRESENTATIONS

api.add_resource(Root, '/api')
api.add_resource(Monkey, '/api/monkey', '/api/monkey/', '/api/monkey/<string:guid>')
api.add_resource(Telemetry, '/api/telemetry', '/api/telemetry/', '/api/telemetry/<string:monkey_guid>')
api.add_resource(NewConfig, '/api/config/new')
api.add_resource(MonkeyDownload, '/api/monkey/download', '/api/monkey/download/', '/api/monkey/download/<string:path>')

if __name__ == '__main__':
    from tornado.wsgi import WSGIContainer
    from tornado.httpserver import HTTPServer
    from tornado.ioloop import IOLoop

    http_server = HTTPServer(WSGIContainer(app), ssl_options={'certfile': 'server.crt', 'keyfile': 'server.key'})
    http_server.listen(5000)
    IOLoop.instance().start()
    #app.run(host='0.0.0.0', debug=False, ssl_context=('server.crt', 'server.key'))
Example #27
        backlog = Configuration.getBacklog()
        file_handler = RotatingFileHandler(logfile,
                                           maxBytes=maxLogSize,
                                           backupCount=backlog)
        file_handler.setLevel(logging.ERROR)
        formatter = logging.Formatter(
            "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
        file_handler.setFormatter(formatter)
        app.logger.addHandler(file_handler)

    if flaskDebug:
        # start debug flask server
        app.run(host=flaskHost, port=flaskPort, debug=flaskDebug)
    else:
        # start asynchronous server using tornado wrapper for flask
        # ssl connection
        print("Server starting...")
        if Configuration.useSSL():
            cert = os.path.join(_runPath, "../", Configuration.getSSLCert())
            key = os.path.join(_runPath, "../", Configuration.getSSLKey())
            ssl_options = {"certfile": cert, "keyfile": key}
        else:
            ssl_options = None
        signal.signal(signal.SIGTERM, sig_handler)
        signal.signal(signal.SIGINT, sig_handler)
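        # Added note: sig_handler is defined elsewhere in this project. A typical
        # graceful-shutdown handler for this bind()/start(0) pattern looks roughly
        # like the following (illustrative sketch only):
        #
        #   def sig_handler(sig, frame):
        #       io_loop = IOLoop.instance()
        #
        #       def shutdown():
        #           http_server.stop()   # stop accepting new connections
        #           io_loop.stop()       # then stop the event loop
        #
        #       io_loop.add_callback_from_signal(shutdown)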
        global http_server
        http_server = HTTPServer(WSGIContainer(app), ssl_options=ssl_options)
        http_server.bind(flaskPort, address=flaskHost)
        http_server.start(0)  # Forks multiple sub-processes
        IOLoop.instance().start()
Example #28
def serve(args):
    # serve_demo: Load saved embeddings, serve question model. question in, results out.
    # serve_question: only serve question model. question in, vector out.
    # serve_context: only serve context model. context in, phrase-vector pairs out.
    # serve: serve all three.
    device = torch.device('cuda' if args.cuda else 'cpu')
    pprint(args.__dict__)

    interface = FileInterface(**args.__dict__)
    # use cache for metadata
    if args.cache:
        out = interface.cache(preprocess, args)
        processor = out['processor']
        processed_metadata = out['processed_metadata']
    else:
        processor = Processor(**args.__dict__)
        metadata = interface.load_metadata()
        processed_metadata = processor.process_metadata(metadata)

    model = Model(**args.__dict__).to(device)
    model.init(processed_metadata)
    interface.bind(processor, model)

    interface.load(args.iteration, session=args.load_dir)

    with torch.no_grad():
        model.eval()

        if args.mode == 'serve_demo':
            phrases = []
            paras = []
            results = []
            embs = []
            idxs = []
            iterator = interface.context_load(metadata=True, emb_type=args.emb_type)
            for _, (cur_phrases, each_emb, metadata) in zip(range(args.num_train_mats), iterator):
                embs.append(each_emb)
                phrases.extend(cur_phrases)
                for span in metadata['answer_spans']:
                    results.append([len(paras), span[0], span[1]])
                    idxs.append(len(idxs))
                paras.append(metadata['context'])
            if args.emb_type == 'dense':
                import faiss
                emb = np.concatenate(embs, 0)

                d = 4 * args.hidden_size * args.num_heads
                if args.metric == 'ip':
                    quantizer = faiss.IndexFlatIP(d)  # Exact Search
                elif args.metric == 'l2':
                    quantizer = faiss.IndexFlatL2(d)
                else:
                    raise ValueError()

                if args.nlist != args.nprobe:
                    # Approximate Search. nlist > nprobe makes it faster and less accurate
                    if args.bpv is None:
                        if args.metric == 'ip':
                            search_index = faiss.IndexIVFFlat(quantizer, d, args.nlist, faiss.METRIC_INNER_PRODUCT)
                        elif args.metric == 'l2':
                            search_index = faiss.IndexIVFFlat(quantizer, d, args.nlist)
                        else:
                            raise ValueError()
                    else:
                        assert args.metric == 'l2'  # only l2 is supported for product quantization
                        search_index = faiss.IndexIVFPQ(quantizer, d, args.nlist, args.bpv, 8)
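                    # Note (added): IVF/IVFPQ indexes must be trained on a
                    # representative sample of vectors before add(); the flat
                    # index used in the else-branch below needs no training.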
                    search_index.train(emb)
                else:
                    search_index = quantizer

                search_index.add(emb)
                for cur_phrases, each_emb, metadata in iterator:
                    phrases.extend(cur_phrases)
                    for span in metadata['answer_spans']:
                        results.append([len(paras), span[0], span[1]])
                    paras.append(metadata['context'])
                    search_index.add(each_emb)

                if args.nlist != args.nprobe:
                    search_index.nprobe = args.nprobe

                def search(emb, k):
                    D, I = search_index.search(emb, k)
                    return D[0], I[0]

            elif args.emb_type == 'sparse':
                assert args.metric == 'l2'  # currently only l2 is supported (couldn't find a good ip library)
                import pysparnn.cluster_index as ci

                cp = ci.MultiClusterIndex(embs, idxs)

                for cur_phrases, each_emb, metadata in iterator:
                    phrases.extend(cur_phrases)
                    for span in metadata['answer_spans']:
                        results.append([len(paras), span[0], span[1]])
                    paras.append(metadata['context'])
                    for each_vec in each_emb:
                        cp.insert(each_vec, len(idxs))
                        idxs.append(len(idxs))

                def search(emb, k):
                    return zip(*[each[0] for each in cp.search(emb, k=k)])

            else:
                raise ValueError()

            def retrieve(question, k):
                example = {'question': question, 'id': 'real', 'idx': 0}
                dataset = (processor.preprocess(example), )
                loader = DataLoader(dataset, batch_size=1, collate_fn=processor.collate)
                batch = next(iter(loader))
                question_output = model.get_question(**batch)
                question_results = processor.postprocess_question_batch(dataset, batch, question_output)
                id_, emb = question_results[0]
                D, I = search(emb, k)
                out = [(paras[results[i][0]], results[i][1], results[i][2], '%.4r' % d.item(),)
                       for d, i in zip(D, I)]
                return out

            if args.mem_info:
                import psutil
                import os
                pid = os.getpid()
                py = psutil.Process(pid)
                info = py.memory_info()[0] / 2. ** 30
                print('Memory Use: %.2f GB' % info)

            # Demo server. Requires flask and tornado
            from flask import Flask, request, jsonify
            from flask_cors import CORS

            from tornado.wsgi import WSGIContainer
            from tornado.httpserver import HTTPServer
            from tornado.ioloop import IOLoop

            app = Flask(__name__, static_url_path='/static')

            app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
            CORS(app)

            @app.route('/')
            def index():
                return app.send_static_file('index.html')

            @app.route('/files/<path:path>')
            def static_files(path):
                return app.send_static_file('files/' + path)

            @app.route('/api', methods=['GET'])
            def api():
                query = request.args['query']
                out = retrieve(query, 5)
                return jsonify(out)
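
            # Example request against this endpoint (port is args.port; 8000 is shown
            # purely for illustration):
            #   curl 'http://localhost:8000/api?query=Who+wrote+Hamlet'
            # The response is a JSON list of [paragraph, span_start, span_end, score]
            # entries produced by retrieve() above.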

            print('Starting server at %d' % args.port)
            http_server = HTTPServer(WSGIContainer(app))
            http_server.listen(args.port)
            IOLoop.instance().start()
Example #29
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop

import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASE_DIR)
# import the Flask project's app
from python.restfulAPI.main import app

# http_server = HTTPServer(WSGIContainer(app))
# http_server.listen(8000)  # the port the Flask app is served on
# IOLoop.instance().start()

# To enable multi-process mode, use the code below instead (Linux/Unix only)
http_server = HTTPServer(WSGIContainer(app))
http_server.bind(8102)
http_server.start(0)  # 0 = fork one worker process per CPU core
IOLoop.instance().start()
Example #30
from tornado.wsgi import WSGIContainer
from tornado.web import Application, RequestHandler, FallbackHandler
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
# `app` (a Flask WSGI app whose config holds ssl_options/address/port) is imported
# elsewhere in the original module; that import is not part of this excerpt.

# placeholder for TLS Soon(tm) TODO


class IndexHandler(RequestHandler):
    def get(self):
        with open('./static/index.html') as index_file:
            return self.write(index_file.read())


if 'ssl_options' not in app.config:
    raise Exception("Field `ssl_options` not found in config")

http_server = HTTPServer(
    Application(
        [
            (r'^/$', IndexHandler),
            (r'^.*', FallbackHandler, {'fallback': WSGIContainer(app)}),
        ],
        static_path='./static',
    ),
    ssl_options=app.config['ssl_options'],
)

http_server.listen(
    # could instead use .bind() + .start(); see the sketch below
    **{
        option: app.config[option]
        for option in app.config
        if option in ('address', 'port')
    })
IOLoop.instance().start()
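
# Added note: the inline comment above hints at the multi-process alternative to
# listen(). A hypothetical sketch (fork-per-core works only on Unix; the same
# app.config keys are assumed):
#
#   http_server.bind(app.config['port'], address=app.config.get('address', ''))
#   http_server.start(0)   # fork one worker per CPU core
#   IOLoop.instance().start()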