Example #1
def main():
    store_pidfile(os.path.abspath('.'))
    repo_dir = os.path.join('.', 'config', 'repo')

    # Update Django settings
    config = json.loads(open(os.path.join(repo_dir, 'web-admin.conf')).read())
    config['config_dir'] = os.path.abspath('.')
    update_globals(config)

    os.environ['DJANGO_SETTINGS_MODULE'] = 'zato.admin.settings'
    django.setup()
    call_command('loaddata', os.path.join(repo_dir, 'initial-data.json'))

    RepoManager(repo_dir).ensure_repo_consistency()
    execute_from_command_line(['zato-web-admin', 'runserver', '--noreload', '--nothreading', '{host}:{port}'.format(**config)])
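
Note: every example on this page begins by calling store_pidfile() before doing anything else. As rough orientation only, a minimal sketch of what such a helper could look like is shown below; the name store_pidfile_sketch and the default file name are illustrative assumptions, not the actual zato.common.util.store_pidfile implementation.

import os

def store_pidfile_sketch(base_dir, file_name='pidfile'):
    # Write the current process ID into base_dir/file_name so that
    # management tools can later stop the component by its PID.
    pid_path = os.path.join(base_dir, file_name)
    with open(pid_path, 'w') as f:
        f.write(str(os.getpid()))
    return pid_path
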
Example #2
def main():

    # Always attempt to store the PID file first
    store_pidfile(os.path.abspath('.'))

    # Capture warnings to log files
    logging.captureWarnings(True)

    config = Config()
    repo_location = os.path.join('.', 'config', 'repo')

    # Logging configuration
    with open(os.path.join(repo_location, 'logging.conf')) as f:
        dictConfig(yaml.load(f))

    # Read config in and extend it with ODB-specific information
    config.main = get_config(repo_location, 'scheduler.conf')
    config.main.odb.fs_sql_config = get_config(repo_location,
                                               'sql.conf',
                                               needs_user_config=False)

    # Make all paths absolute
    if config.main.crypto.use_tls:
        config.main.crypto.ca_certs_location = absjoin(
            repo_location, config.main.crypto.ca_certs_location)
        config.main.crypto.priv_key_location = absjoin(
            repo_location, config.main.crypto.priv_key_location)
        config.main.crypto.cert_location = absjoin(
            repo_location, config.main.crypto.cert_location)

    logger = logging.getLogger(__name__)
    logger.info('Scheduler starting (http{}://{}:{})'.format(
        's' if config.main.crypto.use_tls else '', config.main.bind.host,
        config.main.bind.port))

    # Fix up configuration so it uses the format internal utilities expect
    for name, job_config in iteritems(
            get_config(repo_location,
                       'startup_jobs.conf',
                       needs_user_config=False)):
        job_config['name'] = name
        config.startup_jobs.append(job_config)

    # Run the scheduler server
    try:
        SchedulerServer(config, repo_location).serve_forever()
    except Exception:
        logger.warn(format_exc())
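
The scheduler example above reads nested settings through attribute access, e.g. config.main.crypto.use_tls and config.main.bind.port. Below is a minimal, self-contained sketch of that access pattern using only the standard library; the values are placeholders and the real structure comes from scheduler.conf as parsed by get_config.

from types import SimpleNamespace

# Placeholder values only; the actual keys and values come from scheduler.conf.
config = SimpleNamespace(
    main=SimpleNamespace(
        bind=SimpleNamespace(host='0.0.0.0', port=31530),
        crypto=SimpleNamespace(use_tls=False),
    ),
)

scheme = 'https' if config.main.crypto.use_tls else 'http'
print('{}://{}:{}'.format(scheme, config.main.bind.host, config.main.bind.port))
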
Example #3
def main():
    store_pidfile(os.path.abspath('.'))
    repo_dir = os.path.join('.', 'config', 'repo')

    # Update Django settings
    config = json.loads(open(os.path.join(repo_dir, 'web-admin.conf')).read())
    config['config_dir'] = os.path.abspath('.')
    update_globals(config)

    # Store the PID so that the server can be later stopped by its PID.
    open('./.web-admin.pid', 'w').write(str(os.getpid()))
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'zato.admin.settings')
    call_command('loaddata', os.path.join(repo_dir, 'initial-data.json'))

    RepoManager(repo_dir).ensure_repo_consistency()

    # Cannot be imported before update_globals does its job of updating settings' configuration
    execute_from_command_line(['zato-web-admin', 'runserver', '--noreload', '{host}:{port}'.format(**config)])
Example #4
File: main.py Project: rpeterson/zato
def main():
    store_pidfile(os.path.abspath("."))
    repo_dir = os.path.join(".", "config", "repo")

    # Update Django settings
    config = json.loads(open(os.path.join(repo_dir, "web-admin.conf")).read())
    config["config_dir"] = os.path.abspath(".")
    update_globals(config)

    # Store the PID so that the server can be later stopped by its PID.
    open("./.web-admin.pid", "w").write(str(os.getpid()))

    os.environ["DJANGO_SETTINGS_MODULE"] = "zato.admin.settings"
    call_command("loaddata", os.path.join(repo_dir, "initial-data.json"))

    RepoManager(repo_dir).ensure_repo_consistency()

    # Cannot be imported before update_globals does its job of updating settings' configuration
    from zato.admin import settings

    execute_manager(settings, ["zato-web-admin", "runserver", "--noreload", "{host}:{port}".format(**config)])
Example #5
def main():
    store_pidfile(os.path.abspath('.'))
    repo_dir = os.path.join('.', 'config', 'repo')

    # Update Django settings
    config = json.loads(open(os.path.join(repo_dir, 'web-admin.conf')).read())
    config['config_dir'] = os.path.abspath('.')
    update_globals(config)

    os.environ['DJANGO_SETTINGS_MODULE'] = 'zato.admin.settings'
    call_command('loaddata', os.path.join(repo_dir, 'initial-data.json'))

    RepoManager(repo_dir).ensure_repo_consistency()

    # Cannot be imported before update_globals does its job of updating settings' configuration
    from zato.admin import settings

    execute_manager(settings, [
        'zato-web-admin', 'runserver', '--noreload',
        '{host}:{port}'.format(**config)
    ])
Example #6
def main():
    store_pidfile(os.path.abspath('.'))
    repo_dir = os.path.join('.', 'config', 'repo')

    # Update Django settings
    config = json.loads(open(os.path.join(repo_dir, 'web-admin.conf')).read())
    config['config_dir'] = os.path.abspath('.')
    update_globals(config)

    # Store the PID so that the server can be later stopped by its PID.
    open('./.web-admin.pid', 'w').write(str(os.getpid()))
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'zato.admin.settings')
    call_command('loaddata', os.path.join(repo_dir, 'initial-data.json'))

    RepoManager(repo_dir).ensure_repo_consistency()

    # Cannot be imported before update_globals does its job of updating settings' configuration
    execute_from_command_line([
        'zato-web-admin', 'runserver', '--noreload',
        '{host}:{port}'.format(**config)
    ])
Example #7
File: main.py Project: dangnammta/zato
from __future__ import absolute_import, division, print_function, unicode_literals

# stdlib
import os, sys

# ConcurrentLogHandler - updates the stdlib's logging config on import so this needs to stay
import cloghandler
cloghandler = cloghandler  # For pyflakes

# Zato
from zato.agent.load_balancer.server import LoadBalancerAgent, TLSLoadBalancerAgent
from zato.common.util import get_lb_agent_json_config, parse_cmd_line_options, store_pidfile

if __name__ == '__main__':
    repo_dir = sys.argv[1]
    component_dir = os.path.join(repo_dir, '..', '..')

    lba_config = get_lb_agent_json_config(repo_dir)

    # Store agent's pidfile only if we are not running in foreground
    options = parse_cmd_line_options(sys.argv[2])
    if not options.get('fg', None):
        store_pidfile(component_dir, lba_config['pid_file'])

    lba_class = TLSLoadBalancerAgent if lba_config.get(
        'is_tls_enabled', True) else LoadBalancerAgent

    lba = lba_class(repo_dir)
    lba.start_load_balancer()
    lba.serve_forever()
Example #8
File: main.py Project: SciF0r/zato
def run(base_dir, start_gunicorn_app=True):

    # Store a pidfile before doing anything else
    store_pidfile(base_dir)

    # For dumping stacktraces
    register_diag_handlers()

    # Start initializing the server now
    os.chdir(base_dir)

    try:
        import pymysql
        pymysql.install_as_MySQLdb()
    except ImportError:
        pass

    # We're doing it here even if someone doesn't use PostgreSQL at all
    # so we're not surprised when someone suddenly starts using PG.
    # TODO: Make sure it's registered for each of the subprocesses
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)

    repo_location = os.path.join(base_dir, 'config', 'repo')

    # Configure the logging first, before configuring the actual server.
    logging.addLevelName('TRACE1', TRACE1)
    logging.config.fileConfig(os.path.join(repo_location, 'logging.conf'))

    logger = logging.getLogger(__name__)
    kvdb_logger = logging.getLogger('zato_kvdb')

    config = get_config(repo_location, 'server.conf')

    # New in 2.0 - Start monitoring as soon as possible
    if config.get('newrelic', {}).get('config'):
        import newrelic.agent
        newrelic.agent.initialize(
            config.newrelic.config, config.newrelic.environment or None, config.newrelic.ignore_errors or None,
            config.newrelic.log_file or None, config.newrelic.log_level or None)

    # Store KVDB config in logs, possibly replacing its password if told to
    kvdb_config = get_kvdb_config_for_log(config.kvdb)
    kvdb_logger.info('Master process config `%s`', kvdb_config)

    # New in 2.0 hence optional
    user_locale = config.misc.get('locale', None)
    if user_locale:
        locale.setlocale(locale.LC_ALL, user_locale)
        value = 12345
        logger.info('Locale is `%s`, amount of %s -> `%s`', user_locale, value,
                    locale.currency(value, grouping=True).decode('utf-8'))

    # Spring Python
    app_context = get_app_context(config)

    # Makes queries against Postgres asynchronous
    if asbool(config.odb.use_async_driver) and config.odb.engine == 'postgresql':
        make_psycopg_green()

    # New in 2.0 - Put HTTP_PROXY in os.environ.
    http_proxy = config.misc.get('http_proxy', False)
    if http_proxy:
        os.environ['http_proxy'] = http_proxy

    crypto_manager = get_crypto_manager(repo_location, app_context, config)
    parallel_server = app_context.get_object('parallel_server')

    zato_gunicorn_app = ZatoGunicornApplication(parallel_server, repo_location, config.main, config.crypto)

    parallel_server.crypto_manager = crypto_manager
    parallel_server.odb_data = config.odb
    parallel_server.host = zato_gunicorn_app.zato_host
    parallel_server.port = zato_gunicorn_app.zato_port
    parallel_server.repo_location = repo_location
    parallel_server.base_dir = base_dir
    parallel_server.fs_server_config = config
    parallel_server.user_config.update(config.user_config_items)
    parallel_server.startup_jobs = app_context.get_object('startup_jobs')
    parallel_server.app_context = app_context

    # Remove all locks possibly left over by previous server instances
    kvdb = app_context.get_object('kvdb')
    kvdb.component = 'master-proc'
    clear_locks(kvdb, config.main.token, config.kvdb, crypto_manager.decrypt)

    # Turn the repo dir into an actual repository and commit any new/modified files
    RepoManager(repo_location).ensure_repo_consistency()

    # New in 2.0 so it's optional.
    profiler_enabled = config.get('profiler', {}).get('enabled', False)

    if asbool(profiler_enabled):
        profiler_dir = os.path.abspath(os.path.join(base_dir, config.profiler.profiler_dir))
        parallel_server.on_wsgi_request = ProfileMiddleware(
            parallel_server.on_wsgi_request,
            log_filename = os.path.join(profiler_dir, config.profiler.log_filename),
            cachegrind_filename = os.path.join(profiler_dir, config.profiler.cachegrind_filename),
            discard_first_request = config.profiler.discard_first_request,
            flush_at_shutdown = config.profiler.flush_at_shutdown,
            path = config.profiler.url_path,
            unwind = config.profiler.unwind)

    # Finally, run the app when we execute from the command line
    if start_gunicorn_app:
        zato_gunicorn_app.run()
    else:
        return zato_gunicorn_app.zato_wsgi_app
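
When run() is called with start_gunicorn_app=False it returns the plain WSGI callable instead of blocking inside gunicorn. A hedged usage sketch with the standard library's wsgiref server is shown below; the base directory path is hypothetical and assumes a fully prepared Zato server directory.

from wsgiref.simple_server import make_server

# run() is the function shown above; '/opt/zato/server1' is a hypothetical,
# already-prepared server base directory.
wsgi_app = run('/opt/zato/server1', start_gunicorn_app=False)

httpd = make_server('127.0.0.1', 8080, wsgi_app)
httpd.serve_forever()
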
Example #9
def run(base_dir, start_gunicorn_app=True):

    # Filter out warnings we are not interested in
    warnings.filterwarnings('ignore', 'Mean of empty slice.')
    warnings.filterwarnings('ignore',
                            'invalid value encountered in double_scalars')

    # Store a pidfile before doing anything else
    store_pidfile(base_dir)

    # For dumping stacktraces
    register_diag_handlers()

    # Start initializing the server now
    os.chdir(base_dir)

    try:
        import pymysql
        pymysql.install_as_MySQLdb()
    except ImportError:
        pass

    # We're doing it here even if someone doesn't use PostgreSQL at all
    # so we're not surprised when someone suddenly starts using PG.
    # TODO: Make sure it's registered for each of the subprocesses
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)

    repo_location = os.path.join(base_dir, 'config', 'repo')

    # Configure the logging first, before configuring the actual server.
    logging.addLevelName('TRACE1', TRACE1)

    with open(os.path.join(repo_location, 'logging.conf')) as f:
        dictConfig(yaml.load(f))

    logger = logging.getLogger(__name__)
    kvdb_logger = logging.getLogger('zato_kvdb')

    config = get_config(repo_location, 'server.conf')

    # New in 2.0 - Start monitoring as soon as possible
    if config.get('newrelic', {}).get('config'):
        import newrelic.agent
        newrelic.agent.initialize(config.newrelic.config,
                                  config.newrelic.environment or None,
                                  config.newrelic.ignore_errors or None,
                                  config.newrelic.log_file or None,
                                  config.newrelic.log_level or None)

    # New in 2.0 - override gunicorn-set Server HTTP header
    gunicorn.SERVER_SOFTWARE = config.misc.get('http_server_header', 'Zato')

    # Store KVDB config in logs, possibly replacing its password if told to
    kvdb_config = get_kvdb_config_for_log(config.kvdb)
    kvdb_logger.info('Master process config `%s`', kvdb_config)

    # New in 2.0 hence optional
    user_locale = config.misc.get('locale', None)
    if user_locale:
        locale.setlocale(locale.LC_ALL, user_locale)
        value = 12345
        logger.info('Locale is `%s`, amount of %s -> `%s`', user_locale, value,
                    locale.currency(value, grouping=True).decode('utf-8'))

    # Spring Python
    app_context = get_app_context(config)

    # Makes queries against Postgres asynchronous
    if asbool(
            config.odb.use_async_driver) and config.odb.engine == 'postgresql':
        make_psycopg_green()

    # New in 2.0 - Put HTTP_PROXY in os.environ.
    http_proxy = config.misc.get('http_proxy', False)
    if http_proxy:
        os.environ['http_proxy'] = http_proxy

    crypto_manager = get_crypto_manager(repo_location, app_context, config)
    parallel_server = app_context.get_object('parallel_server')

    zato_gunicorn_app = ZatoGunicornApplication(parallel_server, repo_location,
                                                config.main, config.crypto)

    parallel_server.crypto_manager = crypto_manager
    parallel_server.odb_data = config.odb
    parallel_server.host = zato_gunicorn_app.zato_host
    parallel_server.port = zato_gunicorn_app.zato_port
    parallel_server.repo_location = repo_location
    parallel_server.base_dir = base_dir
    parallel_server.tls_dir = os.path.join(parallel_server.base_dir, 'config',
                                           'repo', 'tls')
    parallel_server.fs_server_config = config
    parallel_server.user_config.update(config.user_config_items)
    parallel_server.startup_jobs = app_context.get_object('startup_jobs')
    parallel_server.app_context = app_context

    # Remove all locks possibly left over by previous server instances
    kvdb = app_context.get_object('kvdb')
    kvdb.component = 'master-proc'
    clear_locks(kvdb, config.main.token, config.kvdb, crypto_manager.decrypt)

    # Turn the repo dir into an actual repository and commit any new/modified files
    RepoManager(repo_location).ensure_repo_consistency()

    # New in 2.0 so it's optional.
    profiler_enabled = config.get('profiler', {}).get('enabled', False)

    # New in 2.0 so it's optional.
    sentry_config = config.get('sentry')

    dsn = sentry_config.pop('dsn', None)
    if dsn:

        from raven import Client
        from raven.conf import setup_logging
        from raven.handlers.logging import SentryHandler

        handler_level = sentry_config.pop('level')
        client = Client(dsn, **sentry_config)

        handler = SentryHandler(client=client)
        handler.setLevel(getattr(logging, handler_level))

        logger = logging.getLogger('')
        logger.addHandler(handler)

        for name in logging.Logger.manager.loggerDict:
            if name.startswith('zato'):
                logger = logging.getLogger(name)
                logger.addHandler(handler)

    if asbool(profiler_enabled):
        profiler_dir = os.path.abspath(
            os.path.join(base_dir, config.profiler.profiler_dir))
        parallel_server.on_wsgi_request = ProfileMiddleware(
            parallel_server.on_wsgi_request,
            log_filename=os.path.join(profiler_dir,
                                      config.profiler.log_filename),
            cachegrind_filename=os.path.join(
                profiler_dir, config.profiler.cachegrind_filename),
            discard_first_request=config.profiler.discard_first_request,
            flush_at_shutdown=config.profiler.flush_at_shutdown,
            path=config.profiler.url_path,
            unwind=config.profiler.unwind)

    # Finally, run the app when we execute from the command line
    if start_gunicorn_app:
        zato_gunicorn_app.run()
    else:
        return zato_gunicorn_app.zato_wsgi_app
Example #10
def run(base_dir, start_gunicorn_app=True, options=None):
    options = options or {}

    # Store a pidfile before doing anything else
    store_pidfile(base_dir)

    # For dumping stacktraces
    register_diag_handlers()

    # Capture warnings to log files
    logging.captureWarnings(True)

    # Start initializing the server now
    os.chdir(base_dir)

    try:
        import pymysql
        pymysql.install_as_MySQLdb()
    except ImportError:
        pass

    # We're doing it here even if someone doesn't use PostgreSQL at all
    # so we're not surprised when someone suddenly starts using PG.
    # TODO: Make sure it's registered for each of the subprocesses
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)

    # We know we don't need warnings because users may explicitly configure no certificate validation.
    # We don't want urllib3 to warn us about it.
    import requests as _r
    _r.packages.urllib3.disable_warnings()

    repo_location = os.path.join(base_dir, 'config', 'repo')

    # Configure the logging first, before configuring the actual server.
    logging.addLevelName('TRACE1', TRACE1)
    logging_conf_path = os.path.join(repo_location, 'logging.conf')

    with open(logging_conf_path) as f:
        logging_config = yaml.load(f)
        dictConfig(logging_config)

    logger = logging.getLogger(__name__)
    kvdb_logger = logging.getLogger('zato_kvdb')

    crypto_manager = ServerCryptoManager(repo_location,
                                         secret_key=options['secret_key'],
                                         stdin_data=read_stdin_data())
    secrets_config = ConfigObj(os.path.join(repo_location, 'secrets.conf'),
                               use_zato=False)
    server_config = get_config(repo_location,
                               'server.conf',
                               crypto_manager=crypto_manager,
                               secrets_conf=secrets_config)
    pickup_config = get_config(repo_location, 'pickup.conf')
    sio_config = get_config(repo_location,
                            'simple-io.conf',
                            needs_user_config=False)
    sso_config = get_config(repo_location, 'sso.conf', needs_user_config=False)
    normalize_sso_config(sso_config)

    # Now that we have access to server.conf, greenify libraries required to be made greenlet-friendly,
    # assuming that there are any - otherwise do not do anything.
    to_greenify = []
    for key, value in server_config.get('greenify', {}).items():
        if asbool(value):
            if not os.path.exists(key):
                raise ValueError('No such path `{}`'.format(key))
            else:
                to_greenify.append(key)

    # Go ahead only if we actually have anything to greenify
    if to_greenify:
        import greenify
        greenify.greenify()
        for name in to_greenify:
            result = greenify.patch_lib(name)
            if not result:
                raise ValueError(
                    'Library `{}` could not be greenified'.format(name))
            else:
                logger.info('Greenified library `%s`', name)

    server_config.main.token = server_config.main.token.encode('utf8')

    # Do not proceed unless we can be certain our own preferred address or IP can be obtained.
    preferred_address = server_config.preferred_address.get('address')

    if not preferred_address:
        preferred_address = get_preferred_ip(server_config.main.gunicorn_bind,
                                             server_config.preferred_address)

    if not preferred_address and not server_config.server_to_server.boot_if_preferred_not_found:
        msg = 'Unable to start the server. Could not obtain a preferred address, please configure [bind_options] in server.conf'
        logger.warn(msg)
        raise Exception(msg)

    # Create the startup callable tool as soon as practical
    startup_callable_tool = StartupCallableTool(server_config)

    # Run the hook before there is any server object created
    startup_callable_tool.invoke(SERVER_STARTUP.PHASE.FS_CONFIG_ONLY,
                                 kwargs={
                                     'server_config': server_config,
                                     'pickup_config': pickup_config,
                                     'sio_config': sio_config,
                                     'sso_config': sso_config,
                                 })

    # New in 2.0 - Start monitoring as soon as possible
    if server_config.get('newrelic', {}).get('config'):
        import newrelic.agent
        newrelic.agent.initialize(server_config.newrelic.config,
                                  server_config.newrelic.environment or None,
                                  server_config.newrelic.ignore_errors or None,
                                  server_config.newrelic.log_file or None,
                                  server_config.newrelic.log_level or None)

    zunicorn.SERVER_SOFTWARE = server_config.misc.get('http_server_header',
                                                      'Zato')

    # Store KVDB config in logs, possibly replacing its password if told to
    kvdb_config = get_kvdb_config_for_log(server_config.kvdb)
    kvdb_logger.info('Main process config `%s`', kvdb_config)

    # New in 2.0 hence optional
    user_locale = server_config.misc.get('locale', None)
    if user_locale:
        locale.setlocale(locale.LC_ALL, user_locale)
        value = 12345
        logger.info('Locale is `%s`, amount of %s -> `%s`', user_locale, value,
                    locale.currency(value, grouping=True).decode('utf-8'))

    # Makes queries against Postgres asynchronous
    if asbool(server_config.odb.use_async_driver
              ) and server_config.odb.engine == 'postgresql':
        make_psycopg_green()

    if server_config.misc.http_proxy:
        os.environ['http_proxy'] = server_config.misc.http_proxy

    # Basic components needed for the server to boot up
    kvdb = KVDB()
    odb_manager = ODBManager(well_known_data=ZATO_CRYPTO_WELL_KNOWN_DATA)
    sql_pool_store = PoolStore()

    service_store = ServiceStore()
    service_store.odb = odb_manager
    service_store.services = {}

    server = ParallelServer()
    server.odb = odb_manager
    server.service_store = service_store
    server.service_store.server = server
    server.sql_pool_store = sql_pool_store
    server.service_modules = []
    server.kvdb = kvdb

    # Assigned here because it is a circular dependency
    odb_manager.parallel_server = server

    zato_gunicorn_app = ZatoGunicornApplication(server, repo_location,
                                                server_config.main,
                                                server_config.crypto)

    server.has_fg = options.get('fg')
    server.crypto_manager = crypto_manager
    server.odb_data = server_config.odb
    server.host = zato_gunicorn_app.zato_host
    server.port = zato_gunicorn_app.zato_port
    server.repo_location = repo_location
    server.user_conf_location = os.path.join(server.repo_location, 'user-conf')
    server.base_dir = base_dir
    server.logs_dir = os.path.join(server.base_dir, 'logs')
    server.tls_dir = os.path.join(server.base_dir, 'config', 'repo', 'tls')
    server.static_dir = os.path.join(server.base_dir, 'config', 'repo',
                                     'static')
    server.json_schema_dir = os.path.join(server.base_dir, 'config', 'repo',
                                          'schema', 'json')
    server.fs_server_config = server_config
    server.fs_sql_config = get_config(repo_location,
                                      'sql.conf',
                                      needs_user_config=False)
    server.pickup_config = pickup_config
    server.logging_config = logging_config
    server.logging_conf_path = logging_conf_path
    server.sio_config = sio_config
    server.sso_config = sso_config
    server.user_config.update(server_config.user_config_items)
    server.preferred_address = preferred_address
    server.sync_internal = options['sync_internal']
    server.jwt_secret = server.fs_server_config.misc.jwt_secret.encode('utf8')
    server.startup_callable_tool = startup_callable_tool
    server.is_sso_enabled = server.fs_server_config.component_enabled.sso
    if server.is_sso_enabled:
        server.sso_api = SSOAPI(server, sso_config, None,
                                crypto_manager.encrypt, crypto_manager.decrypt,
                                crypto_manager.hash_secret,
                                crypto_manager.verify_hash, new_user_id)

    # Remove all locks possibly left over by previous server instances
    kvdb.component = 'master-proc'
    clear_locks(kvdb, server_config.main.token, server_config.kvdb,
                crypto_manager.decrypt)

    # New in 2.0.8
    server.return_tracebacks = asbool(
        server_config.misc.get('return_tracebacks', True))
    server.default_error_message = server_config.misc.get(
        'default_error_message', 'An error has occurred')

    # Turn the repo dir into an actual repository and commit any new/modified files
    RepoManager(repo_location).ensure_repo_consistency()

    # New in 2.0 so it's optional.
    profiler_enabled = server_config.get('profiler', {}).get('enabled', False)

    # New in 2.0 so it's optional.
    sentry_config = server_config.get('sentry')

    dsn = sentry_config.pop('dsn', None)
    if dsn:

        from raven import Client
        from raven.handlers.logging import SentryHandler

        handler_level = sentry_config.pop('level')
        client = Client(dsn, **sentry_config)

        handler = SentryHandler(client=client)
        handler.setLevel(getattr(logging, handler_level))

        logger = logging.getLogger('')
        logger.addHandler(handler)

        for name in logging.Logger.manager.loggerDict:
            if name.startswith('zato'):
                logger = logging.getLogger(name)
                logger.addHandler(handler)

    if asbool(profiler_enabled):
        profiler_dir = os.path.abspath(
            os.path.join(base_dir, server_config.profiler.profiler_dir))
        server.on_wsgi_request = ProfileMiddleware(
            server.on_wsgi_request,
            log_filename=os.path.join(profiler_dir,
                                      server_config.profiler.log_filename),
            cachegrind_filename=os.path.join(
                profiler_dir, server_config.profiler.cachegrind_filename),
            discard_first_request=server_config.profiler.discard_first_request,
            flush_at_shutdown=server_config.profiler.flush_at_shutdown,
            path=server_config.profiler.url_path,
            unwind=server_config.profiler.unwind)

    # New in 2.0 - set environment variables for servers to inherit
    os_environ = server_config.get('os_environ', {})
    for key, value in os_environ.items():
        os.environ[key] = value

    # Run the hook right before the Gunicorn-level server actually starts
    startup_callable_tool.invoke(SERVER_STARTUP.PHASE.IMPL_BEFORE_RUN,
                                 kwargs={
                                     'zato_gunicorn_app': zato_gunicorn_app,
                                 })

    # Run the app at last
    if start_gunicorn_app:
        zato_gunicorn_app.run()
    else:
        return zato_gunicorn_app.zato_wsgi_app
Example #11
File: main.py Project: azazel75/zato
#!/usr/bin/env python

"""
Copyright (C) 2011 Dariusz Suchojad <dsuch at zato.io>

Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""

from __future__ import absolute_import, division, print_function, unicode_literals

# stdlib
import os, sys

# Zato
from zato.agent.load_balancer.server import LoadBalancerAgent
from zato.common.util import store_pidfile

if __name__ == '__main__':
    store_pidfile(os.path.abspath(os.path.join(sys.argv[1], '..', '..')))
    lba = LoadBalancerAgent(sys.argv[1])
    lba.start_load_balancer()
    lba.serve_forever()
Example #12
File: main.py Project: znavy/zato
"""
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""

from __future__ import absolute_import, division, print_function, unicode_literals

# stdlib
import os, sys

# ConcurrentLogHandler - updates the stdlib's logging config on import so this needs to stay
import cloghandler

cloghandler = cloghandler  # For pyflakes

# Zato
from zato.agent.load_balancer.server import LoadBalancerAgent
from zato.common.util import get_lb_agent_json_config, parse_cmd_line_options, store_pidfile

if __name__ == '__main__':
    repo_dir = sys.argv[1]
    component_dir = os.path.join(repo_dir, '..', '..')

    # Store agent's pidfile only if we are not running in foreground
    options = parse_cmd_line_options(sys.argv[2])
    if not options.get('fg', None):
        store_pidfile(component_dir,
                      get_lb_agent_json_config(repo_dir)['pid_file'])

    lba = LoadBalancerAgent(repo_dir)
    lba.start_load_balancer()
    lba.serve_forever()
Example #13
def run(base_dir, start_gunicorn_app=True, options=None):
    options = options or {}

    # Store a pidfile before doing anything else
    store_pidfile(base_dir)

    # For dumping stacktraces
    register_diag_handlers()

    # Capture warnings to log files
    logging.captureWarnings(True)

    # Start initializing the server now
    os.chdir(base_dir)

    try:
        import pymysql
        pymysql.install_as_MySQLdb()
    except ImportError:
        pass

    # We're doing it here even if someone doesn't use PostgreSQL at all
    # so we're not surprised when someone suddenly starts using PG.
    # TODO: Make sure it's registered for each of the subprocesses
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)

    # We know we don't need warnings because users may explicitly configure no certificate validation.
    # We don't want urllib3 to warn us about it.
    import requests as _r
    _r.packages.urllib3.disable_warnings()

    repo_location = os.path.join(base_dir, 'config', 'repo')

    # Configure the logging first, before configuring the actual server.
    logging.addLevelName('TRACE1', TRACE1)

    with open(os.path.join(repo_location, 'logging.conf')) as f:
        dictConfig(yaml.load(f))

    logger = logging.getLogger(__name__)
    kvdb_logger = logging.getLogger('zato_kvdb')

    config = get_config(repo_location, 'server.conf')
    pickup_config = get_config(repo_location, 'pickup.conf')

    # Do not proceed unless we can be certain our own preferred address or IP can be obtained.
    preferred_address = config.preferred_address.get('address')

    if not preferred_address:
        preferred_address = get_preferred_ip(config.main.gunicorn_bind,
                                             config.preferred_address)

    if not preferred_address and not config.server_to_server.boot_if_preferred_not_found:
        msg = 'Unable to start the server. Could not obtain a preferred address, please configure [bind_options] in server.conf'
        logger.warn(msg)
        raise Exception(msg)

    # New in 2.0 - Start monitoring as soon as possible
    if config.get('newrelic', {}).get('config'):
        import newrelic.agent
        newrelic.agent.initialize(config.newrelic.config,
                                  config.newrelic.environment or None,
                                  config.newrelic.ignore_errors or None,
                                  config.newrelic.log_file or None,
                                  config.newrelic.log_level or None)

    # New in 2.0 - override gunicorn-set Server HTTP header
    gunicorn.SERVER_SOFTWARE = config.misc.get('http_server_header', 'Zato')

    # Store KVDB config in logs, possibly replacing its password if told to
    kvdb_config = get_kvdb_config_for_log(config.kvdb)
    kvdb_logger.info('Master process config `%s`', kvdb_config)

    # New in 2.0 hence optional
    user_locale = config.misc.get('locale', None)
    if user_locale:
        locale.setlocale(locale.LC_ALL, user_locale)
        value = 12345
        logger.info('Locale is `%s`, amount of %s -> `%s`', user_locale, value,
                    locale.currency(value, grouping=True).decode('utf-8'))

    # Spring Python
    app_context = get_app_context(config)

    # Makes queries against Postgres asynchronous
    if asbool(
            config.odb.use_async_driver) and config.odb.engine == 'postgresql':
        make_psycopg_green()

    # New in 2.0 - Put HTTP_PROXY in os.environ.
    http_proxy = config.misc.get('http_proxy', False)
    if http_proxy:
        os.environ['http_proxy'] = http_proxy

    crypto_manager = get_crypto_manager(repo_location, app_context, config)
    server = app_context.get_object('server')

    zato_gunicorn_app = ZatoGunicornApplication(server, repo_location,
                                                config.main, config.crypto)

    server.crypto_manager = crypto_manager
    server.odb_data = config.odb
    server.host = zato_gunicorn_app.zato_host
    server.port = zato_gunicorn_app.zato_port
    server.repo_location = repo_location
    server.user_conf_location = os.path.join(server.repo_location, 'user-conf')
    server.base_dir = base_dir
    server.tls_dir = os.path.join(server.base_dir, 'config', 'repo', 'tls')
    server.fs_server_config = config
    server.pickup_config = pickup_config
    server.user_config.update(config.user_config_items)
    server.app_context = app_context
    server.preferred_address = preferred_address
    server.sync_internal = options['sync_internal']
    server.jwt_secret = server.fs_server_config.misc.jwt_secret.encode('utf8')

    # Remove all locks possibly left over by previous server instances
    kvdb = app_context.get_object('kvdb')
    kvdb.component = 'master-proc'
    clear_locks(kvdb, config.main.token, config.kvdb, crypto_manager.decrypt)

    # New in 2.0.8
    server.return_tracebacks = asbool(
        config.misc.get('return_tracebacks', True))
    server.default_error_message = config.misc.get('default_error_message',
                                                   'An error has occurred')

    # Turn the repo dir into an actual repository and commit any new/modified files
    RepoManager(repo_location).ensure_repo_consistency()

    # New in 2.0 so it's optional.
    profiler_enabled = config.get('profiler', {}).get('enabled', False)

    # New in 2.0 so it's optional.
    sentry_config = config.get('sentry')

    dsn = sentry_config.pop('dsn', None)
    if dsn:

        from raven import Client
        from raven.handlers.logging import SentryHandler

        handler_level = sentry_config.pop('level')
        client = Client(dsn, **sentry_config)

        handler = SentryHandler(client=client)
        handler.setLevel(getattr(logging, handler_level))

        logger = logging.getLogger('')
        logger.addHandler(handler)

        for name in logging.Logger.manager.loggerDict:
            if name.startswith('zato'):
                logger = logging.getLogger(name)
                logger.addHandler(handler)

    if asbool(profiler_enabled):
        profiler_dir = os.path.abspath(
            os.path.join(base_dir, config.profiler.profiler_dir))
        server.on_wsgi_request = ProfileMiddleware(
            server.on_wsgi_request,
            log_filename=os.path.join(profiler_dir,
                                      config.profiler.log_filename),
            cachegrind_filename=os.path.join(
                profiler_dir, config.profiler.cachegrind_filename),
            discard_first_request=config.profiler.discard_first_request,
            flush_at_shutdown=config.profiler.flush_at_shutdown,
            path=config.profiler.url_path,
            unwind=config.profiler.unwind)

    # New in 2.0 - set environment variables for servers to inherit
    os_environ = config.get('os_environ', {})
    for key, value in os_environ.items():
        os.environ[key] = value

    # Run the app at last
    if start_gunicorn_app:
        zato_gunicorn_app.run()
    else:
        return zato_gunicorn_app.zato_wsgi_app
Example #14
def run(base_dir, start_gunicorn_app=True):

    # Store a pidfile before doing anything else
    store_pidfile(base_dir)

    # For dumping stacktraces
    register_diag_handlers()

    # Capture warnings to log files
    logging.captureWarnings(True)

    # Start initializing the server now
    os.chdir(base_dir)

    try:
        import pymysql
        pymysql.install_as_MySQLdb()
    except ImportError:
        pass

    # We're doing it here even if someone doesn't use PostgreSQL at all
    # so we're not surprised when someone suddenly starts using PG.
    # TODO: Make sure it's registered for each of the subprocesses
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)

    # We know we don't need warnings because users may explicitly configure no certificate validation.
    # We don't want urllib3 to warn us about it.
    import requests as _r
    _r.packages.urllib3.disable_warnings()

    repo_location = os.path.join(base_dir, 'config', 'repo')

    # Configure the logging first, before configuring the actual server.
    logging.addLevelName('TRACE1', TRACE1)

    with open(os.path.join(repo_location, 'logging.conf')) as f:
        dictConfig(yaml.load(f))

    logger = logging.getLogger(__name__)
    kvdb_logger = logging.getLogger('zato_kvdb')

    config = get_config(repo_location, 'server.conf')

    # New in 2.0 - Start monitoring as soon as possible
    if config.get('newrelic', {}).get('config'):
        import newrelic.agent
        newrelic.agent.initialize(
            config.newrelic.config, config.newrelic.environment or None, config.newrelic.ignore_errors or None,
            config.newrelic.log_file or None, config.newrelic.log_level or None)

    # New in 2.0 - override gunicorn-set Server HTTP header
    gunicorn.SERVER_SOFTWARE = config.misc.get('http_server_header', 'Zato')

    # Store KVDB config in logs, possibly replacing its password if told to
    kvdb_config = get_kvdb_config_for_log(config.kvdb)
    kvdb_logger.info('Master process config `%s`', kvdb_config)

    # New in 2.0 hence optional
    user_locale = config.misc.get('locale', None)
    if user_locale:
        locale.setlocale(locale.LC_ALL, user_locale)
        value = 12345
        logger.info('Locale is `%s`, amount of %s -> `%s`', user_locale, value,
                    locale.currency(value, grouping=True).decode('utf-8'))

    # Spring Python
    app_context = get_app_context(config)

    # Makes queries against Postgres asynchronous
    if asbool(config.odb.use_async_driver) and config.odb.engine == 'postgresql':
        make_psycopg_green()

    # New in 2.0 - Put HTTP_PROXY in os.environ.
    http_proxy = config.misc.get('http_proxy', False)
    if http_proxy:
        os.environ['http_proxy'] = http_proxy

    crypto_manager = get_crypto_manager(repo_location, app_context, config)
    parallel_server = app_context.get_object('parallel_server')

    zato_gunicorn_app = ZatoGunicornApplication(parallel_server, repo_location, config.main, config.crypto)

    parallel_server.crypto_manager = crypto_manager
    parallel_server.odb_data = config.odb
    parallel_server.host = zato_gunicorn_app.zato_host
    parallel_server.port = zato_gunicorn_app.zato_port
    parallel_server.repo_location = repo_location
    parallel_server.base_dir = base_dir
    parallel_server.tls_dir = os.path.join(parallel_server.base_dir, 'config', 'repo', 'tls')
    parallel_server.fs_server_config = config
    parallel_server.user_config.update(config.user_config_items)
    parallel_server.startup_jobs = app_context.get_object('startup_jobs')
    parallel_server.app_context = app_context

    # Remove all locks possibly left over by previous server instances
    kvdb = app_context.get_object('kvdb')
    kvdb.component = 'master-proc'
    clear_locks(kvdb, config.main.token, config.kvdb, crypto_manager.decrypt)

    # Turn the repo dir into an actual repository and commit any new/modified files
    RepoManager(repo_location).ensure_repo_consistency()

    # New in 2.0 so it's optional.
    profiler_enabled = config.get('profiler', {}).get('enabled', False)

    # New in 2.0 so it's optional.
    sentry_config = config.get('sentry')

    dsn = sentry_config.pop('dsn', None)
    if dsn:

        from raven import Client
        from raven.handlers.logging import SentryHandler

        handler_level = sentry_config.pop('level')
        client = Client(dsn, **sentry_config)

        handler = SentryHandler(client=client)
        handler.setLevel(getattr(logging, handler_level))

        logger = logging.getLogger('')
        logger.addHandler(handler)

        for name in logging.Logger.manager.loggerDict:
            if name.startswith('zato'):
                logger = logging.getLogger(name)
                logger.addHandler(handler)

    if asbool(profiler_enabled):
        profiler_dir = os.path.abspath(os.path.join(base_dir, config.profiler.profiler_dir))
        parallel_server.on_wsgi_request = ProfileMiddleware(
            parallel_server.on_wsgi_request,
            log_filename = os.path.join(profiler_dir, config.profiler.log_filename),
            cachegrind_filename = os.path.join(profiler_dir, config.profiler.cachegrind_filename),
            discard_first_request = config.profiler.discard_first_request,
            flush_at_shutdown = config.profiler.flush_at_shutdown,
            path = config.profiler.url_path,
            unwind = config.profiler.unwind)

    # New in 2.0 - set environment variables for servers to inherit
    os_environ = config.get('os_environ', {})
    for key, value in os_environ.items():
        os.environ[key] = value

    # Run the app at last
    if start_gunicorn_app:
        zato_gunicorn_app.run()
    else:
        return zato_gunicorn_app.zato_wsgi_app
Example #15
File: main.py Project: wrightrocket/zato
#!/usr/bin/env python

"""
Copyright (C) 2011 Dariusz Suchojad <dsuch at zato.io>

Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""

from __future__ import absolute_import, division, print_function, unicode_literals

# stdlib
import os, sys

# ConcurrentLogHandler - updates the stdlib's logging config on import so this needs to stay
import cloghandler
cloghandler = cloghandler # For pyflakes

# Zato
from zato.agent.load_balancer.server import LoadBalancerAgent
from zato.common.util import store_pidfile

if __name__ == '__main__':
    store_pidfile(os.path.abspath(os.path.join(sys.argv[1], '..', '..')))
    lba = LoadBalancerAgent(sys.argv[1])
    lba.start_load_balancer()
    lba.serve_forever()
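
All of the modules above expose a main() or run() entry point that is started from the command line. For context, a hedged sketch of how such a module is commonly wired into a console script with setuptools is shown below; the package, module and command names are placeholders, not Zato's actual packaging.

# setup.py (illustrative only)
from setuptools import setup, find_packages

setup(
    name='my-zato-like-component',
    version='0.1',
    packages=find_packages(),
    entry_points={
        'console_scripts': [
            # Maps the `my-component` command to the module-level main() entry point
            'my-component = mypackage.main:main',
        ],
    },
)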