Example #1
File: __init__.py Project: xbx/zato
    def _init(self):
        """ Initializes all the basic run-time data structures and connects
        to the Zato broker.
        """
        fs_server_config = get_config(self.repo_location, 'server.conf')
        app_context = get_app_context(fs_server_config)
        crypto_manager = get_crypto_manager(self.repo_location, app_context, fs_server_config)

        config_odb = fs_server_config.odb
        self.odb = app_context.get_object('odb_manager')
        self.odb.crypto_manager = crypto_manager
        self.odb.token = fs_server_config.main.token

        # Key-value DB
        self.kvdb = KVDB()
        self.kvdb.config = fs_server_config.kvdb
        self.kvdb.decrypt_func = self.odb.crypto_manager.decrypt
        self.kvdb.init()

        # Broker client
        self.broker_client = BrokerClient(self.kvdb, self.broker_client_id, self.broker_callbacks)
        self.broker_client.start()

        # ODB
        self.odb_config = Bunch()
        self.odb_config.db_name = config_odb.db_name
        self.odb_config.is_active = True
        self.odb_config.engine = config_odb.engine
        self.odb_config.extra = config_odb.extra
        self.odb_config.host = config_odb.host
        self.odb_config.password = self.odb.crypto_manager.decrypt(config_odb.password)
        self.odb_config.pool_size = config_odb.pool_size
        self.odb_config.username = config_odb.username

        self.odb_config.is_odb = True

        self.sql_pool_store = app_context.get_object('sql_pool_store')
        self.sql_pool_store[ZATO_ODB_POOL_NAME] = self.odb_config
        self.odb.pool = self.sql_pool_store[ZATO_ODB_POOL_NAME].pool

        self._setup_odb()

        # Delivery store
        self.delivery_store = DeliveryStore(self.kvdb, self.broker_client, self.odb, float(fs_server_config.misc.delivery_lock_timeout))
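For reference, the KVDB bring-up used above can be reduced to a few lines. The following is a minimal sketch only, assuming a reachable Redis instance on localhost, a plain-text password (so the decrypt function is a pass-through) and the `zato.common.kvdb` import path, which may differ between Zato versions; the configuration values are hypothetical and would normally come from server.conf:

from bunch import Bunch
from zato.common.kvdb import KVDB  # path may be zato.common.kvdb.api in newer releases

# Hypothetical configuration; real deployments read these keys from server.conf
kvdb_config = Bunch(host='localhost', port=6379)

kvdb = KVDB(config=kvdb_config, decrypt_func=lambda secret: secret)
kvdb.init()                # builds the underlying Redis connection, exposed as kvdb.conn
print(kvdb.conn.ping())    # True if Redis is reachable
kvdb.close()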
Example #2
    def __init__(self, config=None, run=False):
        self.config = config
        self.broker_client = None
        self.config.on_job_executed_cb = self.on_job_executed
        self.sched = _Scheduler(self.config, self)

        # Broker connection
        self.broker_conn = KVDB(config=self.config.main.broker)
        self.broker_conn.init()

        # Broker client
        self.broker_callbacks = {
            TOPICS[MESSAGE_TYPE.TO_SCHEDULER]: self.on_broker_msg,
        }

        self.broker_client = BrokerClient(self.broker_conn, 'scheduler',
                                          self.broker_callbacks, [])

        if run:
            self.serve_forever()
Example #3
    def on_server_check_kvdb(self, cm, conf, conf_key='kvdb'):

        kvdb_config = Bunch(dict(iteritems((conf[conf_key]))))
        kvdb = KVDB(None, kvdb_config, cm.decrypt)
        kvdb.init()

        minimum = '2.8.4'

        info = kvdb.conn.info()
        redis_version = info.get('redis_version')

        if not redis_version:
            raise Exception('Could not obtain `redis_version` from {}'.format(info))

        if not LooseVersion(redis_version) >= LooseVersion(minimum):
            raise Exception('Redis version required: `{}` or later, found: `{}`'.format(minimum, redis_version))

        kvdb.close()

        if self.show_output:
            self.logger.info('Redis connection OK')
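The same minimum-version gate can be reproduced without KVDB by talking to redis-py directly. A standalone sketch, assuming a local Redis and keeping the example's use of LooseVersion (which ships with the now-deprecated distutils); host and port are placeholders:

from distutils.version import LooseVersion
import redis

MINIMUM = '2.8.4'

# Placeholder connection details; the example above obtains them from the kvdb config
conn = redis.StrictRedis(host='localhost', port=6379)
redis_version = conn.info().get('redis_version')

if not redis_version:
    raise Exception('Could not obtain `redis_version` from INFO')

if LooseVersion(redis_version) < LooseVersion(MINIMUM):
    raise Exception('Redis version required: `{}` or later, found: `{}`'.format(MINIMUM, redis_version))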
Example #4
    def test_parse_config(self):
        class FakeSentinel(object):
            def __init__(self, sentinels, password, socket_timeout):
                self.sentinels = sentinels
                self.password = password
                self.socket_timeout = socket_timeout
                self.master_for_called_with = None

            def master_for(self, master_name):
                self.master_for_called_with = master_name
                return self

        class FakeStrictRedis(object):
            def __init__(self, **config):
                self.config = config

        class FakeKVDB(KVDB):
            def _get_connection_class(self):
                return FakeSentinel if self.has_sentinel else FakeStrictRedis

        def decrypt_func(password):
            return password

        sentinel1_host, sentinel1_port = 'a-' + rand_string(), rand_int()
        sentinel2_host, sentinel2_port = 'b-' + rand_string(), rand_int()

        password = rand_string()
        socket_timeout = rand_int()
        redis_sentinels_master = rand_string()
        redis_sentinels = [
            '{}:{}'.format(sentinel1_host, sentinel1_port),
            '{}:{}'.format(sentinel2_host, sentinel2_port)
        ]

        try:
            config = {'use_redis_sentinels': True}
            kvdb = KVDB(config=config)
            kvdb.init()
        except ValueError as e:
            eq_(e.message, 'kvdb.redis_sentinels must be provided')
Example #5
    def kvdb(self):
        return KVDB()
Example #6
def run(base_dir, start_gunicorn_app=True, options=None):
    options = options or {}

    # Store a pidfile before doing anything else
    store_pidfile(base_dir)

    # For dumping stacktraces
    register_diag_handlers()

    # Capture warnings to log files
    logging.captureWarnings(True)

    # Start initializing the server now
    os.chdir(base_dir)

    try:
        import pymysql
        pymysql.install_as_MySQLdb()
    except ImportError:
        pass

    # We're doing this here even if someone doesn't use PostgreSQL at all,
    # so we're not surprised when someone suddenly starts using PG.
    # TODO: Make sure it's registered for each of the subprocess
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)

    # We know we don't need warnings because users may explicitly configure no certificate validation.
    # We don't want urllib3 to warn us about it.
    import requests as _r
    _r.packages.urllib3.disable_warnings()

    repo_location = os.path.join(base_dir, 'config', 'repo')

    # Configure the logging first, before configuring the actual server.
    logging.addLevelName('TRACE1', TRACE1)
    logging_conf_path = os.path.join(repo_location, 'logging.conf')

    with open(logging_conf_path) as f:
        logging_config = yaml.load(f)
        dictConfig(logging_config)

    logger = logging.getLogger(__name__)
    kvdb_logger = logging.getLogger('zato_kvdb')

    crypto_manager = ServerCryptoManager(repo_location,
                                         secret_key=options['secret_key'],
                                         stdin_data=read_stdin_data())
    secrets_config = ConfigObj(os.path.join(repo_location, 'secrets.conf'),
                               use_zato=False)
    server_config = get_config(repo_location,
                               'server.conf',
                               crypto_manager=crypto_manager,
                               secrets_conf=secrets_config)
    pickup_config = get_config(repo_location, 'pickup.conf')
    sio_config = get_config(repo_location,
                            'simple-io.conf',
                            needs_user_config=False)
    sso_config = get_config(repo_location, 'sso.conf', needs_user_config=False)
    normalize_sso_config(sso_config)

    # Now that we have access to server.conf, greenify libraries required to be made greenlet-friendly,
    # assuming that there are any - otherwise do not do anything.
    to_greenify = []
    for key, value in server_config.get('greenify', {}).items():
        if asbool(value):
            if not os.path.exists(key):
                raise ValueError('No such path `{}`'.format(key))
            else:
                to_greenify.append(key)

    # Go ahead only if we actually have anything to greenify
    if to_greenify:
        import greenify
        greenify.greenify()
        for name in to_greenify:
            result = greenify.patch_lib(name)
            if not result:
                raise ValueError(
                    'Library `{}` could not be greenified'.format(name))
            else:
                logger.info('Greenified library `%s`', name)

    server_config.main.token = server_config.main.token.encode('utf8')

    # Do not proceed unless we can be certain our own preferred address or IP can be obtained.
    preferred_address = server_config.preferred_address.get('address')

    if not preferred_address:
        preferred_address = get_preferred_ip(server_config.main.gunicorn_bind,
                                             server_config.preferred_address)

    if not preferred_address and not server_config.server_to_server.boot_if_preferred_not_found:
        msg = 'Unable to start the server. Could not obtain a preferred address, please configure [bind_options] in server.conf'
        logger.warn(msg)
        raise Exception(msg)

    # Create the startup callable tool as soon as practical
    startup_callable_tool = StartupCallableTool(server_config)

    # Run the hook before there is any server object created
    startup_callable_tool.invoke(SERVER_STARTUP.PHASE.FS_CONFIG_ONLY,
                                 kwargs={
                                     'server_config': server_config,
                                     'pickup_config': pickup_config,
                                     'sio_config': sio_config,
                                     'sso_config': sso_config,
                                 })

    # New in 2.0 - Start monitoring as soon as possible
    if server_config.get('newrelic', {}).get('config'):
        import newrelic.agent
        newrelic.agent.initialize(server_config.newrelic.config,
                                  server_config.newrelic.environment or None,
                                  server_config.newrelic.ignore_errors or None,
                                  server_config.newrelic.log_file or None,
                                  server_config.newrelic.log_level or None)

    zunicorn.SERVER_SOFTWARE = server_config.misc.get('http_server_header',
                                                      'Zato')

    # Store KVDB config in logs, possibly replacing its password if told to
    kvdb_config = get_kvdb_config_for_log(server_config.kvdb)
    kvdb_logger.info('Main process config `%s`', kvdb_config)

    # New in 2.0 hence optional
    user_locale = server_config.misc.get('locale', None)
    if user_locale:
        locale.setlocale(locale.LC_ALL, user_locale)
        value = 12345
        logger.info('Locale is `%s`, amount of %s -> `%s`', user_locale, value,
                    locale.currency(value, grouping=True).decode('utf-8'))

    # Makes queries against Postgres asynchronous
    if asbool(server_config.odb.use_async_driver
              ) and server_config.odb.engine == 'postgresql':
        make_psycopg_green()

    if server_config.misc.http_proxy:
        os.environ['http_proxy'] = server_config.misc.http_proxy

    # Basic components needed for the server to boot up
    kvdb = KVDB()
    odb_manager = ODBManager(well_known_data=ZATO_CRYPTO_WELL_KNOWN_DATA)
    sql_pool_store = PoolStore()

    service_store = ServiceStore()
    service_store.odb = odb_manager
    service_store.services = {}

    server = ParallelServer()
    server.odb = odb_manager
    server.service_store = service_store
    server.service_store.server = server
    server.sql_pool_store = sql_pool_store
    server.service_modules = []
    server.kvdb = kvdb

    # Assigned here because it is a circular dependency
    odb_manager.parallel_server = server

    zato_gunicorn_app = ZatoGunicornApplication(server, repo_location,
                                                server_config.main,
                                                server_config.crypto)

    server.has_fg = options.get('fg')
    server.crypto_manager = crypto_manager
    server.odb_data = server_config.odb
    server.host = zato_gunicorn_app.zato_host
    server.port = zato_gunicorn_app.zato_port
    server.repo_location = repo_location
    server.user_conf_location = os.path.join(server.repo_location, 'user-conf')
    server.base_dir = base_dir
    server.logs_dir = os.path.join(server.base_dir, 'logs')
    server.tls_dir = os.path.join(server.base_dir, 'config', 'repo', 'tls')
    server.static_dir = os.path.join(server.base_dir, 'config', 'repo',
                                     'static')
    server.json_schema_dir = os.path.join(server.base_dir, 'config', 'repo',
                                          'schema', 'json')
    server.fs_server_config = server_config
    server.fs_sql_config = get_config(repo_location,
                                      'sql.conf',
                                      needs_user_config=False)
    server.pickup_config = pickup_config
    server.logging_config = logging_config
    server.logging_conf_path = logging_conf_path
    server.sio_config = sio_config
    server.sso_config = sso_config
    server.user_config.update(server_config.user_config_items)
    server.preferred_address = preferred_address
    server.sync_internal = options['sync_internal']
    server.jwt_secret = server.fs_server_config.misc.jwt_secret.encode('utf8')
    server.startup_callable_tool = startup_callable_tool
    server.is_sso_enabled = server.fs_server_config.component_enabled.sso
    if server.is_sso_enabled:
        server.sso_api = SSOAPI(server, sso_config, None,
                                crypto_manager.encrypt, crypto_manager.decrypt,
                                crypto_manager.hash_secret,
                                crypto_manager.verify_hash, new_user_id)

    # Remove all locks possibly left over by previous server instances
    kvdb.component = 'master-proc'
    clear_locks(kvdb, server_config.main.token, server_config.kvdb,
                crypto_manager.decrypt)

    # New in 2.0.8
    server.return_tracebacks = asbool(
        server_config.misc.get('return_tracebacks', True))
    server.default_error_message = server_config.misc.get(
        'default_error_message', 'An error has occurred')

    # Turn the repo dir into an actual repository and commit any new/modified files
    RepoManager(repo_location).ensure_repo_consistency()

    # New in 2.0 so it's optional.
    profiler_enabled = server_config.get('profiler', {}).get('enabled', False)

    # New in 2.0 so it's optional.
    sentry_config = server_config.get('sentry')

    dsn = sentry_config.pop('dsn', None)
    if dsn:

        from raven import Client
        from raven.handlers.logging import SentryHandler

        handler_level = sentry_config.pop('level')
        client = Client(dsn, **sentry_config)

        handler = SentryHandler(client=client)
        handler.setLevel(getattr(logging, handler_level))

        logger = logging.getLogger('')
        logger.addHandler(handler)

        for name in logging.Logger.manager.loggerDict:
            if name.startswith('zato'):
                logger = logging.getLogger(name)
                logger.addHandler(handler)

    if asbool(profiler_enabled):
        profiler_dir = os.path.abspath(
            os.path.join(base_dir, server_config.profiler.profiler_dir))
        server.on_wsgi_request = ProfileMiddleware(
            server.on_wsgi_request,
            log_filename=os.path.join(profiler_dir,
                                      server_config.profiler.log_filename),
            cachegrind_filename=os.path.join(
                profiler_dir, server_config.profiler.cachegrind_filename),
            discard_first_request=server_config.profiler.discard_first_request,
            flush_at_shutdown=server_config.profiler.flush_at_shutdown,
            path=server_config.profiler.url_path,
            unwind=server_config.profiler.unwind)

    # New in 2.0 - set environment variables for servers to inherit
    os_environ = server_config.get('os_environ', {})
    for key, value in os_environ.items():
        os.environ[key] = value

    # Run the hook right before the Gunicorn-level server actually starts
    startup_callable_tool.invoke(SERVER_STARTUP.PHASE.IMPL_BEFORE_RUN,
                                 kwargs={
                                     'zato_gunicorn_app': zato_gunicorn_app,
                                 })

    # Run the app at last
    if start_gunicorn_app:
        zato_gunicorn_app.run()
    else:
        return zato_gunicorn_app.zato_wsgi_app
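The logging bootstrap near the top of run() is standard library plus PyYAML and can be exercised on its own. A minimal sketch, assuming a dictConfig-compatible YAML file at a placeholder path; safe_load is used here defensively, whereas the code above relies on the older yaml.load default:

import logging
from logging.config import dictConfig

import yaml

logging_conf_path = 'config/repo/logging.conf'  # placeholder path

with open(logging_conf_path) as f:
    logging_config = yaml.safe_load(f)  # avoids constructing arbitrary Python objects
    dictConfig(logging_config)

logging.getLogger(__name__).info('Logging configured from %s', logging_conf_path)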
Example #7
    def _init(self):
        """ Initializes all the basic run-time data structures and connects
        to the Zato broker.
        """
        fs_server_config = get_config(self.repo_location, 'server.conf')
        app_context = get_app_context(fs_server_config)
        crypto_manager = get_crypto_manager(self.repo_location, app_context,
                                            fs_server_config)

        config_odb = fs_server_config.odb
        self.odb = app_context.get_object('odb_manager')
        self.odb.crypto_manager = crypto_manager
        self.odb.token = fs_server_config.main.token

        # Key-value DB
        self.kvdb = KVDB()
        self.kvdb.config = fs_server_config.kvdb
        self.kvdb.decrypt_func = self.odb.crypto_manager.decrypt
        self.kvdb.init()

        # Broker client
        self.broker_client = BrokerClient(self.kvdb, self.broker_client_id,
                                          self.broker_callbacks)
        self.broker_client.start()

        # ODB

        #
        # Ticket #35 Don't ignore odb_port when creating an ODB
        # https://github.com/zatosource/zato/issues/35
        #

        engine = config_odb.engine
        port = config_odb.get('port')

        if not port:
            port = 5432 if engine == 'postgresql' else 1521

        self.odb_config = Bunch()
        self.odb_config.db_name = config_odb.db_name
        self.odb_config.is_active = True
        self.odb_config.engine = engine
        self.odb_config.extra = config_odb.extra
        self.odb_config.host = config_odb.host
        self.odb_config.port = port
        self.odb_config.password = self.odb.crypto_manager.decrypt(
            config_odb.password)
        self.odb_config.pool_size = config_odb.pool_size
        self.odb_config.username = config_odb.username

        self.odb_config.is_odb = True

        self.sql_pool_store = app_context.get_object('sql_pool_store')
        self.sql_pool_store[ZATO_ODB_POOL_NAME] = self.odb_config
        self.odb.pool = self.sql_pool_store[ZATO_ODB_POOL_NAME].pool

        self._setup_odb()

        # Delivery store
        self.delivery_store = DeliveryStore(
            self.kvdb, self.broker_client, self.odb,
            float(fs_server_config.misc.delivery_lock_timeout))
Example #8
        try:
            config = {'use_redis_sentinels': True}
            kvdb = KVDB(config=config)
            kvdb.init()
        except ValueError as e:
            eq_(e.message, 'kvdb.redis_sentinels must be provided')
        else:
            self.fail('Expected a ValueError (kvdb.redis_sentinels)')

        try:
            config = {
                'use_redis_sentinels': True,
                'redis_sentinels': redis_sentinels
            }
            kvdb = KVDB(config=config)
            kvdb.init()
        except ValueError as e:
            eq_(e.message, 'kvdb.redis_sentinels_master must be provided')
        else:
            self.fail('Expected a ValueError (kvdb.redis_sentinels_master)')

        config = Bunch({
            'use_redis_sentinels': True,
            'redis_sentinels': redis_sentinels,
            'redis_sentinels_master': redis_sentinels_master,
            'password': password,
            'socket_timeout': socket_timeout
        })
        kvdb = FakeKVDB(config=config, decrypt_func=decrypt_func)
        kvdb.init()
Example #9
    def test_parse_config(self):
        class FakeSentinel(object):
            def __init__(self, sentinels, password, socket_timeout):
                self.sentinels = sentinels
                self.password = password
                self.socket_timeout = socket_timeout
                self.master_for_called_with = None

            def master_for(self, master_name):
                self.master_for_called_with = master_name
                return self

        class FakeStrictRedis(object):
            def __init__(self, **config):
                self.config = config

        class FakeKVDB(KVDB):
            def _get_connection_class(self):
                return FakeSentinel if self.has_sentinel else FakeStrictRedis

        def decrypt_func(password):
            return password

        sentinel1_host, sentinel1_port = 'a-' + rand_string(), rand_int()
        sentinel2_host, sentinel2_port = 'b-' + rand_string(), rand_int()

        password = rand_string()
        socket_timeout = rand_int()
        redis_sentinels_master = rand_string()
        redis_sentinels = [
            '{}:{}'.format(sentinel1_host, sentinel1_port),
            '{}:{}'.format(sentinel2_host, sentinel2_port)
        ]

        try:
            config = {'use_redis_sentinels': True}
            kvdb = KVDB(config=config)
            kvdb.init()
        except ValueError as e:
            eq_(e.message, 'kvdb.redis_sentinels must be provided')
        else:
            self.fail('Expected a ValueError (kvdb.redis_sentinels)')

        try:
            config = {
                'use_redis_sentinels': True,
                'redis_sentinels': redis_sentinels
            }
            kvdb = KVDB(config=config)
            kvdb.init()
        except ValueError as e:
            eq_(e.message, 'kvdb.redis_sentinels_master must be provided')
        else:
            self.fail('Expected a ValueError (kvdb.redis_sentinels_master)')

        config = Bunch({
            'use_redis_sentinels': True,
            'redis_sentinels': redis_sentinels,
            'redis_sentinels_master': redis_sentinels_master,
            'password': password,
            'socket_timeout': socket_timeout
        })
        kvdb = FakeKVDB(config=config, decrypt_func=decrypt_func)
        kvdb.init()

        eq_(sorted(kvdb.conn.sentinels), [(sentinel1_host, sentinel1_port),
                                          (sentinel2_host, sentinel2_port)])

        eq_(kvdb.conn.password, password)
        eq_(kvdb.conn.socket_timeout, socket_timeout)
        eq_(kvdb.conn.master_for_called_with, redis_sentinels_master)

        config = {'use_redis_sentinels': False}
        kvdb = FakeKVDB(config=config, decrypt_func=decrypt_func)
        kvdb.init()

        self.assertTrue(isinstance(kvdb.conn, FakeStrictRedis))
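What the assertions above pin down is that each 'host:port' string in redis_sentinels ends up as a (host, int(port)) tuple on the Sentinel connection. A hypothetical helper illustrating just that conversion; it is not the actual KVDB implementation:

def parse_sentinels(redis_sentinels):
    # Turn ['host:port', ...] into [(host, int(port)), ...]
    out = []
    for item in redis_sentinels:
        host, port = item.split(':')
        out.append((host, int(port)))
    return out

print(parse_sentinels(['10.0.0.1:26379', '10.0.0.2:26379']))
# [('10.0.0.1', 26379), ('10.0.0.2', 26379)]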
Example #10
    def setUp(self):

        # For mocking out Vault responses
        self.vault_adapter = RequestsAdapter()

        # We are always the first process in a server
        os.environ['ZATO_SERVER_WORKER_IDX'] = '1'

        # Represents the server.conf file
        self.fs_server_config = FSServerConfig()

        self.worker_config = ConfigStore()
        self.fernet_key = Fernet.generate_key() # type: str
        self.crypto_manager = CryptoManager(secret_key=self.fernet_key)
        self.vault_conn_api = VaultConnAPI(requests_adapter=self.vault_adapter)

        self.server = ParallelServer()
        self.server.fs_server_config = self.fs_server_config
        self.server.kvdb = KVDB()
        self.server.component_enabled.stats = False
        self.server.component_enabled.slow_response = False
        self.server.crypto_manager = self.crypto_manager

        self.service_store = ServiceStore(is_testing=True)
        self.service_store.server = self.server
        self.service_store.services = {}

        self.server.service_store = self.service_store

        self.fs_sql_config = {
            UNITTEST.SQL_ENGINE: {
                'ping_query': 'SELECT 1+1'
            }
        }

        self.cache = Cache()
        self.sql_pool_store = PoolStore()

        self.worker_store = WorkerStore(self.worker_config, self.server)
        self.worker_store.sql_pool_store = self.sql_pool_store
        self.worker_store.stomp_outconn_api = None
        self.worker_store.outconn_wsx = None
        self.worker_store.vault_conn_api = self.vault_conn_api
        self.worker_store.sms_twilio_api = None
        self.worker_store.out_sap = None
        self.worker_store.out_sftp = None
        self.worker_store.outconn_ldap = {}
        self.worker_store.outconn_mongodb = {}
        self.worker_store.def_kafka = {}

        self.worker_store.cache_api = CacheAPI(self.server)
        self.worker_store.cache_api.default = self.cache

        self.request_handler = RequestHandler(self.server)

        self.wsgi_environ = {
            'HTTP_HOST': 'api.localhost'
        }

        # Callback methods for particular SQL queries
        self.sql_callback_by_idx = {}
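The Fernet key generated in setUp comes from the cryptography package. A minimal, standalone sketch of the same secret handling, independent of Zato's CryptoManager wrapper; the secret being encrypted is purely illustrative:

from cryptography.fernet import Fernet

key = Fernet.generate_key()   # what setUp stores as self.fernet_key
f = Fernet(key)

token = f.encrypt(b'kvdb-password')   # hypothetical secret, for illustration only
assert f.decrypt(token) == b'kvdb-password'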