Example #1
    def setUp(self):

        # For mocking out Vault responses
        self.vault_adapter = RequestsAdapter()

        # We are always the first process in a server
        os.environ['ZATO_SERVER_WORKER_IDX'] = '1'

        # Represents the server.conf file
        self.fs_server_config = FSServerConfig()

        self.worker_config = ConfigStore()
        self.fernet_key = Fernet.generate_key()  # type: str
        self.crypto_manager = CryptoManager(secret_key=self.fernet_key)
        self.vault_conn_api = VaultConnAPI(requests_adapter=self.vault_adapter)

        self.server = ParallelServer()
        self.server.fs_server_config = self.fs_server_config
        self.server.kvdb = KVDB()
        self.server.component_enabled.stats = False
        self.server.component_enabled.slow_response = False
        self.server.crypto_manager = self.crypto_manager

        self.service_store = ServiceStore(is_testing=True)
        self.service_store.server = self.server
        self.service_store.services = {}

        self.server.service_store = self.service_store

        self.fs_sql_config = {
            UNITTEST.SQL_ENGINE: {
                'ping_query': 'SELECT 1+1'
            }
        }

        self.cache = Cache()
        self.sql_pool_store = PoolStore()

        self.worker_store = WorkerStore(self.worker_config, self.server)
        self.worker_store.sql_pool_store = self.sql_pool_store
        self.worker_store.outconn_wsx = None
        self.worker_store.vault_conn_api = self.vault_conn_api
        self.worker_store.sms_twilio_api = None
        self.worker_store.out_sap = None
        self.worker_store.out_sftp = None
        self.worker_store.outconn_ldap = {}
        self.worker_store.outconn_mongodb = {}
        self.worker_store.def_kafka = {}

        self.worker_store.cache_api = CacheAPI(self.server)
        self.worker_store.cache_api.default = self.cache

        self.request_handler = RequestHandler(self.server)

        self.wsgi_environ = {'HTTP_HOST': 'api.localhost'}

        # Callback methods for particular SQL queries
        self.sql_callback_by_idx = {}
Example #2
    def __init__(self, config, repo_location):
        self.odb = config.odb
        self.config = config
        self.repo_location = repo_location
        self.sql_pool_store = PoolStore()

        # Set up the crypto manager that will be used by both ODB and, possibly, KVDB
        self.config.crypto_manager = get_crypto_manager(
            self.repo_location,
            None,
            config.main,
            crypto_manager=CryptoManager())

        # ODB connection
        self.odb = ODBManager()

        if self.config.main.odb.engine != 'sqlite':
            self.config.main.odb.password = self.config.crypto_manager.decrypt(
                config.main.odb.password)
            self.config.main.odb.host = config.main.odb.host
            self.config.main.odb.pool_size = config.main.odb.pool_size
            self.config.main.odb.username = config.main.odb.username

        self.sql_pool_store[ZATO_ODB_POOL_NAME] = self.config.main.odb

        main = self.config.main

        if main.crypto.use_tls:
            priv_key, cert = main.crypto.priv_key_location, main.crypto.cert_location
        else:
            priv_key, cert = None, None

        # API server
        self.api_server = WSGIServer((main.bind.host, int(main.bind.port)),
                                     self,
                                     keyfile=priv_key,
                                     certfile=cert)

        self.odb.pool = self.sql_pool_store[ZATO_ODB_POOL_NAME].pool
        self.odb.init_session(ZATO_ODB_POOL_NAME, self.config.main.odb,
                              self.odb.pool, False)
        self.config.odb = self.odb

        # Scheduler
        self.scheduler = Scheduler(self.config)
Example #3
    def __init__(self, config):
        self.config = config
        self.sql_pool_store = PoolStore()
        self.sql_pool_store[ZATO_ODB_POOL_NAME] = self.config.main.odb

        main = self.config.main

        if main.crypto.use_tls:
            priv_key, cert = main.crypto.priv_key_location, main.crypto.cert_location
        else:
            priv_key, cert = None, None

        # API server
        self.api_server = WSGIServer((main.bind.host, int(main.bind.port)), self, keyfile=priv_key, certfile=cert)

        # ODB connection
        self.odb = ODBManager()
        self.odb.pool = self.sql_pool_store[ZATO_ODB_POOL_NAME].pool
        self.odb.init_session(ZATO_ODB_POOL_NAME, self.config.odb, self.odb.pool, False)
        self.config.odb = self.odb

        # Scheduler
        self.scheduler = Scheduler(self.config)
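
Examples #2 and #3 above share one pattern: a PoolStore is indexed like a dictionary, the ODB configuration is assigned under ZATO_ODB_POOL_NAME, and the stored item exposes a .pool attribute that is then handed to ODBManager.init_session. Below is a minimal sketch of just that pattern; the import paths are assumed (the examples do not show their imports) and odb_config is a placeholder for whatever self.config.main.odb holds.

# A sketch of the PoolStore pattern used in Examples #2 and #3.
# Import paths are assumed; odb_config stands in for the parsed ODB settings.
from zato.common import ZATO_ODB_POOL_NAME
from zato.common.odb.api import ODBManager, PoolStore

def connect_odb(odb_config):

    # Assigning a config dict under a name creates the corresponding SQL connection pool
    sql_pool_store = PoolStore()
    sql_pool_store[ZATO_ODB_POOL_NAME] = odb_config

    # The stored item wraps the pool, which ODBManager uses to open its session
    odb = ODBManager()
    odb.pool = sql_pool_store[ZATO_ODB_POOL_NAME].pool
    odb.init_session(ZATO_ODB_POOL_NAME, odb_config, odb.pool, False)

    return odb, sql_pool_store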
Example #4
def run(base_dir, start_gunicorn_app=True, options=None):
    options = options or {}

    # Store a pidfile before doing anything else
    store_pidfile(base_dir)

    # For dumping stacktraces
    register_diag_handlers()

    # Capture warnings to log files
    logging.captureWarnings(True)

    # Start initializing the server now
    os.chdir(base_dir)

    try:
        import pymysql
        pymysql.install_as_MySQLdb()
    except ImportError:
        pass

    # We're doing it here even if someone doesn't use PostgreSQL at all
    # so we're not surprised when someone suddenly starts using PG.
    # TODO: Make sure it's registered for each of the subprocesses
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)

    # We know we don't need warnings because users may explicitly configure no certificate validation.
    # We don't want urllib3 to warn us about it.
    import requests as _r
    _r.packages.urllib3.disable_warnings()

    repo_location = os.path.join(base_dir, 'config', 'repo')

    # Configure the logging first, before configuring the actual server.
    logging.addLevelName('TRACE1', TRACE1)
    logging_conf_path = os.path.join(repo_location, 'logging.conf')

    with open(logging_conf_path) as f:
        logging_config = yaml.load(f)
        dictConfig(logging_config)

    logger = logging.getLogger(__name__)
    kvdb_logger = logging.getLogger('zato_kvdb')

    crypto_manager = ServerCryptoManager(repo_location,
                                         secret_key=options['secret_key'],
                                         stdin_data=read_stdin_data())
    secrets_config = ConfigObj(os.path.join(repo_location, 'secrets.conf'),
                               use_zato=False)
    server_config = get_config(repo_location,
                               'server.conf',
                               crypto_manager=crypto_manager,
                               secrets_conf=secrets_config)
    pickup_config = get_config(repo_location, 'pickup.conf')
    sio_config = get_config(repo_location,
                            'simple-io.conf',
                            needs_user_config=False)
    sso_config = get_config(repo_location, 'sso.conf', needs_user_config=False)
    normalize_sso_config(sso_config)

    # Now that we have access to server.conf, greenify libraries required to be made greenlet-friendly,
    # assuming that there are any - otherwise do not do anything.
    to_greenify = []
    for key, value in server_config.get('greenify', {}).items():
        if asbool(value):
            if not os.path.exists(key):
                raise ValueError('No such path `{}`'.format(key))
            else:
                to_greenify.append(key)

    # Go ahead only if we actually have anything to greenify
    if to_greenify:
        import greenify
        greenify.greenify()
        for name in to_greenify:
            result = greenify.patch_lib(name)
            if not result:
                raise ValueError(
                    'Library `{}` could not be greenified'.format(name))
            else:
                logger.info('Greenified library `%s`', name)

    server_config.main.token = server_config.main.token.encode('utf8')

    # Do not proceed unless we can be certain our own preferred address or IP can be obtained.
    preferred_address = server_config.preferred_address.get('address')

    if not preferred_address:
        preferred_address = get_preferred_ip(server_config.main.gunicorn_bind,
                                             server_config.preferred_address)

    if not preferred_address and not server_config.server_to_server.boot_if_preferred_not_found:
        msg = 'Unable to start the server. Could not obtain a preferred address, please configure [bind_options] in server.conf'
        logger.warn(msg)
        raise Exception(msg)

    # Create the startup callable tool as soon as practical
    startup_callable_tool = StartupCallableTool(server_config)

    # Run the hook before there is any server object created
    startup_callable_tool.invoke(SERVER_STARTUP.PHASE.FS_CONFIG_ONLY,
                                 kwargs={
                                     'server_config': server_config,
                                     'pickup_config': pickup_config,
                                     'sio_config': sio_config,
                                     'sso_config': sso_config,
                                 })

    # New in 2.0 - Start monitoring as soon as possible
    if server_config.get('newrelic', {}).get('config'):
        import newrelic.agent
        newrelic.agent.initialize(server_config.newrelic.config,
                                  server_config.newrelic.environment or None,
                                  server_config.newrelic.ignore_errors or None,
                                  server_config.newrelic.log_file or None,
                                  server_config.newrelic.log_level or None)

    zunicorn.SERVER_SOFTWARE = server_config.misc.get('http_server_header',
                                                      'Zato')

    # Store KVDB config in logs, possibly replacing its password if told to
    kvdb_config = get_kvdb_config_for_log(server_config.kvdb)
    kvdb_logger.info('Main process config `%s`', kvdb_config)

    # New in 2.0 hence optional
    user_locale = server_config.misc.get('locale', None)
    if user_locale:
        locale.setlocale(locale.LC_ALL, user_locale)
        value = 12345
        logger.info('Locale is `%s`, amount of %s -> `%s`', user_locale, value,
                    locale.currency(value, grouping=True).decode('utf-8'))

    # Makes queries against Postgres asynchronous
    if asbool(server_config.odb.use_async_driver) and server_config.odb.engine == 'postgresql':
        make_psycopg_green()

    if server_config.misc.http_proxy:
        os.environ['http_proxy'] = server_config.misc.http_proxy

    # Basic components needed for the server to boot up
    kvdb = KVDB()
    odb_manager = ODBManager(well_known_data=ZATO_CRYPTO_WELL_KNOWN_DATA)
    sql_pool_store = PoolStore()

    service_store = ServiceStore()
    service_store.odb = odb_manager
    service_store.services = {}

    server = ParallelServer()
    server.odb = odb_manager
    server.service_store = service_store
    server.service_store.server = server
    server.sql_pool_store = sql_pool_store
    server.service_modules = []
    server.kvdb = kvdb

    # Assigned here because it is a circular dependency
    odb_manager.parallel_server = server

    zato_gunicorn_app = ZatoGunicornApplication(server, repo_location,
                                                server_config.main,
                                                server_config.crypto)

    server.has_fg = options.get('fg')
    server.crypto_manager = crypto_manager
    server.odb_data = server_config.odb
    server.host = zato_gunicorn_app.zato_host
    server.port = zato_gunicorn_app.zato_port
    server.repo_location = repo_location
    server.user_conf_location = os.path.join(server.repo_location, 'user-conf')
    server.base_dir = base_dir
    server.logs_dir = os.path.join(server.base_dir, 'logs')
    server.tls_dir = os.path.join(server.base_dir, 'config', 'repo', 'tls')
    server.static_dir = os.path.join(server.base_dir, 'config', 'repo',
                                     'static')
    server.json_schema_dir = os.path.join(server.base_dir, 'config', 'repo',
                                          'schema', 'json')
    server.fs_server_config = server_config
    server.fs_sql_config = get_config(repo_location,
                                      'sql.conf',
                                      needs_user_config=False)
    server.pickup_config = pickup_config
    server.logging_config = logging_config
    server.logging_conf_path = logging_conf_path
    server.sio_config = sio_config
    server.sso_config = sso_config
    server.user_config.update(server_config.user_config_items)
    server.preferred_address = preferred_address
    server.sync_internal = options['sync_internal']
    server.jwt_secret = server.fs_server_config.misc.jwt_secret.encode('utf8')
    server.startup_callable_tool = startup_callable_tool
    server.is_sso_enabled = server.fs_server_config.component_enabled.sso
    if server.is_sso_enabled:
        server.sso_api = SSOAPI(server, sso_config, None,
                                crypto_manager.encrypt, crypto_manager.decrypt,
                                crypto_manager.hash_secret,
                                crypto_manager.verify_hash, new_user_id)

    # Remove all locks possibly left over by previous server instances
    kvdb.component = 'master-proc'
    clear_locks(kvdb, server_config.main.token, server_config.kvdb,
                crypto_manager.decrypt)

    # New in 2.0.8
    server.return_tracebacks = asbool(
        server_config.misc.get('return_tracebacks', True))
    server.default_error_message = server_config.misc.get(
        'default_error_message', 'An error has occurred')

    # Turn the repo dir into an actual repository and commit any new/modified files
    RepoManager(repo_location).ensure_repo_consistency()

    # New in 2.0 so it's optional.
    profiler_enabled = server_config.get('profiler', {}).get('enabled', False)

    # New in 2.0 so it's optional.
    sentry_config = server_config.get('sentry')

    dsn = sentry_config.pop('dsn', None)
    if dsn:

        from raven import Client
        from raven.handlers.logging import SentryHandler

        handler_level = sentry_config.pop('level')
        client = Client(dsn, **sentry_config)

        handler = SentryHandler(client=client)
        handler.setLevel(getattr(logging, handler_level))

        logger = logging.getLogger('')
        logger.addHandler(handler)

        for name in logging.Logger.manager.loggerDict:
            if name.startswith('zato'):
                logger = logging.getLogger(name)
                logger.addHandler(handler)

    if asbool(profiler_enabled):
        profiler_dir = os.path.abspath(
            os.path.join(base_dir, server_config.profiler.profiler_dir))
        server.on_wsgi_request = ProfileMiddleware(
            server.on_wsgi_request,
            log_filename=os.path.join(profiler_dir,
                                      server_config.profiler.log_filename),
            cachegrind_filename=os.path.join(
                profiler_dir, server_config.profiler.cachegrind_filename),
            discard_first_request=server_config.profiler.discard_first_request,
            flush_at_shutdown=server_config.profiler.flush_at_shutdown,
            path=server_config.profiler.url_path,
            unwind=server_config.profiler.unwind)

    # New in 2.0 - set environment variables for servers to inherit
    os_environ = server_config.get('os_environ', {})
    for key, value in os_environ.items():
        os.environ[key] = value

    # Run the hook right before the Gunicorn-level server actually starts
    startup_callable_tool.invoke(SERVER_STARTUP.PHASE.IMPL_BEFORE_RUN,
                                 kwargs={
                                     'zato_gunicorn_app': zato_gunicorn_app,
                                 })

    # Run the app at last
    if start_gunicorn_app:
        zato_gunicorn_app.run()
    else:
        return zato_gunicorn_app.zato_wsgi_app
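
For reference, here is a minimal, hypothetical invocation of run() from Example #4. The path and option values are placeholders; the option keys match the ones the function reads above ('secret_key', 'sync_internal' and 'fg').

# Hypothetical call to run(); base_dir and the option values are placeholders.
if __name__ == '__main__':
    run(
        '/opt/zato/env/server1',       # base_dir containing config/repo
        start_gunicorn_app=True,       # if False, run() returns the WSGI application instead
        options={
            'secret_key': '',          # forwarded to ServerCryptoManager
            'sync_internal': False,    # stored on the server as server.sync_internal
            'fg': True,                # whether the server stays in the foreground
        })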
Example #5
    def sql_pool_store(self):
        return PoolStore()
Example #6
class ServiceTestCase(TestCase):
    def setUp(self):

        # For mocking out Vault responses
        self.vault_adapter = RequestsAdapter()

        # We are always the first process in a server
        os.environ['ZATO_SERVER_WORKER_IDX'] = '1'

        # Represents the server.conf file
        self.fs_server_config = FSServerConfig()

        self.worker_config = ConfigStore()
        self.fernet_key = Fernet.generate_key()  # type: str
        self.crypto_manager = CryptoManager(secret_key=self.fernet_key)
        self.vault_conn_api = VaultConnAPI(requests_adapter=self.vault_adapter)

        self.server = ParallelServer()
        self.server.fs_server_config = self.fs_server_config
        self.server.kvdb = KVDB()
        self.server.component_enabled.stats = False
        self.server.component_enabled.slow_response = False
        self.server.crypto_manager = self.crypto_manager

        self.service_store = ServiceStore(is_testing=True)
        self.service_store.server = self.server
        self.service_store.services = {}

        self.server.service_store = self.service_store

        self.fs_sql_config = {
            UNITTEST.SQL_ENGINE: {
                'ping_query': 'SELECT 1+1'
            }
        }

        self.cache = Cache()
        self.sql_pool_store = PoolStore()

        self.worker_store = WorkerStore(self.worker_config, self.server)
        self.worker_store.sql_pool_store = self.sql_pool_store
        self.worker_store.outconn_wsx = None
        self.worker_store.vault_conn_api = self.vault_conn_api
        self.worker_store.sms_twilio_api = None
        self.worker_store.out_sap = None
        self.worker_store.out_sftp = None
        self.worker_store.outconn_ldap = {}
        self.worker_store.outconn_mongodb = {}
        self.worker_store.def_kafka = {}

        self.worker_store.cache_api = CacheAPI(self.server)
        self.worker_store.cache_api.default = self.cache

        self.request_handler = RequestHandler(self.server)

        self.wsgi_environ = {'HTTP_HOST': 'api.localhost'}

        # Callback methods for particular SQL queries
        self.sql_callback_by_idx = {}

# ################################################################################################################################

    def add_outconn_sql(self, name):
        # type: (str)
        self.sql_pool_store.add_unittest_item(name)

# ################################################################################################################################

    def import_services(self, item):
        # type: (object)
        self.service_store.import_services_from_anywhere(
            item, None, None, False)

# ################################################################################################################################

    def invoke_service(self, class_, request=None, **kwargs):
        # type: (Service, object, **object)

        class_.name = class_.get_name()
        class_.impl_name = class_.get_impl_name()
        class_.component_enabled_ibm_mq = True
        class_.component_enabled_zeromq = False
        class_.component_enabled_sms = True
        class_.component_enabled_cassandra = False
        class_.component_enabled_email = False
        class_.component_enabled_search = False
        class_.component_enabled_msg_path = False
        class_.component_enabled_patterns = False
        class_.has_sio = True
        class_._worker_config = self.worker_config
        class_._worker_store = self.worker_store
        class_.crypto = self.server.crypto_manager

        service = class_()  # type: Service
        service.out.vault = self.vault_conn_api

        self.service_store.services[service.impl_name] = {
            'slow_threshold': 100,
        }

        channel = kwargs.get('channel') or CHANNEL.INVOKE
        data_format = kwargs.get('data_format') or DATA_FORMAT.DICT
        transport = ''
        broker_client = None
        cid = kwargs.get('cid') or new_cid()
        simple_io_config = {'bytes_to_str': {'encoding': 'utf8'}}

        response = service.update_handle(
            self.request_handler._set_response_data,
            service,
            request,
            channel,
            data_format,
            transport,
            self.server,
            broker_client,
            self.worker_store,
            cid,
            simple_io_config,
            environ=kwargs.get('environ'))

        if kwargs.get('as_bunch'):
            if isinstance(response.payload, basestring):
                payload = loads(response.payload)
                payload = bunchify(payload)
                response._payload = payload

        return response
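
To round the listing off, a short, hypothetical test built on the ServiceTestCase from Example #6. MyService and the connection name are made up for illustration; the helper methods (add_outconn_sql, import_services, invoke_service) are the ones defined above.

# A hypothetical test using the ServiceTestCase fixture from Example #6.
# MyService and 'My.SQL.Connection' are illustrative names only.
class MyServiceTestCase(ServiceTestCase):

    def test_my_service(self):

        # Register a mock SQL outgoing connection handled by the unit-test engine
        self.add_outconn_sql('My.SQL.Connection')

        # Make the service class known to the test's ServiceStore
        self.import_services(MyService)

        # Invoke the service and check its response payload
        response = self.invoke_service(MyService, {'hello': 'world'})
        self.assertTrue(response.payload)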