Example #1
    def setUp(self):

        # For mocking out Vault responses
        self.vault_adapter = RequestsAdapter()

        # We are always the first process in a server
        os.environ['ZATO_SERVER_WORKER_IDX'] = '1'

        # Represents the server.conf file
        self.fs_server_config = FSServerConfig()

        self.worker_config = ConfigStore()
        self.fernet_key = Fernet.generate_key()  # type: bytes
        self.crypto_manager = CryptoManager(secret_key=self.fernet_key)
        self.vault_conn_api = VaultConnAPI(requests_adapter=self.vault_adapter)

        self.server = ParallelServer()
        self.server.fs_server_config = self.fs_server_config
        self.server.kvdb = KVDB()
        self.server.component_enabled.stats = False
        self.server.component_enabled.slow_response = False
        self.server.crypto_manager = self.crypto_manager

        self.service_store = ServiceStore(is_testing=True)
        self.service_store.server = self.server
        self.service_store.services = {}

        self.server.service_store = self.service_store

        self.fs_sql_config = {
            UNITTEST.SQL_ENGINE: {
                'ping_query': 'SELECT 1+1'
            }
        }

        self.cache = Cache()
        self.sql_pool_store = PoolStore()

        self.worker_store = WorkerStore(self.worker_config, self.server)
        self.worker_store.sql_pool_store = self.sql_pool_store
        self.worker_store.outconn_wsx = None
        self.worker_store.vault_conn_api = self.vault_conn_api
        self.worker_store.sms_twilio_api = None
        self.worker_store.out_sap = None
        self.worker_store.out_sftp = None
        self.worker_store.outconn_ldap = {}
        self.worker_store.outconn_mongodb = {}
        self.worker_store.def_kafka = {}

        self.worker_store.cache_api = CacheAPI(self.server)
        self.worker_store.cache_api.default = self.cache

        self.request_handler = RequestHandler(self.server)

        self.wsgi_environ = {'HTTP_HOST': 'api.localhost'}

        # Callback methods for particular SQL queries
        self.sql_callback_by_idx = {}
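
The fixture above hands a freshly generated Fernet key to CryptoManager. For reference, this is the round trip such a key supports in the underlying cryptography package - a minimal sketch using the library directly, independent of Zato's wrapper:

from cryptography.fernet import Fernet

# Generate a key the same way the fixture does - note it is bytes, not str
key = Fernet.generate_key()

# Symmetric encryption round trip
f = Fernet(key)
token = f.encrypt(b'my-secret-value')
assert f.decrypt(token) == b'my-secret-value'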
Example #2
    def on_server_check_kvdb(self, cm, conf, conf_key='kvdb'):

        # Bunch
        from bunch import Bunch

        # Zato
        from zato.common.kvdb.api import KVDB

        # Python 2/3 compatibility
        from future.utils import iteritems

        kvdb_config = Bunch(dict(iteritems(conf[conf_key])))
        kvdb = KVDB(None, kvdb_config, cm.decrypt)
        kvdb.init()
        kvdb.conn.info()
        kvdb.close()

        if self.show_output:
            self.logger.info('Redis connection OK')
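
The method above goes through Zato's KVDB wrapper only to confirm that Redis answers an INFO command. Stripped of that layer, the equivalent connectivity check with plain redis-py looks like this sketch; host and port are placeholders:

from redis import StrictRedis

conn = StrictRedis(host='localhost', port=6379)
conn.info()  # raises ConnectionError if Redis is unreachable
conn.connection_pool.disconnect()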
Example #3
class Scheduler(BrokerMessageReceiver):
    """ The Zato's job scheduler. All of the operations assume the data was already validated and sanitized
    by relevant Zato public API services.
    """
    def __init__(self, config=None, run=False):
        self.config = config
        self.broker_client = None
        self.config.on_job_executed_cb = self.on_job_executed
        self.sched = _Scheduler(self.config, self)

        # Broker connection
        self.broker_conn = KVDB(config=self.config.main.broker, decrypt_func=self.config.crypto_manager.decrypt)
        self.broker_conn.init()

        # Broker client
        self.broker_callbacks = {
            TOPICS[MESSAGE_TYPE.TO_SCHEDULER]: self.on_broker_msg,
        }

        self.broker_client = BrokerClient(self.broker_conn, 'scheduler', self.broker_callbacks, [])

        if run:
            self.serve_forever()

# ################################################################################################################################

    def serve_forever(self):
        try:
            try:
                spawn_greenlet(self.sched.run)
            except Exception:
                logger.warning(format_exc())

            while not self.sched.ready:
                sleep(0.1)

        except Exception:
            logger.warning(format_exc())

# ################################################################################################################################

    def on_job_executed(self, ctx, extra_data_format=ZATO_NONE):
        """ Invoked by the underlying scheduler when a job is executed. Sends the actual execution request to the broker
        so it can be picked up by one of the parallel server's broker clients.
        """
        name = ctx['name']

        payload = ctx['cb_kwargs']['extra']
        if isinstance(payload, bytes):
            payload = payload.decode('utf8')

        msg = {
            'action': SCHEDULER_MSG.JOB_EXECUTED.value,
            'name': name,
            'service': ctx['cb_kwargs']['service'],
            'payload': payload,
            'cid': ctx['cid'],
            'job_type': ctx['type']
        }

        if extra_data_format != ZATO_NONE:
            msg['data_format'] = extra_data_format

        self.broker_client.invoke_async(msg)

        if _has_debug:
            msg = 'Sent a job execution request, name [{}], service [{}], extra [{}]'.format(
                name, ctx['cb_kwargs']['service'], ctx['cb_kwargs']['extra'])
            logger.debug(msg)

        # Now, if it was a one-time job, it needs to be deactivated.
        if ctx['type'] == SCHEDULER.JOB_TYPE.ONE_TIME:
            msg = {
                'action': SERVICE.PUBLISH.value,
                'service': 'zato.scheduler.job.set-active-status',
                'payload': {'id': ctx['id'], 'is_active': False},
                'cid': new_cid(),
                'channel': CHANNEL.SCHEDULER_AFTER_ONE_TIME,
                'data_format': DATA_FORMAT.JSON,
            }
            self.broker_client.publish(msg)

# ################################################################################################################################

    def create_edit(self, action, job_data, **kwargs):
        """ Invokes a handler appropriate for the given action and job_data.job_type.
        """
        handler = '{0}_{1}'.format(action, job_data.job_type)
        handler = getattr(self, handler)

        try:
            handler(job_data, **kwargs)
        except Exception:
            logger.error('Caught exception `%s`', format_exc())

# ################################################################################################################################

    def create_edit_job(self, id, name, old_name, start_time, job_type, service, is_create=True, max_repeats=1, days=0, hours=0,
            minutes=0, seconds=0, extra=None, cron_definition=None, is_active=None, **kwargs):
        """ A base method for scheduling of jobs.
        """
        cb_kwargs = {
            'service': service,
            'extra': extra,
        }

        if job_type == SCHEDULER.JOB_TYPE.CRON_STYLE:
            interval = CronTab(cron_definition)
        else:
            interval = Interval(days=days, hours=hours, minutes=minutes, seconds=seconds)

        job = Job(id, name, job_type, interval, start_time, cb_kwargs=cb_kwargs, max_repeats=max_repeats,
            is_active=is_active, cron_definition=cron_definition, service=service, extra=extra, old_name=old_name)

        func = self.sched.create if is_create else self.sched.edit
        func(job, **kwargs)

# ################################################################################################################################

    def create_edit_one_time(self, job_data, is_create=True, **kwargs):
        """ Re-/schedules the execution of a one-time job.
        """
        self.create_edit_job(job_data.id, job_data.name, job_data.get('old_name'), _start_date(job_data),
            SCHEDULER.JOB_TYPE.ONE_TIME, job_data.service, is_create, extra=job_data.extra,
            is_active=job_data.is_active, **kwargs)

    def create_one_time(self, job_data, **kwargs):
        """ Schedules the execution of a one-time job.
        """
        self.create_edit_one_time(job_data, **kwargs)

    def edit_one_time(self, job_data, **kwargs):
        """ First unschedules a one-time job and then schedules its execution.
        The operations aren't parts of an atomic transaction.
        """
        self.create_edit_one_time(job_data, False, **kwargs)

# ################################################################################################################################

    def create_edit_interval_based(self, job_data, is_create=True, **kwargs):
        """ Re-/schedules the execution of an interval-based job.
        """
        start_date = _start_date(job_data)
        weeks = job_data.weeks if job_data.get('weeks') else 0
        days = job_data.days if job_data.get('days') else 0
        hours = job_data.hours if job_data.get('hours') else 0
        minutes = job_data.minutes if job_data.get('minutes') else 0
        seconds = job_data.seconds if job_data.get('seconds') else 0
        max_repeats = job_data.repeats if job_data.get('repeats') else None

        self.create_edit_job(job_data.id, job_data.name, job_data.get('old_name'), start_date, SCHEDULER.JOB_TYPE.INTERVAL_BASED,
            job_data.service, is_create, max_repeats, days+weeks*7, hours, minutes, seconds, job_data.extra,
            is_active=job_data.is_active, **kwargs)

    def create_interval_based(self, job_data, **kwargs):
        """ Schedules the execution of an interval-based job.
        """
        self.create_edit_interval_based(job_data, **kwargs)

    def edit_interval_based(self, job_data, **kwargs):
        """ First unschedules an interval-based job and then schedules its execution.
        The operations aren't parts of an atomic transaction.
        """
        self.create_edit_interval_based(job_data, False, **kwargs)

# ################################################################################################################################

    def create_edit_cron_style(self, job_data, is_create=True, **kwargs):
        """ Re-/schedules the execution of a cron-style job.
        """
        start_date = _start_date(job_data)
        self.create_edit_job(job_data.id, job_data.name, job_data.get('old_name'), start_date, SCHEDULER.JOB_TYPE.CRON_STYLE,
            job_data.service, is_create, max_repeats=None, extra=job_data.extra, is_active=job_data.is_active,
            cron_definition=job_data.cron_definition, **kwargs)

    def create_cron_style(self, job_data, **kwargs):
        """ Schedules the execution of a cron-style job.
        """
        self.create_edit_cron_style(job_data, **kwargs)

    def edit_cron_style(self, job_data, **kwargs):
        """ First unschedules a cron-style job and then schedules its execution.
        The operations aren't parts of an atomic transaction.
        """
        self.create_edit_cron_style(job_data, False, **kwargs)

# ################################################################################################################################

    def delete(self, job_data, **kwargs):
        """ Deletes the job from the scheduler.
        """
        self.sched.unschedule_by_name(job_data.old_name if job_data.get('old_name') else job_data.name, **kwargs)

# ################################################################################################################################

    def execute(self, job_data):
        self.sched.execute(job_data.name)

# ################################################################################################################################

    def stop(self):
        self.sched.stop()

# ################################################################################################################################

    def filter(self, *ignored):
        """ Accept broker messages destined to our client.
        """
        return True

# ################################################################################################################################

    def on_broker_msg_SCHEDULER_CREATE(self, msg, *ignored_args):
        self.create_edit('create', msg)

# ################################################################################################################################

    def on_broker_msg_SCHEDULER_EDIT(self, msg, *ignored_args):
        self.create_edit('edit', msg)

# ################################################################################################################################

    def on_broker_msg_SCHEDULER_DELETE(self, msg, *ignored_args):
        self.delete(msg)

# ################################################################################################################################

    def on_broker_msg_SCHEDULER_EXECUTE(self, msg, *ignored_args):
        self.execute(msg)

# ################################################################################################################################

    def on_broker_msg_SCHEDULER_CLOSE(self, msg, *ignored_args):
        self.broker_client.close()
        self.stop()
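
Note how create_edit above composes a handler name from the action and job_data.job_type and resolves it with getattr, so a SCHEDULER_CREATE broker message ultimately lands in create_one_time, create_interval_based or create_cron_style. A self-contained sketch of the same dispatch pattern, with hypothetical names:

class Dispatcher(object):

    def create_one_time(self, job_data):
        print('Creating one-time job', job_data)

    def edit_one_time(self, job_data):
        print('Editing one-time job', job_data)

    def dispatch(self, action, job_data):
        # Compose the handler name, e.g. 'create' + '_' + 'one_time',
        # and look it up on the instance - an unknown combination
        # raises AttributeError.
        handler = getattr(self, '{}_{}'.format(action, job_data['job_type']))
        handler(job_data)

Dispatcher().dispatch('create', {'job_type': 'one_time'})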
Example #4
File: main.py Project: danlg/zato
def run(base_dir, start_gunicorn_app=True, options=None):
    # type: (str, bool, dict)
    options = options or {}

    # Store a pidfile before doing anything else
    store_pidfile(base_dir)

    # For dumping stacktraces
    register_diag_handlers()

    # Capture warnings to log files
    logging.captureWarnings(True)

    # Start initializing the server now
    os.chdir(base_dir)

    try:
        import pymysql
        pymysql.install_as_MySQLdb()
    except ImportError:
        pass

    # Users may explicitly configure no certificate validation, in which case
    # we do not want urllib3 to warn about it.
    import requests as _r
    _r.packages.urllib3.disable_warnings()

    repo_location = os.path.join(base_dir, 'config', 'repo')

    # Configure the logging first, before configuring the actual server.
    logging.addLevelName(TRACE1, 'TRACE1')
    logging_conf_path = os.path.join(repo_location, 'logging.conf')

    with open(logging_conf_path) as f:
        logging_config = yaml.load(f, yaml.FullLoader)
        dictConfig(logging_config)

    logger = logging.getLogger(__name__)
    kvdb_logger = logging.getLogger('zato_kvdb')

    crypto_manager = ServerCryptoManager(repo_location,
                                         secret_key=options['secret_key'],
                                         stdin_data=read_stdin_data())
    secrets_config = ConfigObj(os.path.join(repo_location, 'secrets.conf'),
                               use_zato=False)
    server_config = get_config(repo_location,
                               'server.conf',
                               crypto_manager=crypto_manager,
                               secrets_conf=secrets_config)
    pickup_config = get_config(repo_location, 'pickup.conf')

    sio_config = get_config(repo_location,
                            'simple-io.conf',
                            needs_user_config=False)
    sio_config = get_sio_server_config(sio_config)

    sso_config = get_config(repo_location, 'sso.conf', needs_user_config=False)
    normalize_sso_config(sso_config)

    # Now that we have access to server.conf, greenify libraries required to be made greenlet-friendly,
    # assuming that there are any - otherwise do not do anything.
    to_greenify = []
    for key, value in server_config.get('greenify', {}).items():
        if asbool(value):
            if not os.path.exists(key):
                raise ValueError('No such path `{}`'.format(key))
            else:
                to_greenify.append(key)

    # Go ahead only if we actually have anything to greenify
    if to_greenify:
        import greenify
        greenify.greenify()
        for name in to_greenify:
            result = greenify.patch_lib(name)
            if not result:
                raise ValueError(
                    'Library `{}` could not be greenified'.format(name))
            else:
                logger.info('Greenified library `%s`', name)

    server_config.main.token = server_config.main.token.encode('utf8')

    # Do not proceed unless we can be certain our own preferred address or IP can be obtained.
    preferred_address = server_config.preferred_address.get('address')

    if not preferred_address:
        preferred_address = get_preferred_ip(server_config.main.gunicorn_bind,
                                             server_config.preferred_address)

    if not preferred_address and not server_config.server_to_server.boot_if_preferred_not_found:
        msg = 'Unable to start the server. Could not obtain a preferred address, please configure [bind_options] in server.conf'
        logger.warning(msg)
        raise Exception(msg)

    # Create the startup callable tool as soon as practical
    startup_callable_tool = StartupCallableTool(server_config)

    # Run the hook before there is any server object created
    startup_callable_tool.invoke(SERVER_STARTUP.PHASE.FS_CONFIG_ONLY,
                                 kwargs={
                                     'server_config': server_config,
                                     'pickup_config': pickup_config,
                                     'sio_config': sio_config,
                                     'sso_config': sso_config,
                                     'base_dir': base_dir,
                                 })

    # New in 2.0 - Start monitoring as soon as possible
    if server_config.get('newrelic', {}).get('config'):
        import newrelic.agent
        newrelic.agent.initialize(server_config.newrelic.config,
                                  server_config.newrelic.environment or None,
                                  server_config.newrelic.ignore_errors or None,
                                  server_config.newrelic.log_file or None,
                                  server_config.newrelic.log_level or None)

    zunicorn.SERVER_SOFTWARE = server_config.misc.get('http_server_header',
                                                      'Zato')

    # Store KVDB config in logs, possibly replacing its password if told to
    kvdb_config = get_kvdb_config_for_log(server_config.kvdb)
    kvdb_logger.info('Main process config `%s`', kvdb_config)

    # New in 2.0 hence optional
    user_locale = server_config.misc.get('locale', None)
    if user_locale:
        locale.setlocale(locale.LC_ALL, user_locale)
        value = 12345
        logger.info('Locale is `%s`, amount of %s -> `%s`', user_locale, value,
                    locale.currency(value, grouping=True).decode('utf-8'))

    if server_config.misc.http_proxy:
        os.environ['http_proxy'] = server_config.misc.http_proxy

    # Basic components needed for the server to boot up
    kvdb = KVDB()
    odb_manager = ODBManager(well_known_data=ZATO_CRYPTO_WELL_KNOWN_DATA)
    sql_pool_store = PoolStore()

    service_store = ServiceStore()
    service_store.odb = odb_manager
    service_store.services = {}

    server = ParallelServer()
    server.odb = odb_manager
    server.service_store = service_store
    server.service_store.server = server
    server.sql_pool_store = sql_pool_store
    server.service_modules = []
    server.kvdb = kvdb
    server.stderr_path = options.get('stderr_path')

    # Assigned here because it is a circular dependency
    odb_manager.parallel_server = server

    zato_gunicorn_app = ZatoGunicornApplication(server, repo_location,
                                                server_config.main,
                                                server_config.crypto)

    server.has_fg = options.get('fg')
    server.crypto_manager = crypto_manager
    server.odb_data = server_config.odb
    server.host = zato_gunicorn_app.zato_host
    server.port = zato_gunicorn_app.zato_port
    server.repo_location = repo_location
    server.user_conf_location = os.path.join(server.repo_location, 'user-conf')
    server.base_dir = base_dir
    server.logs_dir = os.path.join(server.base_dir, 'logs')
    server.tls_dir = os.path.join(server.base_dir, 'config', 'repo', 'tls')
    server.static_dir = os.path.join(server.base_dir, 'config', 'repo',
                                     'static')
    server.json_schema_dir = os.path.join(server.base_dir, 'config', 'repo',
                                          'schema', 'json')
    server.fs_server_config = server_config
    server.fs_sql_config = get_config(repo_location,
                                      'sql.conf',
                                      needs_user_config=False)
    server.pickup_config = pickup_config
    server.logging_config = logging_config
    server.logging_conf_path = logging_conf_path
    server.sio_config = sio_config
    server.sso_config = sso_config
    server.user_config.update(server_config.user_config_items)
    server.preferred_address = preferred_address
    server.sync_internal = options['sync_internal']
    server.jwt_secret = server.fs_server_config.misc.jwt_secret.encode('utf8')
    server.startup_callable_tool = startup_callable_tool
    server.is_sso_enabled = server.fs_server_config.component_enabled.sso
    if server.is_sso_enabled:
        server.sso_api = SSOAPI(server, sso_config, None,
                                crypto_manager.encrypt, crypto_manager.decrypt,
                                crypto_manager.hash_secret,
                                crypto_manager.verify_hash, new_user_id)

    # Remove all locks possibly left over by previous server instances
    kvdb.component = 'master-proc'
    clear_locks(kvdb, server_config.main.token, server_config.kvdb,
                crypto_manager.decrypt)

    # New in 2.0.8
    server.return_tracebacks = asbool(
        server_config.misc.get('return_tracebacks', True))
    server.default_error_message = server_config.misc.get(
        'default_error_message', 'An error has occurred')

    # Turn the repo dir into an actual repository and commit any new/modified files
    RepoManager(repo_location).ensure_repo_consistency()

    # New in 2.0 so it's optional.
    profiler_enabled = server_config.get('profiler', {}).get('enabled', False)

    # New in 2.0 so it's optional.
    sentry_config = server_config.get('sentry', {})

    dsn = sentry_config.pop('dsn', None)
    if dsn:

        from raven import Client
        from raven.handlers.logging import SentryHandler

        handler_level = sentry_config.pop('level')
        client = Client(dsn, **sentry_config)

        handler = SentryHandler(client=client)
        handler.setLevel(getattr(logging, handler_level))

        logger = logging.getLogger('')
        logger.addHandler(handler)

        for name in logging.Logger.manager.loggerDict:
            if name.startswith('zato'):
                logger = logging.getLogger(name)
                logger.addHandler(handler)

    if asbool(profiler_enabled):

        # Repoze
        from repoze.profile import ProfileMiddleware

        profiler_dir = os.path.abspath(
            os.path.join(base_dir, server_config.profiler.profiler_dir))
        server.on_wsgi_request = ProfileMiddleware(
            server.on_wsgi_request,
            log_filename=os.path.join(profiler_dir,
                                      server_config.profiler.log_filename),
            cachegrind_filename=os.path.join(
                profiler_dir, server_config.profiler.cachegrind_filename),
            discard_first_request=server_config.profiler.discard_first_request,
            flush_at_shutdown=server_config.profiler.flush_at_shutdown,
            path=server_config.profiler.url_path,
            unwind=server_config.profiler.unwind)

    # New in 2.0 - set environment variables for servers to inherit
    os_environ = server_config.get('os_environ', {})
    for key, value in os_environ.items():
        os.environ[key] = value

    # Run the hook right before the Gunicorn-level server actually starts
    startup_callable_tool.invoke(SERVER_STARTUP.PHASE.IMPL_BEFORE_RUN,
                                 kwargs={
                                     'zato_gunicorn_app': zato_gunicorn_app,
                                 })

    # Run the app at last
    if start_gunicorn_app:
        zato_gunicorn_app.run()
    else:
        return zato_gunicorn_app.zato_wsgi_app
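
One pattern worth noting in run() above is how the optional [greenify] section is scanned - each key is a shared-library path and each value a boolean flag. A minimal sketch of the same scan using ConfigObj directly; the file name and section contents are hypothetical:

from configobj import ConfigObj

config = ConfigObj('server.conf')

to_greenify = []
for path, flag in config.get('greenify', {}).items():
    # Values read from an INI file arrive as strings, hence the textual check
    if flag.lower() in ('1', 'true', 'yes', 'on'):
        to_greenify.append(path)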
Example #5
    def test_parse_config(self):
        class FakeSentinel(object):
            def __init__(self, sentinels, password, socket_timeout):
                self.sentinels = sentinels
                self.password = password
                self.socket_timeout = socket_timeout
                self.master_for_called_with = None

            def master_for(self, master_name):
                self.master_for_called_with = master_name
                return self

        class FakeStrictRedis(object):
            def __init__(self, **config):
                self.config = config

        class FakeKVDB(KVDB):
            def _get_connection_class(self):
                return FakeSentinel if self.has_sentinel else FakeStrictRedis

        def decrypt_func(password):
            return password

        sentinel1_host, sentinel1_port = 'a-' + rand_string(), rand_int()
        sentinel2_host, sentinel2_port = 'b-' + rand_string(), rand_int()

        password = rand_string()
        socket_timeout = rand_int()
        redis_sentinels_master = rand_string()
        redis_sentinels = [
            '{}:{}'.format(sentinel1_host, sentinel1_port),
            '{}:{}'.format(sentinel2_host, sentinel2_port)
        ]

        try:
            config = {'use_redis_sentinels': True}
            kvdb = KVDB(config=config)
            kvdb.init()
        except ValueError as e:
            eq_(e.args[0], 'kvdb.redis_sentinels must be provided')
        else:
            self.fail('Expected a ValueError (kvdb.redis_sentinels)')

        try:
            config = {
                'use_redis_sentinels': True,
                'redis_sentinels': redis_sentinels
            }
            kvdb = KVDB(config=config)
            kvdb.init()
        except ValueError as e:
            eq_(e.args[0], 'kvdb.redis_sentinels_master must be provided')
        else:
            self.fail('Expected a ValueError (kvdb.redis_sentinels_master)')

        config = Bunch({
            'use_redis_sentinels': True,
            'redis_sentinels': redis_sentinels,
            'redis_sentinels_master': redis_sentinels_master,
            'password': password,
            'socket_timeout': socket_timeout
        })
        kvdb = FakeKVDB(config=config, decrypt_func=decrypt_func)
        kvdb.init()

        eq_(sorted(kvdb.conn.sentinels), [(sentinel1_host, sentinel1_port),
                                          (sentinel2_host, sentinel2_port)])

        eq_(kvdb.conn.password, password)
        eq_(kvdb.conn.socket_timeout, socket_timeout)
        eq_(kvdb.conn.master_for_called_with, redis_sentinels_master)

        config = {'use_redis_sentinels': False}
        kvdb = FakeKVDB(config=config, decrypt_func=decrypt_func)
        kvdb.init()

        self.assertTrue(isinstance(kvdb.conn, FakeStrictRedis))
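
FakeSentinel and FakeStrictRedis stand in for the real redis-py classes that KVDB instantiates. For comparison, this is the genuine Sentinel API the fakes mimic; host names, ports and the master name are placeholders:

from redis.sentinel import Sentinel

sentinel = Sentinel(
    [('sentinel1.example.com', 26379), ('sentinel2.example.com', 26379)],
    password='secret',
    socket_timeout=0.5,
)

# Returns a client bound to whichever node is currently the master
master = sentinel.master_for('mymaster')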