Esempio n. 1
0
class BaseConnector(BrokerMessageReceiver):
    """ A base class for both channels and outgoing connectors.

    Connectors run as separate processes - _init wires up the ODB (the SQL
    operational database), the key-value DB (Redis) and the broker
    connection, while _close terminates the whole process.
    """
    def __init__(self, repo_location, def_id):
        # Path to the server's configuration repository
        self.repo_location = repo_location
        # ID of the connection definition this connector serves
        self.def_id = def_id
        # Both are set up later, in _init
        self.odb = None
        self.sql_pool_store = None
        
    def _close(self):
        """ Close the process, don't forget about the ODB connection if it exists.
        """
        if self.odb:
            self.odb.close()
        # Connectors are standalone processes, so simply returning would not
        # stop them - terminate the current process explicitly.
        p = psutil.Process(os.getpid())
        p.terminate()
    
    def _setup_odb(self):
        """ Confirms that the server this connector runs on top of is known
        to the ODB, raising an exception otherwise.
        """
        # First let's see if the server we're running on top of exists in the ODB.
        self.server = self.odb.fetch_server()
        if not self.server:
            raise Exception('Server does not exist in the ODB')
        
    def _init(self):
        """ Initializes all the basic run-time data structures and connects
        to the Zato broker.
        """
        fs_server_config = get_config(self.repo_location, 'server.conf')
        app_context = get_app_context(fs_server_config)
        crypto_manager = get_crypto_manager(self.repo_location, app_context, fs_server_config)
        
        # The ODB manager is handed the crypto manager first because it is
        # used below to decrypt the ODB password itself.
        config_odb = fs_server_config.odb
        self.odb = app_context.get_object('odb_manager')
        self.odb.crypto_manager = crypto_manager
        self.odb.token = fs_server_config.main.token
        
        # Key-value DB (Redis)
        self.kvdb = KVDB()
        self.kvdb.config = fs_server_config.kvdb
        self.kvdb.decrypt_func = self.odb.crypto_manager.decrypt
        self.kvdb.init()
        
        # SQL connection parameters for the ODB pool
        odb_data = Bunch()
        odb_data.db_name = config_odb.db_name
        odb_data.engine = config_odb.engine
        odb_data.extra = config_odb.extra
        odb_data.host = config_odb.host
        odb_data.password = self.odb.crypto_manager.decrypt(config_odb.password)
        odb_data.pool_size = config_odb.pool_size
        odb_data.username = config_odb.username
        odb_data.is_odb = True
        
        self.sql_pool_store = app_context.get_object('sql_pool_store')
        self.sql_pool_store[ZATO_ODB_POOL_NAME] = odb_data
        self.odb.pool = self.sql_pool_store[ZATO_ODB_POOL_NAME]
        
        self._setup_odb()

        # Connects to the broker
        super(BaseConnector, self)._init()
Esempio n. 2
0
    def _on_server(self, args):
        """ Smoke-tests a server's outgoing connections - runs a ping query
        against the SQL ODB and an INFO command against Redis, logging a
        confirmation for each if self.show_output is set.
        """
        repo_dir = join(self.config_dir, 'repo')
        server_conf = ConfigObj(join(repo_dir, 'server.conf'))

        # The private key is needed to decrypt secrets stored in server.conf
        cm = CryptoManager(priv_key_location=abspath(join(repo_dir, server_conf['crypto']['priv_key_location'])))
        cm.load_keys()
        
        engine_params = dict(server_conf['odb'].items())
        engine_params['extra'] = {}
        engine_params['pool_size'] = 1  # A single connection suffices for a ping
        
        # Each database engine has its own ping query
        query = ping_queries[engine_params['engine']]
        
        session = create_pool(cm, engine_params)
        session.execute(query)
        session.close()
        
        if self.show_output:
            self.logger.info('SQL ODB connection OK')
        
        kvdb_config = Bunch(dict(server_conf['kvdb'].items()))
        kvdb = KVDB(None, kvdb_config, cm.decrypt)
        kvdb.init()
        
        # INFO raises if Redis cannot be reached
        kvdb.conn.info()
        kvdb.close()
        
        if self.show_output:
            self.logger.info('Redis connection OK')
Esempio n. 3
0
    def _init(self):
        """ Initializes all the basic run-time data structures and connects
        to the Zato broker.
        """
        fs_server_config = get_config(self.repo_location, 'server.conf')
        app_context = get_app_context(fs_server_config)
        crypto_manager = get_crypto_manager(self.repo_location, app_context,
                                            fs_server_config)

        # The ODB manager needs the crypto manager to decrypt the ODB password below
        config_odb = fs_server_config.odb
        self.odb = app_context.get_object('odb_manager')
        self.odb.crypto_manager = crypto_manager
        self.odb.token = fs_server_config.main.token

        # Key-value DB (Redis)
        self.kvdb = KVDB()
        self.kvdb.config = fs_server_config.kvdb
        self.kvdb.decrypt_func = self.odb.crypto_manager.decrypt
        self.kvdb.init()

        # Broker client
        self.broker_client = BrokerClient(self.kvdb, self.broker_client_id,
                                          self.broker_callbacks)
        self.broker_client.start()

        # ODB

        #
        # Ticket #35 Don't ignore odb_port when creating an ODB
        # https://github.com/zatosource/zato/issues/35
        #

        engine = config_odb.engine

        # Bug fix: config_odb['port'] raised KeyError if server.conf had no
        # port entry - use .get and fall back to the engine's default port.
        port = config_odb.get('port')
        if not port:
            port = 5432 if engine == 'postgresql' else 1521

        # SQL connection parameters for the ODB pool
        self.odb_config = Bunch()
        self.odb_config.db_name = config_odb.db_name
        self.odb_config.is_active = True
        self.odb_config.engine = engine
        self.odb_config.extra = config_odb.extra
        self.odb_config.host = config_odb.host
        self.odb_config.port = port
        self.odb_config.password = self.odb.crypto_manager.decrypt(
            config_odb.password)
        self.odb_config.pool_size = config_odb.pool_size
        self.odb_config.username = config_odb.username

        self.odb_config.is_odb = True

        self.sql_pool_store = app_context.get_object('sql_pool_store')
        self.sql_pool_store[ZATO_ODB_POOL_NAME] = self.odb_config
        self.odb.pool = self.sql_pool_store[ZATO_ODB_POOL_NAME].pool

        self._setup_odb()

        # Delivery store
        self.delivery_store = DeliveryStore(
            self.kvdb, self.broker_client, self.odb,
            float(fs_server_config.misc.delivery_lock_timeout))
Esempio n. 4
0
    def on_server_check_kvdb(self, cm, server_conf):
        """ Confirms that a Redis connection can be established using the
        KVDB settings from `server_conf`, decrypting secrets with `cm`.
        """
        redis_config = Bunch(dict(server_conf['kvdb'].items()))
        kvdb = KVDB(None, redis_config, cm.decrypt)
        kvdb.init()

        # A successful INFO call proves the server is reachable
        kvdb.conn.info()
        kvdb.close()

        if self.show_output:
            self.logger.info('Redis connection OK')
Esempio n. 5
0
    def _init(self):
        """ Initializes all the basic run-time data structures and connects
        to the Zato broker.
        """
        fs_server_config = get_config(self.repo_location, 'server.conf')
        app_context = get_app_context(fs_server_config)
        crypto_manager = get_crypto_manager(self.repo_location, app_context, fs_server_config)
        
        # The ODB manager needs the crypto manager to decrypt the ODB password below
        config_odb = fs_server_config.odb
        self.odb = app_context.get_object('odb_manager')
        self.odb.crypto_manager = crypto_manager
        self.odb.token = fs_server_config.main.token
        
        # Key-value DB (Redis)
        self.kvdb = KVDB()
        self.kvdb.config = fs_server_config.kvdb
        self.kvdb.decrypt_func = self.odb.crypto_manager.decrypt
        self.kvdb.init()
        
        # Broker client
        self.broker_client = BrokerClient(self.kvdb, self.broker_client_id, self.broker_callbacks)
        self.broker_client.start()

        # ODB

        #
        # Ticket #35 Don't ignore odb_port when creating an ODB
        # https://github.com/zatosource/zato/issues/35
        #
        
        engine = config_odb.engine
        port = config_odb.get('port')
        
        # No port in server.conf - fall back to the engine's default one
        if not port:
            port = 5432 if engine == 'postgresql' else 1521
        
        # SQL connection parameters for the ODB pool
        self.odb_config = Bunch()
        self.odb_config.db_name = config_odb.db_name
        self.odb_config.is_active = True
        self.odb_config.engine = engine
        self.odb_config.extra = config_odb.extra
        self.odb_config.host = config_odb.host
        self.odb_config.port = port
        self.odb_config.password = self.odb.crypto_manager.decrypt(config_odb.password)
        self.odb_config.pool_size = config_odb.pool_size
        self.odb_config.username = config_odb.username
        
        self.odb_config.is_odb = True
        
        self.sql_pool_store = app_context.get_object('sql_pool_store')
        self.sql_pool_store[ZATO_ODB_POOL_NAME] = self.odb_config
        self.odb.pool = self.sql_pool_store[ZATO_ODB_POOL_NAME].pool
        
        self._setup_odb()
        
        # Delivery store
        self.delivery_store = DeliveryStore(self.kvdb, self.broker_client, self.odb, float(fs_server_config.misc.delivery_lock_timeout))
Esempio n. 6
0
    def _init(self):
        """ Initializes all the basic run-time data structures and connects
        to the Zato broker.
        """
        fs_server_config = get_config(self.repo_location, 'server.conf')
        app_context = get_app_context(fs_server_config)
        crypto_manager = get_crypto_manager(self.repo_location, app_context,
                                            fs_server_config)

        # The ODB manager needs the crypto manager to decrypt the ODB password below
        config_odb = fs_server_config.odb
        self.odb = app_context.get_object('odb_manager')
        self.odb.crypto_manager = crypto_manager
        self.odb.token = fs_server_config.main.token

        # Key-value DB (Redis)
        self.kvdb = KVDB()
        self.kvdb.config = fs_server_config.kvdb
        self.kvdb.decrypt_func = self.odb.crypto_manager.decrypt
        self.kvdb.init()

        # Broker client
        self.broker_client = BrokerClient(self.kvdb, self.broker_client_id,
                                          self.broker_callbacks)
        self.broker_client.start()

        # ODB
        #
        # Ticket #35 Don't ignore odb_port when creating an ODB
        # https://github.com/zatosource/zato/issues/35
        #
        # Bug fix: the port from server.conf used to be ignored entirely;
        # pick it up here, defaulting to the engine's well-known port.
        engine = config_odb.engine
        port = config_odb.get('port')
        if not port:
            port = 5432 if engine == 'postgresql' else 1521

        # SQL connection parameters for the ODB pool
        self.odb_config = Bunch()
        self.odb_config.db_name = config_odb.db_name
        self.odb_config.is_active = True
        self.odb_config.engine = engine
        self.odb_config.extra = config_odb.extra
        self.odb_config.host = config_odb.host
        self.odb_config.port = port
        self.odb_config.password = self.odb.crypto_manager.decrypt(
            config_odb.password)
        self.odb_config.pool_size = config_odb.pool_size
        self.odb_config.username = config_odb.username
        self.odb_config.is_odb = True

        self.sql_pool_store = app_context.get_object('sql_pool_store')
        self.sql_pool_store[ZATO_ODB_POOL_NAME] = self.odb_config
        self.odb.pool = self.sql_pool_store[ZATO_ODB_POOL_NAME].pool

        self._setup_odb()
Esempio n. 7
0
    def __init__(self, config=None, run=False):
        """ Sets up the underlying scheduler along with its broker connection
        and client and, if `run` is True, blocks in serve_forever.
        """
        self.config = config
        self.broker_client = None
        # Let the underlying scheduler call us back whenever a job fires
        self.config.on_job_executed_cb = self.on_job_executed
        self.sched = _Scheduler(self.config, self)

        # Broker connection
        self.broker_conn = KVDB(config=self.config.main.broker)
        self.broker_conn.init()

        # Broker client - subscribes to messages addressed to the scheduler
        self.broker_callbacks = {
            TOPICS[MESSAGE_TYPE.TO_SCHEDULER]: self.on_broker_msg,
        }

        self.broker_client = BrokerClient(self.broker_conn, 'scheduler',
                                          self.broker_callbacks, [])

        if run:
            self.serve_forever()
Esempio n. 8
0
    def on_server_check_kvdb(self, cm, conf):
        """ Pings Redis as configured in `conf` and verifies the server is
        at least version 2.8.4, logging a confirmation on success.
        """
        redis_config = Bunch(dict(conf['kvdb'].items()))
        kvdb = KVDB(None, redis_config, cm.decrypt)
        kvdb.init()

        required = '2.8.4'

        server_info = kvdb.conn.info()
        version = server_info.get('redis_version')

        if not version:
            raise Exception('Could not obtain `redis_version` from {}'.format(server_info))

        if LooseVersion(version) < LooseVersion(required):
            raise Exception('Redis version required: `{}` or later, found:`{}`'.format(required, version))

        kvdb.close()

        if self.show_output:
            self.logger.info('Redis connection OK')
Esempio n. 9
0
    def test_parse_config(self):
        """ KVDB.init must raise ValueError when Redis sentinels are enabled
        but kvdb.redis_sentinels is not provided.
        """
        class FakeSentinel(object):
            def __init__(self, sentinels, password, socket_timeout):
                self.sentinels = sentinels
                self.password = password
                self.socket_timeout = socket_timeout
                self.master_for_called_with = None

            def master_for(self, master_name):
                self.master_for_called_with = master_name
                return self

        class FakeStrictRedis(object):
            def __init__(self, **config):
                self.config = config

        class FakeKVDB(KVDB):
            def _get_connection_class(self):
                return FakeSentinel if self.has_sentinel else FakeStrictRedis

        def decrypt_func(password):
            return password

        sentinel1_host, sentinel1_port = 'a-' + rand_string(), rand_int()
        sentinel2_host, sentinel2_port = 'b-' + rand_string(), rand_int()

        password = rand_string()
        socket_timeout = rand_int()
        redis_sentinels_master = rand_string()
        redis_sentinels = [
            '{}:{}'.format(sentinel1_host, sentinel1_port),
            '{}:{}'.format(sentinel2_host, sentinel2_port)
        ]

        try:
            config = {'use_redis_sentinels': True}
            kvdb = KVDB(config=config)
            kvdb.init()
        # Bug fix: 'except ValueError, e' is Python 2-only syntax - 'as' works
        # on Python 2.6+ and 3.x alike; e.args[0] replaces the Python 2-only
        # e.message attribute.
        except ValueError as e:
            eq_(e.args[0], 'kvdb.redis_sentinels must be provided')
        # Bug fix: without this else clause the test silently passed when no
        # ValueError was raised at all.
        else:
            self.fail('Expected a ValueError (kvdb.redis_sentinels)')
Esempio n. 10
0
    def _on_server(self, args):
        """ Checks the server's external connections - pings the SQL ODB with
        the engine's ping query and Redis with an INFO command, logging a
        confirmation for each if self.show_output is set.
        """
        repo_dir = join(self.config_dir, 'repo')
        server_conf = ConfigObj(join(repo_dir, 'server.conf'))

        # Load the private key so secrets in server.conf can be decrypted
        cm = CryptoManager(priv_key_location=abspath(join(repo_dir, server_conf['crypto']['priv_key_location'])))
        cm.load_keys()
        
        engine_params = dict(server_conf['odb'].items())
        engine_params['extra'] = {}
        engine_params['pool_size'] = 1  # One connection is enough for a ping
        
        # Each database engine has its own ping query
        query = ping_queries[engine_params['engine']]
        
        session = create_pool(cm, engine_params)
        session.execute(query)
        session.close()
        
        if self.show_output:
            self.logger.info('SQL ODB connection OK')
        
        kvdb_config = Bunch(dict(server_conf['kvdb'].items()))
        kvdb = KVDB(None, kvdb_config, cm.decrypt)
        kvdb.init()
        
        # INFO raises if Redis cannot be reached
        kvdb.conn.info()
        kvdb.close()
        
        if self.show_output:
            self.logger.info('Redis connection OK')
Esempio n. 11
0
class Scheduler(BrokerMessageReceiver):
    """ The Zato's job scheduler. All of the operations assume the data was already validated and sanitized
    by relevant Zato public API services.
    """
    def __init__(self, config=None, run=False):
        """ Sets up the underlying scheduler, its broker connection and client
        and, if `run` is True, blocks in serve_forever.
        """
        self.config = config
        self.broker_client = None
        # Let the underlying scheduler call us back whenever a job fires
        self.config.on_job_executed_cb = self.on_job_executed
        self.sched = _Scheduler(self.config, self)

        # Broker connection
        self.broker_conn = KVDB(
            config=self.config.main.broker,
            decrypt_func=self.config.crypto_manager.decrypt)
        self.broker_conn.init()

        # Broker client - subscribes to messages addressed to the scheduler
        self.broker_callbacks = {
            TOPICS[MESSAGE_TYPE.TO_SCHEDULER]: self.on_broker_msg,
        }

        self.broker_client = BrokerClient(self.broker_conn, 'scheduler',
                                          self.broker_callbacks, [])

        if run:
            self.serve_forever()

    def serve_forever(self):
        """ Starts the scheduler in a greenlet and waits until it reports
        itself ready, logging rather than propagating any exception.
        """
        try:
            try:
                spawn_greenlet(self.sched.run)
            # Bug fix: 'except Exception, e' is Python 2-only syntax - 'as'
            # works on Python 2.6+ and 3.x alike.
            except Exception as e:
                # NOTE(review): presumably this is Zato's own format_exc that
                # accepts an exception object - confirm it is not the stdlib
                # traceback.format_exc, whose argument is a traceback limit.
                logger.warn(format_exc(e))

            # Wait for the scheduler to confirm it is up and running
            while not self.sched.ready:
                sleep(0.1)

        except Exception as e:
            logger.warn(format_exc(e))
Esempio n. 12
0
    def test_parse_config(self):
        """ KVDB.init must raise ValueError when Redis sentinels are enabled
        but kvdb.redis_sentinels is not provided.
        """

        class FakeSentinel(object):
            def __init__(self, sentinels, password, socket_timeout):
                self.sentinels = sentinels
                self.password = password
                self.socket_timeout = socket_timeout
                self.master_for_called_with = None

            def master_for(self, master_name):
                self.master_for_called_with = master_name
                return self

        class FakeStrictRedis(object):
            def __init__(self, **config):
                self.config = config

        class FakeKVDB(KVDB):
            def _get_connection_class(self):
                return FakeSentinel if self.has_sentinel else FakeStrictRedis

        def decrypt_func(password):
            return password

        sentinel1_host, sentinel1_port = 'a-' + rand_string(), rand_int()
        sentinel2_host, sentinel2_port = 'b-' + rand_string(), rand_int()

        password = rand_string()
        socket_timeout = rand_int()
        redis_sentinels_master = rand_string()
        redis_sentinels = ['{}:{}'.format(sentinel1_host, sentinel1_port), '{}:{}'.format(sentinel2_host, sentinel2_port)]

        try:
            config = {'use_redis_sentinels': True}
            kvdb = KVDB(config=config)
            kvdb.init()
        # Bug fix: 'except ValueError, e' is Python 2-only syntax - 'as' works
        # on Python 2.6+ and 3.x alike; e.args[0] replaces the Python 2-only
        # e.message attribute.
        except ValueError as e:
            eq_(e.args[0], 'kvdb.redis_sentinels must be provided')
        # Bug fix: without this else clause the test silently passed when no
        # ValueError was raised at all.
        else:
            self.fail('Expected a ValueError (kvdb.redis_sentinels)')
Esempio n. 13
0
    def on_server_check_kvdb(self, cm, conf):
        """ Confirms that a Redis connection can be established using the
        KVDB settings from `conf`, decrypting secrets with `cm`.
        """
        redis_config = Bunch(dict(conf['kvdb'].items()))
        kvdb = KVDB(None, redis_config, cm.decrypt)
        kvdb.init()

        # A successful INFO call proves the server is reachable
        kvdb.conn.info()
        kvdb.close()

        if self.show_output:
            self.logger.info('Redis connection OK')
Esempio n. 14
0
 def _init(self):
     """ Initializes all the basic run-time data structures and connects
     to the Zato broker.
     """
     fs_server_config = get_config(self.repo_location, 'server.conf')
     app_context = get_app_context(fs_server_config)
     crypto_manager = get_crypto_manager(self.repo_location, app_context, fs_server_config)
     
     # The ODB manager needs the crypto manager to decrypt the ODB password below
     config_odb = fs_server_config.odb
     self.odb = app_context.get_object('odb_manager')
     self.odb.crypto_manager = crypto_manager
     self.odb.token = fs_server_config.main.token
     
     # Key-value DB (Redis)
     self.kvdb = KVDB()
     self.kvdb.config = fs_server_config.kvdb
     self.kvdb.decrypt_func = self.odb.crypto_manager.decrypt
     self.kvdb.init()
     
     # Broker client
     self.broker_client = BrokerClient(self.kvdb, self.broker_client_id, self.broker_callbacks)
     self.broker_client.start()
     
     # ODB - SQL connection parameters for the pool
     self.odb_config = Bunch()
     self.odb_config.db_name = config_odb.db_name
     self.odb_config.is_active = True
     self.odb_config.engine = config_odb.engine
     self.odb_config.extra = config_odb.extra
     self.odb_config.host = config_odb.host
     self.odb_config.password = self.odb.crypto_manager.decrypt(config_odb.password)
     self.odb_config.pool_size = config_odb.pool_size
     self.odb_config.username = config_odb.username
     
     self.odb_config.is_odb = True
     
     self.sql_pool_store = app_context.get_object('sql_pool_store')
     self.sql_pool_store[ZATO_ODB_POOL_NAME] = self.odb_config
     self.odb.pool = self.sql_pool_store[ZATO_ODB_POOL_NAME].pool
     
     self._setup_odb()
     
     # Delivery store
     self.delivery_store = DeliveryStore(self.kvdb, self.broker_client, self.odb, float(fs_server_config.misc.delivery_lock_timeout))
Esempio n. 15
0
    def _init(self):
        """ Initializes all the basic run-time data structures and connects
        to the Zato broker.
        """
        fs_server_config = get_config(self.repo_location, 'server.conf')
        app_context = get_app_context(fs_server_config)
        crypto_manager = get_crypto_manager(self.repo_location, app_context, fs_server_config)
        
        # The ODB manager needs the crypto manager to decrypt the ODB password below
        config_odb = fs_server_config.odb
        self.odb = app_context.get_object('odb_manager')
        self.odb.crypto_manager = crypto_manager
        self.odb.token = fs_server_config.main.token
        
        # Key-value DB (Redis)
        self.kvdb = KVDB()
        self.kvdb.config = fs_server_config.kvdb
        self.kvdb.decrypt_func = self.odb.crypto_manager.decrypt
        self.kvdb.init()
        
        # SQL connection parameters for the ODB pool
        odb_data = Bunch()
        odb_data.db_name = config_odb.db_name
        odb_data.engine = config_odb.engine
        odb_data.extra = config_odb.extra
        odb_data.host = config_odb.host
        odb_data.password = self.odb.crypto_manager.decrypt(config_odb.password)
        odb_data.pool_size = config_odb.pool_size
        odb_data.username = config_odb.username
        odb_data.is_odb = True
        
        self.sql_pool_store = app_context.get_object('sql_pool_store')
        self.sql_pool_store[ZATO_ODB_POOL_NAME] = odb_data
        self.odb.pool = self.sql_pool_store[ZATO_ODB_POOL_NAME]
        
        self._setup_odb()

        # Connects to the broker
        super(BaseConnector, self)._init()
Esempio n. 16
0
    def on_server_check_kvdb(self, cm, conf, conf_key='kvdb'):
        """ Pings Redis using the settings stored under `conf_key` in `conf`
        and requires the server to be at least version 2.8.4.
        """
        redis_config = Bunch(dict(iteritems((conf[conf_key]))))
        kvdb = KVDB(None, redis_config, cm.decrypt)
        kvdb.init()

        required = '2.8.4'

        server_info = kvdb.conn.info()
        version = server_info.get('redis_version')

        if not version:
            raise Exception('Could not obtain `redis_version` from {}'.format(server_info))

        if LooseVersion(version) < LooseVersion(required):
            raise Exception('Redis version required: `{}` or later, found:`{}`'.format(required, version))

        kvdb.close()

        if self.show_output:
            self.logger.info('Redis connection OK')
Esempio n. 17
0
    def setUp(self):
        """ Builds a minimal in-memory ParallelServer with just enough wiring
        (worker store, service store, cache, SQL pool store) for the
        request-handling tests to run without any external services.
        """
        # For mocking out Vault responses
        self.vault_adapter = RequestsAdapter()

        # We are always the first process in a server
        os.environ['ZATO_SERVER_WORKER_IDX'] = '1'

        # Represents the server.conf file
        self.fs_server_config = FSServerConfig()

        self.worker_config = ConfigStore()
        self.fernet_key = Fernet.generate_key() # type: str
        self.crypto_manager = CryptoManager(secret_key=self.fernet_key)
        self.vault_conn_api = VaultConnAPI(requests_adapter=self.vault_adapter)

        # A server with stats and slow-response tracking disabled for tests
        self.server = ParallelServer()
        self.server.fs_server_config = self.fs_server_config
        self.server.kvdb = KVDB()
        self.server.component_enabled.stats = False
        self.server.component_enabled.slow_response = False
        self.server.crypto_manager = self.crypto_manager

        self.service_store = ServiceStore(is_testing=True)
        self.service_store.server = self.server
        self.service_store.services = {}

        self.server.service_store = self.service_store

        # Minimal SQL configuration - only a ping query for the unittest engine
        self.fs_sql_config = {
            UNITTEST.SQL_ENGINE: {
                'ping_query': 'SELECT 1+1'
            }
        }

        self.cache = Cache()
        self.sql_pool_store = PoolStore()

        # Worker store with every optional connection API stubbed out
        self.worker_store = WorkerStore(self.worker_config, self.server)
        self.worker_store.sql_pool_store = self.sql_pool_store
        self.worker_store.stomp_outconn_api = None
        self.worker_store.outconn_wsx = None
        self.worker_store.vault_conn_api = self.vault_conn_api
        self.worker_store.sms_twilio_api = None
        self.worker_store.out_sap = None
        self.worker_store.out_sftp = None
        self.worker_store.outconn_ldap = {}
        self.worker_store.outconn_mongodb = {}
        self.worker_store.def_kafka = {}

        self.worker_store.cache_api = CacheAPI(self.server)
        self.worker_store.cache_api.default = self.cache

        self.request_handler = RequestHandler(self.server)

        self.wsgi_environ = {
            'HTTP_HOST': 'api.localhost'
        }

        # Callback methods for particular SQL queries
        self.sql_callback_by_idx = {}
Esempio n. 18
0
    def test_parse_config(self):
        """ Exercises KVDB configuration parsing - missing sentinel settings
        must raise ValueError and a complete sentinel or plain-Redis config
        must produce the expected connection object.
        """
        class FakeSentinel(object):
            def __init__(self, sentinels, password, socket_timeout):
                self.sentinels = sentinels
                self.password = password
                self.socket_timeout = socket_timeout
                self.master_for_called_with = None

            def master_for(self, master_name):
                self.master_for_called_with = master_name
                return self

        class FakeStrictRedis(object):
            def __init__(self, **config):
                self.config = config

        class FakeKVDB(KVDB):
            def _get_connection_class(self):
                return FakeSentinel if self.has_sentinel else FakeStrictRedis

        def decrypt_func(password):
            return password

        sentinel1_host, sentinel1_port = 'a-' + rand_string(), rand_int()
        sentinel2_host, sentinel2_port = 'b-' + rand_string(), rand_int()

        password = rand_string()
        socket_timeout = rand_int()
        redis_sentinels_master = rand_string()
        redis_sentinels = [
            '{}:{}'.format(sentinel1_host, sentinel1_port),
            '{}:{}'.format(sentinel2_host, sentinel2_port)
        ]

        try:
            config = {'use_redis_sentinels': True}
            kvdb = KVDB(config=config)
            kvdb.init()
        except ValueError as e:
            # Bug fix: e.message is Python 2-only and no longer exists under
            # Python 3 - e.args[0] is equivalent and works on both.
            eq_(e.args[0], 'kvdb.redis_sentinels must be provided')
        else:
            self.fail('Expected a ValueError (kvdb.redis_sentinels)')

        try:
            config = {
                'use_redis_sentinels': True,
                'redis_sentinels': redis_sentinels
            }
            kvdb = KVDB(config=config)
            kvdb.init()
        except ValueError as e:
            eq_(e.args[0], 'kvdb.redis_sentinels_master must be provided')
        else:
            self.fail('Expected a ValueError (kvdb.redis_sentinels_master)')

        # A complete sentinel config - the fake sentinel must receive all the
        # parsed settings.
        config = Bunch({
            'use_redis_sentinels': True,
            'redis_sentinels': redis_sentinels,
            'redis_sentinels_master': redis_sentinels_master,
            'password': password,
            'socket_timeout': socket_timeout
        })
        kvdb = FakeKVDB(config=config, decrypt_func=decrypt_func)
        kvdb.init()

        eq_(sorted(kvdb.conn.sentinels), [(sentinel1_host, sentinel1_port),
                                          (sentinel2_host, sentinel2_port)])

        eq_(kvdb.conn.password, password)
        eq_(kvdb.conn.socket_timeout, socket_timeout)
        eq_(kvdb.conn.master_for_called_with, redis_sentinels_master)

        # Without sentinels a plain Redis connection class is used
        config = {'use_redis_sentinels': False}
        kvdb = FakeKVDB(config=config, decrypt_func=decrypt_func)
        kvdb.init()

        self.assertTrue(isinstance(kvdb.conn, FakeStrictRedis))
Esempio n. 19
0
 def kvdb(self):
     """ Returns a new KVDB instance - note that a fresh, uninitialized
     object is constructed on every call.
     """
     return KVDB()
Esempio n. 20
0
        try:
            config = {'use_redis_sentinels': True}
            kvdb = KVDB(config=config)
            kvdb.init()
        # Bug fix: 'except ValueError, e' is Python 2-only syntax and
        # e.message no longer exists under Python 3 - use 'as' and e.args[0].
        except ValueError as e:
            eq_(e.args[0], 'kvdb.redis_sentinels must be provided')
        else:
            self.fail('Expected a ValueError (kvdb.redis_sentinels)')

        try:
            config = {
                'use_redis_sentinels': True,
                'redis_sentinels': redis_sentinels
            }
            kvdb = KVDB(config=config)
            kvdb.init()
        except ValueError as e:
            eq_(e.args[0], 'kvdb.redis_sentinels_master must be provided')
        else:
            self.fail('Expected a ValueError (kvdb.redis_sentinels_master)')

        # A complete sentinel configuration must initialize cleanly
        config = Bunch({
            'use_redis_sentinels': True,
            'redis_sentinels': redis_sentinels,
            'redis_sentinels_master': redis_sentinels_master,
            'password': password,
            'socket_timeout': socket_timeout
        })
        kvdb = FakeKVDB(config=config, decrypt_func=decrypt_func)
        kvdb.init()
Esempio n. 21
0
        socket_timeout = rand_int()
        redis_sentinels_master = rand_string()
        redis_sentinels = ['{}:{}'.format(sentinel1_host, sentinel1_port), '{}:{}'.format(sentinel2_host, sentinel2_port)]

        try:
            config = {'use_redis_sentinels': True}
            kvdb = KVDB(config=config)
            kvdb.init()
        # Bug fix: 'except ValueError, e' is Python 2-only syntax and
        # e.message no longer exists under Python 3 - use 'as' and e.args[0].
        except ValueError as e:
            eq_(e.args[0], 'kvdb.redis_sentinels must be provided')
        else:
            self.fail('Expected a ValueError (kvdb.redis_sentinels)')

        try:
            config = {'use_redis_sentinels': True, 'redis_sentinels': redis_sentinels}
            kvdb = KVDB(config=config)
            kvdb.init()
        except ValueError as e:
            eq_(e.args[0], 'kvdb.redis_sentinels_master must be provided')
        else:
            self.fail('Expected a ValueError (kvdb.redis_sentinels_master)')

        # A complete sentinel configuration must initialize cleanly
        config = Bunch({
            'use_redis_sentinels': True,
            'redis_sentinels':redis_sentinels,
            'redis_sentinels_master':redis_sentinels_master,
            'password': password,
            'socket_timeout':socket_timeout
        })
        kvdb = FakeKVDB(config=config, decrypt_func=decrypt_func)
        kvdb.init()
Esempio n. 22
0
def run(base_dir, start_gunicorn_app=True, options=None):
    """ Boots a Zato server rooted at base_dir: stores a pidfile, configures
    logging, reads all config files from the repo directory, wires up the
    core components (ODB, KVDB, SQL pool store, service store, the
    ParallelServer object) and finally starts the Gunicorn-based application.

    base_dir - the server's top-level directory
    start_gunicorn_app - when False, the WSGI application object is returned
        instead of being run
    options - dict of runtime options; 'secret_key' and 'sync_internal' are
        accessed with [] below and are therefore required keys
    """
    options = options or {}

    # Store a pidfile before doing anything else
    store_pidfile(base_dir)

    # For dumping stacktraces
    register_diag_handlers()

    # Capture warnings to log files
    logging.captureWarnings(True)

    # Start initializing the server now
    os.chdir(base_dir)

    # pymysql is optional - if present, it is installed as a drop-in
    # replacement for MySQLdb; its absence is not an error.
    try:
        import pymysql
        pymysql.install_as_MySQLdb()
    except ImportError:
        pass

    # We're doing it here even if someone doesn't use PostgreSQL at all
    # so we're not suprised when someone suddenly starts using PG.
    # TODO: Make sure it's registered for each of the subprocess
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)

    # We know we don't need warnings because users may explicitly configure no certificate validation.
    # We don't want for urllib3 to warn us about it.
    import requests as _r
    _r.packages.urllib3.disable_warnings()

    repo_location = os.path.join(base_dir, 'config', 'repo')

    # Configure the logging first, before configuring the actual server.
    # NOTE(review): logging.addLevelName's signature is (level, levelName) -
    # the arguments here look swapped ('TRACE1' string first, numeric TRACE1
    # second) - confirm this is intended.
    logging.addLevelName('TRACE1', TRACE1)
    logging_conf_path = os.path.join(repo_location, 'logging.conf')

    with open(logging_conf_path) as f:
        # NOTE(review): yaml.load without an explicit Loader is deprecated in
        # PyYAML 5.1+ and unsafe on untrusted input - confirm the pinned
        # PyYAML version or consider yaml.safe_load for this config file.
        logging_config = yaml.load(f)
        dictConfig(logging_config)

    logger = logging.getLogger(__name__)
    kvdb_logger = logging.getLogger('zato_kvdb')

    crypto_manager = ServerCryptoManager(repo_location,
                                         secret_key=options['secret_key'],
                                         stdin_data=read_stdin_data())
    secrets_config = ConfigObj(os.path.join(repo_location, 'secrets.conf'),
                               use_zato=False)
    server_config = get_config(repo_location,
                               'server.conf',
                               crypto_manager=crypto_manager,
                               secrets_conf=secrets_config)
    pickup_config = get_config(repo_location, 'pickup.conf')
    sio_config = get_config(repo_location,
                            'simple-io.conf',
                            needs_user_config=False)
    sso_config = get_config(repo_location, 'sso.conf', needs_user_config=False)
    normalize_sso_config(sso_config)

    # Now that we have access to server.conf, greenify libraries required to be made greenlet-friendly,
    # assuming that there are any - otherwise do not do anything.
    to_greenify = []
    for key, value in server_config.get('greenify', {}).items():
        if asbool(value):
            if not os.path.exists(key):
                raise ValueError('No such path `{}`'.format(key))
            else:
                to_greenify.append(key)

    # Go ahead only if we actually have anything to greenify
    if to_greenify:
        import greenify
        greenify.greenify()
        for name in to_greenify:
            result = greenify.patch_lib(name)
            if not result:
                raise ValueError(
                    'Library `{}` could not be greenified'.format(name))
            else:
                logger.info('Greenified library `%s`', name)

    server_config.main.token = server_config.main.token.encode('utf8')

    # Do not proceed unless we can be certain our own preferred address or IP can be obtained.
    preferred_address = server_config.preferred_address.get('address')

    if not preferred_address:
        preferred_address = get_preferred_ip(server_config.main.gunicorn_bind,
                                             server_config.preferred_address)

    if not preferred_address and not server_config.server_to_server.boot_if_preferred_not_found:
        msg = 'Unable to start the server. Could not obtain a preferred address, please configure [bind_options] in server.conf'
        logger.warn(msg)
        raise Exception(msg)

    # Create the startup callable tool as soon as practical
    startup_callable_tool = StartupCallableTool(server_config)

    # Run the hook before there is any server object created
    startup_callable_tool.invoke(SERVER_STARTUP.PHASE.FS_CONFIG_ONLY,
                                 kwargs={
                                     'server_config': server_config,
                                     'pickup_config': pickup_config,
                                     'sio_config': sio_config,
                                     'sso_config': sso_config,
                                 })

    # New in 2.0 - Start monitoring as soon as possible
    if server_config.get('newrelic', {}).get('config'):
        import newrelic.agent
        newrelic.agent.initialize(server_config.newrelic.config,
                                  server_config.newrelic.environment or None,
                                  server_config.newrelic.ignore_errors or None,
                                  server_config.newrelic.log_file or None,
                                  server_config.newrelic.log_level or None)

    zunicorn.SERVER_SOFTWARE = server_config.misc.get('http_server_header',
                                                      'Zato')

    # Store KVDB config in logs, possibly replacing its password if told to
    kvdb_config = get_kvdb_config_for_log(server_config.kvdb)
    kvdb_logger.info('Main process config `%s`', kvdb_config)

    # New in 2.0 hence optional
    user_locale = server_config.misc.get('locale', None)
    if user_locale:
        locale.setlocale(locale.LC_ALL, user_locale)
        # Sample amount logged only to demonstrate the effective locale
        value = 12345
        logger.info('Locale is `%s`, amount of %s -> `%s`', user_locale, value,
                    locale.currency(value, grouping=True).decode('utf-8'))

    # Makes queries against Postgres asynchronous
    if asbool(server_config.odb.use_async_driver
              ) and server_config.odb.engine == 'postgresql':
        make_psycopg_green()

    if server_config.misc.http_proxy:
        os.environ['http_proxy'] = server_config.misc.http_proxy

    # Basic components needed for the server to boot up
    kvdb = KVDB()
    odb_manager = ODBManager(well_known_data=ZATO_CRYPTO_WELL_KNOWN_DATA)
    sql_pool_store = PoolStore()

    service_store = ServiceStore()
    service_store.odb = odb_manager
    service_store.services = {}

    server = ParallelServer()
    server.odb = odb_manager
    server.service_store = service_store
    server.service_store.server = server
    server.sql_pool_store = sql_pool_store
    server.service_modules = []
    server.kvdb = kvdb

    # Assigned here because it is a circular dependency
    odb_manager.parallel_server = server

    zato_gunicorn_app = ZatoGunicornApplication(server, repo_location,
                                                server_config.main,
                                                server_config.crypto)

    server.has_fg = options.get('fg')
    server.crypto_manager = crypto_manager
    server.odb_data = server_config.odb
    server.host = zato_gunicorn_app.zato_host
    server.port = zato_gunicorn_app.zato_port
    server.repo_location = repo_location
    server.user_conf_location = os.path.join(server.repo_location, 'user-conf')
    server.base_dir = base_dir
    server.logs_dir = os.path.join(server.base_dir, 'logs')
    server.tls_dir = os.path.join(server.base_dir, 'config', 'repo', 'tls')
    server.static_dir = os.path.join(server.base_dir, 'config', 'repo',
                                     'static')
    server.json_schema_dir = os.path.join(server.base_dir, 'config', 'repo',
                                          'schema', 'json')
    server.fs_server_config = server_config
    server.fs_sql_config = get_config(repo_location,
                                      'sql.conf',
                                      needs_user_config=False)
    server.pickup_config = pickup_config
    server.logging_config = logging_config
    server.logging_conf_path = logging_conf_path
    server.sio_config = sio_config
    server.sso_config = sso_config
    server.user_config.update(server_config.user_config_items)
    server.preferred_address = preferred_address
    server.sync_internal = options['sync_internal']
    server.jwt_secret = server.fs_server_config.misc.jwt_secret.encode('utf8')
    server.startup_callable_tool = startup_callable_tool
    server.is_sso_enabled = server.fs_server_config.component_enabled.sso
    if server.is_sso_enabled:
        server.sso_api = SSOAPI(server, sso_config, None,
                                crypto_manager.encrypt, crypto_manager.decrypt,
                                crypto_manager.hash_secret,
                                crypto_manager.verify_hash, new_user_id)

    # Remove all locks possibly left over by previous server instances
    kvdb.component = 'master-proc'
    clear_locks(kvdb, server_config.main.token, server_config.kvdb,
                crypto_manager.decrypt)

    # New in 2.0.8
    server.return_tracebacks = asbool(
        server_config.misc.get('return_tracebacks', True))
    server.default_error_message = server_config.misc.get(
        'default_error_message', 'An error has occurred')

    # Turn the repo dir into an actual repository and commit any new/modified files
    RepoManager(repo_location).ensure_repo_consistency()

    # New in 2.0 so it's optional.
    profiler_enabled = server_config.get('profiler', {}).get('enabled', False)

    # New in 2.0 so it's optional.
    sentry_config = server_config.get('sentry')

    dsn = sentry_config.pop('dsn', None)
    if dsn:

        from raven import Client
        from raven.handlers.logging import SentryHandler

        handler_level = sentry_config.pop('level')
        client = Client(dsn, **sentry_config)

        handler = SentryHandler(client=client)
        handler.setLevel(getattr(logging, handler_level))

        # Attach the Sentry handler to the root logger ..
        logger = logging.getLogger('')
        logger.addHandler(handler)

        # .. and to every zato* logger already registered.
        for name in logging.Logger.manager.loggerDict:
            if name.startswith('zato'):
                logger = logging.getLogger(name)
                logger.addHandler(handler)

    if asbool(profiler_enabled):
        profiler_dir = os.path.abspath(
            os.path.join(base_dir, server_config.profiler.profiler_dir))
        server.on_wsgi_request = ProfileMiddleware(
            server.on_wsgi_request,
            log_filename=os.path.join(profiler_dir,
                                      server_config.profiler.log_filename),
            cachegrind_filename=os.path.join(
                profiler_dir, server_config.profiler.cachegrind_filename),
            discard_first_request=server_config.profiler.discard_first_request,
            flush_at_shutdown=server_config.profiler.flush_at_shutdown,
            path=server_config.profiler.url_path,
            unwind=server_config.profiler.unwind)

    # New in 2.0 - set environmet variables for servers to inherit
    os_environ = server_config.get('os_environ', {})
    for key, value in os_environ.items():
        os.environ[key] = value

    # Run the hook right before the Gunicorn-level server actually starts
    startup_callable_tool.invoke(SERVER_STARTUP.PHASE.IMPL_BEFORE_RUN,
                                 kwargs={
                                     'zato_gunicorn_app': zato_gunicorn_app,
                                 })

    # Run the app at last
    if start_gunicorn_app:
        zato_gunicorn_app.run()
    else:
        return zato_gunicorn_app.zato_wsgi_app
Esempio n. 23
0
class Scheduler(BrokerMessageReceiver):
    """ The Zato's job scheduler. All of the operations assume the data was already validated and sanitized
    by relevant Zato public API services.
    """
    def __init__(self, config=None, run=False):
        # config - a scheduler configuration object; must expose .main.broker,
        # .crypto_manager and accept an .on_job_executed_cb attribute
        # run - when True, the scheduler starts serving immediately
        self.config = config
        self.broker_client = None
        self.config.on_job_executed_cb = self.on_job_executed
        self.sched = _Scheduler(self.config, self)

        # Broker connection
        self.broker_conn = KVDB(
            config=self.config.main.broker,
            decrypt_func=self.config.crypto_manager.decrypt)
        self.broker_conn.init()

        # Broker client
        self.broker_callbacks = {
            TOPICS[MESSAGE_TYPE.TO_SCHEDULER]: self.on_broker_msg,
        }

        self.broker_client = BrokerClient(self.broker_conn, 'scheduler',
                                          self.broker_callbacks, [])

        if run:
            self.serve_forever()

# ################################################################################################################################

    def serve_forever(self):
        """ Starts the underlying scheduler in a greenlet and blocks until
        it reports readiness. Errors are logged, never propagated.
        """
        try:
            try:
                spawn_greenlet(self.sched.run)
            except Exception:
                logger.warn(format_exc())

            while not self.sched.ready:
                sleep(0.1)

        except Exception:
            logger.warn(format_exc())

# ################################################################################################################################

    def on_job_executed(self, ctx, extra_data_format=ZATO_NONE):
        """ Invoked by the underlying scheduler when a job is executed. Sends the actual execution request to the broker
        so it can be picked up by one of the parallel server's broker clients.
        """
        name = ctx['name']

        msg = {
            'action': SCHEDULER_MSG.JOB_EXECUTED.value,
            'name': name,
            'service': ctx['cb_kwargs']['service'],
            'payload': ctx['cb_kwargs']['extra'],
            'cid': ctx['cid'],
            'job_type': ctx['type']
        }

        if extra_data_format != ZATO_NONE:
            msg['data_format'] = extra_data_format

        self.broker_client.invoke_async(msg)

        if _has_debug:
            msg = 'Sent a job execution request, name [{}], service [{}], extra [{}]'.format(
                name, ctx['cb_kwargs']['service'], ctx['cb_kwargs']['extra'])
            logger.debug(msg)

        # Now, if it was a one-time job, it needs to be deactivated.
        if ctx['type'] == SCHEDULER.JOB_TYPE.ONE_TIME:
            msg = {
                'action': SERVICE.PUBLISH.value,
                'service': 'zato.scheduler.job.set-active-status',
                'payload': {
                    'id': ctx['id'],
                    'is_active': False
                },
                'cid': new_cid(),
                'channel': CHANNEL.SCHEDULER_AFTER_ONE_TIME,
                'data_format': DATA_FORMAT.JSON,
            }
            self.broker_client.publish(msg)

# ################################################################################################################################

    def create_edit(self, action, job_data, **kwargs):
        """ Invokes a handler appropriate for the given action and job_data.job_type.
        """
        handler = '{0}_{1}'.format(action, job_data.job_type)
        handler = getattr(self, handler)

        try:
            handler(job_data, **kwargs)
        except Exception:
            # Fix: the previous `except Exception, e` / `format_exc(e)` form
            # was Python-2-only syntax and passed the exception object as
            # traceback.format_exc's `limit` parameter, which is not an
            # exception argument - format_exc() formats the active exception
            # on its own.
            logger.error('Caught exception `%s`', format_exc())
Esempio n. 24
0
class BaseConnector(BrokerMessageReceiver):
    """ Shared plumbing for channel and outgoing connector processes.
    """
    def __init__(self, repo_location, def_id):
        # Where the server keeps its config repository
        self.repo_location = repo_location
        # ID of the connection definition this connector serves
        self.def_id = def_id
        self.odb = None
        self.odb_config = None
        self.sql_pool_store = None

    def _close(self):
        """ Terminates the connector process, closing the ODB connection
        first if one has been established.
        """
        if self.odb:
            self.odb.close()
        psutil.Process(os.getpid()).terminate()

    def _setup_odb(self):
        """ Confirms that the server this connector runs on top of actually
        exists in the ODB; raises otherwise.
        """
        self.server = self.odb.fetch_server(self.odb_config)
        if not self.server:
            raise Exception('Server does not exist in the ODB')

    def _init(self):
        """ Initializes all the basic run-time data structures and connects
        to the Zato broker.
        """
        fs_server_config = get_config(self.repo_location, 'server.conf')
        app_context = get_app_context(fs_server_config)
        crypto_manager = get_crypto_manager(
            self.repo_location, app_context, fs_server_config)

        odb_conf = fs_server_config.odb

        # ODB manager, given the crypto material read above
        self.odb = app_context.get_object('odb_manager')
        self.odb.crypto_manager = crypto_manager
        self.odb.token = fs_server_config.main.token

        # Key-value DB
        self.kvdb = KVDB()
        self.kvdb.config = fs_server_config.kvdb
        self.kvdb.decrypt_func = self.odb.crypto_manager.decrypt
        self.kvdb.init()

        # Broker client
        self.broker_client = BrokerClient(
            self.kvdb, self.broker_client_id, self.broker_callbacks)
        self.broker_client.start()

        # ODB

        #
        # Ticket #35 Don't ignore odb_port when creating an ODB
        # https://github.com/zatosource/zato/issues/35
        #

        engine_type = odb_conf.engine
        odb_port = odb_conf['port']

        settings = {
            'db_name': odb_conf.db_name,
            'is_active': True,
            'engine': engine_type,
            'extra': odb_conf.extra,
        }

        # Network-level settings apply only to client/server databases,
        # never to SQLite.
        if engine_type != 'sqlite':
            settings['password'] = self.odb.crypto_manager.decrypt(odb_conf.password)
            settings['host'] = odb_conf.host
            settings['port'] = odb_port
            settings['pool_size'] = odb_conf.pool_size
            settings['username'] = odb_conf.username

        settings['is_odb'] = True
        self.odb_config = Bunch(settings)

        # Register the ODB pool and hand it over to the ODB manager
        self.sql_pool_store = app_context.get_object('sql_pool_store')
        self.sql_pool_store[ZATO_ODB_POOL_NAME] = self.odb_config
        self.odb.pool = self.sql_pool_store[ZATO_ODB_POOL_NAME].pool

        self._setup_odb()
Esempio n. 25
0
class BaseConnector(BrokerMessageReceiver):
    """ A base class for both channels and outgoing connectors.
    """
    def __init__(self, repo_location, def_id):
        # Where the server keeps its config repository
        self.repo_location = repo_location
        # ID of the connection definition this connector serves
        self.def_id = def_id
        self.odb = None
        self.odb_config = None
        self.sql_pool_store = None

    def _close(self):
        """ Close the process, don't forget about the ODB connection if it exists.
        """
        if self.odb:
            self.odb.close()
        p = psutil.Process(os.getpid())
        p.terminate()

    def _setup_odb(self):
        """ Makes sure the server this connector runs on top of exists in the
        ODB; raises otherwise.
        """
        # First let's see if the server we're running on top of exists in the ODB.
        self.server = self.odb.fetch_server(self.odb_config)
        if not self.server:
            raise Exception('Server does not exist in the ODB')

    def _init(self):
        """ Initializes all the basic run-time data structures and connects
        to the Zato broker.
        """
        fs_server_config = get_config(self.repo_location, 'server.conf')
        app_context = get_app_context(fs_server_config)
        crypto_manager = get_crypto_manager(self.repo_location, app_context, fs_server_config)

        config_odb = fs_server_config.odb
        self.odb = app_context.get_object('odb_manager')
        self.odb.crypto_manager = crypto_manager
        self.odb.token = fs_server_config.main.token

        # Key-value DB
        self.kvdb = KVDB()
        self.kvdb.config = fs_server_config.kvdb
        self.kvdb.decrypt_func = self.odb.crypto_manager.decrypt
        self.kvdb.init()

        # Broker client
        self.broker_client = BrokerClient(self.kvdb, self.broker_client_id, self.broker_callbacks)
        self.broker_client.start()

        # ODB

        #
        # Ticket #35 Don't ignore odb_port when creating an ODB
        # https://github.com/zatosource/zato/issues/35
        #

        engine = config_odb.engine
        port = config_odb['port']

        self.odb_config = Bunch()
        self.odb_config.db_name = config_odb.db_name
        self.odb_config.is_active = True
        self.odb_config.engine = engine
        self.odb_config.extra = config_odb.extra

        # Fix: connection-level settings - host, port, credentials, pool size
        # and the decrypted password - apply only to client/server databases.
        # SQLite has none of these, so attempting to read or decrypt them for
        # an SQLite ODB is incorrect; this mirrors the guard used by the other
        # connector variant in this code base.
        if engine != 'sqlite':
            self.odb_config.password = self.odb.crypto_manager.decrypt(config_odb.password)
            self.odb_config.host = config_odb.host
            self.odb_config.port = port
            self.odb_config.pool_size = config_odb.pool_size
            self.odb_config.username = config_odb.username

        self.odb_config.is_odb = True

        self.sql_pool_store = app_context.get_object('sql_pool_store')
        self.sql_pool_store[ZATO_ODB_POOL_NAME] = self.odb_config
        self.odb.pool = self.sql_pool_store[ZATO_ODB_POOL_NAME].pool

        self._setup_odb()

        # Delivery store
        self.delivery_store = DeliveryStore(
            self.kvdb, self.broker_client, self.odb, float(fs_server_config.misc.delivery_lock_timeout))
Esempio n. 26
0
class Scheduler(BrokerMessageReceiver):
    """ The Zato's job scheduler. All of the operations assume the data was already validated and sanitized
    by relevant Zato public API services.
    """
    def __init__(self, config=None, run=False):
        # config - a scheduler configuration object; must expose .main.broker,
        # .crypto_manager and accept an .on_job_executed_cb attribute
        # run - when True, start serving immediately from the constructor
        self.config = config
        self.broker_client = None
        self.config.on_job_executed_cb = self.on_job_executed
        self.sched = _Scheduler(self.config, self)

        # Broker connection
        self.broker_conn = KVDB(
            config=self.config.main.broker,
            decrypt_func=self.config.crypto_manager.decrypt)
        self.broker_conn.init()

        # Broker client
        self.broker_callbacks = {
            TOPICS[MESSAGE_TYPE.TO_SCHEDULER]: self.on_broker_msg,
        }

        self.broker_client = BrokerClient(self.broker_conn, 'scheduler',
                                          self.broker_callbacks, [])

        if run:
            self.serve_forever()

# ################################################################################################################################

    def serve_forever(self):
        """ Starts the underlying scheduler in a greenlet and blocks until it
        reports readiness. All errors are logged rather than propagated.
        """
        try:
            try:
                spawn_greenlet(self.sched.run)
            except Exception:
                logger.warn(format_exc())

            # Busy-wait until the underlying scheduler signals it is ready
            while not self.sched.ready:
                sleep(0.1)

        except Exception:
            logger.warn(format_exc())

# ################################################################################################################################

    def on_job_executed(self, ctx, extra_data_format=ZATO_NONE):
        """ Invoked by the underlying scheduler when a job is executed. Sends the actual execution request to the broker
        so it can be picked up by one of the parallel server's broker clients.

        ctx - a dict describing the executed job; 'name', 'cid', 'type' and
            'cb_kwargs' (with 'service' and 'extra') are read below
        extra_data_format - if given, forwarded as the message's data_format
        """
        name = ctx['name']

        msg = {
            'action': SCHEDULER_MSG.JOB_EXECUTED.value,
            'name': name,
            'service': ctx['cb_kwargs']['service'],
            'payload': ctx['cb_kwargs']['extra'],
            'cid': ctx['cid'],
            'job_type': ctx['type']
        }

        if extra_data_format != ZATO_NONE:
            msg['data_format'] = extra_data_format

        self.broker_client.invoke_async(msg)

        if _has_debug:
            msg = 'Sent a job execution request, name [{}], service [{}], extra [{}]'.format(
                name, ctx['cb_kwargs']['service'], ctx['cb_kwargs']['extra'])
            logger.debug(msg)

        # Now, if it was a one-time job, it needs to be deactivated.
        if ctx['type'] == SCHEDULER.JOB_TYPE.ONE_TIME:
            msg = {
                'action': SERVICE.PUBLISH.value,
                'service': 'zato.scheduler.job.set-active-status',
                'payload': {
                    'id': ctx['id'],
                    'is_active': False
                },
                'cid': new_cid(),
                'channel': CHANNEL.SCHEDULER_AFTER_ONE_TIME,
                'data_format': DATA_FORMAT.JSON,
            }
            self.broker_client.publish(msg)

# ################################################################################################################################

    def create_edit(self, action, job_data, **kwargs):
        """ Invokes a handler appropriate for the given action and job_data.job_type.
        """
        # Dispatch by name, e.g. 'create' + 'one_time' -> self.create_one_time
        handler = '{0}_{1}'.format(action, job_data.job_type)
        handler = getattr(self, handler)

        try:
            handler(job_data, **kwargs)
        except Exception:
            logger.error('Caught exception `%s`', format_exc())

# ################################################################################################################################

    def create_edit_job(self,
                        id,
                        name,
                        old_name,
                        start_time,
                        job_type,
                        service,
                        is_create=True,
                        max_repeats=1,
                        days=0,
                        hours=0,
                        minutes=0,
                        seconds=0,
                        extra=None,
                        cron_definition=None,
                        is_active=None,
                        **kwargs):
        """ A base method for scheduling of jobs.

        Cron-style jobs take their interval from cron_definition; all other
        job types use the days/hours/minutes/seconds interval arguments.
        is_create decides whether self.sched.create or self.sched.edit runs.
        """
        cb_kwargs = {
            'service': service,
            'extra': extra,
        }

        if job_type == SCHEDULER.JOB_TYPE.CRON_STYLE:
            interval = CronTab(cron_definition)
        else:
            interval = Interval(days=days,
                                hours=hours,
                                minutes=minutes,
                                seconds=seconds)

        job = Job(id,
                  name,
                  job_type,
                  interval,
                  start_time,
                  cb_kwargs=cb_kwargs,
                  max_repeats=max_repeats,
                  is_active=is_active,
                  cron_definition=cron_definition,
                  old_name=old_name)

        func = self.sched.create if is_create else self.sched.edit
        func(job, **kwargs)

# ################################################################################################################################

    def create_edit_one_time(self, job_data, is_create=True, **kwargs):
        """ Re-/schedules the execution of a one-time job.
        """
        self.create_edit_job(job_data.id,
                             job_data.name,
                             job_data.get('old_name'),
                             _start_date(job_data),
                             SCHEDULER.JOB_TYPE.ONE_TIME,
                             job_data.service,
                             is_create,
                             extra=job_data.extra,
                             is_active=job_data.is_active,
                             **kwargs)

    def create_one_time(self, job_data, **kwargs):
        """ Schedules the execution of a one-time job.
        """
        self.create_edit_one_time(job_data, **kwargs)

    def edit_one_time(self, job_data, **kwargs):
        """ First unschedules a one-time job and then schedules its execution.
        The operations aren't parts of an atomic transaction.
        """
        self.create_edit_one_time(job_data, False, **kwargs)

# ################################################################################################################################

    def create_edit_interval_based(self, job_data, is_create=True, **kwargs):
        """ Re-/schedules the execution of an interval-based job.
        """
        start_date = _start_date(job_data)
        # Each interval component is optional in job_data and defaults to 0
        weeks = job_data.weeks if job_data.get('weeks') else 0
        days = job_data.days if job_data.get('days') else 0
        hours = job_data.hours if job_data.get('hours') else 0
        minutes = job_data.minutes if job_data.get('minutes') else 0
        seconds = job_data.seconds if job_data.get('seconds') else 0
        max_repeats = job_data.repeats if job_data.get('repeats') else None

        # Note that weeks are folded into days below (days + weeks * 7)
        self.create_edit_job(job_data.id,
                             job_data.name,
                             job_data.get('old_name'),
                             start_date,
                             SCHEDULER.JOB_TYPE.INTERVAL_BASED,
                             job_data.service,
                             is_create,
                             max_repeats,
                             days + weeks * 7,
                             hours,
                             minutes,
                             seconds,
                             job_data.extra,
                             is_active=job_data.is_active,
                             **kwargs)

    def create_interval_based(self, job_data, **kwargs):
        """ Schedules the execution of an interval-based job.
        """
        self.create_edit_interval_based(job_data, **kwargs)

    def edit_interval_based(self, job_data, **kwargs):
        """ First unschedules an interval-based job and then schedules its execution.
        The operations aren't parts of an atomic transaction.
        """
        self.create_edit_interval_based(job_data, False, **kwargs)

# ################################################################################################################################

    def create_edit_cron_style(self, job_data, is_create=True, **kwargs):
        """ Re-/schedules the execution of a cron-style job.
        """
        start_date = _start_date(job_data)
        self.create_edit_job(job_data.id,
                             job_data.name,
                             job_data.get('old_name'),
                             start_date,
                             SCHEDULER.JOB_TYPE.CRON_STYLE,
                             job_data.service,
                             is_create,
                             max_repeats=None,
                             extra=job_data.extra,
                             is_active=job_data.is_active,
                             cron_definition=job_data.cron_definition,
                             **kwargs)

    def create_cron_style(self, job_data, **kwargs):
        """ Schedules the execution of a cron-style job.
        """
        self.create_edit_cron_style(job_data, **kwargs)

    def edit_cron_style(self, job_data, **kwargs):
        """ First unschedules a cron-style job and then schedules its execution.
        The operations aren't parts of an atomic transaction.
        """
        self.create_edit_cron_style(job_data, False, **kwargs)

# ################################################################################################################################

    def delete(self, job_data, **kwargs):
        """ Deletes the job from the scheduler.
        """
        # Prefer old_name when present - the job may have just been renamed
        self.sched.unschedule_by_name(
            job_data.old_name if job_data.get('old_name') else job_data.name,
            **kwargs)

# ################################################################################################################################

    def execute(self, job_data):
        """ Executes a job immediately, by name.
        """
        self.sched.execute(job_data.name)

# ################################################################################################################################

    def stop(self):
        """ Stops the underlying scheduler.
        """
        self.sched.stop()

# ################################################################################################################################

    def filter(self, *ignored):
        """ Accept broker messages destined to our client.
        """
        return True

# ################################################################################################################################

    def on_broker_msg_SCHEDULER_CREATE(self, msg, *ignored_args):
        """ Broker callback - creates a new job.
        """
        self.create_edit('create', msg)

# ################################################################################################################################

    def on_broker_msg_SCHEDULER_EDIT(self, msg, *ignored_args):
        """ Broker callback - edits an existing job.
        """
        self.create_edit('edit', msg)

# ################################################################################################################################

    def on_broker_msg_SCHEDULER_DELETE(self, msg, *ignored_args):
        """ Broker callback - deletes a job.
        """
        self.delete(msg)

# ################################################################################################################################

    def on_broker_msg_SCHEDULER_EXECUTE(self, msg, *ignored_args):
        """ Broker callback - executes a job immediately.
        """
        self.execute(msg)

# ################################################################################################################################

    def on_broker_msg_SCHEDULER_CLOSE(self, msg, *ignored_args):
        """ Broker callback - closes the broker client and stops the scheduler.
        """
        self.broker_client.close()
        self.stop()