예제 #1
0
파일: check_config.py 프로젝트: Adniel/zato
    def _on_server(self, args):
        """ Verifies that both the SQL ODB and Redis are reachable, using the
        connection details stored in the server's config repository.
        """
        config_repo = join(self.config_dir, 'repo')
        conf = ConfigObj(join(config_repo, 'server.conf'))

        # The private key is needed to decrypt any encrypted config values
        priv_key = abspath(join(config_repo, conf['crypto']['priv_key_location']))
        crypto = CryptoManager(priv_key_location=priv_key)
        crypto.load_keys()

        # A single connection is enough to run the ping query
        params = dict(conf['odb'].items())
        params['extra'] = {}
        params['pool_size'] = 1

        ping_query = ping_queries[params['engine']]

        # Open a session, run the engine-specific ping and clean up
        sql_session = create_pool(crypto, params)
        sql_session.execute(ping_query)
        sql_session.close()

        if self.show_output:
            self.logger.info('SQL ODB connection OK')

        # Redis check - .info() raises if the server cannot be reached
        redis_conf = Bunch(dict(conf['kvdb'].items()))
        kvdb = KVDB(None, redis_conf, crypto.decrypt)
        kvdb.init()

        kvdb.conn.info()
        kvdb.close()

        if self.show_output:
            self.logger.info('Redis connection OK')
예제 #2
0
파일: __init__.py 프로젝트: dsuch/zato
class BaseConnector(BrokerMessageReceiver):
    """ A base class for both channels and outgoing connectors.
    """
    def __init__(self, repo_location, def_id):
        # Where the server's config repository lives on disk
        self.repo_location = repo_location
        # ID of the connection definition this connector serves
        self.def_id = def_id
        self.odb = None
        self.sql_pool_store = None

    def _close(self):
        """ Terminates the connector's process, first closing the ODB
        connection if one was ever established.
        """
        if self.odb:
            self.odb.close()
        psutil.Process(os.getpid()).terminate()

    def _setup_odb(self):
        """ Makes sure the server this connector runs on top of exists in the ODB.
        """
        self.server = self.odb.fetch_server()
        if not self.server:
            raise Exception('Server does not exist in the ODB')

    def _init(self):
        """ Initializes all the basic run-time data structures and connects
        to the Zato broker.
        """
        server_config = get_config(self.repo_location, 'server.conf')
        app_context = get_app_context(server_config)
        crypto_manager = get_crypto_manager(self.repo_location, app_context, server_config)

        # The ODB manager needs crypto details so it can decrypt passwords
        odb_section = server_config.odb
        self.odb = app_context.get_object('odb_manager')
        self.odb.crypto_manager = crypto_manager
        self.odb.token = server_config.main.token

        # Key-value DB (Redis)
        self.kvdb = KVDB()
        self.kvdb.config = server_config.kvdb
        self.kvdb.decrypt_func = self.odb.crypto_manager.decrypt
        self.kvdb.init()

        # Connection parameters for the SQL ODB pool, password decrypted up front
        pool_config = Bunch()
        pool_config.db_name = odb_section.db_name
        pool_config.engine = odb_section.engine
        pool_config.extra = odb_section.extra
        pool_config.host = odb_section.host
        pool_config.password = self.odb.crypto_manager.decrypt(odb_section.password)
        pool_config.pool_size = odb_section.pool_size
        pool_config.username = odb_section.username
        pool_config.is_odb = True

        self.sql_pool_store = app_context.get_object('sql_pool_store')
        self.sql_pool_store[ZATO_ODB_POOL_NAME] = pool_config
        self.odb.pool = self.sql_pool_store[ZATO_ODB_POOL_NAME]

        self._setup_odb()

        # Connects to the broker
        super(BaseConnector, self)._init()
예제 #3
0
파일: check_config.py 프로젝트: ibeex/zato
    def _on_server(self, args):
        """ Pings both the SQL ODB and Redis to confirm that the server's
        connection configuration is usable.
        """
        repo_dir = join(self.config_dir, 'repo')
        server_conf = ConfigObj(join(repo_dir, 'server.conf'))

        # Load the private key so that encrypted config values can be decrypted
        key_path = abspath(join(repo_dir, server_conf['crypto']['priv_key_location']))
        cm = CryptoManager(priv_key_location=key_path)
        cm.load_keys()

        # A one-connection pool suffices for the ping query
        engine_params = dict(server_conf['odb'].items())
        engine_params.update({'extra': {}, 'pool_size': 1})

        session = create_pool(cm, engine_params)
        session.execute(ping_queries[engine_params['engine']])
        session.close()

        if self.show_output:
            self.logger.info('SQL ODB connection OK')

        # Redis check - .info() will raise if Redis is unreachable
        kvdb = KVDB(None, Bunch(dict(server_conf['kvdb'].items())), cm.decrypt)
        kvdb.init()

        kvdb.conn.info()
        kvdb.close()

        if self.show_output:
            self.logger.info('Redis connection OK')
예제 #4
0
    def on_server_check_kvdb(self, cm, server_conf):
        """ Confirms that the Redis instance configured in `server_conf` responds.
        """
        # .info() raises if the connection cannot be established
        kvdb = KVDB(None, Bunch(dict(server_conf['kvdb'].items())), cm.decrypt)
        kvdb.init()

        kvdb.conn.info()
        kvdb.close()

        if self.show_output:
            self.logger.info('Redis connection OK')
예제 #5
0
    def on_server_check_kvdb(self, cm, conf):
        """ Pings Redis using connection details from the 'kvdb' section of `conf`.
        """
        redis_config = Bunch(dict(conf['kvdb'].items()))
        connection = KVDB(None, redis_config, cm.decrypt)
        connection.init()

        # An .info() call doubles as a connectivity check
        connection.conn.info()
        connection.close()

        if self.show_output:
            self.logger.info('Redis connection OK')
예제 #6
0
    def on_server_check_kvdb(self, cm, conf):
        """ Pings the key-value DB (Redis) configured in `conf` and confirms its
        version is at least the minimum one supported, raising an exception otherwise.

        cm - a crypto manager whose .decrypt handles encrypted config values
        conf - a dict-like config object with a 'kvdb' section
        """
        kvdb_config = Bunch(dict(conf['kvdb'].items()))
        kvdb = KVDB(None, kvdb_config, cm.decrypt)
        kvdb.init()

        minimum = '2.8.4'

        try:
            info = kvdb.conn.info()
            redis_version = info.get('redis_version')

            if not redis_version:
                raise Exception('Could not obtain `redis_version` from {}'.format(info))

            if not LooseVersion(redis_version) >= LooseVersion(minimum):
                raise Exception('Redis version required: `{}` or later, found:`{}`'.format(minimum, redis_version))
        finally:
            # Always release the connection - previously it leaked whenever
            # the version check above raised an exception.
            kvdb.close()

        if self.show_output:
            self.logger.info('Redis connection OK')
예제 #7
0
    def on_server_check_kvdb(self, cm, conf, conf_key='kvdb'):
        """ Pings the key-value DB (Redis) configured under `conf[conf_key]` and
        confirms its version is at least the minimum one supported.

        cm - a crypto manager whose .decrypt handles encrypted config values
        conf - a dict-like configuration object
        conf_key - name of the config section holding the Redis connection details
        """
        kvdb_config = Bunch(dict(iteritems(conf[conf_key])))
        kvdb = KVDB(None, kvdb_config, cm.decrypt)
        kvdb.init()

        minimum = '2.8.4'

        try:
            info = kvdb.conn.info()
            redis_version = info.get('redis_version')

            if not redis_version:
                raise Exception('Could not obtain `redis_version` from {}'.format(info))

            if not LooseVersion(redis_version) >= LooseVersion(minimum):
                raise Exception('Redis version required: `{}` or later, found:`{}`'.format(minimum, redis_version))
        finally:
            # Always release the connection - previously it leaked whenever
            # the version check above raised an exception.
            kvdb.close()

        if self.show_output:
            self.logger.info('Redis connection OK')
예제 #8
0
    def test_parse_config(self):
        """ KVDB.init must raise ValueError when use_redis_sentinels is enabled
        but no sentinels are listed in the config.
        """
        class FakeSentinel(object):
            def __init__(self, sentinels, password, socket_timeout):
                self.sentinels = sentinels
                self.password = password
                self.socket_timeout = socket_timeout
                self.master_for_called_with = None

            def master_for(self, master_name):
                self.master_for_called_with = master_name
                return self

        class FakeStrictRedis(object):
            def __init__(self, **config):
                self.config = config

        class FakeKVDB(KVDB):
            def _get_connection_class(self):
                return FakeSentinel if self.has_sentinel else FakeStrictRedis

        def decrypt_func(password):
            return password

        sentinel1_host, sentinel1_port = 'a-' + rand_string(), rand_int()
        sentinel2_host, sentinel2_port = 'b-' + rand_string(), rand_int()

        password = rand_string()
        socket_timeout = rand_int()
        redis_sentinels_master = rand_string()
        redis_sentinels = [
            '{}:{}'.format(sentinel1_host, sentinel1_port),
            '{}:{}'.format(sentinel2_host, sentinel2_port)
        ]

        try:
            config = {'use_redis_sentinels': True}
            kvdb = KVDB(config=config)
            kvdb.init()
        # `except E as e` works on Python 2.6+ and 3.x alike, unlike `except E, e`
        except ValueError as e:
            eq_(e.message, 'kvdb.redis_sentinels must be provided')
        else:
            # Without this the test would pass silently if no exception was raised
            self.fail('Expected a ValueError (kvdb.redis_sentinels)')
예제 #9
0
파일: api.py 프로젝트: xulong2005/zato
class Scheduler(BrokerMessageReceiver):
    """ The Zato's job scheduler. All of the operations assume the data was already validated and sanitized
    by relevant Zato public API services.
    """
    def __init__(self, config=None, run=False):
        # config - a Bunch-like object carrying .main.broker connection details
        #          and a .crypto_manager used to decrypt config values
        # run - if True, starts serving immediately from the constructor
        self.config = config
        self.broker_client = None
        self.config.on_job_executed_cb = self.on_job_executed
        self.sched = _Scheduler(self.config, self)

        # Broker connection
        self.broker_conn = KVDB(
            config=self.config.main.broker,
            decrypt_func=self.config.crypto_manager.decrypt)
        self.broker_conn.init()

        # Broker client
        self.broker_callbacks = {
            TOPICS[MESSAGE_TYPE.TO_SCHEDULER]: self.on_broker_msg,
        }

        self.broker_client = BrokerClient(self.broker_conn, 'scheduler',
                                          self.broker_callbacks, [])

        if run:
            self.serve_forever()

    def serve_forever(self):
        """ Starts the underlying scheduler in a greenlet and blocks until it
        reports readiness.
        """
        try:
            try:
                spawn_greenlet(self.sched.run)
            except Exception:
                # format_exc() takes no exception argument - the previous code
                # passed the caught exception in, where it was silently treated
                # as format_exc's `limit` parameter. (`except E, e` was also
                # Python 2-only syntax.)
                logger.warn(format_exc())

            # Wait until the underlying scheduler is actually running
            while not self.sched.ready:
                sleep(0.1)

        except Exception:
            logger.warn(format_exc())
예제 #10
0
    def test_parse_config(self):
        """ KVDB.init must raise ValueError when use_redis_sentinels is enabled
        but no sentinels are listed in the config.
        """
        class FakeSentinel(object):
            def __init__(self, sentinels, password, socket_timeout):
                self.sentinels = sentinels
                self.password = password
                self.socket_timeout = socket_timeout
                self.master_for_called_with = None

            def master_for(self, master_name):
                self.master_for_called_with = master_name
                return self

        class FakeStrictRedis(object):
            def __init__(self, **config):
                self.config = config

        class FakeKVDB(KVDB):
            def _get_connection_class(self):
                return FakeSentinel if self.has_sentinel else FakeStrictRedis

        def decrypt_func(password):
            return password

        sentinel1_host, sentinel1_port = 'a-' + rand_string(), rand_int()
        sentinel2_host, sentinel2_port = 'b-' + rand_string(), rand_int()

        password = rand_string()
        socket_timeout = rand_int()
        redis_sentinels_master = rand_string()
        redis_sentinels = ['{}:{}'.format(sentinel1_host, sentinel1_port), '{}:{}'.format(sentinel2_host, sentinel2_port)]

        try:
            config = {'use_redis_sentinels': True}
            kvdb = KVDB(config=config)
            kvdb.init()
        # `except E as e` works on Python 2.6+ and 3.x alike, unlike `except E, e`
        except ValueError as e:
            eq_(e.message, 'kvdb.redis_sentinels must be provided')
        else:
            # Without this the test would pass silently if no exception was raised
            self.fail('Expected a ValueError (kvdb.redis_sentinels)')
예제 #11
0
    def test_parse_config(self):
        """ Covers KVDB config parsing: missing sentinels, missing sentinels
        master, a complete sentinel setup and a plain (non-sentinel) setup.
        """
        class FakeSentinel(object):
            def __init__(self, sentinels, password, socket_timeout):
                self.sentinels = sentinels
                self.password = password
                self.socket_timeout = socket_timeout
                self.master_for_called_with = None

            def master_for(self, master_name):
                self.master_for_called_with = master_name
                return self

        class FakeStrictRedis(object):
            def __init__(self, **config):
                self.config = config

        class FakeKVDB(KVDB):
            def _get_connection_class(self):
                return FakeSentinel if self.has_sentinel else FakeStrictRedis

        def decrypt_func(password):
            return password

        sentinel1_host, sentinel1_port = 'a-' + rand_string(), rand_int()
        sentinel2_host, sentinel2_port = 'b-' + rand_string(), rand_int()

        password = rand_string()
        socket_timeout = rand_int()
        redis_sentinels_master = rand_string()
        redis_sentinels = [
            '{}:{}'.format(sentinel1_host, sentinel1_port),
            '{}:{}'.format(sentinel2_host, sentinel2_port)
        ]

        # Sentinels enabled but none listed - must be rejected
        try:
            config = {'use_redis_sentinels': True}
            kvdb = KVDB(config=config)
            kvdb.init()
        except ValueError as e:
            eq_(e.message, 'kvdb.redis_sentinels must be provided')
        else:
            self.fail('Expected a ValueError (kvdb.redis_sentinels)')

        # Sentinels listed but no master given - must be rejected
        try:
            config = {
                'use_redis_sentinels': True,
                'redis_sentinels': redis_sentinels
            }
            kvdb = KVDB(config=config)
            kvdb.init()
        except ValueError as e:
            eq_(e.message, 'kvdb.redis_sentinels_master must be provided')
        else:
            self.fail('Expected a ValueError (kvdb.redis_sentinels_master)')

        # A complete sentinel config - all parameters must be passed through
        config = Bunch({
            'use_redis_sentinels': True,
            'redis_sentinels': redis_sentinels,
            'redis_sentinels_master': redis_sentinels_master,
            'password': password,
            'socket_timeout': socket_timeout
        })
        kvdb = FakeKVDB(config=config, decrypt_func=decrypt_func)
        kvdb.init()

        eq_(sorted(kvdb.conn.sentinels), [(sentinel1_host, sentinel1_port),
                                          (sentinel2_host, sentinel2_port)])

        eq_(kvdb.conn.password, password)
        eq_(kvdb.conn.socket_timeout, socket_timeout)
        eq_(kvdb.conn.master_for_called_with, redis_sentinels_master)

        # Sentinels disabled - a plain Redis connection class must be used
        config = {'use_redis_sentinels': False}
        kvdb = FakeKVDB(config=config, decrypt_func=decrypt_func)
        kvdb.init()

        # assertIsInstance reports the actual type on failure, unlike
        # assertTrue(isinstance(...)) which only says 'False is not true'
        self.assertIsInstance(kvdb.conn, FakeStrictRedis)
예제 #12
0
class BaseConnector(BrokerMessageReceiver):
    """ A base class for both channels and outgoing connectors.
    """
    def __init__(self, repo_location, def_id):
        # repo_location - path to the server's config repository on disk
        # def_id - ID of the connection definition this connector serves
        self.repo_location = repo_location
        self.def_id = def_id
        self.odb = None
        self.odb_config = None
        self.sql_pool_store = None

    def _close(self):
        """ Close the process, don't forget about the ODB connection if it exists.
        """
        if self.odb:
            self.odb.close()
        p = psutil.Process(os.getpid())
        p.terminate()

    def _setup_odb(self):
        """ Confirms this connector's server is registered in the ODB, raising
        an exception otherwise.
        """
        # First let's see if the server we're running on top of exists in the ODB.
        self.server = self.odb.fetch_server(self.odb_config)
        if not self.server:
            raise Exception('Server does not exist in the ODB')

    def _init(self):
        """ Initializes all the basic run-time data structures and connects
        to the Zato broker.

        Note that the order below matters - the KVDB must be initialized before
        the broker client that uses it, and the ODB pool must exist before
        _setup_odb is called.
        """
        fs_server_config = get_config(self.repo_location, 'server.conf')
        app_context = get_app_context(fs_server_config)
        crypto_manager = get_crypto_manager(self.repo_location, app_context,
                                            fs_server_config)

        config_odb = fs_server_config.odb
        self.odb = app_context.get_object('odb_manager')
        self.odb.crypto_manager = crypto_manager
        self.odb.token = fs_server_config.main.token

        # Key-value DB
        self.kvdb = KVDB()
        self.kvdb.config = fs_server_config.kvdb
        self.kvdb.decrypt_func = self.odb.crypto_manager.decrypt
        self.kvdb.init()

        # Broker client
        self.broker_client = BrokerClient(self.kvdb, self.broker_client_id,
                                          self.broker_callbacks)
        self.broker_client.start()

        # ODB

        #
        # Ticket #35 Don't ignore odb_port when creating an ODB
        # https://github.com/zatosource/zato/issues/35
        #

        engine = config_odb.engine
        port = config_odb['port']

        self.odb_config = Bunch()
        self.odb_config.db_name = config_odb.db_name
        self.odb_config.is_active = True
        self.odb_config.engine = engine
        self.odb_config.extra = config_odb.extra

        # SQLite is file-based, so host/port/credentials apply only to the
        # other engines.
        if self.odb_config.engine != 'sqlite':
            self.odb_config.password = self.odb.crypto_manager.decrypt(
                config_odb.password)
            self.odb_config.host = config_odb.host
            self.odb_config.port = port
            self.odb_config.pool_size = config_odb.pool_size
            self.odb_config.username = config_odb.username

        self.odb_config.is_odb = True

        self.sql_pool_store = app_context.get_object('sql_pool_store')
        self.sql_pool_store[ZATO_ODB_POOL_NAME] = self.odb_config
        self.odb.pool = self.sql_pool_store[ZATO_ODB_POOL_NAME].pool

        self._setup_odb()
예제 #13
0
파일: __init__.py 프로젝트: damilare/zato
class BaseConnector(BrokerMessageReceiver):
    """ A base class for both channels and outgoing connectors.
    """
    def __init__(self, repo_location, def_id):
        # repo_location - path to the server's config repository on disk
        # def_id - ID of the connection definition this connector serves
        self.repo_location = repo_location
        self.def_id = def_id
        self.odb = None
        self.odb_config = None
        self.sql_pool_store = None

    def _close(self):
        """ Close the process, don't forget about the ODB connection if it exists.
        """
        if self.odb:
            self.odb.close()
        p = psutil.Process(os.getpid())
        p.terminate()

    def _setup_odb(self):
        """ Confirms this connector's server is registered in the ODB, raising
        an exception otherwise.
        """
        # First let's see if the server we're running on top of exists in the ODB.
        self.server = self.odb.fetch_server(self.odb_config)
        if not self.server:
            raise Exception('Server does not exist in the ODB')

    def _init(self):
        """ Initializes all the basic run-time data structures and connects
        to the Zato broker.

        Note that the order below matters - the KVDB must be initialized
        before the broker client that uses it, and the ODB pool must exist
        before _setup_odb is called.
        """
        fs_server_config = get_config(self.repo_location, 'server.conf')
        app_context = get_app_context(fs_server_config)
        crypto_manager = get_crypto_manager(self.repo_location, app_context, fs_server_config)

        config_odb = fs_server_config.odb
        self.odb = app_context.get_object('odb_manager')
        self.odb.crypto_manager = crypto_manager
        self.odb.token = fs_server_config.main.token

        # Key-value DB
        self.kvdb = KVDB()
        self.kvdb.config = fs_server_config.kvdb
        self.kvdb.decrypt_func = self.odb.crypto_manager.decrypt
        self.kvdb.init()

        # Broker client
        self.broker_client = BrokerClient(self.kvdb, self.broker_client_id, self.broker_callbacks)
        self.broker_client.start()

        # ODB

        #
        # Ticket #35 Don't ignore odb_port when creating an ODB
        # https://github.com/zatosource/zato/issues/35
        #

        engine = config_odb.engine
        port = config_odb['port']

        self.odb_config = Bunch()
        self.odb_config.db_name = config_odb.db_name
        self.odb_config.is_active = True
        self.odb_config.engine = engine
        self.odb_config.extra = config_odb.extra
        self.odb_config.host = config_odb.host
        self.odb_config.port = port
        self.odb_config.password = self.odb.crypto_manager.decrypt(config_odb.password)
        self.odb_config.pool_size = config_odb.pool_size
        self.odb_config.username = config_odb.username

        self.odb_config.is_odb = True

        self.sql_pool_store = app_context.get_object('sql_pool_store')
        self.sql_pool_store[ZATO_ODB_POOL_NAME] = self.odb_config
        self.odb.pool = self.sql_pool_store[ZATO_ODB_POOL_NAME].pool

        self._setup_odb()

        # Delivery store
        self.delivery_store = DeliveryStore(
            self.kvdb, self.broker_client, self.odb, float(fs_server_config.misc.delivery_lock_timeout))
예제 #14
0
class Scheduler(BrokerMessageReceiver):
    """ The Zato's job scheduler. All of the operations assume the data was already validated and sanitized
    by relevant Zato public API services.
    """
    def __init__(self, config=None, run=False):
        # config - a Bunch-like object carrying .main.broker connection details
        #          and a .crypto_manager used to decrypt config values
        # run - if True, starts serving immediately from the constructor
        self.config = config
        self.broker_client = None
        self.config.on_job_executed_cb = self.on_job_executed
        self.sched = _Scheduler(self.config, self)

        # Broker connection
        self.broker_conn = KVDB(
            config=self.config.main.broker,
            decrypt_func=self.config.crypto_manager.decrypt)
        self.broker_conn.init()

        # Broker client
        self.broker_callbacks = {
            TOPICS[MESSAGE_TYPE.TO_SCHEDULER]: self.on_broker_msg,
        }

        self.broker_client = BrokerClient(self.broker_conn, 'scheduler',
                                          self.broker_callbacks, [])

        if run:
            self.serve_forever()

# ################################################################################################################################

    def serve_forever(self):
        """ Starts the underlying scheduler in a greenlet and blocks until it
        reports readiness.
        """
        try:
            try:
                spawn_greenlet(self.sched.run)
            except Exception:
                logger.warn(format_exc())

            # Wait until the underlying scheduler is actually running
            while not self.sched.ready:
                sleep(0.1)

        except Exception:
            logger.warn(format_exc())

# ################################################################################################################################

    def on_job_executed(self, ctx, extra_data_format=ZATO_NONE):
        """ Invoked by the underlying scheduler when a job is executed. Sends the actual execution request to the broker
        so it can be picked up by one of the parallel server's broker clients.
        """
        name = ctx['name']

        msg = {
            'action': SCHEDULER_MSG.JOB_EXECUTED.value,
            'name': name,
            'service': ctx['cb_kwargs']['service'],
            'payload': ctx['cb_kwargs']['extra'],
            'cid': ctx['cid'],
            'job_type': ctx['type']
        }

        # Only attach a data format if the caller provided one explicitly
        if extra_data_format != ZATO_NONE:
            msg['data_format'] = extra_data_format

        self.broker_client.invoke_async(msg)

        if _has_debug:
            msg = 'Sent a job execution request, name [{}], service [{}], extra [{}]'.format(
                name, ctx['cb_kwargs']['service'], ctx['cb_kwargs']['extra'])
            logger.debug(msg)

        # Now, if it was a one-time job, it needs to be deactivated.
        if ctx['type'] == SCHEDULER.JOB_TYPE.ONE_TIME:
            msg = {
                'action': SERVICE.PUBLISH.value,
                'service': 'zato.scheduler.job.set-active-status',
                'payload': {
                    'id': ctx['id'],
                    'is_active': False
                },
                'cid': new_cid(),
                'channel': CHANNEL.SCHEDULER_AFTER_ONE_TIME,
                'data_format': DATA_FORMAT.JSON,
            }
            self.broker_client.publish(msg)

# ################################################################################################################################

    def create_edit(self, action, job_data, **kwargs):
        """ Invokes a handler appropriate for the given action and job_data.job_type.
        """
        # Dispatch dynamically, e.g. action='create' + job_type='one_time'
        # resolves to self.create_one_time
        handler = '{0}_{1}'.format(action, job_data.job_type)
        handler = getattr(self, handler)

        try:
            handler(job_data, **kwargs)
        except Exception:
            logger.error('Caught exception `%s`', format_exc())

# ################################################################################################################################

    def create_edit_job(self,
                        id,
                        name,
                        old_name,
                        start_time,
                        job_type,
                        service,
                        is_create=True,
                        max_repeats=1,
                        days=0,
                        hours=0,
                        minutes=0,
                        seconds=0,
                        extra=None,
                        cron_definition=None,
                        is_active=None,
                        **kwargs):
        """ A base method for scheduling of jobs.

        Builds either a CronTab or an Interval describing when the job fires
        and hands the resulting Job over to the underlying scheduler,
        creating or editing it depending on is_create.
        """
        cb_kwargs = {
            'service': service,
            'extra': extra,
        }

        if job_type == SCHEDULER.JOB_TYPE.CRON_STYLE:
            interval = CronTab(cron_definition)
        else:
            interval = Interval(days=days,
                                hours=hours,
                                minutes=minutes,
                                seconds=seconds)

        job = Job(id,
                  name,
                  job_type,
                  interval,
                  start_time,
                  cb_kwargs=cb_kwargs,
                  max_repeats=max_repeats,
                  is_active=is_active,
                  cron_definition=cron_definition,
                  old_name=old_name)

        func = self.sched.create if is_create else self.sched.edit
        func(job, **kwargs)

# ################################################################################################################################

    def create_edit_one_time(self, job_data, is_create=True, **kwargs):
        """ Re-/schedules the execution of a one-time job.
        """
        self.create_edit_job(job_data.id,
                             job_data.name,
                             job_data.get('old_name'),
                             _start_date(job_data),
                             SCHEDULER.JOB_TYPE.ONE_TIME,
                             job_data.service,
                             is_create,
                             extra=job_data.extra,
                             is_active=job_data.is_active,
                             **kwargs)

    def create_one_time(self, job_data, **kwargs):
        """ Schedules the execution of a one-time job.
        """
        self.create_edit_one_time(job_data, **kwargs)

    def edit_one_time(self, job_data, **kwargs):
        """ First unschedules a one-time job and then schedules its execution.
        The operations aren't parts of an atomic transaction.
        """
        self.create_edit_one_time(job_data, False, **kwargs)

# ################################################################################################################################

    def create_edit_interval_based(self, job_data, is_create=True, **kwargs):
        """ Re-/schedules the execution of an interval-based job.
        """
        start_date = _start_date(job_data)
        # Each time component is optional in the incoming data and defaults to 0
        weeks = job_data.weeks if job_data.get('weeks') else 0
        days = job_data.days if job_data.get('days') else 0
        hours = job_data.hours if job_data.get('hours') else 0
        minutes = job_data.minutes if job_data.get('minutes') else 0
        seconds = job_data.seconds if job_data.get('seconds') else 0
        max_repeats = job_data.repeats if job_data.get('repeats') else None

        # Weeks are folded into days because Interval accepts days at most
        self.create_edit_job(job_data.id,
                             job_data.name,
                             job_data.get('old_name'),
                             start_date,
                             SCHEDULER.JOB_TYPE.INTERVAL_BASED,
                             job_data.service,
                             is_create,
                             max_repeats,
                             days + weeks * 7,
                             hours,
                             minutes,
                             seconds,
                             job_data.extra,
                             is_active=job_data.is_active,
                             **kwargs)

    def create_interval_based(self, job_data, **kwargs):
        """ Schedules the execution of an interval-based job.
        """
        self.create_edit_interval_based(job_data, **kwargs)

    def edit_interval_based(self, job_data, **kwargs):
        """ First unschedules an interval-based job and then schedules its execution.
        The operations aren't parts of an atomic transaction.
        """
        self.create_edit_interval_based(job_data, False, **kwargs)

# ################################################################################################################################

    def create_edit_cron_style(self, job_data, is_create=True, **kwargs):
        """ Re-/schedules the execution of a cron-style job.
        """
        start_date = _start_date(job_data)
        self.create_edit_job(job_data.id,
                             job_data.name,
                             job_data.get('old_name'),
                             start_date,
                             SCHEDULER.JOB_TYPE.CRON_STYLE,
                             job_data.service,
                             is_create,
                             max_repeats=None,
                             extra=job_data.extra,
                             is_active=job_data.is_active,
                             cron_definition=job_data.cron_definition,
                             **kwargs)

    def create_cron_style(self, job_data, **kwargs):
        """ Schedules the execution of a cron-style job.
        """
        self.create_edit_cron_style(job_data, **kwargs)

    def edit_cron_style(self, job_data, **kwargs):
        """ First unschedules a cron-style job and then schedules its execution.
        The operations aren't parts of an atomic transaction.
        """
        self.create_edit_cron_style(job_data, False, **kwargs)

# ################################################################################################################################

    def delete(self, job_data, **kwargs):
        """ Deletes the job from the scheduler.
        """
        # Prefer the old name if the job is being renamed in the same request
        self.sched.unschedule_by_name(
            job_data.old_name if job_data.get('old_name') else job_data.name,
            **kwargs)

# ################################################################################################################################

    def execute(self, job_data):
        """ Executes a job immediately, by name.
        """
        self.sched.execute(job_data.name)

# ################################################################################################################################

    def stop(self):
        """ Stops the underlying scheduler.
        """
        self.sched.stop()

# ################################################################################################################################

    def filter(self, *ignored):
        """ Accept broker messages destined to our client.
        """
        return True

# ################################################################################################################################

    def on_broker_msg_SCHEDULER_CREATE(self, msg, *ignored_args):
        """ Broker callback - creates a new job.
        """
        self.create_edit('create', msg)

# ################################################################################################################################

    def on_broker_msg_SCHEDULER_EDIT(self, msg, *ignored_args):
        """ Broker callback - edits an existing job.
        """
        self.create_edit('edit', msg)

# ################################################################################################################################

    def on_broker_msg_SCHEDULER_DELETE(self, msg, *ignored_args):
        """ Broker callback - deletes a job.
        """
        self.delete(msg)

# ################################################################################################################################

    def on_broker_msg_SCHEDULER_EXECUTE(self, msg, *ignored_args):
        """ Broker callback - executes a job immediately.
        """
        self.execute(msg)

# ################################################################################################################################

    def on_broker_msg_SCHEDULER_CLOSE(self, msg, *ignored_args):
        """ Broker callback - closes the broker client and stops the scheduler.
        """
        self.broker_client.close()
        self.stop()
예제 #15
0
class Scheduler(BrokerMessageReceiver):
    """ The Zato's job scheduler. All of the operations assume the data was already validated and sanitized
    by relevant Zato public API services.
    """
    def __init__(self, config=None, run=False):
        # config - a Bunch-like object; must provide .main.broker and .crypto_manager
        # run - if True, starts serving immediately from the constructor
        self.config = config
        self.broker_client = None

        # Let the underlying scheduler implementation call us back whenever a job fires
        self.config.on_job_executed_cb = self.on_job_executed
        self.sched = _Scheduler(self.config, self)

        # Broker connection
        self.broker_conn = KVDB(
            config=self.config.main.broker,
            decrypt_func=self.config.crypto_manager.decrypt)
        self.broker_conn.init()

        # Broker client - subscribe only to messages addressed to the scheduler
        self.broker_callbacks = {
            TOPICS[MESSAGE_TYPE.TO_SCHEDULER]: self.on_broker_msg,
        }

        self.broker_client = BrokerClient(self.broker_conn, 'scheduler',
                                          self.broker_callbacks, [])

        if run:
            self.serve_forever()

# ################################################################################################################################

    def serve_forever(self):
        """ Starts the underlying scheduler in a greenlet and blocks until it reports readiness.
        Errors are logged rather than propagated - the process keeps running.
        """
        try:
            try:
                spawn_greenlet(self.sched.run)
            except Exception:
                logger.warn(format_exc())

            # Busy-wait until the scheduler's main loop signals it is ready
            while not self.sched.ready:
                sleep(0.1)

        except Exception:
            logger.warn(format_exc())

# ################################################################################################################################

    def on_job_executed(self, ctx, extra_data_format=ZATO_NONE):
        """ Invoked by the underlying scheduler when a job is executed. Sends the actual execution request to the broker
        so it can be picked up by one of the parallel server's broker clients.
        """
        name = ctx['name']

        msg = {
            'action': SCHEDULER_MSG.JOB_EXECUTED.value,
            'name': name,
            'service': ctx['cb_kwargs']['service'],
            'payload': ctx['cb_kwargs']['extra'],
            'cid': ctx['cid'],
            'job_type': ctx['type']
        }

        # ZATO_NONE means 'no format given' - only attach the key if there actually is one
        if extra_data_format != ZATO_NONE:
            msg['data_format'] = extra_data_format

        self.broker_client.invoke_async(msg)

        if _has_debug:
            msg = 'Sent a job execution request, name [{}], service [{}], extra [{}]'.format(
                name, ctx['cb_kwargs']['service'], ctx['cb_kwargs']['extra'])
            logger.debug(msg)

        # Now, if it was a one-time job, it needs to be deactivated.
        if ctx['type'] == SCHEDULER.JOB_TYPE.ONE_TIME:
            msg = {
                'action': SERVICE.PUBLISH.value,
                'service': 'zato.scheduler.job.set-active-status',
                'payload': {
                    'id': ctx['id'],
                    'is_active': False
                },
                'cid': new_cid(),
                'channel': CHANNEL.SCHEDULER_AFTER_ONE_TIME,
                'data_format': DATA_FORMAT.JSON,
            }
            self.broker_client.publish(msg)

# ################################################################################################################################

    def create_edit(self, action, job_data, **kwargs):
        """ Invokes a handler appropriate for the given action and job_data.job_type,
        e.g. action='create' with job_type='interval_based' dispatches to self.create_interval_based.
        Exceptions from the handler are logged, not re-raised.
        """
        handler = '{0}_{1}'.format(action, job_data.job_type)
        handler = getattr(self, handler)

        try:
            handler(job_data, **kwargs)
        except Exception:
            # Note: format_exc takes no exception argument - it formats the
            # exception currently being handled. Passing the exception object,
            # as the previous code did, fed it into format_exc's 'limit' parameter.
            logger.error('Caught exception `%s`', format_exc())
예제 #16
0
        # NOTE(review): this is a fragment of a test method whose 'def' line was
        # lost - names such as rand_string, sentinel1_host, eq_, password,
        # socket_timeout, decrypt_func and FakeKVDB are defined outside this view.

        # A random master name and a list of 'host:port' sentinel addresses
        redis_sentinels_master = rand_string()
        redis_sentinels = ['{}:{}'.format(sentinel1_host, sentinel1_port), '{}:{}'.format(sentinel2_host, sentinel2_port)]

        # Enabling sentinels without providing any addresses must be rejected
        try:
            config = {'use_redis_sentinels': True}
            kvdb = KVDB(config=config)
            kvdb.init()
        except ValueError, e:
            # NOTE(review): Python 2-only except syntax and e.message - not valid in Python 3
            eq_(e.message, 'kvdb.redis_sentinels must be provided')
        else:
            self.fail('Expected a ValueError (kvdb.redis_sentinels)')

        # Addresses without a master name must also be rejected
        try:
            config = {'use_redis_sentinels': True, 'redis_sentinels': redis_sentinels}
            kvdb = KVDB(config=config)
            kvdb.init()
        except ValueError, e:
            eq_(e.message, 'kvdb.redis_sentinels_master must be provided')
        else:
            self.fail('Expected a ValueError (kvdb.redis_sentinels_master)')

        # With a full sentinel configuration, init is expected to succeed
        config = Bunch({
            'use_redis_sentinels': True,
            'redis_sentinels':redis_sentinels,
            'redis_sentinels_master':redis_sentinels_master,
            'password': password,
            'socket_timeout':socket_timeout
        })
        kvdb = FakeKVDB(config=config, decrypt_func=decrypt_func)
        kvdb.init()
예제 #17
0
        # NOTE(review): fragment of a test method (its 'def' line is missing from
        # this view); near-duplicate of the previous example. redis_sentinels,
        # redis_sentinels_master, eq_, password, socket_timeout, decrypt_func
        # and FakeKVDB are defined outside this view.

        # Sentinels enabled but no addresses given - must raise ValueError
        try:
            config = {'use_redis_sentinels': True}
            kvdb = KVDB(config=config)
            kvdb.init()
        except ValueError, e:
            # NOTE(review): Python 2-only except syntax and e.message - not valid in Python 3
            eq_(e.message, 'kvdb.redis_sentinels must be provided')
        else:
            self.fail('Expected a ValueError (kvdb.redis_sentinels)')

        # Addresses given but no master name - must raise ValueError
        try:
            config = {
                'use_redis_sentinels': True,
                'redis_sentinels': redis_sentinels
            }
            kvdb = KVDB(config=config)
            kvdb.init()
        except ValueError, e:
            eq_(e.message, 'kvdb.redis_sentinels_master must be provided')
        else:
            self.fail('Expected a ValueError (kvdb.redis_sentinels_master)')

        # Full sentinel configuration - init is expected to succeed
        config = Bunch({
            'use_redis_sentinels': True,
            'redis_sentinels': redis_sentinels,
            'redis_sentinels_master': redis_sentinels_master,
            'password': password,
            'socket_timeout': socket_timeout
        })
        kvdb = FakeKVDB(config=config, decrypt_func=decrypt_func)
        kvdb.init()