Example #1
    def execute(self, args):

        repo_dir = os.path.join(os.path.abspath(os.path.join(self.original_dir, args.path)), 'config', 'repo')
        config = get_config(repo_dir, 'server.conf')

        headers = {}

        if args.headers:
            for pair in args.headers.strip().split(';'):
                k, v = pair.strip().split('=', 1)
                headers[k] = v

        repo_dir = os.path.join(os.path.abspath(os.path.join(args.path)), 'config', 'repo')
        config = get_config(repo_dir, 'server.conf')

        client = AnyServiceInvoker(
            'http://{}'.format(config.main.gunicorn_bind), args.url_path, self.get_server_client_auth(config, repo_dir),
            max_response_repr=int(args.max_response_repr), max_cid_repr=int(args.max_cid_repr))

        # Prevents attempts to convert/escape XML into JSON
        to_json = True if args.data_format == DATA_FORMAT.JSON else False

        func = client.invoke_async if args.async else client.invoke
        response = func(args.name, args.payload, headers, args.channel, args.data_format, args.transport, to_json=to_json)

        if response.ok:
            self.logger.info(response.data or '(None)')
        else:
            self.logger.error(response.details)

        if args.verbose:
            self.logger.debug('inner.text:[{}]'.format(response.inner.text))
            self.logger.debug('response:[{}]'.format(response))
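The command above parses --headers as semicolon-separated key=value pairs. Below is a minimal, standalone sketch of just that parsing step; the function name and the sample values are illustrative and not part of the original command class.

def parse_headers(raw):
    # Turn 'k1=v1;k2=v2' into a dict, splitting each pair on the first '=' only
    headers = {}
    if raw:
        for pair in raw.strip().split(';'):
            k, v = pair.strip().split('=', 1)
            headers[k] = v
    return headers

print(parse_headers('X-Env=prod;X-Request-Id=abc=123'))
# {'X-Env': 'prod', 'X-Request-Id': 'abc=123'}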
Example #2
    def _get_sso_config(self, args, repo_location, secrets_conf):
        # type: (Namespace, unicode, Bunch) -> UserAPI
        sso_conf = get_config(repo_location,
                              'sso.conf',
                              needs_user_config=False)
        normalize_password_reject_list(sso_conf)

        crypto_manager = CryptoManager.from_secret_key(
            secrets_conf.secret_keys.key1)
        crypto_manager.add_hash_scheme('sso.super-user',
                                       sso_conf.hash_secret.rounds,
                                       sso_conf.hash_secret.salt_size)

        server_conf = get_config(repo_location,
                                 'server.conf',
                                 needs_user_config=False,
                                 crypto_manager=crypto_manager,
                                 secrets_conf=secrets_conf)

        def _get_session():
            return self.get_odb_session_from_server_config(server_conf, None)

        def _hash_secret(_secret):
            return crypto_manager.hash_secret(_secret, 'sso.super-user')

        return UserAPI(None, sso_conf, _get_session, crypto_manager.encrypt,
                       crypto_manager.decrypt, _hash_secret, None, new_user_id)
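Taken together, these examples show the calling patterns for get_config: a repository directory plus a file name, optionally with needs_user_config, crypto_manager and secrets_conf keyword arguments. The sketch below only restates those patterns; the path is illustrative, and crypto_manager is assumed to have been built as in the example above (CryptoManager.from_secret_key).

import os

repo_location = os.path.join('/opt/zato/server1', 'config', 'repo')  # illustrative path

# Plain read, as in most of the examples in this listing
server_conf = get_config(repo_location, 'server.conf')

# Read without user-config expansion
sso_conf = get_config(repo_location, 'sso.conf', needs_user_config=False)

# Read with secrets decryption, mirroring the example above;
# crypto_manager is assumed to be a CryptoManager built from the secret key
secrets_conf = get_config(repo_location, 'secrets.conf', needs_user_config=False)
server_conf = get_config(repo_location, 'server.conf', needs_user_config=False,
                         crypto_manager=crypto_manager, secrets_conf=secrets_conf)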
Example #3
def main():

    # Always attempt to store the PID file first
    store_pidfile(os.path.abspath('.'))

    # Capture warnings to log files
    logging.captureWarnings(True)

    config = Config()
    repo_location = os.path.join('.', 'config', 'repo')

    # Logging configuration
    with open(os.path.join(repo_location, 'logging.conf')) as f:
        dictConfig(yaml.load(f))

    # Read config in and extend it with ODB-specific information
    config.main = get_config(repo_location, 'scheduler.conf')
    config.main.odb.fs_sql_config = get_config(repo_location,
                                               'sql.conf',
                                               needs_user_config=False)

    # Make all paths absolute
    if config.main.crypto.use_tls:
        config.main.crypto.ca_certs_location = absjoin(
            repo_location, config.main.crypto.ca_certs_location)
        config.main.crypto.priv_key_location = absjoin(
            repo_location, config.main.crypto.priv_key_location)
        config.main.crypto.cert_location = absjoin(
            repo_location, config.main.crypto.cert_location)

    logger = logging.getLogger(__name__)
    logger.info('Scheduler starting (http{}://{}:{})'.format(
        's' if config.main.crypto.use_tls else '', config.main.bind.host,
        config.main.bind.port))

    # Fix up configuration so it uses the format internal utilities expect
    for name, job_config in iteritems(
            get_config(repo_location,
                       'startup_jobs.conf',
                       needs_user_config=False)):
        job_config['name'] = name
        config.startup_jobs.append(job_config)

    # Run the scheduler server
    try:
        SchedulerServer(config, repo_location).serve_forever()
    except Exception:
        logger.warn(format_exc())
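The loop above turns each section of startup_jobs.conf into a job dict that carries its own name. A standalone sketch of that fix-up, with a plain dict standing in for what get_config would return (the job names and intervals are illustrative only):

# Plain-dict stand-in for what get_config(repo_location, 'startup_jobs.conf') would return
jobs_conf = {
    'cleanup-old-entries': {'minutes': 60},
    'ping-outgoing-sql': {'minutes': 3},
}

startup_jobs = []

for name, job_config in jobs_conf.items():
    job_config['name'] = name   # each job dict carries its own name
    startup_jobs.append(job_config)

print(startup_jobs)
# [{'minutes': 60, 'name': 'cleanup-old-entries'}, {'minutes': 3, 'name': 'ping-outgoing-sql'}]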
Example #4
    def set_zato_client(self):

        repo_dir = os.path.join(
            os.path.abspath(os.path.join(self.server_path)), 'config', 'repo')
        config = get_config(repo_dir, 'server.conf')

        self.client = ZatoClient('http://{}'.format(config.main.gunicorn_bind),
                                 '/zato/admin/invoke',
                                 get_server_client_auth(config, repo_dir),
                                 max_response_repr=15000)

        session = get_odb_session_from_server_config(
            config, get_crypto_manager_from_server_config(config, repo_dir))

        self.client.cluster_id = session.query(odb.model.Server).\
            filter(odb.model.Server.token == config.main.token).\
            one().cluster_id

        self.client.odb_session = session

        # Sanity check
        self.client.invoke('zato.ping')


# ################################################################################################################################
Example #5
    def execute(self, args):
        # type: (Namespace) -> object

        repo_location = os.path.join(args.path, 'config', 'repo')
        secrets_conf = get_config(repo_location,
                                  'secrets.conf',
                                  needs_user_config=False)

        # This file must exist, otherwise it's not a path to a server
        if not secrets_conf:
            self.logger.warn('Could not find file `secrets.conf` in `%s`',
                             repo_location)
            return self.SYS_ERROR.NOT_A_ZATO_SERVER

        user_api = self._get_sso_config(args, repo_location, secrets_conf)

        if self.user_required:
            user = user_api.get_user_by_username(_cid, args.username)
            if not user:
                self.logger.warn('No such user `%s`', args.username)
                return self.SYS_ERROR.NO_SUCH_SSO_USER
        else:
            user = None

        return self._on_sso_command(args, user, user_api)
Example #6
    def _after_init_common(self, server):
        """ Initializes parts of the server that don't depend on whether the
        server's been allowed to join the cluster or not.
        """
        # Patterns to match during deployment
        self.service_store.patterns_matcher.read_config(
            self.fs_server_config.deploy_patterns_allowed)

        # Static config files
        self.static_config = StaticConfig(
            os.path.join(self.repo_location, 'static'))

        # Key-value DB
        kvdb_config = get_kvdb_config_for_log(self.fs_server_config.kvdb)
        kvdb_logger.info('Worker config `%s`', kvdb_config)

        self.kvdb.config = self.fs_server_config.kvdb
        self.kvdb.server = self
        self.kvdb.decrypt_func = self.crypto_manager.decrypt
        self.kvdb.init()

        kvdb_logger.info('Worker config `%s`', kvdb_config)

        # Lua programs, both internal and user defined ones.
        for name, program in self.get_lua_programs():
            self.kvdb.lua_container.add_lua_program(name, program)

        # TimeUtil needs self.kvdb so it can be set now
        self.time_util = TimeUtil(self.kvdb)

        # Service sources
        self.service_sources = []
        for name in open(
                os.path.join(self.repo_location,
                             self.fs_server_config.main.service_sources)):
            name = name.strip()
            if name and not name.startswith('#'):
                if not os.path.isabs(name):
                    name = os.path.normpath(os.path.join(self.base_dir, name))
                self.service_sources.append(name)

        # User-config from ./config/repo/user-config
        for file_name in os.listdir(self.user_conf_location):
            conf = get_config(self.user_conf_location, file_name)

            # Not used at all in this type of configuration
            conf.pop('user_config_items', None)

            self.user_config[get_user_config_name(file_name)] = conf

        # Convert size of FIFO response buffers to megabytes
        self.fifo_response_buffer_size = int(
            float(self.fs_server_config.misc.fifo_response_buffer_size) *
            megabyte)

        is_first, locally_deployed = self.maybe_on_first_worker(
            server, self.kvdb.conn)

        return is_first, locally_deployed
Example #7
    def execute(self, args):

        repo_dir = os.path.join(os.path.abspath(os.path.join(self.original_dir, args.path)), 'config', 'repo')
        config = get_config(repo_dir, 'server.conf')
        priv_key_location = os.path.abspath(os.path.join(repo_dir, config.crypto.priv_key_location))
        
        cm = CryptoManager(priv_key_location=priv_key_location)
        cm.load_keys()
        
        engine_args = Bunch()
        engine_args.odb_type = config.odb.engine
        engine_args.odb_user = config.odb.username
        engine_args.odb_password = cm.decrypt(config.odb.password)
        engine_args.odb_host = config.odb.host
        engine_args.odb_db_name = config.odb.db_name
        
        engine = self._get_engine(engine_args)
        session = self._get_session(engine)
        
        auth = None
        headers = {}
        
        with closing(session) as session:
            cluster = session.query(Server).\
                filter(Server.token == config.main.token).\
                one().cluster
            
            channel = session.query(HTTPSOAP).\
                filter(HTTPSOAP.cluster_id == cluster.id).\
                filter(HTTPSOAP.url_path == args.url_path).\
                one()
            
            if channel.security_id:
                security = session.query(HTTPBasicAuth).\
                    filter(HTTPBasicAuth.id == channel.security_id).\
                    first()
                
                if security:
                    auth = (security.username, security.password)
                    
        if args.headers:
            for pair in args.headers.strip().split(';'):
                k, v = pair.strip().split('=', 1)
                headers[k] = v
                    
        client = AnyServiceInvoker('http://{}'.format(config.main.gunicorn_bind), args.url_path, auth,
            max_response_repr=int(args.max_response_repr), max_cid_repr=int(args.max_cid_repr))
        
        func = client.invoke_async if args.async else client.invoke
        response = func(args.name, args.payload, headers, args.channel, args.data_format, args.transport)
        
        if response.ok:
            self.logger.info(response.data or '(None)')
        else:
            self.logger.error(response.details)
            
        if args.verbose:
            self.logger.debug('inner.text:[{}]'.format(response.inner.text))
            self.logger.debug('response:[{}]'.format(response))
Example #8
    def _init(self):
        """ Initializes all the basic run-time data structures and connects
        to the Zato broker.
        """
        fs_server_config = get_config(self.repo_location, 'server.conf')
        app_context = get_app_context(fs_server_config)
        crypto_manager = get_crypto_manager(self.repo_location, app_context,
                                            fs_server_config)

        config_odb = fs_server_config.odb
        self.odb = app_context.get_object('odb_manager')
        self.odb.crypto_manager = crypto_manager
        self.odb.token = fs_server_config.main.token

        # Key-value DB
        self.kvdb = KVDB()
        self.kvdb.config = fs_server_config.kvdb
        self.kvdb.decrypt_func = self.odb.crypto_manager.decrypt
        self.kvdb.init()

        # Broker client
        self.broker_client = BrokerClient(self.kvdb, self.broker_client_id,
                                          self.broker_callbacks)
        self.broker_client.start()

        # ODB

        #
        # Ticket #35 Don't ignore odb_port when creating an ODB
        # https://github.com/zatosource/zato/issues/35
        #

        engine = config_odb.engine
        port = config_odb['port']

        self.odb_config = Bunch()
        self.odb_config.db_name = config_odb.db_name
        self.odb_config.is_active = True
        self.odb_config.engine = engine
        self.odb_config.extra = config_odb.extra
        self.odb_config.host = config_odb.host
        self.odb_config.port = port
        self.odb_config.password = self.odb.crypto_manager.decrypt(
            config_odb.password)
        self.odb_config.pool_size = config_odb.pool_size
        self.odb_config.username = config_odb.username

        self.odb_config.is_odb = True

        self.sql_pool_store = app_context.get_object('sql_pool_store')
        self.sql_pool_store[ZATO_ODB_POOL_NAME] = self.odb_config
        self.odb.pool = self.sql_pool_store[ZATO_ODB_POOL_NAME].pool

        self._setup_odb()

        # Delivery store
        self.delivery_store = DeliveryStore(
            self.kvdb, self.broker_client, self.odb,
            float(fs_server_config.misc.delivery_lock_timeout))
Example #9
def load_secrets(secrets, secret_names, crypto_manager, conf_location, conf_file_name):
    conf = get_config(os.path.dirname(conf_location), conf_file_name, False)
    for name in secret_names:
        k, v = name.split(':')
        if conf[k][v]:
            secrets[name] = crypto_manager.decrypt(conf[k][v])

    return secrets, conf
Example #10
        def load_secrets(secrets, secret_names, crypto_manager, conf_location, conf_file_name):
            conf = get_config(os.path.dirname(conf_location), conf_file_name, False)
            for name in secret_names:
                k, v = name.split(':')
                if conf[k][v]:
                    secrets[name] = crypto_manager.decrypt(conf[k][v])

            return secrets, conf
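A usage sketch for load_secrets as defined above. The secret names follow the 'section:key' convention implied by name.split(':'), only the directory part of conf_location is used, and the concrete names, path and crypto_manager below are illustrative assumptions.

secrets = {}
secret_names = ['odb:password', 'kvdb:password']  # illustrative 'section:key' names

# crypto_manager is assumed to be a Zato CryptoManager instance, e.g. built
# as in the SSO example earlier in this listing
secrets, conf = load_secrets(
    secrets, secret_names, crypto_manager,
    '/opt/zato/server1/config/repo/server.conf',  # conf_location - only its dirname is used
    'secrets.conf')                               # conf_file_name

# secrets now maps each non-empty 'section:key' entry to its decrypted value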
Example #11
    def execute(self, args):

        repo_dir = os.path.join(
            os.path.abspath(os.path.join(self.original_dir, args.path)),
            'config', 'repo')
        config = get_config(repo_dir, 'server.conf')

        headers = {}

        if args.headers:
            for pair in args.headers.strip().split(';'):
                k, v = pair.strip().split('=', 1)
                headers[k] = v

        repo_dir = os.path.join(os.path.abspath(os.path.join(args.path)),
                                'config', 'repo')
        config = get_config(repo_dir, 'server.conf')

        client = AnyServiceInvoker(
            'http://{}'.format(config.main.gunicorn_bind),
            args.url_path,
            self.get_server_client_auth(config, repo_dir),
            max_response_repr=int(args.max_response_repr),
            max_cid_repr=int(args.max_cid_repr))

        # Prevents attempts to convert/escape XML into JSON
        to_json = True if args.data_format == DATA_FORMAT.JSON else False

        func = client.invoke_async if args.async else client.invoke
        response = func(args.name,
                        args.payload,
                        headers,
                        args.channel,
                        args.data_format,
                        args.transport,
                        to_json=to_json)

        if response.ok:
            self.logger.info(response.data or '(None)')
        else:
            self.logger.error(response.details)

        if args.verbose:
            self.logger.debug('inner.text:[{}]'.format(response.inner.text))
            self.logger.debug('response:[{}]'.format(response))
Example #12
    def _init(self):
        """ Initializes all the basic run-time data structures and connects
        to the Zato broker.
        """
        fs_server_config = get_config(self.repo_location, 'server.conf')
        app_context = get_app_context(fs_server_config)
        crypto_manager = get_crypto_manager(self.repo_location, app_context, fs_server_config)
        
        config_odb = fs_server_config.odb
        self.odb = app_context.get_object('odb_manager')
        self.odb.crypto_manager = crypto_manager
        self.odb.token = fs_server_config.main.token
        
        # Key-value DB
        self.kvdb = KVDB()
        self.kvdb.config = fs_server_config.kvdb
        self.kvdb.decrypt_func = self.odb.crypto_manager.decrypt
        self.kvdb.init()
        
        # Broker client
        self.broker_client = BrokerClient(self.kvdb, self.broker_client_id, self.broker_callbacks)
        self.broker_client.start()

        # ODB        
        
        #
        # Ticket #35 Don't ignore odb_port when creating an ODB
        # https://github.com/zatosource/zato/issues/35
        #
        
        engine = config_odb.engine
        port = config_odb.get('port')
        
        if not port:
            port = 5432 if engine == 'postgresql' else 1521
        
        self.odb_config = Bunch()
        self.odb_config.db_name = config_odb.db_name
        self.odb_config.is_active = True
        self.odb_config.engine = engine
        self.odb_config.extra = config_odb.extra
        self.odb_config.host = config_odb.host
        self.odb_config.port = port
        self.odb_config.password = self.odb.crypto_manager.decrypt(config_odb.password)
        self.odb_config.pool_size = config_odb.pool_size
        self.odb_config.username = config_odb.username
        
        self.odb_config.is_odb = True
        
        self.sql_pool_store = app_context.get_object('sql_pool_store')
        self.sql_pool_store[ZATO_ODB_POOL_NAME] = self.odb_config
        self.odb.pool = self.sql_pool_store[ZATO_ODB_POOL_NAME].pool
        
        self._setup_odb()
        
        # Delivery store
        self.delivery_store = DeliveryStore(self.kvdb, self.broker_client, self.odb, float(fs_server_config.misc.delivery_lock_timeout))
Example #13
def run(base_dir):

    os.chdir(base_dir)
    
    # We're doing it here even if someone doesn't use PostgreSQL at all
    # so we're not surprised when someone suddenly starts using PG.
    # TODO: Make sure it's registered for each of the subprocesses
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)

    repo_location = os.path.join(base_dir, 'config', 'repo')

    # Configure the logging first, before configuring the actual server.
    logging.addLevelName('TRACE1', TRACE1)
    logging.config.fileConfig(os.path.join(repo_location, 'logging.conf'))

    config = get_config(repo_location, 'server.conf')
    app_context = get_app_context(config)

    crypto_manager = get_crypto_manager(repo_location, app_context, config)
    parallel_server = app_context.get_object('parallel_server')
    
    zato_gunicorn_app = ZatoGunicornApplication(
        parallel_server, repo_location, config.main, config.crypto)
    
    parallel_server.crypto_manager = crypto_manager
    parallel_server.odb_data = config.odb
    parallel_server.host = zato_gunicorn_app.zato_host
    parallel_server.port = zato_gunicorn_app.zato_port
    parallel_server.repo_location = repo_location
    parallel_server.base_dir = base_dir
    parallel_server.fs_server_config = config
    parallel_server.startup_jobs = app_context.get_object('startup_jobs')
    parallel_server.app_context = app_context

    # Remove all locks possibly left over by previous server instances
    clear_locks(app_context.get_object('kvdb'), config.main.token, config.kvdb, crypto_manager.decrypt)
        
    # Turn the repo dir into an actual repository and commit any new/modified files
    RepoManager(repo_location).ensure_repo_consistency()

    # This is new in 1.2 so is optional
    profiler_enabled = config.get('profiler', {}).get('enabled', False)
    
    if asbool(profiler_enabled):
        profiler_dir = os.path.abspath(os.path.join(base_dir, config.profiler.profiler_dir))
        parallel_server.on_wsgi_request = ProfileMiddleware(
            parallel_server.on_wsgi_request,
            log_filename = os.path.join(profiler_dir, config.profiler.log_filename),
            cachegrind_filename = os.path.join(profiler_dir, config.profiler.cachegrind_filename),
            discard_first_request = config.profiler.discard_first_request,
            flush_at_shutdown = config.profiler.flush_at_shutdown,
            path = config.profiler.url_path,
            unwind = config.profiler.unwind)

    # Run the app at last
    zato_gunicorn_app.run()
Example #14
    def from_server_conf(server_dir, cache_name, is_https):
        # type: (str, str, bool) -> Client
        repo_dir = get_repo_dir_from_component_dir(server_dir)
        cm = ServerCryptoManager.from_repo_dir(None, repo_dir, None)
        secrets_conf = get_config(repo_dir,
                                  'secrets.conf',
                                  needs_user_config=False)
        config = get_config(repo_dir,
                            'server.conf',
                            crypto_manager=cm,
                            secrets_conf=secrets_conf)

        session = None
        password = None

        try:
            session = get_odb_session_from_server_config(config, None, False)

            cluster = session.query(Server).\
                filter(Server.token == config.main.token).\
                one().cluster # type: Cluster

            security = session.query(HTTPBasicAuth).\
                filter(Cluster.id == HTTPBasicAuth.cluster_id).\
                filter(HTTPBasicAuth.username == CACHE.API_USERNAME).\
                filter(HTTPBasicAuth.cluster_id == cluster.id).\
                first() # type: HTTPBasicAuth

            if security:
                password = security.password

        finally:
            if session:
                session.close()

        return Client.from_dict({
            'username': CACHE.API_USERNAME,
            'password': password,
            'address': config.main.gunicorn_bind,
            'cache_name': cache_name,
            'is_https': is_https,
        })
Example #15
    def handle(self):
        input = self.request.input

        with self.lock('{}-{}-{}'.format(self.name, self.server.name,
                                         input.data)):
            with open(
                    os.path.join(self.server.user_conf_location,
                                 input.file_name), 'wb') as f:
                f.write(input.data)

            conf = get_config(self.server.user_conf_location, input.file_name)
            entry = self.server.user_config.setdefault(
                get_user_config_name(input.file_name), Bunch())
            entry.clear()
            entry.update(conf)
Example #16
    def _init(self):
        """ Initializes all the basic run-time data structures and connects
        to the Zato broker.
        """
        fs_server_config = get_config(self.repo_location, 'server.conf')
        app_context = get_app_context(fs_server_config)
        crypto_manager = get_crypto_manager(self.repo_location, app_context, fs_server_config)

        config_odb = fs_server_config.odb
        self.odb = app_context.get_object('odb_manager')
        self.odb.crypto_manager = crypto_manager
        self.odb.token = fs_server_config.main.token

        # Key-value DB
        self.kvdb = KVDB()
        self.kvdb.config = fs_server_config.kvdb
        self.kvdb.decrypt_func = self.odb.crypto_manager.decrypt
        self.kvdb.init()

        # Broker client
        self.broker_client = BrokerClient(self.kvdb, self.broker_client_id, self.broker_callbacks)
        self.broker_client.start()

        # ODB
        self.odb_config = Bunch()
        self.odb_config.db_name = config_odb.db_name
        self.odb_config.is_active = True
        self.odb_config.engine = config_odb.engine
        self.odb_config.extra = config_odb.extra
        self.odb_config.host = config_odb.host
        self.odb_config.password = self.odb.crypto_manager.decrypt(config_odb.password)
        self.odb_config.pool_size = config_odb.pool_size
        self.odb_config.username = config_odb.username

        self.odb_config.is_odb = True

        self.sql_pool_store = app_context.get_object('sql_pool_store')
        self.sql_pool_store[ZATO_ODB_POOL_NAME] = self.odb_config
        self.odb.pool = self.sql_pool_store[ZATO_ODB_POOL_NAME].pool

        self._setup_odb()

        # Delivery store
        self.delivery_store = DeliveryStore(self.kvdb, self.broker_client, self.odb, float(fs_server_config.misc.delivery_lock_timeout))
Example #17
    def set_zato_client(self):

        repo_dir = os.path.join(os.path.abspath(os.path.join(self.server_path)), 'config', 'repo')
        config = get_config(repo_dir, 'server.conf')

        self.client = ZatoClient('http://{}'.format(config.main.gunicorn_bind),
            '/zato/admin/invoke', get_server_client_auth(config, repo_dir), max_response_repr=15000)

        session = get_odb_session_from_server_config(
            config, get_crypto_manager_from_server_config(config, repo_dir))

        self.client.cluster_id = session.query(odb.model.Server).\
            filter(odb.model.Server.token == config.main.token).\
            one().cluster_id

        self.client.odb_session = session

        # Sanity check
        self.client.invoke('zato.ping')
Example #18
def run(base_dir):

    os.chdir(base_dir)

    # We're doing it here even if someone doesn't use PostgreSQL at all
    # so we're not surprised when someone suddenly starts using PG.
    # TODO: Make sure it's registered for each of the subprocesses
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)

    repo_location = os.path.join(base_dir, 'config', 'repo')

    # Configure the logging first, before configuring the actual server.
    logging.addLevelName('TRACE1', TRACE1)
    logging.config.fileConfig(os.path.join(repo_location, 'logging.conf'))

    config = get_config(repo_location, 'server.conf')
    app_context = get_app_context(config)

    crypto_manager = get_crypto_manager(repo_location, app_context, config)
    parallel_server = app_context.get_object('parallel_server')

    zato_gunicorn_app = ZatoGunicornApplication(parallel_server, config.main)

    parallel_server.crypto_manager = crypto_manager
    parallel_server.odb_data = config.odb
    parallel_server.host = zato_gunicorn_app.zato_host
    parallel_server.port = zato_gunicorn_app.zato_port
    parallel_server.repo_location = repo_location
    parallel_server.base_dir = base_dir
    parallel_server.fs_server_config = config
    parallel_server.stats_jobs = app_context.get_object('stats_jobs')
    parallel_server.app_context = app_context

    # Remove all locks possibly left over by previous server instances
    clear_locks(app_context.get_object('kvdb'), config.main.token, config.kvdb,
                crypto_manager.decrypt)

    # Turn the repo dir into an actual repository and commit any new/modified files
    RepoManager(repo_location).ensure_repo_consistency()

    # Run the app at last
    zato_gunicorn_app.run()
Example #19
def run(base_dir):

    os.chdir(base_dir)
    
    # We're doing it here even if someone doesn't use PostgreSQL at all
    # so we're not surprised when someone suddenly starts using PG.
    # TODO: Make sure it's registered for each of the subprocesses when the code's
    #       finally modified to use subprocesses.
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)

    repo_location = os.path.join(base_dir, 'config', 'repo')

    # Configure the logging first, before configuring the actual server.
    logging.addLevelName('TRACE1', TRACE1)
    logging.config.fileConfig(os.path.join(repo_location, 'logging.conf'))

    config = get_config(repo_location, 'server.conf')
    app_context = get_app_context(config)

    crypto_manager = get_crypto_manager(repo_location, app_context, config)
    parallel_server = app_context.get_object('parallel_server')
    
    zato_gunicorn_app = ZatoGunicornApplication(parallel_server, config.main)
    
    parallel_server.crypto_manager = crypto_manager
    parallel_server.odb_data = config.odb
    parallel_server.host = zato_gunicorn_app.zato_host
    parallel_server.port = zato_gunicorn_app.zato_port
    parallel_server.repo_location = repo_location
    parallel_server.base_dir = base_dir
    parallel_server.fs_server_config = config
    parallel_server.stats_jobs = app_context.get_object('stats_jobs')
    parallel_server.app_context = app_context

    # Remove all locks possibly left over by previous server instances
    clear_locks(app_context.get_object('kvdb'), config.main.token, config.kvdb, crypto_manager.decrypt)
        
    # Turn the repo dir into an actual repository and commit any new/modified files
    RepoManager(repo_location).ensure_repo_consistency()

    # Run the app at last
    zato_gunicorn_app.run()
Example #20
    def _init(self):
        """ Initializes all the basic run-time data structures and connects
        to the Zato broker.
        """
        fs_server_config = get_config(self.repo_location, 'server.conf')
        app_context = get_app_context(fs_server_config)
        crypto_manager = get_crypto_manager(self.repo_location, app_context,
                                            fs_server_config)

        config_odb = fs_server_config.odb
        self.odb = app_context.get_object('odb_manager')
        self.odb.crypto_manager = crypto_manager
        self.odb.token = fs_server_config.main.token

        # Key-value DB
        self.kvdb = KVDB()
        self.kvdb.config = fs_server_config.kvdb
        self.kvdb.decrypt_func = self.odb.crypto_manager.decrypt
        self.kvdb.init()

        # Broker client
        self.broker_client = BrokerClient(self.kvdb, self.broker_client_id,
                                          self.broker_callbacks)
        self.broker_client.start()

        # ODB
        self.odb_config = Bunch()
        self.odb_config.db_name = config_odb.db_name
        self.odb_config.is_active = True
        self.odb_config.engine = config_odb.engine
        self.odb_config.extra = config_odb.extra
        self.odb_config.host = config_odb.host
        self.odb_config.password = self.odb.crypto_manager.decrypt(
            config_odb.password)
        self.odb_config.pool_size = config_odb.pool_size
        self.odb_config.username = config_odb.username
        self.odb_config.is_odb = True

        self.sql_pool_store = app_context.get_object('sql_pool_store')
        self.sql_pool_store[ZATO_ODB_POOL_NAME] = self.odb_config
        self.odb.pool = self.sql_pool_store[ZATO_ODB_POOL_NAME].pool

        self._setup_odb()
Example #21
def run(host, port, base_dir, start_singleton):

    repo_location = os.path.join(base_dir, 'config', 'repo')

    # Configure the logging first, before configuring the actual server.
    logging.addLevelName('TRACE1', TRACE1)
    logging.config.fileConfig(os.path.join(repo_location, 'logging.conf'))

    config = get_config(repo_location, 'server.conf')
    app_context = get_app_context(config, ZatoContext)
    crypto_manager = get_crypto_manager(repo_location, app_context, config)

    parallel_server = app_context.get_object('parallel_server')
    parallel_server.crypto_manager = crypto_manager
    parallel_server.odb.crypto_manager = crypto_manager
    parallel_server.odb.odb_data = config['odb']
    parallel_server.host = host
    parallel_server.port = port
    parallel_server.repo_location = repo_location
    
    if start_singleton:
        singleton_server = app_context.get_object('singleton_server')
        singleton_server.initial_sleep_time = int(config['singleton']['initial_sleep_time']) / 1000.
        parallel_server.singleton_server = singleton_server
        
        # Wow, this line looks weird. What it does is simply assign a parallel
        # server instance to the singleton server.
        parallel_server.singleton_server.parallel_server = parallel_server
        
    parallel_server.after_init()

    print('OK..')
    parallel_server.run_forever()
    
    # $ sudo netstat -an | grep TIME_WAIT | wc -l
    
    # cat /proc/sys/net/core/wmem_max - 109568
    # /sbin/sysctl net.ipv4.tcp_mem="109568 109568 109568"
    # echo 0 > /proc/sys/net/ipv4/conf/eth0/rp_filter


Example #22
    def _init(self):
        """ Initializes all the basic run-time data structures and connects
        to the Zato broker.
        """
        
        # Imported here to avoid circular dependencies
        from zato.server.config.app import ZatoContext

        config = get_config(self.repo_location, 'server.conf')
        app_context = get_app_context(config, ZatoContext)
        crypto_manager = get_crypto_manager(self.repo_location, app_context, config)
        
        self.odb = app_context.get_object('odb_manager')
        self.odb.crypto_manager = crypto_manager
        self.odb.odb_data = config['odb']
        
        self._setup_odb()
        
        self.worker_data = Bunch()
        self.worker_data.broker_config = Bunch()
        self.worker_data.broker_config.name = self.broker_client_name
        self.worker_data.broker_config.broker_token = self.server.cluster.broker_token
        self.worker_data.broker_config.zmq_context = zmq.Context()

        broker_push_client_pull = 'tcp://{0}:{1}'.format(self.server.cluster.broker_host, 
            self.server.cluster.broker_start_port + self.broker_push_client_pull_port)
        
        client_push_broker_pull = 'tcp://{0}:{1}'.format(self.server.cluster.broker_host, 
            self.server.cluster.broker_start_port + self.client_push_broker_pull_port)
        
        broker_pub_client_sub = 'tcp://{0}:{1}'.format(self.server.cluster.broker_host, 
            self.server.cluster.broker_start_port + self.broker_pub_client_sub_port)
        
        self.worker_data.broker_config.broker_push_client_pull = broker_push_client_pull
        self.worker_data.broker_config.client_push_broker_pull = client_push_broker_pull
        self.worker_data.broker_config.broker_pub_client_sub = broker_pub_client_sub

        # Connects to the broker
        super(BaseConnector, self)._init()
Example #23
    def _init(self):
        """ Initializes all the basic run-time data structures and connects
        to the Zato broker.
        """
        fs_server_config = get_config(self.repo_location, 'server.conf')
        app_context = get_app_context(fs_server_config)
        crypto_manager = get_crypto_manager(self.repo_location, app_context, fs_server_config)
        
        config_odb = fs_server_config.odb
        self.odb = app_context.get_object('odb_manager')
        self.odb.crypto_manager = crypto_manager
        self.odb.token = fs_server_config.main.token
        
        # Key-value DB
        self.kvdb = KVDB()
        self.kvdb.config = fs_server_config.kvdb
        self.kvdb.decrypt_func = self.odb.crypto_manager.decrypt
        self.kvdb.init()
        
        odb_data = Bunch()
        odb_data.db_name = config_odb.db_name
        odb_data.engine = config_odb.engine
        odb_data.extra = config_odb.extra
        odb_data.host = config_odb.host
        odb_data.password = self.odb.crypto_manager.decrypt(config_odb.password)
        odb_data.pool_size = config_odb.pool_size
        odb_data.username = config_odb.username
        odb_data.is_odb = True
        
        self.sql_pool_store = app_context.get_object('sql_pool_store')
        self.sql_pool_store[ZATO_ODB_POOL_NAME] = odb_data
        self.odb.pool = self.sql_pool_store[ZATO_ODB_POOL_NAME]
        
        self._setup_odb()

        # Connects to the broker
        super(BaseConnector, self)._init()
Example #24
def run(base_dir, start_gunicorn_app=True):

    # Store a pidfile before doing anything else
    store_pidfile(base_dir)

    # For dumping stacktraces
    register_diag_handlers()

    # Start initializing the server now
    os.chdir(base_dir)

    try:
        import pymysql
        pymysql.install_as_MySQLdb()
    except ImportError:
        pass

    # We're doing it here even if someone doesn't use PostgreSQL at all
    # so we're not surprised when someone suddenly starts using PG.
    # TODO: Make sure it's registered for each of the subprocesses
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)

    repo_location = os.path.join(base_dir, 'config', 'repo')

    # Configure the logging first, before configuring the actual server.
    logging.addLevelName('TRACE1', TRACE1)
    logging.config.fileConfig(os.path.join(repo_location, 'logging.conf'))

    logger = logging.getLogger(__name__)
    kvdb_logger = logging.getLogger('zato_kvdb')

    config = get_config(repo_location, 'server.conf')

    # New in 2.0 - Start monitoring as soon as possible
    if config.get('newrelic', {}).get('config'):
        import newrelic.agent
        newrelic.agent.initialize(
            config.newrelic.config, config.newrelic.environment or None, config.newrelic.ignore_errors or None,
            config.newrelic.log_file or None, config.newrelic.log_level or None)

    # Store KVDB config in logs, possibly replacing its password if told to
    kvdb_config = get_kvdb_config_for_log(config.kvdb)
    kvdb_logger.info('Master process config `%s`', kvdb_config)

    # New in 2.0 hence optional
    user_locale = config.misc.get('locale', None)
    if user_locale:
        locale.setlocale(locale.LC_ALL, user_locale)
        value = 12345
        logger.info('Locale is `%s`, amount of %s -> `%s`', user_locale, value,
                    locale.currency(value, grouping=True).decode('utf-8'))

    # Spring Python
    app_context = get_app_context(config)

    # Makes queries against Postgres asynchronous
    if asbool(config.odb.use_async_driver) and config.odb.engine == 'postgresql':
        make_psycopg_green()

    # New in 2.0 - Put HTTP_PROXY in os.environ.
    http_proxy = config.misc.get('http_proxy', False)
    if http_proxy:
        os.environ['http_proxy'] = http_proxy

    crypto_manager = get_crypto_manager(repo_location, app_context, config)
    parallel_server = app_context.get_object('parallel_server')

    zato_gunicorn_app = ZatoGunicornApplication(parallel_server, repo_location, config.main, config.crypto)

    parallel_server.crypto_manager = crypto_manager
    parallel_server.odb_data = config.odb
    parallel_server.host = zato_gunicorn_app.zato_host
    parallel_server.port = zato_gunicorn_app.zato_port
    parallel_server.repo_location = repo_location
    parallel_server.base_dir = base_dir
    parallel_server.fs_server_config = config
    parallel_server.user_config.update(config.user_config_items)
    parallel_server.startup_jobs = app_context.get_object('startup_jobs')
    parallel_server.app_context = app_context

    # Remove all locks possibly left over by previous server instances
    kvdb = app_context.get_object('kvdb')
    kvdb.component = 'master-proc'
    clear_locks(kvdb, config.main.token, config.kvdb, crypto_manager.decrypt)

    # Turn the repo dir into an actual repository and commit any new/modified files
    RepoManager(repo_location).ensure_repo_consistency()

    # New in 2.0 so it's optional.
    profiler_enabled = config.get('profiler', {}).get('enabled', False)

    if asbool(profiler_enabled):
        profiler_dir = os.path.abspath(os.path.join(base_dir, config.profiler.profiler_dir))
        parallel_server.on_wsgi_request = ProfileMiddleware(
            parallel_server.on_wsgi_request,
            log_filename = os.path.join(profiler_dir, config.profiler.log_filename),
            cachegrind_filename = os.path.join(profiler_dir, config.profiler.cachegrind_filename),
            discard_first_request = config.profiler.discard_first_request,
            flush_at_shutdown = config.profiler.flush_at_shutdown,
            path = config.profiler.url_path,
            unwind = config.profiler.unwind)

    # Run the app at last, if we were executed from the command line
    if start_gunicorn_app:
        zato_gunicorn_app.run()
    else:
        return zato_gunicorn_app.zato_wsgi_app
Example #25
def run(base_dir, start_gunicorn_app=True):

    # Filter out warnings we are not interested in
    warnings.filterwarnings('ignore', 'Mean of empty slice.')
    warnings.filterwarnings('ignore',
                            'invalid value encountered in double_scalars')

    # Store a pidfile before doing anything else
    store_pidfile(base_dir)

    # For dumping stacktraces
    register_diag_handlers()

    # Start initializing the server now
    os.chdir(base_dir)

    try:
        import pymysql
        pymysql.install_as_MySQLdb()
    except ImportError:
        pass

    # We're doing it here even if someone doesn't use PostgreSQL at all
    # so we're not surprised when someone suddenly starts using PG.
    # TODO: Make sure it's registered for each of the subprocesses
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)

    repo_location = os.path.join(base_dir, 'config', 'repo')

    # Configure the logging first, before configuring the actual server.
    logging.addLevelName('TRACE1', TRACE1)

    with open(os.path.join(repo_location, 'logging.conf')) as f:
        dictConfig(yaml.load(f))

    logger = logging.getLogger(__name__)
    kvdb_logger = logging.getLogger('zato_kvdb')

    config = get_config(repo_location, 'server.conf')

    # New in 2.0 - Start monitoring as soon as possible
    if config.get('newrelic', {}).get('config'):
        import newrelic.agent
        newrelic.agent.initialize(config.newrelic.config,
                                  config.newrelic.environment or None,
                                  config.newrelic.ignore_errors or None,
                                  config.newrelic.log_file or None,
                                  config.newrelic.log_level or None)

    # New in 2.0 - override gunicorn-set Server HTTP header
    gunicorn.SERVER_SOFTWARE = config.misc.get('http_server_header', 'Zato')

    # Store KVDB config in logs, possibly replacing its password if told to
    kvdb_config = get_kvdb_config_for_log(config.kvdb)
    kvdb_logger.info('Master process config `%s`', kvdb_config)

    # New in 2.0 hence optional
    user_locale = config.misc.get('locale', None)
    if user_locale:
        locale.setlocale(locale.LC_ALL, user_locale)
        value = 12345
        logger.info('Locale is `%s`, amount of %s -> `%s`', user_locale, value,
                    locale.currency(value, grouping=True).decode('utf-8'))

    # Spring Python
    app_context = get_app_context(config)

    # Makes queries against Postgres asynchronous
    if asbool(
            config.odb.use_async_driver) and config.odb.engine == 'postgresql':
        make_psycopg_green()

    # New in 2.0 - Put HTTP_PROXY in os.environ.
    http_proxy = config.misc.get('http_proxy', False)
    if http_proxy:
        os.environ['http_proxy'] = http_proxy

    crypto_manager = get_crypto_manager(repo_location, app_context, config)
    parallel_server = app_context.get_object('parallel_server')

    zato_gunicorn_app = ZatoGunicornApplication(parallel_server, repo_location,
                                                config.main, config.crypto)

    parallel_server.crypto_manager = crypto_manager
    parallel_server.odb_data = config.odb
    parallel_server.host = zato_gunicorn_app.zato_host
    parallel_server.port = zato_gunicorn_app.zato_port
    parallel_server.repo_location = repo_location
    parallel_server.base_dir = base_dir
    parallel_server.tls_dir = os.path.join(parallel_server.base_dir, 'config',
                                           'repo', 'tls')
    parallel_server.fs_server_config = config
    parallel_server.user_config.update(config.user_config_items)
    parallel_server.startup_jobs = app_context.get_object('startup_jobs')
    parallel_server.app_context = app_context

    # Remove all locks possibly left over by previous server instances
    kvdb = app_context.get_object('kvdb')
    kvdb.component = 'master-proc'
    clear_locks(kvdb, config.main.token, config.kvdb, crypto_manager.decrypt)

    # Turn the repo dir into an actual repository and commit any new/modified files
    RepoManager(repo_location).ensure_repo_consistency()

    # New in 2.0 so it's optional.
    profiler_enabled = config.get('profiler', {}).get('enabled', False)

    # New in 2.0 so it's optional.
    sentry_config = config.get('sentry')

    dsn = sentry_config.pop('dsn', None)
    if dsn:

        from raven import Client
        from raven.conf import setup_logging
        from raven.handlers.logging import SentryHandler

        handler_level = sentry_config.pop('level')
        client = Client(dsn, **sentry_config)

        handler = SentryHandler(client=client)
        handler.setLevel(getattr(logging, handler_level))

        logger = logging.getLogger('')
        logger.addHandler(handler)

        for name in logging.Logger.manager.loggerDict:
            if name.startswith('zato'):
                logger = logging.getLogger(name)
                logger.addHandler(handler)

    if asbool(profiler_enabled):
        profiler_dir = os.path.abspath(
            os.path.join(base_dir, config.profiler.profiler_dir))
        parallel_server.on_wsgi_request = ProfileMiddleware(
            parallel_server.on_wsgi_request,
            log_filename=os.path.join(profiler_dir,
                                      config.profiler.log_filename),
            cachegrind_filename=os.path.join(
                profiler_dir, config.profiler.cachegrind_filename),
            discard_first_request=config.profiler.discard_first_request,
            flush_at_shutdown=config.profiler.flush_at_shutdown,
            path=config.profiler.url_path,
            unwind=config.profiler.unwind)

    # Run the app at last, if we were executed from the command line
    if start_gunicorn_app:
        zato_gunicorn_app.run()
    else:
        return zato_gunicorn_app.zato_wsgi_app
Example #26
def run(base_dir, start_gunicorn_app=True):

    # Store a pidfile before doing anything else
    store_pidfile(base_dir)

    # For dumping stacktraces
    register_diag_handlers()

    # Capture warnings to log files
    logging.captureWarnings(True)

    # Start initializing the server now
    os.chdir(base_dir)

    try:
        import pymysql
        pymysql.install_as_MySQLdb()
    except ImportError:
        pass

    # We're doing it here even if someone doesn't use PostgreSQL at all
    # so we're not surprised when someone suddenly starts using PG.
    # TODO: Make sure it's registered for each of the subprocesses
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)

    # We know we don't need warnings because users may explicitly configure no certificate validation.
    # We don't want urllib3 to warn us about it.
    import requests as _r
    _r.packages.urllib3.disable_warnings()

    repo_location = os.path.join(base_dir, 'config', 'repo')

    # Configure the logging first, before configuring the actual server.
    logging.addLevelName('TRACE1', TRACE1)

    with open(os.path.join(repo_location, 'logging.conf')) as f:
        dictConfig(yaml.load(f))

    logger = logging.getLogger(__name__)
    kvdb_logger = logging.getLogger('zato_kvdb')

    config = get_config(repo_location, 'server.conf')

    # New in 2.0 - Start monitoring as soon as possible
    if config.get('newrelic', {}).get('config'):
        import newrelic.agent
        newrelic.agent.initialize(
            config.newrelic.config, config.newrelic.environment or None, config.newrelic.ignore_errors or None,
            config.newrelic.log_file or None, config.newrelic.log_level or None)

    # New in 2.0 - override gunicorn-set Server HTTP header
    gunicorn.SERVER_SOFTWARE = config.misc.get('http_server_header', 'Zato')

    # Store KVDB config in logs, possibly replacing its password if told to
    kvdb_config = get_kvdb_config_for_log(config.kvdb)
    kvdb_logger.info('Master process config `%s`', kvdb_config)

    # New in 2.0 hence optional
    user_locale = config.misc.get('locale', None)
    if user_locale:
        locale.setlocale(locale.LC_ALL, user_locale)
        value = 12345
        logger.info('Locale is `%s`, amount of %s -> `%s`', user_locale, value,
                    locale.currency(value, grouping=True).decode('utf-8'))

    # Spring Python
    app_context = get_app_context(config)

    # Makes queries against Postgres asynchronous
    if asbool(config.odb.use_async_driver) and config.odb.engine == 'postgresql':
        make_psycopg_green()

    # New in 2.0 - Put HTTP_PROXY in os.environ.
    http_proxy = config.misc.get('http_proxy', False)
    if http_proxy:
        os.environ['http_proxy'] = http_proxy

    crypto_manager = get_crypto_manager(repo_location, app_context, config)
    parallel_server = app_context.get_object('parallel_server')

    zato_gunicorn_app = ZatoGunicornApplication(parallel_server, repo_location, config.main, config.crypto)

    parallel_server.crypto_manager = crypto_manager
    parallel_server.odb_data = config.odb
    parallel_server.host = zato_gunicorn_app.zato_host
    parallel_server.port = zato_gunicorn_app.zato_port
    parallel_server.repo_location = repo_location
    parallel_server.base_dir = base_dir
    parallel_server.tls_dir = os.path.join(parallel_server.base_dir, 'config', 'repo', 'tls')
    parallel_server.fs_server_config = config
    parallel_server.user_config.update(config.user_config_items)
    parallel_server.startup_jobs = app_context.get_object('startup_jobs')
    parallel_server.app_context = app_context

    # Remove all locks possibly left over by previous server instances
    kvdb = app_context.get_object('kvdb')
    kvdb.component = 'master-proc'
    clear_locks(kvdb, config.main.token, config.kvdb, crypto_manager.decrypt)

    # Turn the repo dir into an actual repository and commit any new/modified files
    RepoManager(repo_location).ensure_repo_consistency()

    # New in 2.0 so it's optional.
    profiler_enabled = config.get('profiler', {}).get('enabled', False)

    # New in 2.0 so it's optional.
    sentry_config = config.get('sentry')

    dsn = sentry_config.pop('dsn', None)
    if dsn:

        from raven import Client
        from raven.handlers.logging import SentryHandler

        handler_level = sentry_config.pop('level')
        client = Client(dsn, **sentry_config)

        handler = SentryHandler(client=client)
        handler.setLevel(getattr(logging, handler_level))

        logger = logging.getLogger('')
        logger.addHandler(handler)

        for name in logging.Logger.manager.loggerDict:
            if name.startswith('zato'):
                logger = logging.getLogger(name)
                logger.addHandler(handler)

    if asbool(profiler_enabled):
        profiler_dir = os.path.abspath(os.path.join(base_dir, config.profiler.profiler_dir))
        parallel_server.on_wsgi_request = ProfileMiddleware(
            parallel_server.on_wsgi_request,
            log_filename = os.path.join(profiler_dir, config.profiler.log_filename),
            cachegrind_filename = os.path.join(profiler_dir, config.profiler.cachegrind_filename),
            discard_first_request = config.profiler.discard_first_request,
            flush_at_shutdown = config.profiler.flush_at_shutdown,
            path = config.profiler.url_path,
            unwind = config.profiler.unwind)

    # New in 2.0 - set environment variables for servers to inherit
    os_environ = config.get('os_environ', {})
    for key, value in os_environ.items():
        os.environ[key] = value

    # Run the app at last
    if start_gunicorn_app:
        zato_gunicorn_app.run()
    else:
        return zato_gunicorn_app.zato_wsgi_app
Example #27
def run(base_dir, start_gunicorn_app=True, options=None):
    options = options or {}

    # Store a pidfile before doing anything else
    store_pidfile(base_dir)

    # For dumping stacktraces
    register_diag_handlers()

    # Capture warnings to log files
    logging.captureWarnings(True)

    # Start initializing the server now
    os.chdir(base_dir)

    try:
        import pymysql
        pymysql.install_as_MySQLdb()
    except ImportError:
        pass

    # We're doing it here even if someone doesn't use PostgreSQL at all
    # so we're not surprised when someone suddenly starts using PG.
    # TODO: Make sure it's registered for each of the subprocesses
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)

    # We know we don't need warnings because users may explicitly configure no certificate validation.
    # We don't want urllib3 to warn us about it.
    import requests as _r
    _r.packages.urllib3.disable_warnings()

    repo_location = os.path.join(base_dir, 'config', 'repo')

    # Configure the logging first, before configuring the actual server.
    logging.addLevelName('TRACE1', TRACE1)

    with open(os.path.join(repo_location, 'logging.conf')) as f:
        dictConfig(yaml.load(f))

    logger = logging.getLogger(__name__)
    kvdb_logger = logging.getLogger('zato_kvdb')

    config = get_config(repo_location, 'server.conf')
    pickup_config = get_config(repo_location, 'pickup.conf')

    # Do not proceed unless we can be certain our own preferred address or IP can be obtained.
    preferred_address = config.preferred_address.get('address')

    if not preferred_address:
        preferred_address = get_preferred_ip(config.main.gunicorn_bind,
                                             config.preferred_address)

    if not preferred_address and not config.server_to_server.boot_if_preferred_not_found:
        msg = 'Unable to start the server. Could not obtain a preferred address, please configure [bind_options] in server.conf'
        logger.warn(msg)
        raise Exception(msg)

    # New in 2.0 - Start monitoring as soon as possible
    if config.get('newrelic', {}).get('config'):
        import newrelic.agent
        newrelic.agent.initialize(config.newrelic.config,
                                  config.newrelic.environment or None,
                                  config.newrelic.ignore_errors or None,
                                  config.newrelic.log_file or None,
                                  config.newrelic.log_level or None)

    # New in 2.0 - override gunicorn-set Server HTTP header
    gunicorn.SERVER_SOFTWARE = config.misc.get('http_server_header', 'Zato')

    # Store KVDB config in logs, possibly replacing its password if told to
    kvdb_config = get_kvdb_config_for_log(config.kvdb)
    kvdb_logger.info('Master process config `%s`', kvdb_config)

    # New in 2.0 hence optional
    user_locale = config.misc.get('locale', None)
    if user_locale:
        locale.setlocale(locale.LC_ALL, user_locale)
        value = 12345
        logger.info('Locale is `%s`, amount of %s -> `%s`', user_locale, value,
                    locale.currency(value, grouping=True).decode('utf-8'))

    # Spring Python
    app_context = get_app_context(config)

    # Makes queries against Postgres asynchronous
    if asbool(
            config.odb.use_async_driver) and config.odb.engine == 'postgresql':
        make_psycopg_green()

    # New in 2.0 - Put HTTP_PROXY in os.environ.
    http_proxy = config.misc.get('http_proxy', False)
    if http_proxy:
        os.environ['http_proxy'] = http_proxy

    crypto_manager = get_crypto_manager(repo_location, app_context, config)
    server = app_context.get_object('server')

    zato_gunicorn_app = ZatoGunicornApplication(server, repo_location,
                                                config.main, config.crypto)

    server.crypto_manager = crypto_manager
    server.odb_data = config.odb
    server.host = zato_gunicorn_app.zato_host
    server.port = zato_gunicorn_app.zato_port
    server.repo_location = repo_location
    server.user_conf_location = os.path.join(server.repo_location, 'user-conf')
    server.base_dir = base_dir
    server.tls_dir = os.path.join(server.base_dir, 'config', 'repo', 'tls')
    server.fs_server_config = config
    server.pickup_config = pickup_config
    server.user_config.update(config.user_config_items)
    server.app_context = app_context
    server.preferred_address = preferred_address
    server.sync_internal = options['sync_internal']
    server.jwt_secret = server.fs_server_config.misc.jwt_secret.encode('utf8')

    # Remove all locks possibly left over by previous server instances
    kvdb = app_context.get_object('kvdb')
    kvdb.component = 'master-proc'
    clear_locks(kvdb, config.main.token, config.kvdb, crypto_manager.decrypt)

    # New in 2.0.8
    server.return_tracebacks = asbool(
        config.misc.get('return_tracebacks', True))
    server.default_error_message = config.misc.get('default_error_message',
                                                   'An error has occurred')

    # Turn the repo dir into an actual repository and commit any new/modified files
    RepoManager(repo_location).ensure_repo_consistency()

    # New in 2.0 so it's optional.
    profiler_enabled = config.get('profiler', {}).get('enabled', False)

    # New in 2.0 so it's optional.
    sentry_config = config.get('sentry', {})
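    # A hypothetical [sentry] stanza consumed below - `dsn` and `level` are popped
    # explicitly and any remaining keys are passed straight to raven.Client:
    #
    # [sentry]
    # dsn=https://key@sentry.example.com/1
    # level=WARN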

    dsn = sentry_config.pop('dsn', None)
    if dsn:

        from raven import Client
        from raven.handlers.logging import SentryHandler

        handler_level = sentry_config.pop('level')
        client = Client(dsn, **sentry_config)

        handler = SentryHandler(client=client)
        handler.setLevel(getattr(logging, handler_level))

        logger = logging.getLogger('')
        logger.addHandler(handler)

        for name in logging.Logger.manager.loggerDict:
            if name.startswith('zato'):
                logger = logging.getLogger(name)
                logger.addHandler(handler)

    if asbool(profiler_enabled):
        profiler_dir = os.path.abspath(
            os.path.join(base_dir, config.profiler.profiler_dir))
        server.on_wsgi_request = ProfileMiddleware(
            server.on_wsgi_request,
            log_filename=os.path.join(profiler_dir,
                                      config.profiler.log_filename),
            cachegrind_filename=os.path.join(
                profiler_dir, config.profiler.cachegrind_filename),
            discard_first_request=config.profiler.discard_first_request,
            flush_at_shutdown=config.profiler.flush_at_shutdown,
            path=config.profiler.url_path,
            unwind=config.profiler.unwind)
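    # The profiler keys used above (profiler_dir, log_filename, cachegrind_filename,
    # discard_first_request, flush_at_shutdown, url_path, unwind) are read from a
    # [profiler] stanza in server.conf - names inferred from the attribute accesses above.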

    # New in 2.0 - set environment variables for servers to inherit
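    # For example, a hypothetical stanza such as:
    #
    # [os_environ]
    # ZATO_CUSTOM_FLAG=1
    #
    # makes ZATO_CUSTOM_FLAG visible to the processes started later on.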
    os_environ = config.get('os_environ', {})
    for key, value in os_environ.items():
        os.environ[key] = value

    # Run the app at last
    if start_gunicorn_app:
        zato_gunicorn_app.run()
    else:
        return zato_gunicorn_app.zato_wsgi_app
Example #28
def run(base_dir, start_gunicorn_app=True, options=None):
    options = options or {}

    # Store a pidfile before doing anything else
    store_pidfile(base_dir)

    # For dumping stacktraces
    register_diag_handlers()

    # Capture warnings to log files
    logging.captureWarnings(True)

    # Start initializing the server now
    os.chdir(base_dir)

    try:
        import pymysql
        pymysql.install_as_MySQLdb()
    except ImportError:
        pass

    # We're doing it here even if someone doesn't use PostgreSQL at all
    # so we're not surprised when someone suddenly starts using PG.
    # TODO: Make sure it's registered for each of the subprocesses
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)

    # Users may explicitly configure no certificate validation for their connections,
    # so we do not want urllib3 to keep warning us about it.
    import requests as _r
    _r.packages.urllib3.disable_warnings()

    repo_location = os.path.join(base_dir, 'config', 'repo')

    # Configure the logging first, before configuring the actual server.
    logging.addLevelName(TRACE1, 'TRACE1')
    logging_conf_path = os.path.join(repo_location, 'logging.conf')

    with open(logging_conf_path) as f:
        logging_config = yaml.load(f)
        dictConfig(logging_config)

    logger = logging.getLogger(__name__)
    kvdb_logger = logging.getLogger('zato_kvdb')

    crypto_manager = ServerCryptoManager(repo_location,
                                         secret_key=options['secret_key'],
                                         stdin_data=read_stdin_data())
    secrets_config = ConfigObj(os.path.join(repo_location, 'secrets.conf'),
                               use_zato=False)
    server_config = get_config(repo_location,
                               'server.conf',
                               crypto_manager=crypto_manager,
                               secrets_conf=secrets_config)
    pickup_config = get_config(repo_location, 'pickup.conf')
    sio_config = get_config(repo_location,
                            'simple-io.conf',
                            needs_user_config=False)
    sso_config = get_config(repo_location, 'sso.conf', needs_user_config=False)
    normalize_sso_config(sso_config)

    # Now that we have access to server.conf, greenify the libraries that need to be
    # made greenlet-friendly - if there are none, do not do anything.
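    # The loop below expects a hypothetical [greenify] stanza in server.conf that maps
    # shared-library paths to booleans, for instance:
    #
    # [greenify]
    # /usr/lib/x86_64-linux-gnu/libodbc.so.2=True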
    to_greenify = []
    for key, value in server_config.get('greenify', {}).items():
        if asbool(value):
            if not os.path.exists(key):
                raise ValueError('No such path `{}`'.format(key))
            else:
                to_greenify.append(key)

    # Go ahead only if we actually have anything to greenify
    if to_greenify:
        import greenify
        greenify.greenify()
        for name in to_greenify:
            result = greenify.patch_lib(name)
            if not result:
                raise ValueError(
                    'Library `{}` could not be greenified'.format(name))
            else:
                logger.info('Greenified library `%s`', name)

    server_config.main.token = server_config.main.token.encode('utf8')

    # Do not proceed unless we can be certain our own preferred address or IP can be obtained.
    preferred_address = server_config.preferred_address.get('address')

    if not preferred_address:
        preferred_address = get_preferred_ip(server_config.main.gunicorn_bind,
                                             server_config.preferred_address)

    if not preferred_address and not server_config.server_to_server.boot_if_preferred_not_found:
        msg = 'Unable to start the server. Could not obtain a preferred address, please configure [bind_options] in server.conf'
        logger.warning(msg)
        raise Exception(msg)

    # Create the startup callable tool as soon as practical
    startup_callable_tool = StartupCallableTool(server_config)

    # Run the hook before there is any server object created
    startup_callable_tool.invoke(SERVER_STARTUP.PHASE.FS_CONFIG_ONLY,
                                 kwargs={
                                     'server_config': server_config,
                                     'pickup_config': pickup_config,
                                     'sio_config': sio_config,
                                     'sso_config': sso_config,
                                 })

    # New in 2.0 - Start monitoring as soon as possible
    if server_config.get('newrelic', {}).get('config'):
        import newrelic.agent
        newrelic.agent.initialize(server_config.newrelic.config,
                                  server_config.newrelic.environment or None,
                                  server_config.newrelic.ignore_errors or None,
                                  server_config.newrelic.log_file or None,
                                  server_config.newrelic.log_level or None)

    zunicorn.SERVER_SOFTWARE = server_config.misc.get('http_server_header',
                                                      'Zato')

    # Store KVDB config in logs, possibly replacing its password if told to
    kvdb_config = get_kvdb_config_for_log(server_config.kvdb)
    kvdb_logger.info('Main process config `%s`', kvdb_config)

    # New in 2.0 hence optional
    user_locale = server_config.misc.get('locale', None)
    if user_locale:
        locale.setlocale(locale.LC_ALL, user_locale)
        value = 12345
        logger.info('Locale is `%s`, amount of %s -> `%s`', user_locale, value,
                    locale.currency(value, grouping=True).decode('utf-8'))

    # Makes queries against Postgres asynchronous
    if asbool(server_config.odb.use_async_driver
              ) and server_config.odb.engine == 'postgresql':
        make_psycopg_green()

    if server_config.misc.http_proxy:
        os.environ['http_proxy'] = server_config.misc.http_proxy

    # Basic components needed for the server to boot up
    kvdb = KVDB()
    odb_manager = ODBManager(well_known_data=ZATO_CRYPTO_WELL_KNOWN_DATA)
    sql_pool_store = PoolStore()

    service_store = ServiceStore()
    service_store.odb = odb_manager
    service_store.services = {}

    server = ParallelServer()
    server.odb = odb_manager
    server.service_store = service_store
    server.service_store.server = server
    server.sql_pool_store = sql_pool_store
    server.service_modules = []
    server.kvdb = kvdb

    # Assigned here because it is a circular dependency
    odb_manager.parallel_server = server

    zato_gunicorn_app = ZatoGunicornApplication(server, repo_location,
                                                server_config.main,
                                                server_config.crypto)

    server.has_fg = options.get('fg')
    server.crypto_manager = crypto_manager
    server.odb_data = server_config.odb
    server.host = zato_gunicorn_app.zato_host
    server.port = zato_gunicorn_app.zato_port
    server.repo_location = repo_location
    server.user_conf_location = os.path.join(server.repo_location, 'user-conf')
    server.base_dir = base_dir
    server.logs_dir = os.path.join(server.base_dir, 'logs')
    server.tls_dir = os.path.join(server.base_dir, 'config', 'repo', 'tls')
    server.static_dir = os.path.join(server.base_dir, 'config', 'repo',
                                     'static')
    server.json_schema_dir = os.path.join(server.base_dir, 'config', 'repo',
                                          'schema', 'json')
    server.fs_server_config = server_config
    server.fs_sql_config = get_config(repo_location,
                                      'sql.conf',
                                      needs_user_config=False)
    server.pickup_config = pickup_config
    server.logging_config = logging_config
    server.logging_conf_path = logging_conf_path
    server.sio_config = sio_config
    server.sso_config = sso_config
    server.user_config.update(server_config.user_config_items)
    server.preferred_address = preferred_address
    server.sync_internal = options['sync_internal']
    server.jwt_secret = server.fs_server_config.misc.jwt_secret.encode('utf8')
    server.startup_callable_tool = startup_callable_tool
    server.is_sso_enabled = server.fs_server_config.component_enabled.sso
    if server.is_sso_enabled:
        server.sso_api = SSOAPI(server, sso_config, None,
                                crypto_manager.encrypt, crypto_manager.decrypt,
                                crypto_manager.hash_secret,
                                crypto_manager.verify_hash, new_user_id)

    # Remove all locks possibly left over by previous server instances
    kvdb.component = 'master-proc'
    clear_locks(kvdb, server_config.main.token, server_config.kvdb,
                crypto_manager.decrypt)

    # New in 2.0.8
    server.return_tracebacks = asbool(
        server_config.misc.get('return_tracebacks', True))
    server.default_error_message = server_config.misc.get(
        'default_error_message', 'An error has occurred')

    # Turn the repo dir into an actual repository and commit any new/modified files
    RepoManager(repo_location).ensure_repo_consistency()

    # New in 2.0 so it's optional.
    profiler_enabled = server_config.get('profiler', {}).get('enabled', False)

    # New in 2.0 so it's optional.
    sentry_config = server_config.get('sentry', {})

    dsn = sentry_config.pop('dsn', None)
    if dsn:

        from raven import Client
        from raven.handlers.logging import SentryHandler

        handler_level = sentry_config.pop('level')
        client = Client(dsn, **sentry_config)

        handler = SentryHandler(client=client)
        handler.setLevel(getattr(logging, handler_level))

        logger = logging.getLogger('')
        logger.addHandler(handler)

        for name in logging.Logger.manager.loggerDict:
            if name.startswith('zato'):
                logger = logging.getLogger(name)
                logger.addHandler(handler)

    if asbool(profiler_enabled):
        profiler_dir = os.path.abspath(
            os.path.join(base_dir, server_config.profiler.profiler_dir))
        server.on_wsgi_request = ProfileMiddleware(
            server.on_wsgi_request,
            log_filename=os.path.join(profiler_dir,
                                      server_config.profiler.log_filename),
            cachegrind_filename=os.path.join(
                profiler_dir, server_config.profiler.cachegrind_filename),
            discard_first_request=server_config.profiler.discard_first_request,
            flush_at_shutdown=server_config.profiler.flush_at_shutdown,
            path=server_config.profiler.url_path,
            unwind=server_config.profiler.unwind)

    # New in 2.0 - set environment variables for servers to inherit
    os_environ = server_config.get('os_environ', {})
    for key, value in os_environ.items():
        os.environ[key] = value

    # Run the hook right before the Gunicorn-level server actually starts
    startup_callable_tool.invoke(SERVER_STARTUP.PHASE.IMPL_BEFORE_RUN,
                                 kwargs={
                                     'zato_gunicorn_app': zato_gunicorn_app,
                                 })

    # Run the app at last
    if start_gunicorn_app:
        zato_gunicorn_app.run()
    else:
        return zato_gunicorn_app.zato_wsgi_app
Example #29
    def execute(self, args):

        repo_dir = os.path.join(
            os.path.abspath(os.path.join(self.original_dir, args.path)),
            'config', 'repo')
        config = get_config(repo_dir, 'server.conf')
        priv_key_location = os.path.abspath(
            os.path.join(repo_dir, config.crypto.priv_key_location))

        cm = CryptoManager(priv_key_location=priv_key_location)
        cm.load_keys()

        engine_args = Bunch()
        engine_args.odb_type = config.odb.engine
        engine_args.odb_user = config.odb.username
        engine_args.odb_password = cm.decrypt(config.odb.password)
        engine_args.odb_host = config.odb.host
        engine_args.odb_db_name = config.odb.db_name

        engine = self._get_engine(engine_args)
        session = self._get_session(engine)

        auth = None
        headers = {}

        with closing(session) as session:
            cluster = session.query(Server).\
                filter(Server.token == config.main.token).\
                one().cluster

            channel = session.query(HTTPSOAP).\
                filter(HTTPSOAP.cluster_id == cluster.id).\
                filter(HTTPSOAP.url_path == args.url_path).\
                one()

            if channel.security_id:
                security = session.query(HTTPBasicAuth).\
                    filter(HTTPBasicAuth.id == channel.security_id).\
                    first()

                if security:
                    auth = (security.username, security.password)

        if args.headers:
            for pair in args.headers.strip().split(';'):
                k, v = pair.strip().split('=', 1)
                headers[k] = v
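        # For illustration, a hypothetical value of --headers 'X-Env=dev; X-Trace=1'
        # would yield {'X-Env': 'dev', 'X-Trace': '1'} after the parsing above.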

        client = AnyServiceInvoker(
            'http://{}'.format(config.main.gunicorn_bind),
            args.url_path,
            auth,
            max_response_repr=int(args.max_response_repr),
            max_cid_repr=int(args.max_cid_repr))

        func = client.invoke_async if args.async else client.invoke
        response = func(args.name, args.payload, headers, args.channel,
                        args.data_format, args.transport)
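        # For context, this execute() method typically backs a CLI command roughly of
        # the form below (illustrative only - the exact command name and flags may differ):
        #
        #   zato service invoke /path/to/server zato.ping --payload '{}'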

        if response.ok:
            self.logger.info(response.data or '(None)')
        else:
            self.logger.error(response.details)

        if args.verbose:
            self.logger.debug('inner.text:[{}]'.format(response.inner.text))
            self.logger.debug('response:[{}]'.format(response))