Example #1
0
    def _handle_sso_PATCH(self, ctx):
        """ Changes a user's password.

        Builds a `data` dict out of the request's input and hands it over to
        self.sso.user.change_password. Optional fields (user_id,
        password_expiry, must_change) are only included if actually provided
        on input.
        """
        data = {
            # If no old password was given, substitute a random value
            # so the comparison downstream will never match anything.
            'old_password': ctx.input.get('old_password')
            or uuid4().hex,  # So it will never match anything
            'new_password': ctx.input['new_password'],
        }

        user_id = ctx.input.get('user_id')
        if user_id:
            data['user_id'] = user_id

        # Both a missing key (None) and an empty string mean 'not provided'.
        # The previous `!= ''` check let None through, which crashed below
        # on the `< 0` comparison under Python 3.
        password_expiry = ctx.input.get('password_expiry')
        if password_expiry not in (None, ''):
            if password_expiry < 0:
                # Invalid (negative) expiry - log it and fall back to the default
                self._log_invalid_password_expiry(password_expiry)
                password_expiry = self.sso.password.expiry
            data['password_expiry'] = password_expiry

        # Booleans arrive as strings on input ('' meaning 'not given')
        must_change = ctx.input.get('must_change')
        if must_change != '':
            must_change = asbool(must_change)
            data['must_change'] = must_change

        self.sso.user.change_password(self.cid, data, ctx.input.ust,
                                      ctx.input.current_app, ctx.remote_addr)
Example #2
0
    def _handle_sso_PATCH(self, ctx, _not_given=NotGiven):
        """ Updates an existing user.

        Copies only whitelisted attributes (update.all_attrs) from input into
        a `data` dict, then updates either the user identified by input
        user_id or, if none is given, the user owning the current session
        (UST). Finally publishes a USER_EDIT broker event so all servers can
        react, e.g. to toggle rate limiting.

        _not_given is a sentinel used to tell 'key absent from input' apart
        from 'key present with a None/empty value'.
        """
        # Popped (not just read) so they do not end up in `data` below
        current_ust = ctx.input.pop('ust')
        current_app = ctx.input.pop('current_app')

        # Explicitly provide only what we know is allowed
        data = {}
        for name in update.all_attrs:
            value = ctx.input.get(name, _not_given)

            # No such key on input, we can ignore it
            if value is _not_given:
                continue

            # _invalid marks a value that should not be passed on at all
            if value != _invalid:

                # Boolean values will never be None on input (SIO will convert them to a default value of an empty string)..
                if name in update.boolean_attrs:
                    if value is None:
                        continue
                    else:
                        value = asbool(value)

                # .. same goes for datetime ones.
                elif name in update.datetime_attrs:
                    if value is None:
                        continue
                    value = dt_parser.parse(value)

                data[name] = value

        # user_id selects whom to update - it is not itself an updatable attribute
        user_id = data.pop('user_id', None)

        if user_id:
            self.sso.user.update_user_by_id(self.cid, user_id, data,
                                            current_ust, current_app,
                                            ctx.remote_addr)
            user = self.sso.user.get_user_by_id(self.cid, user_id, current_ust,
                                                current_app, ctx.remote_addr)
        else:
            # No user_id on input - update the current session's user instead
            self.sso.user.update_current_user(self.cid, data, current_ust,
                                              current_app, ctx.remote_addr)
            user = self.sso.user.get_current_user(self.cid, current_ust,
                                                  current_app, ctx.remote_addr)
            user_id = user.user_id

        # Always notify all servers about this event in case we need to disable rate limiting
        self.broker_client.publish({
            'action':
            BROKER_MSG_SSO.USER_EDIT.value,
            'user_id':
            user_id,
            'is_rate_limit_active':
            ctx.input.is_rate_limit_active,
            'rate_limit_def':
            ctx.input.rate_limit_def
            if ctx.input.rate_limit_def != _invalid else None,
        })
Example #3
0
    def init(self, *ignored_args, **ignored_kwargs):
        """ Configures the Gunicorn application: lifecycle hooks, all
        main.gunicorn_* options (including host/port parsed out of
        gunicorn_bind), deployment-lock settings and, optionally, TLS.
        """
        self.cfg.set('post_fork',
                     self.zato_wsgi_app.post_fork)  # Initializes a worker
        self.cfg.set(
            'on_starting',
            self.zato_wsgi_app.on_starting)  # Generates the deployment key
        self.cfg.set(
            'worker_exit',
            self.zato_wsgi_app.worker_exit)  # Cleans up after the worker

        for k, v in self.config_main.items():
            if k.startswith('gunicorn') and v:
                # gunicorn_* keys are passed through to Gunicorn without the prefix
                k = k.replace('gunicorn_', '')
                if k == 'bind':
                    if ':' not in v:
                        # Was `.format(v)` with two placeholders, which raised
                        # IndexError instead of the intended ValueError.
                        raise ValueError(
                            'No port found in main.gunicorn_bind `{}`, such as `{}:17010`'
                            .format(v, v))
                    else:
                        host, port = v.split(':')
                        self.zato_host = host
                        self.zato_port = port
                self.cfg.set(k, v)
            else:
                # Non-gunicorn keys go to our own config instead
                if 'deployment_lock' in k:
                    v = int(v)

                self.zato_config[k] = v

        for name in ('deployment_lock_expires', 'deployment_lock_timeout'):
            setattr(self.zato_wsgi_app, name, self.zato_config[name])

        if asbool(self.crypto_config.use_tls):
            self.cfg.set(
                'ssl_version',
                getattr(ssl,
                        'PROTOCOL_{}'.format(self.crypto_config.tls_protocol)))
            self.cfg.set('ciphers', self.crypto_config.tls_ciphers)
            self.cfg.set(
                'cert_reqs',
                getattr(
                    ssl, 'CERT_{}'.format(
                        self.crypto_config.tls_client_certs.upper())))
            self.cfg.set(
                'ca_certs',
                absjoin(self.repo_location,
                        self.crypto_config.ca_certs_location))
            self.cfg.set(
                'keyfile',
                absjoin(self.repo_location,
                        self.crypto_config.priv_key_location))
            self.cfg.set(
                'certfile',
                absjoin(self.repo_location, self.crypto_config.cert_location))
            self.cfg.set('do_handshake_on_connect', True)

        self.zato_wsgi_app.has_gevent = 'gevent' in self.cfg.settings[
            'worker_class'].value
Example #4
0
    def init_jobs(self):
        """ Registers the startup jobs and then, if configured to,
        actually starts all scheduler jobs.
        """
        # Wait first, so that at least one server is already running
        # if the environment was started from quickstart scripts.
        sleep(initial_sleep)

        cluster_conf = self.config.main.cluster
        stats_enabled = asbool(cluster_conf.stats_enabled)
        add_startup_jobs(cluster_conf.id, self.odb, self.startup_jobs, stats_enabled)

        # Actually start jobs now, including any added above
        if self._add_scheduler_jobs:
            add_scheduler_jobs(
                self.api, self.odb, self.config.main.cluster.id, spawn=False)
Example #5
0
def publish_action(req):
    """ Publishes a pub/sub message from the web admin, building the service
    request out of req.POST and invoking zato.pubsub.publish.publish.
    Returns a JSON HttpResponse with `is_ok` and a human-readable `message`.
    """
    try:

        msg_id = req.POST.get('msg_id') or new_msg_id()
        gd = req.POST['gd']

        # None means 'use the topic's own guaranteed-delivery default'
        if gd == PUBSUB.GD_CHOICE.DEFAULT_PER_TOPIC.id:
            has_gd = None
        else:
            has_gd = asbool(gd)

        service_input = {
            'msg_id': msg_id,
            'has_gd': has_gd,
            'skip_pattern_matching': True,
            'endpoint_id': req.POST['publisher_id'],
        }

        # Comma-separated lists of sub keys, each element stripped
        for name in ('reply_to_sk', 'deliver_to_sk'):
            value = req.POST.get(name, '')
            if value:
                value = value.split(',')
                value = [elem.strip() for elem in value]
                service_input[name] = value

        # Required fields
        for name in ('cluster_id', 'topic_name', 'data'):
            service_input[name] = req.POST[name]

        # Optional fields
        for name in ('correl_id', 'priority', 'ext_client_id',
                     'position_in_group', 'expiration', 'in_reply_to'):
            service_input[name] = req.POST.get(
                name, None) or None  # Always use None instead of ''

        req.zato.client.invoke('zato.pubsub.publish.publish', service_input)

    except Exception as e:
        # Not every exception carries args (e.g. `raise Exception()`) -
        # previously `e.args[0]` could itself raise IndexError here.
        message = e.args[0] if e.args else repr(e)
        is_ok = False
    else:
        message = 'Successfully published message `{}`'.format(msg_id)
        is_ok = True

    return HttpResponse(dumps({
        'is_ok': is_ok,
        'message': message,
    }))
Example #6
0
    def set_up_config(self, server):
        """ Reads all the per-cluster configuration out of the ODB and stores
        the results under self.config, one ConfigDict per kind of object
        (connection definitions, channels, outgoing connections, security
        definitions, pub/sub, messages and so on). Also prepares HTTP/SOAP
        channel matchers, SimpleIO settings and HTTP access-log filtering,
        and finally assigns the whole config to self.worker_store.

        NOTE(review): the trailing `True` passed to each odb.get_*_list call
        presumably means 'needs columns/serialized rows' - confirm against
        the ODB API before relying on it.
        """

        # Which components are enabled
        self.component_enabled.stats = asbool(
            self.fs_server_config.component_enabled.stats)
        self.component_enabled.slow_response = asbool(
            self.fs_server_config.component_enabled.slow_response)

        #
        # Cassandra - start
        #

        query = self.odb.get_cassandra_conn_list(server.cluster.id, True)
        self.config.cassandra_conn = ConfigDict.from_query(
            'cassandra_conn', query, decrypt_func=self.decrypt)

        query = self.odb.get_cassandra_query_list(server.cluster.id, True)
        self.config.cassandra_query = ConfigDict.from_query(
            'cassandra_query', query, decrypt_func=self.decrypt)

        #
        # Cassandra - end
        #

        #
        # Search - start
        #

        query = self.odb.get_search_es_list(server.cluster.id, True)
        self.config.search_es = ConfigDict.from_query(
            'search_es', query, decrypt_func=self.decrypt)

        query = self.odb.get_search_solr_list(server.cluster.id, True)
        self.config.search_solr = ConfigDict.from_query(
            'search_solr', query, decrypt_func=self.decrypt)

        #
        # Search - end
        #

        #
        # SMS - start
        #

        query = self.odb.get_sms_twilio_list(server.cluster.id, True)
        self.config.sms_twilio = ConfigDict.from_query(
            'sms_twilio', query, decrypt_func=self.decrypt)

        #
        # SMS - end
        #

        #
        # Cloud - start
        #

        # OpenStack - Swift

        query = self.odb.get_cloud_openstack_swift_list(
            server.cluster.id, True)
        self.config.cloud_openstack_swift = ConfigDict.from_query(
            'cloud_openstack_swift', query, decrypt_func=self.decrypt)

        query = self.odb.get_cloud_aws_s3_list(server.cluster.id, True)
        self.config.cloud_aws_s3 = ConfigDict.from_query(
            'cloud_aws_s3', query, decrypt_func=self.decrypt)

        #
        # Cloud - end
        #

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        # Services
        query = self.odb.get_service_list(server.cluster.id, True)
        self.config.service = ConfigDict.from_query('service_list',
                                                    query,
                                                    decrypt_func=self.decrypt)

        #
        # Definitions - start
        #

        # AMQP
        query = self.odb.get_definition_amqp_list(server.cluster.id, True)
        self.config.definition_amqp = ConfigDict.from_query(
            'definition_amqp', query, decrypt_func=self.decrypt)

        # IBM MQ
        query = self.odb.get_definition_wmq_list(server.cluster.id, True)
        self.config.definition_wmq = ConfigDict.from_query(
            'definition_wmq', query, decrypt_func=self.decrypt)

        #
        # Definitions - end
        #

        #
        # Channels - start
        #

        # AMQP
        query = self.odb.get_channel_amqp_list(server.cluster.id, True)
        self.config.channel_amqp = ConfigDict.from_query(
            'channel_amqp', query, decrypt_func=self.decrypt)

        # IBM MQ
        query = self.odb.get_channel_wmq_list(server.cluster.id, True)
        self.config.channel_wmq = ConfigDict.from_query(
            'channel_wmq', query, decrypt_func=self.decrypt)

        #
        # Channels - end
        #

        #
        # Outgoing connections - start
        #

        # AMQP
        query = self.odb.get_out_amqp_list(server.cluster.id, True)
        self.config.out_amqp = ConfigDict.from_query('out_amqp',
                                                     query,
                                                     decrypt_func=self.decrypt)

        # Caches
        query = self.odb.get_cache_builtin_list(server.cluster.id, True)
        self.config.cache_builtin = ConfigDict.from_query(
            'cache_builtin', query, decrypt_func=self.decrypt)

        query = self.odb.get_cache_memcached_list(server.cluster.id, True)
        self.config.cache_memcached = ConfigDict.from_query(
            'cache_memcached', query, decrypt_func=self.decrypt)

        # FTP
        query = self.odb.get_out_ftp_list(server.cluster.id, True)
        self.config.out_ftp = ConfigDict.from_query('out_ftp',
                                                    query,
                                                    decrypt_func=self.decrypt)

        # IBM MQ
        query = self.odb.get_out_wmq_list(server.cluster.id, True)
        self.config.out_wmq = ConfigDict.from_query('out_wmq',
                                                    query,
                                                    decrypt_func=self.decrypt)

        # Odoo
        query = self.odb.get_out_odoo_list(server.cluster.id, True)
        self.config.out_odoo = ConfigDict.from_query('out_odoo',
                                                     query,
                                                     decrypt_func=self.decrypt)

        # SAP RFC
        query = self.odb.get_out_sap_list(server.cluster.id, True)
        self.config.out_sap = ConfigDict.from_query('out_sap',
                                                    query,
                                                    decrypt_func=self.decrypt)

        # REST
        query = self.odb.get_http_soap_list(server.cluster.id, 'outgoing',
                                            'plain_http', True)
        self.config.out_plain_http = ConfigDict.from_query(
            'out_plain_http', query, decrypt_func=self.decrypt)

        # SFTP
        query = self.odb.get_out_sftp_list(server.cluster.id, True)
        self.config.out_sftp = ConfigDict.from_query('out_sftp',
                                                     query,
                                                     decrypt_func=self.decrypt,
                                                     drop_opaque=True)

        # SOAP
        query = self.odb.get_http_soap_list(server.cluster.id, 'outgoing',
                                            'soap', True)
        self.config.out_soap = ConfigDict.from_query('out_soap',
                                                     query,
                                                     decrypt_func=self.decrypt)

        # SQL
        query = self.odb.get_out_sql_list(server.cluster.id, True)
        self.config.out_sql = ConfigDict.from_query('out_sql',
                                                    query,
                                                    decrypt_func=self.decrypt)

        # ZMQ channels
        query = self.odb.get_channel_zmq_list(server.cluster.id, True)
        self.config.channel_zmq = ConfigDict.from_query(
            'channel_zmq', query, decrypt_func=self.decrypt)

        # ZMQ outgoing
        query = self.odb.get_out_zmq_list(server.cluster.id, True)
        self.config.out_zmq = ConfigDict.from_query('out_zmq',
                                                    query,
                                                    decrypt_func=self.decrypt)

        # WebSocket channels
        query = self.odb.get_channel_web_socket_list(server.cluster.id, True)
        self.config.channel_web_socket = ConfigDict.from_query(
            'channel_web_socket', query, decrypt_func=self.decrypt)

        #
        # Outgoing connections - end
        #

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        #
        # Generic - start
        #

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        # Connections
        query = self.odb.get_generic_connection_list(server.cluster.id, True)
        self.config.generic_connection = ConfigDict.from_query(
            'generic_connection', query, decrypt_func=self.decrypt)

        #
        # Generic - end
        #

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        #
        # Notifications - start
        #

        # OpenStack Swift
        query = self.odb.get_notif_cloud_openstack_swift_list(
            server.cluster.id, True)
        self.config.notif_cloud_openstack_swift = ConfigDict.from_query(
            'notif_cloud_openstack_swift', query, decrypt_func=self.decrypt)

        # SQL
        query = self.odb.get_notif_sql_list(server.cluster.id, True)
        self.config.notif_sql = ConfigDict.from_query(
            'notif_sql', query, decrypt_func=self.decrypt)

        #
        # Notifications - end
        #

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        #
        # Security - start
        #

        # API keys
        query = self.odb.get_apikey_security_list(server.cluster.id, True)
        self.config.apikey = ConfigDict.from_query('apikey',
                                                   query,
                                                   decrypt_func=self.decrypt)

        # AWS
        query = self.odb.get_aws_security_list(server.cluster.id, True)
        self.config.aws = ConfigDict.from_query('aws',
                                                query,
                                                decrypt_func=self.decrypt)

        # HTTP Basic Auth
        query = self.odb.get_basic_auth_list(server.cluster.id, None, True)
        self.config.basic_auth = ConfigDict.from_query(
            'basic_auth', query, decrypt_func=self.decrypt)

        # JWT
        query = self.odb.get_jwt_list(server.cluster.id, None, True)
        self.config.jwt = ConfigDict.from_query('jwt',
                                                query,
                                                decrypt_func=self.decrypt)

        # NTLM
        query = self.odb.get_ntlm_list(server.cluster.id, True)
        self.config.ntlm = ConfigDict.from_query('ntlm',
                                                 query,
                                                 decrypt_func=self.decrypt)

        # OAuth
        query = self.odb.get_oauth_list(server.cluster.id, True)
        self.config.oauth = ConfigDict.from_query('oauth',
                                                  query,
                                                  decrypt_func=self.decrypt)

        # OpenStack
        query = self.odb.get_openstack_security_list(server.cluster.id, True)
        self.config.openstack_security = ConfigDict.from_query(
            'openstack_security', query, decrypt_func=self.decrypt)

        # RBAC - permissions
        query = self.odb.get_rbac_permission_list(server.cluster.id, True)
        self.config.rbac_permission = ConfigDict.from_query(
            'rbac_permission', query, decrypt_func=self.decrypt)

        # RBAC - roles
        query = self.odb.get_rbac_role_list(server.cluster.id, True)
        self.config.rbac_role = ConfigDict.from_query(
            'rbac_role', query, decrypt_func=self.decrypt)

        # RBAC - client roles
        query = self.odb.get_rbac_client_role_list(server.cluster.id, True)
        self.config.rbac_client_role = ConfigDict.from_query(
            'rbac_client_role', query, decrypt_func=self.decrypt)

        # RBAC - role permission
        query = self.odb.get_rbac_role_permission_list(server.cluster.id, True)
        self.config.rbac_role_permission = ConfigDict.from_query(
            'rbac_role_permission', query, decrypt_func=self.decrypt)

        # TLS CA certs
        query = self.odb.get_tls_ca_cert_list(server.cluster.id, True)
        self.config.tls_ca_cert = ConfigDict.from_query(
            'tls_ca_cert', query, decrypt_func=self.decrypt)

        # TLS channel security
        query = self.odb.get_tls_channel_sec_list(server.cluster.id, True)
        self.config.tls_channel_sec = ConfigDict.from_query(
            'tls_channel_sec', query, decrypt_func=self.decrypt)

        # TLS key/cert pairs
        query = self.odb.get_tls_key_cert_list(server.cluster.id, True)
        self.config.tls_key_cert = ConfigDict.from_query(
            'tls_key_cert', query, decrypt_func=self.decrypt)

        # WS-Security
        query = self.odb.get_wss_list(server.cluster.id, True)
        self.config.wss = ConfigDict.from_query('wss',
                                                query,
                                                decrypt_func=self.decrypt)

        # Vault connections
        query = self.odb.get_vault_connection_list(server.cluster.id, True)
        self.config.vault_conn_sec = ConfigDict.from_query(
            'vault_conn_sec', query, decrypt_func=self.decrypt)

        # XPath
        query = self.odb.get_xpath_sec_list(server.cluster.id, True)
        self.config.xpath_sec = ConfigDict.from_query(
            'xpath_sec', query, decrypt_func=self.decrypt)

        # Encrypt all secrets
        self._encrypt_secrets()

        #
        # Security - end
        #

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        # All the HTTP/SOAP channels.
        http_soap = []

        for item in elems_with_opaque(
                self.odb.get_http_soap_list(server.cluster.id, 'channel')):

            # Turn the DB row into a plain dict so we can attach extra keys
            hs_item = {}
            for key in item.keys():
                hs_item[key] = getattr(item, key)

            # Pre-compile the URL-matching machinery for this channel
            hs_item['match_target'] = get_match_target(
                hs_item, http_methods_allowed_re=self.http_methods_allowed_re)
            hs_item['match_target_compiled'] = Matcher(
                hs_item['match_target'], hs_item.get('match_slash', ''))

            http_soap.append(hs_item)

        self.config.http_soap = http_soap

        # Namespaces
        query = self.odb.get_namespace_list(server.cluster.id, True)
        self.config.msg_ns = ConfigDict.from_query('msg_ns',
                                                   query,
                                                   decrypt_func=self.decrypt)

        # XPath
        query = self.odb.get_xpath_list(server.cluster.id, True)
        self.config.xpath = ConfigDict.from_query('msg_xpath',
                                                  query,
                                                  decrypt_func=self.decrypt)

        # JSON Pointer
        query = self.odb.get_json_pointer_list(server.cluster.id, True)
        self.config.json_pointer = ConfigDict.from_query(
            'json_pointer', query, decrypt_func=self.decrypt)

        # SimpleIO
        # In preparation for a SIO rewrite, we loaded SIO config from a file
        # but actual code paths require the pre-3.0 format so let's prepare it here.
        self.config.simple_io = ConfigDict('simple_io', Bunch())

        int_exact = self.sio_config.int_config.exact
        int_suffixes = self.sio_config.int_config.suffixes
        bool_prefixes = self.sio_config.bool_config.prefixes

        self.config.simple_io['int_parameters'] = int_exact
        self.config.simple_io['int_parameter_suffixes'] = int_suffixes
        self.config.simple_io['bool_parameter_prefixes'] = bool_prefixes

        # Maintain backward-compatibility with pre-3.1 versions that did not specify any particular encoding
        self.config.simple_io['bytes_to_str'] = {
            'encoding': self.sio_config.bytes_to_str_encoding or None
        }

        # Pub/sub
        self.config.pubsub = Bunch()

        # Pub/sub - endpoints
        query = self.odb.get_pubsub_endpoint_list(server.cluster.id, True)
        self.config.pubsub_endpoint = ConfigDict.from_query(
            'pubsub_endpoint', query, decrypt_func=self.decrypt)

        # Pub/sub - topics
        query = self.odb.get_pubsub_topic_list(server.cluster.id, True)
        self.config.pubsub_topic = ConfigDict.from_query(
            'pubsub_topic', query, decrypt_func=self.decrypt)

        # Pub/sub - subscriptions
        query = self.odb.get_pubsub_subscription_list(server.cluster.id, True)
        self.config.pubsub_subscription = ConfigDict.from_query(
            'pubsub_subscription', query, decrypt_func=self.decrypt)

        # E-mail - SMTP
        query = self.odb.get_email_smtp_list(server.cluster.id, True)
        self.config.email_smtp = ConfigDict.from_query(
            'email_smtp', query, decrypt_func=self.decrypt)

        # E-mail - IMAP
        query = self.odb.get_email_imap_list(server.cluster.id, True)
        self.config.email_imap = ConfigDict.from_query(
            'email_imap', query, decrypt_func=self.decrypt)

        # Message paths
        self.config.msg_ns_store = NamespaceStore()
        self.config.json_pointer_store = JSONPointerStore()
        self.config.xpath_store = XPathStore()

        # HTTP access log should optionally ignore certain requests
        access_log_ignore = self.fs_server_config.get(
            'logging', {}).get('http_access_log_ignore')
        if access_log_ignore:
            # A single entry in the config file arrives as a scalar - normalize to a list
            access_log_ignore = access_log_ignore if isinstance(
                access_log_ignore, list) else [access_log_ignore]
            self.needs_all_access_log = False
            self.access_log_ignore.update(access_log_ignore)

        # Assign config to worker
        self.worker_store.worker_config = self.config
Example #7
0
def run(base_dir, start_gunicorn_app=True, options=None):
    # type: (str, bool, dict)
    options = options or {}

    # Store a pidfile before doing anything else
    store_pidfile(base_dir)

    # For dumping stacktraces
    register_diag_handlers()

    # Capture warnings to log files
    logging.captureWarnings(True)

    # Start initializing the server now
    os.chdir(base_dir)

    try:
        import pymysql
        pymysql.install_as_MySQLdb()
    except ImportError:
        pass

    # We know we don't need warnings because users may explicitly configure no certificate validation.
    # We don't want for urllib3 to warn us about it.
    import requests as _r
    _r.packages.urllib3.disable_warnings()

    repo_location = os.path.join(base_dir, 'config', 'repo')

    # Configure the logging first, before configuring the actual server.
    logging.addLevelName('TRACE1', TRACE1)
    logging_conf_path = os.path.join(repo_location, 'logging.conf')

    with open(logging_conf_path) as f:
        logging_config = yaml.load(f, yaml.FullLoader)
        dictConfig(logging_config)

    logger = logging.getLogger(__name__)
    kvdb_logger = logging.getLogger('zato_kvdb')

    crypto_manager = ServerCryptoManager(repo_location,
                                         secret_key=options['secret_key'],
                                         stdin_data=read_stdin_data())
    secrets_config = ConfigObj(os.path.join(repo_location, 'secrets.conf'),
                               use_zato=False)
    server_config = get_config(repo_location,
                               'server.conf',
                               crypto_manager=crypto_manager,
                               secrets_conf=secrets_config)
    pickup_config = get_config(repo_location, 'pickup.conf')

    sio_config = get_config(repo_location,
                            'simple-io.conf',
                            needs_user_config=False)
    sio_config = get_sio_server_config(sio_config)

    sso_config = get_config(repo_location, 'sso.conf', needs_user_config=False)
    normalize_sso_config(sso_config)

    # Now that we have access to server.conf, greenify libraries required to be made greenlet-friendly,
    # assuming that there are any - otherwise do not do anything.
    to_greenify = []
    for key, value in server_config.get('greenify', {}).items():
        if asbool(value):
            if not os.path.exists(key):
                raise ValueError('No such path `{}`'.format(key))
            else:
                to_greenify.append(key)

    # Go ahead only if we actually have anything to greenify
    if to_greenify:
        import greenify
        greenify.greenify()
        for name in to_greenify:
            result = greenify.patch_lib(name)
            if not result:
                raise ValueError(
                    'Library `{}` could not be greenified'.format(name))
            else:
                logger.info('Greenified library `%s`', name)

    server_config.main.token = server_config.main.token.encode('utf8')

    # Do not proceed unless we can be certain our own preferred address or IP can be obtained.
    preferred_address = server_config.preferred_address.get('address')

    if not preferred_address:
        preferred_address = get_preferred_ip(server_config.main.gunicorn_bind,
                                             server_config.preferred_address)

    if not preferred_address and not server_config.server_to_server.boot_if_preferred_not_found:
        msg = 'Unable to start the server. Could not obtain a preferred address, please configure [bind_options] in server.conf'
        logger.warn(msg)
        raise Exception(msg)

    # Create the startup callable tool as soon as practical
    startup_callable_tool = StartupCallableTool(server_config)

    # Run the hook before there is any server object created
    startup_callable_tool.invoke(SERVER_STARTUP.PHASE.FS_CONFIG_ONLY,
                                 kwargs={
                                     'server_config': server_config,
                                     'pickup_config': pickup_config,
                                     'sio_config': sio_config,
                                     'sso_config': sso_config,
                                     'base_dir': base_dir,
                                 })

    # New in 2.0 - Start monitoring as soon as possible
    if server_config.get('newrelic', {}).get('config'):
        import newrelic.agent
        newrelic.agent.initialize(server_config.newrelic.config,
                                  server_config.newrelic.environment or None,
                                  server_config.newrelic.ignore_errors or None,
                                  server_config.newrelic.log_file or None,
                                  server_config.newrelic.log_level or None)

    zunicorn.SERVER_SOFTWARE = server_config.misc.get('http_server_header',
                                                      'Zato')

    # Store KVDB config in logs, possibly replacing its password if told to
    kvdb_config = get_kvdb_config_for_log(server_config.kvdb)
    kvdb_logger.info('Main process config `%s`', kvdb_config)

    # New in 2.0 hence optional
    user_locale = server_config.misc.get('locale', None)
    if user_locale:
        locale.setlocale(locale.LC_ALL, user_locale)
        value = 12345
        logger.info('Locale is `%s`, amount of %s -> `%s`', user_locale, value,
                    locale.currency(value, grouping=True).decode('utf-8'))

    if server_config.misc.http_proxy:
        os.environ['http_proxy'] = server_config.misc.http_proxy

    # Basic components needed for the server to boot up
    kvdb = KVDB()
    odb_manager = ODBManager(well_known_data=ZATO_CRYPTO_WELL_KNOWN_DATA)
    sql_pool_store = PoolStore()

    service_store = ServiceStore()
    service_store.odb = odb_manager
    service_store.services = {}

    server = ParallelServer()
    server.odb = odb_manager
    server.service_store = service_store
    server.service_store.server = server
    server.sql_pool_store = sql_pool_store
    server.service_modules = []
    server.kvdb = kvdb
    server.stderr_path = options.get('stderr_path')

    # Assigned here because it is a circular dependency
    odb_manager.parallel_server = server

    zato_gunicorn_app = ZatoGunicornApplication(server, repo_location,
                                                server_config.main,
                                                server_config.crypto)

    server.has_fg = options.get('fg')
    server.crypto_manager = crypto_manager
    server.odb_data = server_config.odb
    server.host = zato_gunicorn_app.zato_host
    server.port = zato_gunicorn_app.zato_port
    server.repo_location = repo_location
    server.user_conf_location = os.path.join(server.repo_location, 'user-conf')
    server.base_dir = base_dir
    server.logs_dir = os.path.join(server.base_dir, 'logs')
    server.tls_dir = os.path.join(server.base_dir, 'config', 'repo', 'tls')
    server.static_dir = os.path.join(server.base_dir, 'config', 'repo',
                                     'static')
    server.json_schema_dir = os.path.join(server.base_dir, 'config', 'repo',
                                          'schema', 'json')
    server.fs_server_config = server_config
    server.fs_sql_config = get_config(repo_location,
                                      'sql.conf',
                                      needs_user_config=False)
    server.pickup_config = pickup_config
    server.logging_config = logging_config
    server.logging_conf_path = logging_conf_path
    server.sio_config = sio_config
    server.sso_config = sso_config
    server.user_config.update(server_config.user_config_items)
    server.preferred_address = preferred_address
    server.sync_internal = options['sync_internal']
    server.jwt_secret = server.fs_server_config.misc.jwt_secret.encode('utf8')
    server.startup_callable_tool = startup_callable_tool
    server.is_sso_enabled = server.fs_server_config.component_enabled.sso
    if server.is_sso_enabled:
        server.sso_api = SSOAPI(server, sso_config, None,
                                crypto_manager.encrypt, crypto_manager.decrypt,
                                crypto_manager.hash_secret,
                                crypto_manager.verify_hash, new_user_id)

    # Remove all locks possibly left over by previous server instances
    kvdb.component = 'master-proc'
    clear_locks(kvdb, server_config.main.token, server_config.kvdb,
                crypto_manager.decrypt)

    # New in 2.0.8
    server.return_tracebacks = asbool(
        server_config.misc.get('return_tracebacks', True))
    server.default_error_message = server_config.misc.get(
        'default_error_message', 'An error has occurred')

    # Turn the repo dir into an actual repository and commit any new/modified files
    RepoManager(repo_location).ensure_repo_consistency()

    # New in 2.0 so it's optional.
    profiler_enabled = server_config.get('profiler', {}).get('enabled', False)

    # New in 2.0 so it's optional.
    sentry_config = server_config.get('sentry')

    dsn = sentry_config.pop('dsn', None)
    if dsn:

        from raven import Client
        from raven.handlers.logging import SentryHandler

        handler_level = sentry_config.pop('level')
        client = Client(dsn, **sentry_config)

        handler = SentryHandler(client=client)
        handler.setLevel(getattr(logging, handler_level))

        logger = logging.getLogger('')
        logger.addHandler(handler)

        for name in logging.Logger.manager.loggerDict:
            if name.startswith('zato'):
                logger = logging.getLogger(name)
                logger.addHandler(handler)

    if asbool(profiler_enabled):

        # Repoze
        from repoze.profile import ProfileMiddleware

        profiler_dir = os.path.abspath(
            os.path.join(base_dir, server_config.profiler.profiler_dir))
        server.on_wsgi_request = ProfileMiddleware(
            server.on_wsgi_request,
            log_filename=os.path.join(profiler_dir,
                                      server_config.profiler.log_filename),
            cachegrind_filename=os.path.join(
                profiler_dir, server_config.profiler.cachegrind_filename),
            discard_first_request=server_config.profiler.discard_first_request,
            flush_at_shutdown=server_config.profiler.flush_at_shutdown,
            path=server_config.profiler.url_path,
            unwind=server_config.profiler.unwind)

    # New in 2.0 - set environmet variables for servers to inherit
    os_environ = server_config.get('os_environ', {})
    for key, value in os_environ.items():
        os.environ[key] = value

    # Run the hook right before the Gunicorn-level server actually starts
    startup_callable_tool.invoke(SERVER_STARTUP.PHASE.IMPL_BEFORE_RUN,
                                 kwargs={
                                     'zato_gunicorn_app': zato_gunicorn_app,
                                 })

    # Run the app at last
    if start_gunicorn_app:
        zato_gunicorn_app.run()
    else:
        return zato_gunicorn_app.zato_wsgi_app
Exemple #8
0
def _publish_update_action(req,
                           cluster_id,
                           action,
                           msg_id=None,
                           topic_id=None):
    """ Invokes the backend pub/sub message publish/update service with form
    data taken from the POST request and returns the outcome serialized to JSON.
    """
    post = req.POST

    # Whether this is a guaranteed-delivery message - GD and non-GD
    # messages are handled by different backend services.
    has_gd = asbool(post['has_gd'])
    expiration = post.get('expiration')
    exp_from_now = asbool(post.get('exp_from_now'))

    correl_id = post.get('correl_id')
    in_reply_to = post.get('in_reply_to')

    priority = post['priority']
    mime_type = post['mime_type']
    data = post['data']

    server_name = post['server_name']
    server_pid = post['server_pid']

    # Filled in only on success, if the backend returns them
    expiration_time = None
    size = None

    try:
        input = {
            'cluster_id': cluster_id,
            'data': data,
            'expiration': expiration,
            'exp_from_now': exp_from_now,
            'correl_id': correl_id,
            'in_reply_to': in_reply_to,
            'priority': priority,
            'mime_type': mime_type,
            'server_name': server_name,
            'server_pid': server_pid,
        }

        if msg_id:
            input['msg_id'] = msg_id

        # Updates go to a GD- or non-GD-specific service
        if action == 'update':
            action += '-gd' if has_gd else '-non-gd'

        service_name = 'zato.pubsub.message.{}'.format(action)
        response = req.zato.client.invoke(service_name, input).data.response

    except Exception:
        # Report the full traceback back to the caller
        is_ok, message = False, format_exc()

    else:

        if not response.found:
            is_ok = False
            message = 'Could not find message `{}`'.format(response.msg_id)
        else:
            is_ok = True
            verb = 'updated' if action.startswith('update') else 'created'
            message = 'Message {}'.format(verb)
            size = response.size

            if response.expiration_time:

                # An anchor toggling between user-local and UTC timestamps
                expiration_time = """
                <a
                    id="a_expiration_time"
                    href="javascript:$.fn.zato.pubsub.message.details.toggle_time('expiration_time', '{expiration_time_user}', '{expiration_time_utc}')">{expiration_time_user}
                </a>
                """.format(
                    expiration_time_utc=response.expiration_time,
                    expiration_time_user=from_utc_to_user(
                        response.expiration_time + '+00:00',
                        req.zato.user_profile),
                )

    return HttpResponse(dumps({
        'is_ok': is_ok,
        'message': message,
        'expiration_time': expiration_time,
        'size': size,
    }))
Exemple #9
0
 def set_input(self, *args, **kwargs):
     """ Coerces the has_gd query parameter to a bool before the base
     class builds the rest of the view's input.
     """
     request = self.req
     request.has_gd = asbool(request.GET['has_gd'])
     super(TopicMessages, self).set_input(*args, **kwargs)
Exemple #10
0
def get_message(req, cluster_id, object_type, object_id, msg_id):
    """ Renders the details page for a single pub/sub message, looked up
    either in a topic or in an endpoint queue depending on object_type.
    """

    # Template context - the page always renders an 'update' action form
    return_data = bunchify({
        'action': 'update',
    })

    _has_gd = asbool(req.GET['has_gd'])
    _server_name = req.GET.get('server_name')
    _server_pid = req.GET.get('server_pid')

    _is_topic = object_type == 'topic'

    # GD (guaranteed-delivery) and non-GD messages are served
    # by different backend services.
    suffix = '-gd' if _has_gd else '-non-gd'

    input_dict = {
        'cluster_id': cluster_id,
        'msg_id': msg_id,
    }

    # Non-GD messages are held by a particular server process, so the
    # request must be directed at that exact server name and PID.
    if not _has_gd:
        input_dict['server_name'] = _server_name
        input_dict['server_pid'] = _server_pid

    return_data.cluster_id = cluster_id
    return_data.object_type = object_type
    return_data['{}_id'.format(object_type)] = object_id
    return_data.msg_id = msg_id
    return_data.server_name = _server_name
    return_data.server_pid = _server_pid
    return_data.has_gd = _has_gd

    # Choose the pair of services - one describing the containing object,
    # one returning the message itself.
    if _is_topic:
        object_service_name = 'zato.pubsub.topic.get'
        msg_service_name = 'zato.pubsub.message.get-from-topic' + suffix
    else:
        object_service_name = 'zato.pubsub.endpoint.get-endpoint-queue'
        msg_service_name = 'zato.pubsub.message.get-from-queue' + suffix
        input_dict['sub_key'] = req.GET['sub_key']

    object_service_response = req.zato.client.invoke(object_service_name, {
        'cluster_id': cluster_id,
        'id': object_id,
    }).data.response

    return_data.object_name = object_service_response.name

    if object_type == 'queue':
        return_data.ws_ext_client_id = object_service_response.ws_ext_client_id

    return_data.object_name_slug = slugify(return_data.object_name)

    try:
        msg_service_response = req.zato.client.invoke(msg_service_name,
                                                      input_dict).data.response
    except Exception:
        # Fix: logger.warn is a deprecated alias of logger.warning.
        # The message may legitimately be gone (e.g. expired), so this is
        # logged and the page is rendered without message details.
        logger.warning(format_exc())
        return_data.has_msg = False
    else:
        if not msg_service_response['msg_id']:
            return_data.has_msg = False
        else:
            return_data.has_msg = True
            return_data.update(msg_service_response)

            return_data.pub_endpoint_html = get_endpoint_html(
                return_data, cluster_id, 'published_by_id',
                'published_by_name')
            return_data.sub_endpoint_html = get_endpoint_html(
                return_data, cluster_id, 'subscriber_id', 'subscriber_name')

            if _is_topic:
                # NOTE(review): hook_pub_endpoint_id / hook_sub_endpoint_id are
                # assigned here but only consumed in the GD-queue branch below -
                # topic hook services are apparently never fetched; confirm
                # whether that is intentional.
                hook_pub_endpoint_id = return_data.endpoint_id
                hook_sub_endpoint_id = None
                return_data.object_id = return_data.pop('topic_id')
                return_data.pub_endpoint_html = get_endpoint_html(
                    return_data, cluster_id)
            else:

                # If it's a GD queue, we still need to get metadata about the message's underlying publisher
                if _has_gd:
                    topic_msg_service_response = req.zato.client.invoke(
                        'zato.pubsub.message.get-from-topic' + suffix, {
                            'cluster_id': cluster_id,
                            'msg_id': msg_id,
                            'needs_sub_queue_check': False,
                        }).data.response

                    return_data.topic_id = topic_msg_service_response.topic_id
                    return_data.topic_name = topic_msg_service_response.topic_name
                    return_data.pub_endpoint_id = topic_msg_service_response.endpoint_id
                    return_data.pub_endpoint_name = topic_msg_service_response.endpoint_name
                    return_data.pub_pattern_matched = topic_msg_service_response.pub_pattern_matched
                    return_data.pub_endpoint_html = get_endpoint_html(
                        return_data, cluster_id, 'pub_endpoint_id',
                        'pub_endpoint_name')
                    return_data.sub_endpoint_html = get_endpoint_html(
                        return_data, cluster_id, 'subscriber_id',
                        'subscriber_name')
                    return_data.object_id = return_data.pop('queue_id')

                    hook_pub_endpoint_id = return_data.pub_endpoint_id
                    hook_sub_endpoint_id = return_data.subscriber_id

                    # Resolve the publish-side hook service, if any
                    hook_pub_service_response = req.zato.client.invoke(
                        'zato.pubsub.hook.get-hook-service', {
                            'cluster_id': cluster_id,
                            'endpoint_id': hook_pub_endpoint_id,
                            'hook_type': PUBSUB.HOOK_TYPE.BEFORE_PUBLISH,
                        }).data.response
                    return_data.hook_pub_service_id = hook_pub_service_response.id
                    return_data.hook_pub_service_name = hook_pub_service_response.name

                    # .. and the delivery-side hook service
                    if hook_sub_endpoint_id:
                        hook_sub_service_response = req.zato.client.invoke(
                            'zato.pubsub.hook.get-hook-service', {
                                'cluster_id': cluster_id,
                                'endpoint_id': hook_sub_endpoint_id,
                                'hook_type': PUBSUB.HOOK_TYPE.BEFORE_DELIVERY,
                            }).data.response
                        return_data.hook_sub_service_id = hook_sub_service_response.id
                        return_data.hook_sub_service_name = hook_sub_service_response.name

            return_data.form = MsgForm(return_data)

            # Convert backend UTC timestamps to the user's timezone,
            # keeping the raw UTC value under a '_utc' suffix.
            for name in ('pub_time', 'ext_pub_time', 'expiration_time',
                         'recv_time'):
                value = return_data.get(name)
                if value:
                    return_data[name] = from_utc_to_user(
                        value + '+00:00', req.zato.user_profile)
                    return_data[name + '_utc'] = value

    return TemplateResponse(req, 'zato/pubsub/message-details.html',
                            return_data)
Exemple #11
0
 def on_after_set_input(self):
     """ Post-input hook - normalizes has_gd to a bool, since it arrives
     as a string from the request.
     """
     self.input.has_gd = asbool(self.input.has_gd)