def execute(self, args, show_output=True):
    engine = self._get_engine(args)
    session = self._get_session(engine)

    if engine.dialect.has_table(engine.connect(), 'install_state'):
        if is_arg_given(args, 'skip-if-exists'):
            if show_output:
                if self.verbose:
                    self.logger.debug('ODB already exists, skipped its creation')
                else:
                    self.logger.info('OK')
        else:
            if show_output:
                version = session.query(ZatoInstallState.version).one().version
                msg = (
                    'The ODB (v. {}) already exists, not creating it. '
                    "Use the 'zato delete odb' command first if you'd like to start afresh and "
                    'recreate all ODB objects.').format(version)
                self.logger.error(msg)

            return self.SYS_ERROR.ODB_EXISTS

    else:
        # This is needed so that PubSubMessage.data can continue to use length
        # in the column's specification which in itself is needed for MySQL to use LONGTEXT.
        def _render_string_type(self, type_, name):

            text = name
            if type_.length and name != 'TEXT':
                text += '(%d)' % type_.length
            if type_.collation:
                text += ' COLLATE "%s"' % type_.collation
            return text

        PGTypeCompiler._render_string_type = _render_string_type

        Base.metadata.create_all(engine)

        state = ZatoInstallState(None, VERSION, datetime.now(), gethostname(), getuser())
        alembic_rev = AlembicRevision(LATEST_ALEMBIC_REVISION)

        session.add(state)
        session.add(alembic_rev)
        session.commit()

        if show_output:
            if self.verbose:
                self.logger.debug('Successfully created the ODB')
            else:
                self.logger.info('OK')
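# Illustrative sketch (not part of the command above) of why the PGTypeCompiler patch
# exists. The table name, column name and length below are made up for the example.
# MySQL accepts TEXT(n) and promotes the column to the smallest text type that can hold
# n characters (LONGTEXT for a length this large), while PostgreSQL's TEXT takes no
# length at all - hence the override that drops the length when the type name is TEXT.
from sqlalchemy import Column, Integer, MetaData, Table, Text
from sqlalchemy.dialects import mysql, postgresql
from sqlalchemy.schema import CreateTable

metadata = MetaData()
messages = Table('example_messages', metadata,
    Column('id', Integer, primary_key=True),
    Column('data', Text(2000000000)),
)

# Renders TEXT(2000000000), which MySQL stores as LONGTEXT ..
print(CreateTable(messages).compile(dialect=mysql.dialect()))

# .. whereas, on the SQLAlchemy line this snippet was written against, a stock
# PostgreSQL compiler would also emit TEXT(2000000000) - invalid PostgreSQL DDL -
# which is exactly what the patched _render_string_type avoids by emitting plain TEXT.
print(CreateTable(messages).compile(dialect=postgresql.dialect()))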
def execute(self, args, use_default_backend=False, server02_port=None, show_output=True):

    os.mkdir(os.path.join(self.target_dir, 'config')) # noqa
    os.mkdir(os.path.join(self.target_dir, 'logs')) # noqa

    repo_dir = os.path.join(self.target_dir, 'config', 'repo') # noqa
    os.mkdir(repo_dir) # noqa

    log_path = os.path.abspath(os.path.join(repo_dir, '..', '..', 'logs', 'lb-agent.log')) # noqa
    stats_socket = os.path.join(self.target_dir, 'haproxy-stat.sock') # noqa

    is_tls_enabled = is_arg_given(args, 'priv_key_path')

    config = config_template.format(**{
        'is_tls_enabled': 'true' if is_tls_enabled else 'false',
    })

    open(os.path.join(repo_dir, 'lb-agent.conf'), 'w').write(config) # noqa
    open(os.path.join(repo_dir, 'logging.conf'), 'w').write(
        common_logging_conf_contents.format(log_path=log_path)) # noqa

    if use_default_backend:
        backend = default_backend.format(server01_port=http_plain_server_port, server02_port=server02_port)
    else:
        backend = '\n# ZATO default_backend_empty'

    zato_config = zato_config_template.format(
        stats_socket=stats_socket,
        stats_password=uuid.uuid4().hex,
        default_backend=backend,
        http_503_path=os.path.join(repo_dir, '503.http')) # noqa

    open(os.path.join(repo_dir, 'zato.config'), 'w').write(zato_config) # noqa
    open(os.path.join(repo_dir, '503.http'), 'w').write(http_503) # noqa

    self.copy_lb_crypto(repo_dir, args)

    # Initial info
    self.store_initial_info(self.target_dir, self.COMPONENTS.LOAD_BALANCER.code)

    if show_output:
        if self.verbose:
            msg = "Successfully created a load-balancer's agent in {}".format(self.target_dir)
            self.logger.debug(msg)
        else:
            self.logger.info('OK')
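# Illustrative sketch only - the real default_backend template ships with Zato and is not
# reproduced here; the HAProxy fragment below is invented just to show the kind of expansion
# execute() performs when use_default_backend is True.
example_backend_template = """
backend bck_http_plain
    server http_plain--server1 127.0.0.1:{server01_port} check
    server http_plain--server2 127.0.0.1:{server02_port} check
"""

print(example_backend_template.format(server01_port=17010, server02_port=17011))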
def execute(self, args, show_output=True, needs_created_flag=False):

    os.chdir(self.target_dir)

    repo_dir = os.path.join(self.target_dir, 'config', 'repo')
    conf_path = os.path.join(repo_dir, 'scheduler.conf')
    startup_jobs_conf_path = os.path.join(repo_dir, 'startup_jobs.conf')
    sql_conf_path = os.path.join(repo_dir, 'sql.conf')

    os.mkdir(os.path.join(self.target_dir, 'logs'))
    os.mkdir(os.path.join(self.target_dir, 'config'))
    os.mkdir(repo_dir)

    self.copy_scheduler_crypto(repo_dir, args)

    if hasattr(args, 'get'):
        secret_key = args.get('secret_key')
    else:
        secret_key = args.secret_key

    secret_key = secret_key or SchedulerCryptoManager.generate_key()
    cm = SchedulerCryptoManager.from_secret_key(secret_key)

    odb_engine = args.odb_type
    if odb_engine.startswith('postgresql'):
        odb_engine = 'postgresql+pg8000'

    if args.cluster_id:
        cluster_id = args.cluster_id
    else:
        cluster_id = self._get_cluster_id_by_name(args, args.cluster_name)

    odb_password = args.odb_password or ''
    odb_password = odb_password.encode('utf8')
    odb_password = cm.encrypt(odb_password)
    odb_password = odb_password.decode('utf8')

    kvdb_password = args.kvdb_password or ''
    kvdb_password = kvdb_password.encode('utf8')
    kvdb_password = cm.encrypt(kvdb_password)
    kvdb_password = kvdb_password.decode('utf8')

    user1_password = cm.generate_password()
    user1_password = cm.encrypt(user1_password)
    user1_password = user1_password.decode('utf8')

    zato_well_known_data = well_known_data.encode('utf8')
    zato_well_known_data = cm.encrypt(zato_well_known_data)
    zato_well_known_data = zato_well_known_data.decode('utf8')

    if isinstance(secret_key, (bytes, bytearray)):
        secret_key = secret_key.decode('utf8')

    # We will use TLS only if we were given crypto material on input
    use_tls = is_arg_given(args, 'priv_key_path')

    config = {
        'odb_db_name': args.odb_db_name or args.sqlite_path,
        'odb_engine': odb_engine,
        'odb_host': args.odb_host or '',
        'odb_port': args.odb_port or '',
        'odb_password': odb_password,
        'odb_username': args.odb_user or '',
        'broker_host': args.kvdb_host,
        'broker_port': args.kvdb_port,
        'broker_password': kvdb_password,
        'user1_password': user1_password,
        'cluster_id': cluster_id,
        'cluster_name': args.cluster_name,
        'secret_key1': secret_key,
        'well_known_data': zato_well_known_data,
        'use_tls': 'true' if use_tls else 'false',
    }

    open(os.path.join(repo_dir, 'logging.conf'), 'w').write(
        common_logging_conf_contents.format(log_path='./logs/scheduler.log'))
    open(conf_path, 'w').write(config_template.format(**config))
    open(startup_jobs_conf_path, 'w').write(startup_jobs)
    open(sql_conf_path, 'w').write(sql_conf_contents)

    # Initial info
    self.store_initial_info(self.target_dir, self.COMPONENTS.SCHEDULER.code)

    if show_output:
        if self.verbose:
            msg = """Successfully created a scheduler instance.
You can start it with the 'zato start {path}' command.""".format(
                path=os.path.abspath(os.path.join(os.getcwd(), self.target_dir)))
            self.logger.debug(msg)
        else:
            self.logger.info('OK')

    # We return it only when explicitly told to, so that when the command runs from the CLI
    # it does not return a non-zero exit code.
    if needs_created_flag:
        return True
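# A minimal sketch of the encode -> encrypt -> decode dance applied above to odb_password,
# kvdb_password and the other secrets. It assumes a Fernet-based scheme from the
# 'cryptography' package; SchedulerCryptoManager itself is not used here and may differ
# in details. The password value is made up.
from cryptography.fernet import Fernet

secret_key = Fernet.generate_key()          # stand-in for SchedulerCryptoManager.generate_key()
cm = Fernet(secret_key)

odb_password = 'example-password'
odb_password = odb_password.encode('utf8')  # Fernet operates on bytes ..
odb_password = cm.encrypt(odb_password)     # .. and returns URL-safe base64 bytes ..
odb_password = odb_password.decode('utf8')  # .. decoded to text so it can be written to scheduler.conf

print(odb_password)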
def execute(self, args, show_output=True, admin_password=None, needs_admin_created_flag=False):

    os.chdir(self.target_dir)

    repo_dir = os.path.join(self.target_dir, 'config', 'repo')
    web_admin_conf_path = os.path.join(repo_dir, 'web-admin.conf')
    initial_data_json_path = os.path.join(repo_dir, 'initial-data.json')

    os.mkdir(os.path.join(self.target_dir, 'logs'))
    os.mkdir(os.path.join(self.target_dir, 'config'))
    os.mkdir(repo_dir)

    user_name = 'admin'
    admin_password = admin_password if admin_password else WebAdminCryptoManager.generate_password()

    # If we have a CA's certificate then it implicitly means that there is some CA
    # which tells us that we are to trust both the CA and the certificates that it issues,
    # and the only certificate we are interested in is the one to the load-balancer.
    # This is why, if we get ca_certs_path, it must be because we are to use TLS
    # in communication with the load-balancer's agent which in turn means that we have crypto material on input.
    has_crypto = is_arg_given(args, 'ca_certs_path')

    if has_crypto:
        self.copy_web_admin_crypto(repo_dir, args)

    zato_secret_key = WebAdminCryptoManager.generate_key()
    cm = WebAdminCryptoManager.from_secret_key(zato_secret_key)

    django_secret_key = uuid4().hex.encode('utf8')
    django_site_id = getrandbits(20)

    admin_invoke_password = getattr(args, 'admin_invoke_password', None)

    if not admin_invoke_password:
        admin_invoke_password = '******' + uuid4().hex

    if isinstance(admin_invoke_password, unicode):
        admin_invoke_password = admin_invoke_password.encode('utf8')

    odb_password = args.odb_password or ''
    odb_password = odb_password.encode('utf8')

    config = {
        'host': web_admin_host,
        'port': web_admin_port,
        'db_type': args.odb_type,
        'log_config': 'logging.conf',
        'lb_agent_use_tls': 'true' if has_crypto else 'false',
        'zato_secret_key': zato_secret_key,
        'well_known_data': cm.encrypt(well_known_data.encode('utf8')),
        'DATABASE_NAME': args.odb_db_name or args.sqlite_path,
        'DATABASE_USER': args.odb_user or '',
        'DATABASE_PASSWORD': cm.encrypt(odb_password),
        'DATABASE_HOST': args.odb_host or '',
        'DATABASE_PORT': args.odb_port or '',
        'SITE_ID': django_site_id,
        'SECRET_KEY': cm.encrypt(django_secret_key),
        'ADMIN_INVOKE_NAME': 'admin.invoke',
        'ADMIN_INVOKE_PASSWORD': cm.encrypt(admin_invoke_password),
    }

    for name in 'zato_secret_key', 'well_known_data', 'DATABASE_PASSWORD', 'SECRET_KEY', 'ADMIN_INVOKE_PASSWORD':
        config[name] = config[name].decode('utf8')

    open(os.path.join(repo_dir, 'logging.conf'), 'w').write(
        common_logging_conf_contents.format(log_path='./logs/web-admin.log'))
    open(web_admin_conf_path, 'w').write(config_template.format(**config))
    open(initial_data_json_path, 'w').write(initial_data_json.format(**config))

    # Initial info
    self.store_initial_info(self.target_dir, self.COMPONENTS.WEB_ADMIN.code)

    config = json.loads(open(os.path.join(repo_dir, 'web-admin.conf')).read())
    config['config_dir'] = self.target_dir
    update_globals(config, self.target_dir)

    os.environ['DJANGO_SETTINGS_MODULE'] = 'zato.admin.settings'

    import django
    django.setup()
    self.reset_logger(args, True)

    # Can't import these without DJANGO_SETTINGS_MODULE being set
    from django.contrib.auth.models import User
    from django.db import connection
    from django.db.utils import IntegrityError

    call_command('migrate', run_syncdb=True, interactive=False, verbosity=0)
    call_command('loaddata', initial_data_json_path, verbosity=0)

    try:
        call_command(
            'createsuperuser', interactive=False, username=user_name,
            first_name='admin-first-name', last_name='admin-last-name',
            email='*****@*****.**')
        admin_created = True

        user = User.objects.get(username=user_name)
        user.set_password(admin_password)
        user.save()

    except IntegrityError:
        # This will happen if user 'admin' already exists, e.g. if this is not the first cluster in this database
        admin_created = False
        connection._rollback()

    # Needed because Django took over our logging config
    self.reset_logger(args, True)

    if show_output:
        if self.verbose:
            msg = """Successfully created a web admin instance.
You can start it with the 'zato start {path}' command.""".format(
                path=os.path.abspath(os.path.join(os.getcwd(), self.target_dir)))
            self.logger.debug(msg)
        else:
            self.logger.info('OK')

    # We return it only when explicitly told to, so that when the command runs from the CLI
    # it does not return a non-zero exit code.
    if needs_admin_created_flag:
        return admin_created
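# Hedged usage sketch: after execute() above has run, the freshly created superuser can be
# inspected like this, assuming zato.admin.settings is importable in the current environment
# (it reads the web-admin config written above). The password literal is a placeholder for
# whatever admin_password was passed in or generated.
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'zato.admin.settings')

import django
django.setup()

from django.contrib.auth.models import User

user = User.objects.get(username='admin')   # the user_name created above
print(user.is_superuser)
print(user.check_password('replace-with-the-admin-password-used-above'))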
def execute(self, args, show_output=True):

    # stdlib
    from datetime import datetime
    from traceback import format_exc

    # SQLAlchemy
    from sqlalchemy.exc import IntegrityError

    # Zato
    from zato.common.odb.model import Cluster, HTTPBasicAuth
    from zato.common.odb.post_process import ODBPostProcess

    engine = self._get_engine(args)
    session = self._get_session(engine)

    if engine.dialect.has_table(engine.connect(), 'install_state'):
        if is_arg_given(args, 'skip-if-exists', 'skip_if_exists'):
            if show_output:
                if self.verbose:
                    self.logger.debug('Cluster already exists, skipped its creation')
                else:
                    self.logger.info('OK')
            return

    with session.no_autoflush:

        cluster = Cluster()
        cluster.name = args.cluster_name
        cluster.description = 'Created by {} on {} (UTC)'.format(self._get_user_host(), datetime.utcnow().isoformat())

        for name in (
                'odb_type', 'odb_host', 'odb_port', 'odb_user', 'odb_db_name',
                'broker_host', 'broker_port', 'lb_host', 'lb_port', 'lb_agent_port'):
            setattr(cluster, name, getattr(args, name))

        session.add(cluster)

        # With a cluster object in place, we can construct the ODB post-processor
        odb_post_process = ODBPostProcess(session, cluster, None)

        # The admin.invoke user's password may possibly be in one of these attributes,
        # but if it is not, generate a new one.
        admin_invoke_password = getattr(args, 'admin-invoke-password', None)

        if not admin_invoke_password:
            admin_invoke_password = getattr(args, 'admin_invoke_password', None)

        if not admin_invoke_password:
            admin_invoke_password = new_password()

        admin_invoke_sec = HTTPBasicAuth(None, 'admin.invoke', True, 'admin.invoke', 'Zato admin invoke', admin_invoke_password, cluster)
        session.add(admin_invoke_sec)

        pubapi_sec = HTTPBasicAuth(None, 'pubapi', True, 'pubapi', 'Zato public API', new_password(), cluster)
        session.add(pubapi_sec)

        internal_invoke_sec = HTTPBasicAuth(None, 'zato.internal.invoke', True, 'zato.internal.invoke.user', 'Zato internal invoker', new_password(), cluster)
        session.add(internal_invoke_sec)

        self.add_default_rbac_permissions(session, cluster)
        root_rbac_role = self.add_default_rbac_roles(session, cluster)
        ide_pub_rbac_role = self.add_rbac_role_and_acct(
            session, cluster, root_rbac_role, 'IDE Publishers', 'ide_publisher', 'ide_publisher')

        # We need to flush the session here, after adding default RBAC permissions
        # which are needed by REST channels with security delegated to RBAC.
        session.flush()

        self.add_internal_services(session, cluster, admin_invoke_sec, pubapi_sec, internal_invoke_sec, ide_pub_rbac_role)
        self.add_ping_services(session, cluster)
        self.add_default_cache(session, cluster)
        self.add_cache_endpoints(session, cluster)
        self.add_crypto_endpoints(session, cluster)
        self.add_pubsub_sec_endpoints(session, cluster)

        # IBM MQ connections / connectors
        self.add_internal_callback_wmq(session, cluster)

        # SFTP connections / connectors
        self.add_sftp_credentials(session, cluster)

        # Account to access cache services with
        self.add_cache_credentials(session, cluster)

        # SSO
        self.add_sso_endpoints(session, cluster)

        # Run ODB post-processing tasks
        odb_post_process.run()

    try:
        session.commit()
    except IntegrityError as e:
        msg = 'SQL IntegrityError caught `{}`'.format(e)
        if self.verbose:
            msg += '\nDetails:`{}`'.format(format_exc())
        self.logger.error(msg)
        session.rollback()

        return self.SYS_ERROR.CLUSTER_NAME_ALREADY_EXISTS

    if show_output:
        if self.verbose:
            msg = 'Successfully created a new cluster [{}]'.format(args.cluster_name)
            self.logger.debug(msg)
        else:
            self.logger.info('OK')
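# Self-contained sketch of the commit / IntegrityError / rollback pattern used at the end of
# execute(). The model, the in-memory SQLite database and the duplicate name below are
# throwaway stand-ins for illustration only, not Zato's actual Cluster model.
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class ClusterStandIn(Base):
    __tablename__ = 'cluster_stand_in'
    id = Column(Integer, primary_key=True)
    name = Column(String(200), unique=True)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

session.add(ClusterStandIn(name='cluster1'))
session.commit()

session.add(ClusterStandIn(name='cluster1'))  # same name again -> unique constraint violation
try:
    session.commit()
except IntegrityError as e:
    session.rollback()
    print('SQL IntegrityError caught `{}`'.format(e))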