def _install_influxdb():
    influxdb_source_url = ctx.node.properties['influxdb_rpm_source_url']

    influxdb_user = '******'
    influxdb_group = 'influxdb'
    influxdb_home = '/opt/influxdb'
    influxdb_log_path = '/var/log/cloudify/influxdb'

    ctx.logger.info('Installing InfluxDB...')
    utils.set_selinux_permissive()

    utils.copy_notice('influxdb')
    utils.mkdir(influxdb_home)
    utils.mkdir(influxdb_log_path)

    utils.yum_install(influxdb_source_url)
    utils.sudo(['rm', '-rf', '/etc/init.d/influxdb'])

    ctx.logger.info('Deploying InfluxDB config.toml...')
    utils.deploy_blueprint_resource(
        '{0}/config.toml'.format(CONFIG_PATH),
        '{0}/shared/config.toml'.format(influxdb_home))

    ctx.logger.info('Fixing user permissions...')
    utils.chown(influxdb_user, influxdb_group, influxdb_home)
    utils.chown(influxdb_user, influxdb_group, influxdb_log_path)

    utils.logrotate('influxdb')
    utils.systemd.configure('influxdb')
def dump_database():
    dumpfile = os.path.join(dconf.DB_DUMP_DIR, dconf.DB_NAME + '.dump')
    if dconf.DB_TYPE == 'oracle':
        if not dconf.ORACLE_FLASH_BACK and file_exists(dumpfile):
            LOG.info('%s already exists ! ', dumpfile)
            return False
    else:
        if file_exists(dumpfile):
            LOG.info('%s already exists ! ', dumpfile)
            return False

    if dconf.DB_TYPE == 'oracle' and dconf.ORACLE_FLASH_BACK:
        LOG.info('create restore point %s for database %s in %s',
                 dconf.RESTORE_POINT, dconf.DB_NAME, dconf.RECOVERY_FILE_DEST)
    else:
        LOG.info('Dump database %s to %s', dconf.DB_NAME, dumpfile)

    if dconf.DB_TYPE == 'oracle':
        if dconf.ORACLE_FLASH_BACK:
            run_sql_script('createRestore.sh', dconf.RESTORE_POINT,
                           dconf.RECOVERY_FILE_DEST_SIZE,
                           dconf.RECOVERY_FILE_DEST)
        else:
            run_sql_script('dumpOracle.sh', dconf.DB_USER, dconf.DB_PASSWORD,
                           dconf.DB_NAME, dconf.DB_DUMP_DIR)
    elif dconf.DB_TYPE == 'postgres':
        run('PGPASSWORD={} pg_dump -U {} -h {} -F c -d {} > {}'.format(
            dconf.DB_PASSWORD, dconf.DB_USER, dconf.DB_HOST, dconf.DB_NAME,
            dumpfile))
    elif dconf.DB_TYPE == 'mysql':
        sudo('mysqldump --user={} --password={} --databases {} > {}'.format(
            dconf.DB_USER, dconf.DB_PASSWORD, dconf.DB_NAME, dumpfile))
    else:
        raise Exception("Database Type {} Not Implemented !".format(
            dconf.DB_TYPE))
    return True
def _install_influxdb():
    influxdb_source_url = ctx_properties['influxdb_rpm_source_url']

    influxdb_user = '******'
    influxdb_group = 'influxdb'
    influxdb_home = '/opt/influxdb'
    influxdb_log_path = '/var/log/cloudify/influxdb'

    ctx.logger.info('Installing InfluxDB...')
    utils.set_selinux_permissive()

    utils.copy_notice(INFLUX_SERVICE_NAME)
    utils.mkdir(influxdb_home)
    utils.mkdir(influxdb_log_path)

    utils.yum_install(influxdb_source_url, service_name=INFLUX_SERVICE_NAME)
    utils.sudo(['rm', '-rf', '/etc/init.d/influxdb'])

    ctx.logger.info('Deploying InfluxDB config.toml...')
    utils.deploy_blueprint_resource(
        '{0}/config.toml'.format(CONFIG_PATH),
        '{0}/shared/config.toml'.format(influxdb_home),
        INFLUX_SERVICE_NAME)

    ctx.logger.info('Fixing user permissions...')
    utils.chown(influxdb_user, influxdb_group, influxdb_home)
    utils.chown(influxdb_user, influxdb_group, influxdb_log_path)

    utils.systemd.configure(INFLUX_SERVICE_NAME)
    # Provided with InfluxDB's package. Will be removed if it exists.
    utils.remove('/etc/init.d/influxdb')
    utils.logrotate(INFLUX_SERVICE_NAME)
def _install_influxdb():
    influxdb_source_url = ctx_properties['influxdb_rpm_source_url']

    influxdb_user = '******'
    influxdb_group = 'influxdb'
    influxdb_home = '/opt/influxdb'
    influxdb_log_path = '/var/log/cloudify/influxdb'

    ctx.logger.info('Installing InfluxDB...')
    utils.set_selinux_permissive()

    utils.copy_notice(INFLUX_SERVICE_NAME)
    utils.mkdir(influxdb_home)
    utils.mkdir(influxdb_log_path)

    utils.yum_install(influxdb_source_url, service_name=INFLUX_SERVICE_NAME)
    utils.sudo(['rm', '-rf', '/etc/init.d/influxdb'])

    ctx.logger.info('Deploying InfluxDB config.toml...')
    utils.deploy_blueprint_resource(
        '{0}/config.toml'.format(CONFIG_PATH),
        '{0}/shared/config.toml'.format(influxdb_home),
        INFLUX_SERVICE_NAME)

    ctx.logger.info('Fixing user permissions...')
    utils.chown(influxdb_user, influxdb_group, influxdb_home)
    utils.chown(influxdb_user, influxdb_group, influxdb_log_path)

    utils.systemd.configure(INFLUX_SERVICE_NAME)
    utils.logrotate(INFLUX_SERVICE_NAME)
def install_system_packages():
    """
    Perform the installation of system-level packages needed by NGAS to work.
    """
    # Install required packages
    linux_flavor = get_linux_flavor()
    if linux_flavor in ['CentOS', 'Amazon Linux']:
        # Update the machine completely
        errmsg = sudo('yum --assumeyes --quiet update', combine_stderr=True,
                      warn_only=True)
        processCentOSErrMsg(errmsg)
        install_yum(YUM_PACKAGES)
        if linux_flavor == 'CentOS':
            sudo('/etc/init.d/iptables stop')  # CentOS firewall blocks NGAS port!
    elif linux_flavor in ['Ubuntu', 'Debian']:
        errmsg = sudo('apt-get -qq -y update', combine_stderr=True,
                      warn_only=True)
        install_apt(APT_PACKAGES)
    elif linux_flavor in ['SUSE', 'SLES-SP2', 'SLES-SP3', 'SLES', 'openSUSE']:
        errmsg = sudo('zypper -n -q patch', combine_stderr=True,
                      warn_only=True)
        install_zypper(SLES_PACKAGES)
    elif linux_flavor == 'Darwin':
        pkg_mgr = check_brew_port()
        if pkg_mgr is None:
            install_homebrew()
            pkg_mgr = 'brew'
        if pkg_mgr == 'brew':
            for package in BREW_PACKAGES:
                install_brew(package)
        elif pkg_mgr == 'port':
            for package in PORT_PACKAGES:
                install_port(package)
    else:
        abort("Unsupported linux flavor detected: {0}".format(linux_flavor))
def _install_stage():
    nodejs_source_url = ctx_properties['nodejs_tar_source_url']
    stage_source_url = ctx_properties['stage_tar_source_url']

    if not utils.resource_factory.local_resource_exists(stage_source_url):
        ctx.logger.info('Stage package not found in manager resources '
                        'package. Stage will not be installed.')
        ctx.instance.runtime_properties['skip_installation'] = 'true'
        return

    # injected as an input to the script
    ctx.instance.runtime_properties['influxdb_endpoint_ip'] = \
        os.environ.get('INFLUXDB_ENDPOINT_IP')

    utils.set_selinux_permissive()
    utils.copy_notice(SERVICE_NAME)

    utils.mkdir(NODEJS_DIR)
    utils.mkdir(HOME_DIR)
    utils.mkdir(LOG_DIR)

    utils.create_service_user(STAGE_USER, STAGE_GROUP, HOME_DIR)

    ctx.logger.info('Installing NodeJS...')
    nodejs = utils.download_cloudify_resource(nodejs_source_url, SERVICE_NAME)
    utils.untar(nodejs, NODEJS_DIR)
    utils.remove(nodejs)

    ctx.logger.info('Installing Cloudify Stage (UI)...')
    stage_tar = utils.download_cloudify_resource(stage_source_url,
                                                 SERVICE_NAME)
    if 'community' in stage_tar:
        ctx.logger.info('Community edition')
        ctx.instance.runtime_properties['community_mode'] = '-mode community'
    else:
        ctx.instance.runtime_properties['community_mode'] = ''
    utils.untar(stage_tar, HOME_DIR)
    utils.remove(stage_tar)

    ctx.logger.info('Fixing permissions...')
    utils.chown(STAGE_USER, STAGE_GROUP, HOME_DIR)
    utils.chown(STAGE_USER, STAGE_GROUP, NODEJS_DIR)
    utils.chown(STAGE_USER, STAGE_GROUP, LOG_DIR)

    utils.deploy_sudo_command_script(
        'restore-snapshot.py',
        'Restore stage directories from a snapshot path',
        component=SERVICE_NAME,
        allow_as=STAGE_USER)
    utils.chmod('a+rx', '/opt/cloudify/stage/restore-snapshot.py')
    utils.sudo(['usermod', '-aG', utils.CLOUDIFY_GROUP, STAGE_USER])

    utils.logrotate(SERVICE_NAME)
    utils.systemd.configure(SERVICE_NAME)

    backend_dir = join(HOME_DIR, 'backend')
    npm_path = join(NODEJS_DIR, 'bin', 'npm')
    subprocess.check_call(
        'cd {0}; {1} run db-migrate'.format(backend_dir, npm_path),
        shell=True)
def _install_rabbitmq():
    erlang_rpm_source_url = ctx.node.properties['erlang_rpm_source_url']
    rabbitmq_rpm_source_url = ctx.node.properties['rabbitmq_rpm_source_url']
    # TODO: maybe we don't need this env var
    os.putenv('RABBITMQ_FD_LIMIT',
              str(ctx.node.properties['rabbitmq_fd_limit']))
    rabbitmq_log_path = '/var/log/cloudify/rabbitmq'
    rabbitmq_username = ctx.node.properties['rabbitmq_username']
    rabbitmq_password = ctx.node.properties['rabbitmq_password']
    rabbitmq_cert_public = ctx.node.properties['rabbitmq_cert_public']
    rabbitmq_ssl_enabled = ctx.node.properties['rabbitmq_ssl_enabled']
    rabbitmq_cert_private = ctx.node.properties['rabbitmq_cert_private']

    ctx.logger.info('Installing RabbitMQ...')
    utils.set_selinux_permissive()

    utils.copy_notice('rabbitmq')
    utils.mkdir(rabbitmq_log_path)

    utils.yum_install(erlang_rpm_source_url)
    utils.yum_install(rabbitmq_rpm_source_url)

    utils.logrotate('rabbitmq')

    utils.deploy_blueprint_resource(
        '{0}/kill-rabbit'.format(CONFIG_PATH),
        '/usr/local/bin/kill-rabbit')
    utils.chmod('500', '/usr/local/bin/kill-rabbit')

    utils.systemd.configure('rabbitmq')

    ctx.logger.info('Configuring File Descriptors Limit...')
    utils.deploy_blueprint_resource(
        '{0}/rabbitmq_ulimit.conf'.format(CONFIG_PATH),
        '/etc/security/limits.d/rabbitmq.conf')

    utils.systemd.systemctl('daemon-reload')

    utils.chown('rabbitmq', 'rabbitmq', rabbitmq_log_path)

    utils.systemd.start('cloudify-rabbitmq')

    time.sleep(10)
    utils.wait_for_port(5672)

    ctx.logger.info('Enabling RabbitMQ Plugins...')
    # Occasional timing issues with rabbitmq starting have resulted in
    # failures when first trying to enable plugins
    utils.sudo(['rabbitmq-plugins', 'enable', 'rabbitmq_management'],
               retries=5)
    utils.sudo(['rabbitmq-plugins', 'enable', 'rabbitmq_tracing'], retries=5)

    _clear_guest_permissions_if_guest_exists()
    _create_user_and_set_permissions(rabbitmq_username, rabbitmq_password)

    _set_security(
        rabbitmq_ssl_enabled,
        rabbitmq_cert_private,
        rabbitmq_cert_public)

    utils.systemd.stop('cloudify-rabbitmq', retries=5)
def _deploy_security_configuration():
    ctx.logger.info('Deploying REST Security configuration file...')

    # Generating random hash salt and secret key
    security_configuration = {
        'hash_salt': base64.b64encode(os.urandom(32)),
        'secret_key': base64.b64encode(os.urandom(32)),
        'encoding_alphabet': _random_alphanumeric(),
        'encoding_block_size': 24,
        'encoding_min_length': 5
    }

    # Pre-creating paths so permissions fix can work correctly
    # in mgmtworker
    for path in utils.MANAGER_RESOURCES_SNAPSHOT_PATHS:
        utils.mkdir(path)
    utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP, utils.MANAGER_RESOURCES_HOME)
    utils.sudo(['ls', '-la', '/opt/manager'])

    current_props = runtime_props['security_configuration']
    current_props.update(security_configuration)
    runtime_props['security_configuration'] = current_props

    for key in ['admin_username', 'admin_password']:
        security_configuration[key] = current_props[key]

    fd, path = tempfile.mkstemp()
    os.close(fd)
    with open(path, 'w') as f:
        json.dump(security_configuration, f)
    rest_security_path = join(runtime_props['home_dir'], 'rest-security.conf')
    utils.move(path, rest_security_path)
    utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP, rest_security_path)
    utils.chmod('g+r', rest_security_path)
def _deploy_security_configuration():
    ctx.logger.info('Deploying REST Security configuration file...')

    # Generating random hash salt and secret key
    security_configuration = {
        'hash_salt': base64.b64encode(os.urandom(32)),
        'secret_key': base64.b64encode(os.urandom(32)),
        'encoding_alphabet': _random_alphanumeric(),
        'encoding_block_size': 24,
        'encoding_min_length': 5
    }

    # Pre-creating paths so permissions fix can work correctly
    # in mgmtworker
    for path in utils.MANAGER_RESOURCES_SNAPSHOT_PATHS:
        utils.mkdir(path)
    utils.chown(
        CLOUDIFY_USER, CLOUDIFY_GROUP, utils.MANAGER_RESOURCES_HOME)
    utils.sudo(['ls', '-la', '/opt/manager'])

    current_props = runtime_props['security_configuration']
    current_props.update(security_configuration)
    runtime_props['security_configuration'] = current_props

    fd, path = tempfile.mkstemp()
    os.close(fd)
    with open(path, 'w') as f:
        json.dump(security_configuration, f)
    rest_security_path = join(runtime_props['home_dir'], 'rest-security.conf')
    utils.move(path, rest_security_path)
    utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP, rest_security_path)
def restart_database():
    if dconf.DB_TYPE == 'postgres':
        if dconf.HOST_CONN == 'docker':
            # Restarting the docker container here is the cleanest way to do
            # it because there's no init system running and the only process
            # running in the container is postgres itself
            local('docker restart {}'.format(dconf.CONTAINER_NAME))
        else:
            sudo('pg_ctl -D {} -w -t 600 restart -m fast'.format(
                dconf.PG_DATADIR), user=dconf.ADMIN_USER, capture=False)
    elif dconf.DB_TYPE == 'oracle':
        db_log_path = os.path.join(
            os.path.split(dconf.DB_CONF)[0], 'startup.log')
        local_log_path = os.path.join(dconf.LOG_DIR, 'startup.log')
        local_logs_path = os.path.join(dconf.LOG_DIR, 'startups.log')
        run_sql_script('restartOracle.sh', db_log_path)
        get(db_log_path, local_log_path)
        with open(local_log_path, 'r') as fin, \
                open(local_logs_path, 'a') as fout:
            lines = fin.readlines()
            for line in lines:
                if line.startswith('ORACLE instance started.'):
                    return True
                if not line.startswith('SQL>'):
                    fout.write(line)
            fout.write('\n')
        return False
    else:
        raise Exception("Database Type {} Not Implemented !".format(
            dconf.DB_TYPE))
    return True
def set_rabbitmq_policy(name, expression, policy):
    policy = json.dumps(policy)
    ctx.logger.info('Setting policy {0} on queues {1} to {2}'.format(
        name, expression, policy))
    # shlex screws this up because we need to pass json and shlex
    # strips quotes so we explicitly pass it as a list.
    utils.sudo(['rabbitmqctl', 'set_policy', name, expression, policy,
                '--apply-to', 'queues'])
def free_cache():
    if dconf.HOST_CONN not in ['docker', 'remote_docker']:
        with show('everything'), settings(warn_only=True):  # pylint: disable=not-context-manager
            res = sudo("sh -c \"echo 3 > /proc/sys/vm/drop_caches\"")
            if res.failed:
                LOG.error('%s (return code %s)', res.stderr.strip(),
                          res.return_code)
    else:
        res = sudo("sh -c \"echo 3 > /proc/sys/vm/drop_caches\"",
                   remote_only=True)
def _disable_requiretty():
    script_dest = '/tmp/configure_manager.sh'
    utils.deploy_blueprint_resource(
        'components/manager/scripts/configure_manager.sh',
        script_dest,
        NODE_NAME)
    utils.sudo('chmod +x {0}'.format(script_dest))
    utils.sudo(script_dest)
def install_zypper(packages):
    """
    Install packages using zypper (SLES)
    """
    sudo('zypper --non-interactive install {0}'.format(
        ' '.join(packages + extra_packages())),
        combine_stderr=True, warn_only=True)
def set_rabbitmq_policy(name, expression, policy):
    policy = json.dumps(policy)
    ctx.logger.debug('Setting policy {0} on queues {1} to {2}'.format(
        name, expression, policy))
    # shlex screws this up because we need to pass json and shlex
    # strips quotes so we explicitly pass it as a list.
    utils.sudo(['rabbitmqctl', 'set_policy', name, expression, policy,
                '--apply-to', 'queues'])
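# Usage sketch for set_rabbitmq_policy (the policy name, queue regex and
# limits below are hypothetical illustrations, not values from this snippet).
# json.dumps turns the dict into the JSON string rabbitmqctl expects, and
# passing the argv as a list avoids shlex stripping the JSON quotes:
#
#     set_rabbitmq_policy(
#         name='logs_queue_message_policy',
#         expression='^cloudify-logs$',
#         policy={'message-ttl': 60000, 'max-length': 1000000})
#
# which effectively runs:
#     rabbitmqctl set_policy logs_queue_message_policy '^cloudify-logs$' \
#         '{"message-ttl": 60000, "max-length": 1000000}' --apply-to queues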
def install_logstash():
    logstash_unit_override = '/etc/systemd/system/logstash.service.d'

    logstash_source_url = ctx_properties['logstash_rpm_source_url']

    rabbitmq_username = ctx_properties['rabbitmq_username']
    rabbitmq_password = ctx_properties['rabbitmq_password']

    logstash_log_path = '/var/log/cloudify/logstash'
    logstash_conf_path = '/etc/logstash/conf.d'

    # injected as an input to the script
    ctx.instance.runtime_properties['es_endpoint_ip'] = \
        os.environ.get('ES_ENDPOINT_IP')
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip(
            ctx_properties.get('rabbitmq_endpoint_ip'))

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by
    # the deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        utils.error_exit(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    ctx.logger.info('Installing Logstash...')
    utils.set_selinux_permissive()
    utils.copy_notice(LOGSTASH_SERVICE_NAME)

    utils.yum_install(logstash_source_url, service_name=LOGSTASH_SERVICE_NAME)

    utils.mkdir(logstash_log_path)
    utils.chown('logstash', 'logstash', logstash_log_path)

    ctx.logger.info('Creating systemd unit override...')
    utils.mkdir(logstash_unit_override)
    utils.deploy_blueprint_resource(
        '{0}/restart.conf'.format(CONFIG_PATH),
        '{0}/restart.conf'.format(logstash_unit_override),
        LOGSTASH_SERVICE_NAME)

    ctx.logger.info('Deploying Logstash conf...')
    utils.deploy_blueprint_resource(
        '{0}/logstash.conf'.format(CONFIG_PATH),
        '{0}/logstash.conf'.format(logstash_conf_path),
        LOGSTASH_SERVICE_NAME)

    ctx.logger.info('Deploying Logstash sysconfig...')
    utils.deploy_blueprint_resource(
        '{0}/logstash'.format(CONFIG_PATH),
        '/etc/sysconfig/logstash',
        LOGSTASH_SERVICE_NAME)

    utils.logrotate(LOGSTASH_SERVICE_NAME)
    utils.sudo(['/sbin/chkconfig', 'logstash', 'on'])
    utils.clean_var_log_dir(LOGSTASH_SERVICE_NAME)
def configure_script(script_name, description):
    utils.deploy_sudo_command_script(
        script_name,
        description,
        component=SERVICE_NAME,
        allow_as=STAGE_USER,
    )
    utils.chmod('a+rx', '/opt/cloudify/stage/' + script_name)
    utils.sudo(['usermod', '-aG', utils.CLOUDIFY_GROUP, STAGE_USER])
def _configure_index_rotation():
    ctx.logger.info('Configuring Elasticsearch Index Rotation cronjob for '
                    'logstash-YYYY.mm.dd index patterns...')
    utils.deploy_blueprint_resource(
        'components/elasticsearch/scripts/rotate_es_indices',
        '/etc/cron.daily/rotate_es_indices',
        ES_SERVICE_NAME)
    utils.chown('root', 'root', '/etc/cron.daily/rotate_es_indices')
    # VALIDATE!
    utils.sudo('chmod +x /etc/cron.daily/rotate_es_indices')
def install_riemann():
    langohr_source_url = ctx_properties['langohr_jar_source_url']
    daemonize_source_url = ctx_properties['daemonize_rpm_source_url']
    riemann_source_url = ctx_properties['riemann_rpm_source_url']
    # Needed for Riemann's config
    cloudify_resources_url = ctx_properties['cloudify_resources_url']
    rabbitmq_username = ctx_properties['rabbitmq_username']
    rabbitmq_password = ctx_properties['rabbitmq_password']

    riemann_config_path = '/etc/riemann'
    riemann_log_path = '/var/log/cloudify/riemann'
    langohr_home = '/opt/lib'
    extra_classpath = '{0}/langohr.jar'.format(langohr_home)

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by
    # the deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        ctx.abort_operation(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    rabbit_props = utils.ctx_factory.get('rabbitmq')
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip(
            rabbit_props.get('rabbitmq_endpoint_ip'))
    ctx.instance.runtime_properties['rabbitmq_username'] = \
        rabbit_props.get('rabbitmq_username')
    ctx.instance.runtime_properties['rabbitmq_password'] = \
        rabbit_props.get('rabbitmq_password')

    ctx.logger.info('Installing Riemann...')
    utils.set_selinux_permissive()

    utils.copy_notice(RIEMANN_SERVICE_NAME)
    utils.mkdir(riemann_log_path)
    utils.mkdir(langohr_home)
    utils.mkdir(riemann_config_path)
    utils.mkdir('{0}/conf.d'.format(riemann_config_path))

    langohr = utils.download_cloudify_resource(langohr_source_url,
                                               RIEMANN_SERVICE_NAME)
    utils.sudo(['cp', langohr, extra_classpath])
    ctx.logger.info('Applying Langohr permissions...')
    utils.sudo(['chmod', '644', extra_classpath])

    utils.yum_install(daemonize_source_url, service_name=RIEMANN_SERVICE_NAME)
    utils.yum_install(riemann_source_url, service_name=RIEMANN_SERVICE_NAME)
    utils.logrotate(RIEMANN_SERVICE_NAME)

    ctx.logger.info('Downloading cloudify-manager Repository...')
    manager_repo = utils.download_cloudify_resource(cloudify_resources_url,
                                                    RIEMANN_SERVICE_NAME)
    ctx.logger.info('Extracting Manager Repository...')
    utils.untar(manager_repo, '/tmp')
def change_conf(next_conf=None):
    signal = "# configurations recommended by ottertune:\n"
    next_conf = next_conf or {}

    tmp_conf_in = os.path.join(dconf.TEMP_DIR,
                               os.path.basename(dconf.DB_CONF) + '.in')
    get(dconf.DB_CONF, tmp_conf_in)
    with open(tmp_conf_in, 'r') as f:
        lines = f.readlines()

    if signal not in lines:
        lines += ['\n', signal]

    signal_idx = lines.index(signal)
    lines = lines[0:signal_idx + 1]
    if dconf.DB_TYPE == 'mysql':
        lines.append('[mysqld]\n')

    if dconf.BASE_DB_CONF:
        assert isinstance(dconf.BASE_DB_CONF, dict), \
            (type(dconf.BASE_DB_CONF), dconf.BASE_DB_CONF)
        for name, value in sorted(dconf.BASE_DB_CONF.items()):
            if value is None:
                lines.append('{}\n'.format(name))
            else:
                lines.append('{} = {}\n'.format(name, value))

    if isinstance(next_conf, str):
        with open(next_conf, 'r') as f:
            recommendation = json.load(
                f, encoding="UTF-8",
                object_pairs_hook=OrderedDict)['recommendation']
    else:
        recommendation = next_conf

    assert isinstance(recommendation, dict)
    for name, value in recommendation.items():
        if dconf.DB_TYPE == 'oracle' and isinstance(value, str):
            value = value.strip('B')
        # If innodb_flush_method is set to NULL on a Unix-like system,
        # the fsync option is used by default.
        if name == 'innodb_flush_method' and value == '':
            value = "fsync"
        lines.append('{} = {}\n'.format(name, value))
    lines.append('\n')

    tmp_conf_out = os.path.join(dconf.TEMP_DIR,
                                os.path.basename(dconf.DB_CONF) + '.out')
    with open(tmp_conf_out, 'w') as f:
        f.write(''.join(lines))

    sudo('cp {0} {0}.ottertune.bak'.format(dconf.DB_CONF), remote_only=True)
    put(tmp_conf_out, dconf.DB_CONF, use_sudo=True)
    local('rm -f {} {}'.format(tmp_conf_in, tmp_conf_out))
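# Illustration of the signal-marker scheme used by change_conf above
# (hypothetical config contents, not taken from a real run): the file is
# truncated just after the marker line on every call, so earlier tuning
# rounds never accumulate stale settings below it:
#
#     max_connections = 100                  <- untouched base section
#     # configurations recommended by ottertune:
#     shared_buffers = 4GB                   <- regenerated every call from
#     work_mem = 64MB                           BASE_DB_CONF + recommendation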
def _create_user_and_set_permissions(rabbitmq_username, rabbitmq_password):
    if not check_if_user_exists(rabbitmq_username):
        ctx.logger.info('Creating new user {0}:{1} and setting '
                        'permissions...'.format(rabbitmq_username,
                                                rabbitmq_password))
        utils.sudo(['rabbitmqctl', 'add_user', rabbitmq_username,
                    rabbitmq_password])
        utils.sudo(['rabbitmqctl', 'set_permissions', rabbitmq_username,
                    '.*', '.*', '.*'], retries=5)
def install_logstash():
    logstash_unit_override = '/etc/systemd/system/logstash.service.d'

    logstash_source_url = ctx.node.properties['logstash_rpm_source_url']

    rabbitmq_username = ctx.node.properties['rabbitmq_username']
    rabbitmq_password = ctx.node.properties['rabbitmq_password']

    logstash_log_path = '/var/log/cloudify/logstash'
    logstash_conf_path = '/etc/logstash/conf.d'

    # injected as an input to the script
    ctx.instance.runtime_properties['es_endpoint_ip'] = \
        os.environ.get('ES_ENDPOINT_IP')
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip()

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by
    # the deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        utils.error_exit(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    ctx.logger.info('Installing Logstash...')
    utils.set_selinux_permissive()
    utils.copy_notice('logstash')

    utils.yum_install(logstash_source_url)

    utils.mkdir(logstash_log_path)
    utils.chown('logstash', 'logstash', logstash_log_path)

    ctx.logger.info('Creating systemd unit override...')
    utils.mkdir(logstash_unit_override)
    utils.deploy_blueprint_resource(
        '{0}/restart.conf'.format(CONFIG_PATH),
        '{0}/restart.conf'.format(logstash_unit_override))

    ctx.logger.info('Deploying Logstash conf...')
    utils.deploy_blueprint_resource(
        '{0}/logstash.conf'.format(CONFIG_PATH),
        '{0}/logstash.conf'.format(logstash_conf_path))

    ctx.logger.info('Deploying Logstash sysconfig...')
    utils.deploy_blueprint_resource(
        '{0}/logstash'.format(CONFIG_PATH),
        '/etc/sysconfig/logstash')

    utils.logrotate('logstash')
    utils.sudo(['/sbin/chkconfig', 'logstash', 'on'])
    utils.clean_var_log_dir('logstash')
def install_apt(packages):
    """
    Install packages using APT
    """
    # We need to iterate over each one because if at least one of them
    # is actually not a package (misspelled, doesn't exist anymore, debian-
    # or ubuntu-specific, etc) the whole install process would fail
    # On the other hand there appears to be no flag to ignore these errors
    # on apt-get (tested on Ubuntu 12.04)
    for pkg in packages + extra_packages():
        sudo('apt-get -qq -y install {0}'.format(pkg))
def _configure_index_rotation():
    ctx.logger.info('Configuring index rotation...')
    ctx.logger.debug(
        'Setting up curator rotation cronjob for logstash-YYYY.mm.dd '
        'index patterns...')
    utils.deploy_blueprint_resource(
        'components/elasticsearch/scripts/rotate_es_indices',
        '/etc/cron.daily/rotate_es_indices',
        ES_SERVICE_NAME)
    utils.chown('root', 'root', '/etc/cron.daily/rotate_es_indices')
    # TODO: VALIDATE!
    # TODO: use utils.chmod
    utils.sudo('chmod +x /etc/cron.daily/rotate_es_indices')
def _create_default_db(db_name, username, password):
    ctx.logger.info('Creating default postgresql database: {0}...'.format(
        db_name))
    ps_config_source = 'components/postgresql/config/create_default_db.sh'
    ps_config_destination = join(tempfile.gettempdir(),
                                 'create_default_db.sh')
    ctx.download_resource(source=ps_config_source,
                          destination=ps_config_destination)
    utils.chmod('+x', ps_config_destination)
    # TODO: Can't we use a rest call here? Is there such a thing?
    utils.sudo('su - postgres -c "{cmd} {db} {user} {password}"'
               .format(cmd=ps_config_destination,
                       db=db_name,
                       user=username,
                       password=password))
def _deploy_db_cleanup_script():
    """Copy the script that deletes logs and events from the Cloudify DB
    to /etc/cloudify, so it will be available to the users."""
    try:
        script_name = 'delete_logs_and_events_from_db.py'
        script_destination = join(utils.get_exec_tempdir(), script_name)
        ctx.download_resource(join(CONFIG_PATH, script_name),
                              script_destination)
        utils.sudo(['mv', script_destination,
                    join(utils.CLOUDIFY_HOME_DIR, script_name)])
    except Exception as ex:
        ctx.logger.info('Failed to deploy delete_logs script. Error: {0}'
                        ''.format(ex))
def configure_mgmtworker():
    celery_work_dir = '{0}/work'.format(runtime_props['home_dir'])
    runtime_props['file_server_root'] = utils.MANAGER_RESOURCES_HOME

    ctx.logger.info('Configuring Management worker...')
    broker_conf_path = join(celery_work_dir, 'broker_config.json')
    utils.deploy_blueprint_resource(
        '{0}/broker_config.json'.format(CONFIG_PATH),
        broker_conf_path,
        SERVICE_NAME)
    # The config contains credentials, do not let the world read it
    utils.sudo(['chmod', '440', broker_conf_path])
    utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP, broker_conf_path)
    utils.systemd.configure(SERVICE_NAME)
    utils.logrotate(SERVICE_NAME)
def _init_postgresql():
    ctx.logger.info('Init PostgreSQL DATA folder...')
    postgresql95_setup = '/usr/pgsql-9.5/bin/postgresql95-setup'
    utils.sudo(command=[postgresql95_setup, 'initdb'])

    ctx.logger.info('Starting PostgreSQL server...')
    utils.systemd.enable(service_name=PS_SERVICE_NAME, append_prefix=False)
    utils.systemd.start(service_name=PS_SERVICE_NAME, append_prefix=False)

    ctx.logger.info('Setting PostgreSQL logs path...')
    ps_95_logs_path = "/var/lib/pgsql/9.5/data/pg_log"
    ps_logs_path = "/var/log/cloudify/postgresql"
    utils.mkdir(ps_logs_path)
    utils.ln(source=ps_95_logs_path, target=ps_logs_path, params='-s')
def install_docker():
    ctx.logger.info('Installing the latest version of docker...')
    script_name = 'get_docker.sh'
    utils.run('curl -o {0} https://get.docker.com/'.format(script_name),
              workdir=utils.WORKDIR)
    utils.run('chmod +x {0}'.format(script_name), workdir=utils.WORKDIR)
    utils.sudo('bash {0}'.format(script_name), workdir=utils.WORKDIR)
    utils.sudo('usermod -aG docker {0}'.format(
        ctx.node.properties['ssh_user']))
    utils.sudo('service docker stop')
    _create_cloudify_bridge()
    utils.sudo("sed -i '$ a DOCKER_OPTS=\"--bridge cfy0 "
               "--host 172.20.0.1\"' /etc/default/docker")
    utils.sudo('service docker start')
def download(url, target=None, root=False):
    if target is None:
        parts = urlparse.urlparse(url)
        target = parts.path.split('/')[-1]

    if check_command('wget'):
        cmd = 'wget --no-check-certificate -q -O {0} {1}'.format(target, url)
    elif check_command('curl'):
        cmd = 'curl -o {0} {1}'.format(target, url)
    else:
        raise Exception("Neither wget nor curl are installed")

    if root:
        sudo(cmd)
    else:
        run(cmd)
    return target
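# Usage sketch for download (hypothetical URL): when no target is given, the
# filename defaults to the last path segment of the URL; root=True runs the
# fetch via sudo instead of run:
#
#     download('https://example.com/releases/pkg-1.0.tar.gz')
#     # -> fetches to './pkg-1.0.tar.gz' and returns that name
#     download('https://example.com/releases/pkg-1.0.tar.gz',
#              target='/opt/pkg.tgz', root=True)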
def _create_db_tables_and_add_users():
    ctx.logger.info('Creating SQL tables and adding admin users...')
    create_script_path = 'components/restservice/config' \
                         '/create_tables_and_add_users.py'
    create_script_destination = join(tempfile.gettempdir(),
                                     'create_tables_and_add_users.py')
    ctx.download_resource(source=create_script_path,
                          destination=create_script_destination)

    # Directly calling with this python bin, in order to make sure it's run
    # in the correct venv
    python_path = '{0}/env/bin/python'.format(REST_SERVICE_HOME)
    runtime_props = ctx.instance.runtime_properties
    args_dict = json.loads(runtime_props['security_configuration'])
    args_dict['postgresql_host'] = runtime_props['postgresql_host']

    # The script won't have access to the ctx, so we dump the relevant args
    # to a JSON file, and pass its path to the script
    args_file_location = join(tempfile.gettempdir(), 'security_config.json')
    with open(args_file_location, 'w') as f:
        json.dump(args_dict, f)

    result = utils.sudo(
        [python_path, create_script_destination, args_file_location])
    _log_results(result)
    utils.remove(args_file_location)
def install_yum(packages):
    """
    Install packages using YUM
    """
    errmsg = sudo('yum --assumeyes --quiet install {0}'.format(
        ' '.join(packages + extra_packages())),
        combine_stderr=True, warn_only=True)
    processCentOSErrMsg(errmsg)
def _create_db_tables_and_add_defaults():
    ctx.logger.info('Creating SQL tables and adding default values...')
    script_name = 'create_tables_and_add_defaults.py'
    source_script_path = join('components/restservice/config', script_name)
    destination_script_path = join(tempfile.gettempdir(), script_name)
    ctx.download_resource(source_script_path, destination_script_path)

    args_dict = runtime_props['security_configuration']
    args_dict['amqp_host'] = runtime_props['rabbitmq_endpoint_ip']
    args_dict['amqp_username'] = runtime_props['rabbitmq_username']
    args_dict['amqp_password'] = runtime_props['rabbitmq_password']
    args_dict['postgresql_host'] = runtime_props['postgresql_host']
    args_dict['db_migrate_dir'] = join(
        utils.MANAGER_RESOURCES_HOME, 'cloudify', 'migrations')

    # The script won't have access to the ctx, so we dump the relevant args
    # to a JSON file, and pass its path to the script
    args_file_location = join(tempfile.gettempdir(), 'security_config.json')
    with open(args_file_location, 'w') as f:
        json.dump(args_dict, f)

    # Directly calling with this python bin, in order to make sure it's run
    # in the correct venv
    python_path = join(runtime_props['home_dir'], 'env', 'bin', 'python')
    result = utils.sudo(
        [python_path, destination_script_path, args_file_location])
    _log_results(result)
    utils.remove(args_file_location)
    utils.remove(destination_script_path)
def _create_db_tables_and_add_defaults():
    ctx.logger.info('Creating SQL tables and adding default values...')
    script_name = 'create_tables_and_add_defaults.py'
    source_script_path = join('components/restservice/config', script_name)
    destination_script_path = join(tempfile.gettempdir(), script_name)
    ctx.download_resource(source_script_path, destination_script_path)

    args_dict = runtime_props['security_configuration']
    args_dict['amqp_host'] = runtime_props['rabbitmq_endpoint_ip']
    args_dict['amqp_username'] = ctx_properties['rabbitmq_username']
    args_dict['amqp_password'] = ctx_properties['rabbitmq_password']
    args_dict['postgresql_host'] = ctx_properties['postgresql_host']
    args_dict['authorization_file_path'] = \
        runtime_props['authorization_file_path']
    args_dict['db_migrate_dir'] = join(utils.MANAGER_RESOURCES_HOME,
                                       'cloudify', 'migrations')

    # The script won't have access to the ctx, so we dump the relevant args
    # to a JSON file, and pass its path to the script
    args_file_location = join(tempfile.gettempdir(), 'security_config.json')
    with open(args_file_location, 'w') as f:
        json.dump(args_dict, f)

    # Directly calling with this python bin, in order to make sure it's run
    # in the correct venv
    python_path = join(runtime_props['home_dir'], 'env', 'bin', 'python')
    result = utils.sudo(
        [python_path, destination_script_path, args_file_location])
    _log_results(result)
    utils.remove(args_file_location)
    utils.remove(destination_script_path)
def status(job, status='start/running'):
    result = sudo('status %s' % job)
    if result.succeeded:
        return result.find(status) > -1
    else:
        return False
def configure_logstash():
    logstash_conf_path = '/etc/logstash/conf.d'

    runtime_properties = ctx.instance.runtime_properties
    rabbitmq_username = runtime_properties.get('rabbitmq_username')
    rabbitmq_password = runtime_properties.get('rabbitmq_password')

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by
    # the deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        ctx.abort_operation(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    ctx.logger.info('Deploying Logstash configuration...')
    utils.deploy_blueprint_resource(
        '{0}/logstash.conf'.format(CONFIG_PATH),
        '{0}/logstash.conf'.format(logstash_conf_path),
        LOGSTASH_SERVICE_NAME)

    # Due to a bug in the handling of configuration files,
    # configuration files with the same name cannot be deployed.
    # Since the logrotate config file is called `logstash`,
    # we change the name of the logstash env vars config file
    # from logstash to cloudify-logstash to be consistent with
    # other service env var files.
    init_file = '/etc/init.d/logstash'
    utils.replace_in_file(
        'sysconfig/\$name',
        'sysconfig/cloudify-$name',
        init_file)
    utils.chmod('755', init_file)
    utils.chown('root', 'root', init_file)

    ctx.logger.debug('Deploying Logstash sysconfig...')
    utils.deploy_blueprint_resource(
        '{0}/cloudify-logstash'.format(CONFIG_PATH),
        '/etc/sysconfig/cloudify-logstash',
        LOGSTASH_SERVICE_NAME)

    utils.logrotate(LOGSTASH_SERVICE_NAME)
    utils.sudo(['/sbin/chkconfig', 'logstash', 'on'])
    utils.clean_var_log_dir(LOGSTASH_SERVICE_NAME)
def change_conf(next_conf=None):
    signal = "# configurations recommended by ottertune:\n"
    next_conf = next_conf or {}

    tmp_conf_in = os.path.join(dconf.TEMP_DIR,
                               os.path.basename(dconf.DB_CONF) + '.in')
    get(dconf.DB_CONF, tmp_conf_in)
    with open(tmp_conf_in, 'r') as f:
        lines = f.readlines()

    if signal not in lines:
        lines += ['\n', signal]

    signal_idx = lines.index(signal)
    lines = lines[0:signal_idx + 1]

    if dconf.BASE_DB_CONF:
        assert isinstance(dconf.BASE_DB_CONF, dict), \
            (type(dconf.BASE_DB_CONF), dconf.BASE_DB_CONF)
        base_conf = ['{} = {}\n'.format(*c)
                     for c in sorted(dconf.BASE_DB_CONF.items())]
        lines.extend(base_conf)

    if isinstance(next_conf, str):
        with open(next_conf, 'r') as f:
            recommendation = json.load(
                f, encoding="UTF-8",
                object_pairs_hook=OrderedDict)['recommendation']
    else:
        recommendation = next_conf

    assert isinstance(recommendation, dict)
    for name, value in recommendation.items():
        if dconf.DB_TYPE == 'oracle' and isinstance(value, str):
            value = value.strip('B')
        lines.append('{} = {}\n'.format(name, value))
    lines.append('\n')

    tmp_conf_out = os.path.join(dconf.TEMP_DIR,
                                os.path.basename(dconf.DB_CONF) + '.out')
    with open(tmp_conf_out, 'w') as f:
        f.write(''.join(lines))

    sudo('cp {0} {0}.ottertune.bak'.format(dconf.DB_CONF))
    put(tmp_conf_out, dconf.DB_CONF, use_sudo=False)
    local('rm -f {} {}'.format(tmp_conf_in, tmp_conf_out))
def install_java():
    java_source_url = ctx.node.properties['java_rpm_source_url']

    ctx.logger.info('Installing Java...')
    utils.set_selinux_permissive()
    utils.copy_notice('java')
    utils.yum_install(java_source_url)

    # Make sure the cloudify logs dir exists before we try moving the java
    # log there. -p will cause it not to error if the dir already exists
    utils.mkdir('/var/log/cloudify')

    # Java install log is dropped in /var/log.
    # Move it to live with the rest of the cloudify logs
    if os.path.isfile('/var/log/java_install.log'):
        utils.sudo('mv /var/log/java_install.log /var/log/cloudify')
def install_java():
    java_source_url = ctx_properties['java_rpm_source_url']

    ctx.logger.info('Installing Java...')
    utils.set_selinux_permissive()
    utils.copy_notice('java')
    utils.yum_install(java_source_url, service_name='java')

    # Make sure the cloudify logs dir exists before we try moving the java
    # log there. -p will cause it not to error if the dir already exists
    utils.mkdir('/var/log/cloudify')

    # Java install log is dropped in /var/log.
    # Move it to live with the rest of the cloudify logs
    if os.path.isfile('/var/log/java_install.log'):
        utils.sudo('mv /var/log/java_install.log /var/log/cloudify')
def _create_cloudify_bridge():
    proc, stdout, stderr = utils.sudo('brctl show')
    if 'cfy0' not in stdout:
        ctx.logger.info('creating cfy0 network bridge...')
        utils.sudo('brctl addbr cfy0')
        utils.sudo('ip addr add 172.20.0.1/24 dev cfy0')
        utils.sudo('ip link set dev cfy0 up')
def _init_postgresql():
    ctx.logger.info('Initializing PostgreSQL DATA folder...')
    postgresql95_setup = join(PGSQL_USR_DIR, 'bin', 'postgresql95-setup')
    try:
        utils.sudo(command=[postgresql95_setup, 'initdb'])
    except Exception:
        ctx.logger.debug('PostgreSQL DATA folder has already been '
                         'initialized...')

    ctx.logger.info('Starting PostgreSQL server...')
    utils.systemd.enable(service_name=SERVICE_NAME, append_prefix=False)
    utils.systemd.start(service_name=SERVICE_NAME, append_prefix=False)

    ctx.logger.info('Setting PostgreSQL logs path...')
    ps_95_logs_path = join(PGSQL_LIB_DIR, '9.5', 'data', 'pg_log')
    utils.mkdir(PGSQL_LOGS_DIR)
    if not os.path.isdir(ps_95_logs_path):
        utils.ln(source=ps_95_logs_path, target=PGSQL_LOGS_DIR, params='-s')
def check_rabbit_running():
    """Use rabbitmqctl status to check if RabbitMQ is working.

    Sometimes rabbit takes a while to start, so this is retried several
    times.
    Note that this is currently impossible to do on a remote host, so this
    check only runs when rabbitmq is installed locally.
    """
    result = utils.sudo(['rabbitmqctl', 'status'], ignore_failures=True)
    if result.returncode != 0:
        raise ValueError('rabbitmqctl status: rabbitmq not running')
def add(grpname, gid=False):
    cmd = 'groupadd --system %s' % grpname
    if gid:
        cmd = 'groupadd --system --gid %i %s' % (gid, grpname)
    result = sudo(cmd)
    if not result.failed:
        return result.stderr.endswith('already exists')
    else:
        return False
def check_worker_running():
    """Use `celery status` to check if the worker is running."""
    result = utils.sudo([
        'CELERY_WORK_DIR=/opt/mgmtworker/work',
        CELERY_PATH,
        '--config=cloudify.broker_config',
        'status'
    ], ignore_failures=True)
    if result.returncode != 0:
        raise ValueError('celery status: worker not running')
def check_worker_running():
    """Use `celery status` to check if the worker is running."""
    work_dir = join(HOME_DIR, 'work')
    celery_path = join(HOME_DIR, 'env', 'bin', 'celery')
    result = utils.sudo([
        'CELERY_WORK_DIR={0}'.format(work_dir),
        celery_path,
        '--config=cloudify.broker_config',
        'status'
    ], ignore_failures=True)
    if result.returncode != 0:
        raise ValueError('celery status: worker not running')
def uninstall(pkg, warn=True, silent=False):
    status = installed(pkg)
    if status is True:
        _msg('Uninstalling %s...' % pkg, silent)
        cmd = 'apt-get remove -y %s' % pkg
        if warn:
            if confirm('Are you sure you want to uninstall %s from: %s?'
                       % (pkg, env.host_string)):
                result = sudo(cmd)
        else:
            result = sudo(cmd)
        if result.failed:
            abort(result.stderr)
        else:
            _msg('Uninstalled %s' % pkg, silent)
        return
    elif status is False:
        _msg('Not Already Installed %s: Skipped' % pkg, silent)
    else:
        abort(result.stderr)
def add(username, uid=False, gid=False, verbose=False):
    cmd = 'useradd --no-create-home --system '
    if uid:
        cmd += '--uid %s ' % uid
    if gid:
        cmd += '--gid %s ' % gid
    cmd += username
    result = sudo(cmd)
    if not result.failed:
        return result.stderr.endswith('already exists')
    else:
        return False
def configure_mgmtworker():
    # these must all be exported as part of the start operation.
    # they will not persist, so we should use the new agent
    # don't forget to change all localhosts to the relevant ips
    mgmtworker_home = '/opt/mgmtworker'
    mgmtworker_venv = '{0}/env'.format(mgmtworker_home)
    celery_work_dir = '{0}/work'.format(mgmtworker_home)

    ctx.logger.info('Configuring Management worker...')
    # Deploy the broker configuration
    # TODO: This will break interestingly if mgmtworker_venv is empty.
    # Some sort of check for that would be sensible.
    # To sandy: I don't quite understand this check...
    # there is no else here..
    # for python_path in ${mgmtworker_venv}/lib/python*; do
    if os.path.isfile(os.path.join(mgmtworker_venv, 'bin/python')):
        broker_conf_path = os.path.join(celery_work_dir,
                                        'broker_config.json')
        utils.deploy_blueprint_resource(
            '{0}/broker_config.json'.format(CONFIG_PATH),
            broker_conf_path,
            MGMT_WORKER_SERVICE_NAME)
        # The config contains credentials, do not let the world read it
        utils.sudo(['chmod', '440', broker_conf_path])
    utils.systemd.configure(MGMT_WORKER_SERVICE_NAME)
    utils.logrotate(MGMT_WORKER_SERVICE_NAME)
def install(pkg, silent=False):
    status = installed(pkg)
    if status is True:
        _msg('Already installed: %s' % pkg, silent)
        return True
    elif status is False:
        _msg('Installing %s...' % pkg, silent)
        # Update result, as generally you want to know
        # if something did actually install
        result = sudo('apt-get install -y %s' % pkg)
        if result.failed:
            abort(result.stderr)
        else:
            _msg('Installed %s' % pkg, silent)
        return result
    else:
        abort(result.stderr)
def install_mgmtworker():
    management_worker_rpm_source_url = \
        ctx_properties['management_worker_rpm_source_url']

    # these must all be exported as part of the start operation.
    # they will not persist, so we should use the new agent
    # don't forget to change all localhosts to the relevant ips
    mgmtworker_home = '/opt/mgmtworker'
    mgmtworker_venv = '{0}/env'.format(mgmtworker_home)
    celery_work_dir = '{0}/work'.format(mgmtworker_home)
    celery_log_dir = "/var/log/cloudify/mgmtworker"

    broker_port_ssl = '5671'
    broker_port_no_ssl = '5672'
    rabbit_props = utils.ctx_factory.get('rabbitmq')
    rabbitmq_ssl_enabled = rabbit_props['rabbitmq_ssl_enabled']
    ctx.logger.info("rabbitmq_ssl_enabled: {0}".format(rabbitmq_ssl_enabled))
    rabbitmq_cert_public = rabbit_props['rabbitmq_cert_public']

    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip(
            rabbit_props.get('rabbitmq_endpoint_ip'))

    # Fix possible injections in json of rabbit credentials
    # See json.org for string spec
    for key in ['rabbitmq_username', 'rabbitmq_password']:
        # We will not escape newlines or other control characters,
        # we will accept them breaking things noisily, e.g. on newlines
        # and backspaces.
        # TODO: add:
        # sed 's/"/\\"/' | sed 's/\\/\\\\/' | sed s-/-\\/- | sed 's/\t/\\t/'
        ctx.instance.runtime_properties[key] = ctx_properties[key]

    # Make the ssl enabled flag work with json (boolean in lower case)
    # TODO: check if still needed:
    # broker_ssl_enabled = "$(echo ${rabbitmq_ssl_enabled} | tr '[:upper:]' '[:lower:]')"  # NOQA
    ctx.instance.runtime_properties['rabbitmq_ssl_enabled'] = \
        rabbitmq_ssl_enabled

    ctx.logger.info('Installing Management Worker...')
    utils.set_selinux_permissive()

    utils.copy_notice(MGMT_WORKER_SERVICE_NAME)
    utils.mkdir(mgmtworker_home)
    utils.mkdir('{0}/config'.format(mgmtworker_home))
    utils.mkdir(celery_log_dir)
    utils.mkdir(celery_work_dir)

    # this creates the mgmtworker_venv and installs the relevant
    # modules into it.
    utils.yum_install(management_worker_rpm_source_url,
                      service_name=MGMT_WORKER_SERVICE_NAME)
    _install_optional(mgmtworker_venv)

    # Add certificate and select port, as applicable
    if rabbitmq_ssl_enabled:
        broker_cert_path = '{0}/amqp_pub.pem'.format(mgmtworker_home)
        utils.deploy_ssl_certificate(
            'public', broker_cert_path, 'root', rabbitmq_cert_public)
        ctx.instance.runtime_properties['broker_cert_path'] = \
            broker_cert_path
        # Use SSL port
        ctx.instance.runtime_properties['broker_port'] = broker_port_ssl
    else:
        # No SSL, don't use SSL port
        ctx.instance.runtime_properties['broker_port'] = broker_port_no_ssl
        if rabbitmq_cert_public is not None:
            ctx.logger.warn('Broker SSL cert supplied but SSL not enabled '
                            '(broker_ssl_enabled is False).')

    ctx.logger.info("broker_port: {0}".format(
        ctx.instance.runtime_properties['broker_port']))

    ctx.logger.info('Configuring Management worker...')
    # Deploy the broker configuration
    # TODO: This will break interestingly if mgmtworker_venv is empty.
    # Some sort of check for that would be sensible.
    # To sandy: I don't quite understand this check...
    # there is no else here..
    # for python_path in ${mgmtworker_venv}/lib/python*; do
    if os.path.isfile(os.path.join(mgmtworker_venv, 'bin/python')):
        broker_conf_path = os.path.join(celery_work_dir,
                                        'broker_config.json')
        utils.deploy_blueprint_resource(
            '{0}/broker_config.json'.format(CONFIG_PATH),
            broker_conf_path,
            MGMT_WORKER_SERVICE_NAME)
        # The config contains credentials, do not let the world read it
        utils.sudo(['chmod', '440', broker_conf_path])
    utils.systemd.configure(MGMT_WORKER_SERVICE_NAME)
    utils.logrotate(MGMT_WORKER_SERVICE_NAME)