def _install_rabbitmq():
    erlang_rpm_source_url = ctx.node.properties['erlang_rpm_source_url']
    rabbitmq_rpm_source_url = ctx.node.properties['rabbitmq_rpm_source_url']
    # TODO: maybe we don't need this env var
    os.putenv('RABBITMQ_FD_LIMIT',
              str(ctx.node.properties['rabbitmq_fd_limit']))
    rabbitmq_log_path = '/var/log/cloudify/rabbitmq'
    rabbitmq_username = ctx.node.properties['rabbitmq_username']
    rabbitmq_password = ctx.node.properties['rabbitmq_password']
    rabbitmq_cert_public = ctx.node.properties['rabbitmq_cert_public']
    rabbitmq_ssl_enabled = ctx.node.properties['rabbitmq_ssl_enabled']
    rabbitmq_cert_private = ctx.node.properties['rabbitmq_cert_private']

    ctx.logger.info('Installing RabbitMQ...')
    utils.set_selinux_permissive()

    utils.copy_notice('rabbitmq')
    utils.mkdir(rabbitmq_log_path)

    utils.yum_install(erlang_rpm_source_url)
    utils.yum_install(rabbitmq_rpm_source_url)

    utils.logrotate('rabbitmq')

    utils.deploy_blueprint_resource(
        '{0}/kill-rabbit'.format(CONFIG_PATH),
        '/usr/local/bin/kill-rabbit')
    utils.chmod('500', '/usr/local/bin/kill-rabbit')

    utils.systemd.configure('rabbitmq')

    ctx.logger.info('Configuring File Descriptors Limit...')
    utils.deploy_blueprint_resource(
        '{0}/rabbitmq_ulimit.conf'.format(CONFIG_PATH),
        '/etc/security/limits.d/rabbitmq.conf')

    utils.systemd.systemctl('daemon-reload')

    utils.chown('rabbitmq', 'rabbitmq', rabbitmq_log_path)

    utils.systemd.start('cloudify-rabbitmq')

    time.sleep(10)
    utils.wait_for_port(5672)

    ctx.logger.info('Enabling RabbitMQ Plugins...')
    # Occasional timing issues with rabbitmq starting have resulted in
    # failures when first trying to enable plugins
    utils.sudo(['rabbitmq-plugins', 'enable', 'rabbitmq_management'],
               retries=5)
    utils.sudo(['rabbitmq-plugins', 'enable', 'rabbitmq_tracing'], retries=5)

    _clear_guest_permissions_if_guest_exists()
    _create_user_and_set_permissions(rabbitmq_username, rabbitmq_password)
    _set_security(
        rabbitmq_ssl_enabled,
        rabbitmq_cert_private,
        rabbitmq_cert_public)

    utils.systemd.stop('cloudify-rabbitmq', retries=5)
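# `_create_user_and_set_permissions` and `_set_security` are referenced above
# but not shown in this section. A minimal sketch of the user helper, assuming
# the same `utils.sudo` wrapper and the standard `rabbitmqctl` sub-commands;
# the real implementation may differ (e.g. retries, vhost handling).
def _create_user_and_set_permissions(rabbitmq_username, rabbitmq_password):
    ctx.logger.info('Creating RabbitMQ user {0}...'.format(rabbitmq_username))
    utils.sudo(['rabbitmqctl', 'add_user',
                rabbitmq_username, rabbitmq_password])
    # Grant configure/write/read on all resources of the default vhost
    utils.sudo(['rabbitmqctl', 'set_permissions', '-p', '/',
                rabbitmq_username, '.*', '.*', '.*'])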
def install_amqpinflux():
    amqpinflux_rpm_source_url = \
        ctx.node.properties['amqpinflux_rpm_source_url']

    # injected as an input to the script
    ctx.instance.runtime_properties['influxdb_endpoint_ip'] = \
        os.environ['INFLUXDB_ENDPOINT_IP']
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip()

    amqpinflux_user = '******'
    amqpinflux_group = 'amqpinflux'
    amqpinflux_venv = '{0}/env'.format(AMQPINFLUX_HOME)

    ctx.logger.info('Installing AMQPInflux...')
    utils.set_selinux_permissive()
    utils.copy_notice('amqpinflux')

    utils.mkdir(AMQPINFLUX_HOME)
    utils.yum_install(amqpinflux_rpm_source_url)

    _install_optional(amqpinflux_venv)
    utils.create_service_user(amqpinflux_user, AMQPINFLUX_HOME)
    _deploy_broker_configuration(amqpinflux_group)

    ctx.logger.info('Fixing permissions...')
    utils.chown(amqpinflux_user, amqpinflux_group, AMQPINFLUX_HOME)

    utils.systemd.configure('amqpinflux')
def install_restservice():
    utils.set_service_as_cloudify_service(runtime_props)
    rest_service_rpm_source_url = \
        ctx_properties['rest_service_rpm_source_url']

    rest_venv = join(HOME_DIR, 'env')
    agent_dir = join(utils.MANAGER_RESOURCES_HOME, 'cloudify_agent')

    ctx.logger.info('Installing REST Service...')
    utils.set_selinux_permissive()

    utils.copy_notice(SERVICE_NAME)
    utils.mkdir(HOME_DIR)
    utils.mkdir(LOG_DIR)
    utils.chown(utils.CLOUDIFY_USER, utils.CLOUDIFY_GROUP, LOG_DIR)
    utils.mkdir(utils.MANAGER_RESOURCES_HOME)
    utils.mkdir(agent_dir)

    deploy_broker_configuration()
    utils.yum_install(rest_service_rpm_source_url,
                      service_name=SERVICE_NAME)
    _configure_dbus(rest_venv)
    install_optional(rest_venv)
    utils.logrotate(SERVICE_NAME)

    utils.deploy_sudo_command_script(script='/usr/bin/systemctl',
                                     description='Run systemctl')
def install_amqpinflux():
    amqpinflux_rpm_source_url = \
        ctx_properties['amqpinflux_rpm_source_url']

    # injected as an input to the script
    ctx.instance.runtime_properties['influxdb_endpoint_ip'] = \
        os.environ['INFLUXDB_ENDPOINT_IP']

    rabbit_props = utils.ctx_factory.get('rabbitmq')
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip()
    ctx.instance.runtime_properties['rabbitmq_username'] = \
        rabbit_props.get('rabbitmq_username')
    ctx.instance.runtime_properties['rabbitmq_password'] = \
        rabbit_props.get('rabbitmq_password')
    ctx.instance.runtime_properties['rabbitmq_ssl_enabled'] = True

    amqpinflux_venv = '{0}/env'.format(HOME_DIR)

    ctx.logger.info('Installing AMQPInflux...')
    utils.set_selinux_permissive()
    utils.copy_notice(SERVICE_NAME)

    utils.mkdir(HOME_DIR)
    utils.yum_install(amqpinflux_rpm_source_url, service_name=SERVICE_NAME)
    _install_optional(amqpinflux_venv)

    ctx.logger.info('Configuring AMQPInflux...')
    utils.create_service_user(AMQPINFLUX_USER, AMQPINFLUX_GROUP, HOME_DIR)
    ctx.instance.runtime_properties['broker_cert_path'] = \
        utils.INTERNAL_CERT_PATH
    utils.chown(AMQPINFLUX_USER, AMQPINFLUX_GROUP, HOME_DIR)

    utils.systemd.configure(SERVICE_NAME)
def install_restservice():
    rest_service_rpm_source_url = \
        ctx_properties['rest_service_rpm_source_url']

    rest_venv = os.path.join(REST_SERVICE_HOME, 'env')
    rest_service_log_path = '/var/log/cloudify/rest'

    ctx.logger.info('Installing REST Service...')
    utils.set_selinux_permissive()
    utils.copy_notice(REST_SERVICE_NAME)

    utils.mkdir(REST_SERVICE_HOME)
    utils.mkdir(rest_service_log_path)
    utils.mkdir(MANAGER_RESOURCES_HOME)

    deploy_broker_configuration()
    utils.yum_install(rest_service_rpm_source_url,
                      service_name=REST_SERVICE_NAME)
    _configure_dbus(rest_venv)
    install_optional(rest_venv)
    utils.logrotate(REST_SERVICE_NAME)

    ctx.logger.info('Deploying REST Service Configuration file...')
    # rest ports are set as runtime properties in nginx/scripts/create.py
    # cloudify-rest.conf currently contains localhost for fileserver endpoint.
    # We need to change that if we want to deploy nginx on another machine.
    utils.deploy_blueprint_resource(
        os.path.join(CONFIG_PATH, 'cloudify-rest.conf'),
        os.path.join(REST_SERVICE_HOME, 'cloudify-rest.conf'),
        REST_SERVICE_NAME)
def install_restservice():
    utils.set_service_as_cloudify_service(runtime_props)
    rest_service_rpm_source_url = \
        ctx_properties['rest_service_rpm_source_url']

    rest_venv = join(HOME_DIR, 'env')
    agent_dir = join(utils.MANAGER_RESOURCES_HOME, 'cloudify_agent')

    ctx.logger.info('Installing REST Service...')
    utils.set_selinux_permissive()

    utils.copy_notice(SERVICE_NAME)
    utils.mkdir(HOME_DIR)
    utils.mkdir(LOG_DIR)
    utils.chown(utils.CLOUDIFY_USER, utils.CLOUDIFY_GROUP, LOG_DIR)
    utils.mkdir(utils.MANAGER_RESOURCES_HOME)
    utils.mkdir(agent_dir)

    runtime_props['rabbitmq_endpoint_ip'] = utils.get_rabbitmq_endpoint_ip()
    runtime_props['broker_cert_path'] = utils.INTERNAL_CA_CERT_PATH

    utils.yum_install(rest_service_rpm_source_url,
                      service_name=SERVICE_NAME)
    _configure_dbus(rest_venv)
    install_optional(rest_venv)
    utils.logrotate(SERVICE_NAME)

    utils.deploy_sudo_command_script(script='/usr/bin/systemctl',
                                     description='Run systemctl')
    utils.deploy_sudo_command_script('set-manager-ssl.py',
                                     'Script for setting manager SSL',
                                     SERVICE_NAME)
    utils.deploy_sudo_command_script(script='/usr/sbin/shutdown',
                                     description='Perform shutdown (reboot)')
def install_logstash():
    """Install logstash as a systemd service."""
    logstash_unit_override = '/etc/systemd/system/logstash.service.d'
    logstash_source_url = ctx_properties['logstash_rpm_source_url']

    logstash_log_path = '/var/log/cloudify/logstash'

    ctx.logger.info('Installing Logstash...')
    utils.set_selinux_permissive()
    utils.copy_notice(LOGSTASH_SERVICE_NAME)
    utils.yum_install(logstash_source_url,
                      service_name=LOGSTASH_SERVICE_NAME)

    install_logstash_output_jdbc_plugin()
    install_postgresql_jdbc_driver()

    utils.mkdir(logstash_log_path)
    utils.chown('logstash', 'logstash', logstash_log_path)

    ctx.logger.debug('Creating systemd unit override...')
    utils.mkdir(logstash_unit_override)
    utils.deploy_blueprint_resource(
        '{0}/restart.conf'.format(CONFIG_PATH),
        '{0}/restart.conf'.format(logstash_unit_override),
        LOGSTASH_SERVICE_NAME)
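# `install_logstash_output_jdbc_plugin` is called above but not shown here.
# A minimal sketch, assuming the stock `logstash-plugin` CLI shipped with the
# Logstash RPM and an online plugin install; the real helper may instead
# install a locally bundled gem/zip for offline environments.
def install_logstash_output_jdbc_plugin():
    ctx.logger.info('Installing logstash-output-jdbc plugin...')
    utils.sudo(['/usr/share/logstash/bin/logstash-plugin', 'install',
                'logstash-output-jdbc'])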
def install_restservice():
    utils.set_service_as_cloudify_service(runtime_props)
    rest_service_rpm_source_url = \
        ctx_properties['rest_service_rpm_source_url']

    rest_venv = join(HOME_DIR, 'env')
    agent_dir = join(utils.MANAGER_RESOURCES_HOME, 'cloudify_agent')

    ctx.logger.info('Installing REST Service...')
    utils.set_selinux_permissive()

    utils.copy_notice(SERVICE_NAME)
    utils.mkdir(HOME_DIR)
    utils.mkdir(LOG_DIR)
    utils.chown(utils.CLOUDIFY_USER, utils.CLOUDIFY_GROUP, LOG_DIR)
    utils.mkdir(utils.MANAGER_RESOURCES_HOME)
    utils.mkdir(agent_dir)

    deploy_broker_configuration()
    utils.yum_install(rest_service_rpm_source_url,
                      service_name=SERVICE_NAME)
    _configure_dbus(rest_venv)
    install_optional(rest_venv)
    utils.logrotate(SERVICE_NAME)

    utils.deploy_sudo_command_script(
        script='/usr/bin/systemctl',
        description='Run systemctl'
    )
    utils.deploy_sudo_command_script(
        script='/usr/sbin/shutdown',
        description='Perform shutdown (reboot)'
    )
def _install_influxdb():
    influxdb_source_url = ctx_properties['influxdb_rpm_source_url']

    influxdb_user = '******'
    influxdb_group = 'influxdb'
    influxdb_home = '/opt/influxdb'
    influxdb_log_path = '/var/log/cloudify/influxdb'

    ctx.logger.info('Installing InfluxDB...')
    utils.set_selinux_permissive()
    utils.copy_notice(INFLUX_SERVICE_NAME)

    utils.mkdir(influxdb_home)
    utils.mkdir(influxdb_log_path)

    utils.yum_install(influxdb_source_url, service_name=INFLUX_SERVICE_NAME)
    utils.sudo(['rm', '-rf', '/etc/init.d/influxdb'])

    ctx.logger.info('Deploying InfluxDB config.toml...')
    utils.deploy_blueprint_resource(
        '{0}/config.toml'.format(CONFIG_PATH),
        '{0}/shared/config.toml'.format(influxdb_home),
        INFLUX_SERVICE_NAME)

    ctx.logger.info('Fixing user permissions...')
    utils.chown(influxdb_user, influxdb_group, influxdb_home)
    utils.chown(influxdb_user, influxdb_group, influxdb_log_path)

    utils.systemd.configure(INFLUX_SERVICE_NAME)
    utils.logrotate(INFLUX_SERVICE_NAME)
def _install_influxdb():
    influxdb_source_url = ctx_properties['influxdb_rpm_source_url']

    influxdb_user = '******'
    influxdb_group = 'influxdb'
    influxdb_home = '/opt/influxdb'
    influxdb_log_path = '/var/log/cloudify/influxdb'

    ctx.logger.info('Installing InfluxDB...')
    utils.set_selinux_permissive()
    utils.copy_notice(INFLUX_SERVICE_NAME)

    utils.mkdir(influxdb_home)
    utils.mkdir(influxdb_log_path)

    utils.yum_install(influxdb_source_url, service_name=INFLUX_SERVICE_NAME)
    utils.sudo(['rm', '-rf', '/etc/init.d/influxdb'])

    ctx.logger.info('Deploying InfluxDB config.toml...')
    utils.deploy_blueprint_resource(
        '{0}/config.toml'.format(CONFIG_PATH),
        '{0}/shared/config.toml'.format(influxdb_home),
        INFLUX_SERVICE_NAME)

    ctx.logger.info('Fixing user permissions...')
    utils.chown(influxdb_user, influxdb_group, influxdb_home)
    utils.chown(influxdb_user, influxdb_group, influxdb_log_path)

    utils.systemd.configure(INFLUX_SERVICE_NAME)
    # Provided with InfluxDB's package. Will be removed if it exists.
    utils.remove('/etc/init.d/influxdb')
    utils.logrotate(INFLUX_SERVICE_NAME)
def _install_influxdb():
    influxdb_source_url = ctx.node.properties['influxdb_rpm_source_url']

    influxdb_user = '******'
    influxdb_group = 'influxdb'
    influxdb_home = '/opt/influxdb'
    influxdb_log_path = '/var/log/cloudify/influxdb'

    ctx.logger.info('Installing InfluxDB...')
    utils.set_selinux_permissive()
    utils.copy_notice('influxdb')

    utils.mkdir(influxdb_home)
    utils.mkdir(influxdb_log_path)

    utils.yum_install(influxdb_source_url)
    utils.sudo(['rm', '-rf', '/etc/init.d/influxdb'])

    ctx.logger.info('Deploying InfluxDB config.toml...')
    utils.deploy_blueprint_resource(
        '{0}/config.toml'.format(CONFIG_PATH),
        '{0}/shared/config.toml'.format(influxdb_home))

    ctx.logger.info('Fixing user permissions...')
    utils.chown(influxdb_user, influxdb_group, influxdb_home)
    utils.chown(influxdb_user, influxdb_group, influxdb_log_path)

    utils.logrotate('influxdb')
    utils.systemd.configure('influxdb')
def _install_stage():
    nodejs_source_url = ctx_properties['nodejs_tar_source_url']
    stage_source_url = ctx_properties['stage_tar_source_url']

    if not utils.resource_factory.local_resource_exists(stage_source_url):
        ctx.logger.info('Stage package not found in manager resources '
                        'package. Stage will not be installed.')
        ctx.instance.runtime_properties['skip_installation'] = 'true'
        return

    # injected as an input to the script
    ctx.instance.runtime_properties['influxdb_endpoint_ip'] = \
        os.environ.get('INFLUXDB_ENDPOINT_IP')

    utils.set_selinux_permissive()
    utils.copy_notice(SERVICE_NAME)

    utils.mkdir(NODEJS_DIR)
    utils.mkdir(HOME_DIR)
    utils.mkdir(LOG_DIR)

    utils.create_service_user(STAGE_USER, STAGE_GROUP, HOME_DIR)

    ctx.logger.info('Installing NodeJS...')
    nodejs = utils.download_cloudify_resource(nodejs_source_url,
                                              SERVICE_NAME)
    utils.untar(nodejs, NODEJS_DIR)
    utils.remove(nodejs)

    ctx.logger.info('Installing Cloudify Stage (UI)...')
    stage_tar = utils.download_cloudify_resource(stage_source_url,
                                                 SERVICE_NAME)
    if 'community' in stage_tar:
        ctx.logger.info('Community edition')
        ctx.instance.runtime_properties['community_mode'] = '-mode community'
    else:
        ctx.instance.runtime_properties['community_mode'] = ''

    utils.untar(stage_tar, HOME_DIR)
    utils.remove(stage_tar)

    ctx.logger.info('Fixing permissions...')
    utils.chown(STAGE_USER, STAGE_GROUP, HOME_DIR)
    utils.chown(STAGE_USER, STAGE_GROUP, NODEJS_DIR)
    utils.chown(STAGE_USER, STAGE_GROUP, LOG_DIR)

    utils.deploy_sudo_command_script(
        'restore-snapshot.py',
        'Restore stage directories from a snapshot path',
        component=SERVICE_NAME,
        allow_as=STAGE_USER)
    utils.chmod('a+rx', '/opt/cloudify/stage/restore-snapshot.py')
    utils.sudo(['usermod', '-aG', utils.CLOUDIFY_GROUP, STAGE_USER])

    utils.logrotate(SERVICE_NAME)
    utils.systemd.configure(SERVICE_NAME)

    backend_dir = join(HOME_DIR, 'backend')
    npm_path = join(NODEJS_DIR, 'bin', 'npm')
    subprocess.check_call(
        'cd {0}; {1} run db-migrate'.format(backend_dir, npm_path),
        shell=True)
def install_amqpinflux():
    amqpinflux_rpm_source_url = \
        ctx_properties['amqpinflux_rpm_source_url']

    # injected as an input to the script
    ctx.instance.runtime_properties['influxdb_endpoint_ip'] = \
        os.environ['INFLUXDB_ENDPOINT_IP']
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip(
            ctx_properties.get('rabbitmq_endpoint_ip'))

    amqpinflux_user = '******'
    amqpinflux_group = 'amqpinflux'
    amqpinflux_venv = '{0}/env'.format(AMQPINFLUX_HOME)

    ctx.logger.info('Installing AMQPInflux...')
    utils.set_selinux_permissive()
    utils.copy_notice(AMQPINFLUX_SERVICE_NAME)

    utils.mkdir(AMQPINFLUX_HOME)
    utils.yum_install(amqpinflux_rpm_source_url,
                      service_name=AMQPINFLUX_SERVICE_NAME)

    _install_optional(amqpinflux_venv)
    utils.create_service_user(amqpinflux_user, AMQPINFLUX_HOME)
    _deploy_broker_configuration(amqpinflux_group)

    ctx.logger.info('Fixing permissions...')
    utils.chown(amqpinflux_user, amqpinflux_group, AMQPINFLUX_HOME)

    utils.systemd.configure(AMQPINFLUX_SERVICE_NAME)
def _install_influxdb():
    influxdb_source_url = ctx_properties['influxdb_rpm_source_url']

    influxdb_user = '******'
    influxdb_group = 'influxdb'

    ctx.logger.info('Installing InfluxDB...')
    utils.set_selinux_permissive()
    utils.copy_notice(SERVICE_NAME)

    utils.mkdir(HOME_DIR)
    utils.mkdir(LOG_DIR)

    utils.yum_install(influxdb_source_url, service_name=SERVICE_NAME)

    ctx.logger.info('Deploying InfluxDB configuration...')
    utils.deploy_blueprint_resource(
        '{0}/config.toml'.format(CONFIG_PATH),
        '{0}/shared/config.toml'.format(HOME_DIR),
        SERVICE_NAME)

    utils.chown(influxdb_user, influxdb_group, HOME_DIR)
    utils.chown(influxdb_user, influxdb_group, LOG_DIR)

    utils.systemd.configure(SERVICE_NAME)
    # Provided with InfluxDB's package. Will be removed if it exists.
    utils.remove(INIT_D_PATH)
    utils.logrotate(SERVICE_NAME)
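# Not part of the original script: a small post-install smoke test one could
# run after `utils.systemd.configure(SERVICE_NAME)`. It assumes the systemd
# wrapper accepts the same SERVICE_NAME and that InfluxDB listens on its
# default HTTP API port (8086); adjust if the unit or port differs.
def _verify_influxdb_listening():
    utils.systemd.start(SERVICE_NAME)
    utils.wait_for_port(8086)
    utils.systemd.stop(SERVICE_NAME)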
def install_amqpinflux():
    amqpinflux_rpm_source_url = \
        ctx_properties['amqpinflux_rpm_source_url']

    # injected as an input to the script
    ctx.instance.runtime_properties['influxdb_endpoint_ip'] = \
        os.environ['INFLUXDB_ENDPOINT_IP']

    rabbit_props = utils.ctx_factory.get('rabbitmq')
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip()
    ctx.instance.runtime_properties['rabbitmq_username'] = \
        rabbit_props.get('rabbitmq_username')
    ctx.instance.runtime_properties['rabbitmq_password'] = \
        rabbit_props.get('rabbitmq_password')

    amqpinflux_venv = '{0}/env'.format(HOME_DIR)

    ctx.logger.info('Installing AMQPInflux...')
    utils.set_selinux_permissive()
    utils.copy_notice(SERVICE_NAME)

    utils.mkdir(HOME_DIR)
    utils.yum_install(amqpinflux_rpm_source_url, service_name=SERVICE_NAME)
    _install_optional(amqpinflux_venv)

    ctx.logger.info('Configuring AMQPInflux...')
    utils.create_service_user(AMQPINFLUX_USER, AMQPINFLUX_GROUP, HOME_DIR)
    ctx.instance.runtime_properties['broker_cert_path'] = \
        utils.INTERNAL_CERT_PATH
    utils.chown(AMQPINFLUX_USER, AMQPINFLUX_GROUP, HOME_DIR)

    utils.systemd.configure(SERVICE_NAME)
def install_webui():
    nodejs_source_url = ctx.node.properties['nodejs_tar_source_url']
    webui_source_url = ctx.node.properties['webui_tar_source_url']
    grafana_source_url = ctx.node.properties['grafana_tar_source_url']

    # injected as an input to the script
    ctx.instance.runtime_properties['influxdb_endpoint_ip'] = \
        os.environ.get('INFLUXDB_ENDPOINT_IP')

    nodejs_home = '/opt/nodejs'
    webui_home = '/opt/cloudify-ui'
    webui_log_path = '/var/log/cloudify/webui'
    grafana_home = '{0}/grafana'.format(webui_home)

    webui_user = '******'
    webui_group = 'webui'

    ctx.logger.info('Installing Cloudify\'s WebUI...')
    utils.set_selinux_permissive()
    utils.copy_notice('webui')

    utils.mkdir(nodejs_home)
    utils.mkdir(webui_home)
    utils.mkdir('{0}/backend'.format(webui_home))
    utils.mkdir(webui_log_path)
    utils.mkdir(grafana_home)

    utils.create_service_user(webui_user, webui_home)

    ctx.logger.info('Installing NodeJS...')
    nodejs = utils.download_file(nodejs_source_url)
    utils.untar(nodejs, nodejs_home)

    ctx.logger.info('Installing Cloudify\'s WebUI...')
    webui = utils.download_file(webui_source_url)
    utils.untar(webui, webui_home)

    ctx.logger.info('Installing Grafana...')
    grafana = utils.download_file(grafana_source_url)
    utils.untar(grafana, grafana_home)

    ctx.logger.info('Deploying WebUI Configuration...')
    utils.deploy_blueprint_resource(
        '{0}/gsPresets.json'.format(CONFIG_PATH),
        '{0}/backend/gsPresets.json'.format(webui_home))
    ctx.logger.info('Deploying Grafana Configuration...')
    utils.deploy_blueprint_resource(
        '{0}/grafana_config.js'.format(CONFIG_PATH),
        '{0}/config.js'.format(grafana_home))

    ctx.logger.info('Fixing permissions...')
    utils.chown(webui_user, webui_group, webui_home)
    utils.chown(webui_user, webui_group, nodejs_home)
    utils.chown(webui_user, webui_group, webui_log_path)

    utils.logrotate('webui')
    utils.systemd.configure('webui')
def install_riemann():
    langohr_source_url = ctx_properties['langohr_jar_source_url']
    daemonize_source_url = ctx_properties['daemonize_rpm_source_url']
    riemann_source_url = ctx_properties['riemann_rpm_source_url']
    # Needed for Riemann's config
    cloudify_resources_url = ctx_properties['cloudify_resources_url']
    rabbitmq_username = ctx_properties['rabbitmq_username']
    rabbitmq_password = ctx_properties['rabbitmq_password']

    riemann_config_path = '/etc/riemann'
    riemann_log_path = '/var/log/cloudify/riemann'
    langohr_home = '/opt/lib'
    extra_classpath = '{0}/langohr.jar'.format(langohr_home)

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by
    # the deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        ctx.abort_operation(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    rabbit_props = utils.ctx_factory.get('rabbitmq')
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip(
            rabbit_props.get('rabbitmq_endpoint_ip'))
    ctx.instance.runtime_properties['rabbitmq_username'] = \
        rabbit_props.get('rabbitmq_username')
    ctx.instance.runtime_properties['rabbitmq_password'] = \
        rabbit_props.get('rabbitmq_password')

    ctx.logger.info('Installing Riemann...')
    utils.set_selinux_permissive()
    utils.copy_notice(RIEMANN_SERVICE_NAME)

    utils.mkdir(riemann_log_path)
    utils.mkdir(langohr_home)
    utils.mkdir(riemann_config_path)
    utils.mkdir('{0}/conf.d'.format(riemann_config_path))

    langohr = utils.download_cloudify_resource(langohr_source_url,
                                               RIEMANN_SERVICE_NAME)
    utils.sudo(['cp', langohr, extra_classpath])
    ctx.logger.info('Applying Langohr permissions...')
    utils.sudo(['chmod', '644', extra_classpath])
    utils.yum_install(daemonize_source_url,
                      service_name=RIEMANN_SERVICE_NAME)
    utils.yum_install(riemann_source_url,
                      service_name=RIEMANN_SERVICE_NAME)
    utils.logrotate(RIEMANN_SERVICE_NAME)

    ctx.logger.info('Downloading cloudify-manager Repository...')
    manager_repo = utils.download_cloudify_resource(cloudify_resources_url,
                                                    RIEMANN_SERVICE_NAME)
    ctx.logger.info('Extracting Manager Repository...')
    utils.untar(manager_repo, '/tmp')
def install_riemann():
    langohr_source_url = ctx_properties['langohr_jar_source_url']
    daemonize_source_url = ctx_properties['daemonize_rpm_source_url']
    riemann_source_url = ctx_properties['riemann_rpm_source_url']

    utils.create_service_user(user=RIEMANN_USER,
                              group=RIEMANN_GROUP,
                              home=utils.CLOUDIFY_HOME_DIR)

    riemann_config_path = '/etc/riemann'
    riemann_log_path = '/var/log/cloudify/riemann'
    langohr_home = '/opt/lib'
    extra_classpath = '{0}/langohr.jar'.format(langohr_home)
    riemann_dir = '/opt/riemann'

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by
    # the deployed resources, hence the check here.
    rabbitmq_username = ctx_properties['rabbitmq_username']
    rabbitmq_password = ctx_properties['rabbitmq_password']
    if not rabbitmq_username or not rabbitmq_password:
        ctx.abort_operation(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    runtime_props['rabbitmq_endpoint_ip'] = utils.get_rabbitmq_endpoint_ip()

    ctx.logger.info('Installing Riemann...')
    utils.set_selinux_permissive()
    utils.copy_notice(RIEMANN_SERVICE_NAME)

    utils.mkdir(riemann_log_path)
    utils.mkdir(langohr_home)
    utils.mkdir(riemann_config_path)
    utils.mkdir('{0}/conf.d'.format(riemann_config_path))
    # utils.chown cannot be used as it will change both user and group
    utils.sudo(['chown', RIEMANN_USER, riemann_dir])

    langohr = utils.download_cloudify_resource(langohr_source_url,
                                               RIEMANN_SERVICE_NAME)
    utils.sudo(['cp', langohr, extra_classpath])
    ctx.logger.info('Applying Langohr permissions...')
    utils.sudo(['chmod', '644', extra_classpath])
    utils.yum_install(daemonize_source_url,
                      service_name=RIEMANN_SERVICE_NAME)
    utils.yum_install(riemann_source_url,
                      service_name=RIEMANN_SERVICE_NAME)
    utils.chown(RIEMANN_USER, RIEMANN_GROUP, riemann_log_path)

    utils.logrotate(RIEMANN_SERVICE_NAME)

    files_to_remove = [
        riemann_config_path,
        riemann_log_path,
        extra_classpath,
        riemann_dir
    ]
    runtime_props['files_to_remove'] = files_to_remove
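# The `files_to_remove` runtime property above is presumably consumed by a
# matching uninstall/teardown operation that is not shown in this section.
# A minimal sketch of such a consumer, assuming the same `utils.remove`
# helper used elsewhere in these scripts:
def remove_riemann_files():
    for path in runtime_props.get('files_to_remove', []):
        ctx.logger.info('Removing {0}...'.format(path))
        utils.remove(path)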
def install_logstash():
    logstash_unit_override = '/etc/systemd/system/logstash.service.d'
    logstash_source_url = ctx_properties['logstash_rpm_source_url']

    rabbitmq_username = ctx_properties['rabbitmq_username']
    rabbitmq_password = ctx_properties['rabbitmq_password']

    logstash_log_path = '/var/log/cloudify/logstash'
    logstash_conf_path = '/etc/logstash/conf.d'

    # injected as an input to the script
    ctx.instance.runtime_properties['es_endpoint_ip'] = \
        os.environ.get('ES_ENDPOINT_IP')
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip(
            ctx_properties.get('rabbitmq_endpoint_ip'))

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by
    # the deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        utils.error_exit(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    ctx.logger.info('Installing Logstash...')
    utils.set_selinux_permissive()
    utils.copy_notice(LOGSTASH_SERVICE_NAME)

    utils.yum_install(logstash_source_url,
                      service_name=LOGSTASH_SERVICE_NAME)

    utils.mkdir(logstash_log_path)
    utils.chown('logstash', 'logstash', logstash_log_path)

    ctx.logger.info('Creating systemd unit override...')
    utils.mkdir(logstash_unit_override)
    utils.deploy_blueprint_resource(
        '{0}/restart.conf'.format(CONFIG_PATH),
        '{0}/restart.conf'.format(logstash_unit_override),
        LOGSTASH_SERVICE_NAME)
    ctx.logger.info('Deploying Logstash conf...')
    utils.deploy_blueprint_resource(
        '{0}/logstash.conf'.format(CONFIG_PATH),
        '{0}/logstash.conf'.format(logstash_conf_path),
        LOGSTASH_SERVICE_NAME)
    ctx.logger.info('Deploying Logstash sysconfig...')
    utils.deploy_blueprint_resource(
        '{0}/logstash'.format(CONFIG_PATH),
        '/etc/sysconfig/logstash',
        LOGSTASH_SERVICE_NAME)

    utils.logrotate(LOGSTASH_SERVICE_NAME)
    utils.sudo(['/sbin/chkconfig', 'logstash', 'on'])
    utils.clean_var_log_dir(LOGSTASH_SERVICE_NAME)
def install_mgmtworker():
    riemann_dir = '/opt/riemann'
    management_worker_rpm_source_url = \
        ctx_properties['management_worker_rpm_source_url']

    runtime_props['rabbitmq_endpoint_ip'] = utils.get_rabbitmq_endpoint_ip()

    # Fix possible injections in json of rabbit credentials
    # See json.org for string spec
    for key in ['rabbitmq_username', 'rabbitmq_password']:
        # We will not escape newlines or other control characters,
        # we will accept them breaking things noisily, e.g. on newlines
        # and backspaces.
        # TODO: add:
        # sed 's/"/\\"/' | sed 's/\\/\\\\/' | sed s-/-\\/- | sed 's/\t/\\t/'
        runtime_props[key] = ctx_properties[key]

    utils.set_service_as_cloudify_service(runtime_props)

    ctx.logger.info('Installing Management Worker...')
    utils.set_selinux_permissive()
    utils.copy_notice(SERVICE_NAME)

    utils.mkdir(HOME_DIR)
    utils.mkdir(join(HOME_DIR, 'config'))
    utils.mkdir(join(HOME_DIR, 'work'))
    utils.mkdir(LOG_DIR)
    utils.mkdir(riemann_dir)

    mgmtworker_venv = join(HOME_DIR, 'env')
    # used to run the sanity check
    runtime_props['python_executable'] = join(mgmtworker_venv, 'bin',
                                              'python')

    # this creates the mgmtworker_venv and installs the relevant
    # modules into it.
    utils.yum_install(management_worker_rpm_source_url,
                      service_name=SERVICE_NAME)
    _install_optional(mgmtworker_venv)

    # Add certificate and select port, as applicable
    runtime_props['broker_cert_path'] = utils.INTERNAL_CA_CERT_PATH
    # Use SSL port
    runtime_props['broker_port'] = AMQP_SSL_PORT

    utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP, HOME_DIR)
    utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP, LOG_DIR)
    # Changing perms on workdir and venv in case they are put outside homedir
    utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP, mgmtworker_venv)
    # Prepare riemann dir. We will change the owner to riemann later, but the
    # management worker will still need access to it
    utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP, riemann_dir)
    utils.chmod('770', riemann_dir)

    ctx.logger.info("Using broker port: {0}".format(
        ctx.instance.runtime_properties['broker_port']))
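# `_install_optional` is referenced throughout these scripts but not defined
# in this section. In the manager blueprints it typically overrides packages
# inside the service's virtualenv when optional source URLs are supplied as
# inputs. A rough sketch under that assumption -- the property key and the
# pip invocation below are illustrative, not the actual implementation:
def _install_optional(venv):
    # hypothetical input: a mapping of optional package name -> source URL
    optional_sources = ctx_properties.get('optional_package_source_urls', {})
    for name, source_url in optional_sources.items():
        ctx.logger.info('Installing {0} from {1}...'.format(name, source_url))
        # install straight into the service's virtualenv
        utils.sudo([join(venv, 'bin', 'pip'), 'install', '--upgrade',
                    source_url])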
def install_mgmtworker():
    riemann_dir = '/opt/riemann'
    management_worker_rpm_source_url = \
        ctx_properties['management_worker_rpm_source_url']

    runtime_props['rabbitmq_endpoint_ip'] = utils.get_rabbitmq_endpoint_ip()

    # Fix possible injections in json of rabbit credentials
    # See json.org for string spec
    for key in ['rabbitmq_username', 'rabbitmq_password']:
        # We will not escape newlines or other control characters,
        # we will accept them breaking things noisily, e.g. on newlines
        # and backspaces.
        # TODO: add:
        # sed 's/"/\\"/' | sed 's/\\/\\\\/' | sed s-/-\\/- | sed 's/\t/\\t/'
        runtime_props[key] = ctx_properties[key]

    runtime_props['rabbitmq_ssl_enabled'] = True

    utils.set_service_as_cloudify_service(runtime_props)

    ctx.logger.info('Installing Management Worker...')
    utils.set_selinux_permissive()
    utils.copy_notice(SERVICE_NAME)

    utils.mkdir(HOME_DIR)
    utils.mkdir(join(HOME_DIR, 'config'))
    utils.mkdir(join(HOME_DIR, 'work'))
    utils.mkdir(LOG_DIR)
    utils.mkdir(riemann_dir)

    mgmtworker_venv = join(HOME_DIR, 'env')

    # this creates the mgmtworker_venv and installs the relevant
    # modules into it.
    utils.yum_install(management_worker_rpm_source_url,
                      service_name=SERVICE_NAME)
    _install_optional(mgmtworker_venv)

    # Add certificate and select port, as applicable
    runtime_props['broker_cert_path'] = utils.INTERNAL_CERT_PATH
    # Use SSL port
    runtime_props['broker_port'] = AMQP_SSL_PORT

    utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP, HOME_DIR)
    utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP, LOG_DIR)
    # Changing perms on workdir and venv in case they are put outside homedir
    utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP, mgmtworker_venv)
    # Prepare riemann dir. We will change the owner to riemann later, but the
    # management worker will still need access to it
    utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP, riemann_dir)
    utils.chmod('770', riemann_dir)

    ctx.logger.info("Using broker port: {0}".format(
        ctx.instance.runtime_properties['broker_port']))
def install_logstash():
    logstash_unit_override = '/etc/systemd/system/logstash.service.d'
    logstash_source_url = ctx.node.properties['logstash_rpm_source_url']

    rabbitmq_username = ctx.node.properties['rabbitmq_username']
    rabbitmq_password = ctx.node.properties['rabbitmq_password']

    logstash_log_path = '/var/log/cloudify/logstash'
    logstash_conf_path = '/etc/logstash/conf.d'

    # injected as an input to the script
    ctx.instance.runtime_properties['es_endpoint_ip'] = \
        os.environ.get('ES_ENDPOINT_IP')
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip()

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by
    # the deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        utils.error_exit(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    ctx.logger.info('Installing Logstash...')
    utils.set_selinux_permissive()
    utils.copy_notice('logstash')

    utils.yum_install(logstash_source_url)

    utils.mkdir(logstash_log_path)
    utils.chown('logstash', 'logstash', logstash_log_path)

    ctx.logger.info('Creating systemd unit override...')
    utils.mkdir(logstash_unit_override)
    utils.deploy_blueprint_resource(
        '{0}/restart.conf'.format(CONFIG_PATH),
        '{0}/restart.conf'.format(logstash_unit_override))
    ctx.logger.info('Deploying Logstash conf...')
    utils.deploy_blueprint_resource(
        '{0}/logstash.conf'.format(CONFIG_PATH),
        '{0}/logstash.conf'.format(logstash_conf_path))
    ctx.logger.info('Deploying Logstash sysconfig...')
    utils.deploy_blueprint_resource(
        '{0}/logstash'.format(CONFIG_PATH),
        '/etc/sysconfig/logstash')

    utils.logrotate('logstash')
    utils.sudo(['/sbin/chkconfig', 'logstash', 'on'])
    utils.clean_var_log_dir('logstash')
def install_nginx():
    nginx_source_url = ctx_properties['nginx_rpm_source_url']

    # this is a bit tricky. the rest_service_source_url contains files that
    # should be deployed in the fileserver. the thing is, that since the
    # rest service and nginx cannot be distributed between vms right now
    # anyway, these resources are deployed by the rest service node instead.
    # rest_service_source_url = \
    #     ctx.node.properties['rest_service_module_source_url']

    nginx_log_path = '/var/log/cloudify/nginx'
    manager_resources_home = utils.MANAGER_RESOURCES_HOME
    manager_agents_path = utils.AGENT_ARCHIVES_PATH
    # TODO: check if can remove these two (should come with the agent package)
    manager_scripts_path = '{0}/packages/scripts'.format(
        manager_resources_home)
    manager_templates_path = '{0}/packages/templates'.format(
        manager_resources_home)
    nginx_unit_override = '/etc/systemd/system/nginx.service.d'

    # this is propagated to the agent retrieval script later on so that it's
    # not defined twice.
    ctx.instance.runtime_properties['agent_packages_path'] = \
        manager_agents_path

    # TODO: can we use static (not runtime) attributes for some of these?
    # how to set them?
    ctx.instance.runtime_properties['default_rest_service_port'] = '8100'
    ctx.instance.runtime_properties['internal_rest_service_port'] = '8101'

    ctx.logger.info('Installing Nginx...')
    utils.set_selinux_permissive()
    utils.copy_notice(NGINX_SERVICE_NAME)

    utils.mkdir(nginx_log_path)
    utils.mkdir(manager_resources_home)
    utils.mkdir(manager_agents_path)
    # TODO: check if can remove these two (should come with the agent package)
    utils.mkdir(manager_scripts_path)
    utils.mkdir(manager_templates_path)
    utils.mkdir(nginx_unit_override)

    utils.yum_install(nginx_source_url, service_name=NGINX_SERVICE_NAME)

    ctx.logger.info('Creating systemd unit override...')
    utils.deploy_blueprint_resource(
        '{0}/restart.conf'.format(CONFIG_PATH),
        '{0}/restart.conf'.format(nginx_unit_override),
        NGINX_SERVICE_NAME)

    utils.logrotate(NGINX_SERVICE_NAME)
    utils.clean_var_log_dir(NGINX_SERVICE_NAME)
def _install_stage():
    nodejs_source_url = ctx_properties['nodejs_tar_source_url']
    stage_source_url = ctx_properties['stage_tar_source_url']

    if not utils.resource_factory.local_resource_exists(stage_source_url):
        ctx.logger.info('Stage package not found in manager resources '
                        'package. Stage will not be installed.')
        ctx.instance.runtime_properties['skip_installation'] = 'true'
        return

    # injected as an input to the script
    ctx.instance.runtime_properties['influxdb_endpoint_ip'] = \
        os.environ.get('INFLUXDB_ENDPOINT_IP')

    utils.set_selinux_permissive()
    utils.copy_notice(SERVICE_NAME)

    utils.mkdir(NODEJS_DIR)
    utils.mkdir(HOME_DIR)
    utils.mkdir(LOG_DIR)

    utils.create_service_user(STAGE_USER, STAGE_GROUP, HOME_DIR)

    ctx.logger.info('Installing NodeJS...')
    nodejs = utils.download_cloudify_resource(nodejs_source_url,
                                              SERVICE_NAME)
    utils.untar(nodejs, NODEJS_DIR)
    utils.remove(nodejs)

    ctx.logger.info('Installing Cloudify Stage (UI)...')
    stage_tar = utils.download_cloudify_resource(stage_source_url,
                                                 SERVICE_NAME)
    utils.untar(stage_tar, HOME_DIR)
    utils.remove(stage_tar)

    ctx.logger.info('Fixing permissions...')
    utils.chown(STAGE_USER, STAGE_GROUP, HOME_DIR)
    utils.chown(STAGE_USER, STAGE_GROUP, NODEJS_DIR)
    utils.chown(STAGE_USER, STAGE_GROUP, LOG_DIR)

    utils.deploy_sudo_command_script(
        'restore-snapshot.py',
        'Restore stage directories from a snapshot path',
        component=SERVICE_NAME,
        allow_as=STAGE_USER)
    utils.chmod('a+rx', '/opt/cloudify/stage/restore-snapshot.py')

    utils.logrotate(SERVICE_NAME)
    utils.systemd.configure(SERVICE_NAME)

    backend_dir = join(HOME_DIR, 'backend')
    npm_path = join(NODEJS_DIR, 'bin', 'npm')
    subprocess.check_call(
        'cd {0}; {1} run db-migrate'.format(backend_dir, npm_path),
        shell=True)
def install_nginx():
    nginx_source_url = ctx.node.properties['nginx_rpm_source_url']

    # this is a bit tricky. the rest_service_source_url contains files that
    # should be deployed in the fileserver. the thing is, that since the
    # rest service and nginx cannot be distributed between vms right now
    # anyway, these resources are deployed by the rest service node instead.
    # rest_service_source_url = \
    #     ctx.node.properties['rest_service_module_source_url']

    nginx_log_path = '/var/log/cloudify/nginx'
    manager_resources_home = '/opt/manager/resources'
    manager_agents_path = '{0}/packages/agents'.format(manager_resources_home)
    # TODO: check if can remove these two (should come with the agent package)
    manager_scripts_path = '{0}/packages/scripts'.format(
        manager_resources_home)
    manager_templates_path = '{0}/packages/templates'.format(
        manager_resources_home)
    nginx_unit_override = '/etc/systemd/system/nginx.service.d'

    # this is propagated to the agent retrieval script later on so that it's
    # not defined twice.
    ctx.instance.runtime_properties['agent_packages_path'] = \
        manager_agents_path

    # TODO: can we use static (not runtime) attributes for some of these?
    # how to set them?
    ctx.instance.runtime_properties['default_rest_service_port'] = '8100'
    ctx.instance.runtime_properties['internal_rest_service_port'] = '8101'

    ctx.logger.info('Installing Nginx...')
    utils.set_selinux_permissive()
    utils.copy_notice('nginx')

    utils.mkdir(nginx_log_path)
    utils.mkdir(manager_resources_home)
    utils.mkdir(manager_agents_path)
    # TODO: check if can remove these two (should come with the agent package)
    utils.mkdir(manager_scripts_path)
    utils.mkdir(manager_templates_path)
    utils.mkdir(nginx_unit_override)

    utils.yum_install(nginx_source_url)

    ctx.logger.info('Creating systemd unit override...')
    utils.deploy_blueprint_resource(
        '{0}/restart.conf'.format(CONFIG_PATH),
        '{0}/restart.conf'.format(nginx_unit_override))

    utils.logrotate('nginx')
    utils.clean_var_log_dir('nginx')
def install_restservice():
    rest_service_rpm_source_url = \
        ctx_properties['rest_service_rpm_source_url']

    rest_venv = os.path.join(REST_SERVICE_HOME, 'env')
    # Also, manager_rest_config_path is mandatory since the manager's code
    # reads this env var. it should be renamed to rest_service_config_path.
    os.environ['manager_rest_config_path'] = os.path.join(
        REST_SERVICE_HOME, 'cloudify-rest.conf')
    os.environ['rest_service_config_path'] = os.path.join(
        REST_SERVICE_HOME, 'cloudify-rest.conf')
    os.environ['manager_rest_security_config_path'] = os.path.join(
        REST_SERVICE_HOME, 'rest-security.conf')
    rest_service_log_path = '/var/log/cloudify/rest'

    ctx.logger.info('Installing REST Service...')
    utils.set_selinux_permissive()
    utils.copy_notice(REST_SERVICE_NAME)

    utils.mkdir(REST_SERVICE_HOME)
    utils.mkdir(rest_service_log_path)
    utils.mkdir(MANAGER_RESOURCES_HOME)

    deploy_broker_configuration()
    utils.yum_install(rest_service_rpm_source_url,
                      service_name=REST_SERVICE_NAME)
    _configure_dbus(rest_venv)
    install_optional(rest_venv)
    utils.logrotate(REST_SERVICE_NAME)

    ctx.logger.info('Copying role configuration files...')
    utils.deploy_blueprint_resource(
        os.path.join(REST_RESOURCES_PATH, 'roles_config.yaml'),
        os.path.join(REST_SERVICE_HOME, 'roles_config.yaml'),
        REST_SERVICE_NAME,
        user_resource=True)
    utils.deploy_blueprint_resource(
        os.path.join(REST_RESOURCES_PATH, 'userstore.yaml'),
        os.path.join(REST_SERVICE_HOME, 'userstore.yaml'),
        REST_SERVICE_NAME,
        user_resource=True)

    # copy_security_config_files()

    ctx.logger.info('Deploying REST Service Configuration file...')
    # rest ports are set as runtime properties in nginx/scripts/create.py
    # cloudify-rest.conf currently contains localhost for fileserver endpoint.
    # We need to change that if we want to deploy nginx on another machine.
    utils.deploy_blueprint_resource(
        os.path.join(CONFIG_PATH, 'cloudify-rest.conf'),
        os.path.join(REST_SERVICE_HOME, 'cloudify-rest.conf'),
        REST_SERVICE_NAME)
def install_nginx():
    nginx_source_url = ctx_properties['nginx_rpm_source_url']

    # this is a bit tricky. the rest_service_source_url contains files that
    # should be deployed in the fileserver. the thing is, that since the
    # rest service and nginx cannot be distributed between vms right now
    # anyway, these resources are deployed by the rest service node instead.
    # rest_service_source_url = \
    #     ctx.node.properties['rest_service_module_source_url']

    manager_resources_home = utils.MANAGER_RESOURCES_HOME
    manager_agents_path = utils.AGENT_ARCHIVES_PATH
    # TODO: check if can remove these two (should come with the agent package)
    manager_scripts_path = '{0}/packages/scripts'.format(
        manager_resources_home)
    manager_templates_path = '{0}/packages/templates'.format(
        manager_resources_home)

    # this is propagated to the agent retrieval script later on so that it's
    # not defined twice.
    ctx.instance.runtime_properties['agent_packages_path'] = \
        manager_agents_path

    # TODO: can we use static (not runtime) attributes for some of these?
    # how to set them?
    ctx.instance.runtime_properties['default_rest_service_port'] = '8100'

    ctx.logger.info('Installing Nginx...')
    utils.set_selinux_permissive()
    utils.copy_notice(SERVICE_NAME)

    utils.mkdir(LOG_DIR)
    utils.mkdir(manager_resources_home)
    utils.mkdir(manager_agents_path)
    # TODO: check if can remove these two (should come with the agent package)
    utils.mkdir(manager_scripts_path)
    utils.mkdir(manager_templates_path)
    utils.mkdir(UNIT_OVERRIDE_PATH)

    utils.yum_install(nginx_source_url, service_name=SERVICE_NAME)

    ctx.logger.info('Creating systemd unit override...')
    utils.deploy_blueprint_resource(
        '{0}/restart.conf'.format(CONFIG_PATH),
        '{0}/restart.conf'.format(UNIT_OVERRIDE_PATH),
        SERVICE_NAME)

    utils.logrotate(SERVICE_NAME)
    utils.clean_var_log_dir(SERVICE_NAME)
def install_java():
    java_source_url = ctx_properties['java_rpm_source_url']

    ctx.logger.info('Installing Java...')
    utils.set_selinux_permissive()
    utils.copy_notice(SERVICE_NAME)
    utils.yum_install(java_source_url, SERVICE_NAME)

    utils.mkdir(LOG_DIR)

    # Java install log is dropped in /var/log.
    # Move it to live with the rest of the cloudify logs
    java_install_log = '/var/log/java_install.log'
    if os.path.isfile(java_install_log):
        utils.move(java_install_log, LOG_DIR)
def install_python_requirements():
    pip_source_rpm_url = ctx_properties['pip_source_rpm_url']
    install_python_compilers = ctx_properties['install_python_compilers']

    ctx.logger.info('Installing Python Requirements...')
    utils.set_selinux_permissive()
    utils.copy_notice('python')
    utils.yum_install(pip_source_rpm_url, service_name='python')

    if install_python_compilers:
        ctx.logger.info('Installing Compilers...')
        utils.yum_install('python-devel', service_name='python')
        utils.yum_install('gcc', service_name='python')
        utils.yum_install('gcc-c++', service_name='python')
def install_python_requirements():
    pip_source_rpm_url = ctx.node.properties['pip_source_rpm_url']
    install_python_compilers = ctx.node.properties['install_python_compilers']

    ctx.logger.info('Installing Python Requirements...')
    utils.set_selinux_permissive()
    utils.copy_notice('python')
    utils.yum_install(pip_source_rpm_url)

    if install_python_compilers:
        ctx.logger.info('Installing Compilers...')
        utils.yum_install('python-devel')
        utils.yum_install('gcc')
        utils.yum_install('gcc-c++')
def install_java():
    java_source_url = ctx.node.properties['java_rpm_source_url']

    ctx.logger.info('Installing Java...')
    utils.set_selinux_permissive()
    utils.copy_notice('java')
    utils.yum_install(java_source_url)

    # Make sure the cloudify logs dir exists before we try moving the java
    # log there. -p will cause it not to error if the dir already exists
    utils.mkdir('/var/log/cloudify')

    # Java install log is dropped in /var/log.
    # Move it to live with the rest of the cloudify logs
    if os.path.isfile('/var/log/java_install.log'):
        utils.sudo('mv /var/log/java_install.log /var/log/cloudify')
def install_java():
    java_source_url = ctx_properties['java_rpm_source_url']

    ctx.logger.info('Installing Java...')
    utils.set_selinux_permissive()
    utils.copy_notice('java')
    utils.yum_install(java_source_url, service_name='java')

    # Make sure the cloudify logs dir exists before we try moving the java
    # log there. -p will cause it not to error if the dir already exists
    utils.mkdir('/var/log/cloudify')

    # Java install log is dropped in /var/log.
    # Move it to live with the rest of the cloudify logs
    if os.path.isfile('/var/log/java_install.log'):
        utils.sudo('mv /var/log/java_install.log /var/log/cloudify')
def _install_composer():
    composer_source_url = ctx_properties['composer_tar_source_url']

    if not utils.resource_factory.local_resource_exists(composer_source_url):
        ctx.logger.info('Composer package not found in manager resources '
                        'package. Composer will not be installed.')
        ctx.instance.runtime_properties['skip_installation'] = 'true'
        return

    utils.set_selinux_permissive()
    utils.copy_notice(SERVICE_NAME)

    utils.mkdir(NODEJS_DIR)
    utils.mkdir(HOME_DIR)
    utils.mkdir(LOG_DIR)

    utils.create_service_user(COMPOSER_USER, COMPOSER_GROUP, HOME_DIR)
    # adding cfyuser to the composer group so that its files are r/w for
    # replication and snapshots (restart of mgmtworker necessary for change
    # to take effect)
    utils.sudo(['usermod', '-aG', COMPOSER_GROUP, utils.CLOUDIFY_USER])
    # This makes sure that the composer folders will be writable after
    # snapshot restore
    utils.sudo(['usermod', '-aG', utils.CLOUDIFY_GROUP, COMPOSER_USER])
    utils.systemd.restart('mgmtworker')

    ctx.logger.info('Installing Cloudify Composer...')
    composer_tar = utils.download_cloudify_resource(composer_source_url,
                                                    SERVICE_NAME)
    utils.untar(composer_tar, HOME_DIR)
    utils.remove(composer_tar)

    ctx.logger.info('Fixing permissions...')
    utils.chown(COMPOSER_USER, COMPOSER_GROUP, HOME_DIR)
    utils.chown(COMPOSER_USER, COMPOSER_GROUP, LOG_DIR)
    utils.chmod('g+w', CONF_DIR)
    utils.chmod('g+w', dirname(CONF_DIR))

    utils.logrotate(SERVICE_NAME)
    utils.systemd.configure(SERVICE_NAME)

    npm_path = join(NODEJS_DIR, 'bin', 'npm')
    subprocess.check_call(
        'cd {}; {} run db-migrate'.format(HOME_DIR, npm_path),
        shell=True)
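# Not from the original scripts: the same `db-migrate` step can be run
# without spawning a shell by passing `cwd` to subprocess, which avoids
# quoting issues if the paths ever contain spaces. A minimal equivalent
# sketch of that design choice:
def _run_db_migrate(home_dir, npm_path):
    subprocess.check_call([npm_path, 'run', 'db-migrate'], cwd=home_dir)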
def install_stage():
    nodejs_source_url = ctx_properties['nodejs_tar_source_url']
    stage_source_url = ctx_properties['stage_tar_source_url']

    # injected as an input to the script
    ctx.instance.runtime_properties['influxdb_endpoint_ip'] = \
        os.environ.get('INFLUXDB_ENDPOINT_IP')

    nodejs_home = '/opt/nodejs'
    stage_home = '/opt/cloudify-stage'
    stage_log_path = '/var/log/cloudify/stage'

    stage_user = '******'
    stage_group = 'stage'

    utils.set_selinux_permissive()
    utils.copy_notice(STAGE_SERVICE_NAME)

    utils.mkdir(nodejs_home)
    utils.mkdir(stage_home)
    utils.mkdir(stage_log_path)

    utils.create_service_user(stage_user, stage_home)

    ctx.logger.info('Installing NodeJS...')
    nodejs = utils.download_cloudify_resource(nodejs_source_url,
                                              STAGE_SERVICE_NAME)
    utils.untar(nodejs, nodejs_home)

    ctx.logger.info('Installing Cloudify Stage (UI)...')
    stage = utils.download_cloudify_resource(stage_source_url,
                                             STAGE_SERVICE_NAME)
    utils.untar(stage, stage_home)

    ctx.logger.info('Fixing permissions...')
    utils.chown(stage_user, stage_group, stage_home)
    utils.chown(stage_user, stage_group, nodejs_home)
    utils.chown(stage_user, stage_group, stage_log_path)

    utils.logrotate(STAGE_SERVICE_NAME)
    utils.systemd.configure(STAGE_SERVICE_NAME)
def _install_stage():
    nodejs_source_url = ctx_properties['nodejs_tar_source_url']
    stage_source_url = ctx_properties['stage_tar_source_url']

    if not utils.resource_factory.local_resource_exists(stage_source_url):
        ctx.logger.info('Stage package not found in manager resources '
                        'package. Stage will not be installed.')
        ctx.instance.runtime_properties['skip_installation'] = 'true'
        return

    # injected as an input to the script
    ctx.instance.runtime_properties['influxdb_endpoint_ip'] = \
        os.environ.get('INFLUXDB_ENDPOINT_IP')

    utils.set_selinux_permissive()
    utils.copy_notice(SERVICE_NAME)

    utils.mkdir(NODEJS_DIR)
    utils.mkdir(HOME_DIR)
    utils.mkdir(LOG_DIR)

    utils.create_service_user(STAGE_USER, STAGE_GROUP, HOME_DIR)

    ctx.logger.info('Installing NodeJS...')
    nodejs = utils.download_cloudify_resource(nodejs_source_url,
                                              SERVICE_NAME)
    utils.untar(nodejs, NODEJS_DIR)
    utils.remove(nodejs)

    ctx.logger.info('Installing Cloudify Stage (UI)...')
    stage_tar = utils.download_cloudify_resource(stage_source_url,
                                                 SERVICE_NAME)
    utils.untar(stage_tar, HOME_DIR)
    utils.remove(stage_tar)

    ctx.logger.info('Fixing permissions...')
    utils.chown(STAGE_USER, STAGE_GROUP, HOME_DIR)
    utils.chown(STAGE_USER, STAGE_GROUP, NODEJS_DIR)
    utils.chown(STAGE_USER, STAGE_GROUP, LOG_DIR)

    utils.logrotate(SERVICE_NAME)
    utils.systemd.configure(SERVICE_NAME)
def install_logstash():
    """Install logstash as a systemd service."""
    logstash_source_url = ctx_properties['logstash_rpm_source_url']

    ctx.logger.info('Installing Logstash...')
    utils.set_selinux_permissive()
    utils.copy_notice(SERVICE_NAME)
    utils.yum_install(logstash_source_url, SERVICE_NAME)

    install_logstash_filter_json_encode_plugin()
    install_logstash_output_jdbc_plugin()
    install_postgresql_jdbc_driver()

    utils.mkdir(LOG_DIR)
    utils.chown('logstash', 'logstash', LOG_DIR)

    ctx.logger.debug('Creating systemd unit override...')
    utils.mkdir(UNIT_OVERRIDE_PATH)
    utils.deploy_blueprint_resource(
        '{0}/restart.conf'.format(CONFIG_PATH),
        '{0}/restart.conf'.format(UNIT_OVERRIDE_PATH),
        SERVICE_NAME)
def install_amqpinflux():
    amqpinflux_rpm_source_url = \
        ctx_properties['amqpinflux_rpm_source_url']

    # injected as an input to the script
    ctx.instance.runtime_properties['influxdb_endpoint_ip'] = \
        os.environ['INFLUXDB_ENDPOINT_IP']

    rabbit_props = utils.ctx_factory.get('rabbitmq')
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip(
            rabbit_props.get('rabbitmq_endpoint_ip'))
    ctx.instance.runtime_properties['rabbitmq_username'] = \
        rabbit_props.get('rabbitmq_username')
    ctx.instance.runtime_properties['rabbitmq_password'] = \
        rabbit_props.get('rabbitmq_password')
    ctx.instance.runtime_properties['rabbitmq_ssl_enabled'] = \
        rabbit_props.get('rabbitmq_ssl_enabled')

    amqpinflux_user = '******'
    amqpinflux_group = 'amqpinflux'
    amqpinflux_venv = '{0}/env'.format(AMQPINFLUX_HOME)

    ctx.logger.info('Installing AMQPInflux...')
    utils.set_selinux_permissive()
    utils.copy_notice(AMQPINFLUX_SERVICE_NAME)

    utils.mkdir(AMQPINFLUX_HOME)
    utils.yum_install(amqpinflux_rpm_source_url,
                      service_name=AMQPINFLUX_SERVICE_NAME)

    _install_optional(amqpinflux_venv)
    utils.create_service_user(amqpinflux_user, AMQPINFLUX_HOME)
    _deploy_broker_configuration(amqpinflux_group)

    ctx.logger.info('Fixing permissions...')
    utils.chown(amqpinflux_user, amqpinflux_group, AMQPINFLUX_HOME)

    utils.systemd.configure(AMQPINFLUX_SERVICE_NAME)
def install_riemann():
    langohr_source_url = ctx_properties['langohr_jar_source_url']
    daemonize_source_url = ctx_properties['daemonize_rpm_source_url']
    riemann_source_url = ctx_properties['riemann_rpm_source_url']
    # Needed for Riemann's config
    cloudify_resources_url = ctx_properties['cloudify_resources_url']
    rabbitmq_username = ctx_properties['rabbitmq_username']
    rabbitmq_password = ctx_properties['rabbitmq_password']

    riemann_config_path = '/etc/riemann'
    riemann_log_path = '/var/log/cloudify/riemann'
    langohr_home = '/opt/lib'
    extra_classpath = '{0}/langohr.jar'.format(langohr_home)

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by
    # the deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        ctx.abort_operation(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    rabbit_props = utils.ctx_factory.get('rabbitmq')
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip(
            rabbit_props.get('rabbitmq_endpoint_ip'))
    ctx.instance.runtime_properties['rabbitmq_username'] = \
        rabbit_props.get('rabbitmq_username')
    ctx.instance.runtime_properties['rabbitmq_password'] = \
        rabbit_props.get('rabbitmq_password')

    ctx.logger.info('Installing Riemann...')
    utils.set_selinux_permissive()
    utils.copy_notice(RIEMANN_SERVICE_NAME)

    utils.mkdir(riemann_log_path)
    utils.mkdir(langohr_home)
    utils.mkdir(riemann_config_path)
    utils.mkdir('{0}/conf.d'.format(riemann_config_path))

    langohr = utils.download_cloudify_resource(langohr_source_url,
                                               RIEMANN_SERVICE_NAME)
    utils.sudo(['cp', langohr, extra_classpath])
    ctx.logger.info('Applying Langohr permissions...')
    utils.sudo(['chmod', '644', extra_classpath])
    utils.yum_install(daemonize_source_url,
                      service_name=RIEMANN_SERVICE_NAME)
    utils.yum_install(riemann_source_url,
                      service_name=RIEMANN_SERVICE_NAME)
    utils.logrotate(RIEMANN_SERVICE_NAME)

    ctx.logger.info('Downloading cloudify-manager Repository...')
    manager_repo = utils.download_cloudify_resource(cloudify_resources_url,
                                                    RIEMANN_SERVICE_NAME)
    ctx.logger.info('Extracting Manager Repository...')
    utils.untar(manager_repo, '/tmp')

    ctx.logger.info('Deploying Riemann manager.config...')
    utils.move(
        '/tmp/plugins/riemann-controller/riemann_controller/resources/manager.config',  # NOQA
        '{0}/conf.d/manager.config'.format(riemann_config_path))

    ctx.logger.info('Deploying Riemann conf...')
    utils.deploy_blueprint_resource(
        '{0}/main.clj'.format(CONFIG_PATH),
        '{0}/main.clj'.format(riemann_config_path),
        RIEMANN_SERVICE_NAME)

    # our riemann configuration will (by default) try to read these
    # environment variables. If they don't exist, it will assume that
    # they're found at "localhost":
    # export MANAGEMENT_IP=""
    # export RABBITMQ_HOST=""
    # we inject the management_ip for both of these into Riemann's systemd
    # config. These could differ if the manager and rabbitmq are running on
    # different hosts.
    utils.systemd.configure(RIEMANN_SERVICE_NAME)
    utils.clean_var_log_dir(RIEMANN_SERVICE_NAME)
def _prepare_env():
    ctx.logger.info('Preparing environment for PostgreSQL installation...')

    utils.set_selinux_permissive()

    postgresql_components_folder = 'postgresql'
    utils.copy_notice(postgresql_components_folder)
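# `utils.set_selinux_permissive` is called by every install script in this
# section but its body is not shown here. A plausible sketch, assuming the
# standard `setenforce` + `/etc/selinux/config` approach; the real helper may
# differ (e.g. checking `getenforce` first, or tolerating disabled SELinux).
def set_selinux_permissive():
    ctx.logger.info('Setting SELinux to permissive mode...')
    # runtime switch (fails if SELinux is disabled; handling omitted here)
    utils.sudo(['setenforce', '0'])
    # persist the setting across reboots
    utils.sudo(['sed', '-i', 's/^SELINUX=enforcing/SELINUX=permissive/',
                '/etc/selinux/config'])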
def _install_rabbitmq():
    erlang_rpm_source_url = ctx_properties['erlang_rpm_source_url']
    rabbitmq_rpm_source_url = ctx_properties['rabbitmq_rpm_source_url']
    # TODO: maybe we don't need this env var
    os.putenv('RABBITMQ_FD_LIMIT',
              str(ctx_properties['rabbitmq_fd_limit']))
    rabbitmq_username = ctx_properties['rabbitmq_username']
    rabbitmq_password = ctx_properties['rabbitmq_password']

    ctx.logger.info('Installing RabbitMQ...')
    utils.set_selinux_permissive()

    utils.copy_notice(SERVICE_NAME)
    utils.mkdir(LOG_DIR)

    utils.yum_install(erlang_rpm_source_url, service_name=SERVICE_NAME)
    utils.yum_install(rabbitmq_rpm_source_url, service_name=SERVICE_NAME)

    utils.logrotate(SERVICE_NAME)

    utils.systemd.configure(SERVICE_NAME)

    ctx.logger.info('Configuring File Descriptors Limit...')
    utils.deploy_blueprint_resource(
        '{0}/rabbitmq_ulimit.conf'.format(CONFIG_PATH),
        FD_LIMIT_PATH,
        SERVICE_NAME)

    utils.deploy_blueprint_resource(
        '{0}/rabbitmq-definitions.json'.format(CONFIG_PATH),
        join(HOME_DIR, 'definitions.json'),
        SERVICE_NAME)

    # This stops rabbit from failing if the host name changes, e.g. when
    # a manager is deployed from an image but given a new hostname.
    # This is likely to cause problems with clustering of rabbitmq if this is
    # done at any point, so at that point a change to the file and cleaning of
    # mnesia would likely be necessary.
    utils.deploy_blueprint_resource(
        '{0}/rabbitmq-env.conf'.format(CONFIG_PATH),
        '/etc/rabbitmq/rabbitmq-env.conf',
        SERVICE_NAME)

    # Delete old mnesia node
    utils.sudo(['rm', '-rf', '/var/lib/rabbitmq/mnesia'])

    utils.systemd.systemctl('daemon-reload')
    utils.chown('rabbitmq', 'rabbitmq', LOG_DIR)

    # rabbitmq restart exits with 143 status code that is valid in this case.
    utils.systemd.restart(SERVICE_NAME, ignore_failure=True)

    time.sleep(10)
    utils.wait_for_port(5672)

    ctx.logger.info('Enabling RabbitMQ Plugins...')
    # Occasional timing issues with rabbitmq starting have resulted in
    # failures when first trying to enable plugins
    utils.sudo(['rabbitmq-plugins', 'enable', 'rabbitmq_management'],
               retries=5)
    utils.sudo(['rabbitmq-plugins', 'enable', 'rabbitmq_tracing'], retries=5)

    _clear_guest_permissions_if_guest_exists()
    _create_user_and_set_permissions(rabbitmq_username, rabbitmq_password)

    utils.deploy_blueprint_resource(
        '{0}/rabbitmq.config'.format(CONFIG_PATH),
        join(HOME_DIR, 'rabbitmq.config'),
        SERVICE_NAME,
        user_resource=True)

    utils.systemd.stop(SERVICE_NAME, retries=5)
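# `_clear_guest_permissions_if_guest_exists` is referenced above but not
# shown. A minimal sketch using standard `rabbitmqctl` sub-commands; the real
# helper presumably checks `rabbitmqctl list_users` first, which is skipped
# here since the default `guest` user exists on a fresh install anyway.
def _clear_guest_permissions_if_guest_exists(vhost='/'):
    ctx.logger.info('Disabling RabbitMQ guest user...')
    utils.sudo(['rabbitmqctl', 'clear_permissions', '-p', vhost, 'guest'])
    utils.sudo(['rabbitmqctl', 'delete_user', 'guest'])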
def install_logstash():
    logstash_unit_override = '/etc/systemd/system/logstash.service.d'
    logstash_source_url = ctx_properties['logstash_rpm_source_url']

    rabbitmq_username = ctx_properties['rabbitmq_username']
    rabbitmq_password = ctx_properties['rabbitmq_password']

    logstash_log_path = '/var/log/cloudify/logstash'
    logstash_conf_path = '/etc/logstash/conf.d'

    # injected as an input to the script
    ctx.instance.runtime_properties['es_endpoint_ip'] = \
        os.environ['ES_ENDPOINT_IP']
    elasticsearch_props = utils.ctx_factory.get('elasticsearch')
    ctx.instance.runtime_properties['es_endpoint_port'] = \
        elasticsearch_props['es_endpoint_port']

    rabbit_props = utils.ctx_factory.get('rabbitmq')
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip(
            rabbit_props.get('rabbitmq_endpoint_ip'))
    ctx.instance.runtime_properties['rabbitmq_username'] = \
        rabbit_props['rabbitmq_username']
    ctx.instance.runtime_properties['rabbitmq_password'] = \
        rabbit_props['rabbitmq_password']

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by
    # the deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        ctx.abort_operation(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    ctx.logger.info('Installing Logstash...')
    utils.set_selinux_permissive()
    utils.copy_notice(LOGSTASH_SERVICE_NAME)

    utils.yum_install(logstash_source_url, service_name=LOGSTASH_SERVICE_NAME)

    utils.mkdir(logstash_log_path)
    utils.chown('logstash', 'logstash', logstash_log_path)

    ctx.logger.debug('Creating systemd unit override...')
    utils.mkdir(logstash_unit_override)
    utils.deploy_blueprint_resource(
        '{0}/restart.conf'.format(CONFIG_PATH),
        '{0}/restart.conf'.format(logstash_unit_override),
        LOGSTASH_SERVICE_NAME)

    ctx.logger.info('Deploying Logstash configuration...')
    utils.deploy_blueprint_resource(
        '{0}/logstash.conf'.format(CONFIG_PATH),
        '{0}/logstash.conf'.format(logstash_conf_path),
        LOGSTASH_SERVICE_NAME)

    # Due to a bug in the handling of configuration files,
    # configuration files with the same name cannot be deployed.
    # Since the logrotate config file is called `logstash`,
    # we change the name of the logstash env vars config file
    # from logstash to cloudify-logstash to be consistent with
    # other service env var files.
    init_file = '/etc/init.d/logstash'
    utils.replace_in_file(
        'sysconfig/\$name',
        'sysconfig/cloudify-$name',
        init_file)
    utils.chmod('755', init_file)
    utils.chown('root', 'root', init_file)

    ctx.logger.debug('Deploying Logstash sysconfig...')
    utils.deploy_blueprint_resource(
        '{0}/cloudify-logstash'.format(CONFIG_PATH),
        '/etc/sysconfig/cloudify-logstash',
        LOGSTASH_SERVICE_NAME)

    utils.logrotate(LOGSTASH_SERVICE_NAME)
    utils.sudo(['/sbin/chkconfig', 'logstash', 'on'])
    utils.clean_var_log_dir(LOGSTASH_SERVICE_NAME)
def install_mgmtworker():
    management_worker_rpm_source_url = \
        ctx_properties['management_worker_rpm_source_url']

    # these must all be exported as part of the start operation.
    # they will not persist, so we should use the new agent
    # don't forget to change all localhosts to the relevant ips
    mgmtworker_home = '/opt/mgmtworker'
    mgmtworker_venv = '{0}/env'.format(mgmtworker_home)
    celery_work_dir = '{0}/work'.format(mgmtworker_home)
    celery_log_dir = "/var/log/cloudify/mgmtworker"

    broker_port_ssl = '5671'
    broker_port_no_ssl = '5672'
    rabbitmq_ssl_enabled = ctx_properties['rabbitmq_ssl_enabled']
    ctx.logger.info("rabbitmq_ssl_enabled: {0}".format(rabbitmq_ssl_enabled))
    rabbitmq_cert_public = ctx_properties['rabbitmq_cert_public']

    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip(
            ctx_properties.get('rabbitmq_endpoint_ip'))

    # Fix possible injections in json of rabbit credentials
    # See json.org for string spec
    for key in ['rabbitmq_username', 'rabbitmq_password']:
        # We will not escape newlines or other control characters,
        # we will accept them breaking things noisily, e.g. on newlines
        # and backspaces.
        # TODO: add:
        # sed 's/"/\\"/' | sed 's/\\/\\\\/' | sed s-/-\\/- | sed 's/\t/\\t/'
        ctx.instance.runtime_properties[key] = ctx_properties[key]

    # Make the ssl enabled flag work with json (boolean in lower case)
    # TODO: check if still needed:
    # broker_ssl_enabled = "$(echo ${rabbitmq_ssl_enabled} | tr '[:upper:]' '[:lower:]')"  # NOQA
    ctx.instance.runtime_properties['rabbitmq_ssl_enabled'] = \
        rabbitmq_ssl_enabled

    ctx.logger.info('Installing Management Worker...')
    utils.set_selinux_permissive()
    utils.copy_notice(MGMT_WORKER_SERVICE_NAME)
    utils.mkdir(mgmtworker_home)
    utils.mkdir('{0}/config'.format(mgmtworker_home))
    utils.mkdir(celery_log_dir)
    utils.mkdir(celery_work_dir)

    # this creates the mgmtworker_venv and installs the relevant
    # modules into it.
    utils.yum_install(management_worker_rpm_source_url,
                      service_name=MGMT_WORKER_SERVICE_NAME)
    _install_optional(mgmtworker_venv)

    # Add certificate and select port, as applicable
    if rabbitmq_ssl_enabled:
        broker_cert_path = '{0}/amqp_pub.pem'.format(mgmtworker_home)
        utils.deploy_ssl_certificate(
            'public', broker_cert_path, 'root', rabbitmq_cert_public)
        ctx.instance.runtime_properties['broker_cert_path'] = broker_cert_path
        # Use SSL port
        ctx.instance.runtime_properties['broker_port'] = broker_port_ssl
    else:
        # No SSL, don't use SSL port
        ctx.instance.runtime_properties['broker_port'] = broker_port_no_ssl
        if rabbitmq_cert_public is not None:
            ctx.logger.warn('Broker SSL cert supplied but SSL not enabled '
                            '(broker_ssl_enabled is False).')

    ctx.logger.info("broker_port: {0}".format(
        ctx.instance.runtime_properties['broker_port']))
    ctx.logger.info('Configuring Management worker...')

    # Deploy the broker configuration
    # TODO: This will break interestingly if mgmtworker_venv is empty.
    # Some sort of check for that would be sensible.
    # To sandy: I don't quite understand this check...
    # there is no else here..
    # for python_path in ${mgmtworker_venv}/lib/python*; do
    if os.path.isfile(os.path.join(mgmtworker_venv, 'bin/python')):
        broker_conf_path = os.path.join(celery_work_dir, 'broker_config.json')
        utils.deploy_blueprint_resource(
            '{0}/broker_config.json'.format(CONFIG_PATH),
            broker_conf_path,
            MGMT_WORKER_SERVICE_NAME)
        # The config contains credentials, do not let the world read it
        utils.sudo(['chmod', '440', broker_conf_path])

    utils.systemd.configure(MGMT_WORKER_SERVICE_NAME)
    utils.logrotate(MGMT_WORKER_SERVICE_NAME)
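# For reference, a minimal sketch of the kind of broker_config.json the
# blueprint resource above is expected to render from the runtime properties
# set in this function. The field names and values here are illustrative
# assumptions -- the authoritative template ships with the blueprint:
import json

broker_config_example = {
    'broker_hostname': '10.0.0.1',   # rabbitmq_endpoint_ip
    'broker_username': 'cloudify',   # rabbitmq_username
    'broker_password': 'example',    # rabbitmq_password
    'broker_port': 5671,             # 5671 with SSL, 5672 without
    'broker_ssl_enabled': True,      # JSON boolean, hence the lower-casing note
    'broker_cert_path': '/opt/mgmtworker/amqp_pub.pem',
}
print(json.dumps(broker_config_example, indent=4))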
def install_riemann():
    langohr_source_url = ctx_properties['langohr_jar_source_url']
    daemonize_source_url = ctx_properties['daemonize_rpm_source_url']
    riemann_source_url = ctx_properties['riemann_rpm_source_url']
    rabbitmq_username = ctx_properties['rabbitmq_username']
    rabbitmq_password = ctx_properties['rabbitmq_password']

    utils.create_service_user(
        user=RIEMANN_USER,
        group=RIEMANN_GROUP,
        home=utils.CLOUDIFY_HOME_DIR
    )

    riemann_config_path = '/etc/riemann'
    riemann_log_path = '/var/log/cloudify/riemann'
    langohr_home = '/opt/lib'
    extra_classpath = '{0}/langohr.jar'.format(langohr_home)
    riemann_dir = '/opt/riemann'

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by
    # the deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        ctx.abort_operation(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    rabbit_props = utils.ctx_factory.get('rabbitmq')
    runtime_props['rabbitmq_endpoint_ip'] = utils.get_rabbitmq_endpoint_ip()
    runtime_props['rabbitmq_username'] = rabbit_props.get('rabbitmq_username')
    runtime_props['rabbitmq_password'] = rabbit_props.get('rabbitmq_password')

    ctx.logger.info('Installing Riemann...')
    utils.set_selinux_permissive()

    utils.copy_notice(RIEMANN_SERVICE_NAME)
    utils.mkdir(riemann_log_path)
    utils.mkdir(langohr_home)
    utils.mkdir(riemann_config_path)
    utils.mkdir('{0}/conf.d'.format(riemann_config_path))

    # utils.chown cannot be used as it will change both user and group
    utils.sudo(['chown', RIEMANN_USER, riemann_dir])

    langohr = utils.download_cloudify_resource(langohr_source_url,
                                               RIEMANN_SERVICE_NAME)
    utils.sudo(['cp', langohr, extra_classpath])
    ctx.logger.info('Applying Langohr permissions...')
    utils.sudo(['chmod', '644', extra_classpath])

    utils.yum_install(daemonize_source_url, service_name=RIEMANN_SERVICE_NAME)
    utils.yum_install(riemann_source_url, service_name=RIEMANN_SERVICE_NAME)
    utils.chown(RIEMANN_USER, RIEMANN_GROUP, riemann_log_path)

    utils.logrotate(RIEMANN_SERVICE_NAME)

    files_to_remove = [riemann_config_path, riemann_log_path, extra_classpath,
                       riemann_dir]
    runtime_props['files_to_remove'] = files_to_remove
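# The files_to_remove runtime property recorded above is presumably consumed
# by a matching uninstall/teardown operation. A minimal sketch of such a
# consumer, assuming it simply removes each recorded path with sudo (the
# function name is illustrative, not part of the original scripts):
def remove_riemann_files():
    for path in runtime_props.get('files_to_remove', []):
        ctx.logger.info('Removing {0}...'.format(path))
        utils.sudo(['rm', '-rf', path])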
def install_mgmtworker():
    management_worker_rpm_source_url = \
        ctx_properties['management_worker_rpm_source_url']

    # these must all be exported as part of the start operation.
    # they will not persist, so we should use the new agent
    # don't forget to change all localhosts to the relevant ips
    mgmtworker_home = '/opt/mgmtworker'
    mgmtworker_venv = '{0}/env'.format(mgmtworker_home)
    celery_work_dir = '{0}/work'.format(mgmtworker_home)
    celery_log_dir = "/var/log/cloudify/mgmtworker"

    broker_port_ssl = '5671'
    broker_port_no_ssl = '5672'
    rabbit_props = utils.ctx_factory.get('rabbitmq')
    rabbitmq_ssl_enabled = rabbit_props['rabbitmq_ssl_enabled']
    ctx.logger.info("rabbitmq_ssl_enabled: {0}".format(rabbitmq_ssl_enabled))
    rabbitmq_cert_public = rabbit_props['rabbitmq_cert_public']

    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip(
            rabbit_props.get('rabbitmq_endpoint_ip'))

    # Fix possible injections in json of rabbit credentials
    # See json.org for string spec
    for key in ['rabbitmq_username', 'rabbitmq_password']:
        # We will not escape newlines or other control characters,
        # we will accept them breaking things noisily, e.g. on newlines
        # and backspaces.
        # TODO: add:
        # sed 's/"/\\"/' | sed 's/\\/\\\\/' | sed s-/-\\/- | sed 's/\t/\\t/'
        ctx.instance.runtime_properties[key] = ctx_properties[key]

    # Make the ssl enabled flag work with json (boolean in lower case)
    # TODO: check if still needed:
    # broker_ssl_enabled = "$(echo ${rabbitmq_ssl_enabled} | tr '[:upper:]' '[:lower:]')"  # NOQA
    ctx.instance.runtime_properties['rabbitmq_ssl_enabled'] = \
        rabbitmq_ssl_enabled

    ctx.logger.info('Installing Management Worker...')
    utils.set_selinux_permissive()
    utils.copy_notice(MGMT_WORKER_SERVICE_NAME)
    utils.mkdir(mgmtworker_home)
    utils.mkdir('{0}/config'.format(mgmtworker_home))
    utils.mkdir(celery_log_dir)
    utils.mkdir(celery_work_dir)

    # this creates the mgmtworker_venv and installs the relevant
    # modules into it.
    utils.yum_install(management_worker_rpm_source_url,
                      service_name=MGMT_WORKER_SERVICE_NAME)
    _install_optional(mgmtworker_venv)

    # Add certificate and select port, as applicable
    if rabbitmq_ssl_enabled:
        broker_cert_path = '{0}/amqp_pub.pem'.format(mgmtworker_home)
        utils.deploy_ssl_certificate(
            'public', broker_cert_path, 'root', rabbitmq_cert_public)
        ctx.instance.runtime_properties['broker_cert_path'] = broker_cert_path
        # Use SSL port
        ctx.instance.runtime_properties['broker_port'] = broker_port_ssl
    else:
        # No SSL, don't use SSL port
        ctx.instance.runtime_properties['broker_port'] = broker_port_no_ssl
        if rabbitmq_cert_public is not None:
            ctx.logger.warn('Broker SSL cert supplied but SSL not enabled '
                            '(broker_ssl_enabled is False).')

    ctx.logger.info("broker_port: {0}".format(
        ctx.instance.runtime_properties['broker_port']))
    ctx.logger.info('Configuring Management worker...')

    # Deploy the broker configuration
    # TODO: This will break interestingly if mgmtworker_venv is empty.
    # Some sort of check for that would be sensible.
    # To sandy: I don't quite understand this check...
    # there is no else here..
    # for python_path in ${mgmtworker_venv}/lib/python*; do
    if os.path.isfile(os.path.join(mgmtworker_venv, 'bin/python')):
        broker_conf_path = os.path.join(celery_work_dir, 'broker_config.json')
        utils.deploy_blueprint_resource(
            '{0}/broker_config.json'.format(CONFIG_PATH),
            broker_conf_path,
            MGMT_WORKER_SERVICE_NAME)
        # The config contains credentials, do not let the world read it
        utils.sudo(['chmod', '440', broker_conf_path])

    utils.systemd.configure(MGMT_WORKER_SERVICE_NAME)
    utils.logrotate(MGMT_WORKER_SERVICE_NAME)
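# _install_optional(mgmtworker_venv) is called by both install_mgmtworker
# variants but is not defined in this section. A minimal sketch, assuming it
# pip-installs optional source packages into the venv when their URLs are
# provided as node properties; the property names and the use of the venv's
# own pip are assumptions, not the project's actual implementation:
def _install_optional(venv):
    venv_pip = os.path.join(venv, 'bin', 'pip')
    optional_sources = [
        ctx_properties.get('rest_client_module_source_url'),
        ctx_properties.get('plugins_common_module_source_url'),
    ]
    for source_url in optional_sources:
        if source_url:
            ctx.logger.info('Installing {0} into {1}...'.format(
                source_url, venv))
            utils.sudo([venv_pip, 'install', '--upgrade', source_url])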
def _install_elasticsearch():
    es_java_opts = ctx_properties['es_java_opts']
    es_heap_size = ctx_properties['es_heap_size']

    es_source_url = ctx_properties['es_rpm_source_url']
    es_curator_rpm_source_url = \
        ctx_properties['es_curator_rpm_source_url']

    # this will be used only if elasticsearch-curator is not installed via
    # an rpm and an internet connection is available
    es_curator_version = "3.2.3"

    es_home = "/opt/elasticsearch"
    es_logs_path = "/var/log/cloudify/elasticsearch"
    es_conf_path = "/etc/elasticsearch"
    es_unit_override = "/etc/systemd/system/elasticsearch.service.d"
    es_scripts_path = os.path.join(es_conf_path, 'scripts')

    ctx.logger.info('Installing Elasticsearch...')
    utils.set_selinux_permissive()

    utils.copy_notice('elasticsearch')
    utils.mkdir(es_home)
    utils.mkdir(es_logs_path)

    utils.yum_install(es_source_url, service_name=ES_SERVICE_NAME)

    ctx.logger.info('Chowning {0} by elasticsearch user...'.format(
        es_logs_path))
    utils.chown('elasticsearch', 'elasticsearch', es_logs_path)

    ctx.logger.info('Creating systemd unit override...')
    utils.mkdir(es_unit_override)
    utils.deploy_blueprint_resource(
        os.path.join(CONFIG_PATH, 'restart.conf'),
        os.path.join(es_unit_override, 'restart.conf'),
        ES_SERVICE_NAME)

    ctx.logger.info('Deploying Elasticsearch Configuration...')
    utils.deploy_blueprint_resource(
        os.path.join(CONFIG_PATH, 'elasticsearch.yml'),
        os.path.join(es_conf_path, 'elasticsearch.yml'),
        ES_SERVICE_NAME)
    utils.chown('elasticsearch', 'elasticsearch',
                os.path.join(es_conf_path, 'elasticsearch.yml'))

    ctx.logger.info('Deploying elasticsearch logging configuration file...')
    utils.deploy_blueprint_resource(
        os.path.join(CONFIG_PATH, 'logging.yml'),
        os.path.join(es_conf_path, 'logging.yml'),
        ES_SERVICE_NAME)
    utils.chown('elasticsearch', 'elasticsearch',
                os.path.join(es_conf_path, 'logging.yml'))

    ctx.logger.info('Creating Elasticsearch scripts folder and '
                    'additional external Elasticsearch scripts...')
    utils.mkdir(es_scripts_path)
    utils.deploy_blueprint_resource(
        os.path.join(CONFIG_PATH, 'scripts', 'append.groovy'),
        os.path.join(es_scripts_path, 'append.groovy'),
        ES_SERVICE_NAME
    )

    ctx.logger.info('Setting Elasticsearch Heap Size...')
    # we should treat these as templates.
    utils.replace_in_file(
        '(?:#|)ES_HEAP_SIZE=(.*)',
        'ES_HEAP_SIZE={0}'.format(es_heap_size),
        '/etc/sysconfig/elasticsearch')

    if es_java_opts:
        ctx.logger.info('Setting additional JAVA_OPTS...')
        utils.replace_in_file(
            '(?:#|)ES_JAVA_OPTS=(.*)',
            'ES_JAVA_OPTS={0}'.format(es_java_opts),
            '/etc/sysconfig/elasticsearch')

    ctx.logger.info('Setting Elasticsearch logs path...')
    utils.replace_in_file(
        '(?:#|)LOG_DIR=(.*)',
        'LOG_DIR={0}'.format(es_logs_path),
        '/etc/sysconfig/elasticsearch')
    utils.replace_in_file(
        '(?:#|)ES_GC_LOG_FILE=(.*)',
        'ES_GC_LOG_FILE={0}'.format(os.path.join(es_logs_path, 'gc.log')),
        '/etc/sysconfig/elasticsearch')
    utils.logrotate(ES_SERVICE_NAME)

    ctx.logger.info('Installing Elasticsearch Curator...')
    if not es_curator_rpm_source_url:
        ctx.install_python_package('elasticsearch-curator=={0}'.format(
            es_curator_version))
    else:
        utils.yum_install(es_curator_rpm_source_url,
                          service_name=ES_SERVICE_NAME)

    _configure_index_rotation()

    # elasticsearch provides a systemd init env. we just enable it.
    utils.systemd.enable(ES_SERVICE_NAME, append_prefix=False)
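# _configure_index_rotation() is referenced above but not defined in this
# section. A minimal sketch, assuming rotation is driven by a daily cron
# entry that invokes elasticsearch-curator; the resource name, target path
# and schedule are assumptions, not the blueprint's actual files:
def _configure_index_rotation():
    ctx.logger.info('Configuring Elasticsearch index rotation...')
    rotator_script = '/etc/cron.daily/rotate_es_indices'
    # The deployed script would run something like
    # `curator --host localhost delete indices --older-than 7` to prune
    # old logstash-* indices.
    utils.deploy_blueprint_resource(
        os.path.join(CONFIG_PATH, 'scripts', 'rotate_es_indices'),
        rotator_script,
        ES_SERVICE_NAME)
    utils.chmod('755', rotator_script)
    utils.chown('root', 'root', rotator_script)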
def install_logstash():
    logstash_unit_override = '/etc/systemd/system/logstash.service.d'
    logstash_source_url = ctx_properties['logstash_rpm_source_url']

    rabbitmq_username = ctx_properties['rabbitmq_username']
    rabbitmq_password = ctx_properties['rabbitmq_password']

    logstash_log_path = '/var/log/cloudify/logstash'
    logstash_conf_path = '/etc/logstash/conf.d'

    # injected as an input to the script
    ctx.instance.runtime_properties['es_endpoint_ip'] = \
        os.environ['ES_ENDPOINT_IP']
    elasticsearch_props = utils.ctx_factory.get('elasticsearch')
    ctx.instance.runtime_properties['es_endpoint_port'] = \
        elasticsearch_props['es_endpoint_port']

    rabbit_props = utils.ctx_factory.get('rabbitmq')
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip(
            rabbit_props.get('rabbitmq_endpoint_ip'))
    ctx.instance.runtime_properties['rabbitmq_username'] = \
        rabbit_props['rabbitmq_username']
    ctx.instance.runtime_properties['rabbitmq_password'] = \
        rabbit_props['rabbitmq_password']

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by
    # the deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        ctx.abort_operation(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    ctx.logger.info('Installing Logstash...')
    utils.set_selinux_permissive()
    utils.copy_notice(LOGSTASH_SERVICE_NAME)

    utils.yum_install(logstash_source_url, service_name=LOGSTASH_SERVICE_NAME)

    utils.mkdir(logstash_log_path)
    utils.chown('logstash', 'logstash', logstash_log_path)

    ctx.logger.info('Creating systemd unit override...')
    utils.mkdir(logstash_unit_override)
    utils.deploy_blueprint_resource(
        '{0}/restart.conf'.format(CONFIG_PATH),
        '{0}/restart.conf'.format(logstash_unit_override),
        LOGSTASH_SERVICE_NAME)

    ctx.logger.info('Deploying Logstash conf...')
    utils.deploy_blueprint_resource(
        '{0}/logstash.conf'.format(CONFIG_PATH),
        '{0}/logstash.conf'.format(logstash_conf_path),
        LOGSTASH_SERVICE_NAME)

    # Due to a bug in the handling of configuration files,
    # configuration files with the same name cannot be deployed.
    # Since the logrotate config file is called `logstash`,
    # we change the name of the logstash env vars config file
    # from logstash to cloudify-logstash to be consistent with
    # other service env var files.
    init_file = '/etc/init.d/logstash'
    utils.replace_in_file(
        'sysconfig/\$name',
        'sysconfig/cloudify-$name',
        init_file)
    utils.chmod('755', init_file)
    utils.chown('root', 'root', init_file)

    ctx.logger.info('Deploying Logstash sysconfig...')
    utils.deploy_blueprint_resource(
        '{0}/cloudify-logstash'.format(CONFIG_PATH),
        '/etc/sysconfig/cloudify-logstash',
        LOGSTASH_SERVICE_NAME)

    utils.logrotate(LOGSTASH_SERVICE_NAME)
    utils.sudo(['/sbin/chkconfig', 'logstash', 'on'])
    utils.clean_var_log_dir(LOGSTASH_SERVICE_NAME)