def install_restservice():
    """Install the Cloudify REST service RPM and deploy its configuration."""
    rpm_url = ctx_properties['rest_service_rpm_source_url']
    venv = os.path.join(REST_SERVICE_HOME, 'env')
    log_dir = '/var/log/cloudify/rest'

    ctx.logger.info('Installing REST Service...')
    utils.set_selinux_permissive()
    utils.copy_notice(REST_SERVICE_NAME)

    # Create the service home, log and fileserver directories up front.
    utils.mkdir(REST_SERVICE_HOME)
    utils.mkdir(log_dir)
    utils.mkdir(MANAGER_RESOURCES_HOME)

    deploy_broker_configuration()
    utils.yum_install(rpm_url, service_name=REST_SERVICE_NAME)
    _configure_dbus(venv)
    install_optional(venv)
    utils.logrotate(REST_SERVICE_NAME)

    ctx.logger.info('Deploying REST Service Configuration file...')
    # rest ports are set as runtime properties in nginx/scripts/create.py
    # cloudify-rest.conf currently contains localhost for fileserver endpoint.
    # We need to change that if we want to deploy nginx on another machine.
    utils.deploy_blueprint_resource(
        os.path.join(CONFIG_PATH, 'cloudify-rest.conf'),
        os.path.join(REST_SERVICE_HOME, 'cloudify-rest.conf'),
        REST_SERVICE_NAME)
def _set_security(rabbitmq_ssl_enabled, rabbitmq_cert_private,
                  rabbitmq_cert_public):
    """Deploy the rabbitmq.config matching the requested SSL mode.

    With SSL enabled, both certificates are required (aborts otherwise);
    with SSL disabled, the plain config is deployed and a stray certificate
    only triggers a warning.
    """
    if not rabbitmq_ssl_enabled:
        # Plain (non-SSL) broker configuration.
        utils.deploy_blueprint_resource(
            '{0}/rabbitmq.config-nossl'.format(CONFIG_PATH),
            '/etc/rabbitmq/rabbitmq.config',
            RABBITMQ_SERVICE_NAME, user_resource=True)
        if rabbitmq_cert_private or rabbitmq_cert_public:
            ctx.logger.warn('Broker SSL cert supplied but SSL not enabled '
                            '(broker_ssl_enabled is False).')
        return

    # SSL requested: refuse to continue unless both halves were supplied.
    if not (rabbitmq_cert_private and rabbitmq_cert_public):
        ctx.abort_operation('When providing a certificate for rabbitmq, '
                            'both public and private certificates must be '
                            'supplied.')
        return

    utils.deploy_ssl_certificate(
        'private', '/etc/rabbitmq/rabbit-priv.pem',
        'rabbitmq', rabbitmq_cert_private)
    utils.deploy_ssl_certificate(
        'public', '/etc/rabbitmq/rabbit-pub.pem',
        'rabbitmq', rabbitmq_cert_public)
    # Configure for SSL
    utils.deploy_blueprint_resource(
        '{0}/rabbitmq.config-ssl'.format(CONFIG_PATH),
        '/etc/rabbitmq/rabbitmq.config',
        RABBITMQ_SERVICE_NAME, user_resource=True)
def install_logstash():
    """Install logstash as a systemd service."""
    rpm_url = ctx_properties['logstash_rpm_source_url']
    unit_override_dir = '/etc/systemd/system/logstash.service.d'
    log_dir = '/var/log/cloudify/logstash'

    ctx.logger.info('Installing Logstash...')
    utils.set_selinux_permissive()
    utils.copy_notice(LOGSTASH_SERVICE_NAME)
    utils.yum_install(rpm_url, service_name=LOGSTASH_SERVICE_NAME)

    # Plugins required by the logstash pipeline configuration.
    install_logstash_output_jdbc_plugin()
    install_postgresql_jdbc_driver()

    utils.mkdir(log_dir)
    utils.chown('logstash', 'logstash', log_dir)

    ctx.logger.debug('Creating systemd unit override...')
    utils.mkdir(unit_override_dir)
    utils.deploy_blueprint_resource(
        '{0}/restart.conf'.format(CONFIG_PATH),
        '{0}/restart.conf'.format(unit_override_dir),
        LOGSTASH_SERVICE_NAME)
def install_logstash():
    """Install logstash as a systemd service.

    Installs the RPM and JDBC plugins, prepares the log directory and
    adds a systemd restart override for the service.
    NOTE(review): appears to duplicate an identical definition earlier in
    this source — confirm which copy is live.
    """
    logstash_unit_override = '/etc/systemd/system/logstash.service.d'
    logstash_source_url = ctx_properties['logstash_rpm_source_url']
    logstash_log_path = '/var/log/cloudify/logstash'
    ctx.logger.info('Installing Logstash...')
    utils.set_selinux_permissive()
    utils.copy_notice(LOGSTASH_SERVICE_NAME)
    utils.yum_install(logstash_source_url,
                      service_name=LOGSTASH_SERVICE_NAME)
    # Plugins required by the deployed logstash pipeline.
    install_logstash_output_jdbc_plugin()
    install_postgresql_jdbc_driver()
    utils.mkdir(logstash_log_path)
    # The RPM creates the 'logstash' user; hand it the log directory.
    utils.chown('logstash', 'logstash', logstash_log_path)
    ctx.logger.debug('Creating systemd unit override...')
    utils.mkdir(logstash_unit_override)
    utils.deploy_blueprint_resource(
        '{0}/restart.conf'.format(CONFIG_PATH),
        '{0}/restart.conf'.format(logstash_unit_override),
        LOGSTASH_SERVICE_NAME)
def _install_influxdb():
    """Install InfluxDB from its RPM and deploy config.toml (legacy variant)."""
    rpm_url = ctx.node.properties['influxdb_rpm_source_url']
    user = '******'
    group = 'influxdb'
    home_dir = '/opt/influxdb'
    log_dir = '/var/log/cloudify/influxdb'

    ctx.logger.info('Installing InfluxDB...')
    utils.set_selinux_permissive()
    utils.copy_notice('influxdb')
    utils.mkdir(home_dir)
    utils.mkdir(log_dir)
    utils.yum_install(rpm_url)
    # The package ships a SysV init script; the service is driven by systemd.
    utils.sudo(['rm', '-rf', '/etc/init.d/influxdb'])

    ctx.logger.info('Deploying InfluxDB config.toml...')
    utils.deploy_blueprint_resource(
        '{0}/config.toml'.format(CONFIG_PATH),
        '{0}/shared/config.toml'.format(home_dir))

    ctx.logger.info('Fixing user permissions...')
    utils.chown(user, group, home_dir)
    utils.chown(user, group, log_dir)

    utils.logrotate('influxdb')
    utils.systemd.configure('influxdb')
def _install_influxdb():
    """Install InfluxDB from its RPM and deploy its configuration.

    Uses module-level SERVICE_NAME / HOME_DIR / LOG_DIR / INIT_D_PATH
    constants (defined elsewhere in this file).
    """
    influxdb_source_url = ctx_properties['influxdb_rpm_source_url']
    # Service account owning the home and log dirs (name masked in source).
    influxdb_user = '******'
    influxdb_group = 'influxdb'
    ctx.logger.info('Installing InfluxDB...')
    utils.set_selinux_permissive()
    utils.copy_notice(SERVICE_NAME)
    utils.mkdir(HOME_DIR)
    utils.mkdir(LOG_DIR)
    utils.yum_install(influxdb_source_url, service_name=SERVICE_NAME)
    ctx.logger.info('Deploying InfluxDB configuration...')
    utils.deploy_blueprint_resource('{0}/config.toml'.format(CONFIG_PATH),
                                    '{0}/shared/config.toml'.format(HOME_DIR),
                                    SERVICE_NAME)
    utils.chown(influxdb_user, influxdb_group, HOME_DIR)
    utils.chown(influxdb_user, influxdb_group, LOG_DIR)
    utils.systemd.configure(SERVICE_NAME)
    # Provided with InfluxDB's package. Will be removed if it exists.
    utils.remove(INIT_D_PATH)
    utils.logrotate(SERVICE_NAME)
def configure_riemann():
    """Deploy Riemann's main.clj, fix ownership and configure systemd."""
    ctx.logger.info('Deploying Riemann conf...')
    utils.deploy_blueprint_resource(
        '{0}/main.clj'.format(CONFIG_PATH),
        '{0}/main.clj'.format(RIEMANN_CONFIG_PATH),
        SERVICE_NAME)
    # Hand the config dir to the service account recorded in runtime props.
    utils.chown(
        runtime_props['service_user'],
        runtime_props['service_group'],
        RIEMANN_CONFIG_PATH
    )
    # our riemann configuration will (by default) try to read these environment
    # variables. If they don't exist, it will assume
    # that they're found at "localhost"
    # export REST_HOST=""
    # export RABBITMQ_HOST=""
    # we inject the management_ip for both of these to Riemann's systemd
    # config.
    # These should be potentially different
    # if the manager and rabbitmq are running on different hosts.
    utils.systemd.configure(SERVICE_NAME)
    utils.clean_var_log_dir(SERVICE_NAME)
def _configure_influxdb(host, port):
    """Create the 'cloudify' database in InfluxDB with a retention policy.

    Idempotent: returns early if the database already exists, and verifies
    creation afterwards (aborting the operation on failure).

    :param host: InfluxDB API host.
    :param port: InfluxDB API port.
    """
    db_user = "******"
    db_pass = "******"
    db_name = "cloudify"

    ctx.logger.info('Creating InfluxDB Database...')

    # the below request is equivalent to running:
    # curl -S -s "http://localhost:8086/db?u=root&p=root" '-d "{\"name\": \"cloudify\"}"  # NOQA
    import urllib
    import urllib2
    import ast

    def _list_databases(url):
        # The API responds with JSON; parse it instead of eval()-ing
        # network data (eval would also execute arbitrary expressions).
        return json.loads(urllib2.urlopen(urllib2.Request(url)).read())

    endpoint_for_list = 'http://{0}:{1}/db'.format(host, port)
    endpoint_for_creation = ('http://{0}:{1}/cluster/database_configs/'
                             '{2}'.format(host, port, db_name))
    params = urllib.urlencode(dict(u=db_user, p=db_pass))
    url_for_list = endpoint_for_list + '?' + params
    url_for_creation = endpoint_for_creation + '?' + params

    # check if db already exists (explicit test, not assert: asserts are
    # stripped under `python -O`)
    if any(d.get('name') == db_name for d in _list_databases(url_for_list)):
        ctx.logger.info('Database {0} already exists!'.format(db_name))
        return

    try:
        utils.deploy_blueprint_resource(
            '{0}/retention.json'.format(CONFIG_PATH),
            '/tmp/retention.json', INFLUX_SERVICE_NAME)
        with open('/tmp/retention.json') as policy_file:
            retention_policy = policy_file.read()
        ctx.logger.debug(
            'Using retention policy: \n{0}'.format(retention_policy))
        # The deployed resource is python-literal-ish; literal_eval then
        # re-dump as strict JSON for the API.
        data = json.dumps(ast.literal_eval(retention_policy))
        ctx.logger.debug('Using retention policy: \n{0}'.format(data))
        content_length = len(data)
        request = urllib2.Request(url_for_creation, data, {
            'Content-Type': 'application/json',
            'Content-Length': content_length})
        ctx.logger.debug('Request is: {0}'.format(request))
        request_reader = urllib2.urlopen(request)
        response = request_reader.read()
        ctx.logger.debug('Response: {0}'.format(response))
        request_reader.close()
        utils.remove('/tmp/retention.json')
    except Exception as ex:
        ctx.abort_operation('Failed to create: {0} ({1}).'.format(db_name, ex))

    # verify db created
    ctx.logger.info('Verifying database created successfully...')
    if not any(d.get('name') == db_name
               for d in _list_databases(url_for_list)):
        ctx.abort_operation('Verification failed!')
    ctx.logger.info('Database {0} created successfully.'.format(db_name))
def _set_security(rabbitmq_ssl_enabled, rabbitmq_cert_private,
                  rabbitmq_cert_public):
    """Deploy the rabbitmq.config matching the requested SSL mode.

    :param rabbitmq_ssl_enabled: whether the broker should use SSL.
    :param rabbitmq_cert_private: private certificate contents (or falsy).
    :param rabbitmq_cert_public: public certificate contents (or falsy).
    """
    # Deploy certificates if both have been provided.
    # Complain loudly if one has been provided and the other hasn't.
    if rabbitmq_ssl_enabled:
        if rabbitmq_cert_private and rabbitmq_cert_public:
            utils.deploy_ssl_certificate(
                'private', '/etc/rabbitmq/rabbit-priv.pem',
                'rabbitmq', rabbitmq_cert_private)
            utils.deploy_ssl_certificate(
                'public', '/etc/rabbitmq/rabbit-pub.pem',
                'rabbitmq', rabbitmq_cert_public)
            # Configure for SSL
            utils.deploy_blueprint_resource(
                '{0}/rabbitmq.config-ssl'.format(CONFIG_PATH),
                '/etc/rabbitmq/rabbitmq.config',
                RABBITMQ_SERVICE_NAME, user_resource=True)
        else:
            # One certificate without the other is a configuration error.
            ctx.abort_operation('When providing a certificate for rabbitmq, '
                                'both public and private certificates must be '
                                'supplied.')
    else:
        utils.deploy_blueprint_resource(
            '{0}/rabbitmq.config-nossl'.format(CONFIG_PATH),
            '/etc/rabbitmq/rabbitmq.config',
            RABBITMQ_SERVICE_NAME, user_resource=True)
        if rabbitmq_cert_private or rabbitmq_cert_public:
            ctx.logger.warn('Broker SSL cert supplied but SSL not enabled '
                            '(broker_ssl_enabled is False).')
def configure_riemann():
    """Deploy Riemann's manager.config and main.clj, then configure systemd."""
    conf_dir = '/etc/riemann'

    ctx.logger.info('Deploying Riemann manager.config...')
    utils.move(
        '/tmp/plugins/riemann-controller/riemann_controller/resources/manager.config',  # NOQA
        '{0}/conf.d/manager.config'.format(conf_dir))

    ctx.logger.info('Deploying Riemann conf...')
    utils.deploy_blueprint_resource(
        '{0}/main.clj'.format(CONFIG_PATH),
        '{0}/main.clj'.format(conf_dir),
        RIEMANN_SERVICE_NAME)

    # our riemann configuration will (by default) try to read these environment
    # variables. If they don't exist, it will assume
    # that they're found at "localhost"
    # export REST_HOST=""
    # export RABBITMQ_HOST=""
    # we inject the management_ip for both of these to Riemann's systemd
    # config. These should be potentially different
    # if the manager and rabbitmq are running on different hosts.
    utils.systemd.configure(RIEMANN_SERVICE_NAME)
    utils.clean_var_log_dir(RIEMANN_SERVICE_NAME)
def _install_influxdb():
    """Install the InfluxDB RPM, deploy config.toml and configure systemd."""
    rpm_url = ctx_properties['influxdb_rpm_source_url']
    user = '******'
    group = 'influxdb'
    home_dir = '/opt/influxdb'
    log_dir = '/var/log/cloudify/influxdb'

    ctx.logger.info('Installing InfluxDB...')
    utils.set_selinux_permissive()
    utils.copy_notice(INFLUX_SERVICE_NAME)
    utils.mkdir(home_dir)
    utils.mkdir(log_dir)
    utils.yum_install(rpm_url, service_name=INFLUX_SERVICE_NAME)
    # The package installs a SysV script; the service is run via systemd.
    utils.sudo(['rm', '-rf', '/etc/init.d/influxdb'])

    ctx.logger.info('Deploying InfluxDB config.toml...')
    utils.deploy_blueprint_resource(
        '{0}/config.toml'.format(CONFIG_PATH),
        '{0}/shared/config.toml'.format(home_dir),
        INFLUX_SERVICE_NAME)

    ctx.logger.info('Fixing user permissions...')
    utils.chown(user, group, home_dir)
    utils.chown(user, group, log_dir)

    utils.systemd.configure(INFLUX_SERVICE_NAME)
    utils.logrotate(INFLUX_SERVICE_NAME)
def _install_rabbitmq():
    """Install Erlang + RabbitMQ, configure the service and broker security.

    Installs the RPMs, deploys the kill-rabbit helper and ulimit config,
    starts the service once to enable plugins and set users/permissions,
    then stops it again (final start is handled elsewhere).
    """
    erlang_rpm_source_url = ctx.node.properties['erlang_rpm_source_url']
    rabbitmq_rpm_source_url = ctx.node.properties['rabbitmq_rpm_source_url']
    # TODO: maybe we don't need this env var
    # os.putenv() does NOT update os.environ, so anything in this process
    # reading os.environ would miss the value; assigning os.environ both
    # updates the mapping and exports to child processes.
    os.environ['RABBITMQ_FD_LIMIT'] = \
        str(ctx.node.properties['rabbitmq_fd_limit'])
    rabbitmq_log_path = '/var/log/cloudify/rabbitmq'
    rabbitmq_username = ctx.node.properties['rabbitmq_username']
    rabbitmq_password = ctx.node.properties['rabbitmq_password']
    rabbitmq_cert_public = ctx.node.properties['rabbitmq_cert_public']
    rabbitmq_ssl_enabled = ctx.node.properties['rabbitmq_ssl_enabled']
    rabbitmq_cert_private = ctx.node.properties['rabbitmq_cert_private']

    ctx.logger.info('Installing RabbitMQ...')
    utils.set_selinux_permissive()
    utils.copy_notice('rabbitmq')
    utils.mkdir(rabbitmq_log_path)
    utils.yum_install(erlang_rpm_source_url)
    utils.yum_install(rabbitmq_rpm_source_url)
    utils.logrotate('rabbitmq')

    # Helper script used to forcibly terminate the broker.
    utils.deploy_blueprint_resource(
        '{0}/kill-rabbit'.format(CONFIG_PATH),
        '/usr/local/bin/kill-rabbit')
    utils.chmod('500', '/usr/local/bin/kill-rabbit')

    utils.systemd.configure('rabbitmq')

    ctx.logger.info('Configuring File Descriptors Limit...')
    utils.deploy_blueprint_resource(
        '{0}/rabbitmq_ulimit.conf'.format(CONFIG_PATH),
        '/etc/security/limits.d/rabbitmq.conf')
    utils.systemd.systemctl('daemon-reload')
    utils.chown('rabbitmq', 'rabbitmq', rabbitmq_log_path)
    utils.systemd.start('cloudify-rabbitmq')
    # Give the broker a moment to come up before probing its port.
    time.sleep(10)
    utils.wait_for_port(5672)

    ctx.logger.info('Enabling RabbitMQ Plugins...')
    # Occasional timing issues with rabbitmq starting have resulted in
    # failures when first trying to enable plugins
    utils.sudo(['rabbitmq-plugins', 'enable', 'rabbitmq_management'],
               retries=5)
    utils.sudo(['rabbitmq-plugins', 'enable', 'rabbitmq_tracing'], retries=5)

    _clear_guest_permissions_if_guest_exists()
    _create_user_and_set_permissions(rabbitmq_username, rabbitmq_password)
    _set_security(
        rabbitmq_ssl_enabled,
        rabbitmq_cert_private,
        rabbitmq_cert_public)

    utils.systemd.stop('cloudify-rabbitmq', retries=5)
def configure_riemann():
    """Deploy Riemann's main.clj, fix ownership and configure systemd."""
    ctx.logger.info('Deploying Riemann conf...')
    src = '{0}/main.clj'.format(CONFIG_PATH)
    dst = '{0}/main.clj'.format(RIEMANN_CONFIG_PATH)
    utils.deploy_blueprint_resource(src, dst, SERVICE_NAME)

    owner = runtime_props['service_user']
    group = runtime_props['service_group']
    utils.chown(owner, group, RIEMANN_CONFIG_PATH)

    # our riemann configuration will (by default) try to read these environment
    # variables. If they don't exist, it will assume
    # that they're found at "localhost"
    # export REST_HOST=""
    # export RABBITMQ_HOST=""
    # we inject the management_ip for both of these to Riemann's systemd
    # config. These should be potentially different
    # if the manager and rabbitmq are running on different hosts.
    utils.systemd.configure(SERVICE_NAME)
    utils.clean_var_log_dir(SERVICE_NAME)
def _install_influxdb():
    """Install the InfluxDB RPM, deploy config.toml and configure systemd.

    NOTE(review): removes /etc/init.d/influxdb twice (sudo rm -rf and
    utils.remove) — presumably redundant; confirm and drop one.
    """
    influxdb_source_url = ctx_properties['influxdb_rpm_source_url']
    # Service account owning the home and log dirs (name masked in source).
    influxdb_user = '******'
    influxdb_group = 'influxdb'
    influxdb_home = '/opt/influxdb'
    influxdb_log_path = '/var/log/cloudify/influxdb'
    ctx.logger.info('Installing InfluxDB...')
    utils.set_selinux_permissive()
    utils.copy_notice(INFLUX_SERVICE_NAME)
    utils.mkdir(influxdb_home)
    utils.mkdir(influxdb_log_path)
    utils.yum_install(influxdb_source_url, service_name=INFLUX_SERVICE_NAME)
    utils.sudo(['rm', '-rf', '/etc/init.d/influxdb'])
    ctx.logger.info('Deploying InfluxDB config.toml...')
    utils.deploy_blueprint_resource(
        '{0}/config.toml'.format(CONFIG_PATH),
        '{0}/shared/config.toml'.format(influxdb_home),
        INFLUX_SERVICE_NAME)
    ctx.logger.info('Fixing user permissions...')
    utils.chown(influxdb_user, influxdb_group, influxdb_home)
    utils.chown(influxdb_user, influxdb_group, influxdb_log_path)
    utils.systemd.configure(INFLUX_SERVICE_NAME)
    # Provided with InfluxDB's package. Will be removed if it exists.
    utils.remove('/etc/init.d/influxdb')
    utils.logrotate(INFLUX_SERVICE_NAME)
def configure_riemann():
    """Deploy Riemann's manager.config and main.clj, then configure systemd."""
    riemann_config_path = '/etc/riemann'
    ctx.logger.info('Deploying Riemann manager.config...')
    # manager.config is extracted from the riemann-controller plugin archive.
    utils.move(
        '/tmp/plugins/riemann-controller/riemann_controller/resources/manager.config',  # NOQA
        '{0}/conf.d/manager.config'.format(riemann_config_path))
    ctx.logger.info('Deploying Riemann conf...')
    utils.deploy_blueprint_resource(
        '{0}/main.clj'.format(CONFIG_PATH),
        '{0}/main.clj'.format(riemann_config_path),
        RIEMANN_SERVICE_NAME)
    # our riemann configuration will (by default) try to read these environment
    # variables. If they don't exist, it will assume
    # that they're found at "localhost"
    # export REST_HOST=""
    # export RABBITMQ_HOST=""
    # we inject the management_ip for both of these to Riemann's systemd
    # config.
    # These should be potentially different
    # if the manager and rabbitmq are running on different hosts.
    utils.systemd.configure(RIEMANN_SERVICE_NAME)
    utils.clean_var_log_dir(RIEMANN_SERVICE_NAME)
def install_webui():
    """Install NodeJS, the Cloudify WebUI and Grafana, then configure them."""
    nodejs_source_url = ctx.node.properties['nodejs_tar_source_url']
    webui_source_url = ctx.node.properties['webui_tar_source_url']
    grafana_source_url = ctx.node.properties['grafana_tar_source_url']

    # injected as an input to the script
    ctx.instance.runtime_properties['influxdb_endpoint_ip'] = \
        os.environ.get('INFLUXDB_ENDPOINT_IP')

    node_home = '/opt/nodejs'
    ui_home = '/opt/cloudify-ui'
    ui_log_dir = '/var/log/cloudify/webui'
    grafana_dir = '{0}/grafana'.format(ui_home)
    svc_user = '******'
    svc_group = 'webui'

    ctx.logger.info('Installing Cloudify\'s WebUI...')
    utils.set_selinux_permissive()
    utils.copy_notice('webui')

    # Prepare all target directories before unpacking anything.
    utils.mkdir(node_home)
    utils.mkdir(ui_home)
    utils.mkdir('{0}/backend'.format(ui_home))
    utils.mkdir(ui_log_dir)
    utils.mkdir(grafana_dir)
    utils.create_service_user(svc_user, ui_home)

    ctx.logger.info('Installing NodeJS...')
    nodejs = utils.download_file(nodejs_source_url)
    utils.untar(nodejs, node_home)

    ctx.logger.info('Installing Cloudify\'s WebUI...')
    webui = utils.download_file(webui_source_url)
    utils.untar(webui, ui_home)

    ctx.logger.info('Installing Grafana...')
    grafana = utils.download_file(grafana_source_url)
    utils.untar(grafana, grafana_dir)

    ctx.logger.info('Deploying WebUI Configuration...')
    utils.deploy_blueprint_resource(
        '{0}/gsPresets.json'.format(CONFIG_PATH),
        '{0}/backend/gsPresets.json'.format(ui_home))
    ctx.logger.info('Deploying Grafana Configuration...')
    utils.deploy_blueprint_resource(
        '{0}/grafana_config.js'.format(CONFIG_PATH),
        '{0}/config.js'.format(grafana_dir))

    ctx.logger.info('Fixing permissions...')
    utils.chown(svc_user, svc_group, ui_home)
    utils.chown(svc_user, svc_group, node_home)
    utils.chown(svc_user, svc_group, ui_log_dir)

    utils.logrotate('webui')
    utils.systemd.configure('webui')
def install_webui():
    """Install NodeJS, the Cloudify WebUI and Grafana, then configure them.

    NOTE(review): appears to duplicate an identical definition earlier in
    this source — confirm which copy is live.
    """
    nodejs_source_url = ctx.node.properties['nodejs_tar_source_url']
    webui_source_url = ctx.node.properties['webui_tar_source_url']
    grafana_source_url = ctx.node.properties['grafana_tar_source_url']
    # injected as an input to the script
    ctx.instance.runtime_properties['influxdb_endpoint_ip'] = \
        os.environ.get('INFLUXDB_ENDPOINT_IP')
    nodejs_home = '/opt/nodejs'
    webui_home = '/opt/cloudify-ui'
    webui_log_path = '/var/log/cloudify/webui'
    grafana_home = '{0}/grafana'.format(webui_home)
    # Service account name is masked in source.
    webui_user = '******'
    webui_group = 'webui'
    ctx.logger.info('Installing Cloudify\'s WebUI...')
    utils.set_selinux_permissive()
    utils.copy_notice('webui')
    utils.mkdir(nodejs_home)
    utils.mkdir(webui_home)
    utils.mkdir('{0}/backend'.format(webui_home))
    utils.mkdir(webui_log_path)
    utils.mkdir(grafana_home)
    utils.create_service_user(webui_user, webui_home)
    ctx.logger.info('Installing NodeJS...')
    nodejs = utils.download_file(nodejs_source_url)
    utils.untar(nodejs, nodejs_home)
    ctx.logger.info('Installing Cloudify\'s WebUI...')
    webui = utils.download_file(webui_source_url)
    utils.untar(webui, webui_home)
    ctx.logger.info('Installing Grafana...')
    grafana = utils.download_file(grafana_source_url)
    utils.untar(grafana, grafana_home)
    ctx.logger.info('Deploying WebUI Configuration...')
    utils.deploy_blueprint_resource(
        '{0}/gsPresets.json'.format(CONFIG_PATH),
        '{0}/backend/gsPresets.json'.format(webui_home))
    ctx.logger.info('Deploying Grafana Configuration...')
    utils.deploy_blueprint_resource(
        '{0}/grafana_config.js'.format(CONFIG_PATH),
        '{0}/config.js'.format(grafana_home))
    ctx.logger.info('Fixing permissions...')
    utils.chown(webui_user, webui_group, webui_home)
    utils.chown(webui_user, webui_group, nodejs_home)
    utils.chown(webui_user, webui_group, webui_log_path)
    utils.logrotate('webui')
    utils.systemd.configure('webui')
def _disable_requiretty():
    """Deploy configure_manager.sh to /tmp, make it executable and run it."""
    target = '/tmp/configure_manager.sh'
    utils.deploy_blueprint_resource(
        'components/manager/scripts/configure_manager.sh',
        target,
        NODE_NAME)
    utils.sudo('chmod +x {0}'.format(target))
    utils.sudo(target)
def _deploy_rest_configuration():
    """Deploy cloudify-rest.conf into the service home and fix ownership."""
    ctx.logger.info('Deploying REST Service Configuration file...')
    runtime_props['file_server_root'] = utils.MANAGER_RESOURCES_HOME
    conf_dest = join(runtime_props['home_dir'], 'cloudify-rest.conf')
    utils.deploy_blueprint_resource(
        join(CONFIG_PATH, 'cloudify-rest.conf'),
        conf_dest,
        SERVICE_NAME)
    utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP, conf_dest)
def _disable_requiretty():
    """Deploy and run configure_manager.sh.

    NOTE(review): the name suggests this disables sudo's requiretty —
    confirm against the deployed script's contents.
    """
    script_dest = '/tmp/configure_manager.sh'
    utils.deploy_blueprint_resource('components/manager/scripts'
                                    '/configure_manager.sh',
                                    script_dest,
                                    NODE_NAME)
    utils.sudo('chmod +x {0}'.format(script_dest))
    utils.sudo(script_dest)
def _deploy_authorization_configuration():
    """Deploy authorization.conf and record its path in runtime properties."""
    conf_name = 'authorization.conf'
    dest_path = join(runtime_props['home_dir'], conf_name)
    # Recorded so other steps can locate the deployed file.
    runtime_props['authorization_file_path'] = dest_path
    ctx.logger.info('Deploying REST authorization Configuration file...')
    utils.deploy_blueprint_resource(join(CONFIG_PATH, conf_name),
                                    dest_path,
                                    SERVICE_NAME)
    utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP, dest_path)
def _configure_index_rotation():
    """Install the daily cronjob that rotates Elasticsearch logstash indices."""
    cron_script = '/etc/cron.daily/rotate_es_indices'
    ctx.logger.info('Configuring Elasticsearch Index Rotation cronjob for '
                    'logstash-YYYY.mm.dd index patterns...')
    utils.deploy_blueprint_resource(
        'components/elasticsearch/scripts/rotate_es_indices',
        cron_script,
        ES_SERVICE_NAME)
    utils.chown('root', 'root', cron_script)
    # VALIDATE!
    utils.sudo('chmod +x /etc/cron.daily/rotate_es_indices')
def _configure_index_rotation():
    """Install the daily cronjob that rotates Elasticsearch logstash indices.

    NOTE(review): appears to duplicate an identical definition earlier in
    this source — confirm which copy is live.
    """
    ctx.logger.info('Configuring Elasticsearch Index Rotation cronjob for '
                    'logstash-YYYY.mm.dd index patterns...')
    utils.deploy_blueprint_resource(
        'components/elasticsearch/scripts/rotate_es_indices',
        '/etc/cron.daily/rotate_es_indices',
        ES_SERVICE_NAME)
    # cron.daily scripts must be root-owned and executable.
    utils.chown('root', 'root', '/etc/cron.daily/rotate_es_indices')
    # VALIDATE!
    utils.sudo('chmod +x /etc/cron.daily/rotate_es_indices')
def _deploy_rest_configuration():
    """Deploy cloudify-rest.conf into the service home and fix ownership."""
    ctx.logger.info('Deploying REST Service Configuration file...')
    # Recorded so the deployed config can reference the fileserver root.
    runtime_props['file_server_root'] = utils.MANAGER_RESOURCES_HOME
    utils.deploy_blueprint_resource(
        join(CONFIG_PATH, 'cloudify-rest.conf'),
        join(runtime_props['home_dir'], 'cloudify-rest.conf'),
        SERVICE_NAME)
    utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP,
                join(runtime_props['home_dir'], 'cloudify-rest.conf'))
def _deploy_nginx_config_files():
    """Deploy all Nginx configuration files from the blueprint.

    The original implementation rebound the loop variable over the
    `resource` namedtuple factory it had just created (name shadowing);
    plain (src, dst) pairs avoid both the shadowing and the unused factory.
    """
    ctx.logger.info('Deploying Nginx configuration files...')
    config_files = [
        ('{0}/http-external-rest-server.cloudify'.format(CONFIG_PATH),
         '/etc/nginx/conf.d/http-external-rest-server.cloudify'),
        ('{0}/https-external-rest-server.cloudify'.format(CONFIG_PATH),
         '/etc/nginx/conf.d/https-external-rest-server.cloudify'),
        ('{0}/https-internal-rest-server.cloudify'.format(CONFIG_PATH),
         '/etc/nginx/conf.d/https-internal-rest-server.cloudify'),
        ('{0}/https-file-server.cloudify'.format(CONFIG_PATH),
         '/etc/nginx/conf.d/https-file-server.cloudify'),
        ('{0}/nginx.conf'.format(CONFIG_PATH),
         '/etc/nginx/nginx.conf'),
        ('{0}/default.conf'.format(CONFIG_PATH),
         '/etc/nginx/conf.d/default.conf'),
        ('{0}/rest-location.cloudify'.format(CONFIG_PATH),
         '/etc/nginx/conf.d/rest-location.cloudify'),
        ('{0}/fileserver-location.cloudify'.format(CONFIG_PATH),
         '/etc/nginx/conf.d/fileserver-location.cloudify'),
        ('{0}/redirect-to-fileserver.cloudify'.format(CONFIG_PATH),
         '/etc/nginx/conf.d/redirect-to-fileserver.cloudify'),
        ('{0}/ui-locations.cloudify'.format(CONFIG_PATH),
         '/etc/nginx/conf.d/ui-locations.cloudify'),
        ('{0}/composer-location.cloudify'.format(CONFIG_PATH),
         '/etc/nginx/conf.d/composer-location.cloudify'),
        ('{0}/logs-conf.cloudify'.format(CONFIG_PATH),
         '/etc/nginx/conf.d/logs-conf.cloudify'),
    ]
    for src, dst in config_files:
        utils.deploy_blueprint_resource(src, dst,
                                        NGINX_SERVICE_NAME,
                                        load_ctx=False)
def _configure_index_rotation():
    """Install the daily curator cronjob that rotates ES logstash indices."""
    ctx.logger.info('Configurating index rotation...')
    ctx.logger.debug(
        'Setting up curator rotation cronjob for logstash-YYYY.mm.dd '
        'index patterns...')
    utils.deploy_blueprint_resource(
        'components/elasticsearch/scripts/rotate_es_indices',
        '/etc/cron.daily/rotate_es_indices',
        ES_SERVICE_NAME)
    # cron.daily scripts must be root-owned and executable.
    utils.chown('root', 'root', '/etc/cron.daily/rotate_es_indices')
    # TODO: VALIDATE!
    # TODO: use utils.chmod
    utils.sudo('chmod +x /etc/cron.daily/rotate_es_indices')
def install_nginx():
    """Install the Nginx RPM, prepare fileserver dirs and systemd override.

    Also publishes agent_packages_path and the REST service ports as
    runtime properties for later steps (nginx config rendering, agent
    retrieval script).
    """
    nginx_source_url = ctx_properties['nginx_rpm_source_url']
    # this is a bit tricky. the rest_service_source_url contains files that
    # should be deployed in the fileserver. the thing is, that since the
    # rest service and nginx cannot be distributed between vms right now
    # anyway, these resources are deployed by the rest service node instead.
    # rest_service_source_url = \
    #     ctx.node.properties['rest_service_module_source_url']
    nginx_log_path = '/var/log/cloudify/nginx'
    manager_resources_home = utils.MANAGER_RESOURCES_HOME
    manager_agents_path = utils.AGENT_ARCHIVES_PATH
    # TODO: check if can remove these two (should come with the agent package)
    manager_scripts_path = '{0}/packages/scripts'.format(
        manager_resources_home)
    manager_templates_path = '{0}/packages/templates'.format(
        manager_resources_home)
    nginx_unit_override = '/etc/systemd/system/nginx.service.d'
    # this is propagated to the agent retrieval script later on so that it's
    # not defined twice.
    ctx.instance.runtime_properties['agent_packages_path'] = \
        manager_agents_path
    # TODO: can we use static (not runtime) attributes for some of these?
    # how to set them?
    ctx.instance.runtime_properties['default_rest_service_port'] = '8100'
    ctx.instance.runtime_properties['internal_rest_service_port'] = '8101'
    ctx.logger.info('Installing Nginx...')
    utils.set_selinux_permissive()
    utils.copy_notice(NGINX_SERVICE_NAME)
    utils.mkdir(nginx_log_path)
    utils.mkdir(manager_resources_home)
    utils.mkdir(manager_agents_path)
    # TODO: check if can remove these two (should come with the agent package)
    utils.mkdir(manager_scripts_path)
    utils.mkdir(manager_templates_path)
    utils.mkdir(nginx_unit_override)
    utils.yum_install(nginx_source_url, service_name=NGINX_SERVICE_NAME)
    ctx.logger.info('Creating systemd unit override...')
    utils.deploy_blueprint_resource(
        '{0}/restart.conf'.format(CONFIG_PATH),
        '{0}/restart.conf'.format(nginx_unit_override),
        NGINX_SERVICE_NAME)
    utils.logrotate(NGINX_SERVICE_NAME)
    utils.clean_var_log_dir(NGINX_SERVICE_NAME)
def install_nginx(): nginx_source_url = ctx.node.properties['nginx_rpm_source_url'] # this is a bit tricky. the rest_service_source_url contains files that # should be deployed in the fileserver. the thing is, that since the # rest service and nginx cannot be distributed between vms right now # anyway, these resources are deployed by the rest service node instead. # rest_service_source_url = \ # ctx.node.properties['rest_service_module_source_url'] nginx_log_path = '/var/log/cloudify/nginx' manager_resources_home = '/opt/manager/resources' manager_agents_path = '{0}/packages/agents'.format(manager_resources_home) # TODO: check if can remove these two (should come with the agent package) manager_scripts_path = '{0}/packages/scripts'.format( manager_resources_home) manager_templates_path = '{0}/packages/templates'.format( manager_resources_home) nginx_unit_override = '/etc/systemd/system/nginx.service.d' # this is propagated to the agent retrieval script later on so that it's # not defined twice. ctx.instance.runtime_properties['agent_packages_path'] = \ manager_agents_path # TODO: can we use static (not runtime) attributes for some of these? # how to set them? ctx.instance.runtime_properties['default_rest_service_port'] = '8100' ctx.instance.runtime_properties['internal_rest_service_port'] = '8101' ctx.logger.info('Installing Nginx...') utils.set_selinux_permissive() utils.copy_notice('nginx') utils.mkdir(nginx_log_path) utils.mkdir(manager_resources_home) utils.mkdir(manager_agents_path) # TODO: check if can remove these two (should come with the agent package) utils.mkdir(manager_scripts_path) utils.mkdir(manager_templates_path) utils.mkdir(nginx_unit_override) utils.yum_install(nginx_source_url) ctx.logger.info('Creating systemd unit override...') utils.deploy_blueprint_resource( '{0}/restart.conf'.format(CONFIG_PATH), '{0}/restart.conf'.format(nginx_unit_override)) utils.logrotate('nginx') utils.clean_var_log_dir('nginx')
def configure_mgmtworker():
    """Deploy the management worker's broker config and configure systemd."""
    work_dir = '{0}/work'.format(runtime_props['home_dir'])
    runtime_props['file_server_root'] = utils.MANAGER_RESOURCES_HOME

    ctx.logger.info('Configuring Management worker...')
    conf_path = join(work_dir, 'broker_config.json')
    utils.deploy_blueprint_resource(
        '{0}/broker_config.json'.format(CONFIG_PATH),
        conf_path,
        SERVICE_NAME)

    # The config contains credentials, do not let the world read it
    utils.sudo(['chmod', '440', conf_path])
    utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP, conf_path)

    utils.systemd.configure(SERVICE_NAME)
    utils.logrotate(SERVICE_NAME)
def configure_mgmtworker():
    """Deploy the management worker's broker config and configure systemd.

    NOTE(review): appears to duplicate an identical definition earlier in
    this source — confirm which copy is live.
    """
    celery_work_dir = '{0}/work'.format(runtime_props['home_dir'])
    runtime_props['file_server_root'] = utils.MANAGER_RESOURCES_HOME
    ctx.logger.info('Configuring Management worker...')
    broker_conf_path = join(celery_work_dir, 'broker_config.json')
    utils.deploy_blueprint_resource(
        '{0}/broker_config.json'.format(CONFIG_PATH),
        broker_conf_path,
        SERVICE_NAME)
    # The config contains credentials, do not let the world read it
    utils.sudo(['chmod', '440', broker_conf_path])
    utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP, broker_conf_path)
    utils.systemd.configure(SERVICE_NAME)
    utils.logrotate(SERVICE_NAME)
def _try_deploy(src, dest):
    """Best-effort deploy of a user-supplied resource.

    :param src: key into src_runtime_props naming the source resource path.
    :param dest: destination path for the deployed resource.
    :return: True if deployed, False if no source was configured or the
             source file does not exist; re-raises any other failure.
    """
    src_path = src_runtime_props[src]
    if not src_path:
        return False
    try:
        utils.deploy_blueprint_resource(
            src_path, dest, NGINX_SERVICE_NAME,
            user_resource=True, load_ctx=False)
        return True
    except Exception as e:
        # Not every exception carries a `stderr` attribute; accessing it
        # unconditionally raised AttributeError and masked the real error.
        stderr = getattr(e, 'stderr', '') or ''
        if "No such file or directory" in stderr:
            return False
        raise
def install_nginx(): nginx_source_url = ctx_properties['nginx_rpm_source_url'] # this is a bit tricky. the rest_service_source_url contains files that # should be deployed in the fileserver. the thing is, that since the # rest service and nginx cannot be distributed between vms right now # anyway, these resources are deployed by the rest service node instead. # rest_service_source_url = \ # ctx.node.properties['rest_service_module_source_url'] manager_resources_home = utils.MANAGER_RESOURCES_HOME manager_agents_path = utils.AGENT_ARCHIVES_PATH # TODO: check if can remove these two (should come with the agent package) manager_scripts_path = '{0}/packages/scripts'.format( manager_resources_home) manager_templates_path = '{0}/packages/templates'.format( manager_resources_home) # this is propagated to the agent retrieval script later on so that it's # not defined twice. ctx.instance.runtime_properties['agent_packages_path'] = \ manager_agents_path # TODO: can we use static (not runtime) attributes for some of these? # how to set them? ctx.instance.runtime_properties['default_rest_service_port'] = '8100' ctx.logger.info('Installing Nginx...') utils.set_selinux_permissive() utils.copy_notice(SERVICE_NAME) utils.mkdir(LOG_DIR) utils.mkdir(manager_resources_home) utils.mkdir(manager_agents_path) # TODO: check if can remove these two (should come with the agent package) utils.mkdir(manager_scripts_path) utils.mkdir(manager_templates_path) utils.mkdir(UNIT_OVERRIDE_PATH) utils.yum_install(nginx_source_url, service_name=SERVICE_NAME) ctx.logger.info('Creating systemd unit override...') utils.deploy_blueprint_resource( '{0}/restart.conf'.format(CONFIG_PATH), '{0}/restart.conf'.format(UNIT_OVERRIDE_PATH), SERVICE_NAME) utils.logrotate(SERVICE_NAME) utils.clean_var_log_dir(SERVICE_NAME)
def configure_logstash():
    """Deploy Logstash configuration and sysconfig files and enable it.

    Validates that broker credentials were published to runtime properties
    (the deployed resources consume them), deploys the pipeline config and
    the env-vars sysconfig file, then registers logstash with chkconfig.
    """
    logstash_conf_path = '/etc/logstash/conf.d'

    runtime_properties = ctx.instance.runtime_properties
    rabbitmq_username = runtime_properties.get('rabbitmq_username')
    rabbitmq_password = runtime_properties.get('rabbitmq_password')

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by
    # the deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        ctx.abort_operation(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    ctx.logger.info('Deploying Logstash configuration...')
    utils.deploy_blueprint_resource(
        '{0}/logstash.conf'.format(CONFIG_PATH),
        '{0}/logstash.conf'.format(logstash_conf_path),
        LOGSTASH_SERVICE_NAME)

    # Due to a bug in the handling of configuration files,
    # configuration files with the same name cannot be deployed.
    # Since the logrotate config file is called `logstash`,
    # we change the name of the logstash env vars config file
    # from logstash to cloudify-logstash to be consistent with
    # other service env var files.
    init_file = '/etc/init.d/logstash'
    # The search pattern is a regex; use a raw string so `\$` is not an
    # invalid (deprecated) Python escape sequence. The value is unchanged.
    utils.replace_in_file(
        r'sysconfig/\$name',
        'sysconfig/cloudify-$name',
        init_file)
    utils.chmod('755', init_file)
    utils.chown('root', 'root', init_file)

    ctx.logger.debug('Deploying Logstash sysconfig...')
    utils.deploy_blueprint_resource(
        '{0}/cloudify-logstash'.format(CONFIG_PATH),
        '/etc/sysconfig/cloudify-logstash',
        LOGSTASH_SERVICE_NAME)

    utils.logrotate(LOGSTASH_SERVICE_NAME)
    utils.sudo(['/sbin/chkconfig', 'logstash', 'on'])
    utils.clean_var_log_dir(LOGSTASH_SERVICE_NAME)
def install_logstash():
    """Install logstash as a systemd service."""
    source_url = ctx_properties['logstash_rpm_source_url']

    ctx.logger.info('Installing Logstash...')
    utils.set_selinux_permissive()
    utils.copy_notice(SERVICE_NAME)
    utils.yum_install(source_url, SERVICE_NAME)

    # Install the additional logstash/JDBC plugins the manager relies on.
    install_logstash_filter_json_encode_plugin()
    install_logstash_output_jdbc_plugin()
    install_postgresql_jdbc_driver()

    # Log directory must be owned by the logstash service user.
    utils.mkdir(LOG_DIR)
    utils.chown('logstash', 'logstash', LOG_DIR)

    # Systemd drop-in so the service is restarted on failure.
    ctx.logger.debug('Creating systemd unit override...')
    utils.mkdir(UNIT_OVERRIDE_PATH)
    restart_conf_src = '{0}/restart.conf'.format(CONFIG_PATH)
    restart_conf_dst = '{0}/restart.conf'.format(UNIT_OVERRIDE_PATH)
    utils.deploy_blueprint_resource(
        restart_conf_src, restart_conf_dst, SERVICE_NAME)
def install_logstash():
    """Install logstash as a systemd service."""
    logstash_source_url = ctx_properties['logstash_rpm_source_url']

    ctx.logger.info('Installing Logstash...')
    utils.set_selinux_permissive()
    utils.copy_notice(SERVICE_NAME)
    utils.yum_install(logstash_source_url, SERVICE_NAME)

    # Install the additional logstash/JDBC plugins used by the manager.
    install_logstash_filter_json_encode_plugin()
    install_logstash_output_jdbc_plugin()
    install_postgresql_jdbc_driver()

    # The log directory must belong to the logstash service user.
    utils.mkdir(LOG_DIR)
    utils.chown('logstash', 'logstash', LOG_DIR)

    ctx.logger.debug('Creating systemd unit override...')
    utils.mkdir(UNIT_OVERRIDE_PATH)
    utils.deploy_blueprint_resource(
        '{0}/restart.conf'.format(CONFIG_PATH),
        '{0}/restart.conf'.format(UNIT_OVERRIDE_PATH),
        SERVICE_NAME)
def install_logstash():
    """Install Logstash from RPM and deploy its configuration.

    Publishes the Elasticsearch and RabbitMQ endpoint IPs as runtime
    properties (consumed by the deployed resources), validates broker
    credentials, then installs the RPM and deploys the systemd override,
    pipeline config and sysconfig files.
    """
    logstash_unit_override = '/etc/systemd/system/logstash.service.d'

    logstash_source_url = ctx_properties['logstash_rpm_source_url']

    rabbitmq_username = ctx_properties['rabbitmq_username']
    rabbitmq_password = ctx_properties['rabbitmq_password']

    logstash_log_path = '/var/log/cloudify/logstash'
    logstash_conf_path = '/etc/logstash/conf.d'

    # injected as an input to the script
    ctx.instance.runtime_properties['es_endpoint_ip'] = \
        os.environ.get('ES_ENDPOINT_IP')
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip(
            ctx_properties.get('rabbitmq_endpoint_ip'))

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by the
    # deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        utils.error_exit(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    ctx.logger.info('Installing Logstash...')
    utils.set_selinux_permissive()
    utils.copy_notice(LOGSTASH_SERVICE_NAME)

    utils.yum_install(logstash_source_url,
                      service_name=LOGSTASH_SERVICE_NAME)

    # Log directory must be owned by the logstash service user.
    utils.mkdir(logstash_log_path)
    utils.chown('logstash', 'logstash', logstash_log_path)

    ctx.logger.info('Creating systemd unit override...')
    utils.mkdir(logstash_unit_override)
    utils.deploy_blueprint_resource(
        '{0}/restart.conf'.format(CONFIG_PATH),
        '{0}/restart.conf'.format(logstash_unit_override),
        LOGSTASH_SERVICE_NAME)

    ctx.logger.info('Deploying Logstash conf...')
    utils.deploy_blueprint_resource(
        '{0}/logstash.conf'.format(CONFIG_PATH),
        '{0}/logstash.conf'.format(logstash_conf_path),
        LOGSTASH_SERVICE_NAME)

    ctx.logger.info('Deploying Logstash sysconfig...')
    utils.deploy_blueprint_resource(
        '{0}/logstash'.format(CONFIG_PATH),
        '/etc/sysconfig/logstash',
        LOGSTASH_SERVICE_NAME)

    utils.logrotate(LOGSTASH_SERVICE_NAME)
    utils.sudo(['/sbin/chkconfig', 'logstash', 'on'])
    utils.clean_var_log_dir(LOGSTASH_SERVICE_NAME)
def configure_mgmtworker():
    """Configure the management worker: broker config, systemd, logrotate."""
    # these must all be exported as part of the start operation.
    # they will not persist, so we should use the new agent
    # don't forget to change all localhosts to the relevant ips
    home_dir = '/opt/mgmtworker'
    venv_dir = '{0}/env'.format(home_dir)
    work_dir = '{0}/work'.format(home_dir)

    ctx.logger.info('Configuring Management worker...')

    # Deploy the broker configuration
    # TODO: This will break interestingly if mgmtworker_venv is empty.
    # Some sort of check for that would be sensible.
    # To sandy: I don't quite understand this check...
    # there is no else here..
    # for python_path in ${mgmtworker_venv}/lib/python*; do
    if os.path.isfile(os.path.join(venv_dir, 'bin/python')):
        broker_conf = os.path.join(work_dir, 'broker_config.json')
        utils.deploy_blueprint_resource(
            '{0}/broker_config.json'.format(CONFIG_PATH),
            broker_conf,
            MGMT_WORKER_SERVICE_NAME)
        # The config contains credentials, do not let the world read it
        utils.sudo(['chmod', '440', broker_conf])

    utils.systemd.configure(MGMT_WORKER_SERVICE_NAME)
    utils.logrotate(MGMT_WORKER_SERVICE_NAME)
def configure_mgmtworker():
    """Configure the management worker: broker config, systemd, logrotate.

    NOTE(review): uses bare ``isfile``/``join`` — presumably imported from
    ``os.path`` at module level; confirm against the file header.
    """
    # these must all be exported as part of the start operation.
    # they will not persist, so we should use the new agent
    # don't forget to change all localhosts to the relevant ips
    mgmtworker_home = '/opt/mgmtworker'
    mgmtworker_venv = '{0}/env'.format(mgmtworker_home)
    celery_work_dir = '{0}/work'.format(mgmtworker_home)

    ctx.logger.info('Configuring Management worker...')

    # Deploy the broker configuration
    # TODO: This will break interestingly if mgmtworker_venv is empty.
    # Some sort of check for that would be sensible.
    # To sandy: I don't quite understand this check...
    # there is no else here..
    # for python_path in ${mgmtworker_venv}/lib/python*; do
    if isfile(join(mgmtworker_venv, 'bin/python')):
        broker_conf_path = join(celery_work_dir, 'broker_config.json')
        utils.deploy_blueprint_resource(
            '{0}/broker_config.json'.format(CONFIG_PATH),
            broker_conf_path,
            MGMT_WORKER_SERVICE_NAME)
        # The config contains credentials, do not let the world read it
        utils.sudo(['chmod', '440', broker_conf_path])

    utils.systemd.configure(MGMT_WORKER_SERVICE_NAME)
    utils.logrotate(MGMT_WORKER_SERVICE_NAME)
def install_logstash():
    """Install Logstash from RPM and deploy its configuration.

    Older variant: reads node properties directly (``ctx.node.properties``)
    and aborts via ``utils.error_exit`` rather than ``ctx.abort_operation``.
    """
    logstash_unit_override = '/etc/systemd/system/logstash.service.d'

    logstash_source_url = ctx.node.properties['logstash_rpm_source_url']

    rabbitmq_username = ctx.node.properties['rabbitmq_username']
    rabbitmq_password = ctx.node.properties['rabbitmq_password']

    logstash_log_path = '/var/log/cloudify/logstash'
    logstash_conf_path = '/etc/logstash/conf.d'

    # injected as an input to the script
    ctx.instance.runtime_properties['es_endpoint_ip'] = \
        os.environ.get('ES_ENDPOINT_IP')
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip()

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by the
    # deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        utils.error_exit(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    ctx.logger.info('Installing Logstash...')
    utils.set_selinux_permissive()
    utils.copy_notice('logstash')

    utils.yum_install(logstash_source_url)

    # Log directory must be owned by the logstash service user.
    utils.mkdir(logstash_log_path)
    utils.chown('logstash', 'logstash', logstash_log_path)

    ctx.logger.info('Creating systemd unit override...')
    utils.mkdir(logstash_unit_override)
    utils.deploy_blueprint_resource(
        '{0}/restart.conf'.format(CONFIG_PATH),
        '{0}/restart.conf'.format(logstash_unit_override))

    ctx.logger.info('Deploying Logstash conf...')
    utils.deploy_blueprint_resource(
        '{0}/logstash.conf'.format(CONFIG_PATH),
        '{0}/logstash.conf'.format(logstash_conf_path))

    ctx.logger.info('Deploying Logstash sysconfig...')
    utils.deploy_blueprint_resource(
        '{0}/logstash'.format(CONFIG_PATH),
        '/etc/sysconfig/logstash')

    utils.logrotate('logstash')
    utils.sudo(['/sbin/chkconfig', 'logstash', 'on'])
    utils.clean_var_log_dir('logstash')
def install_restservice():
    """Install the Cloudify REST service and deploy its configuration.

    Exports config-path environment variables consumed by the manager's
    code, installs the RPM, deploys role/userstore configuration and the
    main cloudify-rest.conf file.
    """
    rest_service_rpm_source_url = \
        ctx_properties['rest_service_rpm_source_url']

    rest_venv = os.path.join(REST_SERVICE_HOME, 'env')
    # Also, manager_rest_config_path is mandatory since the manager's code
    # reads this env var. it should be renamed to rest_service_config_path.
    os.environ['manager_rest_config_path'] = os.path.join(
        REST_SERVICE_HOME, 'cloudify-rest.conf')
    os.environ['rest_service_config_path'] = os.path.join(
        REST_SERVICE_HOME, 'cloudify-rest.conf')
    os.environ['manager_rest_security_config_path'] = os.path.join(
        REST_SERVICE_HOME, 'rest-security.conf')
    rest_service_log_path = '/var/log/cloudify/rest'

    ctx.logger.info('Installing REST Service...')
    utils.set_selinux_permissive()

    utils.copy_notice(REST_SERVICE_NAME)
    utils.mkdir(REST_SERVICE_HOME)
    utils.mkdir(rest_service_log_path)
    utils.mkdir(MANAGER_RESOURCES_HOME)

    deploy_broker_configuration()
    utils.yum_install(rest_service_rpm_source_url,
                      service_name=REST_SERVICE_NAME)
    _configure_dbus(rest_venv)
    install_optional(rest_venv)
    utils.logrotate(REST_SERVICE_NAME)

    ctx.logger.info('Copying role configuration files...')
    utils.deploy_blueprint_resource(
        os.path.join(REST_RESOURCES_PATH, 'roles_config.yaml'),
        os.path.join(REST_SERVICE_HOME, 'roles_config.yaml'),
        REST_SERVICE_NAME,
        user_resource=True)
    utils.deploy_blueprint_resource(
        os.path.join(REST_RESOURCES_PATH, 'userstore.yaml'),
        os.path.join(REST_SERVICE_HOME, 'userstore.yaml'),
        REST_SERVICE_NAME,
        user_resource=True)

    # copy_security_config_files()

    ctx.logger.info('Deploying REST Service Configuration file...')
    # rest ports are set as runtime properties in nginx/scripts/create.py
    # cloudify-rest.conf currently contains localhost for fileserver endpoint.
    # We need to change that if we want to deploy nginx on another machine.
    utils.deploy_blueprint_resource(
        os.path.join(CONFIG_PATH, 'cloudify-rest.conf'),
        os.path.join(REST_SERVICE_HOME, 'cloudify-rest.conf'),
        REST_SERVICE_NAME)
def install_restservice():
    """Install the Cloudify REST service and deploy its configuration.

    Exports config-path environment variables consumed by the manager's
    code, installs the RPM, deploys role/userstore configuration and the
    main cloudify-rest.conf file.
    """
    rest_service_rpm_source_url = \
        ctx_properties['rest_service_rpm_source_url']

    rest_venv = os.path.join(REST_SERVICE_HOME, 'env')
    # Also, manager_rest_config_path is mandatory since the manager's code
    # reads this env var. it should be renamed to rest_service_config_path.
    os.environ['manager_rest_config_path'] = os.path.join(
        REST_SERVICE_HOME, 'cloudify-rest.conf')
    os.environ['rest_service_config_path'] = os.path.join(
        REST_SERVICE_HOME, 'cloudify-rest.conf')
    os.environ['manager_rest_security_config_path'] = os.path.join(
        REST_SERVICE_HOME, 'rest-security.conf')
    rest_service_log_path = '/var/log/cloudify/rest'

    ctx.logger.info('Installing REST Service...')
    utils.set_selinux_permissive()

    utils.copy_notice(REST_SERVICE_NAME)
    utils.mkdir(REST_SERVICE_HOME)
    utils.mkdir(rest_service_log_path)
    utils.mkdir(MANAGER_RESOURCES_HOME)

    deploy_broker_configuration()
    utils.yum_install(rest_service_rpm_source_url,
                      service_name=REST_SERVICE_NAME)
    _configure_dbus(rest_venv)
    install_optional(rest_venv)
    utils.logrotate(REST_SERVICE_NAME)

    ctx.logger.info('Copying role configuration files...')
    utils.deploy_blueprint_resource(
        os.path.join(REST_RESOURCES_PATH, 'roles_config.yaml'),
        os.path.join(REST_SERVICE_HOME, 'roles_config.yaml'),
        REST_SERVICE_NAME,
        user_resource=True)
    utils.deploy_blueprint_resource(
        os.path.join(REST_RESOURCES_PATH, 'userstore.yaml'),
        os.path.join(REST_SERVICE_HOME, 'userstore.yaml'),
        REST_SERVICE_NAME,
        user_resource=True)

    # copy_security_config_files()

    ctx.logger.info('Deploying REST Service Configuration file...')
    # rest ports are set as runtime properties in nginx/scripts/create.py
    # cloudify-rest.conf currently contains localhost for fileserver endpoint.
    # We need to change that if we want to deploy nginx on another machine.
    utils.deploy_blueprint_resource(
        os.path.join(CONFIG_PATH, 'cloudify-rest.conf'),
        os.path.join(REST_SERVICE_HOME, 'cloudify-rest.conf'),
        REST_SERVICE_NAME)
def _install_elasticsearch():
    """Install Elasticsearch and Curator and configure heap/log settings.

    Installs the ES RPM, deploys a systemd restart override, the main and
    logging configuration, an external script, rewrites the sysconfig file
    (heap size, java opts, log paths) and finally installs Curator —
    from RPM if a source URL was given, otherwise from PyPI.
    """
    es_java_opts = ctx_properties['es_java_opts']
    es_heap_size = ctx_properties['es_heap_size']

    es_source_url = ctx_properties['es_rpm_source_url']
    es_curator_rpm_source_url = \
        ctx_properties['es_curator_rpm_source_url']

    # this will be used only if elasticsearch-curator is not installed via
    # an rpm and an internet connection is available
    es_curator_version = "3.2.3"

    es_home = "/opt/elasticsearch"
    es_logs_path = "/var/log/cloudify/elasticsearch"
    es_conf_path = "/etc/elasticsearch"
    es_unit_override = "/etc/systemd/system/elasticsearch.service.d"
    es_scripts_path = os.path.join(es_conf_path, 'scripts')

    ctx.logger.info('Installing Elasticsearch...')
    utils.set_selinux_permissive()

    utils.copy_notice('elasticsearch')
    utils.mkdir(es_home)
    utils.mkdir(es_logs_path)

    utils.yum_install(es_source_url, service_name=ES_SERVICE_NAME)

    ctx.logger.info('Chowning {0} by elasticsearch user...'.format(
        es_logs_path))
    utils.chown('elasticsearch', 'elasticsearch', es_logs_path)

    ctx.logger.info('Creating systemd unit override...')
    utils.mkdir(es_unit_override)
    utils.deploy_blueprint_resource(
        os.path.join(CONFIG_PATH, 'restart.conf'),
        os.path.join(es_unit_override, 'restart.conf'),
        ES_SERVICE_NAME)

    ctx.logger.info('Deploying Elasticsearch Configuration...')
    utils.deploy_blueprint_resource(
        os.path.join(CONFIG_PATH, 'elasticsearch.yml'),
        os.path.join(es_conf_path, 'elasticsearch.yml'),
        ES_SERVICE_NAME)
    utils.chown('elasticsearch', 'elasticsearch',
                os.path.join(es_conf_path, 'elasticsearch.yml'))

    ctx.logger.info('Deploying elasticsearch logging configuration file...')
    utils.deploy_blueprint_resource(
        os.path.join(CONFIG_PATH, 'logging.yml'),
        os.path.join(es_conf_path, 'logging.yml'),
        ES_SERVICE_NAME)
    utils.chown('elasticsearch', 'elasticsearch',
                os.path.join(es_conf_path, 'logging.yml'))

    ctx.logger.info('Creating Elasticsearch scripts folder and '
                    'additional external Elasticsearch scripts...')
    utils.mkdir(es_scripts_path)
    utils.deploy_blueprint_resource(
        os.path.join(CONFIG_PATH, 'scripts', 'append.groovy'),
        os.path.join(es_scripts_path, 'append.groovy'),
        ES_SERVICE_NAME
    )

    ctx.logger.info('Setting Elasticsearch Heap Size...')
    # we should treat these as templates.
    utils.replace_in_file(
        '(?:#|)ES_HEAP_SIZE=(.*)',
        'ES_HEAP_SIZE={0}'.format(es_heap_size),
        '/etc/sysconfig/elasticsearch')

    if es_java_opts:
        ctx.logger.info('Setting additional JAVA_OPTS...')
        utils.replace_in_file(
            '(?:#|)ES_JAVA_OPTS=(.*)',
            'ES_JAVA_OPTS={0}'.format(es_java_opts),
            '/etc/sysconfig/elasticsearch')

    ctx.logger.info('Setting Elasticsearch logs path...')
    utils.replace_in_file(
        '(?:#|)LOG_DIR=(.*)',
        'LOG_DIR={0}'.format(es_logs_path),
        '/etc/sysconfig/elasticsearch')
    utils.replace_in_file(
        '(?:#|)ES_GC_LOG_FILE=(.*)',
        'ES_GC_LOG_FILE={0}'.format(os.path.join(es_logs_path, 'gc.log')),
        '/etc/sysconfig/elasticsearch')
    utils.logrotate(ES_SERVICE_NAME)

    ctx.logger.info('Installing Elasticsearch Curator...')
    if not es_curator_rpm_source_url:
        ctx.install_python_package('elasticsearch-curator=={0}'.format(
            es_curator_version))
    else:
        utils.yum_install(es_curator_rpm_source_url,
                          service_name=ES_SERVICE_NAME)

    _configure_index_rotation()

    # elasticsearch provides a systemd init env. we just enable it.
    utils.systemd.enable(ES_SERVICE_NAME, append_prefix=False)
def install_mgmtworker():
    """Install the management worker and publish its broker settings.

    Reads RabbitMQ settings from the rabbitmq node's properties, mirrors
    credentials/SSL flags into runtime properties, installs the RPM,
    optionally deploys the broker SSL certificate, and deploys the broker
    configuration JSON before configuring systemd and logrotate.
    """
    management_worker_rpm_source_url = \
        ctx_properties['management_worker_rpm_source_url']

    # these must all be exported as part of the start operation.
    # they will not persist, so we should use the new agent
    # don't forget to change all localhosts to the relevant ips
    mgmtworker_home = '/opt/mgmtworker'
    mgmtworker_venv = '{0}/env'.format(mgmtworker_home)
    celery_work_dir = '{0}/work'.format(mgmtworker_home)
    celery_log_dir = "/var/log/cloudify/mgmtworker"

    broker_port_ssl = '5671'
    broker_port_no_ssl = '5672'
    rabbit_props = utils.ctx_factory.get('rabbitmq')
    rabbitmq_ssl_enabled = rabbit_props['rabbitmq_ssl_enabled']
    ctx.logger.info("rabbitmq_ssl_enabled: {0}".format(rabbitmq_ssl_enabled))
    rabbitmq_cert_public = rabbit_props['rabbitmq_cert_public']

    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip(
            rabbit_props.get('rabbitmq_endpoint_ip'))

    # Fix possible injections in json of rabbit credentials
    # See json.org for string spec
    for key in ['rabbitmq_username', 'rabbitmq_password']:
        # We will not escape newlines or other control characters,
        # we will accept them breaking
        # things noisily, e.g. on newlines and backspaces.
        # TODO: add:
        # sed 's/"/\\"/' | sed 's/\\/\\\\/' | sed s-/-\\/- | sed 's/\t/\\t/'
        ctx.instance.runtime_properties[key] = ctx_properties[key]

    # Make the ssl enabled flag work with json (boolean in lower case)
    # TODO: check if still needed:
    # broker_ssl_enabled = "$(echo ${rabbitmq_ssl_enabled} | tr '[:upper:]' '[:lower:]')"  # NOQA
    ctx.instance.runtime_properties['rabbitmq_ssl_enabled'] = \
        rabbitmq_ssl_enabled

    ctx.logger.info('Installing Management Worker...')
    utils.set_selinux_permissive()
    utils.copy_notice(MGMT_WORKER_SERVICE_NAME)
    utils.mkdir(mgmtworker_home)
    utils.mkdir('{0}/config'.format(mgmtworker_home))
    utils.mkdir(celery_log_dir)
    utils.mkdir(celery_work_dir)

    # this create the mgmtworker_venv and installs the relevant
    # modules into it.
    utils.yum_install(management_worker_rpm_source_url,
                      service_name=MGMT_WORKER_SERVICE_NAME)
    _install_optional(mgmtworker_venv)

    # Add certificate and select port, as applicable
    if rabbitmq_ssl_enabled:
        broker_cert_path = '{0}/amqp_pub.pem'.format(mgmtworker_home)
        utils.deploy_ssl_certificate(
            'public', broker_cert_path, 'root', rabbitmq_cert_public)
        ctx.instance.runtime_properties['broker_cert_path'] = broker_cert_path
        # Use SSL port
        ctx.instance.runtime_properties['broker_port'] = broker_port_ssl
    else:
        # No SSL, don't use SSL port
        ctx.instance.runtime_properties['broker_port'] = broker_port_no_ssl
        if rabbitmq_cert_public is not None:
            ctx.logger.warn('Broker SSL cert supplied but SSL not enabled '
                            '(broker_ssl_enabled is False).')

    ctx.logger.info("broker_port: {0}".format(
        ctx.instance.runtime_properties['broker_port']))
    ctx.logger.info('Configuring Management worker...')
    # Deploy the broker configuration
    # TODO: This will break interestingly if mgmtworker_venv is empty.
    # Some sort of check for that would be sensible.
    # To sandy: I don't quite understand this check...
    # there is no else here..
    # for python_path in ${mgmtworker_venv}/lib/python*; do
    if os.path.isfile(os.path.join(mgmtworker_venv, 'bin/python')):
        broker_conf_path = os.path.join(celery_work_dir, 'broker_config.json')
        utils.deploy_blueprint_resource(
            '{0}/broker_config.json'.format(CONFIG_PATH),
            broker_conf_path,
            MGMT_WORKER_SERVICE_NAME)
        # The config contains credentials, do not let the world read it
        utils.sudo(['chmod', '440', broker_conf_path])

    utils.systemd.configure(MGMT_WORKER_SERVICE_NAME)
    utils.logrotate(MGMT_WORKER_SERVICE_NAME)
def install_riemann():
    """Install Riemann with the Langohr AMQP client and deploy its config.

    Validates broker credentials, publishes RabbitMQ connection details as
    runtime properties, installs the daemonize and riemann RPMs, extracts
    manager.config from the cloudify-manager repository, and deploys the
    main Riemann configuration before configuring systemd.
    """
    langohr_source_url = ctx_properties['langohr_jar_source_url']
    daemonize_source_url = ctx_properties['daemonize_rpm_source_url']
    riemann_source_url = ctx_properties['riemann_rpm_source_url']
    # Needed for Riemann's config
    cloudify_resources_url = ctx_properties['cloudify_resources_url']
    rabbitmq_username = ctx_properties['rabbitmq_username']
    rabbitmq_password = ctx_properties['rabbitmq_password']

    riemann_config_path = '/etc/riemann'
    riemann_log_path = '/var/log/cloudify/riemann'
    langohr_home = '/opt/lib'
    extra_classpath = '{0}/langohr.jar'.format(langohr_home)

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by the
    # deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        ctx.abort_operation(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    rabbit_props = utils.ctx_factory.get('rabbitmq')
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip(
            rabbit_props.get('rabbitmq_endpoint_ip'))
    ctx.instance.runtime_properties['rabbitmq_username'] = \
        rabbit_props.get('rabbitmq_username')
    ctx.instance.runtime_properties['rabbitmq_password'] = \
        rabbit_props.get('rabbitmq_password')

    ctx.logger.info('Installing Riemann...')
    utils.set_selinux_permissive()
    utils.copy_notice(RIEMANN_SERVICE_NAME)

    utils.mkdir(riemann_log_path)
    utils.mkdir(langohr_home)
    utils.mkdir(riemann_config_path)
    utils.mkdir('{0}/conf.d'.format(riemann_config_path))

    langohr = utils.download_cloudify_resource(langohr_source_url,
                                               RIEMANN_SERVICE_NAME)
    utils.sudo(['cp', langohr, extra_classpath])
    ctx.logger.info('Applying Langohr permissions...')
    utils.sudo(['chmod', '644', extra_classpath])

    utils.yum_install(daemonize_source_url,
                      service_name=RIEMANN_SERVICE_NAME)
    utils.yum_install(riemann_source_url,
                      service_name=RIEMANN_SERVICE_NAME)
    utils.logrotate(RIEMANN_SERVICE_NAME)

    ctx.logger.info('Downloading cloudify-manager Repository...')
    manager_repo = utils.download_cloudify_resource(cloudify_resources_url,
                                                    RIEMANN_SERVICE_NAME)
    ctx.logger.info('Extracting Manager Repository...')
    utils.untar(manager_repo, '/tmp')

    ctx.logger.info('Deploying Riemann manager.config...')
    utils.move(
        '/tmp/plugins/riemann-controller/riemann_controller/resources/manager.config',  # NOQA
        '{0}/conf.d/manager.config'.format(riemann_config_path))

    ctx.logger.info('Deploying Riemann conf...')
    utils.deploy_blueprint_resource(
        '{0}/main.clj'.format(CONFIG_PATH),
        '{0}/main.clj'.format(riemann_config_path),
        RIEMANN_SERVICE_NAME)

    # our riemann configuration will (by default) try to read these environment
    # variables. If they don't exist, it will assume
    # that they're found at "localhost"
    # export MANAGEMENT_IP=""
    # export RABBITMQ_HOST=""

    # we inject the management_ip for both of these to Riemann's systemd
    # config.
    # These should be potentially different
    # if the manager and rabbitmq are running on different hosts.
    utils.systemd.configure(RIEMANN_SERVICE_NAME)
    utils.clean_var_log_dir(RIEMANN_SERVICE_NAME)
def _install_rabbitmq():
    """Install and bootstrap RabbitMQ: RPMs, limits, plugins, users.

    Installs Erlang and RabbitMQ, deploys ulimit/definitions/env config,
    wipes the old mnesia database, starts the service to enable plugins and
    create the application user, deploys rabbitmq.config and then stops the
    service again (it is started later by its systemd unit).
    """
    erlang_rpm_source_url = ctx_properties['erlang_rpm_source_url']
    rabbitmq_rpm_source_url = ctx_properties['rabbitmq_rpm_source_url']
    # TODO: maybe we don't need this env var
    os.putenv('RABBITMQ_FD_LIMIT',
              str(ctx_properties['rabbitmq_fd_limit']))
    rabbitmq_username = ctx_properties['rabbitmq_username']
    rabbitmq_password = ctx_properties['rabbitmq_password']

    ctx.logger.info('Installing RabbitMQ...')
    utils.set_selinux_permissive()
    utils.copy_notice(SERVICE_NAME)

    utils.mkdir(LOG_DIR)

    utils.yum_install(erlang_rpm_source_url, service_name=SERVICE_NAME)
    utils.yum_install(rabbitmq_rpm_source_url, service_name=SERVICE_NAME)

    utils.logrotate(SERVICE_NAME)

    utils.systemd.configure(SERVICE_NAME)

    ctx.logger.info('Configuring File Descriptors Limit...')
    utils.deploy_blueprint_resource(
        '{0}/rabbitmq_ulimit.conf'.format(CONFIG_PATH),
        FD_LIMIT_PATH,
        SERVICE_NAME)

    utils.deploy_blueprint_resource(
        '{0}/rabbitmq-definitions.json'.format(CONFIG_PATH),
        join(HOME_DIR, 'definitions.json'),
        SERVICE_NAME)
    # This stops rabbit from failing if the host name changes, e.g. when
    # a manager is deployed from an image but given a new hostname.
    # This is likely to cause problems with clustering of rabbitmq if this is
    # done at any point, so at that point a change to the file and cleaning of
    # mnesia would likely be necessary.
    utils.deploy_blueprint_resource(
        '{0}/rabbitmq-env.conf'.format(CONFIG_PATH),
        '/etc/rabbitmq/rabbitmq-env.conf',
        SERVICE_NAME)

    # Delete old mnesia node
    utils.sudo(['rm', '-rf', '/var/lib/rabbitmq/mnesia'])
    utils.systemd.systemctl('daemon-reload')

    utils.chown('rabbitmq', 'rabbitmq', LOG_DIR)

    # rabbitmq restart exits with 143 status code that is valid in this case.
    utils.systemd.restart(SERVICE_NAME, ignore_failure=True)
    time.sleep(10)
    utils.wait_for_port(5672)

    ctx.logger.info('Enabling RabbitMQ Plugins...')
    # Occasional timing issues with rabbitmq starting have resulted in
    # failures when first trying to enable plugins
    utils.sudo(['rabbitmq-plugins', 'enable', 'rabbitmq_management'],
               retries=5)
    utils.sudo(['rabbitmq-plugins', 'enable', 'rabbitmq_tracing'],
               retries=5)

    _clear_guest_permissions_if_guest_exists()
    _create_user_and_set_permissions(rabbitmq_username, rabbitmq_password)

    utils.deploy_blueprint_resource(
        '{0}/rabbitmq.config'.format(CONFIG_PATH),
        join(HOME_DIR, 'rabbitmq.config'),
        SERVICE_NAME,
        user_resource=True)

    utils.systemd.stop(SERVICE_NAME, retries=5)
def preconfigure_nginx():
    """Deploy Nginx configuration files according to the REST protocol.

    Reads the protocol chosen by the REST service node from the target's
    runtime properties; for https, also deploys the server SSL cert/key.
    """
    ssl_resources_rel_path = 'resources/ssl'
    ssl_certs_root = '/root/cloudify'

    # this is used by nginx's default.conf to select the relevant configuration
    rest_protocol = ctx.target.instance.runtime_properties['rest_protocol']
    # TODO: NEED TO IMPLEMENT THIS IN CTX UTILS
    ctx.source.instance.runtime_properties['rest_protocol'] = rest_protocol

    if rest_protocol == 'https':
        ctx.logger.info('Copying SSL Certs...')
        utils.mkdir(ssl_certs_root)
        utils.deploy_blueprint_resource(
            '{0}/server.crt'.format(ssl_resources_rel_path),
            '{0}/server.crt'.format(ssl_certs_root))
        utils.deploy_blueprint_resource(
            '{0}/server.key'.format(ssl_resources_rel_path),
            '{0}/server.key'.format(ssl_certs_root))

    ctx.logger.info('Deploying Nginx configuration files...')
    utils.deploy_blueprint_resource(
        '{0}/{1}-rest-server.cloudify'.format(CONFIG_PATH, rest_protocol),
        '/etc/nginx/conf.d/{0}-rest-server.cloudify'.format(rest_protocol))
    utils.deploy_blueprint_resource(
        '{0}/nginx.conf'.format(CONFIG_PATH),
        '/etc/nginx/nginx.conf')
    utils.deploy_blueprint_resource(
        '{0}/default.conf'.format(CONFIG_PATH),
        '/etc/nginx/conf.d/default.conf')
    utils.deploy_blueprint_resource(
        '{0}/rest-location.cloudify'.format(CONFIG_PATH),
        '/etc/nginx/conf.d/rest-location.cloudify')
    utils.deploy_blueprint_resource(
        '{0}/fileserver-location.cloudify'.format(CONFIG_PATH),
        '/etc/nginx/conf.d/fileserver-location.cloudify')
    utils.deploy_blueprint_resource(
        '{0}/ui-locations.cloudify'.format(CONFIG_PATH),
        '/etc/nginx/conf.d/ui-locations.cloudify')
    utils.deploy_blueprint_resource(
        '{0}/logs-conf.cloudify'.format(CONFIG_PATH),
        '/etc/nginx/conf.d/logs-conf.cloudify')

    utils.systemd.enable('nginx')
def preconfigure_nginx():
    """Deploy Nginx configuration files according to the REST protocol.

    Reads the protocol chosen by the REST service node from the target's
    runtime properties; for https, also deploys the server SSL cert/key as
    user resources. All resources are deployed with ``load_ctx=False``.
    """
    ssl_resources_rel_path = 'resources/ssl'
    ssl_certs_root = '/root/cloudify'

    # this is used by nginx's default.conf to select the relevant configuration
    rest_protocol = ctx.target.instance.runtime_properties['rest_protocol']
    # TODO: NEED TO IMPLEMENT THIS IN CTX UTILS
    ctx.source.instance.runtime_properties['rest_protocol'] = rest_protocol

    if rest_protocol == 'https':
        ctx.logger.info('Copying SSL Certs...')
        utils.mkdir(ssl_certs_root)
        utils.deploy_blueprint_resource(
            '{0}/server.crt'.format(ssl_resources_rel_path),
            '{0}/server.crt'.format(ssl_certs_root),
            NGINX_SERVICE_NAME,
            user_resource=True,
            load_ctx=False)
        utils.deploy_blueprint_resource(
            '{0}/server.key'.format(ssl_resources_rel_path),
            '{0}/server.key'.format(ssl_certs_root),
            NGINX_SERVICE_NAME,
            user_resource=True,
            load_ctx=False)

    ctx.logger.info('Deploying Nginx configuration files...')
    utils.deploy_blueprint_resource(
        '{0}/{1}-rest-server.cloudify'.format(CONFIG_PATH, rest_protocol),
        '/etc/nginx/conf.d/{0}-rest-server.cloudify'.format(rest_protocol),
        NGINX_SERVICE_NAME,
        load_ctx=False)
    utils.deploy_blueprint_resource(
        '{0}/nginx.conf'.format(CONFIG_PATH),
        '/etc/nginx/nginx.conf',
        NGINX_SERVICE_NAME,
        load_ctx=False)
    utils.deploy_blueprint_resource(
        '{0}/default.conf'.format(CONFIG_PATH),
        '/etc/nginx/conf.d/default.conf',
        NGINX_SERVICE_NAME,
        load_ctx=False)
    utils.deploy_blueprint_resource(
        '{0}/rest-location.cloudify'.format(CONFIG_PATH),
        '/etc/nginx/conf.d/rest-location.cloudify',
        NGINX_SERVICE_NAME,
        load_ctx=False)
    utils.deploy_blueprint_resource(
        '{0}/fileserver-location.cloudify'.format(CONFIG_PATH),
        '/etc/nginx/conf.d/fileserver-location.cloudify',
        NGINX_SERVICE_NAME,
        load_ctx=False)
    utils.deploy_blueprint_resource(
        '{0}/ui-locations.cloudify'.format(CONFIG_PATH),
        '/etc/nginx/conf.d/ui-locations.cloudify',
        NGINX_SERVICE_NAME,
        load_ctx=False)
    utils.deploy_blueprint_resource(
        '{0}/logs-conf.cloudify'.format(CONFIG_PATH),
        '/etc/nginx/conf.d/logs-conf.cloudify',
        NGINX_SERVICE_NAME,
        load_ctx=False)

    utils.systemd.enable(NGINX_SERVICE_NAME, append_prefix=False)
def install_logstash():
    """Install Logstash from RPM and deploy its configuration.

    Publishes Elasticsearch and RabbitMQ connection details as runtime
    properties (consumed by the deployed resources), validates broker
    credentials, installs the RPM and deploys systemd override, pipeline
    config and the renamed sysconfig file.
    """
    logstash_unit_override = '/etc/systemd/system/logstash.service.d'

    logstash_source_url = ctx_properties['logstash_rpm_source_url']

    rabbitmq_username = ctx_properties['rabbitmq_username']
    rabbitmq_password = ctx_properties['rabbitmq_password']

    logstash_log_path = '/var/log/cloudify/logstash'
    logstash_conf_path = '/etc/logstash/conf.d'

    # injected as an input to the script
    ctx.instance.runtime_properties['es_endpoint_ip'] = \
        os.environ['ES_ENDPOINT_IP']
    elasticsearch_props = utils.ctx_factory.get('elasticsearch')
    ctx.instance.runtime_properties['es_endpoint_port'] = \
        elasticsearch_props['es_endpoint_port']

    rabbit_props = utils.ctx_factory.get('rabbitmq')
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip(
            rabbit_props.get('rabbitmq_endpoint_ip'))
    ctx.instance.runtime_properties['rabbitmq_username'] = \
        rabbit_props['rabbitmq_username']
    ctx.instance.runtime_properties['rabbitmq_password'] = \
        rabbit_props['rabbitmq_password']

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by the
    # deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        ctx.abort_operation(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    ctx.logger.info('Installing Logstash...')
    utils.set_selinux_permissive()
    utils.copy_notice(LOGSTASH_SERVICE_NAME)

    utils.yum_install(logstash_source_url,
                      service_name=LOGSTASH_SERVICE_NAME)

    # Log directory must be owned by the logstash service user.
    utils.mkdir(logstash_log_path)
    utils.chown('logstash', 'logstash', logstash_log_path)

    ctx.logger.info('Creating systemd unit override...')
    utils.mkdir(logstash_unit_override)
    utils.deploy_blueprint_resource(
        '{0}/restart.conf'.format(CONFIG_PATH),
        '{0}/restart.conf'.format(logstash_unit_override),
        LOGSTASH_SERVICE_NAME)

    ctx.logger.info('Deploying Logstash conf...')
    utils.deploy_blueprint_resource(
        '{0}/logstash.conf'.format(CONFIG_PATH),
        '{0}/logstash.conf'.format(logstash_conf_path),
        LOGSTASH_SERVICE_NAME)

    # Due to a bug in the handling of configuration files,
    # configuration files with the same name cannot be deployed.
    # Since the logrotate config file is called `logstash`,
    # we change the name of the logstash env vars config file
    # from logstash to cloudify-logstash to be consistent with
    # other service env var files.
    init_file = '/etc/init.d/logstash'
    # The search pattern is a regex; use a raw string so `\$` is not an
    # invalid (deprecated) Python escape sequence. The value is unchanged.
    utils.replace_in_file(
        r'sysconfig/\$name',
        'sysconfig/cloudify-$name',
        init_file)
    utils.chmod('755', init_file)
    utils.chown('root', 'root', init_file)

    ctx.logger.info('Deploying Logstash sysconfig...')
    utils.deploy_blueprint_resource(
        '{0}/cloudify-logstash'.format(CONFIG_PATH),
        '/etc/sysconfig/cloudify-logstash',
        LOGSTASH_SERVICE_NAME)

    utils.logrotate(LOGSTASH_SERVICE_NAME)
    utils.sudo(['/sbin/chkconfig', 'logstash', 'on'])
    utils.clean_var_log_dir(LOGSTASH_SERVICE_NAME)
def _deploy_nginx_config_files(external_rest_protocol):
    """Deploy all Nginx configuration files for the manager.

    :param external_rest_protocol: protocol selector for the external REST
        server template (presumably 'http' or 'https' — TODO confirm against
        the caller); every other resource name is fixed.
    """
    ctx.logger.info('Deploying Nginx configuration files...')
    # (blueprint resource name, destination path) pairs, deployed in order.
    # Plain tuples are used here; a loop variable must not rebind any
    # factory/type name (the old `resource` namedtuple was shadowed by its
    # own loop variable).
    resources = [
        ('{0}-external-rest-server.cloudify'.format(external_rest_protocol),
         '/etc/nginx/conf.d/{0}-external-rest-server.cloudify'.format(
             external_rest_protocol)),
        ('https-internal-rest-server.cloudify',
         '/etc/nginx/conf.d/https-internal-rest-server.cloudify'),
        ('https-file-server.cloudify',
         '/etc/nginx/conf.d/https-file-server.cloudify'),
        # nginx.conf is the only file deployed outside conf.d
        ('nginx.conf', '/etc/nginx/nginx.conf'),
        ('default.conf', '/etc/nginx/conf.d/default.conf'),
        ('rest-location.cloudify',
         '/etc/nginx/conf.d/rest-location.cloudify'),
        ('fileserver-location.cloudify',
         '/etc/nginx/conf.d/fileserver-location.cloudify'),
        ('redirect-to-fileserver.cloudify',
         '/etc/nginx/conf.d/redirect-to-fileserver.cloudify'),
        ('ui-locations.cloudify',
         '/etc/nginx/conf.d/ui-locations.cloudify'),
        ('logs-conf.cloudify', '/etc/nginx/conf.d/logs-conf.cloudify'),
    ]

    for src_name, dst in resources:
        utils.deploy_blueprint_resource(
            '{0}/{1}'.format(CONFIG_PATH, src_name),
            dst,
            NGINX_SERVICE_NAME,
            load_ctx=False
        )
def install_riemann():
    """Install and configure the Riemann service.

    Downloads the Langohr jar onto the classpath, installs the daemonize
    and riemann RPMs, extracts the cloudify-manager repository to obtain
    riemann-controller's manager.config, deploys the main.clj config, and
    configures the systemd unit. RabbitMQ endpoint/credentials are stored
    as runtime properties for use by the deployed resources.
    """
    langohr_source_url = ctx_properties['langohr_jar_source_url']
    daemonize_source_url = ctx_properties['daemonize_rpm_source_url']
    riemann_source_url = ctx_properties['riemann_rpm_source_url']
    # Needed for Riemann's config
    cloudify_resources_url = ctx_properties['cloudify_resources_url']
    rabbitmq_username = ctx_properties['rabbitmq_username']
    rabbitmq_password = ctx_properties['rabbitmq_password']

    riemann_config_path = '/etc/riemann'
    riemann_log_path = '/var/log/cloudify/riemann'
    langohr_home = '/opt/lib'
    # Langohr jar is added to Riemann's JVM classpath from this location
    extra_classpath = '{0}/langohr.jar'.format(langohr_home)

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by
    # the deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        ctx.abort_operation(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    # NOTE(review): credentials validated above come from ctx_properties,
    # while the values stored below come from the 'rabbitmq' factory
    # context — presumably these agree; confirm if they can ever diverge.
    rabbit_props = utils.ctx_factory.get('rabbitmq')
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip(
            rabbit_props.get('rabbitmq_endpoint_ip'))
    ctx.instance.runtime_properties['rabbitmq_username'] = \
        rabbit_props.get('rabbitmq_username')
    ctx.instance.runtime_properties['rabbitmq_password'] = \
        rabbit_props.get('rabbitmq_password')

    ctx.logger.info('Installing Riemann...')
    utils.set_selinux_permissive()

    utils.copy_notice(RIEMANN_SERVICE_NAME)
    utils.mkdir(riemann_log_path)
    utils.mkdir(langohr_home)
    utils.mkdir(riemann_config_path)
    utils.mkdir('{0}/conf.d'.format(riemann_config_path))

    langohr = utils.download_cloudify_resource(langohr_source_url,
                                               RIEMANN_SERVICE_NAME)

    utils.sudo(['cp', langohr, extra_classpath])
    ctx.logger.info('Applying Langohr permissions...')
    # world-readable so the riemann service user can load the jar
    utils.sudo(['chmod', '644', extra_classpath])
    # daemonize must be installed before riemann (riemann's init depends
    # on it — TODO confirm; the install order here preserves that)
    utils.yum_install(daemonize_source_url, service_name=RIEMANN_SERVICE_NAME)
    utils.yum_install(riemann_source_url, service_name=RIEMANN_SERVICE_NAME)
    utils.logrotate(RIEMANN_SERVICE_NAME)

    ctx.logger.info('Downloading cloudify-manager Repository...')
    manager_repo = utils.download_cloudify_resource(cloudify_resources_url,
                                                    RIEMANN_SERVICE_NAME)
    ctx.logger.info('Extracting Manager Repository...')
    utils.untar(manager_repo, '/tmp')
    ctx.logger.info('Deploying Riemann manager.config...')
    utils.move(
        '/tmp/plugins/riemann-controller/riemann_controller/resources/manager.config',  # NOQA
        '{0}/conf.d/manager.config'.format(riemann_config_path))

    ctx.logger.info('Deploying Riemann conf...')
    utils.deploy_blueprint_resource(
        '{0}/main.clj'.format(CONFIG_PATH),
        '{0}/main.clj'.format(riemann_config_path),
        RIEMANN_SERVICE_NAME)

    # our riemann configuration will (by default) try to read these
    # environment variables. If they don't exist, it will assume
    # that they're found at "localhost"
    # export MANAGEMENT_IP=""
    # export RABBITMQ_HOST=""

    # we inject the management_ip for both of these to Riemann's systemd
    # config.
    # These should be potentially different
    # if the manager and rabbitmq are running on different hosts.
    utils.systemd.configure(RIEMANN_SERVICE_NAME)
    utils.clean_var_log_dir(RIEMANN_SERVICE_NAME)
def preconfigure_nginx():
    """Preconfigure Nginx from the REST-service target node.

    Copies the protocol choices from the target node's runtime properties
    onto the source node, deploys REST certificates when HTTPS is selected
    (keeping the external public certificate for later output), renders all
    Nginx configuration files, and enables the nginx systemd unit.
    """
    target_runtime_props = ctx.target.instance.runtime_properties
    source_runtime_props = ctx.source.instance.runtime_properties

    # nginx's default.conf selects its server blocks based on these values
    rest_protocol = target_runtime_props['rest_protocol']
    file_server_protocol = target_runtime_props['file_server_protocol']

    # TODO: NEED TO IMPLEMENT THIS IN CTX UTILS
    source_runtime_props['rest_protocol'] = rest_protocol
    source_runtime_props['file_server_protocol'] = file_server_protocol

    if rest_protocol == 'https':
        utils.deploy_rest_certificates(
            internal_rest_host=target_runtime_props['internal_rest_host'],
            external_rest_host=target_runtime_props['external_rest_host'])
        # keep the rest public certificate for output later
        target_runtime_props['external_rest_cert_content'] = \
            utils.get_file_content(EXTERNAL_REST_CERT_PATH)

    ctx.logger.info('Deploying Nginx configuration files...')
    # (blueprint resource name, destination) pairs, deployed in order
    config_files = [
        ('{0}-rest-server.cloudify'.format(rest_protocol),
         '/etc/nginx/conf.d/{0}-rest-server.cloudify'.format(rest_protocol)),
        ('{0}-file-server.cloudify'.format(file_server_protocol),
         '/etc/nginx/conf.d/{0}-file-server.cloudify'.format(
             file_server_protocol)),
        ('nginx.conf', '/etc/nginx/nginx.conf'),
        ('default.conf', '/etc/nginx/conf.d/default.conf'),
        ('rest-location.cloudify',
         '/etc/nginx/conf.d/rest-location.cloudify'),
        ('fileserver-location.cloudify',
         '/etc/nginx/conf.d/fileserver-location.cloudify'),
        ('redirect-to-fileserver.cloudify',
         '/etc/nginx/conf.d/redirect-to-fileserver.cloudify'),
        ('ui-locations.cloudify',
         '/etc/nginx/conf.d/ui-locations.cloudify'),
        ('logs-conf.cloudify', '/etc/nginx/conf.d/logs-conf.cloudify'),
    ]
    for resource_name, destination in config_files:
        utils.deploy_blueprint_resource(
            '{0}/{1}'.format(CONFIG_PATH, resource_name),
            destination,
            NGINX_SERVICE_NAME,
            load_ctx=False)

    utils.systemd.enable(NGINX_SERVICE_NAME, append_prefix=False)
def install_mgmtworker():
    """Install and configure the Management Worker service.

    Stores RabbitMQ endpoint/credentials and the broker port (SSL or not)
    as runtime properties, installs the management worker RPM (which
    creates its virtualenv), deploys the broker SSL certificate when
    enabled, renders broker_config.json, and configures systemd/logrotate.
    """
    management_worker_rpm_source_url = \
        ctx_properties['management_worker_rpm_source_url']

    # these must all be exported as part of the start operation.
    # they will not persist, so we should use the new agent
    # don't forget to change all localhosts to the relevant ips
    mgmtworker_home = '/opt/mgmtworker'
    mgmtworker_venv = '{0}/env'.format(mgmtworker_home)
    celery_work_dir = '{0}/work'.format(mgmtworker_home)
    celery_log_dir = "/var/log/cloudify/mgmtworker"

    # standard AMQP ports; kept as strings since they are written into
    # runtime properties consumed as text
    broker_port_ssl = '5671'
    broker_port_no_ssl = '5672'
    rabbitmq_ssl_enabled = ctx_properties['rabbitmq_ssl_enabled']
    ctx.logger.info("rabbitmq_ssl_enabled: {0}".format(rabbitmq_ssl_enabled))
    rabbitmq_cert_public = ctx_properties['rabbitmq_cert_public']

    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip(
            ctx_properties.get('rabbitmq_endpoint_ip'))

    # Fix possible injections in json of rabbit credentials
    # See json.org for string spec
    for key in ['rabbitmq_username', 'rabbitmq_password']:
        # We will not escape newlines or other control characters,
        # we will accept them breaking
        # things noisily, e.g. on newlines and backspaces.
        # TODO: add:
        # sed 's/"/\\"/' | sed 's/\\/\\\\/' | sed s-/-\\/- | sed 's/\t/\\t/'
        ctx.instance.runtime_properties[key] = ctx_properties[key]

    # Make the ssl enabled flag work with json (boolean in lower case)
    # TODO: check if still needed:
    # broker_ssl_enabled = "$(echo ${rabbitmq_ssl_enabled} | tr '[:upper:]' '[:lower:]')"  # NOQA
    ctx.instance.runtime_properties['rabbitmq_ssl_enabled'] = \
        rabbitmq_ssl_enabled

    ctx.logger.info('Installing Management Worker...')
    utils.set_selinux_permissive()
    utils.copy_notice(MGMT_WORKER_SERVICE_NAME)
    utils.mkdir(mgmtworker_home)
    utils.mkdir('{0}/config'.format(mgmtworker_home))
    utils.mkdir(celery_log_dir)
    utils.mkdir(celery_work_dir)

    # this create the mgmtworker_venv and installs the relevant
    # modules into it.
    utils.yum_install(management_worker_rpm_source_url,
                      service_name=MGMT_WORKER_SERVICE_NAME)
    _install_optional(mgmtworker_venv)

    # Add certificate and select port, as applicable
    if rabbitmq_ssl_enabled:
        broker_cert_path = '{0}/amqp_pub.pem'.format(mgmtworker_home)
        utils.deploy_ssl_certificate(
            'public', broker_cert_path, 'root', rabbitmq_cert_public)
        ctx.instance.runtime_properties['broker_cert_path'] = broker_cert_path
        # Use SSL port
        ctx.instance.runtime_properties['broker_port'] = broker_port_ssl
    else:
        # No SSL, don't use SSL port
        ctx.instance.runtime_properties['broker_port'] = broker_port_no_ssl
        if rabbitmq_cert_public is not None:
            ctx.logger.warn('Broker SSL cert supplied but SSL not enabled '
                            '(broker_ssl_enabled is False).')

    ctx.logger.info("broker_port: {0}".format(
        ctx.instance.runtime_properties['broker_port']))
    ctx.logger.info('Configuring Management worker...')
    # Deploy the broker configuration
    # TODO: This will break interestingly if mgmtworker_venv is empty.
    # Some sort of check for that would be sensible.
    # To sandy: I don't quite understand this check...
    # there is no else here..
    # for python_path in ${mgmtworker_venv}/lib/python*; do
    # NOTE(review): if the venv python is missing, broker_config.json is
    # silently never deployed — presumably intentional best-effort; confirm.
    if os.path.isfile(os.path.join(mgmtworker_venv, 'bin/python')):
        broker_conf_path = os.path.join(celery_work_dir, 'broker_config.json')
        utils.deploy_blueprint_resource(
            '{0}/broker_config.json'.format(CONFIG_PATH),
            broker_conf_path, MGMT_WORKER_SERVICE_NAME)
        # The config contains credentials, do not let the world read it
        utils.sudo(['chmod', '440', broker_conf_path])
    utils.systemd.configure(MGMT_WORKER_SERVICE_NAME)
    utils.logrotate(MGMT_WORKER_SERVICE_NAME)
def install_logstash():
    """Install and configure the Logstash service.

    Publishes Elasticsearch/RabbitMQ endpoints and credentials as runtime
    properties (consumed by the deployed logstash.conf resource, not by this
    script directly), installs the logstash RPM, and deploys its systemd
    override, configuration, sysconfig and logrotate files.
    """
    logstash_unit_override = '/etc/systemd/system/logstash.service.d'

    logstash_source_url = ctx_properties['logstash_rpm_source_url']

    rabbitmq_username = ctx_properties['rabbitmq_username']
    rabbitmq_password = ctx_properties['rabbitmq_password']

    logstash_log_path = '/var/log/cloudify/logstash'
    logstash_conf_path = '/etc/logstash/conf.d'

    # injected as an input to the script
    ctx.instance.runtime_properties['es_endpoint_ip'] = \
        os.environ['ES_ENDPOINT_IP']
    elasticsearch_props = utils.ctx_factory.get('elasticsearch')
    ctx.instance.runtime_properties['es_endpoint_port'] = \
        elasticsearch_props['es_endpoint_port']

    rabbit_props = utils.ctx_factory.get('rabbitmq')
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip(
            rabbit_props.get('rabbitmq_endpoint_ip'))
    ctx.instance.runtime_properties['rabbitmq_username'] = \
        rabbit_props['rabbitmq_username']
    ctx.instance.runtime_properties['rabbitmq_password'] = \
        rabbit_props['rabbitmq_password']

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by
    # the deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        ctx.abort_operation(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    ctx.logger.info('Installing Logstash...')
    utils.set_selinux_permissive()
    utils.copy_notice(LOGSTASH_SERVICE_NAME)

    utils.yum_install(logstash_source_url, service_name=LOGSTASH_SERVICE_NAME)

    utils.mkdir(logstash_log_path)
    utils.chown('logstash', 'logstash', logstash_log_path)

    ctx.logger.debug('Creating systemd unit override...')
    utils.mkdir(logstash_unit_override)
    utils.deploy_blueprint_resource(
        '{0}/restart.conf'.format(CONFIG_PATH),
        '{0}/restart.conf'.format(logstash_unit_override),
        LOGSTASH_SERVICE_NAME)

    ctx.logger.info('Deploying Logstash configuration...')
    utils.deploy_blueprint_resource(
        '{0}/logstash.conf'.format(CONFIG_PATH),
        '{0}/logstash.conf'.format(logstash_conf_path),
        LOGSTASH_SERVICE_NAME)

    # Due to a bug in the handling of configuration files,
    # configuration files with the same name cannot be deployed.
    # Since the logrotate config file is called `logstash`,
    # we change the name of the logstash env vars config file
    # from logstash to cloudify-logstash to be consistent with
    # other service env var files.
    init_file = '/etc/init.d/logstash'
    # Use a raw string for the pattern: '\$' is an invalid escape sequence
    # in a normal string literal and only worked because Python preserves
    # unknown escapes (it raises a DeprecationWarning on newer interpreters).
    utils.replace_in_file(r'sysconfig/\$name',
                          'sysconfig/cloudify-$name',
                          init_file)
    utils.chmod('755', init_file)
    utils.chown('root', 'root', init_file)

    ctx.logger.debug('Deploying Logstash sysconfig...')
    utils.deploy_blueprint_resource(
        '{0}/cloudify-logstash'.format(CONFIG_PATH),
        '/etc/sysconfig/cloudify-logstash',
        LOGSTASH_SERVICE_NAME)

    utils.logrotate(LOGSTASH_SERVICE_NAME)
    utils.sudo(['/sbin/chkconfig', 'logstash', 'on'])
    utils.clean_var_log_dir(LOGSTASH_SERVICE_NAME)
def preconfigure_nginx():
    """Preconfigure Nginx from the REST-service target node.

    Propagates the target node's protocol choices to the source node,
    deploys REST certificates for HTTPS (capturing the external public
    certificate for later output), renders every Nginx configuration file,
    and enables the nginx systemd unit.
    """
    target_props = ctx.target.instance.runtime_properties
    source_props = ctx.source.instance.runtime_properties

    # nginx's default.conf picks the relevant configuration by protocol
    rest_protocol = target_props['rest_protocol']
    file_server_protocol = target_props['file_server_protocol']

    # TODO: NEED TO IMPLEMENT THIS IN CTX UTILS
    source_props['rest_protocol'] = rest_protocol
    source_props['file_server_protocol'] = file_server_protocol

    if rest_protocol == 'https':
        utils.deploy_rest_certificates(
            internal_rest_host=target_props['internal_rest_host'],
            external_rest_host=target_props['external_rest_host'])
        # stash the rest public certificate so it can be output later
        target_props['external_rest_cert_content'] = \
            utils.get_file_content(EXTERNAL_REST_CERT_PATH)

    ctx.logger.info('Deploying Nginx configuration files...')

    def _deploy(resource_name, destination):
        # All files come from CONFIG_PATH and skip ctx loading.
        utils.deploy_blueprint_resource(
            '{0}/{1}'.format(CONFIG_PATH, resource_name),
            destination,
            NGINX_SERVICE_NAME,
            load_ctx=False)

    _deploy('{0}-rest-server.cloudify'.format(rest_protocol),
            '/etc/nginx/conf.d/{0}-rest-server.cloudify'.format(
                rest_protocol))
    _deploy('{0}-file-server.cloudify'.format(file_server_protocol),
            '/etc/nginx/conf.d/{0}-file-server.cloudify'.format(
                file_server_protocol))
    _deploy('nginx.conf', '/etc/nginx/nginx.conf')
    _deploy('default.conf', '/etc/nginx/conf.d/default.conf')
    _deploy('rest-location.cloudify',
            '/etc/nginx/conf.d/rest-location.cloudify')
    _deploy('fileserver-location.cloudify',
            '/etc/nginx/conf.d/fileserver-location.cloudify')
    _deploy('redirect-to-fileserver.cloudify',
            '/etc/nginx/conf.d/redirect-to-fileserver.cloudify')
    _deploy('ui-locations.cloudify',
            '/etc/nginx/conf.d/ui-locations.cloudify')
    _deploy('logs-conf.cloudify', '/etc/nginx/conf.d/logs-conf.cloudify')

    utils.systemd.enable(NGINX_SERVICE_NAME, append_prefix=False)
def _configure_influxdb(host, port):
    """Create the 'cloudify' database on InfluxDB with a retention policy.

    Skips creation if the database already exists, otherwise POSTs the
    blueprint's retention.json policy to the database_configs endpoint and
    verifies the database appears in the listing afterwards.

    :param host: InfluxDB host to connect to.
    :param port: InfluxDB HTTP API port.
    """
    db_user = "******"
    db_pass = "******"
    db_name = "cloudify"

    ctx.logger.info('Creating InfluxDB Database...')

    # the below request is equivalent to running:
    # curl -S -s "http://localhost:8086/db?u=root&p=root" '-d "{\"name\": \"cloudify\"}"  # NOQA
    import urllib
    import urllib2
    import ast

    endpoint_for_list = 'http://{0}:{1}/db'.format(host, port)
    endpoint_for_creation = ('http://{0}:{1}/cluster/database_configs/'
                             '{2}'.format(host, port, db_name))
    params = urllib.urlencode(dict(u=db_user, p=db_pass))
    url_for_list = endpoint_for_list + '?' + params
    url_for_creation = endpoint_for_creation + '?' + params

    # check if db already exists.
    # json.loads, NOT eval: the response body is JSON, and eval() would
    # execute arbitrary code if the endpoint were ever compromised.
    db_list = json.loads(
        urllib2.urlopen(urllib2.Request(url_for_list)).read())
    if any(d.get('name') == db_name for d in db_list):
        ctx.logger.info('Database {0} already exists!'.format(db_name))
        return

    try:
        utils.deploy_blueprint_resource(
            '{0}/retention.json'.format(CONFIG_PATH),
            '/tmp/retention.json', SERVICE_NAME)
        with open('/tmp/retention.json') as policy_file:
            retention_policy = policy_file.read()
        ctx.logger.debug(
            'Using retention policy: \n{0}'.format(retention_policy))
        # normalize the policy through literal_eval -> json.dumps so the
        # request body is strict JSON even if the file uses Python syntax
        data = json.dumps(ast.literal_eval(retention_policy))
        ctx.logger.debug('Using retention policy: \n{0}'.format(data))
        content_length = len(data)
        request = urllib2.Request(url_for_creation, data, {
            'Content-Type': 'application/json',
            'Content-Length': content_length
        })
        ctx.logger.debug('Request is: {0}'.format(request))
        request_reader = urllib2.urlopen(request)
        response = request_reader.read()
        ctx.logger.debug('Response: {0}'.format(response))
        request_reader.close()
        utils.remove('/tmp/retention.json')
    except Exception as ex:
        ctx.abort_operation('Failed to create: {0} ({1}).'.format(db_name,
                                                                  ex))

    # verify db created. Explicit check instead of assert/AssertionError:
    # asserts are stripped when Python runs with -O, silently skipping
    # verification.
    ctx.logger.info('Verifying database was created successfully...')
    db_list = json.loads(
        urllib2.urlopen(urllib2.Request(url_for_list)).read())
    if not any(d.get('name') == db_name for d in db_list):
        ctx.abort_operation('Verification failed!')
    ctx.logger.info('Database {0} created successfully.'.format(db_name))