def _install_stage():
    """Install NodeJS and the Cloudify Stage (UI) component.

    Skips installation (setting the 'skip_installation' runtime property
    to 'true') when the stage tarball is absent from the manager
    resources package.  Detects community edition from the tarball name
    and records the service mode in runtime properties, then runs the
    stage DB migrations via the bundled npm.
    """
    nodejs_source_url = ctx_properties['nodejs_tar_source_url']
    stage_source_url = ctx_properties['stage_tar_source_url']

    if not utils.resource_factory.local_resource_exists(stage_source_url):
        ctx.logger.info('Stage package not found in manager resources '
                        'package. Stage will not be installed.')
        ctx.instance.runtime_properties['skip_installation'] = 'true'
        return

    # injected as an input to the script
    ctx.instance.runtime_properties['influxdb_endpoint_ip'] = \
        os.environ.get('INFLUXDB_ENDPOINT_IP')

    utils.set_selinux_permissive()
    utils.copy_notice(SERVICE_NAME)

    utils.mkdir(NODEJS_DIR)
    utils.mkdir(HOME_DIR)
    utils.mkdir(LOG_DIR)

    utils.create_service_user(STAGE_USER, STAGE_GROUP, HOME_DIR)

    ctx.logger.info('Installing NodeJS...')
    nodejs = utils.download_cloudify_resource(nodejs_source_url, SERVICE_NAME)
    utils.untar(nodejs, NODEJS_DIR)
    utils.remove(nodejs)

    ctx.logger.info('Installing Cloudify Stage (UI)...')
    stage_tar = utils.download_cloudify_resource(stage_source_url,
                                                 SERVICE_NAME)
    # Community builds are identified by their archive name.
    if 'community' in stage_tar:
        ctx.logger.info('Community edition')
        ctx.instance.runtime_properties['community_mode'] = '-mode community'
    else:
        ctx.instance.runtime_properties['community_mode'] = ''
    utils.untar(stage_tar, HOME_DIR)
    utils.remove(stage_tar)

    ctx.logger.info('Fixing permissions...')
    utils.chown(STAGE_USER, STAGE_GROUP, HOME_DIR)
    utils.chown(STAGE_USER, STAGE_GROUP, NODEJS_DIR)
    utils.chown(STAGE_USER, STAGE_GROUP, LOG_DIR)

    # Deploy the snapshot-restore helper and let the stage user sudo it.
    utils.deploy_sudo_command_script(
        'restore-snapshot.py',
        'Restore stage directories from a snapshot path',
        component=SERVICE_NAME,
        allow_as=STAGE_USER)
    utils.chmod('a+rx', '/opt/cloudify/stage/restore-snapshot.py')
    utils.sudo(['usermod', '-aG', utils.CLOUDIFY_GROUP, STAGE_USER])

    utils.logrotate(SERVICE_NAME)
    utils.systemd.configure(SERVICE_NAME)

    # Run the stage backend DB migrations with the bundled NodeJS npm.
    backend_dir = join(HOME_DIR, 'backend')
    npm_path = join(NODEJS_DIR, 'bin', 'npm')
    subprocess.check_call('cd {0}; {1} run db-migrate'.format(
        backend_dir, npm_path), shell=True)
def install_webui():
    """Install NodeJS, the Cloudify WebUI and Grafana under /opt.

    Reads tarball URLs from node properties, unpacks each package into
    its home directory, deploys WebUI/Grafana configuration from the
    blueprint, and configures logrotate and the systemd unit.
    """
    nodejs_source_url = ctx.node.properties['nodejs_tar_source_url']
    webui_source_url = ctx.node.properties['webui_tar_source_url']
    grafana_source_url = ctx.node.properties['grafana_tar_source_url']

    # injected as an input to the script
    ctx.instance.runtime_properties['influxdb_endpoint_ip'] = \
        os.environ.get('INFLUXDB_ENDPOINT_IP')

    nodejs_home = '/opt/nodejs'
    webui_home = '/opt/cloudify-ui'
    webui_log_path = '/var/log/cloudify/webui'
    grafana_home = '{0}/grafana'.format(webui_home)

    # NOTE(review): '******' looks like a redacted username literal —
    # confirm the intended service user name before relying on this.
    webui_user = '******'
    webui_group = 'webui'

    ctx.logger.info('Installing Cloudify\'s WebUI...')
    utils.set_selinux_permissive()
    utils.copy_notice('webui')

    utils.mkdir(nodejs_home)
    utils.mkdir(webui_home)
    utils.mkdir('{0}/backend'.format(webui_home))
    utils.mkdir(webui_log_path)
    utils.mkdir(grafana_home)

    # NOTE(review): called with (user, home) only, unlike other installers
    # that pass (user, group, home) — presumably an older utils signature;
    # verify against utils.create_service_user.
    utils.create_service_user(webui_user, webui_home)

    ctx.logger.info('Installing NodeJS...')
    nodejs = utils.download_cloudify_resource(nodejs_source_url)
    utils.untar(nodejs, nodejs_home)

    ctx.logger.info('Installing Cloudify\'s WebUI...')
    webui = utils.download_cloudify_resource(webui_source_url)
    utils.untar(webui, webui_home)

    ctx.logger.info('Installing Grafana...')
    grafana = utils.download_cloudify_resource(grafana_source_url)
    utils.untar(grafana, grafana_home)

    ctx.logger.info('Deploying WebUI Configuration...')
    utils.deploy_blueprint_resource(
        '{0}/gsPresets.json'.format(CONFIG_PATH),
        '{0}/backend/gsPresets.json'.format(webui_home))
    ctx.logger.info('Deploying Grafana Configuration...')
    utils.deploy_blueprint_resource(
        '{0}/grafana_config.js'.format(CONFIG_PATH),
        '{0}/config.js'.format(grafana_home))

    ctx.logger.info('Fixing permissions...')
    utils.chown(webui_user, webui_group, webui_home)
    utils.chown(webui_user, webui_group, nodejs_home)
    utils.chown(webui_user, webui_group, webui_log_path)

    utils.logrotate('webui')
    utils.systemd.configure('webui')
def install_riemann():
    """Install the Riemann event-stream processor and its dependencies.

    Validates that RabbitMQ credentials were supplied, records broker
    connection details in runtime properties, installs langohr/daemonize/
    riemann, and extracts the cloudify-manager repo (needed for Riemann's
    config) into /tmp.
    """
    langohr_source_url = ctx_properties['langohr_jar_source_url']
    daemonize_source_url = ctx_properties['daemonize_rpm_source_url']
    riemann_source_url = ctx_properties['riemann_rpm_source_url']
    # Needed for Riemann's config
    cloudify_resources_url = ctx_properties['cloudify_resources_url']
    rabbitmq_username = ctx_properties['rabbitmq_username']
    rabbitmq_password = ctx_properties['rabbitmq_password']

    riemann_config_path = '/etc/riemann'
    riemann_log_path = '/var/log/cloudify/riemann'
    langohr_home = '/opt/lib'
    extra_classpath = '{0}/langohr.jar'.format(langohr_home)

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by
    # the deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        ctx.abort_operation(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    # Propagate broker connection details for the deployed resources.
    rabbit_props = utils.ctx_factory.get('rabbitmq')
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip(
            rabbit_props.get('rabbitmq_endpoint_ip'))
    ctx.instance.runtime_properties['rabbitmq_username'] = \
        rabbit_props.get('rabbitmq_username')
    ctx.instance.runtime_properties['rabbitmq_password'] = \
        rabbit_props.get('rabbitmq_password')

    ctx.logger.info('Installing Riemann...')
    utils.set_selinux_permissive()
    utils.copy_notice(RIEMANN_SERVICE_NAME)

    utils.mkdir(riemann_log_path)
    utils.mkdir(langohr_home)
    utils.mkdir(riemann_config_path)
    utils.mkdir('{0}/conf.d'.format(riemann_config_path))

    # Langohr jar goes on Riemann's extra classpath, world-readable.
    langohr = utils.download_cloudify_resource(langohr_source_url,
                                               RIEMANN_SERVICE_NAME)
    utils.sudo(['cp', langohr, extra_classpath])
    ctx.logger.info('Applying Langohr permissions...')
    utils.sudo(['chmod', '644', extra_classpath])
    utils.yum_install(daemonize_source_url, service_name=RIEMANN_SERVICE_NAME)
    utils.yum_install(riemann_source_url, service_name=RIEMANN_SERVICE_NAME)

    utils.logrotate(RIEMANN_SERVICE_NAME)

    ctx.logger.info('Downloading cloudify-manager Repository...')
    manager_repo = utils.download_cloudify_resource(cloudify_resources_url,
                                                    RIEMANN_SERVICE_NAME)
    ctx.logger.info('Extracting Manager Repository...')
    utils.untar(manager_repo, '/tmp')
def _install_stage():
    """Install NodeJS and the Cloudify Stage (UI) component.

    Skips installation (setting the 'skip_installation' runtime property
    to 'true') when the stage tarball is absent from the manager
    resources package, then runs the stage DB migrations via the
    bundled npm.
    """
    nodejs_source_url = ctx_properties['nodejs_tar_source_url']
    stage_source_url = ctx_properties['stage_tar_source_url']

    if not utils.resource_factory.local_resource_exists(stage_source_url):
        ctx.logger.info('Stage package not found in manager resources '
                        'package. Stage will not be installed.')
        ctx.instance.runtime_properties['skip_installation'] = 'true'
        return

    # injected as an input to the script
    ctx.instance.runtime_properties['influxdb_endpoint_ip'] = \
        os.environ.get('INFLUXDB_ENDPOINT_IP')

    utils.set_selinux_permissive()
    utils.copy_notice(SERVICE_NAME)

    utils.mkdir(NODEJS_DIR)
    utils.mkdir(HOME_DIR)
    utils.mkdir(LOG_DIR)

    utils.create_service_user(STAGE_USER, STAGE_GROUP, HOME_DIR)

    ctx.logger.info('Installing NodeJS...')
    nodejs = utils.download_cloudify_resource(nodejs_source_url, SERVICE_NAME)
    utils.untar(nodejs, NODEJS_DIR)
    utils.remove(nodejs)

    ctx.logger.info('Installing Cloudify Stage (UI)...')
    stage_tar = utils.download_cloudify_resource(stage_source_url,
                                                 SERVICE_NAME)
    utils.untar(stage_tar, HOME_DIR)
    utils.remove(stage_tar)

    ctx.logger.info('Fixing permissions...')
    utils.chown(STAGE_USER, STAGE_GROUP, HOME_DIR)
    utils.chown(STAGE_USER, STAGE_GROUP, NODEJS_DIR)
    utils.chown(STAGE_USER, STAGE_GROUP, LOG_DIR)

    # Deploy the snapshot-restore helper and let the stage user sudo it.
    utils.deploy_sudo_command_script(
        'restore-snapshot.py',
        'Restore stage directories from a snapshot path',
        component=SERVICE_NAME,
        allow_as=STAGE_USER)
    utils.chmod('a+rx', '/opt/cloudify/stage/restore-snapshot.py')

    utils.logrotate(SERVICE_NAME)
    utils.systemd.configure(SERVICE_NAME)

    # Run the stage backend DB migrations with the bundled NodeJS npm.
    backend_dir = join(HOME_DIR, 'backend')
    npm_path = join(NODEJS_DIR, 'bin', 'npm')
    subprocess.check_call(
        'cd {0}; {1} run db-migrate'.format(backend_dir, npm_path),
        shell=True)
def _prepare_sanity_app():
    """Download the sanity application archive, upload its blueprint
    and deploy the application."""
    app_tar = utils.download_cloudify_resource(
        url=ctx_properties['sanity_app_source_url'],
        service_name=SANITY_SERVICE_NAME)
    _upload_app_blueprint(app_tar)
    _deploy_app()
def install_syncthing():
    """Fetch the syncthing package and unpack it into HOME_DIR."""
    package_path = utils.download_cloudify_resource(
        ctx_properties['syncthing_package_url'],
        SERVICE_NAME,
    )
    utils.mkdir(HOME_DIR)
    utils.untar(package_path, destination=HOME_DIR)
    # The downloaded archive is no longer needed once extracted.
    utils.remove(package_path)
def install_riemann():
    """Install Riemann to run under a dedicated service user.

    Validates RabbitMQ credentials, records the broker endpoint in
    runtime properties, installs langohr/daemonize/riemann, and records
    the created paths in 'files_to_remove' for later cleanup.
    """
    langohr_source_url = ctx_properties['langohr_jar_source_url']
    daemonize_source_url = ctx_properties['daemonize_rpm_source_url']
    riemann_source_url = ctx_properties['riemann_rpm_source_url']

    utils.create_service_user(user=RIEMANN_USER,
                              group=RIEMANN_GROUP,
                              home=utils.CLOUDIFY_HOME_DIR)

    riemann_config_path = '/etc/riemann'
    riemann_log_path = '/var/log/cloudify/riemann'
    langohr_home = '/opt/lib'
    extra_classpath = '{0}/langohr.jar'.format(langohr_home)
    riemann_dir = '/opt/riemann'

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by
    # the deployed resources, hence the check here.
    rabbitmq_username = ctx_properties['rabbitmq_username']
    rabbitmq_password = ctx_properties['rabbitmq_password']
    if not rabbitmq_username or not rabbitmq_password:
        ctx.abort_operation(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    runtime_props['rabbitmq_endpoint_ip'] = utils.get_rabbitmq_endpoint_ip()

    ctx.logger.info('Installing Riemann...')
    utils.set_selinux_permissive()
    utils.copy_notice(RIEMANN_SERVICE_NAME)

    utils.mkdir(riemann_log_path)
    utils.mkdir(langohr_home)
    utils.mkdir(riemann_config_path)
    utils.mkdir('{0}/conf.d'.format(riemann_config_path))
    # utils.chown cannot be used as it will change both user and group
    utils.sudo(['chown', RIEMANN_USER, riemann_dir])

    # Langohr jar goes on Riemann's extra classpath, world-readable.
    langohr = utils.download_cloudify_resource(langohr_source_url,
                                               RIEMANN_SERVICE_NAME)
    utils.sudo(['cp', langohr, extra_classpath])
    ctx.logger.info('Applying Langohr permissions...')
    utils.sudo(['chmod', '644', extra_classpath])
    utils.yum_install(daemonize_source_url, service_name=RIEMANN_SERVICE_NAME)
    utils.yum_install(riemann_source_url, service_name=RIEMANN_SERVICE_NAME)
    utils.chown(RIEMANN_USER, RIEMANN_GROUP, riemann_log_path)

    utils.logrotate(RIEMANN_SERVICE_NAME)

    # Record created paths so teardown/uninstall can clean them up.
    files_to_remove = [
        riemann_config_path,
        riemann_log_path,
        extra_classpath,
        riemann_dir
    ]
    runtime_props['files_to_remove'] = files_to_remove
def _upload_app_blueprint(client):
    """Publish the sanity-app blueprint archive, unless one with the
    same blueprint id already exists on the manager."""
    if _is_sanity_blueprint_exist(client):
        return

    app_tar = utils.download_cloudify_resource(
        url=ctx_properties['sanity_app_source_url'],
        service_name=SERVICE_NAME)

    client.blueprints.publish_archive(
        app_tar,
        blueprint_id=BLUEPRINT_ID,
        blueprint_filename='no-monitoring-singlehost-blueprint.yaml')
def install_stage():
    """Install NodeJS and the Cloudify Stage (UI) under /opt.

    Unpacks both tarballs, sets ownership to the stage user, and
    configures logrotate and the systemd unit.
    """
    nodejs_source_url = ctx_properties['nodejs_tar_source_url']
    stage_source_url = ctx_properties['stage_tar_source_url']

    # injected as an input to the script
    ctx.instance.runtime_properties['influxdb_endpoint_ip'] = \
        os.environ.get('INFLUXDB_ENDPOINT_IP')

    nodejs_home = '/opt/nodejs'
    stage_home = '/opt/cloudify-stage'
    stage_log_path = '/var/log/cloudify/stage'

    # NOTE(review): '******' looks like a redacted username literal —
    # confirm the intended service user name before relying on this.
    stage_user = '******'
    stage_group = 'stage'

    utils.set_selinux_permissive()
    utils.copy_notice(STAGE_SERVICE_NAME)

    utils.mkdir(nodejs_home)
    utils.mkdir(stage_home)
    utils.mkdir(stage_log_path)

    utils.create_service_user(stage_user, stage_home)

    ctx.logger.info('Installing NodeJS...')
    nodejs = utils.download_cloudify_resource(nodejs_source_url,
                                              STAGE_SERVICE_NAME)
    utils.untar(nodejs, nodejs_home)

    ctx.logger.info('Installing Cloudify Stage (UI)...')
    stage = utils.download_cloudify_resource(stage_source_url,
                                             STAGE_SERVICE_NAME)
    utils.untar(stage, stage_home)

    ctx.logger.info('Fixing permissions...')
    utils.chown(stage_user, stage_group, stage_home)
    utils.chown(stage_user, stage_group, nodejs_home)
    utils.chown(stage_user, stage_group, stage_log_path)

    utils.logrotate(STAGE_SERVICE_NAME)
    utils.systemd.configure(STAGE_SERVICE_NAME)
def install_optional(rest_venv):
    """Install optionally-supplied Python modules into the REST venv.

    Each module URL is only installed when non-empty.  If supplied,
    pip constraints are written to a temp file, passed to every
    install, and removed at the end.

    :param rest_venv: path of the REST service virtualenv
    """
    props = ctx_properties

    dsl_parser_source_url = props['dsl_parser_module_source_url']
    rest_client_source_url = props['rest_client_module_source_url']
    plugins_common_source_url = props['plugins_common_module_source_url']
    script_plugin_source_url = props['script_plugin_module_source_url']
    agent_source_url = props['agent_module_source_url']
    pip_constraints = props['pip_constraints']
    rest_service_source_url = props['rest_service_module_source_url']

    constraints_file = utils.write_to_tempfile(pip_constraints) \
        if pip_constraints else None

    # this allows to upgrade modules if necessary.
    ctx.logger.info('Installing Optional Packages if supplied...')
    if dsl_parser_source_url:
        utils.install_python_package(dsl_parser_source_url, rest_venv,
                                     constraints_file)
    if rest_client_source_url:
        utils.install_python_package(rest_client_source_url, rest_venv,
                                     constraints_file)
    if plugins_common_source_url:
        utils.install_python_package(plugins_common_source_url, rest_venv,
                                     constraints_file)
    if script_plugin_source_url:
        utils.install_python_package(script_plugin_source_url, rest_venv,
                                     constraints_file)
    if agent_source_url:
        utils.install_python_package(agent_source_url, rest_venv,
                                     constraints_file)

    if rest_service_source_url:
        ctx.logger.info('Downloading cloudify-manager Repository...')
        manager_repo = \
            utils.download_cloudify_resource(rest_service_source_url,
                                             SERVICE_NAME)
        ctx.logger.info('Extracting Manager Repository...')
        tmp_dir = utils.untar(manager_repo, unique_tmp_dir=True)
        rest_service_dir = join(tmp_dir, 'rest-service')
        resources_dir = join(tmp_dir, 'resources/rest-service/cloudify/')

        ctx.logger.info('Installing REST Service...')
        utils.install_python_package(rest_service_dir, rest_venv,
                                     constraints_file)

        ctx.logger.info('Deploying Required Manager Resources...')
        utils.move(resources_dir, utils.MANAGER_RESOURCES_HOME)

        utils.remove(tmp_dir)
    # Clean up the temporary constraints file, if one was written.
    if constraints_file:
        os.remove(constraints_file)
def install_logstash_output_jdbc_plugin():
    """Install output plugin needed to write to SQL databases."""
    plugin_url = ctx_properties['logstash_output_jdbc_plugin_url']

    ctx.logger.info('Installing logstash-output-jdbc plugin...')
    plugin_path = utils.download_cloudify_resource(
        plugin_url,
        service_name=LOGSTASH_SERVICE_NAME)
    # Run logstash's plugin installer as the logstash user.
    utils.run([
        'sudo', '-u', 'logstash',
        '/opt/logstash/bin/plugin',
        'install',
        plugin_path,
    ])
def _install_stage():
    """Install NodeJS and the Cloudify Stage (UI) component.

    Skips installation (setting the 'skip_installation' runtime property
    to 'true') when the stage tarball is absent from the manager
    resources package.
    """
    nodejs_source_url = ctx_properties['nodejs_tar_source_url']
    stage_source_url = ctx_properties['stage_tar_source_url']

    if not utils.resource_factory.local_resource_exists(stage_source_url):
        ctx.logger.info('Stage package not found in manager resources '
                        'package. Stage will not be installed.')
        ctx.instance.runtime_properties['skip_installation'] = 'true'
        return

    # injected as an input to the script
    ctx.instance.runtime_properties['influxdb_endpoint_ip'] = \
        os.environ.get('INFLUXDB_ENDPOINT_IP')

    utils.set_selinux_permissive()
    utils.copy_notice(SERVICE_NAME)

    utils.mkdir(NODEJS_DIR)
    utils.mkdir(HOME_DIR)
    utils.mkdir(LOG_DIR)

    utils.create_service_user(STAGE_USER, STAGE_GROUP, HOME_DIR)

    ctx.logger.info('Installing NodeJS...')
    nodejs = utils.download_cloudify_resource(nodejs_source_url, SERVICE_NAME)
    utils.untar(nodejs, NODEJS_DIR)
    utils.remove(nodejs)

    ctx.logger.info('Installing Cloudify Stage (UI)...')
    stage_tar = utils.download_cloudify_resource(stage_source_url,
                                                 SERVICE_NAME)
    utils.untar(stage_tar, HOME_DIR)
    utils.remove(stage_tar)

    ctx.logger.info('Fixing permissions...')
    utils.chown(STAGE_USER, STAGE_GROUP, HOME_DIR)
    utils.chown(STAGE_USER, STAGE_GROUP, NODEJS_DIR)
    utils.chown(STAGE_USER, STAGE_GROUP, LOG_DIR)

    utils.logrotate(SERVICE_NAME)
    utils.systemd.configure(SERVICE_NAME)
def _install_optional(mgmtworker_venv):
    """Install optionally-supplied modules into the mgmtworker venv.

    Each module URL is only installed when non-empty.  If supplied,
    pip constraints are written to a temp file, passed to every
    install, and removed at the end.  When a rest-service repo URL is
    given, the riemann-controller and workflows plugins are installed
    from it.

    :param mgmtworker_venv: path of the management worker virtualenv
    """
    rest_client_source_url = ctx_properties['rest_client_module_source_url']
    plugins_common_source_url = \
        ctx_properties['plugins_common_module_source_url']
    script_plugin_source_url = \
        ctx_properties['script_plugin_module_source_url']
    rest_service_source_url = ctx_properties['rest_service_module_source_url']
    agent_source_url = ctx_properties['agent_module_source_url']

    pip_constraints = ctx_properties['pip_constraints']
    constraints_file = utils.write_to_tempfile(pip_constraints) if \
        pip_constraints else None

    # this allows to upgrade modules if necessary.
    ctx.logger.info('Installing Optional Packages if supplied...')
    if rest_client_source_url:
        utils.install_python_package(rest_client_source_url,
                                     mgmtworker_venv, constraints_file)
    if plugins_common_source_url:
        utils.install_python_package(
            plugins_common_source_url, mgmtworker_venv, constraints_file)
    if script_plugin_source_url:
        utils.install_python_package(script_plugin_source_url,
                                     mgmtworker_venv, constraints_file)
    if agent_source_url:
        utils.install_python_package(agent_source_url, mgmtworker_venv,
                                     constraints_file)

    if rest_service_source_url:
        ctx.logger.info('Downloading cloudify-manager Repository...')
        manager_repo = \
            utils.download_cloudify_resource(rest_service_source_url,
                                             SERVICE_NAME)
        ctx.logger.info('Extracting Manager Repository...')
        tmp_dir = utils.untar(manager_repo, unique_tmp_dir=True)
        workflows_dir = join(tmp_dir, 'workflows')
        riemann_dir = join(tmp_dir, 'plugins/riemann-controller')

        ctx.logger.info('Installing Management Worker Plugins...')
        utils.install_python_package(riemann_dir, mgmtworker_venv,
                                     constraints_file)
        utils.install_python_package(workflows_dir, mgmtworker_venv,
                                     constraints_file)

        utils.remove(tmp_dir)
    # Clean up the temporary constraints file, if one was written.
    if constraints_file:
        os.remove(constraints_file)
def install_postgresql_jdbc_driver():
    """Install driver used by the jdbc plugin to write data to postgresql."""
    driver_url = ctx_properties['postgresql_jdbc_driver_url']

    ctx.logger.info('Installing PostgreSQL JDBC driver...')
    jar_path = '/opt/logstash/vendor/jar'
    jdbc_path = join(jar_path, 'jdbc')
    utils.mkdir(jdbc_path)
    utils.chown('logstash', 'logstash', jar_path)
    driver_path = utils.download_cloudify_resource(
        driver_url,
        service_name=LOGSTASH_SERVICE_NAME)
    # Copy the jar into logstash's vendor dir as the logstash user,
    # keeping the original file name from the URL.
    utils.run([
        'sudo', '-u', 'logstash',
        'cp',
        driver_path,
        join(jdbc_path, basename(driver_url)),
    ])
def _install_composer():
    """Install Cloudify Composer.

    Skips installation (setting the 'skip_installation' runtime property
    to 'true') when the composer tarball is absent from the manager
    resources package.  Sets up group memberships so composer files are
    usable by replication/snapshots, then runs the composer DB
    migrations via the bundled npm.
    """
    composer_source_url = ctx_properties['composer_tar_source_url']
    if not utils.resource_factory.local_resource_exists(composer_source_url):
        ctx.logger.info('Composer package not found in manager resources '
                        'package. Composer will not be installed.')
        ctx.instance.runtime_properties['skip_installation'] = 'true'
        return

    utils.set_selinux_permissive()
    utils.copy_notice(SERVICE_NAME)

    utils.mkdir(NODEJS_DIR)
    utils.mkdir(HOME_DIR)
    utils.mkdir(LOG_DIR)

    utils.create_service_user(COMPOSER_USER, COMPOSER_GROUP, HOME_DIR)
    # adding cfyuser to the composer group so that its files are r/w for
    # replication and snapshots (restart of mgmtworker necessary for change
    # to take effect)
    utils.sudo(['usermod', '-aG', COMPOSER_GROUP, utils.CLOUDIFY_USER])
    # This makes sure that the composer folders will be writable after
    # snapshot restore
    utils.sudo(['usermod', '-aG', utils.CLOUDIFY_GROUP, COMPOSER_USER])
    utils.systemd.restart('mgmtworker')

    ctx.logger.info('Installing Cloudify Composer...')
    composer_tar = utils.download_cloudify_resource(composer_source_url,
                                                    SERVICE_NAME)
    utils.untar(composer_tar, HOME_DIR)
    utils.remove(composer_tar)

    ctx.logger.info('Fixing permissions...')
    utils.chown(COMPOSER_USER, COMPOSER_GROUP, HOME_DIR)
    utils.chown(COMPOSER_USER, COMPOSER_GROUP, LOG_DIR)
    utils.chmod('g+w', CONF_DIR)
    utils.chmod('g+w', dirname(CONF_DIR))

    utils.logrotate(SERVICE_NAME)
    utils.systemd.configure(SERVICE_NAME)

    # Run the composer DB migrations with the bundled NodeJS npm.
    npm_path = join(NODEJS_DIR, 'bin', 'npm')
    subprocess.check_call('cd {}; {} run db-migrate'.format(
        HOME_DIR, npm_path), shell=True)
def install_plugin(name, plugin_url):
    """Install plugin.

    :param name: Plugin name
    :type name: str
    :param plugin_url: Plugin file location
    :type plugin_url: str
    """
    ctx.logger.info('Installing {} plugin...'.format(name))
    plugin_path = utils.download_cloudify_resource(plugin_url,
                                                   service_name=SERVICE_NAME)
    # Use /dev/urandom to get entropy faster while installing plugins
    utils.run([
        'sudo', '-u', 'logstash',
        'sh', '-c',
        ('export JRUBY_OPTS=-J-Djava.security.egd=file:/dev/urandom; '
         '/opt/logstash/bin/plugin install {0}'.format(plugin_path))
    ])
def install_consul():
    """Download the consul zip package and install its binary into HOME_DIR."""
    consul_binary = join(HOME_DIR, 'consul')

    utils.mkdir(dirname(consul_binary))
    utils.mkdir(CONFIG_DIR)

    consul_package = \
        utils.download_cloudify_resource(ctx_properties['consul_package_url'],
                                         SERVICE_NAME)

    temp_dir = tempfile.mkdtemp()
    try:
        with zipfile.ZipFile(consul_package) as consul_archive:
            consul_archive.extractall(temp_dir)

        utils.move(join(temp_dir, 'consul'), consul_binary)
        utils.chmod('+x', consul_binary)
    finally:
        # Always remove the extraction scratch directory.
        utils.remove(temp_dir)
def install_postgresql_jdbc_driver():
    """Install driver used by the jdbc plugin to write data to postgresql."""
    jdbc_driver_url = ctx_properties['postgresql_jdbc_driver_url']

    ctx.logger.info('Installing PostgreSQL JDBC driver...')
    jar_path = join(HOME_DIR, 'vendor', 'jar')
    jdbc_path = join(jar_path, 'jdbc')
    utils.mkdir(jdbc_path)
    utils.chown('logstash', 'logstash', jar_path)
    driver_path = utils.download_cloudify_resource(jdbc_driver_url,
                                                   service_name=SERVICE_NAME)
    # Copy the jar into logstash's vendor dir as the logstash user,
    # keeping the original file name from the URL.
    utils.run([
        'sudo', '-u', 'logstash',
        'cp',
        driver_path,
        join(jdbc_path, basename(jdbc_driver_url)),
    ])
def get_manager_config():
    """
    Extracting specific files from cloudify-manager repo,
    with clean-ups after
    """
    cloudify_resources_url = ctx_properties['cloudify_resources_url']
    ctx.logger.info('Downloading cloudify-manager Repository...')
    manager_repo = utils.download_cloudify_resource(cloudify_resources_url,
                                                    SERVICE_NAME)
    ctx.logger.info('Extracting Manager Repository...')
    manager_dir = utils.untar(manager_repo, unique_tmp_dir=True)

    ctx.logger.info('Deploying Riemann manager.config...')
    config_src_path = join(
        manager_dir, 'plugins', 'riemann-controller',
        'riemann_controller', 'resources', 'manager.config')
    utils.move(
        config_src_path,
        '{0}/conf.d/manager.config'.format(RIEMANN_CONFIG_PATH))

    # Remove both the extracted tree and the downloaded archive.
    utils.remove(manager_dir)
    utils.remove(manager_repo)
def install_consul():
    """Download the consul zip package and install its binary under
    /opt/cloudify/consul."""
    consul_binary = '/opt/cloudify/consul/consul'
    consul_config_dir = '/etc/consul.d'

    utils.mkdir(dirname(consul_binary))
    utils.mkdir(consul_config_dir)

    consul_package = \
        utils.download_cloudify_resource(ctx_properties['consul_package_url'],
                                         CONSUL_SERVICE_NAME)

    temp_dir = tempfile.mkdtemp()
    try:
        with zipfile.ZipFile(consul_package) as consul_archive:
            consul_archive.extractall(temp_dir)

        utils.move(join(temp_dir, 'consul'), consul_binary)
        utils.chmod('+x', consul_binary)
    finally:
        # Always remove the extraction scratch directory.
        utils.remove(temp_dir)
def install_optional(rest_venv):
    """Install optionally-supplied Python modules into the REST venv.

    Each module URL is only installed when non-empty.  When a
    rest-service repo URL is given, the REST service itself is installed
    from it and its manager resources are deployed.

    :param rest_venv: path of the REST service virtualenv
    """
    props = ctx_properties

    dsl_parser_source_url = props['dsl_parser_module_source_url']
    rest_client_source_url = props['rest_client_module_source_url']
    plugins_common_source_url = props['plugins_common_module_source_url']
    script_plugin_source_url = props['script_plugin_module_source_url']
    agent_source_url = props['agent_module_source_url']
    rest_service_source_url = props['rest_service_module_source_url']

    # this allows to upgrade modules if necessary.
    ctx.logger.info('Installing Optional Packages if supplied...')
    if dsl_parser_source_url:
        utils.install_python_package(dsl_parser_source_url, rest_venv)
    if rest_client_source_url:
        utils.install_python_package(rest_client_source_url, rest_venv)
    if plugins_common_source_url:
        utils.install_python_package(plugins_common_source_url, rest_venv)
    if script_plugin_source_url:
        utils.install_python_package(script_plugin_source_url, rest_venv)
    if agent_source_url:
        utils.install_python_package(agent_source_url, rest_venv)

    if rest_service_source_url:
        ctx.logger.info('Downloading cloudify-manager Repository...')
        manager_repo = \
            utils.download_cloudify_resource(rest_service_source_url,
                                             SERVICE_NAME)
        ctx.logger.info('Extracting Manager Repository...')
        tmp_dir = utils.untar(manager_repo, unique_tmp_dir=True)
        rest_service_dir = join(tmp_dir, 'rest-service')
        resources_dir = join(tmp_dir, 'resources/rest-service/cloudify/')

        ctx.logger.info('Installing REST Service...')
        utils.install_python_package(rest_service_dir, rest_venv)

        ctx.logger.info('Deploying Required Manager Resources...')
        utils.move(resources_dir, utils.MANAGER_RESOURCES_HOME)

        utils.remove(tmp_dir)
def install_optional(rest_venv):
    """Install optionally-supplied Python modules into the REST venv.

    Each module URL is only installed when non-empty.  When a
    rest-service repo URL is given, the repo is extracted (to /tmp by
    default — presumably what utils.untar does with no destination;
    verify) and the REST service plus its manager resources are
    deployed from there.

    :param rest_venv: path of the REST service virtualenv
    """
    props = ctx_properties

    dsl_parser_source_url = props['dsl_parser_module_source_url']
    rest_client_source_url = props['rest_client_module_source_url']
    securest_source_url = props['securest_module_source_url']
    plugins_common_source_url = props['plugins_common_module_source_url']
    script_plugin_source_url = props['script_plugin_module_source_url']
    agent_source_url = props['agent_module_source_url']
    rest_service_source_url = props['rest_service_module_source_url']

    # this allows to upgrade modules if necessary.
    ctx.logger.info('Installing Optional Packages if supplied...')
    if dsl_parser_source_url:
        utils.install_python_package(dsl_parser_source_url, rest_venv)
    if rest_client_source_url:
        utils.install_python_package(rest_client_source_url, rest_venv)
    if securest_source_url:
        utils.install_python_package(securest_source_url, rest_venv)
    if plugins_common_source_url:
        utils.install_python_package(plugins_common_source_url, rest_venv)
    if script_plugin_source_url:
        utils.install_python_package(script_plugin_source_url, rest_venv)
    if agent_source_url:
        utils.install_python_package(agent_source_url, rest_venv)

    if rest_service_source_url:
        ctx.logger.info('Downloading cloudify-manager Repository...')
        manager_repo = \
            utils.download_cloudify_resource(rest_service_source_url,
                                             REST_SERVICE_NAME)
        ctx.logger.info('Extracting Manager Repository...')
        utils.untar(manager_repo)

        ctx.logger.info('Installing REST Service...')
        utils.install_python_package('/tmp/rest-service', rest_venv)

        ctx.logger.info('Deploying Required Manager Resources...')
        utils.move(
            '/tmp/resources/rest-service/cloudify/', MANAGER_RESOURCES_HOME)
def _install_optional(mgmtworker_venv):
    """Install optionally-supplied modules into the mgmtworker venv.

    Module URLs come from the 'restservice' node's properties; each is
    only installed when non-empty.  When a rest-service repo URL is
    given, the riemann-controller and workflows plugins are installed
    from the extracted repo under /tmp.

    :param mgmtworker_venv: path of the management worker virtualenv
    """
    rest_props = utils.ctx_factory.get('restservice')

    rest_client_source_url = \
        rest_props['rest_client_module_source_url']
    plugins_common_source_url = \
        rest_props['plugins_common_module_source_url']
    script_plugin_source_url = \
        rest_props['script_plugin_module_source_url']
    rest_service_source_url = \
        rest_props['rest_service_module_source_url']
    agent_source_url = \
        rest_props['agent_module_source_url']

    # this allows to upgrade modules if necessary.
    ctx.logger.info('Installing Optional Packages if supplied...')
    if rest_client_source_url:
        utils.install_python_package(rest_client_source_url, mgmtworker_venv)
    if plugins_common_source_url:
        utils.install_python_package(
            plugins_common_source_url, mgmtworker_venv)
    if script_plugin_source_url:
        utils.install_python_package(script_plugin_source_url,
                                     mgmtworker_venv)
    if agent_source_url:
        utils.install_python_package(agent_source_url, mgmtworker_venv)

    if rest_service_source_url:
        ctx.logger.info('Downloading cloudify-manager Repository...')
        manager_repo = \
            utils.download_cloudify_resource(rest_service_source_url,
                                             MGMT_WORKER_SERVICE_NAME)
        ctx.logger.info('Extracting Manager Repository...')
        utils.untar(manager_repo)

        ctx.logger.info('Installing Management Worker Plugins...')
        # shouldn't we extract the riemann-controller and workflows modules
        # to their own repos?
        utils.install_python_package(
            '/tmp/plugins/riemann-controller', mgmtworker_venv)
        utils.install_python_package('/tmp/workflows', mgmtworker_venv)
def _install_optional(mgmtworker_venv):
    """Install optionally-supplied modules into the mgmtworker venv.

    Module URLs come from the 'restservice' node's properties; each is
    only installed when non-empty.  When a rest-service repo URL is
    given, the riemann-controller and workflows plugins are installed
    from the extracted repo under /tmp.

    :param mgmtworker_venv: path of the management worker virtualenv
    """
    rest_props = utils.ctx_factory.get('restservice')

    rest_client_source_url = \
        rest_props['rest_client_module_source_url']
    plugins_common_source_url = \
        rest_props['plugins_common_module_source_url']
    script_plugin_source_url = \
        rest_props['script_plugin_module_source_url']
    rest_service_source_url = \
        rest_props['rest_service_module_source_url']
    agent_source_url = \
        rest_props['agent_module_source_url']

    # this allows to upgrade modules if necessary.
    ctx.logger.info('Installing Optional Packages if supplied...')
    if rest_client_source_url:
        utils.install_python_package(rest_client_source_url, mgmtworker_venv)
    if plugins_common_source_url:
        utils.install_python_package(plugins_common_source_url,
                                     mgmtworker_venv)
    if script_plugin_source_url:
        utils.install_python_package(script_plugin_source_url,
                                     mgmtworker_venv)
    if agent_source_url:
        utils.install_python_package(agent_source_url, mgmtworker_venv)

    if rest_service_source_url:
        ctx.logger.info('Downloading cloudify-manager Repository...')
        manager_repo = \
            utils.download_cloudify_resource(rest_service_source_url,
                                             MGMT_WORKER_SERVICE_NAME)
        ctx.logger.info('Extracting Manager Repository...')
        utils.untar(manager_repo)

        ctx.logger.info('Installing Management Worker Plugins...')
        # shouldn't we extract the riemann-controller and workflows modules
        # to their own repos?
        utils.install_python_package('/tmp/plugins/riemann-controller',
                                     mgmtworker_venv)
        utils.install_python_package('/tmp/workflows', mgmtworker_venv)
def install_plugin(name, plugin_url):
    """Install plugin.

    :param name: Plugin name
    :type name: str
    :param plugin_url: Plugin file location
    :type plugin_url: str
    """
    ctx.logger.info('Installing {} plugin...'.format(name))
    plugin_path = utils.download_cloudify_resource(
        plugin_url,
        service_name=SERVICE_NAME)
    # Use /dev/urandom to get entropy faster while installing plugins
    utils.run([
        'sudo', '-u', 'logstash',
        'sh', '-c',
        (
            'export JRUBY_OPTS=-J-Djava.security.egd=file:/dev/urandom; '
            '/opt/logstash/bin/plugin install {0}'.format(plugin_path)
        )
    ])
def get_manager_config():
    """
    Extracting specific files from cloudify-manager repo,
    with clean-ups after
    """
    cloudify_resources_url = ctx_properties['cloudify_resources_url']
    ctx.logger.info('Downloading cloudify-manager Repository...')
    manager_repo = utils.download_cloudify_resource(
        cloudify_resources_url, SERVICE_NAME)
    ctx.logger.info('Extracting Manager Repository...')
    manager_dir = utils.untar(manager_repo, unique_tmp_dir=True)

    ctx.logger.info('Deploying Riemann manager.config...')
    config_src_path = join(
        manager_dir, 'plugins', 'riemann-controller',
        'riemann_controller', 'resources', 'manager.config'
    )
    utils.move(
        config_src_path,
        '{0}/conf.d/manager.config'.format(RIEMANN_CONFIG_PATH)
    )

    # Remove both the extracted tree and the downloaded archive.
    utils.remove(manager_dir)
    utils.remove(manager_repo)
def _install_optional(mgmtworker_venv):
    """Install optionally-supplied modules into the mgmtworker venv.

    Module URLs come from the 'restservice' node's properties; each is
    only installed when non-empty.  When a rest-service repo URL is
    given, the riemann-controller and workflows plugins are installed
    from a uniquely-named temp extraction dir, which is then removed.

    :param mgmtworker_venv: path of the management worker virtualenv
    """
    rest_props = utils.ctx_factory.get('restservice')

    rest_client_source_url = rest_props['rest_client_module_source_url']
    plugins_common_source_url = rest_props['plugins_common_module_source_url']
    script_plugin_source_url = rest_props['script_plugin_module_source_url']
    rest_service_source_url = rest_props['rest_service_module_source_url']
    agent_source_url = rest_props['agent_module_source_url']

    # this allows to upgrade modules if necessary.
    ctx.logger.info('Installing Optional Packages if supplied...')
    if rest_client_source_url:
        utils.install_python_package(rest_client_source_url, mgmtworker_venv)
    if plugins_common_source_url:
        utils.install_python_package(
            plugins_common_source_url, mgmtworker_venv)
    if script_plugin_source_url:
        utils.install_python_package(script_plugin_source_url,
                                     mgmtworker_venv)
    if agent_source_url:
        utils.install_python_package(agent_source_url, mgmtworker_venv)

    if rest_service_source_url:
        ctx.logger.info('Downloading cloudify-manager Repository...')
        manager_repo = \
            utils.download_cloudify_resource(rest_service_source_url,
                                             SERVICE_NAME)
        ctx.logger.info('Extracting Manager Repository...')
        tmp_dir = utils.untar(manager_repo, unique_tmp_dir=True)
        workflows_dir = join(tmp_dir, 'workflows')
        riemann_dir = join(tmp_dir, 'plugins/riemann-controller')

        ctx.logger.info('Installing Management Worker Plugins...')
        utils.install_python_package(riemann_dir, mgmtworker_venv)
        utils.install_python_package(workflows_dir, mgmtworker_venv)

        utils.remove(tmp_dir)
def _install_optional(mgmtworker_venv):
    """Install optionally-supplied modules into the mgmtworker venv.

    Module URLs come from the 'restservice' node's properties; each is
    only installed when non-empty.  When a rest-service repo URL is
    given, the riemann-controller and workflows plugins are installed
    from a uniquely-named temp extraction dir, which is then removed.

    :param mgmtworker_venv: path of the management worker virtualenv
    """
    rest_props = utils.ctx_factory.get('restservice')

    rest_client_source_url = rest_props['rest_client_module_source_url']
    plugins_common_source_url = rest_props['plugins_common_module_source_url']
    script_plugin_source_url = rest_props['script_plugin_module_source_url']
    rest_service_source_url = rest_props['rest_service_module_source_url']
    agent_source_url = rest_props['agent_module_source_url']

    # this allows to upgrade modules if necessary.
    ctx.logger.info('Installing Optional Packages if supplied...')
    if rest_client_source_url:
        utils.install_python_package(rest_client_source_url, mgmtworker_venv)
    if plugins_common_source_url:
        utils.install_python_package(plugins_common_source_url,
                                     mgmtworker_venv)
    if script_plugin_source_url:
        utils.install_python_package(script_plugin_source_url,
                                     mgmtworker_venv)
    if agent_source_url:
        utils.install_python_package(agent_source_url, mgmtworker_venv)

    if rest_service_source_url:
        ctx.logger.info('Downloading cloudify-manager Repository...')
        manager_repo = \
            utils.download_cloudify_resource(rest_service_source_url,
                                             SERVICE_NAME)
        ctx.logger.info('Extracting Manager Repository...')
        tmp_dir = utils.untar(manager_repo, unique_tmp_dir=True)
        workflows_dir = join(tmp_dir, 'workflows')
        riemann_dir = join(tmp_dir, 'plugins/riemann-controller')

        ctx.logger.info('Installing Management Worker Plugins...')
        utils.install_python_package(riemann_dir, mgmtworker_venv)
        utils.install_python_package(workflows_dir, mgmtworker_venv)

        utils.remove(tmp_dir)
def install_riemann():
    """Install and configure the Riemann event-processing service.

    Creates the riemann service user, lays out config/log directories,
    installs the langohr jar plus the daemonize and riemann RPMs, and
    records the rabbitmq connection details and the paths to remove on
    teardown in the instance runtime properties.
    """
    langohr_source_url = ctx_properties['langohr_jar_source_url']
    daemonize_source_url = ctx_properties['daemonize_rpm_source_url']
    riemann_source_url = ctx_properties['riemann_rpm_source_url']
    rabbitmq_username = ctx_properties['rabbitmq_username']
    rabbitmq_password = ctx_properties['rabbitmq_password']

    utils.create_service_user(
        user=RIEMANN_USER,
        group=RIEMANN_GROUP,
        home=utils.CLOUDIFY_HOME_DIR
    )

    riemann_config_path = '/etc/riemann'
    riemann_log_path = '/var/log/cloudify/riemann'
    langohr_home = '/opt/lib'
    extra_classpath = '{0}/langohr.jar'.format(langohr_home)
    riemann_dir = '/opt/riemann'

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by
    # the deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        ctx.abort_operation(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    rabbit_props = utils.ctx_factory.get('rabbitmq')
    runtime_props['rabbitmq_endpoint_ip'] = utils.get_rabbitmq_endpoint_ip()
    runtime_props['rabbitmq_username'] = rabbit_props.get('rabbitmq_username')
    runtime_props['rabbitmq_password'] = rabbit_props.get('rabbitmq_password')

    ctx.logger.info('Installing Riemann...')
    utils.set_selinux_permissive()
    utils.copy_notice(RIEMANN_SERVICE_NAME)

    utils.mkdir(riemann_log_path)
    utils.mkdir(langohr_home)
    utils.mkdir(riemann_config_path)
    utils.mkdir('{0}/conf.d'.format(riemann_config_path))

    # utils.chown cannot be used as it will change both user and group
    utils.sudo(['chown', RIEMANN_USER, riemann_dir])

    langohr = utils.download_cloudify_resource(langohr_source_url,
                                               RIEMANN_SERVICE_NAME)
    utils.sudo(['cp', langohr, extra_classpath])
    ctx.logger.info('Applying Langohr permissions...')
    utils.sudo(['chmod', '644', extra_classpath])

    utils.yum_install(daemonize_source_url, service_name=RIEMANN_SERVICE_NAME)
    utils.yum_install(riemann_source_url, service_name=RIEMANN_SERVICE_NAME)

    utils.chown(RIEMANN_USER, RIEMANN_GROUP, riemann_log_path)
    utils.logrotate(RIEMANN_SERVICE_NAME)

    # Remembered so an uninstall/teardown flow can clean these up later.
    files_to_remove = [
        riemann_config_path,
        riemann_log_path,
        extra_classpath,
        riemann_dir,
    ]
    runtime_props['files_to_remove'] = files_to_remove
def install_webui():
    """Install Cloudify's WebUI stack: NodeJS, the WebUI itself and Grafana.

    Also records the influxdb endpoint (taken from the environment) and the
    UI variation ('telecom' or '') in the instance runtime properties.
    """
    nodejs_source_url = ctx_properties['nodejs_tar_source_url']
    webui_source_url = ctx_properties['webui_tar_source_url']
    grafana_source_url = ctx_properties['grafana_tar_source_url']

    # injected as an input to the script
    ctx.instance.runtime_properties['influxdb_endpoint_ip'] = \
        os.environ.get('INFLUXDB_ENDPOINT_IP')

    # is this a telecom edition?
    ctx.instance.runtime_properties['ui_variation'] = \
        'telecom' if ctx_properties['telecom_edition'] else ''

    nodejs_home = '/opt/nodejs'
    webui_home = '/opt/cloudify-ui'
    webui_log_path = '/var/log/cloudify/webui'
    grafana_home = '{0}/grafana'.format(webui_home)

    # NOTE(review): '******' looks like a redaction/placeholder rather than
    # a real account name -- confirm the intended user name before relying
    # on this value.
    webui_user = '******'
    webui_group = 'webui'

    ctx.logger.info('Installing Cloudify\'s WebUI...')
    utils.set_selinux_permissive()
    utils.copy_notice(WEBUI_SERVICE_NAME)

    utils.mkdir(nodejs_home)
    utils.mkdir(webui_home)
    utils.mkdir('{0}/backend'.format(webui_home))
    utils.mkdir(webui_log_path)
    utils.mkdir(grafana_home)

    utils.create_service_user(webui_user, webui_home)

    ctx.logger.info('Installing NodeJS...')
    nodejs = utils.download_cloudify_resource(nodejs_source_url,
                                              WEBUI_SERVICE_NAME)
    utils.untar(nodejs, nodejs_home)

    ctx.logger.info('Installing Cloudify\'s WebUI...')
    webui = utils.download_cloudify_resource(webui_source_url,
                                             WEBUI_SERVICE_NAME)
    utils.untar(webui, webui_home)

    ctx.logger.info('Installing Grafana...')
    grafana = utils.download_cloudify_resource(grafana_source_url,
                                               WEBUI_SERVICE_NAME)
    utils.untar(grafana, grafana_home)

    ctx.logger.info('Deploying WebUI Configuration...')
    utils.deploy_blueprint_resource(
        '{0}/gsPresets.json'.format(CONFIG_PATH),
        '{0}/backend/gsPresets.json'.format(webui_home),
        WEBUI_SERVICE_NAME)

    ctx.logger.info('Deploying Grafana Configuration...')
    utils.deploy_blueprint_resource(
        '{0}/grafana_config.js'.format(CONFIG_PATH),
        '{0}/config.js'.format(grafana_home),
        WEBUI_SERVICE_NAME)

    ctx.logger.info('Fixing permissions...')
    utils.chown(webui_user, webui_group, webui_home)
    utils.chown(webui_user, webui_group, nodejs_home)
    utils.chown(webui_user, webui_group, webui_log_path)

    utils.logrotate(WEBUI_SERVICE_NAME)
    utils.systemd.configure(WEBUI_SERVICE_NAME)
def deploy_manager_sources():
    """Deploys all manager sources from a single archive.

    Downloads the manager resources package (optionally validating its md5
    checksum), untars it under the cloudify sources path, handles agent
    backup/restore for upgrade and rollback flows, and moves the agent
    packages into the agent archives directory under normalized names.
    """
    archive_path = ctx_properties['manager_resources_package']
    archive_checksum_path = \
        ctx_properties['manager_resources_package_checksum_file']
    skip_checksum_validation = ctx_properties['skip_checksum_validation']
    agent_archives_path = utils.AGENT_ARCHIVES_PATH
    utils.mkdir(agent_archives_path)
    if archive_path:
        sources_agents_path = os.path.join(
            utils.CLOUDIFY_SOURCES_PATH, 'agents')
        # this will leave this several hundreds of MBs archive on the
        # manager. should find a way to clean it after all operations
        # were completed and bootstrap succeeded as it is not longer
        # necessary
        utils.mkdir(utils.CLOUDIFY_SOURCES_PATH)
        resource_name = os.path.basename(archive_path)
        destination = os.path.join(utils.CLOUDIFY_SOURCES_PATH, resource_name)

        ctx.logger.info('Downloading manager resources package...')
        resources_archive_path = \
            utils.download_cloudify_resource(
                archive_path, NODE_NAME, destination=destination)
        # This would ideally go under utils.download_cloudify_resource but as
        # of now, we'll only be validating the manager resources package.
        if not skip_checksum_validation:
            ctx.logger.info('Validating checksum...')
            skip_if_failed = False
            if not archive_checksum_path:
                # No explicit checksum file: derive one next to the archive
                # and tolerate a validation failure.
                skip_if_failed = True
                archive_checksum_path = archive_path + '.md5'
            md5_name = os.path.basename(archive_checksum_path)
            destination = os.path.join(utils.CLOUDIFY_SOURCES_PATH, md5_name)
            resources_archive_md5_path = utils.download_cloudify_resource(
                archive_checksum_path, NODE_NAME, destination=destination)
            if not utils.validate_md5_checksum(resources_archive_path,
                                               resources_archive_md5_path):
                if skip_if_failed:
                    ctx.logger.warn('Checksum validation failed. '
                                    'Continuing as no checksum file was '
                                    'explicitly provided.')
                else:
                    ctx.abort_operation(
                        'Failed to validate checksum for {0}'.format(
                            resources_archive_path))
            else:
                ctx.logger.info(
                    'Resources Package downloaded successfully...')
        else:
            ctx.logger.info(
                'Skipping resources package checksum validation...')

        utils.untar(
            resources_archive_path,
            utils.CLOUDIFY_SOURCES_PATH,
            skip_old_files=True)

        def splitext(filename):
            # not using os.path.splitext as it would return .gz instead of
            # .tar.gz
            if filename.endswith('.tar.gz'):
                return '.tar.gz'
            elif filename.endswith('.exe'):
                return '.exe'
            else:
                ctx.abort_operation(
                    'Unknown agent format for {0}. '
                    'Must be either tar.gz or exe'.format(filename))

        def normalize_agent_name(filename):
            # this returns the normalized name of an agent upon which our
            # agent installer retrieves agent packages for installation.
            # e.g. Ubuntu-trusty-agent_3.4.0-m3-b392.tar.gz returns
            # ubuntu-trusty-agent
            return filename.split('_', 1)[0].lower()

        def backup_agent_resources(agents_dir):
            # Back up agents once, into the rollback path.
            ctx.logger.info('Backing up agents in {0}...'.format(agents_dir))
            if not os.path.isdir(utils.AGENTS_ROLLBACK_PATH):
                utils.mkdir(utils.AGENTS_ROLLBACK_PATH)
                utils.copy(agents_dir, utils.AGENTS_ROLLBACK_PATH)

        def restore_agent_resources(agents_dir):
            # Replace the current agents dir with the backed-up copy.
            ctx.logger.info('Restoring agents in {0}'.format(
                utils.AGENTS_ROLLBACK_PATH))
            if os.path.isdir(agents_dir):
                utils.remove(agents_dir)
            utils.mkdir(agents_dir)
            utils.copy(
                os.path.join(utils.AGENTS_ROLLBACK_PATH, 'agents', '.'),
                agents_dir)

        manager_scripts_path = os.path.join(
            utils.MANAGER_RESOURCES_HOME, 'packages', 'scripts')
        manager_templates_path = os.path.join(
            utils.MANAGER_RESOURCES_HOME, 'packages', 'templates')
        if utils.is_upgrade:
            backup_agent_resources(agent_archives_path)
            utils.remove(agent_archives_path)
            utils.mkdir(agent_archives_path)
            utils.remove(manager_scripts_path)
            utils.remove(manager_templates_path)
            ctx.logger.info('Upgrading agents...')
        elif utils.is_rollback:
            ctx.logger.info('Restoring agents...')
            restore_agent_resources(agent_archives_path)

        for agent_file in os.listdir(sources_agents_path):
            agent_id = normalize_agent_name(agent_file)
            agent_extension = splitext(agent_file)
            utils.move(
                os.path.join(sources_agents_path, agent_file),
                os.path.join(agent_archives_path,
                             agent_id + agent_extension))
def install_riemann():
    """Install and configure the Riemann service (older blueprint variant).

    Installs the langohr jar plus the daemonize and riemann RPMs, deploys
    Riemann's manager.config (taken from the cloudify-manager repository)
    and main.clj, and records rabbitmq connection details in the instance
    runtime properties.
    """
    langohr_source_url = ctx_properties['langohr_jar_source_url']
    daemonize_source_url = ctx_properties['daemonize_rpm_source_url']
    riemann_source_url = ctx_properties['riemann_rpm_source_url']
    # Needed for Riemann's config
    cloudify_resources_url = ctx_properties['cloudify_resources_url']
    rabbitmq_username = ctx_properties['rabbitmq_username']
    rabbitmq_password = ctx_properties['rabbitmq_password']

    riemann_config_path = '/etc/riemann'
    riemann_log_path = '/var/log/cloudify/riemann'
    langohr_home = '/opt/lib'
    extra_classpath = '{0}/langohr.jar'.format(langohr_home)

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by
    # the deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        ctx.abort_operation(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    rabbit_props = utils.ctx_factory.get('rabbitmq')
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip(
            rabbit_props.get('rabbitmq_endpoint_ip'))
    ctx.instance.runtime_properties['rabbitmq_username'] = \
        rabbit_props.get('rabbitmq_username')
    ctx.instance.runtime_properties['rabbitmq_password'] = \
        rabbit_props.get('rabbitmq_password')

    ctx.logger.info('Installing Riemann...')
    utils.set_selinux_permissive()
    utils.copy_notice(RIEMANN_SERVICE_NAME)

    utils.mkdir(riemann_log_path)
    utils.mkdir(langohr_home)
    utils.mkdir(riemann_config_path)
    utils.mkdir('{0}/conf.d'.format(riemann_config_path))

    langohr = utils.download_cloudify_resource(langohr_source_url,
                                               RIEMANN_SERVICE_NAME)
    utils.sudo(['cp', langohr, extra_classpath])
    ctx.logger.info('Applying Langohr permissions...')
    utils.sudo(['chmod', '644', extra_classpath])

    utils.yum_install(daemonize_source_url, service_name=RIEMANN_SERVICE_NAME)
    utils.yum_install(riemann_source_url, service_name=RIEMANN_SERVICE_NAME)
    utils.logrotate(RIEMANN_SERVICE_NAME)

    ctx.logger.info('Downloading cloudify-manager Repository...')
    manager_repo = utils.download_cloudify_resource(cloudify_resources_url,
                                                    RIEMANN_SERVICE_NAME)
    ctx.logger.info('Extracting Manager Repository...')
    utils.untar(manager_repo, '/tmp')

    ctx.logger.info('Deploying Riemann manager.config...')
    utils.move(
        '/tmp/plugins/riemann-controller/riemann_controller/resources/manager.config',  # NOQA
        '{0}/conf.d/manager.config'.format(riemann_config_path))

    ctx.logger.info('Deploying Riemann conf...')
    utils.deploy_blueprint_resource(
        '{0}/main.clj'.format(CONFIG_PATH),
        '{0}/main.clj'.format(riemann_config_path),
        RIEMANN_SERVICE_NAME)

    # our riemann configuration will (by default) try to read these
    # environment variables. If they don't exist, it will assume
    # that they're found at "localhost"
    # export MANAGEMENT_IP=""
    # export RABBITMQ_HOST=""

    # we inject the management_ip for both of these to Riemann's systemd
    # config.
    # These should be potentially different
    # if the manager and rabbitmq are running on different hosts.
    utils.systemd.configure(RIEMANN_SERVICE_NAME)
    utils.clean_var_log_dir(RIEMANN_SERVICE_NAME)
def deploy_manager_sources():
    """Deploys all manager sources from a single archive.

    Fetches the manager resources package, optionally validates its md5
    checksum, extracts it, performs agent backup/restore on upgrade or
    rollback, and normalizes agent package names into the archives dir.
    """
    archive_path = ctx_properties['manager_resources_package']
    archive_checksum_path = \
        ctx_properties['manager_resources_package_checksum_file']
    skip_checksum_validation = ctx_properties['skip_checksum_validation']
    agent_archives_path = utils.AGENT_ARCHIVES_PATH
    utils.mkdir(agent_archives_path)
    if archive_path:
        sources_agents_path = os.path.join(utils.CLOUDIFY_SOURCES_PATH,
                                           'agents')
        # this will leave this several hundreds of MBs archive on the
        # manager. should find a way to clean it after all operations
        # were completed and bootstrap succeeded as it is not longer
        # necessary
        utils.mkdir(utils.CLOUDIFY_SOURCES_PATH)
        resource_name = os.path.basename(archive_path)
        destination = os.path.join(utils.CLOUDIFY_SOURCES_PATH, resource_name)

        ctx.logger.info('Downloading manager resources package...')
        resources_archive_path = utils.download_cloudify_resource(
            archive_path, NODE_NAME, destination=destination)

        # This would ideally go under utils.download_cloudify_resource but as
        # of now, we'll only be validating the manager resources package.
        if skip_checksum_validation:
            ctx.logger.info(
                'Skipping resources package checksum validation...')
        else:
            ctx.logger.info('Validating checksum...')
            skip_if_failed = False
            if not archive_checksum_path:
                # No explicit checksum file given: derive one next to the
                # archive and tolerate a validation failure.
                skip_if_failed = True
                archive_checksum_path = archive_path + '.md5'
            md5_name = os.path.basename(archive_checksum_path)
            destination = os.path.join(utils.CLOUDIFY_SOURCES_PATH, md5_name)
            resources_archive_md5_path = utils.download_cloudify_resource(
                archive_checksum_path, NODE_NAME, destination=destination)
            if utils.validate_md5_checksum(resources_archive_path,
                                           resources_archive_md5_path):
                ctx.logger.info(
                    'Resources Package downloaded successfully...')
            elif skip_if_failed:
                ctx.logger.warn('Checksum validation failed. '
                                'Continuing as no checksum file was '
                                'explicitly provided.')
            else:
                ctx.abort_operation(
                    'Failed to validate checksum for {0}'.format(
                        resources_archive_path))

        utils.untar(resources_archive_path,
                    utils.CLOUDIFY_SOURCES_PATH,
                    skip_old_files=True)

        def splitext(filename):
            # not using os.path.splitext as it would return .gz instead of
            # .tar.gz
            if filename.endswith('.tar.gz'):
                return '.tar.gz'
            elif filename.endswith('.exe'):
                return '.exe'
            else:
                ctx.abort_operation(
                    'Unknown agent format for {0}. '
                    'Must be either tar.gz or exe'.format(filename))

        def normalize_agent_name(filename):
            # this returns the normalized name of an agent upon which our
            # agent installer retrieves agent packages for installation.
            # e.g. Ubuntu-trusty-agent_3.4.0-m3-b392.tar.gz returns
            # ubuntu-trusty-agent
            return filename.split('_', 1)[0].lower()

        def backup_agent_resources(agents_dir):
            # Back up agents once, into the rollback path.
            ctx.logger.info('Backing up agents in {0}...'.format(agents_dir))
            if not os.path.isdir(utils.AGENTS_ROLLBACK_PATH):
                utils.mkdir(utils.AGENTS_ROLLBACK_PATH)
                utils.copy(agents_dir, utils.AGENTS_ROLLBACK_PATH)

        def restore_agent_resources(agents_dir):
            # Replace the current agents dir with the backed-up copy.
            ctx.logger.info('Restoring agents in {0}'.format(
                utils.AGENTS_ROLLBACK_PATH))
            if os.path.isdir(agents_dir):
                utils.remove(agents_dir)
            utils.mkdir(agents_dir)
            utils.copy(
                os.path.join(utils.AGENTS_ROLLBACK_PATH, 'agents', '.'),
                agents_dir)

        manager_scripts_path = os.path.join(utils.MANAGER_RESOURCES_HOME,
                                            'packages', 'scripts')
        manager_templates_path = os.path.join(utils.MANAGER_RESOURCES_HOME,
                                              'packages', 'templates')
        if utils.is_upgrade:
            backup_agent_resources(agent_archives_path)
            utils.remove(agent_archives_path)
            utils.mkdir(agent_archives_path)
            utils.remove(manager_scripts_path)
            utils.remove(manager_templates_path)
            ctx.logger.info('Upgrading agents...')
        elif utils.is_rollback:
            ctx.logger.info('Restoring agents...')
            restore_agent_resources(agent_archives_path)

        for agent_file in os.listdir(sources_agents_path):
            agent_id = normalize_agent_name(agent_file)
            agent_extension = splitext(agent_file)
            utils.move(
                os.path.join(sources_agents_path, agent_file),
                os.path.join(agent_archives_path,
                             agent_id + agent_extension))
def install_riemann():
    """Install and configure the Riemann service.

    Validates the broker credentials, stores rabbitmq connection details in
    runtime properties, installs langohr plus the daemonize and riemann
    RPMs, and deploys Riemann's manager.config and main.clj configuration.
    """
    props = ctx_properties
    langohr_source_url = props['langohr_jar_source_url']
    daemonize_source_url = props['daemonize_rpm_source_url']
    riemann_source_url = props['riemann_rpm_source_url']
    # Needed for Riemann's config
    cloudify_resources_url = props['cloudify_resources_url']
    rabbitmq_username = props['rabbitmq_username']
    rabbitmq_password = props['rabbitmq_password']

    riemann_config_path = '/etc/riemann'
    riemann_log_path = '/var/log/cloudify/riemann'
    langohr_home = '/opt/lib'
    extra_classpath = '{0}/langohr.jar'.format(langohr_home)

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by
    # the deployed resources, hence the check here.
    if not (rabbitmq_username and rabbitmq_password):
        ctx.abort_operation(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    rabbit_props = utils.ctx_factory.get('rabbitmq')
    runtime = ctx.instance.runtime_properties
    runtime['rabbitmq_endpoint_ip'] = utils.get_rabbitmq_endpoint_ip(
        rabbit_props.get('rabbitmq_endpoint_ip'))
    runtime['rabbitmq_username'] = rabbit_props.get('rabbitmq_username')
    runtime['rabbitmq_password'] = rabbit_props.get('rabbitmq_password')

    ctx.logger.info('Installing Riemann...')
    utils.set_selinux_permissive()
    utils.copy_notice(RIEMANN_SERVICE_NAME)

    for directory in (riemann_log_path,
                      langohr_home,
                      riemann_config_path,
                      '{0}/conf.d'.format(riemann_config_path)):
        utils.mkdir(directory)

    langohr = utils.download_cloudify_resource(langohr_source_url,
                                               RIEMANN_SERVICE_NAME)
    utils.sudo(['cp', langohr, extra_classpath])
    ctx.logger.info('Applying Langohr permissions...')
    utils.sudo(['chmod', '644', extra_classpath])

    utils.yum_install(daemonize_source_url, service_name=RIEMANN_SERVICE_NAME)
    utils.yum_install(riemann_source_url, service_name=RIEMANN_SERVICE_NAME)
    utils.logrotate(RIEMANN_SERVICE_NAME)

    ctx.logger.info('Downloading cloudify-manager Repository...')
    manager_repo = utils.download_cloudify_resource(cloudify_resources_url,
                                                    RIEMANN_SERVICE_NAME)
    ctx.logger.info('Extracting Manager Repository...')
    utils.untar(manager_repo, '/tmp')

    ctx.logger.info('Deploying Riemann manager.config...')
    utils.move(
        '/tmp/plugins/riemann-controller/riemann_controller/resources/manager.config',  # NOQA
        '{0}/conf.d/manager.config'.format(riemann_config_path))

    ctx.logger.info('Deploying Riemann conf...')
    utils.deploy_blueprint_resource(
        '{0}/main.clj'.format(CONFIG_PATH),
        '{0}/main.clj'.format(riemann_config_path),
        RIEMANN_SERVICE_NAME)

    # our riemann configuration will (by default) try to read these
    # environment variables. If they don't exist, it will assume
    # that they're found at "localhost"
    # export MANAGEMENT_IP=""
    # export RABBITMQ_HOST=""

    # we inject the management_ip for both of these to Riemann's systemd
    # config.
    # These should be potentially different
    # if the manager and rabbitmq are running on different hosts.
    utils.systemd.configure(RIEMANN_SERVICE_NAME)
    utils.clean_var_log_dir(RIEMANN_SERVICE_NAME)
def install_syncthing():
    """Download the syncthing package and unpack it under /opt/cloudify."""
    syncthing_package = utils.download_cloudify_resource(
        ctx_properties['syncthing_package_url'],
        SYNCTHING_SERVICE_NAME)
    utils.untar(syncthing_package, destination='/opt/cloudify/syncthing')
def deploy_manager_sources():
    """Deploys all manager sources from a single archive.

    Older blueprint variant: downloads the manager resources package,
    optionally validates its md5 checksum, extracts it, and moves the agent
    packages into the agent archives directory under normalized names.
    """
    archive_path = ctx.node.properties['manager_resources_package']
    archive_checksum_path = \
        ctx.node.properties['manager_resources_package_checksum_file']
    skip_checksum_validation = \
        ctx.node.properties['skip_checksum_validation']
    if archive_path:
        sources_agents_path = os.path.join(
            utils.CLOUDIFY_SOURCES_PATH, 'agents')
        agent_archives_path = utils.AGENT_ARCHIVES_PATH
        utils.mkdir(agent_archives_path)
        # this will leave this several hundreds of MBs archive on the
        # manager. should find a way to clean it after all operations
        # were completed and bootstrap succeeded as it is not longer
        # necessary
        resources_archive_path = \
            utils.download_cloudify_resource(archive_path)
        # This would ideally go under utils.download_cloudify_resource but as
        # of now, we'll only be validating the manager resources package.
        if not skip_checksum_validation:
            skip_if_failed = False
            if not archive_checksum_path:
                # No explicit checksum file given: derive one next to the
                # archive and tolerate a validation failure.
                skip_if_failed = True
                archive_checksum_path = archive_path + '.md5'
            resources_archive_md5_path = \
                utils.download_cloudify_resource(archive_checksum_path)
            if not utils.validate_md5_checksum(resources_archive_path,
                                               resources_archive_md5_path):
                if skip_if_failed:
                    ctx.logger.warn('Checksum validation failed. '
                                    'Continuing as no checksum file was '
                                    'explicitly provided.')
                else:
                    # NOTE(review): this branch calls `utils.error_exit`
                    # while splitext() below calls `utils.exit_error` --
                    # the two names are inconsistent and one is likely
                    # wrong; confirm which helper exists in utils.
                    utils.error_exit(
                        'Failed to validate checksum for {0}'.format(
                            resources_archive_path))
            else:
                ctx.logger.info(
                    'Resources Package downloaded successfully...')
        else:
            ctx.logger.info(
                'Skipping resources package checksum validation...')

        utils.untar(
            resources_archive_path,
            utils.CLOUDIFY_SOURCES_PATH,
            skip_old_files=True)

        def splitext(filename):
            # not using os.path.splitext as it would return .gz instead of
            # .tar.gz
            if filename.endswith('.tar.gz'):
                return '.tar.gz'
            elif filename.endswith('.exe'):
                return '.exe'
            else:
                utils.exit_error(
                    'Unknown agent format for {0}. '
                    'Must be either tar.gz or exe'.format(filename))

        def normalize_agent_name(filename):
            # this returns the normalized name of an agent upon which our
            # agent installer retrieves agent packages for installation.
            # e.g. Ubuntu-trusty-agent_3.4.0-m3-b392.tar.gz returns
            # ubuntu-trusty-agent
            return filename.split('_', 1)[0].lower()

        for agent_file in os.listdir(sources_agents_path):
            agent_id = normalize_agent_name(agent_file)
            agent_extension = splitext(agent_file)
            utils.move(
                os.path.join(sources_agents_path, agent_file),
                os.path.join(agent_archives_path,
                             agent_id + agent_extension))