def copy_spreadsheet(slug):
    """
    Copy the COPY spreadsheet.

    Copies the graphic's source Google spreadsheet via the Drive API and
    rewrites graphic_config.py to point at the new copy's key.
    Returns None on both paths (Python 2 code: uses print statements).
    """
    _check_credentials()
    config_path = '%s/%s/graphic_config.py' % (app_config.GRAPHICS_PATH, slug)
    # NOTE: imp.load_source is deprecated (importlib is the modern API).
    graphic_config = imp.load_source('graphic_config', config_path)
    kwargs = {
        'credentials': get_credentials(),
        'url': SPREADSHEET_COPY_URL_TEMPLATE % graphic_config.COPY_GOOGLE_DOC_KEY,
        'method': 'POST',
        'headers': {'Content-Type': 'application/json'},
        'body': json.dumps({
            'title': '%s GRAPHIC COPY' % slug,
        }),
    }
    resp = app_config.authomatic.access(**kwargs)
    if resp.status == 200:
        spreadsheet_key = resp.data['id']
        spreadsheet_url = SPREADSHEET_VIEW_TEMPLATE % spreadsheet_key
        print 'New spreadsheet created successfully!'
        print 'View it online at %s' % spreadsheet_url
        # Persist the new key so future runs operate on the copy.
        utils.replace_in_file(config_path, graphic_config.COPY_GOOGLE_DOC_KEY,
                              spreadsheet_key)
    else:
        print 'Error creating spreadsheet (status code %s) with message %s' % (resp.status, resp.reason)
        return None
def post(slug): """ Set the post to work on. """ # Force root path every time fab_path = os.path.realpath(os.path.dirname(__file__)) root_path = os.path.join(fab_path, '..') os.chdir(root_path) env.slug = utils._find_slugs(slug) if not env.slug: utils.confirm( 'This post does not exist. Do you want to create a new post called %s?' % slug) _new(slug) return env.static_path = '%s/%s' % (app_config.POST_PATH, env.slug) if os.path.exists('%s/post_config.py' % env.static_path): # set slug for deployment in post_config find = "DEPLOY_SLUG = ''" replace = "DEPLOY_SLUG = '%s'" % env.slug utils.replace_in_file('%s/post_config.py' % env.static_path, find, replace) env.post_config = imp.load_source( 'post_config', '%s/post_config.py' % env.static_path) env.copytext_key = env.post_config.COPY_GOOGLE_DOC_KEY else: env.post_config = None env.copytext_key = None env.copytext_slug = env.slug
def post(slug): """ Set the post to work on. """ # Force root path every time fab_path = os.path.realpath(os.path.dirname(__file__)) root_path = os.path.join(fab_path, '..') os.chdir(root_path) env.slug = utils._find_slugs(slug) if not env.slug: utils.confirm('This post does not exist. Do you want to create a new post called %s?' % slug) _new(slug) return env.static_path = '%s/%s' % (app_config.POST_PATH, env.slug) if os.path.exists ('%s/post_config.py' % env.static_path): # set slug for deployment in post_config find = "DEPLOY_SLUG = ''" replace = "DEPLOY_SLUG = '%s'" % env.slug utils.replace_in_file('%s/post_config.py' % env.static_path, find, replace) env.post_config = imp.load_source('post_config', '%s/post_config.py' % env.static_path) env.copytext_key = env.post_config.COPY_GOOGLE_DOC_KEY else: env.post_config = None env.copytext_key = None env.copytext_slug = env.slug
def deactivate(self, instances, vars):
    """Remove the server's include line from nginx.conf and reload.

    Uses only the first token of SERVER_NAME to locate the config folder.
    Returns True when the reload succeeds, False when the server config is
    missing or an exception is raised.
    """
    try:
        # Get first server name
        server = vars["SERVER_NAME"].split(" ")[0]

        # Abort when the per-server config was never generated.
        if not os.path.isfile("/etc/nginx/" + server + "/server.conf"):
            utils.log("[!] /etc/nginx/" + server + "/server.conf doesn't exist")
            return False

        # Strip the include directive that activation appended.
        utils.replace_in_file(
            "/etc/nginx/nginx.conf",
            "include /etc/nginx/" + server + "/server.conf;\n",
            "")

        # Reload all instances so the change takes effect.
        if self.reload(instances):
            return True
    except Exception as e:
        utils.log("[!] Exception while deactivating config : " + str(e))
    return False
def act():
    """Bump pinned Dockerfile FROM versions per dependency, committing each
    change and opening a single pull request at the end.

    Reads the dependency update plan from /dependencies/input_data.json.
    """
    with open('/dependencies/input_data.json', 'r') as f:
        data = json.load(f)

    run(['deps', 'branch'], check=True)

    for manifest_path, manifest_data in data.get('manifests', {}).items():
        for dependency_name, updated_dependency_data in manifest_data[
                'updated']['dependencies'].items():
            installed = manifest_data['current']['dependencies'][
                dependency_name]['constraint']
            version_to_update_to = updated_dependency_data['constraint']

            if installed == 'latest':
                # may or not actually have :latest in the line
                pattern = r'(FROM\s+{})(?::latest)?(\s+.*)'.format(
                    dependency_name)
            else:
                pattern = r'(FROM\s+{}):{}(\s+.*)'.format(
                    dependency_name, installed)

            # Rewrite the FROM line, preserving everything around the tag.
            replace_in_file(manifest_path, pattern,
                            r'\1:{}\2'.format(version_to_update_to))

            # One commit per dependency update.
            run([
                'deps', 'commit', '-m',
                'Update {} from {} to {}'.format(
                    dependency_name, installed, version_to_update_to),
                manifest_path
            ], check=True)

    run(['deps', 'pullrequest', write_json_to_temp_file(data)], check=True)
def copy_spreadsheet(slug):
    """
    Copy the COPY spreadsheet.

    Copies the graphic's source spreadsheet via the Drive API, optionally
    filing it under app_config.DRIVE_SPREADSHEETS_FOLDER, and rewrites
    graphic_config.py with the new key.  Returns True on success, False on
    API error, None when no COPY_GOOGLE_DOC_KEY is configured.
    """
    _check_credentials()
    config_path = '%s/%s/' % (app_config.GRAPHICS_PATH, slug)
    graphic_config = load_graphic_config(config_path)
    if not hasattr(
            graphic_config,
            'COPY_GOOGLE_DOC_KEY') or not graphic_config.COPY_GOOGLE_DOC_KEY:
        print 'Skipping spreadsheet creation. (COPY_GOOGLE_DOC_KEY is not defined in %s/graphic_config.py.)' % slug
        return

    metadata = {'title': '%s GRAPHIC COPY' % slug}
    try:
        # Optional: place the copy inside a configured Drive folder.
        if app_config.DRIVE_SPREADSHEETS_FOLDER:
            metadata['parents'] = [{
                'id': app_config.DRIVE_SPREADSHEETS_FOLDER
            }]
    except AttributeError:
        # DRIVE_SPREADSHEETS_FOLDER is an optional app_config setting.
        pass

    kwargs = {
        'credentials': get_credentials(),
        'url': SPREADSHEET_COPY_URL_TEMPLATE % graphic_config.COPY_GOOGLE_DOC_KEY,
        'method': 'POST',
        'headers': {
            'Content-Type': 'application/json'
        },
        'body': json.dumps(metadata),
    }
    resp = app_config.authomatic.access(**kwargs)
    if resp.status == 200:
        spreadsheet_key = resp.data['id']
        spreadsheet_url = SPREADSHEET_VIEW_TEMPLATE % spreadsheet_key
        print 'New spreadsheet created successfully!'
        print 'View it online at %s' % spreadsheet_url
        # Persist the new key so later runs use the copy.
        utils.replace_in_file('%s/graphic_config.py' % config_path,
                              graphic_config.COPY_GOOGLE_DOC_KEY,
                              spreadsheet_key)
        return True
    else:
        # Blank out the key so a retry does not keep a stale reference.
        utils.replace_in_file('%s/graphic_config.py' % config_path,
                              graphic_config.COPY_GOOGLE_DOC_KEY, '')
        print 'Error creating spreadsheet (status code %s) with message %s' % (
            resp.status, resp.reason)
        if resp.status == 404:
            print 'Please make sure you modify the DRIVE_SPREADSHEETS_FOLDER in app_config.py. Check the configuration section on the README.'
        return False
def _new(slug):
    """Scaffold a new post from the new_post template, create its COPY
    spreadsheet, and wire the new key into post_config.py."""
    # Copy the template directory into place.
    local('cp -r new_post %s/%s' % (app_config.POST_PATH, slug))
    # Re-run post() so env.* points at the new post.
    post(slug)
    _check_credentials()
    old_key = env.post_config.COPY_GOOGLE_DOC_KEY
    new_key = _create_spreadsheet('%s Look At This COPY' % slug)
    if new_key:
        # Point the new post's config at the freshly created spreadsheet.
        utils.replace_in_file('%s/post_config.py' % env.static_path, old_key,
                              new_key)
        env.copytext_key = new_key
    update()
def patch_file_with_pci_id(self, src: str, dst: str) -> None:
    """
    Generate config with the PCI ids of GPUs installed on the system

    :param src: Original config file (template)
    :param dst: Destination file
    """
    # NOTE(review): indexes 0 (embedded) and 1 (dedicated) assume at least
    # two GPUs are present; a single-GPU system would raise IndexError —
    # confirm callers only invoke this on dual-GPU setups.
    gpu_list = utils.get_gpu_list()
    # Substitute the placeholder tokens in the template while copying
    # src -> dst.
    utils.replace_in_file(
        src, dst, {
            "<embedded-gpu>": gpu_list[0].get_pci_id(),
            "<dedicated-gpu>": gpu_list[1].get_pci_id()
        })
def delete_service(client, server_name):
    """Delete a web service: remove its config directory, drop its include
    from nginx.conf, then reload every instance.

    Returns a (success, message) tuple.
    """
    # Nothing to do when the service was never configured.
    if not os.path.isdir("/etc/nginx/" + server_name):
        return False, "Config doesn't exist."

    try:
        shutil.rmtree("/etc/nginx/" + server_name)
    except Exception as err:
        return False, str(err)

    # Remove the include directive that service creation appended.
    utils.replace_in_file(
        "/etc/nginx/nginx.conf",
        "include /etc/nginx/" + server_name + "/server.conf;\n",
        "")

    ok, detail = reload_instances(client)
    if ok:
        return True, "Web service " + server_name + " has been deleted."
    return ok, detail
def toggle_boot_loader(directory, is_boot_loader_hidden=True):
    """In directory, alter isolinux.cfg file to hide (default) or show boot
    loader on startup.

    Hiding boot loader is required for a fully automated installation.

    :param directory: root of the extracted ISO tree containing isolinux/
    :param is_boot_loader_hidden: True writes ``timeout 1`` (hidden),
        False writes ``timeout 0`` (shown)
    """
    timeout = int(is_boot_loader_hidden)  # 1 to hide boot loader!

    # Replace "timeout" option in isolinux/isolinux.cfg
    filename = os.path.join(directory, 'isolinux', 'isolinux.cfg')
    # \d+ generalizes the original [0-1] so any pre-existing timeout value
    # (e.g. "timeout 100") is matched; the replacement remains 0 or 1.
    pattern = r'^(\s*timeout\s*)\d+(\s*#|$)'
    flags = re.IGNORECASE
    pattern = re.compile(pattern, flags)
    # Raw string is required: '\g' is an invalid escape sequence in a plain
    # str literal (DeprecationWarning; SyntaxWarning from Python 3.12).
    replacement = r'\g<1>%d\g<2>' % timeout
    replace_in_file(pattern, replacement, filename)
def new_service(client, env):
    """Generate a site config, register its include in nginx.conf, and
    reload every instance.

    Returns a (success, message) tuple.
    """
    result = subprocess.run(
        ["/opt/entrypoint/site-config.sh", env["SERVER_NAME"]],
        env=env,
        capture_output=True)
    if result.returncode != 0:
        return False, "Error code " + str(
            result.returncode) + " while generating config."

    # Splice the include directive just before nginx.conf's closing brace.
    utils.replace_in_file(
        "/etc/nginx/nginx.conf",
        "}",
        "include /etc/nginx/" + env["SERVER_NAME"] + "/server.conf;\n}")

    ok, detail = reload_instances(client)
    if ok:
        return True, "Web service " + env["SERVER_NAME"] + " has been added."
    return ok, detail
def activate(self, instances, vars):
    """Append the server's include to nginx.conf and reload all instances.

    Returns the reload result, or False when the server config file is
    missing or any step raises.
    """
    try:
        # The per-server config must already have been generated.
        present = os.path.isfile(
            "/etc/nginx/" + vars["SERVER_NAME"] + "/server.conf")
        if not present:
            utils.log("[!] /etc/nginx/" + vars["SERVER_NAME"] + "/server.conf doesn't exist")
            return False

        # Splice the include directive before nginx.conf's final brace.
        utils.replace_in_file(
            "/etc/nginx/nginx.conf",
            "}",
            "include /etc/nginx/" + vars["SERVER_NAME"] + "/server.conf;\n}")
        return self.reload(instances)
    except Exception as err:
        traceback.print_exc()
        utils.log("[!] Error while activating config : " + str(err))
        return False
def configure_logstash():
    """Deploy the Logstash configuration, sysconfig and logrotate settings.

    Aborts the operation when the rabbitmq credentials are missing, since
    the deployed resources (not this script) consume them.
    """
    logstash_conf_path = '/etc/logstash/conf.d'
    runtime_properties = ctx.instance.runtime_properties
    rabbitmq_username = runtime_properties.get('rabbitmq_username')
    rabbitmq_password = runtime_properties.get('rabbitmq_password')

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by the
    # deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        ctx.abort_operation(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    ctx.logger.info('Deploying Logstash configuration...')
    utils.deploy_blueprint_resource(
        '{0}/logstash.conf'.format(CONFIG_PATH),
        '{0}/logstash.conf'.format(logstash_conf_path),
        LOGSTASH_SERVICE_NAME)

    # Due to a bug in the handling of configuration files,
    # configuration files with the same name cannot be deployed.
    # Since the logrotate config file is called `logstash`,
    # we change the name of the logstash env vars config file
    # from logstash to cloudify-logstash to be consistent with
    # other service env var files.
    init_file = '/etc/init.d/logstash'
    # Fix: raw string — '\$' is an invalid escape sequence in a plain str
    # literal (SyntaxWarning from Python 3.12). The regex still matches a
    # literal '$' either way, so behavior is unchanged.
    utils.replace_in_file(
        r'sysconfig/\$name',
        'sysconfig/cloudify-$name',
        init_file)
    utils.chmod('755', init_file)
    utils.chown('root', 'root', init_file)

    ctx.logger.debug('Deploying Logstash sysconfig...')
    utils.deploy_blueprint_resource(
        '{0}/cloudify-logstash'.format(CONFIG_PATH),
        '/etc/sysconfig/cloudify-logstash',
        LOGSTASH_SERVICE_NAME)

    utils.logrotate(LOGSTASH_SERVICE_NAME)
    utils.sudo(['/sbin/chkconfig', 'logstash', 'on'])
    utils.clean_var_log_dir(LOGSTASH_SERVICE_NAME)
def copy_spreadsheet(slug): """ Copy the COPY spreadsheet """ _check_credentials() config_path = '%s/%s/' % (app_config.GRAPHICS_PATH, slug) graphic_config = load_graphic_config(config_path) if not hasattr( graphic_config, 'COPY_GOOGLE_DOC_KEY') or not graphic_config.COPY_GOOGLE_DOC_KEY: print 'Skipping spreadsheet creation. (COPY_GOOGLE_DOC_KEY is not defined in %s/graphic_config.py.)' % slug return kwargs = { 'credentials': get_credentials(), 'url': SPREADSHEET_COPY_URL_TEMPLATE % graphic_config.COPY_GOOGLE_DOC_KEY, 'method': 'POST', 'headers': { 'Content-Type': 'application/json' }, 'body': json.dumps({ 'title': '%s GRAPHIC COPY' % slug, }), } resp = app_config.authomatic.access(**kwargs) if resp.status == 200: spreadsheet_key = resp.data['id'] spreadsheet_url = SPREADSHEET_VIEW_TEMPLATE % spreadsheet_key print 'New spreadsheet created successfully!' print 'View it online at %s' % spreadsheet_url utils.replace_in_file('%s/graphic_config.py' % config_path, graphic_config.COPY_GOOGLE_DOC_KEY, spreadsheet_key) return True else: utils.replace_in_file(config_path, graphic_config.COPY_GOOGLE_DOC_KEY, '') print 'Error creating spreadsheet (status code %s) with message %s' % ( resp.status, resp.reason) return False
def copy_spreadsheet(slug):
    """
    Copy the COPY spreadsheet.

    Copies the graphic's source spreadsheet via the Drive API, optionally
    filing it under app_config.DRIVE_SPREADSHEETS_FOLDER, and rewrites
    graphic_config.py with the new key.
    """
    _check_credentials()
    config_path = '%s/%s/' % (app_config.GRAPHICS_PATH, slug)
    graphic_config = load_graphic_config(config_path)
    if not hasattr(graphic_config, 'COPY_GOOGLE_DOC_KEY') or not graphic_config.COPY_GOOGLE_DOC_KEY:
        print 'Skipping spreadsheet creation. (COPY_GOOGLE_DOC_KEY is not defined in %s/graphic_config.py.)' % slug
        return

    metadata = {'title': '%s GRAPHIC COPY' % slug}
    try:
        # Optional: place the copy inside a configured Drive folder.
        if app_config.DRIVE_SPREADSHEETS_FOLDER:
            metadata['parents'] = [{
                'id': app_config.DRIVE_SPREADSHEETS_FOLDER}]
    except AttributeError:
        # DRIVE_SPREADSHEETS_FOLDER is an optional app_config setting.
        pass

    kwargs = {
        'credentials': get_credentials(),
        'url': SPREADSHEET_COPY_URL_TEMPLATE % graphic_config.COPY_GOOGLE_DOC_KEY,
        'method': 'POST',
        'headers': {'Content-Type': 'application/json'},
        'body': json.dumps(metadata),
    }
    resp = app_config.authomatic.access(**kwargs)
    if resp.status == 200:
        spreadsheet_key = resp.data['id']
        spreadsheet_url = SPREADSHEET_VIEW_TEMPLATE % spreadsheet_key
        print 'New spreadsheet created successfully!'
        print 'View it online at %s' % spreadsheet_url
        # Persist the new key so later runs use the copy.
        utils.replace_in_file('%s/graphic_config.py' % config_path,
                              graphic_config.COPY_GOOGLE_DOC_KEY,
                              spreadsheet_key)
        return True
    else:
        # Blank out the key so a retry does not keep a stale reference.
        utils.replace_in_file('%s/graphic_config.py' % config_path,
                              graphic_config.COPY_GOOGLE_DOC_KEY, '')
        print 'Error creating spreadsheet (status code %s) with message %s' % (resp.status, resp.reason)
        if resp.status == 404:
            print 'Please make sure you modify the DRIVE_SPREADSHEETS_FOLDER in app_config.py. Check the configuration section on the README.'
        return False
def remove_display_manager_hooks(self) -> None:
    """ Remove hooks on display managers """
    # Only undo anything if our hook script is actually installed.
    if os.path.exists(self.get_display_manager_hook_file_path()):
        # GDM
        # NOTE(review): gdm_file is removed without checking it exists —
        # confirm utils.remove tolerates a missing path.
        utils.remove(gdm_file)
        # LightDM: comment the hook line back out (in-place rewrite of the
        # same file).
        if os.path.exists(lightdm_file):
            utils.replace_in_file(
                lightdm_file, lightdm_file, {
                    ('display-setup-script=' +
                     self.get_display_manager_hook_file_path()):
                    '#display-setup-script='
                })
        # SDDM: drop the hook line entirely.
        if os.path.exists(sddm_file):
            utils.remove_line_in_file(
                sddm_file, self.get_display_manager_hook_file_path())
def deactivate(instances, vars):
    """Remove the server's include from nginx.conf, then SIGHUP every
    running bunkerized-nginx instance so it reloads its configuration.

    Returns True on success, False when the server config is missing or an
    unexpected error occurs.
    """
    try:
        # The per-server config must exist before we try to deactivate it.
        if not os.path.isfile("/etc/nginx/" + vars["SERVER_NAME"] + "/server.conf"):
            utils.log("[!] /etc/nginx/" + vars["SERVER_NAME"] + "/server.conf doesn't exist")
            return False

        # Drop the include directive that activation added.
        utils.replace_in_file(
            "/etc/nginx/nginx.conf",
            "include /etc/nginx/" + vars["SERVER_NAME"] + "/server.conf;\n",
            "")

        # Ask each running container to reload; per-container Docker
        # errors are logged but do not abort the loop.
        for _, container in instances.items():
            if container.status != "running":
                continue
            try:
                container.kill("SIGHUP")
                utils.log("[*] Sent SIGHUP signal to bunkerized-nginx instance " + container.name + " / " + container.id)
            except docker.errors.APIError as api_err:
                utils.log("[!] Docker error while sending SIGHUP signal : " + str(api_err))
        return True
    except Exception as err:
        utils.log("[!] Error while deactivating config : " + str(err))
        return False
def apply_display_manager_hooks(self) -> None:
    """ Apply hooks on display managers """
    # Only install hooks when our hook script is present on disk.
    if os.path.exists(self.get_display_manager_hook_file_path()):
        # GDM: symlink the hook into the PreSession directory.
        if os.path.exists('/etc/gdm/PreSession/'):
            utils.create_symlink(self.get_display_manager_hook_file_path(),
                                 gdm_file)
        # LightDM: uncomment display-setup-script and point it at the hook
        # (in-place rewrite of the same file).
        if os.path.exists(lightdm_file):
            utils.replace_in_file(
                lightdm_file, lightdm_file, {
                    '#display-setup-script=':
                    ('display-setup-script=' +
                     self.get_display_manager_hook_file_path())
                })
        # SDDM: append the hook line once (skip when already present).
        if os.path.exists(sddm_file) and not utils.file_contains(
                sddm_file, self.get_display_manager_hook_file_path()):
            utils.write_line_in_file(
                sddm_file, self.get_display_manager_hook_file_path())
def copy_spreadsheet(slug): """ Copy the COPY spreadsheet """ _check_credentials() config_path = '%s/%s/' % (app_config.GRAPHICS_PATH, slug) graphic_config = load_graphic_config(config_path) if not hasattr(graphic_config, 'COPY_GOOGLE_DOC_KEY') or not graphic_config.COPY_GOOGLE_DOC_KEY: print 'Skipping spreadsheet creation. (COPY_GOOGLE_DOC_KEY is not defined in %s/graphic_config.py.)' % slug return kwargs = { 'credentials': get_credentials(), 'url': SPREADSHEET_COPY_URL_TEMPLATE % graphic_config.COPY_GOOGLE_DOC_KEY, 'method': 'POST', 'headers': {'Content-Type': 'application/json'}, 'body': json.dumps({ 'title': '%s GRAPHIC COPY' % slug, 'parents': [ {"kind": "drive#fileLink", "id": "0B2CR1DddJUTbMXNzN3VNUVNKQjQ"}], }), } resp = app_config.authomatic.access(**kwargs) if resp.status == 200: spreadsheet_key = resp.data['id'] spreadsheet_url = SPREADSHEET_VIEW_TEMPLATE % spreadsheet_key print 'New spreadsheet created successfully!' print 'View it online at %s' % spreadsheet_url utils.replace_in_file('%s/graphic_config.py' % config_path , graphic_config.COPY_GOOGLE_DOC_KEY, spreadsheet_key) return (True, spreadsheet_url) else: utils.replace_in_file(config_path, graphic_config.COPY_GOOGLE_DOC_KEY, '') print 'Error creating spreadsheet (status code %s) with message %s' % (resp.status, resp.reason) return (False, 'Nope')
def copy_spreadsheet(slug):
    """
    Copy the COPY spreadsheet.

    Copies the graphic's source spreadsheet via the Drive API and rewrites
    graphic_config.py with the new key.  Returns True on success, False on
    API error, None when no COPY_GOOGLE_DOC_KEY is configured.
    """
    _check_credentials()
    config_path = "%s/%s/graphic_config.py" % (app_config.GRAPHICS_PATH, slug)
    graphic_config = _graphic_config(slug)
    if not hasattr(graphic_config, "COPY_GOOGLE_DOC_KEY") or not graphic_config.COPY_GOOGLE_DOC_KEY:
        print "Skipping spreadsheet creation. (COPY_GOOGLE_DOC_KEY is not defined in %s/graphic_config.py.)" % slug
        return
    kwargs = {
        "credentials": get_credentials(),
        "url": SPREADSHEET_COPY_URL_TEMPLATE % graphic_config.COPY_GOOGLE_DOC_KEY,
        "method": "POST",
        "headers": {"Content-Type": "application/json"},
        "body": json.dumps({"title": "%s GRAPHIC COPY" % slug}),
    }
    resp = app_config.authomatic.access(**kwargs)
    if resp.status == 200:
        spreadsheet_key = resp.data["id"]
        spreadsheet_url = SPREADSHEET_VIEW_TEMPLATE % spreadsheet_key
        print "New spreadsheet created successfully!"
        print "View it online at %s" % spreadsheet_url
        # Persist the new key so later runs use the copy.
        utils.replace_in_file(config_path, graphic_config.COPY_GOOGLE_DOC_KEY,
                              spreadsheet_key)
        return True
    # Error path: blank out the key so a retry does not keep a stale
    # reference.
    utils.replace_in_file(config_path, graphic_config.COPY_GOOGLE_DOC_KEY, "")
    print "Error creating spreadsheet (status code %s) with message %s" % (resp.status, resp.reason)
    return False
def main(): cfg_file = '/opt/shutongFlow/apps/apps/settings.py' # 修改数据库配置 replace_in_file(cfg_file, "'USER': '******'", "'USER': '******'") config_info = get_config_info() password = config_info['mysql']['root_password'] replace_in_file( cfg_file, "'PASSWORD': '******'", "'PASSWORD': '******'".format(password) ) # 使前端项目在dev模式下可被外部访问 f = '/opt/shutongFlow/fronted/config/index.js' replace_in_file(f, "host: 'localhost'", "host: '0.0.0.0'") f = '/opt/shutongFlow/fronted/src/main.js' ip = config_info['ip'] replace_in_file( f, "axios.defaults.baseURL = 'http://127.0.0.1:6062/'", "axios.defaults.baseURL = 'http://{}:6062/'".format(ip) )
def __create_project_file(self, sources_path, src_file_relative_path,
                          dest_file_path, replace_content=False):
    """Copy a template file into the project (only when the destination is
    absent) and optionally substitute project/author placeholder strings in
    the copied file."""
    # Never overwrite an existing destination file.
    if not os.path.isfile(dest_file_path):
        src_file_path = os.path.join(sources_path, src_file_relative_path)
        try:
            shutil.copyfile(src_file_path, dest_file_path)
        except WindowsError:
            # NOTE(review): WindowsError only exists on Windows; on other
            # platforms this except clause would raise NameError — confirm
            # the tool is Windows-only.
            raise ScriptError("Impossible de copier le fichier " +
                              sources_path + " vers " + dest_file_path)
        if replace_content:
            # Capitalized placeholders first, then the lowercase forms.
            replace_in_file(dest_file_path, self.DUMMY_STRING.capitalize(),
                            self.project_name)
            replace_in_file(dest_file_path, self.DUMMY_STRING,
                            self.project_name.lower())
            replace_in_file(dest_file_path, self.AUTHOR_STRING.capitalize(),
                            self.author_name)
            replace_in_file(dest_file_path, self.AUTHOR_STRING,
                            self.author_name.lower())
def copy_spreadsheet(slug):
    """ Copy the template Google spreadsheet into the graphics folder on
    Drive. """
    _check_credentials()
    config_path = '%s/%s/' % (app_config.GRAPHICS_PATH, slug)
    graphic_config = load_graphic_config(config_path)
    # NOTE(review): COPY_GOOGLE_DOC_KEY is read before the hasattr() guard
    # below — a config without the attribute raises AttributeError here
    # rather than taking the skip path.
    new_file_key = graphic_config.COPY_GOOGLE_DOC_KEY
    if not hasattr(graphic_config, 'COPY_GOOGLE_DOC_KEY') or not new_file_key:
        print 'Skipping spreadsheet creation. (COPY_GOOGLE_DOC_KEY is not ' + \
            'defined in %s/graphic_config.py.)' % slug
        return

    # Step 1: copy the template spreadsheet via the Drive API.
    req_url = SPREADSHEET_COPY_URL_TEMPLATE % graphic_config.COPY_GOOGLE_DOC_KEY
    kwargs = {
        'credentials': get_credentials(),
        'url': req_url,
        'method': 'POST',
        'headers': {'Content-Type': 'application/json'},
        'body': json.dumps({
            'title': slug
        }),
    }
    resp = app_config.authomatic.access(**kwargs)
    if resp.status == 200:
        # Sucessfully created file
        print resp
        spreadsheet_key = resp.data['id']

        # Step 2: copy file in Drive from 'templates' to 'graphics'
        # directory.
        graphics_dir_key = '0B95Rq71MRLfHY0gxUURqbS1tWDg'  # TODO: move to cfg
        request_url = 'https://www.googleapis.com/drive/v2/files/' + \
            '%s/parents' % spreadsheet_key
        kwargs = {
            'credentials': get_credentials(),
            'url': request_url,
            'method': 'POST',
            'headers': {'Content-Type': 'application/json'},
            'body': json.dumps({
                'id': graphics_dir_key
            })
        }
        resp2 = app_config.authomatic.access(**kwargs)
        if resp2.status == 200:
            # Successfully copied file to 'graphics' folder
            # Step 3: delete old file from 'templates' directory.
            templates_dir_key = '0B95Rq71MRLfHeTlSNmtnWmEwQ28'
            request_url = 'https://www.googleapis.com/drive/v2/files/' + \
                '%s/parents/%s' % (spreadsheet_key, templates_dir_key)
            kwargs = {
                'credentials': get_credentials(),
                'url': request_url,
                'method': 'DELETE',
                'headers': {'Content-Type': 'application/json'}
            }
            resp3 = app_config.authomatic.access(**kwargs)
            if resp3.status == 204:
                # Successfully deleted old file
                spreadsheet_url = SPREADSHEET_VIEW_TEMPLATE % spreadsheet_key
                print 'New spreadsheet created successfully!'
                # local('open %s' % spreadsheet_url)
                print 'View it online at %s' % spreadsheet_url
                # Persist the new key so later runs use the copy.
                utils.replace_in_file(
                    '%s/graphic_config.py' % config_path,
                    graphic_config.COPY_GOOGLE_DOC_KEY, spreadsheet_key)
                return True
    # Any non-success response in steps 1-3 falls through to this error
    # path.
    # NOTE(review): this clears the key in config_path (a directory); the
    # success branch writes to '%s/graphic_config.py' — confirm the
    # intended target file.
    utils.replace_in_file(
        config_path, graphic_config.COPY_GOOGLE_DOC_KEY, '')
    print 'Error creating spreadsheet (status code %s) with message %s' % (
        resp.status, resp.reason)
    return False
def _install_elasticsearch():
    """Install and configure the Elasticsearch service.

    Installs the RPM, deploys systemd/config/logging resources, rewrites
    sysconfig tunables (heap, java opts, log paths), and installs
    elasticsearch-curator for index rotation.
    """
    es_java_opts = ctx_properties['es_java_opts']
    es_heap_size = ctx_properties['es_heap_size']
    es_source_url = ctx_properties['es_rpm_source_url']
    es_curator_rpm_source_url = \
        ctx_properties['es_curator_rpm_source_url']

    # this will be used only if elasticsearch-curator is not installed via
    # an rpm and an internet connection is available
    es_curator_version = "3.2.3"

    es_home = "/opt/elasticsearch"
    es_logs_path = "/var/log/cloudify/elasticsearch"
    es_conf_path = "/etc/elasticsearch"
    es_unit_override = "/etc/systemd/system/elasticsearch.service.d"
    es_scripts_path = os.path.join(es_conf_path, 'scripts')

    ctx.logger.info('Installing Elasticsearch...')
    utils.set_selinux_permissive()

    utils.copy_notice('elasticsearch')
    utils.mkdir(es_home)
    utils.mkdir(es_logs_path)

    utils.yum_install(es_source_url, service_name=ES_SERVICE_NAME)

    ctx.logger.info('Chowning {0} by elasticsearch user...'.format(
        es_logs_path))
    utils.chown('elasticsearch', 'elasticsearch', es_logs_path)

    ctx.logger.info('Creating systemd unit override...')
    utils.mkdir(es_unit_override)
    utils.deploy_blueprint_resource(
        os.path.join(CONFIG_PATH, 'restart.conf'),
        os.path.join(es_unit_override, 'restart.conf'), ES_SERVICE_NAME)

    ctx.logger.info('Deploying Elasticsearch Configuration...')
    utils.deploy_blueprint_resource(
        os.path.join(CONFIG_PATH, 'elasticsearch.yml'),
        os.path.join(es_conf_path, 'elasticsearch.yml'), ES_SERVICE_NAME)
    utils.chown('elasticsearch', 'elasticsearch',
                os.path.join(es_conf_path, 'elasticsearch.yml'))

    ctx.logger.info('Deploying elasticsearch logging configuration file...')
    utils.deploy_blueprint_resource(
        os.path.join(CONFIG_PATH, 'logging.yml'),
        os.path.join(es_conf_path, 'logging.yml'), ES_SERVICE_NAME)
    utils.chown('elasticsearch', 'elasticsearch',
                os.path.join(es_conf_path, 'logging.yml'))

    ctx.logger.info('Creating Elasticsearch scripts folder and '
                    'additional external Elasticsearch scripts...')
    utils.mkdir(es_scripts_path)
    utils.deploy_blueprint_resource(
        os.path.join(CONFIG_PATH, 'scripts', 'append.groovy'),
        os.path.join(es_scripts_path, 'append.groovy'), ES_SERVICE_NAME
    )

    ctx.logger.info('Setting Elasticsearch Heap Size...')
    # we should treat these as templates.
    # Each pattern matches the sysconfig setting whether or not it is
    # currently commented out.
    utils.replace_in_file(
        '(?:#|)ES_HEAP_SIZE=(.*)',
        'ES_HEAP_SIZE={0}'.format(es_heap_size),
        '/etc/sysconfig/elasticsearch')
    if es_java_opts:
        ctx.logger.info('Setting additional JAVA_OPTS...')
        utils.replace_in_file(
            '(?:#|)ES_JAVA_OPTS=(.*)',
            'ES_JAVA_OPTS={0}'.format(es_java_opts),
            '/etc/sysconfig/elasticsearch')

    ctx.logger.info('Setting Elasticsearch logs path...')
    utils.replace_in_file(
        '(?:#|)LOG_DIR=(.*)',
        'LOG_DIR={0}'.format(es_logs_path),
        '/etc/sysconfig/elasticsearch')
    utils.replace_in_file(
        '(?:#|)ES_GC_LOG_FILE=(.*)',
        'ES_GC_LOG_FILE={0}'.format(os.path.join(es_logs_path, 'gc.log')),
        '/etc/sysconfig/elasticsearch')
    utils.logrotate(ES_SERVICE_NAME)

    ctx.logger.info('Installing Elasticsearch Curator...')
    if not es_curator_rpm_source_url:
        # Fall back to pip (requires an internet connection).
        ctx.install_python_package('elasticsearch-curator=={0}'.format(
            es_curator_version))
    else:
        utils.yum_install(es_curator_rpm_source_url,
                          service_name=ES_SERVICE_NAME)

    _configure_index_rotation()

    # elasticsearch provides a systemd init env. we just enable it.
    utils.systemd.enable(ES_SERVICE_NAME, append_prefix=False)
def install_logstash():
    """Install the Logstash service and deploy its configuration.

    Propagates elasticsearch/rabbitmq endpoint info into runtime
    properties for the deployed resources, installs the RPM, and deploys
    systemd override, logstash.conf, sysconfig and logrotate resources.
    """
    logstash_unit_override = '/etc/systemd/system/logstash.service.d'
    logstash_source_url = ctx_properties['logstash_rpm_source_url']

    rabbitmq_username = ctx_properties['rabbitmq_username']
    rabbitmq_password = ctx_properties['rabbitmq_password']

    logstash_log_path = '/var/log/cloudify/logstash'
    logstash_conf_path = '/etc/logstash/conf.d'

    # injected as an input to the script
    ctx.instance.runtime_properties['es_endpoint_ip'] = \
        os.environ['ES_ENDPOINT_IP']
    elasticsearch_props = utils.ctx_factory.get('elasticsearch')
    ctx.instance.runtime_properties['es_endpoint_port'] = \
        elasticsearch_props['es_endpoint_port']

    rabbit_props = utils.ctx_factory.get('rabbitmq')
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip(
            rabbit_props.get('rabbitmq_endpoint_ip'))
    ctx.instance.runtime_properties['rabbitmq_username'] = \
        rabbit_props['rabbitmq_username']
    ctx.instance.runtime_properties['rabbitmq_password'] = \
        rabbit_props['rabbitmq_password']

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by the
    # deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        ctx.abort_operation(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    ctx.logger.info('Installing Logstash...')
    utils.set_selinux_permissive()
    utils.copy_notice(LOGSTASH_SERVICE_NAME)

    utils.yum_install(logstash_source_url, service_name=LOGSTASH_SERVICE_NAME)

    utils.mkdir(logstash_log_path)
    utils.chown('logstash', 'logstash', logstash_log_path)

    ctx.logger.debug('Creating systemd unit override...')
    utils.mkdir(logstash_unit_override)
    utils.deploy_blueprint_resource(
        '{0}/restart.conf'.format(CONFIG_PATH),
        '{0}/restart.conf'.format(logstash_unit_override),
        LOGSTASH_SERVICE_NAME)

    ctx.logger.info('Deploying Logstash configuration...')
    utils.deploy_blueprint_resource(
        '{0}/logstash.conf'.format(CONFIG_PATH),
        '{0}/logstash.conf'.format(logstash_conf_path),
        LOGSTASH_SERVICE_NAME)

    # Due to a bug in the handling of configuration files,
    # configuration files with the same name cannot be deployed.
    # Since the logrotate config file is called `logstash`,
    # we change the name of the logstash env vars config file
    # from logstash to cloudify-logstash to be consistent with
    # other service env var files.
    init_file = '/etc/init.d/logstash'
    # NOTE(review): '\$' is an invalid escape sequence in a plain str
    # literal (SyntaxWarning from Python 3.12) — should be a raw string.
    utils.replace_in_file('sysconfig/\$name',
                          'sysconfig/cloudify-$name',
                          init_file)
    utils.chmod('755', init_file)
    utils.chown('root', 'root', init_file)

    ctx.logger.debug('Deploying Logstash sysconfig...')
    utils.deploy_blueprint_resource(
        '{0}/cloudify-logstash'.format(CONFIG_PATH),
        '/etc/sysconfig/cloudify-logstash',
        LOGSTASH_SERVICE_NAME)

    utils.logrotate(LOGSTASH_SERVICE_NAME)
    utils.sudo(['/sbin/chkconfig', 'logstash', 'on'])
    utils.clean_var_log_dir(LOGSTASH_SERVICE_NAME)
def install_logstash():
    """Install the Logstash service and deploy its configuration (variant).

    Propagates elasticsearch/rabbitmq endpoint info into runtime
    properties for the deployed resources, installs the RPM, and deploys
    systemd override, logstash.conf, sysconfig and logrotate resources.
    """
    logstash_unit_override = '/etc/systemd/system/logstash.service.d'
    logstash_source_url = ctx_properties['logstash_rpm_source_url']

    rabbitmq_username = ctx_properties['rabbitmq_username']
    rabbitmq_password = ctx_properties['rabbitmq_password']

    logstash_log_path = '/var/log/cloudify/logstash'
    logstash_conf_path = '/etc/logstash/conf.d'

    # injected as an input to the script
    ctx.instance.runtime_properties['es_endpoint_ip'] = \
        os.environ['ES_ENDPOINT_IP']
    elasticsearch_props = utils.ctx_factory.get('elasticsearch')
    ctx.instance.runtime_properties['es_endpoint_port'] = \
        elasticsearch_props['es_endpoint_port']

    rabbit_props = utils.ctx_factory.get('rabbitmq')
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip(
            rabbit_props.get('rabbitmq_endpoint_ip'))
    ctx.instance.runtime_properties['rabbitmq_username'] = \
        rabbit_props['rabbitmq_username']
    ctx.instance.runtime_properties['rabbitmq_password'] = \
        rabbit_props['rabbitmq_password']

    # Confirm username and password have been supplied for broker before
    # continuing.
    # Components other than logstash and riemann have this handled in code.
    # Note that these are not directly used in this script, but are used by the
    # deployed resources, hence the check here.
    if not rabbitmq_username or not rabbitmq_password:
        ctx.abort_operation(
            'Both rabbitmq_username and rabbitmq_password must be supplied '
            'and at least 1 character long in the manager blueprint inputs.')

    ctx.logger.info('Installing Logstash...')
    utils.set_selinux_permissive()
    utils.copy_notice(LOGSTASH_SERVICE_NAME)

    utils.yum_install(logstash_source_url, service_name=LOGSTASH_SERVICE_NAME)

    utils.mkdir(logstash_log_path)
    utils.chown('logstash', 'logstash', logstash_log_path)

    ctx.logger.info('Creating systemd unit override...')
    utils.mkdir(logstash_unit_override)
    utils.deploy_blueprint_resource(
        '{0}/restart.conf'.format(CONFIG_PATH),
        '{0}/restart.conf'.format(logstash_unit_override),
        LOGSTASH_SERVICE_NAME)

    ctx.logger.info('Deploying Logstash conf...')
    utils.deploy_blueprint_resource(
        '{0}/logstash.conf'.format(CONFIG_PATH),
        '{0}/logstash.conf'.format(logstash_conf_path),
        LOGSTASH_SERVICE_NAME)

    # Due to a bug in the handling of configuration files,
    # configuration files with the same name cannot be deployed.
    # Since the logrotate config file is called `logstash`,
    # we change the name of the logstash env vars config file
    # from logstash to cloudify-logstash to be consistent with
    # other service env var files.
    init_file = '/etc/init.d/logstash'
    # NOTE(review): '\$' is an invalid escape sequence in a plain str
    # literal (SyntaxWarning from Python 3.12) — should be a raw string.
    utils.replace_in_file(
        'sysconfig/\$name',
        'sysconfig/cloudify-$name',
        init_file)
    utils.chmod('755', init_file)
    utils.chown('root', 'root', init_file)

    ctx.logger.info('Deploying Logstash sysconfig...')
    utils.deploy_blueprint_resource(
        '{0}/cloudify-logstash'.format(CONFIG_PATH),
        '/etc/sysconfig/cloudify-logstash',
        LOGSTASH_SERVICE_NAME)

    utils.logrotate(LOGSTASH_SERVICE_NAME)
    utils.sudo(['/sbin/chkconfig', 'logstash', 'on'])
    utils.clean_var_log_dir(LOGSTASH_SERVICE_NAME)
def _install_elasticsearch():
    """Install and configure the Elasticsearch service (variant).

    Installs the RPM, deploys systemd/config/logging resources, rewrites
    sysconfig tunables (heap, java opts, log paths), and installs
    elasticsearch-curator for index rotation.
    """
    es_java_opts = ctx_properties['es_java_opts']
    es_heap_size = ctx_properties['es_heap_size']
    es_source_url = ctx_properties['es_rpm_source_url']
    es_curator_rpm_source_url = \
        ctx_properties['es_curator_rpm_source_url']

    # this will be used only if elasticsearch-curator is not installed via
    # an rpm and an internet connection is available
    es_curator_version = "3.2.3"

    es_home = "/opt/elasticsearch"
    es_logs_path = "/var/log/cloudify/elasticsearch"
    es_conf_path = "/etc/elasticsearch"
    es_unit_override = "/etc/systemd/system/elasticsearch.service.d"
    es_scripts_path = os.path.join(es_conf_path, 'scripts')

    ctx.logger.info('Installing Elasticsearch...')
    utils.set_selinux_permissive()

    utils.copy_notice('elasticsearch')
    utils.mkdir(es_home)
    utils.mkdir(es_logs_path)

    utils.yum_install(es_source_url, service_name=ES_SERVICE_NAME)

    ctx.logger.info(
        'Chowning {0} by elasticsearch user...'.format(es_logs_path))
    utils.chown('elasticsearch', 'elasticsearch', es_logs_path)

    ctx.logger.info('Creating systemd unit override...')
    utils.mkdir(es_unit_override)
    utils.deploy_blueprint_resource(
        os.path.join(CONFIG_PATH, 'restart.conf'),
        os.path.join(es_unit_override, 'restart.conf'), ES_SERVICE_NAME)

    ctx.logger.info('Deploying Elasticsearch Configuration...')
    utils.deploy_blueprint_resource(
        os.path.join(CONFIG_PATH, 'elasticsearch.yml'),
        os.path.join(es_conf_path, 'elasticsearch.yml'), ES_SERVICE_NAME)
    utils.chown('elasticsearch', 'elasticsearch',
                os.path.join(es_conf_path, 'elasticsearch.yml'))

    ctx.logger.info('Deploying elasticsearch logging configuration file...')
    utils.deploy_blueprint_resource(os.path.join(CONFIG_PATH, 'logging.yml'),
                                    os.path.join(es_conf_path, 'logging.yml'),
                                    ES_SERVICE_NAME)
    utils.chown('elasticsearch', 'elasticsearch',
                os.path.join(es_conf_path, 'logging.yml'))

    ctx.logger.info('Creating Elasticsearch scripts folder and '
                    'additional external Elasticsearch scripts...')
    utils.mkdir(es_scripts_path)
    utils.deploy_blueprint_resource(
        os.path.join(CONFIG_PATH, 'scripts', 'append.groovy'),
        os.path.join(es_scripts_path, 'append.groovy'), ES_SERVICE_NAME)

    ctx.logger.info('Setting Elasticsearch Heap Size...')
    # we should treat these as templates.
    # Each pattern matches the sysconfig setting whether or not it is
    # currently commented out.
    utils.replace_in_file('(?:#|)ES_HEAP_SIZE=(.*)',
                          'ES_HEAP_SIZE={0}'.format(es_heap_size),
                          '/etc/sysconfig/elasticsearch')
    if es_java_opts:
        ctx.logger.info('Setting additional JAVA_OPTS...')
        utils.replace_in_file('(?:#|)ES_JAVA_OPTS=(.*)',
                              'ES_JAVA_OPTS={0}'.format(es_java_opts),
                              '/etc/sysconfig/elasticsearch')

    ctx.logger.info('Setting Elasticsearch logs path...')
    utils.replace_in_file('(?:#|)LOG_DIR=(.*)',
                          'LOG_DIR={0}'.format(es_logs_path),
                          '/etc/sysconfig/elasticsearch')
    utils.replace_in_file(
        '(?:#|)ES_GC_LOG_FILE=(.*)',
        'ES_GC_LOG_FILE={0}'.format(os.path.join(es_logs_path, 'gc.log')),
        '/etc/sysconfig/elasticsearch')
    utils.logrotate(ES_SERVICE_NAME)

    ctx.logger.info('Installing Elasticsearch Curator...')
    if not es_curator_rpm_source_url:
        # Fall back to pip (requires an internet connection).
        ctx.install_python_package(
            'elasticsearch-curator=={0}'.format(es_curator_version))
    else:
        utils.yum_install(es_curator_rpm_source_url,
                          service_name=ES_SERVICE_NAME)

    _configure_index_rotation()

    # elasticsearch provides a systemd init env. we just enable it.
    utils.systemd.enable(ES_SERVICE_NAME, append_prefix=False)