def _test_file_codec(self, data, read_codec, write_codec=None,
                     expected_data=None, expected_exception=None,
                     reverse_encoding=False):
    """Round-trip *data* through write_file/read_file with the given codecs.

    If ``expected_exception`` is given, the round trip is expected to raise
    inside that context manager; otherwise the data read back is compared
    against ``expected_data`` (or the original ``data`` when not supplied).
    When ``reverse_encoding`` is set, encoding on write and decoding on
    read are both disabled.
    """
    write_codec = write_codec or read_codec
    # Both flags flip together when the encoding direction is reversed.
    encode = decode = not reverse_encoding
    with tempfile.NamedTemporaryFile() as test_file:
        if expected_exception:
            with expected_exception:
                operating_system.write_file(test_file.name, data,
                                            codec=write_codec,
                                            encode=encode)
                operating_system.read_file(test_file.name,
                                           codec=read_codec,
                                           decode=decode)
        else:
            operating_system.write_file(test_file.name, data,
                                        codec=write_codec, encode=encode)
            actual = operating_system.read_file(test_file.name,
                                                codec=read_codec,
                                                decode=decode)
            expected = data if expected_data is None else expected_data
            self.assertEqual(expected, actual)
def test_read_write_file_input_validation(self):
    """read_file/write_file must reject missing files and invalid paths."""
    read_cases = [
        (None, "File does not exist: None"),
        ("/__DOES_NOT_EXIST__",
         "File does not exist: /__DOES_NOT_EXIST__"),
    ]
    for target, message in read_cases:
        with ExpectedException(exception.UnprocessableEntity, message):
            operating_system.read_file(target)
    with ExpectedException(exception.UnprocessableEntity,
                           "Invalid path: None"):
        operating_system.write_file(None, {})
def get(self, revision):
    """Return the delta between the given (1-based) revision and the
    current base configuration, or {} when no revisions exist.
    """
    revision_files = self._collect_revisions()
    if not revision_files:
        return {}
    this_revision = operating_system.read_file(
        revision_files[revision - 1], codec=self._codec)
    current_base = operating_system.read_file(
        self._base_config_path, codec=self._codec)
    return guestagent_utils.dict_difference(this_revision, current_base)
def test_read_write_file_input_validation(self):
    """Invalid inputs to read_file/write_file raise UnprocessableEntity."""
    err = exception.UnprocessableEntity
    with ExpectedException(err, "File does not exist: None"):
        operating_system.read_file(None)
    with ExpectedException(err, "File does not exist: /__DOES_NOT_EXIST__"):
        operating_system.read_file('/__DOES_NOT_EXIST__')
    with ExpectedException(err, "Invalid path: None"):
        operating_system.write_file(None, {})
def _test_file_codec(self, data, read_codec, write_codec=None,
                     expected_data=None, expected_exception=None):
    """Round-trip *data* through write_file/read_file with the codecs.

    When ``expected_exception`` is supplied the round trip must raise
    within that context; otherwise the value read back must equal
    ``expected_data`` (or ``data`` itself when no expectation is given).
    """
    write_codec = write_codec or read_codec
    with tempfile.NamedTemporaryFile() as test_file:
        if expected_exception:
            with expected_exception:
                operating_system.write_file(test_file.name, data,
                                            codec=write_codec)
                operating_system.read_file(test_file.name,
                                           codec=read_codec)
        else:
            operating_system.write_file(test_file.name, data,
                                        codec=write_codec)
            actual = operating_system.read_file(test_file.name,
                                                codec=read_codec)
            expected = data if expected_data is None else expected_data
            self.assertEqual(expected, actual)
def _update_crsconfig_params_hostname(self, hostname):
    """Rewrite the INSTALL_NODE entry in crsconfig_params to *hostname*."""
    filepath = path.join(GRID_HOME, 'crs', 'install', 'crsconfig_params')
    current = operating_system.read_file(filepath, as_root=True)
    contents = re.sub(
        r'INSTALL_NODE=.*',
        'INSTALL_NODE={hostname}'.format(hostname=hostname),
        current)
    self.write_oracle_user_file(
        filepath, contents, filemode=operating_system.FileMode.SET_FULL)
def _read_log_position(self):
    """Return the master binlog file/position recorded by the backup.

    Reads ``backup_variables.txt`` from the data directory and parses its
    ``binlog_position`` entry (``<file>:<position>``).  Falls back to an
    empty log file name and position 4 when the file is absent.
    """
    backup_var_file = ('%s/backup_variables.txt' % MySqlApp.get_data_dir())
    if operating_system.exists(backup_var_file):
        try:
            LOG.info(_("Reading log position from %s") % backup_var_file)
            backup_vars = operating_system.read_file(
                backup_var_file,
                stream_codecs.PropertiesCodec(delimiter='='),
                as_root=True)
            binlog_position = backup_vars['binlog_position']
            binlog_file, binlog_pos = binlog_position.split(':')
            return {
                'log_file': binlog_file,
                'log_position': int(binlog_pos)
            }
        except Exception as ex:
            # Any read/parse failure (missing key, bad format, non-int
            # position) is reported as an inability to determine the
            # binlog position.
            LOG.exception(ex)
            raise self.UnableToDetermineBinlogPosition(
                {'binlog_file': backup_var_file})
    else:
        LOG.info(_("Log position detail not available. "
                   "Using default values."))
        return {'log_file': '', 'log_position': 4}
def get_master_ref(self, service, snapshot_info):
    """Capture information from a master node.

    Builds a tar archive of the files a new standby needs (standby control
    file, password file, parameter file, oratab and the Oracle conf file),
    reads it back base64-encoded, and returns a dict describing this
    master.
    """
    # Temp-file locations for the artifacts placed in the archive.
    pfile = '/tmp/init%s_stby.ora' % self._get_config().db_name
    pwfile = ('%(ora_home)s/dbs/orapw%(db_name)s' %
              {'ora_home': CONF.get(MANAGER).oracle_home,
               'db_name': self._get_config().db_name})
    ctlfile = '/tmp/%s_stby.ctl' % self._get_config().db_name
    oratabfile = '/etc/oratab'
    oracnffile = CONF.get(MANAGER).conf_file
    datafile = '/tmp/oradata.tar.gz'

    def _cleanup_tmp_files():
        # Best-effort removal of temp artifacts (force=True ignores
        # missing files).
        operating_system.remove(ctlfile, force=True, as_root=True)
        operating_system.remove(pfile, force=True, as_root=True)
        operating_system.remove(datafile, force=True, as_root=True)

    # Clear leftovers from any previous attempt before starting.
    _cleanup_tmp_files()

    with ora_service.LocalOracleClient(self._get_config().db_name,
                                       service=True) as client:
        # Dump a standby control file for the future slave.
        client.execute("ALTER DATABASE CREATE STANDBY CONTROLFILE AS "
                       "'%s'" % ctlfile)
        ora_service.OracleAdmin().create_parameter_file(target=pfile,
                                                        client=client)
        # Collect the existing fal_server list, with this database first.
        q = sql_query.Query()
        q.columns = ["value"]
        q.tables = ["v$parameter"]
        q.where = ["name = 'fal_server'"]
        client.execute(str(q))
        row = client.fetchone()
        db_list = []
        if row is not None and row[0] is not None:
            db_list = str(row[0]).split(",")
        db_list.insert(0, self._get_config().db_name)

        # Create a tar file containing files needed for slave creation
        utils.execute_with_timeout('tar', '-Pczvf', datafile, ctlfile,
                                   pwfile, pfile, oratabfile, oracnffile,
                                   run_as_root=True, root_helper='sudo')
        # Read the archive without decoding so the payload stays base64.
        oradata_encoded = operating_system.read_file(
            datafile, codec=stream_codecs.Base64Codec(), as_root=True,
            decode=False)
        _cleanup_tmp_files()
        master_ref = {
            'host': netutils.get_my_ipv4(),
            'db_name': self._get_config().db_name,
            'db_list': db_list,
            'oradata': oradata_encoded,
        }
        return master_ref
def _rewind_against_master(self, service):
    """Call pg_rewind to resync datadir against state of new master

    We should already have a recovery.conf file in PGDATA
    """
    # Pull the new master's connection string out of recovery.conf.
    rconf = operating_system.read_file(
        service.pgsql_recovery_config,
        codec=stream_codecs.KeyValueCodec(line_terminator="\n"),
        as_root=True
    )
    conninfo = rconf["primary_conninfo"].strip()

    # The recovery.conf file we want should already be there, but pg_rewind
    # will delete it, so copy it out first
    rec = service.pgsql_recovery_config
    tmprec = "/tmp/recovery.conf.bak"
    operating_system.move(rec, tmprec, as_root=True)

    # NOTE(review): pg_rewind documents --source-pgdata and --source-server
    # as mutually exclusive, and --source-pgdata here points at our own
    # data dir - confirm this invocation against the pg_rewind docs.
    cmd_full = " ".join(
        [
            "pg_rewind",
            "-D",
            service.pgsql_data_dir,
            "--source-pgdata=" + service.pgsql_data_dir,
            "--source-server=" + conninfo,
        ]
    )
    # Run as the postgres OS user; non-zero exit raises.
    out, err = utils.execute("sudo", "su", "-", service.pgsql_owner,
                             "-c", "%s" % cmd_full, check_exit_code=0)
    LOG.debug("Got stdout %s and stderr %s from pg_rewind" %
              (str(out), str(err)))

    # Restore recovery.conf after pg_rewind removed it.
    operating_system.move(tmprec, rec, as_root=True)
def mount_storage(self, storage_info):
    """Mount NFS shared storage by appending entries to /etc/fstab.

    :param storage_info: dict with a 'type' key (only 'nfs' is supported)
                         and a 'data' dict of mount sources.
    :raises exception.GuestError: for any non-NFS storage type.
    """
    fstab = path.join('/etc', 'fstab')
    # Options for the clusterware disks vs. the database data disk.
    default_mount_options = ('rw,bg,hard,nointr,tcp,vers=3,timeo=600,'
                             'rsize=32768,wsize=32768,actimeo=0')
    data_mount_options = ('user,tcp,rsize=32768,wsize=32768,hard,intr,'
                          'noac,nfsvers=3')
    if storage_info['type'] == 'nfs':
        sources = storage_info['data']
        data = list()
        # Preserve any existing fstab contents before appending.
        if operating_system.exists(fstab):
            data.append(operating_system.read_file(fstab, as_root=True))

        def _line(source, target, options=default_mount_options):
            # Append one fstab-formatted NFS mount line.
            data.append('{source} {target} nfs {options} 0 0'.format(
                source=source, target=target, options=options))

        _line(sources['votedisk_mount'], SHARED_DISK_PATHS['votedisk'],)
        _line(sources['registry_mount'], SHARED_DISK_PATHS['registry'],)
        _line(sources['database_mount'], SHARED_DISK_PATHS['database'],
              data_mount_options)
        operating_system.write_file(fstab, '\n'.join(data), as_root=True)
        # Mount everything listed in the rewritten fstab.
        utils.execute_with_timeout('mount', '-a', run_as_root=True,
                                   root_helper='sudo',
                                   timeout=service.ORACLE_TIMEOUT,
                                   log_output_on_error=True)
    else:
        raise exception.GuestError(_(
            "Storage type {t} not valid.").format(t=storage_info['type']))
def __init__(self):
    """Load (or create) the Oracle guest configuration file.

    On first use the config file is created with an empty ORACLE section;
    otherwise any previously stored passwords and database names are read
    into the instance attributes (missing keys leave the None defaults).
    """
    self._admin_pwd = None
    self._sys_pwd = None
    self._db_name = None
    self._db_unique_name = None
    self.codec = stream_codecs.IniCodec()
    if not os.path.isfile(self._CONF_FILE):
        operating_system.create_directory(os.path.dirname(self._CONF_FILE),
                                          as_root=True)
        section = {self._CONF_ORA_SEC: {}}
        operating_system.write_file(self._CONF_FILE, section,
                                    codec=self.codec, as_root=True)
    else:
        config = operating_system.read_file(self._CONF_FILE,
                                            codec=self.codec, as_root=True)
        # A single .get replaces the original try/KeyError around repeated
        # config[self._CONF_ORA_SEC] lookups: a missing ORACLE section
        # simply leaves all defaults in place.
        section = config.get(self._CONF_ORA_SEC, {})
        if self._CONF_SYS_KEY in section:
            self._sys_pwd = section[self._CONF_SYS_KEY]
        if self._CONF_ADMIN_KEY in section:
            self._admin_pwd = section[self._CONF_ADMIN_KEY]
        if self._CONF_ROOT_ENABLED in section:
            self._root_enabled = section[self._CONF_ROOT_ENABLED]
        if self._CONF_DB_NAME in section:
            self._db_name = section[self._CONF_DB_NAME]
        if self._CONF_DB_UNIQUE_NAME in section:
            self._db_unique_name = section[self._CONF_DB_UNIQUE_NAME]
def read_module_results(cls, is_admin=False, include_contents=False):
    """Read all the module results on the guest and return a list of
    them.

    :param is_admin: whether the caller may see admin-only modules and
                     their contents.
    :param include_contents: when True, attach base64-encoded contents
                             to each result.
    :returns: result dicts sorted by 'updated', newest first.
    """
    results = []
    pattern = cls.MODULE_RESULT_FILENAME
    result_files = operating_system.list_files_in_directory(
        cls.MODULE_BASE_DIR, recursive=True, pattern=pattern)
    for result_file in result_files:
        result = cls.read_module_result(result_file)
        # Skip removed modules and those hidden from non-admin callers.
        if (not result.get('removed') and
                (is_admin or result.get('visible'))):
            if include_contents:
                codec = stream_codecs.Base64Codec()
                # keep admin_only for backwards compatibility
                if not is_admin and (result.get('is_admin') or
                                     result.get('admin_only')):
                    contents = (
                        "Must be admin to retrieve contents for module %s"
                        % result.get('name', 'Unknown'))
                    result['contents'] = codec.serialize(contents)
                else:
                    contents_dir = os.path.dirname(result_file)
                    contents_file = cls.build_contents_filename(
                        contents_dir)
                    # decode=False keeps the contents base64-encoded.
                    result['contents'] = operating_system.read_file(
                        contents_file, codec=codec, decode=False)
            results.append(result)
    results.sort(key=operator.itemgetter('updated'), reverse=True)
    return results
def prep_pfile_management(self):
    """Generate the base PFILE from the original SPFILE, cleanse it of
    internal settings, create a backup spfile, and initialize the
    configuration manager to use it.
    """
    self.admin.create_pfile(target=self.paths.os_pfile, from_memory=True)
    parameters = operating_system.read_file(
        self.paths.os_pfile, codec=self.pfile_codec(), as_root=True)
    # Drop Oracle-internal ('_'-prefixed) parameters and any value that
    # contains 'rdbms'.  ('x in v' replaces the original
    # 'v.find(...) != -1' and the manual loop building the dict.)
    cleansed_parameters = {
        k: v for k, v in parameters.items()
        if not k.startswith('_') and 'rdbms' not in v
    }
    operating_system.write_file(
        self.paths.os_pfile, cleansed_parameters,
        codec=self.pfile_codec(), as_root=True)
    self.admin.create_spfile(target=self.paths.base_spfile,
                             source=self.paths.os_pfile)
    self._init_configuration_manager()
def read_module_results(cls, is_admin=False, include_contents=False):
    """Read all the module results on the guest and return a list of
    them.

    :param is_admin: whether the caller may retrieve admin-only module
                     contents.
    :param include_contents: when True, attach base64-encoded contents
                             to each result.
    :returns: list of result dicts (in directory-listing order).
    """
    results = []
    pattern = cls.MODULE_RESULT_FILENAME
    result_files = operating_system.list_files_in_directory(
        cls.MODULE_BASE_DIR, recursive=True, pattern=pattern)
    for result_file in result_files:
        result = cls.read_module_result(result_file)
        # Skip removed modules and those not visible to non-admins.
        if (not result.get('removed') and
                (is_admin or result.get('visible'))):
            if include_contents:
                codec = stream_codecs.Base64Codec()
                if not is_admin and result.get('admin_only'):
                    contents = (
                        "Must be admin to retrieve contents for module %s"
                        % result.get('name', 'Unknown'))
                    result['contents'] = codec.serialize(contents)
                else:
                    contents_dir = os.path.dirname(result_file)
                    contents_file = cls.build_contents_filename(
                        contents_dir)
                    # decode=False keeps the contents base64-encoded.
                    result['contents'] = operating_system.read_file(
                        contents_file, codec=codec, decode=False)
            results.append(result)
    return results
def apply(self, group_name, change_id, options):
    """Write *options* as a revision file for (group_name, change_id).

    Creates the next-numbered revision file if none exists for this
    change, otherwise merges *options* on top of the existing revision.
    """
    revision_file = self._find_revision_file(group_name, change_id)
    if revision_file is None:
        # No revision for this change yet - allocate the next index.
        next_index = self._get_last_file_index(group_name) + 1
        revision_file = guestagent_utils.build_file_path(
            self._revision_dir,
            '%s-%03d-%s' % (group_name, next_index, change_id),
            self._revision_ext)
    else:
        # Merge the new options over what the revision already holds.
        current = operating_system.read_file(revision_file,
                                             codec=self._codec)
        options = guestagent_utils.update_dict(options, current)
    operating_system.write_file(revision_file, options, codec=self._codec,
                                as_root=self._requires_root)
    operating_system.chown(revision_file, self._owner, self._group,
                           as_root=self._requires_root)
    operating_system.chmod(revision_file, FileMode.ADD_READ_ALL,
                           as_root=self._requires_root)
def _rewind_against_master(self, service):
    """Call pg_rewind to resync datadir against state of new master

    We should already have a recovery.conf file in PGDATA
    """
    # Pull the new master's connection string out of recovery.conf.
    rconf = operating_system.read_file(
        service.pgsql_recovery_config,
        codec=stream_codecs.KeyValueCodec(line_terminator='\n'),
        as_root=True)
    conninfo = rconf['primary_conninfo'].strip()

    # The recovery.conf file we want should already be there, but pg_rewind
    # will delete it, so copy it out first
    rec = service.pgsql_recovery_config
    tmprec = "/tmp/recovery.conf.bak"
    operating_system.move(rec, tmprec, as_root=True)

    # NOTE(review): pg_rewind documents --source-pgdata and --source-server
    # as mutually exclusive, and --source-pgdata here points at our own
    # data dir - confirm this invocation against the pg_rewind docs.
    cmd_full = " ".join([
        "pg_rewind",
        "-D", service.pgsql_data_dir,
        '--source-pgdata=' + service.pgsql_data_dir,
        '--source-server=' + conninfo
    ])
    # Run as the postgres OS user; non-zero exit raises.
    out, err = utils.execute("sudo", "su", "-", service.pgsql_owner,
                             "-c", "%s" % cmd_full, check_exit_code=0)
    LOG.debug("Got stdout %s and stderr %s from pg_rewind" %
              (str(out), str(err)))

    # Restore recovery.conf after pg_rewind removed it.
    operating_system.move(tmprec, rec, as_root=True)
def _save_value_in_file(self, option, value):
    """Persist *value* for *option* into the backing config file."""
    target = self.file_path
    config = operating_system.read_file(target, codec=self._codec,
                                        as_root=True)
    config[self.section_name][self.key_names[option]] = value
    operating_system.write_file(target, config, codec=self._codec,
                                as_root=True)
def _get_or_create_replication_user(self, service):
    """There are three scenarios we need to deal with here:

    - This is a fresh master, with no replicator user created.
      Generate a new u/p
    - We are attaching a new slave and need to give it the login creds
      Send the creds we have stored in PGDATA/.replpass
    - This is a failed-over-to slave, who will have the replicator user
      but not the credentials file. Recreate the repl user in this case

    :returns: dict with 'name' and 'password' of the replication user.
    """
    LOG.debug("Checking for replicator user")
    pwfile = os.path.join(service.pgsql_data_dir, ".replpass")
    admin = service.build_admin()
    if admin.user_exists(REPL_USER):
        if operating_system.exists(pwfile, as_root=True):
            LOG.debug("Found existing .replpass, returning pw")
            pw = operating_system.read_file(pwfile, as_root=True)
        else:
            # Failed-over slave: the user exists but the password file is
            # gone - drop and recreate so password and file agree again.
            LOG.debug("Found user but not .replpass, recreate")
            u = models.PostgreSQLUser(REPL_USER)
            admin._drop_user(context=None, user=u)
            pw = self._create_replication_user(service, admin, pwfile)
    else:
        # Fresh master: no replicator user yet.
        LOG.debug("Found no replicator user, create one")
        pw = self._create_replication_user(service, admin, pwfile)
    repl_user_info = {"name": REPL_USER, "password": pw}
    return repl_user_info
def get(self, revision):
    """Return the parsed contents of the given (1-based) revision,
    or {} when no revisions exist.
    """
    revision_files = self._collect_revisions()
    if not revision_files:
        return {}
    return operating_system.read_file(revision_files[revision - 1],
                                      codec=self._codec)
def apply(self, name, datastore, ds_version, data_file, admin_module):
    """Return (True, value) for the 'message' entry in the contents file,
    or (False, error text) when no such entry exists.
    """
    data = operating_system.read_file(
        data_file, codec=stream_codecs.KeyValueCodec())
    for key, value in data.items():
        if key.lower() == 'message':
            return True, value
    return False, 'Message not found in contents file'
def apply(self, name, datastore, ds_version, data_file, admin_module):
    """Look up the 'message' entry in the module contents file."""
    data = operating_system.read_file(data_file,
                                      codec=stream_codecs.KeyValueCodec())
    missing = object()
    found = next((value for key, value in data.items()
                  if 'message' == key.lower()), missing)
    if found is not missing:
        return True, found
    return False, 'Message not found in contents file'
def parse_updates(self):
    """Merge all revision files into a single options dict."""
    parsed_options = {}
    for revision_path in self._collect_revisions():
        revision = operating_system.read_file(revision_path,
                                              codec=self._codec)
        guestagent_utils.update_dict(revision, parsed_options)
    return parsed_options
def apply(self, group_name, change_id, options):
    """Persist *options* as a revision for (group_name, change_id).

    Ensures the import directory exists, then either creates the
    next-numbered revision file or merges into the existing one.
    """
    self._initialize_import_directory()
    revision_file = self._find_revision_file(group_name, change_id)
    if revision_file is None:
        # First revision for this change: allocate the next file index.
        next_index = self._get_last_file_index(group_name) + 1
        revision_file = guestagent_utils.build_file_path(
            self._revision_dir,
            '%s-%03d-%s' % (group_name, next_index, change_id),
            self._revision_ext)
    else:
        # Merge the new options over the existing revision contents.
        current = operating_system.read_file(
            revision_file, codec=self._codec, as_root=self._requires_root)
        options = guestagent_utils.update_dict(options, current)
    operating_system.write_file(
        revision_file, options, codec=self._codec,
        as_root=self._requires_root)
    operating_system.chown(
        revision_file, self._owner, self._group,
        as_root=self._requires_root)
    operating_system.chmod(
        revision_file, FileMode.ADD_READ_ALL, as_root=self._requires_root)
def _rewind_against_master(self):
    """Call pg_rewind to resync datadir against state of new master

    We should already have a recovery.conf file in PGDATA
    """
    rconf = operating_system.read_file(self.pgsql_recovery_config,
                                       as_root=True)
    regex = re.compile("primary_conninfo = (.*)")
    m = regex.search(rconf)
    # NOTE(review): if recovery.conf has no primary_conninfo line, m is
    # None and the next statement raises AttributeError - verify that
    # cannot happen by the time this is called.
    conninfo = m.group(1)

    # The recovery.conf file we want should already be there, but pg_rewind
    # will delete it, so copy it out first
    rec = self.pgsql_recovery_config
    tmprec = "/tmp/recovery.conf.bak"
    operating_system.move(rec, tmprec, as_root=True)

    cmd_full = " ".join([
        "pg_rewind",
        "-D", self.pgsql_data_dir,
        '--source-server=' + conninfo
    ])
    # Run as the postgres OS user; non-zero exit raises.
    out, err = utils.execute("sudo", "su", "-", self.PGSQL_OWNER,
                             "-c", "%s" % cmd_full, check_exit_code=0)
    LOG.debug("Got stdout %s and stderr %s from pg_rewind" %
              (str(out), str(err)))

    # Restore recovery.conf after pg_rewind removed it.
    operating_system.move(tmprec, rec, as_root=True)
def parse_updates(self):
    """Combine every revision file into one merged options dict."""
    parsed_options = {}
    for revision_path in self._collect_revision_files():
        guestagent_utils.update_dict(
            operating_system.read_file(revision_path, codec=self._codec),
            parsed_options)
    return parsed_options
def _get_or_create_replication_user(self, service):
    """There are three scenarios we need to deal with here:

    - This is a fresh master, with no replicator user created.
      Generate a new u/p
    - We are attaching a new slave and need to give it the login creds
      Send the creds we have stored in PGDATA/.replpass
    - This is a failed-over-to slave, who will have the replicator user
      but not the credentials file. Recreate the repl user in this case

    :returns: dict with 'name' and 'password' of the replication user.
    """
    LOG.debug("Checking for replicator user")
    pwfile = os.path.join(service.pgsql_data_dir, ".replpass")
    admin = service.build_admin()
    if admin.user_exists(REPL_USER):
        if operating_system.exists(pwfile, as_root=True):
            LOG.debug("Found existing .replpass, returning pw")
            pw = operating_system.read_file(pwfile, as_root=True)
        else:
            # Failed-over slave: the user exists but the password file is
            # gone - drop and recreate so password and file agree again.
            LOG.debug("Found user but not .replpass, recreate")
            u = models.PostgreSQLUser(REPL_USER)
            admin._drop_user(context=None, user=u)
            pw = self._create_replication_user(service, admin, pwfile)
    else:
        # Fresh master: no replicator user yet.
        LOG.debug("Found no replicator user, create one")
        pw = self._create_replication_user(service, admin, pwfile)
    repl_user_info = {'name': REPL_USER, 'password': pw}
    return repl_user_info
def _load_current_superuser(self):
    """Build a CassandraUser from the credentials stored in cqlshrc."""
    config = operating_system.read_file(self._get_cqlsh_conf_path(),
                                        codec=IniCodec())
    auth_section = config[self._CONF_AUTH_SEC]
    return models.CassandraUser(auth_section[self._CONF_USR_KEY],
                                auth_section[self._CONF_PWD_KEY])
def _get_actual_db_status(self):
    """Map the Oracle RA status file contents onto a service status.

    Returns RUNNING when the file starts with 'OK' and UNKNOWN when it
    starts with 'ERROR'.
    """
    if os.path.exists(CONF.get(MANAGER).oracle_ra_status_file):
        status = operating_system.read_file(
            CONF.get(MANAGER).oracle_ra_status_file, as_root=True)
        if status.startswith('OK'):
            return rd_instance.ServiceStatuses.RUNNING
        elif status.startswith('ERROR'):
            return rd_instance.ServiceStatuses.UNKNOWN
    # NOTE(review): falls through to an implicit None when the status file
    # is absent or starts with neither 'OK' nor 'ERROR' - verify callers
    # expect that.
def _assert_import_overrides(self, strategy, group_name, overrides,
                             path_builder):
    """Verify each imported override file exists with expected contents."""
    for change_id, _, index, expected in overrides:
        expected_path = path_builder(strategy._revision_dir, group_name,
                                     change_id, index,
                                     strategy._revision_ext)
        # The override file must exist ...
        self._assert_file_exists(expected_path, True)
        # ... and parse back to exactly the expected options.
        imported = operating_system.read_file(expected_path,
                                              codec=strategy._codec)
        self.assertEqual(expected, imported)
def _save_value_in_file(self, param, value):
    """Store *value* under *param* in the ORACLE config section on disk."""
    conf_file = self._CONF_FILE
    config = operating_system.read_file(conf_file, codec=self.codec,
                                        as_root=True)
    config[self._CONF_ORA_SEC][param] = value
    operating_system.write_file(conf_file, config, codec=self.codec,
                                as_root=True)
def _create_oratab_entry(self):
    """Create in the /etc/oratab file entries for the databases
    being restored.
    """
    # Read as root for consistency with the as_root write/chown below
    # (the other _create_oratab_entry implementations also read as root).
    file_content = operating_system.read_file(ORATAB_PATH, as_root=True)
    file_content += ("\n%(db_name)s:%(ora_home)s:N\n" %
                     {'db_name': self.db_name,
                      'ora_home': ORACLE_HOME})
    operating_system.write_file(ORATAB_PATH, file_content, as_root=True)
    operating_system.chown(ORATAB_PATH, 'oracle', 'oinstall',
                           recursive=True, force=True, as_root=True)
def _needs_pdb_cleanup(self):
    """Decide from the RA status file whether PDB cleanup is needed.

    False when the status file is missing or reports a connection error
    ('ERROR-CONN'); True for any other recorded status.
    """
    status_file = CONF.get(MANAGER).oracle_ra_status_file
    if not os.path.exists(status_file):
        return False
    status = operating_system.read_file(status_file, as_root=True)
    return not status.startswith('ERROR-CONN')
def _save_value_in_file(self, option, value):
    """Write *value* for *option* back to the configuration file."""
    contents = operating_system.read_file(self.file_path,
                                          codec=self._codec, as_root=True)
    key = self.key_names[option]
    contents[self.section_name][key] = value
    operating_system.write_file(self.file_path, contents,
                                codec=self._codec, as_root=True)
def pg_primary_host(self):
    """There seems to be no way to programmatically determine this on a
    hot standby, so grab what we have written to the recovery file.

    :returns: the IPv4 address from the host= entry of recovery.conf.
    """
    r = operating_system.read_file(self.pgsql_recovery_config,
                                   as_root=True)
    # Fixed regex: the original non-raw "host=(\d+.\d+.\d+.\d+) " relied
    # on the invalid '\d' string escape and used bare '.' (any character)
    # where a literal dot was intended.
    regexp = re.compile(r"host=(\d+\.\d+\.\d+\.\d+) ")
    m = regexp.search(r)
    return m.group(1)
def parse_updates(self):
    """Merge every revision file (read as root when required) into a
    single options dict.
    """
    parsed_options = {}
    for revision_path in self._collect_revision_files():
        revision = operating_system.read_file(
            revision_path, codec=self._codec,
            as_root=self._requires_root)
        guestagent_utils.update_dict(revision, parsed_options)
    LOG.debug(f"Parsed overrides options: {parsed_options}")
    return parsed_options
def enable_ldap(self): LOG.debug("Starting saslauthd for LDAP support.") # Ubuntu and RHEL have different ways of enabling the service saslauthd_init_file = operating_system.file_discovery( ['/etc/default/saslauthd']) if saslauthd_init_file: codec = stream_codecs.KeyValueCodec(line_terminator='\n') saslauthd_init = operating_system.read_file( saslauthd_init_file, codec=codec, as_root=True) saslauthd_init['START'] = 'yes' operating_system.write_file( saslauthd_init_file, saslauthd_init, codec=codec, as_root=True) elif operating_system.file_discovery(['/etc/sysconfig/saslauthd']): operating_system.enable_service_on_boot(['saslauthd']) else: LOG.exception(_("Cannot find saslauthd service to enable for LDAP " "client. Skipping.")) return operating_system.start_service(['saslauthd']) saslauthd_conf_file = '/etc/saslauthd.conf' saslauthd_conf = operating_system.read_file( saslauthd_conf_file, stream_codecs.YamlCodec(), as_root=True) saslauthd_conf.update({ 'ldap_servers': CONF.get(self.manager).get('ldap_servers'), 'ldap_search_base': CONF.get(self.manager).get('ldap_search_base') }) ldap_tls_cacert_dir = CONF.get(self.manager).get('ldap_tls_cacert_dir', None) if ldap_tls_cacert_dir: saslauthd_conf.update({ 'ldap_tls_cacert_dir': ldap_tls_cacert_dir, }) ldap_tls_cacert_file = (CONF.get(self.manager) .get('ldap_tls_cacert_file', None)) if ldap_tls_cacert_file: saslauthd_conf.update({ 'ldap_tls_cacert_file': ldap_tls_cacert_file, }) operating_system.write_file( saslauthd_conf_file, saslauthd_conf, stream_codecs.YamlCodec(), as_root=True) LOG.debug("Enabled saslauthd as an LDAP client.")
def pg_version(self):
    """Find the database version file stored in the data directory.

    :returns: A tuple with the path to the version file (in the root of
              the data directory) and the version string.
    """
    version_files = operating_system.list_files_in_directory(
        self.DATA_BASE, recursive=True, pattern='PG_VERSION', as_root=True)
    # The PG_VERSION in the data-directory root has the shortest path;
    # min() replaces sorting the whole list just to take its head.
    version_file = min(version_files, key=len)
    version = operating_system.read_file(version_file, as_root=True)
    return version_file, version.strip()
def _create_oratab_entry(self):
    """Append this database's entry to the oratab file and reset owner."""
    oratab = self.app.paths.oratab_file
    entry = "\n%(db_name)s:%(ora_home)s:N\n" % {
        "db_name": self.db_name,
        "ora_home": self.app.paths.oracle_home,
    }
    contents = operating_system.read_file(oratab, as_root=True) + entry
    operating_system.write_file(oratab, contents, as_root=True)
    operating_system.chown(
        oratab, self.app.instance_owner, self.app.instance_owner_group,
        recursive=True, force=True, as_root=True)
def read_module_result(cls, result_file, default=None):
    """Load a module result (JSON) from disk.

    On a read failure a truthy *default* is returned; with no usable
    default the error is logged and re-raised.
    """
    result_file = cls.get_result_filename(result_file)
    try:
        return operating_system.read_file(result_file,
                                          codec=stream_codecs.JsonCodec())
    except Exception:
        if default:
            return default
        # Nothing to fall back on: surface the failure.
        LOG.exception(_("Could not find module result in %s") % result_file)
        raise
def read_module_result(cls, result_file, default=None):
    """Read the JSON module result file.

    Returns *default* when the read fails and a truthy default was
    supplied; otherwise logs and re-raises the failure.
    """
    filename = cls.get_result_filename(result_file)
    outcome = default
    try:
        outcome = operating_system.read_file(
            filename, codec=stream_codecs.JsonCodec())
    except Exception:
        if not outcome:
            LOG.exception(
                _("Could not find module result in %s") % filename)
            raise
    return outcome
def edit_response_file(self, filename, edits):
    """Edit the given response file.

    Given a dictionary of edits, changes specified occurrences of
    '<key>' to 'value'.

    :param filename: name of the response-file template under rsp/.
    :param edits: mapping of placeholder name -> replacement value.
    :returns: path of the written response file in the user home dir.
    """
    templates_dir = path.join(self.user_home_dir, 'rsp')
    template_file = path.join(templates_dir, filename)
    response_file = path.join(self.user_home_dir, filename)
    contents = operating_system.read_file(template_file, as_root=True)
    # Iterate items directly instead of keys() plus a per-key lookup.
    for key, value in edits.items():
        contents = contents.replace('<{key}>'.format(key=key), value)
    self.write_oracle_user_file(response_file, contents)
    return response_file
def apply(self, name, datastore, ds_version, data_file, admin_module):
    """Apply the license key found in the module contents file.

    :returns: (success, message) tuple.  The failure branch already
              returned a tuple; the success path previously fell through
              to an implicit None, which broke tuple-expecting callers.
    """
    license_key = None
    data = operating_system.read_file(data_file,
                                      codec=stream_codecs.KeyValueCodec())
    for key, value in data.items():
        if 'license_key' == key.lower():
            license_key = value
            break
    if license_key:
        self._add_license_key(license_key)
        self._server_control('start')
        # Explicit success result instead of the implicit None.
        return True, None
    else:
        return False, "'license_key' not found in contents file"
def apply(self, name, datastore, ds_version, data_file, admin_module):
    """Apply the license key found in the module contents file.

    :returns: (success, message) tuple.  The failure branch already
              returned a tuple; the success path previously fell through
              to an implicit None, which broke tuple-expecting callers.
    """
    license_key = None
    data = operating_system.read_file(
        data_file, codec=stream_codecs.KeyValueCodec())
    for key, value in data.items():
        if 'license_key' == key.lower():
            license_key = value
            break
    if license_key:
        self._add_license_key(license_key)
        self._server_control('start')
        # Explicit success result instead of the implicit None.
        return True, None
    else:
        return False, "'license_key' not found in contents file"
def apply_next(self, options):
    """Back up the current base configuration, then merge *options*
    into it and rewrite the base file in place.
    """
    revision_num = self.count_revisions() + 1
    old_revision_backup = guestagent_utils.build_file_path(
        self._revision_backup_dir, self._base_config_name,
        str(revision_num), self._BACKUP_EXT)
    # Take a numbered backup first; preserve ownership/permissions.
    operating_system.copy(self._base_config_path, old_revision_backup,
                          force=True, preserve=True,
                          as_root=self._requires_root)
    current = operating_system.read_file(self._base_config_path,
                                         codec=self._codec)
    # update_dict's return value is ignored here; the code relies on it
    # mutating `current` in place before the write below.
    guestagent_utils.update_dict(options, current)
    operating_system.write_file(self._base_config_path, current,
                                codec=self._codec,
                                as_root=self._requires_root)
    operating_system.chown(self._base_config_path, self._owner,
                           self._group, as_root=self._requires_root)
    operating_system.chmod(self._base_config_path, FileMode.ADD_READ_ALL,
                           as_root=self._requires_root)
def get_master_ref(self, service, snapshot_info):
    """Capture information from a master node.

    Builds a tar archive of the files a new standby needs (standby
    control file, password file, oratab and the Oracle conf file), reads
    it back base64-encoded, and returns a dict describing this master.
    """
    ctlfile = path.join(TMP_DIR,
                        '%s_stby.ctl' % service.admin.database_name)
    datafile = path.join(TMP_DIR, 'oradata.tar.gz')

    def _cleanup_tmp_files():
        # Best-effort removal (force=True ignores missing files).
        operating_system.remove(ctlfile, force=True, as_root=True)
        operating_system.remove(datafile, force=True, as_root=True)

    # Clear leftovers from any previous attempt before starting.
    _cleanup_tmp_files()

    with service.cursor(service.admin.database_name) as cursor:
        # Dump a standby control file for the future slave.
        cursor.execute(
            str(
                sql_query.AlterDatabase(
                    "CREATE STANDBY CONTROLFILE AS '%s'" % ctlfile)))
        # Collect the existing fal_server list, with this database first.
        cursor.execute(
            str(
                sql_query.Query(columns=['VALUE'],
                                tables=['V$PARAMETER'],
                                where=["NAME = 'fal_server'"])))
        row = cursor.fetchone()
        db_list = []
        if row is not None and row[0] is not None:
            db_list = str(row[0]).split(",")
        db_list.insert(0, service.admin.database_name)

        # Create a tar file containing files needed for slave creation
        utils.execute_with_timeout('tar', '-Pczvf', datafile, ctlfile,
                                   service.paths.orapw_file,
                                   service.paths.oratab_file,
                                   CONF.get(MANAGER).conf_file,
                                   run_as_root=True, root_helper='sudo')
        # Read the archive without decoding so the payload stays base64.
        oradata_encoded = operating_system.read_file(
            datafile, codec=stream_codecs.Base64Codec(), as_root=True,
            decode=False)
        _cleanup_tmp_files()
        master_ref = {
            'host': netutils.get_my_ipv4(),
            'db_name': service.admin.database_name,
            'db_list': db_list,
            'oradata': oradata_encoded,
        }
        return master_ref
def _create_oratab_entry(self):
    """Append an oratab entry for this database and fix file ownership."""
    oratab = self.app.paths.oratab_file
    entry = ("\n%(db_name)s:%(ora_home)s:N\n" % {
        'db_name': self.db_name,
        'ora_home': self.app.paths.oracle_home
    })
    contents = operating_system.read_file(oratab, as_root=True) + entry
    operating_system.write_file(oratab, contents, as_root=True)
    operating_system.chown(oratab, self.app.instance_owner,
                           self.app.instance_owner_group, recursive=True,
                           force=True, as_root=True)
def parse_configuration(self):
    """Read contents of the configuration file (applying overrides if
    any) and parse it into a dict.

    :returns: Configuration file as a Python dict.
    """
    base_options = operating_system.read_file(self._base_config_path,
                                              codec=self._codec)
    # Layer the override-strategy updates on top of the base options.
    guestagent_utils.update_dict(self._override_strategy.parse_updates(),
                                 base_options)
    return base_options
def assert_test_log_save(self, client, log_name, publish=False):
    """Save a log via the API and check it matches freshly generated
    log contents.
    """
    self.report.log("Executing log_save for log '%s' (publish: %s)" %
                    (log_name, publish))
    with tempfile.NamedTemporaryFile() as temp_file:
        # Have the API write the log into our temp file, then read it.
        client.instances.log_save(self.instance_info.id,
                                  log_name=log_name, publish=publish,
                                  filename=temp_file.name)
        file_contents = operating_system.read_file(temp_file.name)
        # Regenerate the log and compare with what log_save produced.
        self.assert_log_generator(client, log_name, lines=100000)
        self.assert_equal(self._get_last_log_contents(log_name),
                          file_contents)