def _configure_as_query_router(self):
    """Reconfigure this instance to run the 'mongos' query-router daemon
    instead of a regular 'mongod' cluster member.
    """
    LOG.info(_("Configuring instance as a cluster query router."))
    self.is_query_router = True

    # Write the 'mongos' upstart script.
    # FIXME(pmalik): The control script should really be written in the
    # elements.
    # The guestagent will choose the right daemon ('mongod' or 'mongos')
    # based on the 'cluster_config' values.
    upstart_contents = system.MONGOS_UPSTART_CONTENTS.format(config_file_placeholder=CONFIG_FILE)
    operating_system.write_file(system.MONGOS_UPSTART, upstart_contents, as_root=True)

    # FIXME(pmalik): We should really have a separate configuration
    # template for the 'mongos' process.
    # Remove all storage configurations from the template.
    # They apply only to 'mongod' processes.
    # Already applied overrides will be integrated into the base file and
    # their current groups removed.
    config = guestagent_utils.expand_dict(self.configuration_manager.parse_configuration())
    if "storage" in config:
        LOG.debug("Removing 'storage' directives from the configuration "
                  "template.")
        del config["storage"]
    self.configuration_manager.save_configuration(guestagent_utils.flatten_dict(config))

    # Apply 'mongos' configuration: open the standard port and start with
    # an empty config-server list (populated later by the cluster manager).
    self._configure_network(MONGODB_PORT)
    self.configuration_manager.apply_system_override({"sharding.configDB": ""}, CNF_CLUSTER)
def mount_storage(self, storage_info):
    """Mount shared NFS storage described by ``storage_info``.

    Appends fstab entries for the votedisk, registry and database mounts
    (preserving any existing fstab content) and then runs ``mount -a``.

    :param storage_info: dict with 'type' (only 'nfs' is supported) and
                         'data' mapping of mount source paths.
    :raises exception.GuestError: if the storage type is not 'nfs'.
    """
    fstab = path.join('/etc', 'fstab')
    default_mount_options = ('rw,bg,hard,nointr,tcp,vers=3,timeo=600,'
                             'rsize=32768,wsize=32768,actimeo=0')
    # The database volume gets different options (e.g. 'noac') than the
    # votedisk/registry volumes.
    data_mount_options = ('user,tcp,rsize=32768,wsize=32768,hard,intr,'
                          'noac,nfsvers=3')
    if storage_info['type'] == 'nfs':
        sources = storage_info['data']
        data = list()
        # Keep whatever is already in fstab; new entries are appended.
        if operating_system.exists(fstab):
            data.append(operating_system.read_file(fstab, as_root=True))

        def _line(source, target, options=default_mount_options):
            # Append one fstab-formatted NFS entry.
            data.append('{source} {target} nfs {options} 0 0'.format(
                source=source, target=target, options=options))

        _line(sources['votedisk_mount'], SHARED_DISK_PATHS['votedisk'],)
        _line(sources['registry_mount'], SHARED_DISK_PATHS['registry'],)
        _line(sources['database_mount'], SHARED_DISK_PATHS['database'],
              data_mount_options)
        operating_system.write_file(fstab, '\n'.join(data), as_root=True)
        # Mount everything listed in the rewritten fstab.
        utils.execute_with_timeout('mount', '-a', run_as_root=True,
                                   root_helper='sudo',
                                   timeout=service.ORACLE_TIMEOUT,
                                   log_output_on_error=True)
    else:
        raise exception.GuestError(_(
            "Storage type {t} not valid.").format(t=storage_info['type']))
def save_configuration(self, options):
    """Write the given contents to the base configuration file.

    All existing overrides (system pre/post and user) are removed first.

    :param options: contents of the configuration file (str or dict;
                    dicts are serialized with the configured codec).
    """
    if isinstance(options, dict):
        # Serialize dict input and re-enter with the string form.
        self.save_configuration(self._codec.serialize(options))
        return

    for group in (self.USER_GROUP,
                  self.SYSTEM_PRE_USER_GROUP,
                  self.SYSTEM_POST_USER_GROUP):
        self._override_strategy.remove(group)

    operating_system.write_file(
        self._base_config_path, options, as_root=self._requires_root)
    operating_system.chown(
        self._base_config_path, self._owner, self._group,
        as_root=self._requires_root)
    operating_system.chmod(
        self._base_config_path, FileMode.ADD_READ_ALL,
        as_root=self._requires_root)

    self.refresh_cache()
def __init__(self, status):
    """Set up the Vertica app with a configuration manager backed by a
    placeholder config file.

    :param status: service-status tracker for this instance.
    """
    self.state_change_wait_time = CONF.state_change_wait_time
    self.status = status
    # Overrides are stored next to the Vertica admin tree on the data mount.
    revision_dir = \
        guestagent_utils.build_file_path(
            os.path.join(MOUNT_POINT,
                         os.path.dirname(system.VERTICA_ADMIN)),
            ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
    # Vertica has no single editable config file, so an empty placeholder
    # is created for the ConfigurationManager to operate on.
    if not operating_system.exists(FAKE_CFG):
        operating_system.write_file(FAKE_CFG, '', as_root=True)
        operating_system.chown(FAKE_CFG, system.VERTICA_ADMIN,
                               system.VERTICA_ADMIN_GRP, as_root=True)
        operating_system.chmod(FAKE_CFG, FileMode.ADD_GRP_RX_OTH_RX(),
                               as_root=True)
    self.configuration_manager = \
        ConfigurationManager(FAKE_CFG, system.VERTICA_ADMIN,
                             system.VERTICA_ADMIN_GRP,
                             PropertiesCodec(delimiter='='),
                             requires_root=True,
                             override_strategy=ImportOverrideStrategy(
                                 revision_dir, "cnf"))
def _write_standby_recovery_file(self, service, snapshot, sslmode='prefer'):
    """Write recovery.conf so the instance starts as a hot standby
    replicating from the master described in ``snapshot``.

    :param service: PostgreSQL service object (provides paths/owner).
    :param snapshot: replication snapshot with 'master' connection info
                     and 'log_position' credentials.
    :param sslmode: libpq sslmode for the primary connection.
    """
    # Use lazy %-style logging arguments instead of eager concatenation.
    # NOTE(review): the snapshot includes the replication user's password;
    # consider redacting it before logging at this level.
    LOG.info("Snapshot data received: %s", snapshot)
    logging_config = snapshot['log_position']
    conninfo_params = \
        {'host': snapshot['master']['host'],
         'port': snapshot['master']['port'],
         'repl_user': logging_config['replication_user']['name'],
         'password': logging_config['replication_user']['password'],
         'sslmode': sslmode}
    conninfo = 'host=%(host)s ' \
               'port=%(port)s ' \
               'dbname=os_admin ' \
               'user=%(repl_user)s ' \
               'password=%(password)s ' \
               'sslmode=%(sslmode)s ' % conninfo_params
    recovery_conf = "standby_mode = 'on'\n"
    recovery_conf += "primary_conninfo = '" + conninfo + "'\n"
    # Promotion is triggered by creating this file on the standby.
    recovery_conf += "trigger_file = '/tmp/postgresql.trigger'\n"
    recovery_conf += "recovery_target_timeline='latest'\n"
    operating_system.write_file(service.pgsql_recovery_config,
                                recovery_conf,
                                codec=stream_codecs.IdentityCodec(),
                                as_root=True)
    operating_system.chown(service.pgsql_recovery_config,
                           user=service.pgsql_owner,
                           group=service.pgsql_owner,
                           as_root=True)
def _configure_as_query_router(self):
    """Reconfigure this instance to run the 'mongos' query-router daemon
    instead of a regular 'mongod' cluster member.
    """
    LOG.info(_("Configuring instance as a cluster query router."))
    self.is_query_router = True

    # Write the 'mongos' upstart script.
    # FIXME(pmalik): The control script should really be written in the
    # elements.
    # The guestagent will choose the right daemon ('mongod' or 'mongos')
    # based on the 'cluster_config' values.
    upstart_contents = (system.MONGOS_UPSTART_CONTENTS.format(
        config_file_placeholder=CONFIG_FILE))
    operating_system.write_file(system.MONGOS_UPSTART, upstart_contents,
                                as_root=True)

    # FIXME(pmalik): We should really have a separate configuration
    # template for the 'mongos' process.
    # Remove all storage configurations from the template.
    # They apply only to 'mongod' processes.
    # Already applied overrides will be integrated into the base file and
    # their current groups removed.
    config = guestagent_utils.expand_dict(
        self.configuration_manager.parse_configuration())
    if 'storage' in config:
        LOG.debug("Removing 'storage' directives from the configuration "
                  "template.")
        del config['storage']
    self.configuration_manager.save_configuration(
        guestagent_utils.flatten_dict(config))

    # Apply 'mongos' configuration: open the standard port and start with
    # an empty config-server list (populated later by the cluster manager).
    self._configure_network(MONGODB_PORT)
    self.configuration_manager.apply_system_override(
        {'sharding.configDB': ''}, CNF_CLUSTER)
def __init__(self):
    """Load any persisted Oracle settings from the guest config file,
    creating an empty file (with the ORACLE section) on first run.
    """
    self._admin_pwd = None
    self._sys_pwd = None
    self._db_name = None
    self._db_unique_name = None
    self.codec = stream_codecs.IniCodec()

    if not os.path.isfile(self._CONF_FILE):
        # First run: create the directory and an empty ORACLE section.
        operating_system.create_directory(os.path.dirname(self._CONF_FILE),
                                          as_root=True)
        operating_system.write_file(self._CONF_FILE,
                                    {self._CONF_ORA_SEC: {}},
                                    codec=self.codec, as_root=True)
        return

    config = operating_system.read_file(self._CONF_FILE,
                                        codec=self.codec, as_root=True)
    try:
        ora = config[self._CONF_ORA_SEC]
        if self._CONF_SYS_KEY in ora:
            self._sys_pwd = ora[self._CONF_SYS_KEY]
        if self._CONF_ADMIN_KEY in ora:
            self._admin_pwd = ora[self._CONF_ADMIN_KEY]
        if self._CONF_ROOT_ENABLED in ora:
            self._root_enabled = ora[self._CONF_ROOT_ENABLED]
        if self._CONF_DB_NAME in ora:
            self._db_name = ora[self._CONF_DB_NAME]
        if self._CONF_DB_UNIQUE_NAME in ora:
            self._db_unique_name = ora[self._CONF_DB_UNIQUE_NAME]
    except KeyError:
        # The ORACLE section does not exist; stop parsing.
        pass
def _write_standby_recovery_file(self, service, snapshot, sslmode="prefer"):
    """Write recovery.conf so the instance starts as a hot standby
    replicating from the master described in ``snapshot``.

    :param service: PostgreSQL service object (provides paths/owner).
    :param snapshot: replication snapshot with 'master' connection info
                     and 'log_position' credentials.
    :param sslmode: libpq sslmode for the primary connection.
    """
    # Use lazy %-style logging arguments instead of eager concatenation.
    # NOTE(review): the snapshot includes the replication user's password;
    # consider redacting it before logging at this level.
    LOG.info("Snapshot data received: %s", snapshot)
    logging_config = snapshot["log_position"]
    conninfo_params = {
        "host": snapshot["master"]["host"],
        "port": snapshot["master"]["port"],
        "repl_user": logging_config["replication_user"]["name"],
        "password": logging_config["replication_user"]["password"],
        "sslmode": sslmode,
    }
    conninfo = (
        "host=%(host)s "
        "port=%(port)s "
        "dbname=os_admin "
        "user=%(repl_user)s "
        "password=%(password)s "
        "sslmode=%(sslmode)s " % conninfo_params
    )
    recovery_conf = "standby_mode = 'on'\n"
    recovery_conf += "primary_conninfo = '" + conninfo + "'\n"
    # Promotion is triggered by creating this file on the standby.
    recovery_conf += "trigger_file = '/tmp/postgresql.trigger'\n"
    recovery_conf += "recovery_target_timeline='latest'\n"
    operating_system.write_file(
        service.pgsql_recovery_config,
        recovery_conf,
        codec=stream_codecs.IdentityCodec(),
        as_root=True
    )
    operating_system.chown(
        service.pgsql_recovery_config,
        user=service.pgsql_owner,
        group=service.pgsql_owner,
        as_root=True
    )
def test_import_override_strategy(self):
    """Verify the import strategy keeps the base file untouched while
    applying overrides through separate revision files.
    """
    base_config_contents = {'Section_1': {'name': 'pi',
                                          'is_number': 'True',
                                          'value': '3.1415'}
                            }

    config_overrides_v1 = {'Section_1': {'name': 'sqrt(2)',
                                         'value': '1.4142'}
                           }

    config_overrides_v2 = {'Section_1': {'is_number': 'False'}}

    config_overrides_seq = [config_overrides_v1, config_overrides_v2]
    # The import strategy stores overrides externally, so the base file is
    # expected to remain unchanged at every step.
    expected_contents_seq = [base_config_contents, base_config_contents,
                            base_config_contents]

    codec = IniCodec()
    current_user = getpass.getuser()
    revision_dir = self._create_temp_dir()

    with tempfile.NamedTemporaryFile() as base_config:
        # Write initial config contents.
        operating_system.write_file(
            base_config.name, base_config_contents, codec)

        strategy = ImportOverrideStrategy(revision_dir, 'ext')
        strategy.configure(
            base_config.name, current_user, current_user, codec, False)

        self._assert_import_override_strategy(
            strategy, config_overrides_seq, expected_contents_seq)
def write_oracle_user_file(self, filepath, contents,
                           filemode=operating_system.FileMode.SET_USR_RW):
    """Write a file owned by the Oracle instance user/group.

    :param filepath: absolute path of the file to write.
    :param contents: file contents.
    :param filemode: permissions to set (default: owner read/write only).
    """
    operating_system.write_file(filepath, contents, as_root=True)
    operating_system.chown(filepath, INSTANCE_OWNER, INSTANCE_OWNER_GROUP,
                           force=True, as_root=True)
    operating_system.chmod(filepath, filemode, force=True, as_root=True)
def _test_import_override_strategy(self, system_overrides, user_overrides,
                                   test_multi_rev):
    """Exercise the import override strategy against a temporary base file.

    :param system_overrides: system-level override dicts to apply.
    :param user_overrides: user-level override dicts to apply.
    :param test_multi_rev: whether to exercise multiple revisions.
    """
    base_config_contents = {
        'Section_1': {
            'name': 'pi',
            'is_number': 'True',
            'value': '3.1415'
        }
    }
    codec = IniCodec()
    current_user = getpass.getuser()
    revision_dir = self._create_temp_dir()
    with tempfile.NamedTemporaryFile() as base_config:
        # Write initial config contents.
        operating_system.write_file(base_config.name, base_config_contents,
                                    codec)
        strategy = ImportOverrideStrategy(revision_dir, 'ext')
        strategy.configure(base_config.name, current_user, current_user,
                           codec, False)
        self._assert_import_override_strategy(strategy, system_overrides,
                                              user_overrides,
                                              test_multi_rev)
def write_module_contents(cls, module_dir, contents, md5, use_root=False):
    """Persist base64-encoded module contents under ``module_dir``.

    :param module_dir: directory holding this module's files.
    :param contents: already-encoded module payload (written verbatim,
                     hence ``encode=False``).
    :param md5: checksum of the contents (accepted for interface parity;
                not used here).
    :param use_root: write the file via sudo when True. New parameter,
                     defaults to False to preserve previous behavior.
    :returns: path of the written contents file.
    """
    contents_file = cls.build_contents_filename(module_dir)
    operating_system.write_file(contents_file, contents,
                                codec=stream_codecs.Base64Codec(),
                                encode=False, as_root=use_root)
    return contents_file
def apply(self, group_name, change_id, options):
    """Apply configuration overrides as an import revision file.

    If a revision for (group_name, change_id) already exists it is updated
    in place; otherwise a new sequentially-numbered file is created.

    :param group_name: override group (e.g. system or user).
    :param change_id: identifier of this change within the group.
    :param options: dict of option overrides to store.
    """
    self._initialize_import_directory()
    revision_file = self._find_revision_file(group_name, change_id)
    if revision_file is None:
        # Create a new file.
        last_revision_index = self._get_last_file_index(group_name)
        revision_file = guestagent_utils.build_file_path(
            self._revision_dir,
            '%s-%03d-%s' % (group_name, last_revision_index + 1, change_id),
            self._revision_ext)
    else:
        # Update the existing file.
        current = operating_system.read_file(
            revision_file, codec=self._codec, as_root=self._requires_root)
        # New values take precedence over the currently stored ones.
        options = guestagent_utils.update_dict(options, current)

    operating_system.write_file(
        revision_file, options, codec=self._codec,
        as_root=self._requires_root)
    operating_system.chown(
        revision_file, self._owner, self._group,
        as_root=self._requires_root)
    operating_system.chmod(
        revision_file, FileMode.ADD_READ_ALL, as_root=self._requires_root)
def reset_configuration(self, options, remove_overrides=False):
    """Write given contents to the base configuration file.

    Remove all existing overrides (both system and user) as required.

    :param options: Contents of the configuration file (string or dict).
    :param remove_overrides: Remove the overrides or not.
    """
    if isinstance(options, dict):
        # Serialize dict input and re-enter with the string form.
        self.reset_configuration(self._codec.serialize(options),
                                 remove_overrides=remove_overrides)
        return

    if remove_overrides:
        for group in (self.USER_GROUP,
                      self.SYSTEM_PRE_USER_GROUP,
                      self.SYSTEM_POST_USER_GROUP):
            self._override_strategy.remove(group)

    operating_system.write_file(self._base_config_path, options,
                                as_root=self._requires_root)
    operating_system.chown(self._base_config_path, self._owner,
                           self._group, as_root=self._requires_root)
    operating_system.chmod(self._base_config_path, FileMode.ADD_READ_ALL,
                           as_root=self._requires_root)

    self.refresh_cache()
def save_configuration(self, options):
    """Write given contents to the base configuration file.
    Remove all existing overrides (both system and user).

    :param options: Contents of the configuration file (string or dict).
    """
    if isinstance(options, dict):
        # Serialize a dict of options for writing.
        self.save_configuration(self._codec.serialize(options))
    else:
        # Drop every override group before replacing the base file.
        self._override_strategy.remove(self.USER_GROUP)
        self._override_strategy.remove(self.SYSTEM_GROUP)

        operating_system.write_file(self._base_config_path, options,
                                    as_root=self._requires_root)
        operating_system.chown(self._base_config_path, self._owner,
                               self._group, as_root=self._requires_root)
        operating_system.chmod(self._base_config_path,
                               FileMode.ADD_READ_ALL,
                               as_root=self._requires_root)

        self._refresh_cache()
def _test_file_codec(self, data, read_codec, write_codec=None,
                     expected_data=None, expected_exception=None,
                     reverse_encoding=False):
    """Round-trip ``data`` through write_file/read_file with codecs.

    :param data: payload to write.
    :param read_codec: codec used for reading back.
    :param write_codec: codec used for writing (defaults to read_codec).
    :param expected_data: expected read-back value (defaults to ``data``).
    :param expected_exception: context-manager asserting a raised error.
    :param reverse_encoding: if True, disable encode-on-write and
                             decode-on-read.
    """
    write_codec = write_codec or read_codec

    with tempfile.NamedTemporaryFile() as test_file:
        encode = True
        decode = True
        if reverse_encoding:
            encode = False
            decode = False
        if expected_exception:
            with expected_exception:
                operating_system.write_file(test_file.name, data,
                                            codec=write_codec,
                                            encode=encode)
                operating_system.read_file(test_file.name,
                                           codec=read_codec,
                                           decode=decode)
        else:
            operating_system.write_file(test_file.name, data,
                                        codec=write_codec,
                                        encode=encode)
            read = operating_system.read_file(test_file.name,
                                              codec=read_codec,
                                              decode=decode)
            if expected_data is not None:
                self.assertEqual(expected_data, read)
            else:
                # No explicit expectation: the round trip must be lossless.
                self.assertEqual(data, read)
def reset_root_password(self):
    """Reset the password of the localhost root account used by Trove
    for initial datastore configuration.
    """
    with tempfile.NamedTemporaryFile(mode='w') as init_file:
        # The init file carries the SQL that resets the root password;
        # mysqld executes it on startup.
        operating_system.write_file(init_file.name,
                                    self.RESET_ROOT_MYSQL_COMMANDS)
        operating_system.chmod(init_file.name, FileMode.ADD_READ_ALL,
                               as_root=True)
        # Do not attempt to delete the file as the 'trove' user.
        # The process writing into it may have assumed its ownership.
        # Only owners can delete temporary
        # files (restricted deletion).
        err_log_file = tempfile.NamedTemporaryFile(
            suffix=self._ERROR_LOG_SUFFIX,
            delete=False)
        try:
            # As of MySQL 5.7.6, for MySQL installation using an RPM
            # distribution, server startup and shutdown is managed by
            # systemd on several Linux platforms. On these platforms,
            # mysqld_safe is no longer installed because it is
            # unnecessary.
            if self._mysqld_safe_cmd_exists():
                self._start_mysqld_safe_with_init_file(
                    init_file, err_log_file)
            else:
                self._start_mysqld_with_init_file(init_file)
        finally:
            err_log_file.close()
            operating_system.remove(err_log_file.name, force=True,
                                    as_root=True)
def apply(self, group_name, change_id, options):
    """Apply configuration overrides as an import revision file.

    If a revision for (group_name, change_id) already exists it is updated
    in place; otherwise a new sequentially-numbered file is created.

    :param group_name: override group (e.g. system or user).
    :param change_id: identifier of this change within the group.
    :param options: dict of option overrides to store.
    """
    revision_file = self._find_revision_file(group_name, change_id)
    if revision_file is None:
        # Create a new file.
        last_revision_index = self._get_last_file_index(group_name)
        revision_file = guestagent_utils.build_file_path(
            self._revision_dir,
            '%s-%03d-%s' % (group_name, last_revision_index + 1, change_id),
            self._revision_ext)
    else:
        # Update the existing file.
        # NOTE(review): the read is not done as root even when
        # self._requires_root is set (the write below is) — confirm the
        # revision dir is always readable by the agent user.
        current = operating_system.read_file(revision_file,
                                             codec=self._codec)
        # New values take precedence over the currently stored ones.
        options = guestagent_utils.update_dict(options, current)

    operating_system.write_file(revision_file, options, codec=self._codec,
                                as_root=self._requires_root)
    operating_system.chown(revision_file, self._owner, self._group,
                           as_root=self._requires_root)
    operating_system.chmod(revision_file, FileMode.ADD_READ_ALL,
                           as_root=self._requires_root)
def _generate_root_password(client):
    """Generate, set, and preserve a random password for root@localhost
    when invoking mysqladmin to determine the execution status of the
    mysql service.

    :param client: SQLAlchemy-style connection used to run the statement.
    """
    localhost = "localhost"
    new_password = utils.generate_random_password()
    uu = sql_query.SetPassword(models.MySQLUser.root_username,
                               host=localhost,
                               new_password=new_password)
    t = text(str(uu))
    client.execute(t)

    # Save the password to root's private .my.cnf file.
    # BUG FIX: the 'user' entry must name the account whose password was
    # just set above (the MySQL root user); with a placeholder value the
    # defaults file cannot be used to authenticate.
    root_sect = {
        'client': {
            'user': models.MySQLUser.root_username,
            'password': new_password,
            'host': localhost
        }
    }
    operating_system.write_file('/root/.my.cnf', root_sect,
                                codec=IniCodec(), as_root=True)
def reset_root_password(self):
    """Reset the password of the localhost root account used by Trove
    for initial datastore configuration.
    """
    # Pre-initialize so the finally block cannot hit a NameError if
    # temporary-file creation itself fails part-way through.
    init_file = None
    err_log_file = None
    try:
        # Do not attempt to delete these files as the 'trove' user.
        # The process writing into it may have assumed its ownership.
        # Only owners can delete temporary files (restricted deletion).
        init_file = tempfile.NamedTemporaryFile(mode='w', delete=False)
        operating_system.write_file(init_file.name,
                                    self.RESET_ROOT_MYSQL_COMMANDS)
        operating_system.chmod(init_file.name, FileMode.ADD_READ_ALL,
                               as_root=True)
        err_log_file = tempfile.NamedTemporaryFile(
            suffix=self._ERROR_LOG_SUFFIX,
            delete=False)
        self._start_mysqld_safe_with_init_file(init_file, err_log_file)
    finally:
        # Clean up only the files that were actually created.
        if init_file is not None:
            init_file.close()
            operating_system.remove(init_file.name, force=True,
                                    as_root=True)
        if err_log_file is not None:
            err_log_file.close()
            operating_system.remove(err_log_file.name, force=True,
                                    as_root=True)
def prep_pfile_management(self):
    """Generate the base PFILE from the original SPFILE, cleanse it of
    internal settings, create a backup spfile, and initialize the
    configuration manager to use it.
    """
    self.admin.create_pfile(target=self.paths.os_pfile, from_memory=True)
    parameters = operating_system.read_file(
        self.paths.os_pfile, codec=self.pfile_codec(), as_root=True)
    # Keep only public parameters: drop hidden ('_'-prefixed) keys and
    # any value referencing internal 'rdbms' settings.
    cleansed_parameters = {
        key: val for key, val in parameters.items()
        if not key.startswith('_') and val.find('rdbms') == -1
    }
    operating_system.write_file(
        self.paths.os_pfile, cleansed_parameters,
        codec=self.pfile_codec(), as_root=True)
    self.admin.create_spfile(target=self.paths.base_spfile,
                             source=self.paths.os_pfile)
    self._init_configuration_manager()
def apply_access_rules(self):
    """PostgreSQL Client authentication settings

    The order of entries is important. The first failure to authenticate
    stops the lookup. That is why the 'local' connections validate first.
    The OrderedDict is necessary to guarantee the iteration order.
    """
    LOG.debug("Applying client authentication access rules.")
    # Trusted local/loopback access for the superuser; everything else
    # from the superuser is rejected, remaining users authenticate by md5.
    access_rules = OrderedDict([
        ('local', [['all', SUPER_USER_NAME, None, 'trust'],
                   ['replication', SUPER_USER_NAME, None, 'trust'],
                   ['all', 'all', None, 'md5']]),
        ('host', [['all', SUPER_USER_NAME, '127.0.0.1/32', 'trust'],
                  ['all', SUPER_USER_NAME, '::1/128', 'trust'],
                  ['all', SUPER_USER_NAME, 'localhost', 'trust'],
                  ['all', SUPER_USER_NAME, '0.0.0.0/0', 'reject'],
                  ['all', SUPER_USER_NAME, '::/0', 'reject'],
                  ['all', 'all', '0.0.0.0/0', 'md5'],
                  ['all', 'all', '::/0', 'md5']])
    ])
    operating_system.write_file(
        HBA_CONFIG_FILE, access_rules,
        stream_codecs.PropertiesCodec(string_mappings={'\t': None}),
        as_root=True)
    operating_system.chown(HBA_CONFIG_FILE,
                           CONF.database_service_uid,
                           CONF.database_service_uid,
                           as_root=True)
    # pg_hba.conf must be readable only by the database service user.
    operating_system.chmod(HBA_CONFIG_FILE,
                           operating_system.FileMode.SET_USR_RO,
                           as_root=True)
def reset_root_password(self):
    """Reset the password of the localhost root account used by Trove
    for initial datastore configuration.
    """
    with tempfile.NamedTemporaryFile(mode='w') as init_file:
        # The init file carries the SQL that resets the root password;
        # mysqld executes it on startup.
        operating_system.write_file(init_file.name,
                                    self.RESET_ROOT_MYSQL_COMMANDS)
        operating_system.chmod(init_file.name, FileMode.ADD_READ_ALL,
                               as_root=True)
        # Do not attempt to delete the file as the 'trove' user.
        # The process writing into it may have assumed its ownership.
        # Only owners can delete temporary
        # files (restricted deletion).
        err_log_file = tempfile.NamedTemporaryFile(
            suffix=self._ERROR_LOG_SUFFIX,
            delete=False)
        try:
            # As of MySQL 5.7.6, for MySQL installation using an RPM
            # distribution, server startup and shutdown is managed by
            # systemd on several Linux platforms. On these platforms,
            # mysqld_safe is no longer installed because it is
            # unnecessary.
            if self._mysqld_safe_cmd_exists():
                self._start_mysqld_safe_with_init_file(
                    init_file, err_log_file)
            else:
                self._start_mysqld_with_init_file(init_file)
        finally:
            err_log_file.close()
            operating_system.remove(
                err_log_file.name, force=True, as_root=True)
def write_module_contents(cls, module_dir, contents, md5, use_root=False):
    """Persist base64-encoded module contents under ``module_dir``.

    :param module_dir: directory holding this module's files.
    :param contents: already-encoded module payload (written verbatim,
                     hence ``encode=False``).
    :param md5: checksum of the contents (accepted but not used here).
    :param use_root: write the file via sudo when True.
    :returns: path of the written contents file.
    """
    contents_file = cls.build_contents_filename(module_dir)
    operating_system.write_file(contents_file, contents,
                                codec=stream_codecs.Base64Codec(),
                                encode=False,
                                as_root=use_root)
    return contents_file
def _save_value_in_file(self, option, value):
    """Persist one option into the managed configuration file.

    :param option: logical option name; mapped to the on-disk key via
                   ``self.key_names``.
    :param value: value to store under ``self.section_name``.
    """
    current = operating_system.read_file(
        self.file_path, codec=self._codec, as_root=True)
    key = self.key_names[option]
    current[self.section_name][key] = value
    operating_system.write_file(
        self.file_path, current, codec=self._codec, as_root=True)
def _write_standby_recovery_file(self, snapshot, sslmode='prefer'):
    """Write recovery.conf so the instance starts as a hot standby
    replicating from the master described in ``snapshot``.

    :param snapshot: replication snapshot with 'master' connection info
                     and 'log_position' credentials.
    :param sslmode: libpq sslmode for the primary connection.
    """
    # Use lazy %-style logging arguments instead of eager concatenation.
    # NOTE(review): the snapshot includes the replication user's password;
    # consider redacting it before logging at this level.
    LOG.info("Snapshot data received: %s", snapshot)
    logging_config = snapshot['log_position']
    conninfo_params = \
        {'host': snapshot['master']['host'],
         'port': snapshot['master']['port'],
         'repl_user': logging_config['replication_user']['name'],
         'password': logging_config['replication_user']['password'],
         'sslmode': sslmode}
    conninfo = 'host=%(host)s ' \
               'port=%(port)s ' \
               'dbname=os_admin ' \
               'user=%(repl_user)s ' \
               'password=%(password)s ' \
               'sslmode=%(sslmode)s ' % conninfo_params
    recovery_conf = "standby_mode = 'on'\n"
    recovery_conf += "primary_conninfo = '" + conninfo + "'\n"
    # Promotion is triggered by creating this file on the standby.
    recovery_conf += "trigger_file = '/tmp/postgresql.trigger'\n"
    recovery_conf += "recovery_target_timeline='latest'\n"
    operating_system.write_file(self.PGSQL_RECOVERY_CONFIG, recovery_conf,
                                codec=stream_codecs.IdentityCodec(),
                                as_root=True)
    # BUG FIX: the chown user was a placeholder; the recovery file must be
    # owned by the postgres service account (matching the group).
    operating_system.chown(self.PGSQL_RECOVERY_CONFIG, user="postgres",
                           group="postgres", as_root=True)
def test_write_file_as_root_with_error(self, copy_mock):
    """A failed root copy must not leave the intermediate temp file
    behind.
    """
    target_file = tempfile.NamedTemporaryFile()
    temp_file = tempfile.NamedTemporaryFile()
    # Force write_file's internal temp file to be ours so we can verify
    # it gets cleaned up when the 'copy' step raises.
    with patch("tempfile.NamedTemporaryFile", return_value=temp_file):
        with ExpectedException(Exception, "Error while executing 'copy'."):
            operating_system.write_file(target_file.name, "Lorem Ipsum",
                                        as_root=True)
    self.assertFalse(os.path.exists(temp_file.name))
def _create_replication_user(self, pwfile):
    """Create the replication user.
    Unfortunately, to be able to run pg_rewind, we need SUPERUSER, not
    just REPLICATION privilege.

    :param pwfile: path where the generated password is persisted
                   (owned by postgres, mode 0600).
    :returns: the generated password.
    """
    pw = utils.generate_random_password()
    operating_system.write_file(pwfile, pw, as_root=True)
    operating_system.chown(pwfile, user=self.PGSQL_OWNER,
                           group=self.PGSQL_OWNER, as_root=True)
    operating_system.chmod(pwfile, FileMode.OCTAL_MODE("0600"),
                           as_root=True)
    # TODO(atomic77) Alter user is swallowing the replication
    # option for some reason -- enable this code when the
    # underlying issue is fixed
    # repl_user = models.PostgreSQLUser(name=REPL_USER,
    #                                   password=REPL_PW)
    # self._create_user(context=None, user=repl_user)
    # self.alter_user(None, repl_user, 'REPLICATION', 'LOGIN')
    pgutil.psql("CREATE USER %s SUPERUSER ENCRYPTED "
                "password '%s';" % (REPL_USER, pw))
    return pw
def reset_password_for_restore(self, ds_version=None,
                               data_dir='/var/lib/mysql/data'):
    """Reset the root password after restore the db data.

    We create a temporary database container by running mysqld_safe to
    reset the root password.

    :param ds_version: datastore version for the temporary container.
    :param data_dir: restored MySQL data directory to start against.
    """
    LOG.info('Starting to reset password for restore')
    # Reuse the persisted root password when available, otherwise
    # generate and persist a fresh one.
    try:
        root_pass = self.app.get_auth_password(file="root.cnf")
    except exception.UnprocessableEntity:
        root_pass = utils.generate_random_password()
        self.app.save_password('root', root_pass)
    with tempfile.NamedTemporaryFile(mode='w') as init_file, \
            tempfile.NamedTemporaryFile(suffix='.err') as err_file:
        # mysqld executes the init file's SQL at startup.
        operating_system.write_file(
            init_file.name,
            f"ALTER USER 'root'@'localhost' IDENTIFIED BY '{root_pass}';")
        command = (f'mysqld_safe --init-file={init_file.name} '
                   f'--log-error={err_file.name} '
                   f'--datadir={data_dir}')
        # Bind-mount both temp files into the container at the same paths
        # referenced by the command line.
        extra_volumes = {
            init_file.name: {
                "bind": init_file.name,
                "mode": "rw"
            },
            err_file.name: {
                "bind": err_file.name,
                "mode": "rw"
            },
        }
        # Allow database service user to access the temporary files.
        for file in [init_file.name, err_file.name]:
            operating_system.chmod(file,
                                   operating_system.FileMode.SET_ALL_RWX(),
                                   force=True, as_root=True)
        try:
            self.app.start_db(ds_version=ds_version, command=command,
                              extra_volumes=extra_volumes)
        except Exception as err:
            LOG.error('Failed to reset password for restore, error: %s',
                      str(err))
            LOG.debug('Content in init error log file: %s',
                      err_file.read())
            raise err
        finally:
            # Always capture logs and tear down the temporary container.
            LOG.debug(
                'The init container log: %s',
                docker_util.get_container_logs(self.app.docker_client))
            docker_util.remove_container(self.app.docker_client)
    LOG.info('Finished to reset password for restore')
def begin_install(self):
    """Called right before DB is prepared."""
    # Touch the prepare-start marker (empty file) so later stages can
    # tell that prepare has begun.
    marker_path = guestagent_utils.build_file_path(
        self.GUESTAGENT_DIR, self.PREPARE_START_FILENAME)
    operating_system.write_file(marker_path, '')
    self.prepare_completed = False
    self.set_status(instance.ServiceStatuses.BUILDING, True)
def begin_install(self):
    """First call of the DB prepare."""
    # Touch the prepare-start marker (empty file) so later stages can
    # tell that prepare has begun.
    marker_path = guestagent_utils.build_file_path(
        self.GUESTAGENT_DIR, self.PREPARE_START_FILENAME)
    operating_system.write_file(marker_path, '')
    self.__refresh_prepare_completed()
    self.set_status(instance.ServiceStatuses.BUILDING, True)
def apply_next(self, options):
    """Write ``options`` as the next numbered revision file.

    :param options: configuration contents for the new revision.
    """
    # Revisions are numbered sequentially starting at 1.
    revision_num = self.count_revisions() + 1
    revision_file_path = guestagent_utils.build_file_path(
        self._revision_dir,
        self._base_config_name,
        str(revision_num),
        self._revision_ext
    )
    operating_system.write_file(revision_file_path, options,
                                codec=self._codec,
                                as_root=self._requires_root)
    operating_system.chown(revision_file_path, self._owner, self._group,
                           as_root=self._requires_root)
    operating_system.chmod(revision_file_path, FileMode.ADD_READ_ALL,
                           as_root=self._requires_root)
def test_write_file_as_root_with_error(self, copy_mock):
    """A failed root copy must not leave the intermediate temp file
    behind.
    """
    target_file = tempfile.NamedTemporaryFile()
    temp_file = tempfile.NamedTemporaryFile()
    # Force write_file's internal temp file to be ours so we can verify
    # it gets cleaned up when the 'copy' step raises.
    with patch('tempfile.NamedTemporaryFile', return_value=temp_file):
        with ExpectedException(Exception, "Error while executing 'copy'."):
            operating_system.write_file(target_file.name, "Lorem Ipsum",
                                        as_root=True)
    self.assertFalse(os.path.exists(temp_file.name))
def test_write_file_as_root(self, copy_mock):
    """Root writes go through a temp file that is copied into place and
    then removed.
    """
    target_file = tempfile.NamedTemporaryFile()
    temp_file = tempfile.NamedTemporaryFile()
    # Force write_file's internal temp file to be ours so we can assert
    # on the copy call and the cleanup.
    with patch("tempfile.NamedTemporaryFile", return_value=temp_file):
        operating_system.write_file(target_file.name, "Lorem Ipsum",
                                    as_root=True)
    copy_mock.assert_called_once_with(temp_file.name, target_file.name,
                                      force=True, as_root=True)
    self.assertFalse(os.path.exists(temp_file.name))
def __create_cqlsh_config(self, sections):
    """Write the cqlsh configuration file with restricted permissions.

    :param sections: INI-style dict of config sections to persist.
    """
    config_path = self._get_cqlsh_conf_path()
    config_dir = os.path.dirname(config_path)
    # Ensure the directory exists and carries the expected mode either way.
    if not os.path.exists(config_dir):
        os.mkdir(config_dir, self._CONF_DIR_MODS)
    else:
        os.chmod(config_dir, self._CONF_DIR_MODS)
    operating_system.write_file(config_path, sections, codec=IniCodec())
    # The file may contain credentials; tighten its mode after writing.
    os.chmod(config_path, self._CONF_FILE_MODS)
def init_config(self):
    """Initialize the DB2 configuration manager and capture the default
    database-manager configuration for later resets.
    """
    if not operating_system.exists(MOUNT_POINT, True):
        operating_system.create_directory(MOUNT_POINT,
                                          system.DB2_INSTANCE_OWNER,
                                          system.DB2_INSTANCE_OWNER,
                                          as_root=True)
    """
    The database manager configuration file - db2systm is stored under the
    /home/db2inst1/sqllib directory. To update the configuration
    parameters, DB2 recommends using the command - UPDATE DBM
    CONFIGURATION commands instead of directly updating the config file.

    The existing PropertiesCodec implementation has been reused to
    handle text-file operations. Configuration overrides are implemented
    using the ImportOverrideStrategy of the guestagent configuration
    manager.
    """
    LOG.debug("Initialize DB2 configuration")
    revision_dir = (
        guestagent_utils.build_file_path(
            os.path.join(MOUNT_POINT,
                         os.path.dirname(system.DB2_INSTANCE_OWNER)),
            ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
    )
    # DB2 config is managed via commands, not a file; a placeholder file
    # is created for the ConfigurationManager to operate on.
    if not operating_system.exists(FAKE_CFG):
        operating_system.write_file(FAKE_CFG, '', as_root=True)
        operating_system.chown(FAKE_CFG, system.DB2_INSTANCE_OWNER,
                               system.DB2_INSTANCE_OWNER, as_root=True)
    self.configuration_manager = (
        ConfigurationManager(FAKE_CFG, system.DB2_INSTANCE_OWNER,
                             system.DB2_INSTANCE_OWNER,
                             PropertiesCodec(delimiter='='),
                             requires_root=True,
                             override_strategy=ImportOverrideStrategy(
                                 revision_dir, "cnf"))
    )
    '''
    Below we are getting the database manager default configuration and
    saving it to the DB2_DEFAULT_CFG file. This is done to help with
    correctly resetting the configurations to the original values when
    user wants to detach a user-defined configuration group from an
    instance.

    DB2 provides a command to reset the database manager configuration
    parameters (RESET DBM CONFIGURATION) but this command resets all the
    configuration parameters to the system defaults. When we build a
    DB2 guest image there are certain configurations parameters like
    SVCENAME which we set so that the instance can start correctly.
    Hence resetting this value to the system default will render the
    instance in an unstable state. Instead, the recommended way for
    resetting a subset of configuration parameters is to save the output
    of GET DBM CONFIGURATION of the original configuration and then call
    UPDATE DBM CONFIGURATION to reset the value.
      http://www.ibm.com/support/knowledgecenter/SSEPGG_10.5.0/
    com.ibm.db2.luw.admin.cmd.doc/doc/r0001970.html
    '''
    if not operating_system.exists(DB2_DEFAULT_CFG):
        run_command(system.GET_DBM_CONFIGURATION % {
            "dbm_config": DB2_DEFAULT_CFG})
    self.process_default_dbm_config()
def _create_oratab_entry(self):
    """Create in the /etc/oratab file entries for the databases being
    restored.
    """
    # NOTE(review): the read is done without as_root while the write is
    # root — confirm /etc/oratab is world-readable on the guest image.
    file_content = operating_system.read_file(ORATAB_PATH)
    # Format: <db_name>:<oracle_home>:<start-at-boot flag>.
    file_content += ("\n%(db_name)s:%(ora_home)s:N\n" %
                     {'db_name': self.db_name, 'ora_home': ORACLE_HOME})
    operating_system.write_file(ORATAB_PATH, file_content, as_root=True)
    # NOTE(review): recursive=True on a single file appears redundant.
    operating_system.chown(ORATAB_PATH, 'oracle', 'oinstall',
                           recursive=True, force=True, as_root=True)
def test_read_write_file_input_validation(self):
    """read_file/write_file must reject missing files and invalid paths
    with UnprocessableEntity.
    """
    with ExpectedException(exception.UnprocessableEntity,
                           "File does not exist: None"):
        operating_system.read_file(None)

    with ExpectedException(exception.UnprocessableEntity,
                           "File does not exist: /__DOES_NOT_EXIST__"):
        operating_system.read_file("/__DOES_NOT_EXIST__")

    with ExpectedException(exception.UnprocessableEntity,
                           "Invalid path: None"):
        operating_system.write_file(None, {})
def _save_value_in_file(self, param, value):
    """Persist one parameter into the ORACLE section of the guest
    config file.

    :param param: key to set within the ORACLE section.
    :param value: value to store.
    """
    current = operating_system.read_file(
        self._CONF_FILE, codec=self.codec, as_root=True)
    current[self._CONF_ORA_SEC][param] = value
    operating_system.write_file(
        self._CONF_FILE, current, codec=self.codec, as_root=True)
def test_rolling_override_strategy(self):
    """Verify the rolling strategy folds each override into the base
    file, accumulating changes across revisions.
    """
    base_config_contents = {
        'Section_1': {
            'name': 'pi',
            'is_number': 'True',
            'value': '3.1415'
        }
    }

    config_overrides_v1 = {
        'Section_1': {
            'name': 'sqrt(2)',
            'value': '1.4142'
        }
    }

    # v1 overrides merged into the base contents.
    expected_contents_v1 = {
        'Section_1': {
            'name': 'sqrt(2)',
            'is_number': 'True',
            'value': '1.4142'
        }
    }

    config_overrides_v2 = {'Section_1': {'is_number': 'False'}}

    # v2 applied on top of v1 (changes accumulate).
    expected_contents_v2 = {
        'Section_1': {
            'name': 'sqrt(2)',
            'is_number': 'False',
            'value': '1.4142'
        }
    }

    config_overrides_seq = [config_overrides_v1, config_overrides_v2]
    expected_contents_seq = [
        base_config_contents, expected_contents_v1, expected_contents_v2
    ]

    codec = IniCodec()
    current_user = getpass.getuser()
    backup_config_dir = self._create_temp_dir()

    with tempfile.NamedTemporaryFile() as base_config:
        # Write initial config contents.
        operating_system.write_file(base_config.name,
                                    base_config_contents, codec)

        strategy = RollingOverrideStrategy(backup_config_dir)
        strategy.configure(base_config.name, current_user, current_user,
                           codec, False)

        self._assert_rolling_override_strategy(strategy,
                                               config_overrides_seq,
                                               expected_contents_seq)
def _save_value_in_file(self, option, value):
    """Map 'option' to its on-disk key name and persist 'value'."""
    key = self.key_names[option]
    contents = operating_system.read_file(self.file_path,
                                          codec=self._codec, as_root=True)
    contents[self.section_name][key] = value
    operating_system.write_file(self.file_path, contents,
                                codec=self._codec, as_root=True)
def store_key(self, key):
    """Write the MongoDB cluster key file and lock down its access."""
    LOG.debug('Storing key for MongoDB cluster.')
    key_file = system.MONGO_KEY_FILE
    operating_system.write_file(key_file, key, as_root=True)
    # Key file must be owner-read-only for mongod to accept it.
    operating_system.chmod(key_file,
                           operating_system.FileMode.SET_USR_RO,
                           as_root=True)
    operating_system.chown(key_file, system.MONGO_USER, system.MONGO_USER,
                           as_root=True)
def test_write_file_as_root(self, copy_mock):
    """write_file(as_root=True) stages via a temp file, then sudo-copies."""
    target = tempfile.NamedTemporaryFile()
    staging = tempfile.NamedTemporaryFile()

    with patch('tempfile.NamedTemporaryFile', return_value=staging):
        operating_system.write_file(
            target.name, "Lorem Ipsum", as_root=True)
        copy_mock.assert_called_once_with(
            staging.name, target.name, force=True, as_root=True)
        # The staging file must be removed once the copy completes.
        self.assertFalse(os.path.exists(staging.name))
def _assert_get_value(self, override_strategy):
    """Verify get_value() tracks applied and removed overrides.

    Walks a base configuration through two override revisions and back
    down to the base, asserting the visible values at every step.

    :param override_strategy:   Override strategy under test.
    """
    base_config_contents = {'Section_1': {'name': 'pi',
                                          'is_number': 'True',
                                          'value': '3.1415'}
                            }

    config_overrides_v1 = {'Section_1': {'name': 'sqrt(2)',
                                         'value': '1.4142'}
                           }

    config_overrides_v2 = {'Section_1': {'name': 'e',
                                         'value': '2.7183'},
                           'Section_2': {'foo': 'bar'}
                           }

    codec = IniCodec()
    current_user = getpass.getuser()

    with tempfile.NamedTemporaryFile() as base_config:

        # Write initial config contents.
        operating_system.write_file(
            base_config.name, base_config_contents, codec)

        manager = ConfigurationManager(
            base_config.name, current_user, current_user, codec,
            requires_root=False)
        manager.set_override_strategy(override_strategy, 2)

        # Test default value.
        # NOTE: assertIsNone instead of assertEqual(None, ...) per the
        # OpenStack hacking check (H203).
        self.assertIsNone(manager.get_value('Section_2'))
        self.assertEqual('foo', manager.get_value('Section_2', 'foo'))

        # Test value before applying overrides.
        self.assertEqual('pi', manager.get_value('Section_1')['name'])
        self.assertEqual('3.1415', manager.get_value('Section_1')['value'])

        # Test value after applying overrides.
        manager.apply_override(config_overrides_v1)
        self.assertEqual('sqrt(2)', manager.get_value('Section_1')['name'])
        self.assertEqual('1.4142', manager.get_value('Section_1')['value'])
        manager.apply_override(config_overrides_v2)
        self.assertEqual('e', manager.get_value('Section_1')['name'])
        self.assertEqual('2.7183', manager.get_value('Section_1')['value'])
        self.assertEqual('bar', manager.get_value('Section_2')['foo'])

        # Test value after removing overrides.
        manager.remove_override()
        self.assertEqual('sqrt(2)', manager.get_value('Section_1')['name'])
        self.assertEqual('1.4142', manager.get_value('Section_1')['value'])
        manager.remove_override()
        self.assertEqual('pi', manager.get_value('Section_1')['name'])
        self.assertEqual('3.1415', manager.get_value('Section_1')['value'])
        self.assertIsNone(manager.get_value('Section_2'))
def _save_authentication_properties(self, admin_password):
    """Write the admin client credentials to the client auth file."""
    credentials = {
        'client': {
            'user': ADMIN_USER_NAME,
            'password': admin_password,
            'host': '127.0.0.1',
        }
    }
    operating_system.write_file(self.get_client_auth_file(), credentials,
                                codec=self.CFG_CODEC)
def _create_oratab_entry(self):
    """Register the restored database in the oratab file."""
    oratab = self.app.paths.oratab_file
    entry = "\n%(db_name)s:%(ora_home)s:N\n" % {
        "db_name": self.db_name,
        "ora_home": self.app.paths.oracle_home,
    }
    contents = operating_system.read_file(oratab, as_root=True) + entry
    operating_system.write_file(oratab, contents, as_root=True)

    owner = self.app.instance_owner
    group = self.app.instance_owner_group
    operating_system.chown(oratab, owner, group,
                           recursive=True, force=True, as_root=True)
def save_password(user, password):
    """Persist client credentials for 'user' as an INI-style .cnf file."""
    credentials = {
        'client': {
            'user': user,
            'password': password,
            'host': "localhost",
        }
    }
    target_path = '/opt/trove-guestagent/%s.cnf' % user
    operating_system.write_file(target_path, credentials, codec=IniCodec())
def validate_log_file(self, log_file, owner):
    """Make sure the log file exists and is accessible by owner."""
    file_present = operating_system.exists(log_file, as_root=True)
    if not file_present:
        # Touch an empty file so ownership/permissions can be applied.
        operating_system.write_file(log_file, '', as_root=True)

    operating_system.chown(log_file, user=owner, group=owner, as_root=True)
    operating_system.chmod(log_file, FileMode.ADD_USR_RW_GRP_RW_OTH_R,
                           as_root=True)
    return log_file
def test_read_write_file_input_validation(self):
    """Nonexistent and null paths raise UnprocessableEntity."""
    for path, message in ((None, "File does not exist: None"),
                          ('/__DOES_NOT_EXIST__',
                           "File does not exist: /__DOES_NOT_EXIST__")):
        with ExpectedException(exception.UnprocessableEntity, message):
            operating_system.read_file(path)

    with ExpectedException(exception.UnprocessableEntity,
                           "Invalid path: None"):
        operating_system.write_file(None, {})
def validate_log_file(self, log_file, owner):
    """Make sure the log file exists and is accessible by owner.

    Creates an empty file if one is missing, then grants 'owner'
    read/write access and world read.

    :param log_file:   Absolute path of the log file.
    :param owner:      User (and group) that must own the file.
    :returns:          The validated log file path.
    """
    if not operating_system.exists(log_file, as_root=True):
        operating_system.write_file(log_file, '', as_root=True)
    operating_system.chown(log_file, user=owner, group=owner, as_root=True)
    operating_system.chmod(log_file, FileMode.ADD_USR_RW_GRP_RW_OTH_R,
                           as_root=True)
    # Pass lazy %-args to the logger instead of pre-formatting with '%'
    # so the string is only built when DEBUG logging is enabled.
    LOG.debug("Set log file '%s' as readable", log_file)
    return log_file
def _create_tns_file(self, dbs):
    """(Re)build the TNS file with one entry per database in 'dbs'."""
    # Ensure the file exists before handing it to the config manager.
    if not path.isfile(TNS_PATH):
        operating_system.write_file(TNS_PATH, '', as_root=True)

    conf = configuration.ConfigurationManager(
        TNS_PATH,
        system.ORACLE_INSTANCE_OWNER,
        system.ORACLE_GROUP_OWNER,
        stream_codecs.PropertiesCodec('='),
        requires_root=True)
    # Start from an empty configuration, dropping any stale entries.
    conf.save_configuration({})

    for db in dbs:
        unique_name = db['db_unique_name']
        entry = self._create_tns_entry(unique_name, db['host'],
                                       self.ORA_CONF.db_name)
        conf.apply_system_override({unique_name: entry}, unique_name)
def save_configuration(self, contents):
    """Write given contents to the base configuration file.

    Remove all existing revisions.

    :param contents:   Plain-text contents of the configuration file.
    :type contents:    string
    """
    if self._override_strategy:
        self._override_strategy.remove_last(self._current_revision + 1)

    as_root = self._requires_root
    operating_system.write_file(self._base_config_path, contents,
                                as_root=as_root)
    operating_system.chown(self._base_config_path, self._owner,
                           self._group, as_root=as_root)
    operating_system.chmod(self._base_config_path, FileMode.ADD_READ_ALL,
                           as_root=as_root)
def apply_next(self, options):
    """Back up the current base file as a new revision, then merge
    'options' into it and re-apply ownership/permissions.
    """
    revision = self.count_revisions() + 1
    backup_path = guestagent_utils.build_file_path(
        self._revision_backup_dir, self._base_config_name,
        str(revision), self._BACKUP_EXT)

    as_root = self._requires_root
    # Preserve the pre-update file so this revision can be rolled back.
    operating_system.copy(self._base_config_path, backup_path,
                          force=True, preserve=True, as_root=as_root)

    merged = operating_system.read_file(self._base_config_path,
                                        codec=self._codec)
    guestagent_utils.update_dict(options, merged)

    operating_system.write_file(self._base_config_path, merged,
                                codec=self._codec, as_root=as_root)
    operating_system.chown(self._base_config_path, self._owner,
                           self._group, as_root=as_root)
    operating_system.chmod(self._base_config_path, FileMode.ADD_READ_ALL,
                           as_root=as_root)