def build_module_dir(cls, module_type, module_id):
    """Return the directory for a module, creating it on first use.

    Layout: ``MODULE_BASE_DIR/<module_type>/<module_id>``.
    """
    relative = os.path.join(module_type, module_id)
    target = guestagent_utils.build_file_path(cls.MODULE_BASE_DIR, relative)
    # Create lazily; force=True so intermediate directories are made too.
    if not operating_system.exists(target, is_directory=True):
        operating_system.create_directory(target, force=True)
    return target
def _run_pre_backup(self):
    """Create archival contents in dump dir"""
    try:
        est_dump_size = self.estimate_dump_size()
        avail = operating_system.get_bytes_free_on_fs(DB2_DBPATH)
        if est_dump_size > avail:
            self.cleanup()
            raise OSError(_("Need more free space to backup db2 database,"
                            " estimated %(est_dump_size)s"
                            " and found %(avail)s bytes free ") %
                          {'est_dump_size': est_dump_size, 'avail': avail})
        operating_system.create_directory(DB2_BACKUP_DIR,
                                          system.DB2_INSTANCE_OWNER,
                                          system.DB2_INSTANCE_OWNER,
                                          as_root=True)
        # Quiesce the instance, back up each database, then unquiesce.
        service.run_command(system.QUIESCE_DB2)
        for database_name in self.list_dbnames():
            service.run_command(system.BACKUP_DB % {
                'dbname': database_name, 'dir': DB2_BACKUP_DIR})
        service.run_command(system.UNQUIESCE_DB2)
    except exception.ProcessExecutionError as e:
        LOG.debug("Caught exception when preparing the directory")
        self.cleanup()
        raise e
def _run_pre_backup(self):
    """Create archival contents in dump dir"""
    try:
        est_dump_size = self.estimate_dump_size()
        avail = operating_system.get_bytes_free_on_fs(MONGODB_DBPATH)
        if est_dump_size > avail:
            self.cleanup()
            # TODO(atomic77) Though we can fully recover from this error
            # BackupRunner will leave the trove instance in a BACKUP state
            raise OSError(_("Need more free space to run mongodump, "
                            "estimated %(est_dump_size)s"
                            " and found %(avail)s bytes free ") %
                          {'est_dump_size': est_dump_size,
                           'avail': avail})
        operating_system.create_directory(MONGO_DUMP_DIR, as_root=True)
        operating_system.chown(MONGO_DUMP_DIR, mongo_system.MONGO_USER,
                               mongo_system.MONGO_USER, as_root=True)
        # high timeout here since mongodump can take a long time
        dump_args = ['mongodump', '--out', MONGO_DUMP_DIR]
        dump_args.extend(self.app.admin_cmd_auth_params())
        utils.execute_with_timeout(*dump_args, run_as_root=True,
                                   root_helper='sudo',
                                   timeout=LARGE_TIMEOUT)
    except exception.ProcessExecutionError as e:
        LOG.debug("Caught exception when creating the dump")
        self.cleanup()
        raise e
def _initialize_import_directory(self):
    """Lazy-initialize the directory for imported revision files."""
    if os.path.exists(self._revision_dir):
        return
    operating_system.create_directory(
        self._revision_dir, user=self._owner, group=self._group,
        force=True, as_root=self._requires_root)
def recreate_wal_archive_dir():
    """Wipe and recreate an empty WAL archive dir owned by postgres."""
    owner = PgSqlProcess.PGSQL_OWNER
    operating_system.remove(WAL_ARCHIVE_DIR, force=True, recursive=True,
                            as_root=True)
    operating_system.create_directory(WAL_ARCHIVE_DIR, user=owner,
                                      group=owner, force=True, as_root=True)
def build_module_dir(cls, module_type, module_id):
    """Return (and create if missing) the directory for a module.

    Layout: ``MODULE_BASE_DIR/<module_type>/<module_id>``.
    """
    sub_dir = os.path.join(module_type, module_id)
    module_dir = guestagent_utils.build_file_path(
        cls.MODULE_BASE_DIR, sub_dir)
    # Lazily create the directory the first time the module is used.
    if not operating_system.exists(module_dir, is_directory=True):
        operating_system.create_directory(module_dir, force=True)
    return module_dir
def __init__(self):
    """Load (or create) the Oracle guestagent configuration file.

    If the file does not exist, a new one containing an empty ORACLE
    section is written; otherwise the known keys are read into instance
    attributes.
    """
    self._admin_pwd = None
    self._sys_pwd = None
    self._db_name = None
    self._db_unique_name = None
    # Bug fix: previously only assigned inside the parse branch below,
    # leaving the attribute undefined when the file was new or the
    # ORACLE section was missing.
    self._root_enabled = None
    self.codec = stream_codecs.IniCodec()
    if not os.path.isfile(self._CONF_FILE):
        operating_system.create_directory(os.path.dirname(self._CONF_FILE),
                                          as_root=True)
        section = {self._CONF_ORA_SEC: {}}
        operating_system.write_file(self._CONF_FILE, section,
                                    codec=self.codec, as_root=True)
    else:
        config = operating_system.read_file(self._CONF_FILE,
                                            codec=self.codec, as_root=True)
        try:
            # The first subscript raises KeyError when the ORACLE
            # section is missing; handled below by aborting the parse.
            section = config[self._CONF_ORA_SEC]
            if self._CONF_SYS_KEY in section:
                self._sys_pwd = section[self._CONF_SYS_KEY]
            if self._CONF_ADMIN_KEY in section:
                self._admin_pwd = section[self._CONF_ADMIN_KEY]
            if self._CONF_ROOT_ENABLED in section:
                self._root_enabled = section[self._CONF_ROOT_ENABLED]
            if self._CONF_DB_NAME in section:
                self._db_name = section[self._CONF_DB_NAME]
            if self._CONF_DB_UNIQUE_NAME in section:
                self._db_unique_name = section[self._CONF_DB_UNIQUE_NAME]
        except KeyError:
            # the ORACLE section does not exist, stop parsing
            pass
def build_log_file_name(self, log_name, owner, datastore_dir=None):
    """Build a log file name based on the log_name and make sure the
    directories exist and are accessible by owner.
    """
    def ensure_dir(directory):
        # Create the directory owned by 'owner' unless it already exists.
        if not operating_system.exists(directory, is_directory=True):
            operating_system.create_directory(directory, user=owner,
                                              group=owner, force=True,
                                              as_root=True)

    if datastore_dir is None:
        ensure_dir(self.GUEST_LOG_BASE_DIR)
        datastore_dir = guestagent_utils.build_file_path(
            self.GUEST_LOG_BASE_DIR, self.GUEST_LOG_DATASTORE_DIRNAME)
    ensure_dir(datastore_dir)
    log_file_name = guestagent_utils.build_file_path(
        datastore_dir, '%s-%s.log' % (self.manager, log_name))
    return self.validate_log_file(log_file_name, owner)
def init_storage_structure(self, mount_point):
    """Create the storage mount point owned by the couchbase user."""
    owner = self.couchbase_owner
    try:
        operating_system.create_directory(mount_point, user=owner,
                                          group=owner, as_root=True)
    except exception.ProcessExecutionError:
        LOG.exception(_("Error while initiating storage structure."))
def _run_pre_backup(self):
    """Create archival contents in dump dir.

    Verifies there is enough free space for the estimated dump size,
    prepares MONGO_DUMP_DIR owned by the mongo user, then runs mongodump
    into it. On command failure, cleans up and re-raises.
    """
    try:
        est_dump_size = self.estimate_dump_size()
        avail = operating_system.get_bytes_free_on_fs(MONGODB_DBPATH)
        if est_dump_size > avail:
            self.cleanup()
            # TODO(atomic77) Though we can fully recover from this error
            # BackupRunner will leave the trove instance in a BACKUP state
            raise OSError(_("Need more free space to run mongodump, "
                            "estimated %(est_dump_size)s"
                            " and found %(avail)s bytes free ") %
                          {'est_dump_size': est_dump_size,
                           'avail': avail})
        operating_system.create_directory(MONGO_DUMP_DIR, as_root=True)
        operating_system.chown(MONGO_DUMP_DIR, mongo_system.MONGO_USER,
                               mongo_system.MONGO_USER, as_root=True)
        # high timeout here since mongodump can take a long time
        utils.execute_with_timeout(
            'mongodump', '--out', MONGO_DUMP_DIR,
            *(self.app.admin_cmd_auth_params()),
            run_as_root=True, root_helper='sudo',
            timeout=LARGE_TIMEOUT
        )
    except exception.ProcessExecutionError as e:
        LOG.debug("Caught exception when creating the dump")
        self.cleanup()
        raise e
def pre_upgrade(self, context):
    """Stop MySQL and stash config/home files on the data volume so
    they survive the upgrade; return the paths post-upgrade will need.
    """
    app = self.mysql_app(self.mysql_app_status.get())
    data_dir = app.get_data_dir()
    mount_point, _data = os.path.split(data_dir)
    # Save locations live on the mounted volume so they persist across
    # the instance rebuild.
    save_dir = "%s/etc_mysql" % mount_point
    save_etc_dir = "%s/etc" % mount_point
    home_save = "%s/trove_user" % mount_point
    app.status.begin_restart()
    app.stop_db()
    # Some installations keep config at /etc/my.cnf; copy only if present.
    if operating_system.exists("/etc/my.cnf", as_root=True):
        operating_system.create_directory(save_etc_dir, as_root=True)
        operating_system.copy("/etc/my.cnf", save_etc_dir,
                              preserve=True, as_root=True)
    operating_system.copy("/etc/mysql/.", save_dir, preserve=True,
                          as_root=True)
    # Preserve the guest user's home directory contents as well.
    operating_system.copy("%s/." % os.path.expanduser('~'), home_save,
                          preserve=True, as_root=True)
    self.unmount_volume(context, mount_point=data_dir)
    return {
        'mount_point': mount_point,
        'save_dir': save_dir,
        'save_etc_dir': save_etc_dir,
        'home_save': home_save
    }
def __init__(self, base_config_path, owner, group, codec,
             requires_root=False, override_strategy=None):
    """
    :param base_config_path     Path to the configuration file.
    :type base_config_path      string

    :param owner                Owner of the configuration files.
    :type owner                 string

    :param group                Group of the configuration files.
    :type group                 string

    :param codec                Codec for reading/writing of the particular
                                configuration format.
    :type codec                 StreamCodec

    :param requires_root        Whether the manager requires superuser
                                privileges.
    :type requires_root         boolean

    :param override_strategy    Strategy used to manage configuration
                                overrides (e.g. ImportOverrideStrategy).
                                Defaults to OneFileOverrideStrategy
                                if None. This strategy should be
                                compatible with very much any datastore.
                                It is recommended each datastore defines
                                its strategy explicitly to avoid upgrade
                                compatibility issues in case the default
                                implementation changes in the future.
    :type override_strategy     ConfigurationOverrideStrategy
    """
    self._base_config_path = base_config_path
    self._owner = owner
    self._group = group
    self._codec = codec
    self._requires_root = requires_root
    self._value_cache = None

    if not override_strategy:
        # Use OneFile strategy by default. Store the revisions in a
        # sub-directory at the location of the configuration file.
        revision_dir = guestagent_utils.build_file_path(
            os.path.dirname(base_config_path),
            self.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
        operating_system.create_directory(revision_dir, user=owner,
                                          group=group, force=True,
                                          as_root=requires_root)
        self._override_strategy = OneFileOverrideStrategy(revision_dir)
    else:
        self._override_strategy = override_strategy

    # Bind the chosen strategy to this manager's file and permissions.
    self._override_strategy.configure(base_config_path, owner, group,
                                      codec, requires_root)
def _run_pre_backup(self):
    """Create archival contents in dump dir.

    Checks free space against the estimated dump size, creates the
    backup directory owned by the DB2 instance user, quiesces the
    instance, backs up each database, then unquiesces. Cleans up and
    re-raises on command failure.
    """
    try:
        est_dump_size = self.estimate_dump_size()
        avail = operating_system.get_bytes_free_on_fs(DB2_DBPATH)
        if est_dump_size > avail:
            self.cleanup()
            raise OSError(_("Need more free space to backup db2 database,"
                            " estimated %(est_dump_size)s"
                            " and found %(avail)s bytes free ") %
                          {'est_dump_size': est_dump_size,
                           'avail': avail})
        operating_system.create_directory(DB2_BACKUP_DIR,
                                          system.DB2_INSTANCE_OWNER,
                                          system.DB2_INSTANCE_OWNER,
                                          as_root=True)
        service.run_command(system.QUIESCE_DB2)
        dbNames = self.list_dbnames()
        for dbName in dbNames:
            service.run_command(system.BACKUP_DB % {
                'dbname': dbName, 'dir': DB2_BACKUP_DIR})
        service.run_command(system.UNQUIESCE_DB2)
    except exception.ProcessExecutionError as e:
        LOG.debug("Caught exception when preparing the directory")
        self.cleanup()
        raise e
def do_prepare(self, context, packages, databases, memory_mb, users,
               device_path, mount_point, backup_info, config_contents,
               root_password, overrides, cluster_config, snapshot,
               ds_version=None):
    """This is called from prepare in the base class.

    Sets up the data volume (format/migrate/mount), writes the MySQL
    configuration, optionally restores from backup, starts the server,
    secures it, and attaches replication if a snapshot is given.
    """
    data_dir = mount_point + '/data'
    if device_path:
        LOG.info('Preparing the storage for %s, mount path %s',
                 device_path, mount_point)

        self.app.stop_db()
        device = volume.VolumeDevice(device_path)
        # unmount if device is already mounted
        device.unmount_device(device_path)
        device.format()
        if operating_system.list_files_in_directory(mount_point):
            # rsync existing data to a "data" sub-directory
            # on the new volume
            device.migrate_data(mount_point, target_subdir="data")
        # mount the volume
        device.mount(mount_point)
        operating_system.chown(mount_point, CONF.database_service_uid,
                               CONF.database_service_uid,
                               recursive=True, as_root=True)
        operating_system.create_directory(data_dir,
                                          user=CONF.database_service_uid,
                                          group=CONF.database_service_uid,
                                          as_root=True)
        self.app.set_data_dir(data_dir)

    # Prepare mysql configuration
    LOG.info('Preparing database configuration')
    self.app.configuration_manager.save_configuration(config_contents)
    self.app.update_overrides(overrides)

    # Restore data from backup and reset root password
    if backup_info:
        self.perform_restore(context, data_dir, backup_info)
        self.reset_password_for_restore(ds_version=ds_version,
                                        data_dir=data_dir)

    # Start database service.
    # Cinder volume initialization(after formatted) may leave a
    # lost+found folder
    command = f'--ignore-db-dir=lost+found --datadir={data_dir}'
    self.app.start_db(ds_version=ds_version, command=command)

    self.app.secure()
    # Remote root stays enabled only when a restored backup had it on.
    enable_remote_root = (backup_info and self.adm.is_root_enabled())
    if enable_remote_root:
        self.status.report_root(context)
    else:
        self.app.secure_root()

    if snapshot:
        # This instance is a replication slave
        self.attach_replica(context, snapshot, snapshot['config'])
def recreate_wal_archive_dir(cls):
    """Remove any existing WAL archive dir and create a fresh empty one."""
    archive_dir = CONF.postgresql.wal_archive_location
    operating_system.remove(archive_dir, force=True, recursive=True,
                            as_root=True)
    operating_system.create_directory(archive_dir, user=cls.PGSQL_OWNER,
                                      group=cls.PGSQL_OWNER, force=True,
                                      as_root=True)
def recreate_wal_archive_dir(self):
    """Replace the WAL archive dir with a fresh one owned by postgres."""
    archive_dir = self.wal_archive_location
    pg_user = self.pgsql_owner
    operating_system.remove(archive_dir, force=True, recursive=True,
                            as_root=True)
    operating_system.create_directory(archive_dir, user=pg_user,
                                      group=pg_user, force=True,
                                      as_root=True)
def _initialize_writable_run_dir(self):
    """Create a writable directory for Mongodb's runtime data
    (e.g. PID-file).
    """
    mongodb_run_dir = os.path.dirname(system.MONGO_PID_FILE)
    # Use lazy %-style logging args so the message is only formatted
    # when DEBUG logging is enabled (was eager '%' interpolation).
    LOG.debug("Initializing a runtime directory: %s", mongodb_run_dir)
    operating_system.create_directory(
        mongodb_run_dir, user=system.MONGO_USER, group=system.MONGO_USER,
        force=True, as_root=True)
def pre_restore(self):
    """Stop the server and replace the data dir with a fresh empty one."""
    self.stop_db(context=None)
    PgBaseBackupUtil.recreate_wal_archive_dir()
    data_dir = self.pgsql_data_dir
    # Discard any existing contents before restoring from the backup.
    operating_system.remove(data_dir, force=True, recursive=True,
                            as_root=True)
    operating_system.create_directory(data_dir, user=self.PGSQL_OWNER,
                                      group=self.PGSQL_OWNER, force=True,
                                      as_root=True)
def init_config(self):
    """Initialize the DB2 configuration manager and capture the default
    DBM configuration so values can be reset later.
    """
    if not operating_system.exists(MOUNT_POINT, True):
        operating_system.create_directory(MOUNT_POINT,
                                          system.DB2_INSTANCE_OWNER,
                                          system.DB2_INSTANCE_OWNER,
                                          as_root=True)
    """
    The database manager configuration file - db2systm is stored  under
    the /home/db2inst1/sqllib directory. To update the configuration
    parameters, DB2 recommends using the command - UPDATE DBM
    CONFIGURATION commands instead of directly updating the config file.

    The existing PropertiesCodec implementation has been reused to
    handle text-file operations. Configuration overrides are implemented
    using the ImportOverrideStrategy of the guestagent configuration
    manager.
    """
    LOG.debug("Initialize DB2 configuration")
    # NOTE(review): os.path.dirname() of the instance owner name looks
    # odd for building a revision path -- verify against the deployed
    # layout.
    revision_dir = (
        guestagent_utils.build_file_path(
            os.path.join(MOUNT_POINT,
                         os.path.dirname(system.DB2_INSTANCE_OWNER)),
            ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
    )
    if not operating_system.exists(FAKE_CFG):
        operating_system.write_file(FAKE_CFG, '', as_root=True)
        operating_system.chown(FAKE_CFG, system.DB2_INSTANCE_OWNER,
                               system.DB2_INSTANCE_OWNER, as_root=True)
    self.configuration_manager = (
        ConfigurationManager(FAKE_CFG, system.DB2_INSTANCE_OWNER,
                             system.DB2_INSTANCE_OWNER,
                             PropertiesCodec(delimiter='='),
                             requires_root=True,
                             override_strategy=ImportOverrideStrategy(
                                 revision_dir, "cnf"))
    )
    '''
    Below we are getting the database manager default configuration and
    saving it to the DB2_DEFAULT_CFG file. This is done to help with
    correctly resetting the configurations to the original values when
    user wants to detach a user-defined configuration group from an
    instance. DB2 provides a command to reset the database manager
    configuration parameters (RESET DBM CONFIGURATION) but this command
    resets all the configuration parameters to the system defaults. When
    we build a DB2 guest image there are certain configurations
    parameters like SVCENAME which we set so that the instance can start
    correctly. Hence resetting this value to the system default will
    render the instance in an unstable state. Instead, the recommended
    way for resetting a subset of configuration parameters is to save
    the output of GET DBM CONFIGURATION of the original configuration
    and then call UPDATE DBM CONFIGURATION to reset the value.
      http://www.ibm.com/support/knowledgecenter/SSEPGG_10.5.0/
    com.ibm.db2.luw.admin.cmd.doc/doc/r0001970.html
    '''
    if not operating_system.exists(DB2_DEFAULT_CFG):
        run_command(system.GET_DBM_CONFIGURATION % {
            "dbm_config": DB2_DEFAULT_CFG})
    self.process_default_dbm_config()
def _run_pre_backup(self):
    """Create backupset in backup dir.

    Clears previous backup artifacts, checks free space against the
    estimated backup size, then runs an RMAN incremental backup (plus
    archive logs and the current controlfile) into a per-database
    sub-directory of BACKUP_DIR.
    """
    self.cleanup()
    operating_system.create_directory(BACKUP_DIR, user='******',
                                      group='oinstall', force=True,
                                      as_root=True)
    try:
        est_backup_size = self.estimate_backup_size()
        avail = operating_system.get_bytes_free_on_fs(
            CONF.get('oracle').mount_point)
        if est_backup_size > avail:
            # TODO(schang): BackupRunner will leave the trove instance
            # in a BACKUP state
            raise OSError(
                _("Need more free space to run RMAN backup, "
                  "estimated %(est_backup_size)s"
                  " and found %(avail)s bytes free ") % {
                      'est_backup_size': est_backup_size,
                      'avail': avail
                  })
        backup_dir = (BACKUP_DIR + '/%s') % self.db_name
        operating_system.create_directory(backup_dir, user='******',
                                          group='oinstall', force=True,
                                          as_root=True)
        # RMAN script fed via heredoc to a login shell as the oracle
        # user; doubled %% escapes RMAN's own format placeholders
        # (%I, %u, %s) from Python's %-interpolation.
        backup_cmd = ("""\"\
rman target %(admin_user)s/%(admin_pswd)s@localhost/%(db_name)s <<EOF
run {
configure backup optimization on;
backup incremental level=%(backup_level)s as compressed backupset database format '%(backup_dir)s/%%I_%%u_%%s_%(backup_id)s.dat' plus archivelog;
backup current controlfile format '%(backup_dir)s/%%I_%%u_%%s_%(backup_id)s.ctl';
}
EXIT;
EOF\" """ % {
            'admin_user': ADMIN_USER,
            'admin_pswd': self.oracnf.admin_password,
            'db_name': self.db_name,
            'backup_dir': backup_dir,
            'backup_id': self.backup_id,
            'backup_level': self.backup_level
        })
        utils.execute_with_timeout("su - oracle -c " + backup_cmd,
                                   run_as_root=True, root_helper='sudo',
                                   timeout=LARGE_TIMEOUT, shell=True,
                                   log_output_on_error=True)
    except exception.ProcessExecutionError as e:
        LOG.debug("Caught exception when creating backup files")
        self.cleanup()
        raise e
def pre_restore(self):
    """Stop PostgreSQL and reset WAL archive and data directories."""
    self.app.stop_db()
    LOG.info(_("Preparing WAL archive dir"))
    self.app.recreate_wal_archive_dir()
    data_dir = self.app.pgsql_data_dir
    pg_user = self.app.pgsql_owner
    # Wipe any leftover data before the restore writes fresh files.
    operating_system.remove(data_dir, force=True, recursive=True,
                            as_root=True)
    operating_system.create_directory(data_dir, user=pg_user,
                                      group=pg_user, force=True,
                                      as_root=True)
def pre_restore(self):
    """Stop the server, reset the WAL archive, and empty the data dir."""
    self.app.stop_db()
    LOG.info("Preparing WAL archive dir")
    self.app.recreate_wal_archive_dir()
    data_dir = self.app.pgsql_data_dir
    pg_user = self.app.pgsql_owner
    operating_system.remove(data_dir, force=True, recursive=True,
                            as_root=True)
    operating_system.create_directory(data_dir, user=pg_user,
                                      group=pg_user, force=True,
                                      as_root=True)
def pre_restore(self):
    """Stop the server and rebuild empty WAL-archive and data dirs."""
    self.stop_db(context=None)
    LOG.info("Preparing WAL archive dir")
    PgSqlProcess.recreate_wal_archive_dir()
    data_dir = self.pgsql_data_dir
    # Remove everything so the restore starts from a clean directory.
    operating_system.remove(data_dir, force=True, recursive=True,
                            as_root=True)
    operating_system.create_directory(data_dir, user=self.PGSQL_OWNER,
                                      group=self.PGSQL_OWNER, force=True,
                                      as_root=True)
def _install_redis(self, packages):
    """Install the redis server packages and start the service."""
    LOG.debug('Installing redis server.')
    LOG.debug("Creating %s.", system.REDIS_CONF_DIR)
    operating_system.create_directory(system.REDIS_CONF_DIR, as_root=True)
    # No special package options are needed for redis.
    packager.pkg_install(packages, {}, TIME_OUT)
    self.start_db()
    LOG.debug('Finished installing redis server.')
def mount(self):
    """Mount the volume device at the mount point (created if missing).

    NOTE(review): the mount runs as a 'sudo mount' shell command driven
    by pexpect and the exit status is never checked, so a failed mount
    is not detected here.
    """
    if not os.path.exists(self.mount_point):
        operating_system.create_directory(self.mount_point, as_root=True)
    LOG.debug("Mounting volume. Device path:{0}, mount_point:{1}, "
              "volume_type:{2}, mount options:{3}".format(
                  self.device_path, self.mount_point, self.volume_fstype,
                  self.mount_options))
    cmd = ("sudo mount -t %s -o %s %s %s" %
           (self.volume_fstype, self.mount_options, self.device_path,
            self.mount_point))
    child = pexpect.spawn(cmd)
    # Block until the command finishes (EOF on its output stream).
    child.expect(pexpect.EOF)
def _init_overrides_dir(cls):
    """Return the configuration overrides directory, creating it once."""
    overrides_dir = cls._param_file_path(
        configuration.ConfigurationManager.
        DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
    if os.path.exists(overrides_dir):
        return overrides_dir
    operating_system.create_directory(overrides_dir,
                                      user=system.ORACLE_INSTANCE_OWNER,
                                      group=system.ORACLE_GROUP_OWNER,
                                      force=True, as_root=True)
    return overrides_dir
def _install_couchbase(self, packages):
    """
    Install the Couchbase Server.

    Creates the configuration directory, installs the packages and
    starts the service.
    """
    # Lazy logging args instead of eager '%' interpolation so the
    # message is only formatted when DEBUG logging is enabled.
    LOG.debug('Installing Couchbase Server. Creating %s',
              self.couchbase_conf_dir)
    operating_system.create_directory(self.couchbase_conf_dir,
                                      as_root=True)
    pkg_opts = {}
    packager.pkg_install(packages, pkg_opts, 1200)
    self.start_db()
    LOG.debug('Finished installing Couchbase Server.')
def __init__(self, base_config_path, owner, group, codec,
             requires_root=False, override_strategy=None):
    """Manage a datastore configuration file.

    :param base_config_path     Path to the configuration file.
    :param owner                Owner of the configuration files.
    :param group                Group of the configuration files.
    :param codec                StreamCodec for reading/writing the
                                configuration format.
    :param requires_root        Whether superuser privileges are needed.
    :param override_strategy    ConfigurationOverrideStrategy used to
                                manage overrides; defaults to
                                OneFileOverrideStrategy when None.
                                Datastores should set this explicitly to
                                avoid upgrade issues if the default ever
                                changes.
    """
    self._base_config_path = base_config_path
    self._owner = owner
    self._group = group
    self._codec = codec
    self._requires_root = requires_root
    self._value_cache = None
    if override_strategy:
        self._override_strategy = override_strategy
    else:
        # Default to OneFile, keeping revisions in a sub-directory
        # next to the configuration file itself.
        revision_dir = guestagent_utils.build_file_path(
            os.path.dirname(base_config_path),
            self.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
        operating_system.create_directory(
            revision_dir, user=owner, group=group, force=True,
            as_root=requires_root)
        self._override_strategy = OneFileOverrideStrategy(revision_dir)
    self._override_strategy.configure(
        base_config_path, owner, group, codec, requires_root)
def _install_couchbase(self, packages):
    """
    Install the Couchbase Server.

    Creates the configuration directory, installs the packages and
    starts the service.
    """
    # Lazy logging args instead of eager '%' interpolation so the
    # message is only formatted when DEBUG logging is enabled.
    LOG.debug('Installing Couchbase Server. Creating %s',
              system.COUCHBASE_CONF_DIR)
    operating_system.create_directory(system.COUCHBASE_CONF_DIR,
                                      as_root=True)
    pkg_opts = {}
    packager.pkg_install(packages, pkg_opts, system.TIME_OUT)
    self.start_db()
    LOG.debug('Finished installing Couchbase Server.')
def _install_redis(self, packages):
    """
    Install the redis server.

    Creates the configuration directory, installs the packages and
    starts the service.
    """
    LOG.debug('Installing redis server.')
    # Pass the value as a lazy logging arg instead of pre-formatting a
    # temporary 'msg' string with '%'.
    LOG.debug("Creating %s.", system.REDIS_CONF_DIR)
    operating_system.create_directory(system.REDIS_CONF_DIR, as_root=True)
    pkg_opts = {}
    packager.pkg_install(packages, pkg_opts, TIME_OUT)
    self.start_db()
    LOG.debug('Finished installing redis server.')
def pre_restore(self):
    """Stop Redis and clear the old persistence file before restoring.

    Also temporarily disables AOF when it is enabled, via a system
    config override.
    """
    self.app.stop_db()
    LOG.info("Removing old persistence file: %s.",
             self.restore_location)
    operating_system.remove(self.restore_location, force=True,
                            as_root=True)
    # Renamed from 'dir' to avoid shadowing the builtin.
    restore_dir = os.path.dirname(self.restore_location)
    operating_system.create_directory(restore_dir, as_root=True)
    operating_system.chmod(restore_dir, FileMode.SET_FULL, as_root=True)
    # IF AOF is set, we need to turn it off temporarily
    if self.aof_set:
        self.app.configuration_manager.apply_system_override(
            self.aof_off_cfg, change_id=self.CONF_LABEL_AOF_TEMP_OFF)
def _init_overrides_dir(cls):
    """Return the MongoDB config overrides dir, creating it on first use."""
    overrides_dir = guestagent_utils.build_file_path(
        os.path.dirname(CONFIG_FILE),
        ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
    if os.path.exists(overrides_dir):
        return overrides_dir
    operating_system.create_directory(overrides_dir,
                                      user=system.MONGO_USER,
                                      group=system.MONGO_USER,
                                      force=True, as_root=True)
    return overrides_dir
def pre_restore(self):
    """Stop Redis and clear the old persistence file before restoring.

    Also temporarily disables AOF when it is enabled, via a system
    config override.
    """
    self.app.stop_db()
    LOG.info(_("Removing old persistence file: %s."),
             self.restore_location)
    operating_system.remove(self.restore_location, force=True,
                            as_root=True)
    # Renamed from 'dir' to avoid shadowing the builtin.
    restore_dir = os.path.dirname(self.restore_location)
    operating_system.create_directory(restore_dir, as_root=True)
    operating_system.chmod(restore_dir, FileMode.SET_FULL, as_root=True)
    # IF AOF is set, we need to turn it off temporarily
    if self.aof_set:
        self.app.configuration_manager.apply_system_override(
            self.aof_off_cfg, change_id=self.CONF_LABEL_AOF_TEMP_OFF)
def pre_restore(self):
    """Create an empty data directory for the restored files.

    The backup archive is rooted inside the data directory (the
    directory itself is not archived), so an old backup can be restored
    even if the standard guest agent data directory has changed.
    """
    LOG.debug('Initializing a data directory.')
    owner = self._app.cassandra_owner
    operating_system.create_directory(self.restore_location, user=owner,
                                      group=owner, force=True,
                                      as_root=True)
def pre_restore(self):
    """Stop the service and rebuild empty WAL-archive and data dirs."""
    self.stop_db(context=None)
    LOG.info("Preparing WAL archive dir")
    PgSqlProcess.recreate_wal_archive_dir()
    data_dir = self.PGSQL_DATA_DIR
    operating_system.remove(data_dir, force=True, recursive=True,
                            as_root=True)
    operating_system.create_directory(data_dir, user=self.PGSQL_OWNER,
                                      group=self.PGSQL_OWNER, force=True,
                                      as_root=True)
def pre_restore(self):
    """Create an empty data directory owned by the cassandra user.

    The restored files were archived without the directory itself, so a
    fresh directory is created here; this also lets old backups restore
    correctly if the standard data location has since changed.
    """
    LOG.debug('Initializing a data directory.')
    cassandra_user = self._app.cassandra_owner
    operating_system.create_directory(
        self.restore_location, user=cassandra_user, group=cassandra_user,
        force=True, as_root=True)
def _init_overrides_dir(cls):
    """Return the Redis config overrides dir, creating it on first use."""
    overrides_dir = guestagent_utils.build_file_path(
        os.path.dirname(system.REDIS_CONFIG),
        ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
    if os.path.exists(overrides_dir):
        return overrides_dir
    operating_system.create_directory(overrides_dir,
                                      user=system.REDIS_OWNER,
                                      group=system.REDIS_OWNER,
                                      force=True, as_root=True)
    return overrides_dir
def _run_restore(self):
    """Restore the Oracle database from the stored backupset.

    Recreates the Oracle directory layout, unpacks the backup files,
    fixes ownership, then performs the RMAN restore/recover sequence
    and opens the database.
    """
    metadata = self.storage.load_metadata(self.location, self.checksum)
    self.db_name = metadata['db_name']
    operating_system.create_directory(ORA_FAST_RECOVERY_PATH,
                                      user='******', group='oinstall',
                                      force=True, as_root=True)
    operating_system.create_directory(ORA_AUDIT_PATH % {'db': self.db_name},
                                      user='******', group='oinstall',
                                      force=True, as_root=True)
    operating_system.create_directory(ORA_FAST_RECOVERY_PATH + '/' +
                                      self.db_name,
                                      user='******', group='oinstall',
                                      force=True, as_root=True)
    operating_system.create_directory(ORA_DATA_PATH + '/' + self.db_name,
                                      user='******', group='oinstall',
                                      force=True, as_root=True)
    # the backup set will restore directly to ORADATA/backupset_files
    self._unpack_backup_files(self.location, self.checksum)
    # Hand everything to the oracle user before RMAN runs.
    operating_system.chown(ORA_BACKUP_PATH, 'oracle', 'oinstall',
                           recursive=True, force=True, as_root=True)
    operating_system.chown(ORA_FAST_RECOVERY_PATH, 'oracle', 'oinstall',
                           recursive=True, force=True, as_root=True)
    self._perform_restore()
    self._perform_recover()
    self._open_database()
def __init__(self, file_path):
    """Load the config at *file_path*, creating a blank one if absent."""
    self.file_path = file_path
    self._codec = self.codec_class()
    # Pre-populate every known key with None.
    self._values = dict.fromkeys(self.key_names)
    if path.isfile(self.file_path):
        config = operating_system.read_file(self.file_path,
                                            codec=self._codec,
                                            as_root=True)
        self._parse_ora_config(config[self.section_name])
    else:
        operating_system.create_directory(path.dirname(self.file_path),
                                          as_root=True)
        # Seed the new file with a single empty section.
        blank = {self.section_name: {}}
        operating_system.write_file(self.file_path, blank,
                                    codec=self._codec, as_root=True)
def do_prepare(self, context, packages, databases, memory_mb, users,
               device_path, mount_point, backup_info, config_contents,
               root_password, overrides, cluster_config, snapshot):
    # pylint: disable=too-many-arguments
    """MUST be implemented. trove.guestagent.datastore.

    trove.guestagent.datastore.manager calls self.do_prepare in
    trove.guestagent.datastore.manager.prepare()

    Sets up the data volume for k2hdkc, applies configuration and
    overrides, and restores from backup when requested (non-cluster
    case only).
    """
    LOG.debug("Starting initial configuration.")
    if device_path:
        device = volume.VolumeDevice(device_path)
        # unmount if device is already mounted
        device.unmount_device(device_path)
        device.format()
        device.mount(mount_point)
        # Data directories must belong to the k2hdkc service user.
        operating_system.chown(mount_point, 'k2hdkc', 'k2hdkc',
                               as_root=True)
        operating_system.create_directory(mount_point + '/data',
                                          'k2hdkc', 'k2hdkc',
                                          force=True, as_root=True)
        operating_system.create_directory(mount_point + '/data/snapshots',
                                          'k2hdkc', 'k2hdkc',
                                          force=True, as_root=True)
        LOG.debug('Mounted the volume.')

    if config_contents:
        LOG.debug("Applying configuration.")
        self._app.configuration_manager.save_configuration(config_contents)

    if overrides:
        LOG.debug("Applying self._app.update_overrides")
        self._app.update_overrides(context, overrides)
        LOG.debug("Applying _create_k2hdkc_overrides_files")
        self._create_k2hdkc_overrides_files()

    #################
    # Backup
    #################
    if not cluster_config:
        if backup_info:
            self._perform_restore(backup_info, context, mount_point)
def mount(self):
    """Mount the device at the mount point, creating the dir if needed.

    Raises via log_and_raise() when the mount command fails.
    """
    if not operating_system.exists(self.mount_point, is_directory=True,
                                   as_root=True):
        operating_system.create_directory(self.mount_point, as_root=True)
    LOG.debug("Mounting volume. Device path:{0}, mount_point:{1}, "
              "volume_type:{2}, mount options:{3}".format(
                  self.device_path, self.mount_point, self.volume_fstype,
                  self.mount_options))
    try:
        utils.execute("mount", "-t", self.volume_fstype,
                      "-o", self.mount_options,
                      self.device_path, self.mount_point,
                      run_as_root=True, root_helper="sudo")
    except exception.ProcessExecutionError:
        log_and_raise(_("Could not mount '%s'.") % self.mount_point)
def _run_pre_backup(self):
    """Create backupset in backup dir.

    Clears previous artifacts, checks free space against the estimated
    backup size, then runs an RMAN incremental backup (plus archive
    logs and controlfile) into a per-database sub-directory.
    """
    self.cleanup()
    operating_system.create_directory(BACKUP_DIR, user='******',
                                      group='oinstall', force=True,
                                      as_root=True)
    try:
        est_backup_size = self.estimate_backup_size()
        avail = operating_system.get_bytes_free_on_fs(CONF.get('oracle').
                                                      mount_point)
        if est_backup_size > avail:
            # TODO(schang): BackupRunner will leave the trove instance
            # in a BACKUP state
            raise OSError(_("Need more free space to run RMAN backup, "
                            "estimated %(est_backup_size)s"
                            " and found %(avail)s bytes free ") %
                          {'est_backup_size': est_backup_size,
                           'avail': avail})
        backup_dir = (BACKUP_DIR + '/%s') % self.db_name
        operating_system.create_directory(backup_dir, user='******',
                                          group='oinstall', force=True,
                                          as_root=True)
        # RMAN heredoc run through a login shell as the oracle user;
        # doubled %% escapes RMAN's own placeholders (%I, %u, %s) from
        # Python's %-interpolation.
        backup_cmd = ("""\"\
rman target %(admin_user)s/%(admin_pswd)s@localhost/%(db_name)s <<EOF
run {
configure backup optimization on;
backup incremental level=%(backup_level)s as compressed backupset database format '%(backup_dir)s/%%I_%%u_%%s_%(backup_id)s.dat' plus archivelog;
backup current controlfile format '%(backup_dir)s/%%I_%%u_%%s_%(backup_id)s.ctl';
}
EXIT;
EOF\" """ % {'admin_user': ADMIN_USER,
             'admin_pswd': self.oracnf.admin_password,
             'db_name': self.db_name,
             'backup_dir': backup_dir,
             'backup_id': self.backup_id,
             'backup_level': self.backup_level})
        utils.execute_with_timeout("su - oracle -c " + backup_cmd,
                                   run_as_root=True, root_helper='sudo',
                                   timeout=LARGE_TIMEOUT, shell=True,
                                   log_output_on_error=True)
    except exception.ProcessExecutionError as e:
        LOG.debug("Caught exception when creating backup files")
        self.cleanup()
        raise e
def __init__(self, file_path):
    """Load the config at *file_path*, creating a blank file if missing.

    When absent, the parent directory is created and a file containing
    a single empty ``section_name`` section is written; otherwise the
    file is read and parsed into ``self._values``.
    """
    self.file_path = file_path
    self._codec = self.codec_class()
    # Pre-populate every known key with None.
    self._values = dict.fromkeys(self.key_names)
    if not path.isfile(self.file_path):
        operating_system.create_directory(path.dirname(self.file_path),
                                          as_root=True)
        # create a new blank section
        section = {self.section_name: {}}
        operating_system.write_file(self.file_path, section,
                                    codec=self._codec, as_root=True)
    else:
        config = operating_system.read_file(self.file_path,
                                            codec=self._codec,
                                            as_root=True)
        # KeyError propagates if the expected section is missing.
        self._parse_ora_config(config[self.section_name])
def save_files_pre_upgrade(self, mount_point):
    """Copy Oracle config artifacts into a save dir on the data volume.

    Returns a dict with the save dir and the map of saved items so they
    can be restored after the upgrade.
    """
    save_dir = path.join(mount_point, 'saves')
    saves = {
        'oratab': self.paths.oratab_file,
        'dbs': self.paths.dbs_dir,
        'oranet': self.paths.oranet_dir,
        'admin': self.paths.admin_dir,
        'conf_file': CONF.get(MANAGER).conf_file
    }
    if not operating_system.exists(save_dir, is_directory=True,
                                   as_root=True):
        operating_system.create_directory(save_dir, force=True,
                                          as_root=True)
    # Iterate items() directly instead of keys() with per-key lookups.
    for name, source in saves.items():
        operating_system.copy(source, path.join(save_dir, name),
                              recursive=True, preserve=True, as_root=True)
    return {'save_dir': save_dir, 'saves': saves}
def write_password_to_file(self, root_password):
    """Atomically persist the root password to the Couchbase pwd file.

    The password is written to a 0600 temp file first and then moved
    into place so a partially-written file is never observed.

    :raises RuntimeError: if writing the temp file fails.
    """
    operating_system.create_directory(system.COUCHBASE_CONF_DIR,
                                      as_root=True)
    try:
        tempfd, tempname = tempfile.mkstemp()
        os.fchmod(tempfd, stat.S_IRUSR | stat.S_IWUSR)
        # Bug fix: os.write() requires bytes on Python 3; encode str
        # input (bytes input is passed through unchanged).
        if isinstance(root_password, str):
            root_password = root_password.encode()
        os.write(tempfd, root_password)
        os.fchmod(tempfd, stat.S_IRUSR)
        os.close(tempfd)
    except OSError as err:
        message = _("An error occurred in saving password "
                    "(%(errno)s). %(strerror)s.") % {
                        "errno": err.errno,
                        "strerror": err.strerror}
        LOG.exception(message)
        raise RuntimeError(message)
    operating_system.move(tempname, system.pwd_file, as_root=True)