def _build_snapshot_package_cmd(self, data_dir, snapshot_name):
    """Collect all files for a given snapshot and build a package
    command for them.

    Transform the paths such that the backup can be restored simply by
    extracting the archive right to an existing data directory
    (i.e. place the root into the <data dir> and remove the
    'snapshots/<snapshot name>' portion of the path).
    Attempt to preserve access modifiers on the archived files.

    :param data_dir:       directory searched for snapshot files.
    :param snapshot_name:  name of the snapshot to package.
    :returns:              a 'sudo tar' command string streaming the
                           archive to stdout.
    :raises exception.BackupCreationError: if no snapshot files were
        found; there should always be at least the system keyspace.
    """
    # Pass the argument lazily so the message is only rendered when
    # debug logging is enabled (was an eager '%' interpolation).
    LOG.debug('Searching for all snapshot(s) with name "%s".',
              snapshot_name)
    # Raw string: '\.' is an invalid escape sequence in a plain string
    # literal and must be escaped for the regex to match a literal dot.
    snapshot_files = operating_system.list_files_in_directory(
        data_dir, recursive=True, include_dirs=False,
        pattern=r'.*/snapshots/%s/.*\.%s' % (snapshot_name,
                                             self._SNAPSHOT_EXTENSION),
        as_root=True)
    num_snapshot_files = len(snapshot_files)
    LOG.debug('Found %(num)d snapshot (*.%(ext)s) files.',
              {'num': num_snapshot_files,
               'ext': self._SNAPSHOT_EXTENSION})
    if num_snapshot_files > 0:
        return ('sudo tar '
                '--transform="s#snapshots/%s/##" -cpPf - -C "%s" "%s"'
                % (snapshot_name, data_dir, '" "'.join(snapshot_files)))

    # There should always be at least the system keyspace snapshot.
    raise exception.BackupCreationError(_("No data found."))
def read_module_results(cls, is_admin=False, include_contents=False):
    """Collect every module result found on the guest.

    Results flagged as removed are skipped, as are results not marked
    visible unless the caller is an admin.  When include_contents is
    True each result carries its Base64-encoded contents; non-admin
    callers get a placeholder message for admin-restricted modules.
    The returned list is ordered newest-first by 'updated'.
    """
    pattern = cls.MODULE_RESULT_FILENAME
    found = operating_system.list_files_in_directory(
        cls.MODULE_BASE_DIR, recursive=True, pattern=pattern)
    collected = []
    for path_to_result in found:
        entry = cls.read_module_result(path_to_result)
        if entry.get('removed'):
            continue
        if not (is_admin or entry.get('visible')):
            continue
        if include_contents:
            codec = stream_codecs.Base64Codec()
            # keep admin_only for backwards compatibility
            restricted = (entry.get('is_admin') or
                          entry.get('admin_only'))
            if restricted and not is_admin:
                message = (
                    "Must be admin to retrieve contents for module %s"
                    % entry.get('name', 'Unknown'))
                entry['contents'] = codec.serialize(message)
            else:
                contents_file = cls.build_contents_filename(
                    os.path.dirname(path_to_result))
                entry['contents'] = operating_system.read_file(
                    contents_file, codec=codec, decode=False)
        collected.append(entry)
    collected.sort(key=operator.itemgetter('updated'), reverse=True)
    return collected
def read_module_results(cls, is_admin=False, include_contents=False):
    """Collect all module results found on the guest and return them.

    Removed results are skipped, as are results not marked visible
    unless the caller is an admin.  When include_contents is True each
    result carries its Base64-encoded contents; non-admin callers get
    a placeholder message for admin-only modules.
    """
    result_files = operating_system.list_files_in_directory(
        cls.MODULE_BASE_DIR, recursive=True,
        pattern=cls.MODULE_RESULT_FILENAME)
    results = []
    for rfile in result_files:
        record = cls.read_module_result(rfile)
        visible_to_caller = is_admin or record.get('visible')
        if record.get('removed') or not visible_to_caller:
            continue
        if include_contents:
            codec = stream_codecs.Base64Codec()
            if record.get('admin_only') and not is_admin:
                denied = (
                    "Must be admin to retrieve contents for module %s"
                    % record.get('name', 'Unknown'))
                record['contents'] = codec.serialize(denied)
            else:
                contents_file = cls.build_contents_filename(
                    os.path.dirname(rfile))
                record['contents'] = operating_system.read_file(
                    contents_file, codec=codec, decode=False)
        results.append(record)
    return results
def _build_snapshot_package_cmd(self, data_dir, snapshot_name):
    """Collect all files for a given snapshot and build a package
    command for them.

    Transform the paths such that the backup can be restored simply by
    extracting the archive right to an existing data directory
    (i.e. place the root into the <data dir> and remove the
    'snapshots/<snapshot name>' portion of the path).
    Attempt to preserve access modifiers on the archived files.

    :param data_dir:       directory searched for snapshot files.
    :param snapshot_name:  name of the snapshot to package.
    :returns:              a 'sudo tar' command string streaming the
                           archive to stdout.
    :raises exception.BackupCreationError: if no snapshot files were
        found; there should always be at least the system keyspace.
    """
    LOG.debug('Searching for all snapshot(s) with name "%s".',
              snapshot_name)
    # Raw string: '\.' is an invalid escape sequence in a plain string
    # literal and must be escaped for the regex to match a literal dot.
    snapshot_files = operating_system.list_files_in_directory(
        data_dir, recursive=True, include_dirs=False,
        pattern=r'.*/snapshots/%s/.*\.%s' % (snapshot_name,
                                             self._SNAPSHOT_EXTENSION),
        as_root=True)
    num_snapshot_files = len(snapshot_files)
    LOG.debug('Found %(num)d snapshot (*.%(ext)s) files.',
              {'num': num_snapshot_files,
               'ext': self._SNAPSHOT_EXTENSION})
    if num_snapshot_files > 0:
        return ('sudo tar '
                '--transform="s#snapshots/%s/##" -cpPf - -C "%s" "%s"'
                % (snapshot_name, data_dir, '" "'.join(snapshot_files)))

    # There should always be at least the system keyspace snapshot.
    raise exception.BackupCreationError(_("No data found."))
def do_prepare(self, context, packages, databases, memory_mb, users,
               device_path, mount_point, backup_info, config_contents,
               root_password, overrides, cluster_config, snapshot,
               ds_version=None):
    """This is called from prepare in the base class.

    Sets up storage (if a volume device is given), writes the database
    configuration, optionally restores from a backup, starts the
    database service, secures it, and attaches replication if a
    snapshot is provided.  Many parameters (packages, databases,
    memory_mb, users, root_password, cluster_config) are accepted for
    the base-class signature but not used here.
    """
    # The service's data lives in a 'data' sub-directory of the mount.
    data_dir = mount_point + '/data'
    if device_path:
        LOG.info('Preparing the storage for %s, mount path %s',
                 device_path, mount_point)
        # Stop the database before touching its storage.
        self.app.stop_db()
        device = volume.VolumeDevice(device_path)
        # unmount if device is already mounted
        device.unmount_device(device_path)
        device.format()
        if operating_system.list_files_in_directory(mount_point):
            # rsync existing data to a "data" sub-directory
            # on the new volume
            device.migrate_data(mount_point, target_subdir="data")
        # mount the volume
        device.mount(mount_point)
        # Hand ownership of the mount and the data dir to the database
        # service user so the server process can write there.
        operating_system.chown(mount_point, CONF.database_service_uid,
                               CONF.database_service_uid,
                               recursive=True, as_root=True)
        operating_system.create_directory(
            data_dir,
            user=CONF.database_service_uid,
            group=CONF.database_service_uid,
            as_root=True)
        self.app.set_data_dir(data_dir)

    # Prepare mysql configuration
    LOG.info('Preparing database configuration')
    self.app.configuration_manager.save_configuration(config_contents)
    self.app.update_overrides(overrides)

    # Restore data from backup and reset root password
    if backup_info:
        self.perform_restore(context, data_dir, backup_info)
        self.reset_password_for_restore(ds_version=ds_version,
                                        data_dir=data_dir)

    # Start database service.
    # Cinder volume initialization(after formatted) may leave a
    # lost+found folder
    command = f'--ignore-db-dir=lost+found --datadir={data_dir}'
    self.app.start_db(ds_version=ds_version, command=command)
    self.app.secure()

    # After a restore, keep remote root enabled if the backed-up
    # instance had it; otherwise lock down the root account.
    enable_remote_root = (backup_info and self.adm.is_root_enabled())
    if enable_remote_root:
        self.status.report_root(context)
    else:
        self.app.secure_root()

    if snapshot:
        # This instance is a replication slave
        self.attach_replica(context, snapshot, snapshot['config'])
def _find_revision_file(self, group_name, change_id):
    """Return the path of the first matching revision file, or None."""
    pattern = self._build_rev_name_pattern(group_name, change_id)
    matches = operating_system.list_files_in_directory(
        self._revision_dir, recursive=True, pattern=pattern,
        as_root=self._requires_root)
    for match in matches:
        return match
    return None
def _collect_revision_files(self, group_name='.+'):
    """Collect and return a sorted list of paths to existing revision
    files.  The files are sorted in the same order in which they were
    applied.
    """
    pattern = self._build_rev_name_pattern(group_name=group_name)
    revision_files = operating_system.list_files_in_directory(
        self._revision_dir, recursive=False, pattern=pattern)
    return sorted(revision_files)
def _collect_revision_files(self, group_name='.+'):
    """Collect and return a sorted list of paths to existing revision
    files.  The files are sorted in the same order in which they were
    applied.
    """
    name_pattern = self._build_rev_name_pattern(group_name=group_name)
    found = operating_system.list_files_in_directory(
        self._revision_dir, recursive=True, pattern=name_pattern,
        as_root=self._requires_root)
    found = sorted(found)
    return found
def pg_version(self):
    """Find the database version file stored in the data directory.

    :returns: A tuple with the path to the version file (in the root
              of the data directory) and the version string.
    """
    candidates = operating_system.list_files_in_directory(
        self.DATA_BASE, recursive=True, pattern='PG_VERSION',
        as_root=True)
    # The copy in the root of the data directory has the shortest path.
    by_path_length = sorted(candidates, key=len)
    version_file = by_path_length[0]
    contents = operating_system.read_file(version_file, as_root=True)
    return version_file, contents.strip()
def pg_version(self):
    """Find the database version file stored in the data directory.

    :returns: A tuple with the path to the version file (in the root
              of the data directory) and the version string.
    """
    matches = sorted(
        operating_system.list_files_in_directory(
            self.DATA_BASE, recursive=True, pattern='PG_VERSION',
            as_root=True),
        key=len)
    # Shortest path is the file directly in the data directory root.
    shortest = matches[0]
    raw_version = operating_system.read_file(shortest, as_root=True)
    return shortest, raw_version.strip()
def _assert_list_files(self, root, recursive, pattern, all_paths, count):
    """Verify list_files_in_directory against the known set of paths."""
    listed = operating_system.list_files_in_directory(
        root, recursive=recursive, pattern=pattern)
    expected = set()
    for candidate in all_paths:
        in_scope = recursive or os.path.dirname(candidate) == root
        name_matches = not pattern or re.match(
            pattern, os.path.basename(candidate))
        if in_scope and name_matches:
            expected.add(candidate)
    self.assertEqual(expected, listed)
    self.assertEqual(count, len(listed),
                     "Incorrect number of listed files.")
def _assert_list_files(self, root, recursive, pattern, all_paths, count):
    """Check listing output and file count against all_paths."""
    def _is_expected(path):
        # Non-recursive listings only include direct children of root.
        if not recursive and os.path.dirname(path) != root:
            return False
        return not pattern or bool(
            re.match(pattern, os.path.basename(path)))

    found = operating_system.list_files_in_directory(
        root, recursive=recursive, pattern=pattern)
    self.assertEqual({p for p in all_paths if _is_expected(p)}, found)
    self.assertEqual(count, len(found),
                     "Incorrect number of listed files.")
def _remove_import_overrides(self, strategy, group_name, overrides,
                             path_builder):
    """Remove overrides (or a whole group) and verify the files are gone."""
    if not overrides:
        # Remove the entire group and verify nothing matching remains.
        strategy.remove(group_name)
        remaining = operating_system.list_files_in_directory(
            strategy._revision_dir, pattern="^%s-.+$" % group_name)
        self.assertEqual(set(), remaining,
                         "Some import files from group '%s' "
                         "were not removed." % group_name)
        return
    # Remove the overrides and immediately check the file was removed.
    for change_id, _, index, _ in overrides:
        strategy.remove(group_name, change_id)
        expected_path = path_builder(
            strategy._revision_dir, group_name, change_id, index,
            strategy._revision_ext)
        self._assert_file_exists(expected_path, False)
def _assert_list_files(self, root, recursive, pattern, include_dirs,
                       all_paths, count):
    """Verify the directory listing (optionally including directories)."""
    found = operating_system.list_files_in_directory(
        root, recursive=recursive, pattern=pattern,
        include_dirs=include_dirs)
    expected = set()
    for candidate in all_paths:
        if not include_dirs and os.path.isdir(candidate):
            continue
        if not recursive and os.path.dirname(candidate) != root:
            continue
        if pattern and not re.match(pattern,
                                    os.path.basename(candidate)):
            continue
        expected.add(candidate)
    self.assertEqual(expected, found)
    self.assertEqual(count, len(found),
                     "Incorrect number of listed files.")
def _assert_list_files(self, root, recursive, pattern, include_dirs,
                       all_paths, count):
    """Check listing output and file count, honoring include_dirs."""
    def _keep(path):
        # Directories are excluded unless explicitly requested.
        if not include_dirs and os.path.isdir(path):
            return False
        if not recursive and os.path.dirname(path) != root:
            return False
        return not pattern or bool(
            re.match(pattern, os.path.basename(path)))

    found = operating_system.list_files_in_directory(
        root, recursive=recursive, pattern=pattern,
        include_dirs=include_dirs)
    self.assertEqual(set(filter(_keep, all_paths)), found)
    self.assertEqual(count, len(found),
                     "Incorrect number of listed files.")
def _remove_import_overrides(
        self, strategy, group_name, overrides, path_builder):
    """Remove individual overrides, or a whole group, verifying removal."""
    if overrides:
        for change_id, _, index, _ in overrides:
            # Remove each override and immediately verify its file
            # is gone.
            strategy.remove(group_name, change_id)
            removed_path = path_builder(
                strategy._revision_dir, group_name, change_id, index,
                strategy._revision_ext)
            self._assert_file_exists(removed_path, False)
    else:
        # Remove the entire group.
        strategy.remove(group_name)
        leftovers = operating_system.list_files_in_directory(
            strategy._revision_dir, pattern='^%s-.+$' % group_name)
        self.assertEqual(set(), leftovers,
                         "Some import files from group '%s' "
                         "were not removed." % group_name)
def get_master_ref(self, service, snapshot_info):
    """Capture information from a master node.

    Creates a standby control file, queries the fal_server parameter to
    build the replication database list, tars up the files a slave
    needs (control file backup, password file, oratab, config file),
    and returns them Base64-encoded together with this host's address.

    :param service:        database service object providing paths,
                           admin info and a DB cursor.
    :param snapshot_info:  accepted for the interface; not used here.
    :returns: dict with 'host', 'db_name', 'db_list' and 'oradata'
              (Base64-encoded tar archive).
    """
    # Name the control-file backup after the first existing control
    # file (shortest/first in sorted order), staged under TMP_DIR.
    ctlfile = path.basename(sorted(
        operating_system.list_files_in_directory(
            service.paths.ctlfile1_dir, recursive=True,
            as_root=True))[0]) + '.bak'
    ctlfile = path.join(TMP_DIR, ctlfile)
    datafile = path.join(TMP_DIR, 'oradata.tar.gz')

    def _cleanup_tmp_files():
        # Best-effort removal of staged artifacts (force=True).
        operating_system.remove(ctlfile, force=True, as_root=True)
        operating_system.remove(datafile, force=True, as_root=True)

    # Clean up any leftovers from a previous run before starting.
    _cleanup_tmp_files()

    with service.cursor(service.admin.database_name) as cursor:
        # Produce a standby control file for the slave to use.
        cursor.execute(str(sql_query.AlterDatabase(
            "CREATE STANDBY CONTROLFILE AS '%s'" % ctlfile)))
        # Read fal_server to learn the other databases in the
        # replication set.
        cursor.execute(str(sql_query.Query(
            columns=['VALUE'], tables=['V$PARAMETER'],
            where=["NAME = 'fal_server'"])))
        row = cursor.fetchone()
        db_list = []
        if row is not None and row[0] is not None:
            db_list = str(row[0]).split(",")
        # This master's database name always leads the list.
        db_list.insert(0, service.admin.database_name)

    # Create a tar file containing files needed for slave creation
    utils.execute_with_timeout('tar', '-Pczvf', datafile, ctlfile,
                               service.paths.orapw_file,
                               service.paths.oratab_file,
                               CONF.get(MANAGER).conf_file,
                               run_as_root=True, root_helper='sudo')
    # Base64-encode the archive so it can travel in the snapshot dict.
    oradata_encoded = operating_system.read_file(
        datafile, codec=stream_codecs.Base64Codec(), as_root=True,
        decode=False)
    _cleanup_tmp_files()

    master_ref = {
        'host': netutils.get_my_ipv4(),
        'db_name': service.admin.database_name,
        'db_list': db_list,
        'oradata': oradata_encoded,
    }
    return master_ref
def _list_all_files(self, root_dir, pattern):
    """Return the non-recursive listing of root_dir filtered by pattern."""
    listing_kwargs = {'recursive': False, 'pattern': pattern}
    return operating_system.list_files_in_directory(
        root_dir, **listing_kwargs)
def _list_all_files(self, root_dir, pattern):
    """Collect files found directly in root_dir that match pattern."""
    matches = operating_system.list_files_in_directory(
        root_dir, recursive=False, pattern=pattern)
    return matches
def _find_config_file(self, name_pattern):
    """Return the matching config file with the shortest path.

    Searches under the version-specific configuration base directory.
    """
    version_base = guestagent_utils.build_file_path(
        self.CONFIG_BASE, self.pg_version[1])
    matches = operating_system.list_files_in_directory(
        version_base, recursive=True, pattern=name_pattern,
        as_root=True)
    by_path_length = sorted(matches, key=len)
    return by_path_length[0]
def _find_revision_file(self, group_name, change_id):
    """Locate the revision file for the given group/change, if any.

    Returns None when no file matches.
    """
    name_pattern = self._build_rev_name_pattern(group_name, change_id)
    candidates = iter(operating_system.list_files_in_directory(
        self._revision_dir, recursive=True, pattern=name_pattern,
        as_root=self._requires_root))
    return next(candidates, None)
def prepare_slave(self, service, snapshot):
    """Prepare the environment needed for starting the slave Oracle
    processes.

    Creates the required directories, unpacks the files captured from
    the master (control file, password file, oratab, config), installs
    the control files, seeds db_name/db_unique_name via a PFILE, and
    configures the listener.

    :param service:  database service object providing paths, admin
                     info and ownership settings.
    :param snapshot: replication snapshot dict; the 'master' entry
                     carries 'db_name' and the Base64-encoded 'oradata'
                     archive.
    """
    master_info = snapshot['master']
    db_name = master_info['db_name']
    # The slave gets a unique name derived from the master's db name
    # plus a random suffix.
    db_unique_name = ('%(db_name)s_%(replica_label)s'
                      % {'db_name': db_name,
                         'replica_label': utils.generate_random_string(6)})
    service.paths.update_db_name(db_name)

    # Create necessary directories and set necessary permissions
    new_dirs = [service.paths.db_data_dir,
                service.paths.db_fast_recovery_logs_dir,
                service.paths.db_fast_recovery_dir,
                service.paths.audit_dir]
    for directory in new_dirs:
        operating_system.create_directory(directory,
                                          service.instance_owner,
                                          service.instance_owner_group,
                                          as_root=True)
    chown_dirs = [service.paths.fast_recovery_area,
                  service.paths.admin_dir]
    for directory in chown_dirs:
        operating_system.chown(directory,
                               service.instance_owner,
                               service.instance_owner_group,
                               as_root=True)

    # Install on the slave the files extracted from the master
    # (e.g. the control, pfile, password, oracle.cnf file, etc.).
    oradata_encoded = master_info['oradata']
    tmp_data_path = path.join(TMP_DIR, 'oradata.tar.gz')
    operating_system.write_file(tmp_data_path, oradata_encoded,
                                codec=stream_codecs.Base64Codec(),
                                encode=False)
    # -P preserves the absolute paths stored in the archive.
    utils.execute_with_timeout('tar', '-Pxzvf', tmp_data_path,
                               run_as_root=True, root_helper='sudo')

    # Put the control file in place
    tmp_ctlfile_path = sorted(
        operating_system.list_files_in_directory(
            TMP_DIR, pattern='*.ctl.bak$', recursive=True,
            as_root=True))[0]
    # Strip the '.bak' suffix to recover the original control file name.
    ctlfile_name = path.basename(tmp_ctlfile_path)[:-4]
    ctlfile1_path = path.join(service.paths.ctlfile1_dir, ctlfile_name)
    operating_system.create_directory(service.paths.ctlfile1_dir,
                                      force=True, user='******',
                                      group='oinstall', as_root=True)
    operating_system.create_directory(service.paths.ctlfile2_dir,
                                      force=True, user='******',
                                      group='oinstall', as_root=True)
    operating_system.move(
        tmp_ctlfile_path, ctlfile1_path, as_root=True)
    # Keep a second copy of the control file in the secondary location.
    operating_system.copy(
        ctlfile1_path, service.paths.ctlfile2_dir, preserve=True,
        as_root=True)

    # Set the db_name and db_unique_name via the PFILE which will be
    # removed later
    operating_system.write_file(service.paths.pfile,
                                "*.db_unique_name='%s'\n"
                                "*.db_name='%s'\n"
                                % (db_unique_name, db_name),
                                as_root=True)
    operating_system.chown(service.paths.pfile,
                           service.instance_owner,
                           service.instance_owner_group,
                           as_root=True, force=True)
    # Drop any cached config so the new names take effect.
    service.admin.delete_conf_cache()
    service.admin.ora_config.db_name = db_name
    service.admin.ora_config.db_unique_name = db_unique_name

    # Set proper permissions on the oratab file
    operating_system.chown(service.paths.oratab_file,
                           service.instance_owner,
                           service.instance_owner_group,
                           as_root=True, force=True)

    # Create the listener.ora file and restart
    service.configure_listener()