def save_configuration(self, options):
    """Replace the base configuration file with the given contents.

    All existing overrides (system pre/post and user) are removed first.

    :param options        Contents of the configuration file.
    :type options         string or dict
    """
    if isinstance(options, dict):
        # Serialize a dict of options for writing.
        self.save_configuration(self._codec.serialize(options))
        return

    # Drop every override group before rewriting the base file.
    for group in (self.USER_GROUP,
                  self.SYSTEM_PRE_USER_GROUP,
                  self.SYSTEM_POST_USER_GROUP):
        self._override_strategy.remove(group)

    operating_system.write_file(
        self._base_config_path, options, as_root=self._requires_root)
    operating_system.chown(
        self._base_config_path, self._owner, self._group,
        as_root=self._requires_root)
    operating_system.chmod(
        self._base_config_path, FileMode.ADD_READ_ALL,
        as_root=self._requires_root)

    self.refresh_cache()
def prepare(self, context, packages, databases, memory_mb, users,
            device_path=None, mount_point=None, backup_info=None,
            config_contents=None, root_password=None, overrides=None,
            cluster_config=None, snapshot=None):
    """Makes ready DBAAS on a Guest container."""
    MySqlAppStatus.get().begin_install()
    # status end_mysql_install set with secure()
    app = MySqlApp(MySqlAppStatus.get())
    app.install_if_needed(packages)

    if device_path:
        # stop and do not update database
        app.stop_db()
        vol = volume.VolumeDevice(device_path)
        # unmount if device is already mounted
        vol.unmount_device(device_path)
        vol.format()
        if os.path.exists(mount_point):
            # rsync existing data to a "data" sub-directory
            # on the new volume
            vol.migrate_data(mount_point, target_subdir="data")
        # mount the volume
        vol.mount(mount_point)
        operating_system.chown(mount_point, 'mysql', 'mysql',
                               recursive=False, as_root=True)
        LOG.debug("Mounted the volume at %s." % mount_point)
        # We need to temporarily update the default my.cnf so that
        # mysql will start after the volume is mounted. Later on it
        # will be changed based on the config template and restart.
        app.update_overrides("[mysqld]\ndatadir=%s/data\n" % mount_point)
        app.start_mysql()

    if backup_info:
        self._perform_restore(backup_info, context,
                              mount_point + "/data", app)

    LOG.debug("Securing MySQL now.")
    app.secure(config_contents, overrides)

    # Root handling depends on whether we restored from a backup that
    # already had root enabled.
    enable_root_on_restore = (backup_info and
                              MySqlAdmin().is_root_enabled())
    if root_password and not backup_info:
        app.secure_root(secure_remote_root=True)
        MySqlAdmin().enable_root(root_password)
    elif enable_root_on_restore:
        app.secure_root(secure_remote_root=False)
        MySqlAppStatus.get().report_root(context, 'root')
    else:
        app.secure_root(secure_remote_root=True)

    app.complete_install_or_restart()

    if databases:
        self.create_database(context, databases)
    if users:
        self.create_user(context, users)
    if snapshot:
        self.attach_replica(context, snapshot, snapshot['config'])

    LOG.info(_('Completed setup of MySQL database instance.'))
def write_config(self, config_contents,
                 execute_function=utils.execute_with_timeout,
                 mkstemp_function=tempfile.mkstemp,
                 unlink_function=os.unlink):
    """Install the given contents as the Cassandra configuration file.

    The contents are first written to a securely created temp file and
    then moved into place; the temp file is removed only on failure.
    """
    # mkstemp() sets os.O_EXCL on the open() call and creates the file
    # with 600 permissions by default.
    (conf_fd, conf_path) = mkstemp_function()
    LOG.debug("Storing temporary configuration at %s." % conf_path)
    # Write the config and move the file into place. Only unlink when
    # something went wrong; in the normal course the move consumes it.
    try:
        os.write(conf_fd, config_contents)
        operating_system.move(conf_path, system.CASSANDRA_CONF,
                              as_root=True)
        # TODO(denis_makogon): figure out the dynamic way to discover
        # configs owner since it can cause errors if there is
        # no cassandra user in operating system
        operating_system.chown(system.CASSANDRA_CONF,
                               "cassandra", "cassandra",
                               recursive=False, as_root=True)
        operating_system.chmod(system.CASSANDRA_CONF,
                               FileMode.ADD_READ_ALL, as_root=True)
    except Exception:
        LOG.exception(
            _("Exception generating Cassandra configuration %s.")
            % conf_path)
        unlink_function(conf_path)
        raise
    finally:
        os.close(conf_fd)
    LOG.info(_("Wrote new Cassandra configuration."))
def initial_setup(self):
    """Run first-boot node and cluster initialization for Couchbase."""
    self.ip_address = netutils.get_my_ipv4()
    mount_point = CONF.couchbase.mount_point
    try:
        LOG.info(_('Couchbase Server change data dir path.'))
        operating_system.chown(mount_point, 'couchbase', 'couchbase',
                               as_root=True)
        admin_pwd = CouchbaseRootAccess.get_password()
        utils.execute_with_timeout(
            (system.cmd_node_init % {'data_path': mount_point,
                                     'IP': self.ip_address,
                                     'PWD': admin_pwd}),
            shell=True)
        operating_system.remove(system.INSTANCE_DATA_DIR, force=True,
                                as_root=True)
        LOG.debug('Couchbase Server initialize cluster.')
        utils.execute_with_timeout(
            (system.cmd_cluster_init % {'IP': self.ip_address,
                                        'PWD': admin_pwd}),
            shell=True)
        # Tune kernel swappiness for Couchbase, now and persistently.
        utils.execute_with_timeout(system.cmd_set_swappiness, shell=True)
        utils.execute_with_timeout(system.cmd_update_sysctl_conf,
                                   shell=True)
        LOG.info(_('Couchbase Server initial setup finished.'))
    except exception.ProcessExecutionError:
        LOG.exception(_('Error performing initial Couchbase setup.'))
        raise RuntimeError("Couchbase Server initial setup failed")
def do_prepare(self, context, packages, databases, memory_mb, users,
               device_path, mount_point, backup_info, config_contents,
               root_password, overrides, cluster_config, snapshot):
    """This is called from prepare in the base class."""
    if device_path:
        device = volume.VolumeDevice(device_path)
        # unmount if device is already mounted
        device.unmount_device(device_path)
        device.format()
        device.mount(mount_point)
        operating_system.chown(mount_point, 'redis', 'redis',
                               as_root=True)
        LOG.debug('Mounted the volume.')

    self._app.install_if_needed(packages)
    LOG.info(_('Writing redis configuration.'))

    if cluster_config:
        # Augment the rendered config with the cluster directives.
        config_contents = (config_contents + "\n"
                           + "cluster-enabled yes\n"
                           + "cluster-config-file cluster.conf\n")
    self._app.configuration_manager.save_configuration(config_contents)
    self._app.apply_initial_guestagent_configuration()

    if backup_info:
        persistence_dir = self._app.get_working_dir()
        self._perform_restore(backup_info, context, persistence_dir,
                              self._app)
    else:
        # If we're not restoring, we have to force a restart of the
        # server manually so that the configuration stuff takes effect
        self._app.restart()

    if snapshot:
        self.attach_replica(context, snapshot, snapshot['config'])
def _write_standby_recovery_file(self, service, snapshot,
                                 sslmode="prefer"):
    """Write recovery.conf so this node replicates from the master
    described in *snapshot*.
    """
    LOG.info("Snapshot data received:" + str(snapshot))
    logging_config = snapshot["log_position"]

    dsn_params = {
        "host": snapshot["master"]["host"],
        "port": snapshot["master"]["port"],
        "repl_user": logging_config["replication_user"]["name"],
        "password": logging_config["replication_user"]["password"],
        "sslmode": sslmode,
    }
    conninfo = ("host=%(host)s "
                "port=%(port)s "
                "dbname=os_admin "
                "user=%(repl_user)s "
                "password=%(password)s "
                "sslmode=%(sslmode)s " % dsn_params)

    lines = [
        "standby_mode = 'on'\n",
        "primary_conninfo = '" + conninfo + "'\n",
        "trigger_file = '/tmp/postgresql.trigger'\n",
        "recovery_target_timeline='latest'\n",
    ]
    recovery_conf = "".join(lines)

    operating_system.write_file(
        service.pgsql_recovery_config, recovery_conf,
        codec=stream_codecs.IdentityCodec(), as_root=True)
    operating_system.chown(
        service.pgsql_recovery_config, user=service.pgsql_owner,
        group=service.pgsql_owner, as_root=True)
def _run_restore(self):
    """Restore an Oracle database from the backup at self.location."""
    metadata = self.storage.load_metadata(self.location, self.checksum)
    self.db_name = metadata['db_name']

    # Re-create the directory tree the restored database expects.
    for path in (ORA_FAST_RECOVERY_PATH,
                 ORA_AUDIT_PATH % {'db': self.db_name},
                 ORA_FAST_RECOVERY_PATH + '/' + self.db_name,
                 ORA_DATA_PATH + '/' + self.db_name):
        operating_system.create_directory(path, user='******',
                                          group='oinstall', force=True,
                                          as_root=True)

    # the backup set will restore directly to ORADATA/backupset_files
    self._unpack_backup_files(self.location, self.checksum)

    for path in (ORA_BACKUP_PATH, ORA_FAST_RECOVERY_PATH):
        operating_system.chown(path, 'oracle', 'oinstall',
                               recursive=True, force=True, as_root=True)

    self._perform_restore()
    self._perform_recover()
    self._open_database()
def post_restore(self):
    """Finish a CouchDB restore.

    The compressed database files have already been untarred into the
    database directory; fix their ownership and restart the service.
    """
    operating_system.chown(service.COUCHDB_LIB_DIR,
                           "couchdb", "couchdb", as_root=True)
    self.app.restart()
def _write_standby_recovery_file(self, snapshot, sslmode='prefer'):
    """Write recovery.conf pointing this standby at the snapshot master."""
    LOG.info("Snapshot data received:" + str(snapshot))
    logging_config = snapshot['log_position']

    dsn_params = {
        'host': snapshot['master']['host'],
        'port': snapshot['master']['port'],
        'repl_user': logging_config['replication_user']['name'],
        'password': logging_config['replication_user']['password'],
        'sslmode': sslmode,
    }
    conninfo = ('host=%(host)s '
                'port=%(port)s '
                'dbname=os_admin '
                'user=%(repl_user)s '
                'password=%(password)s '
                'sslmode=%(sslmode)s ' % dsn_params)

    recovery_conf = "".join([
        "standby_mode = 'on'\n",
        "primary_conninfo = '" + conninfo + "'\n",
        "trigger_file = '/tmp/postgresql.trigger'\n",
        "recovery_target_timeline='latest'\n",
    ])

    operating_system.write_file(self.PGSQL_RECOVERY_CONFIG,
                                recovery_conf,
                                codec=stream_codecs.IdentityCodec(),
                                as_root=True)
    operating_system.chown(self.PGSQL_RECOVERY_CONFIG,
                           user="******", group="postgres",
                           as_root=True)
def change_ownership(self, mount_point):
    """Hand the Oracle data directory over to the Oracle instance user."""
    LOG.debug("Changing ownership of the Oracle data directory.")
    operating_system.chown(mount_point,
                           system.ORACLE_INSTANCE_OWNER,
                           system.ORACLE_GROUP_OWNER,
                           force=True,
                           as_root=True)
def write_oracle_user_file(self, filepath, contents,
                           filemode=operating_system.FileMode.SET_USR_RW):
    """Write *contents* to *filepath* owned by the Oracle instance user.

    :param filemode    Permissions to apply (default: user read/write).
    """
    operating_system.write_file(filepath, contents, as_root=True)
    operating_system.chown(filepath, INSTANCE_OWNER,
                           INSTANCE_OWNER_GROUP, force=True,
                           as_root=True)
    operating_system.chmod(filepath, filemode, force=True, as_root=True)
def prepare(self, context, packages, databases, memory_mb, users,
            device_path=None, mount_point=None, backup_info=None,
            config_contents=None, root_password=None, overrides=None,
            cluster_config=None, snapshot=None):
    """
    This is called when the trove instance first comes online.
    It is the first rpc message passed from the task manager.
    prepare handles all the base configuration of the redis instance.
    """
    # Bug fix: construct 'app' BEFORE the try-block. The except handler
    # references 'app'; if RedisApp(...) raised inside the try, the
    # handler itself failed with NameError, masking the original error.
    app = RedisApp(RedisAppStatus.get())
    try:
        RedisAppStatus.get().begin_install()
        if device_path:
            device = volume.VolumeDevice(device_path)
            # unmount if device is already mounted
            device.unmount_device(device_path)
            device.format()
            device.mount(mount_point)
            operating_system.chown(mount_point, 'redis', 'redis',
                                   as_root=True)
            LOG.debug('Mounted the volume.')
        app.install_if_needed(packages)
        LOG.info(_('Writing redis configuration.'))
        app.write_config(config_contents)
        app.restart()
        LOG.info(_('Redis instance has been setup and configured.'))
    except Exception:
        LOG.exception(_("Error setting up Redis instance."))
        app.status.set_status(rd_instance.ServiceStatuses.FAILED)
        raise RuntimeError("prepare call has failed.")
def prepare(self, context, packages, databases, memory_mb, users,
            device_path=None, mount_point=None, backup_info=None,
            config_contents=None, root_password=None, overrides=None,
            cluster_config=None, snapshot=None):
    """Makes ready DBAAS on a Guest container."""
    LOG.debug("Preparing MongoDB instance.")
    self.status.begin_install()
    self.app.install_if_needed(packages)
    self.app.stop_db()
    self.app.clear_storage()
    mount_point = system.MONGODB_MOUNT_POINT

    if device_path:
        device = volume.VolumeDevice(device_path)
        # unmount if device is already mounted
        device.unmount_device(device_path)
        device.format()
        if os.path.exists(system.MONGODB_MOUNT_POINT):
            device.migrate_data(mount_point)
        device.mount(mount_point)
        operating_system.chown(mount_point,
                               system.MONGO_USER, system.MONGO_USER,
                               as_root=True)
        LOG.debug("Mounted the volume %(path)s as %(mount)s." %
                  {'path': device_path, "mount": mount_point})

    self.app.secure(cluster_config)
    conf_changes = self.get_config_changes(cluster_config, mount_point)
    config_contents = self.app.update_config_contents(
        config_contents, conf_changes)

    if cluster_config is None:
        # Standalone instance: start up and optionally restore.
        self.app.start_db_with_conf_changes(config_contents)
        if backup_info:
            self._perform_restore(backup_info, context, mount_point,
                                  self.app)
    else:
        instance_type = cluster_config['instance_type']
        if instance_type == "query_router":
            self.app.reset_configuration(
                {'config_contents': config_contents})
            self.app.write_mongos_upstart()
            self.app.status.is_query_router = True
            # don't start mongos until add_config_servers is invoked
        elif instance_type == "config_server":
            self.app.status.is_config_server = True
            self.app.start_db_with_conf_changes(config_contents)
        elif instance_type == "member":
            self.app.start_db_with_conf_changes(config_contents)
        else:
            LOG.error(_("Bad cluster configuration; instance type "
                        "given as %s.") % instance_type)
            self.status.set_status(ds_instance.ServiceStatuses.FAILED)
            return
        self.status.set_status(
            ds_instance.ServiceStatuses.BUILD_PENDING)

    LOG.info(_('Completed setup of MongoDB database instance.'))
def post_restore(self):
    """Finish a MySQL restore: fix ownership, clean up, and start up."""
    self._run_prepare()
    operating_system.chown(self.restore_location, 'mysql', None,
                           force=True, as_root=True)
    self._delete_old_binlogs()
    self.reset_root_password()
    self.app.start_mysql()
def apply(self, group_name, change_id, options):
    """Persist *options* as a revision file for the given group/change.

    A new revision file is created when none exists for this change;
    otherwise the existing file's options are merged and rewritten.
    """
    self._initialize_import_directory()
    revision_file = self._find_revision_file(group_name, change_id)
    if revision_file is None:
        # Create a new file.
        last_revision_index = self._get_last_file_index(group_name)
        name = '%s-%03d-%s' % (group_name, last_revision_index + 1,
                               change_id)
        revision_file = guestagent_utils.build_file_path(
            self._revision_dir, name, self._revision_ext)
    else:
        # Update the existing file.
        current = operating_system.read_file(
            revision_file, codec=self._codec,
            as_root=self._requires_root)
        options = guestagent_utils.update_dict(options, current)

    operating_system.write_file(
        revision_file, options, codec=self._codec,
        as_root=self._requires_root)
    operating_system.chown(
        revision_file, self._owner, self._group,
        as_root=self._requires_root)
    operating_system.chmod(
        revision_file, FileMode.ADD_READ_ALL,
        as_root=self._requires_root)
def _run_pre_backup(self):
    """Create archival contents in dump dir"""
    try:
        est_dump_size = self.estimate_dump_size()
        avail = operating_system.get_bytes_free_on_fs(MONGODB_DBPATH)
        if est_dump_size > avail:
            self.cleanup()
            # TODO(atomic77) Though we can fully recover from this error
            # BackupRunner will leave the trove instance in a BACKUP state
            raise OSError(_("Need more free space to run mongodump, "
                            "estimated %(est_dump_size)s"
                            " and found %(avail)s bytes free ") %
                          {'est_dump_size': est_dump_size,
                           'avail': avail})

        operating_system.create_directory(MONGO_DUMP_DIR, as_root=True)
        operating_system.chown(MONGO_DUMP_DIR, mongo_system.MONGO_USER,
                               mongo_system.MONGO_USER, as_root=True)

        # high timeout here since mongodump can take a long time
        utils.execute_with_timeout(
            'mongodump', '--out', MONGO_DUMP_DIR,
            *(self.app.admin_cmd_auth_params()),
            run_as_root=True, root_helper='sudo',
            timeout=LARGE_TIMEOUT
        )
    except exception.ProcessExecutionError as e:
        LOG.debug("Caught exception when creating the dump")
        self.cleanup()
        raise e
def apply_next(self, options):
    """Write *options* out as the next numbered revision file."""
    next_revision = self.count_revisions() + 1
    revision_file_path = guestagent_utils.build_file_path(
        self._revision_dir, self._base_config_name,
        str(next_revision), self._revision_ext)

    operating_system.write_file(revision_file_path, options,
                                codec=self._codec,
                                as_root=self._requires_root)
    operating_system.chown(revision_file_path,
                           self._owner, self._group,
                           as_root=self._requires_root)
    operating_system.chmod(revision_file_path, FileMode.ADD_READ_ALL,
                           as_root=self._requires_root)
def init_config(self):
    """Initialize the DB2 configuration manager and default config."""
    if not operating_system.exists(MOUNT_POINT, True):
        operating_system.create_directory(MOUNT_POINT,
                                          system.DB2_INSTANCE_OWNER,
                                          system.DB2_INSTANCE_OWNER,
                                          as_root=True)
    # The database manager configuration file - db2systm is stored under
    # the /home/db2inst1/sqllib directory. To update the configuration
    # parameters, DB2 recommends using the command - UPDATE DBM
    # CONFIGURATION commands instead of directly updating the config
    # file. The existing PropertiesCodec implementation has been reused
    # to handle text-file operations. Configuration overrides are
    # implemented using the ImportOverrideStrategy of the guestagent
    # configuration manager.
    LOG.debug("Initialize DB2 configuration")
    revision_dir = (
        guestagent_utils.build_file_path(
            os.path.join(MOUNT_POINT,
                         os.path.dirname(system.DB2_INSTANCE_OWNER)),
            ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
    )
    if not operating_system.exists(FAKE_CFG):
        operating_system.write_file(FAKE_CFG, '', as_root=True)
        operating_system.chown(FAKE_CFG, system.DB2_INSTANCE_OWNER,
                               system.DB2_INSTANCE_OWNER, as_root=True)
    self.configuration_manager = (
        ConfigurationManager(FAKE_CFG, system.DB2_INSTANCE_OWNER,
                             system.DB2_INSTANCE_OWNER,
                             PropertiesCodec(delimiter='='),
                             requires_root=True,
                             override_strategy=ImportOverrideStrategy(
                                 revision_dir, "cnf"))
    )
    # Below we are getting the database manager default configuration
    # and saving it to the DB2_DEFAULT_CFG file. This is done to help
    # with correctly resetting the configurations to the original values
    # when user wants to detach a user-defined configuration group from
    # an instance. DB2 provides a command to reset the database manager
    # configuration parameters (RESET DBM CONFIGURATION) but this
    # command resets all the configuration parameters to the system
    # defaults. When we build a DB2 guest image there are certain
    # configurations parameters like SVCENAME which we set so that the
    # instance can start correctly. Hence resetting this value to the
    # system default will render the instance in an unstable state.
    # Instead, the recommended way for resetting a subset of
    # configuration parameters is to save the output of GET DBM
    # CONFIGURATION of the original configuration and then call UPDATE
    # DBM CONFIGURATION to reset the value.
    #   http://www.ibm.com/support/knowledgecenter/SSEPGG_10.5.0/
    #   com.ibm.db2.luw.admin.cmd.doc/doc/r0001970.html
    if not operating_system.exists(DB2_DEFAULT_CFG):
        run_command(system.GET_DBM_CONFIGURATION % {
            "dbm_config": DB2_DEFAULT_CFG})
    self.process_default_dbm_config()
def store_key(self, key):
    """Store the cluster key."""
    LOG.debug("Storing key for MongoDB cluster.")
    # Stage the key in a temp file, then copy it into place as root and
    # lock the permissions down to the mongo user only.
    with tempfile.NamedTemporaryFile() as f:
        f.write(key)
        f.flush()
        operating_system.copy(f.name, system.MONGO_KEY_FILE,
                              force=True, as_root=True)
    operating_system.chmod(system.MONGO_KEY_FILE,
                           operating_system.FileMode.SET_USR_RO,
                           as_root=True)
    operating_system.chown(system.MONGO_KEY_FILE,
                           system.MONGO_USER, system.MONGO_USER,
                           as_root=True)
def _create_oratab_entry(self):
    """Create in the /etc/oratab file entries for the databases
    being restored"""
    existing = operating_system.read_file(ORATAB_PATH)
    entry = ("\n%(db_name)s:%(ora_home)s:N\n" %
             {'db_name': self.db_name, 'ora_home': ORACLE_HOME})
    operating_system.write_file(ORATAB_PATH, existing + entry,
                                as_root=True)
    operating_system.chown(ORATAB_PATH, 'oracle', 'oinstall',
                           recursive=True, force=True,
                           as_root=True)
def post_restore(self):
    """Updated ownership on the restored files."""
    LOG.debug('Updating ownership of the restored files.')
    operating_system.chown(self.restore_location,
                           self._app.cassandra_owner,
                           self._app.cassandra_owner,
                           recursive=True, force=True, as_root=True)
def do_prepare(self, context, packages, databases, memory_mb, users,
               device_path=None, mount_point=None, backup_info=None,
               config_contents=None, root_password=None, overrides=None,
               cluster_config=None, snapshot=None):
    """This is called from prepare in the base class."""
    self.app.install_if_needed(packages)
    self.app.wait_for_start()
    self.app.stop_db()
    self.app.clear_storage()
    mount_point = system.MONGODB_MOUNT_POINT

    if device_path:
        device = volume.VolumeDevice(device_path)
        # unmount if device is already mounted
        device.unmount_device(device_path)
        device.format()
        if os.path.exists(system.MONGODB_MOUNT_POINT):
            device.migrate_data(mount_point)
        device.mount(mount_point)
        operating_system.chown(mount_point,
                               system.MONGO_USER, system.MONGO_USER,
                               as_root=True)
        LOG.debug("Mounted the volume %(path)s as %(mount)s." %
                  {'path': device_path, "mount": mount_point})

    if config_contents:
        # Save resolved configuration template first.
        self.app.configuration_manager.save_configuration(
            config_contents)

    # Apply guestagent specific configuration changes.
    self.app.apply_initial_guestagent_configuration(
        cluster_config, mount_point)

    if not cluster_config:
        # Create the Trove admin user.
        self.app.secure()

    # Don't start mongos until add_config_servers is invoked,
    # don't start members as they should already be running.
    if not (self.app.is_query_router or self.app.is_cluster_member):
        self.app.start_db(update_db=True)

    if not cluster_config and backup_info:
        self._perform_restore(backup_info, context, mount_point,
                              self.app)
        if service.MongoDBAdmin().is_root_enabled():
            self.app.status.report_root(context, 'root')

    if not cluster_config and root_password:
        LOG.debug('Root password provided. Enabling root.')
        service.MongoDBAdmin().enable_root(root_password)

    if not cluster_config:
        if databases:
            self.create_database(context, databases)
        if users:
            self.create_user(context, users)
def store_key(self, key):
    """Store the cluster key."""
    LOG.debug('Storing key for MongoDB cluster.')
    operating_system.write_file(system.MONGO_KEY_FILE, key,
                                as_root=True)
    # Key file must be readable only by the mongo user.
    operating_system.chmod(system.MONGO_KEY_FILE,
                           operating_system.FileMode.SET_USR_RO,
                           as_root=True)
    operating_system.chown(system.MONGO_KEY_FILE,
                           system.MONGO_USER, system.MONGO_USER,
                           as_root=True)
def _create_oratab_entry(self):
    """Append an oratab entry for the database being restored."""
    oratab = self.app.paths.oratab_file
    entry = "\n%(db_name)s:%(ora_home)s:N\n" % {
        "db_name": self.db_name,
        "ora_home": self.app.paths.oracle_home,
    }
    file_content = operating_system.read_file(oratab, as_root=True)
    operating_system.write_file(oratab, file_content + entry,
                                as_root=True)
    operating_system.chown(oratab,
                           self.app.instance_owner,
                           self.app.instance_owner_group,
                           recursive=True, force=True, as_root=True)
def do_prepare(self, context, packages, databases, memory_mb, users,
               device_path, mount_point, backup_info, config_contents,
               root_password, overrides, cluster_config, snapshot):
    """This is called from prepare in the base class."""
    app = self.mysql_app(self.mysql_app_status.get())
    app.install_if_needed(packages)

    if device_path:
        # stop and do not update database
        app.stop_db(
            do_not_start_on_reboot=self.volume_do_not_start_on_reboot)
        device = volume.VolumeDevice(device_path)
        # unmount if device is already mounted
        device.unmount_device(device_path)
        device.format()
        if os.path.exists(mount_point):
            # rsync existing data to a "data" sub-directory
            # on the new volume
            device.migrate_data(mount_point, target_subdir="data")
        # mount the volume
        device.mount(mount_point)
        operating_system.chown(mount_point, service.MYSQL_OWNER,
                               service.MYSQL_OWNER, recursive=False,
                               as_root=True)
        LOG.debug("Mounted the volume at %s." % mount_point)
        # We need to temporarily update the default my.cnf so that
        # mysql will start after the volume is mounted. Later on it
        # will be changed based on the config template
        # (see MySqlApp.secure()) and restart.
        app.set_data_dir(mount_point + "/data")
        app.start_mysql()

    if backup_info:
        self._perform_restore(backup_info, context,
                              mount_point + "/data", app)

    LOG.debug("Securing MySQL now.")
    app.secure(config_contents)

    enable_root_on_restore = (backup_info and
                              self.mysql_admin().is_root_enabled())
    if enable_root_on_restore:
        app.secure_root(secure_remote_root=False)
        self.mysql_app_status.get().report_root(context, "root")
    else:
        app.secure_root(secure_remote_root=True)

    if snapshot:
        self.attach_replica(context, snapshot, snapshot["config"])
def validate_log_file(self, log_file, owner):
    """Make sure the log file exists and is accessible by owner."""
    if not operating_system.exists(log_file, as_root=True):
        # Create an empty file so ownership/permissions can be applied.
        operating_system.write_file(log_file, '', as_root=True)

    operating_system.chown(log_file, user=owner, group=owner,
                           as_root=True)
    operating_system.chmod(log_file, FileMode.ADD_USR_RW_GRP_RW_OTH_R,
                           as_root=True)
    LOG.debug("Set log file '%s' as readable" % log_file)
    return log_file
def reset_configuration(self, context, configuration):
    """Reset the PgSql configuration file to the one given.

    The configuration parameter is a string containing the full
    configuration file that should be used.
    """
    import os
    import tempfile

    config_location = PGSQL_CONFIG.format(
        version=self._get_psql_version())
    # Security fix: the original wrote to the fixed, predictable path
    # /tmp/pgsql_config, which is vulnerable to symlink attacks in the
    # shared /tmp directory. mkstemp() creates the file with O_EXCL,
    # mode 600 and an unpredictable name.
    fd, temp_path = tempfile.mkstemp()
    LOG.debug("{guest_id}: Writing configuration file to {path}.".format(
        guest_id=CONF.guest_id, path=temp_path))
    try:
        with os.fdopen(fd, "w") as config_file:
            config_file.write(configuration)
    except Exception:
        # Clean up the temp file on failure; on success the move below
        # consumes it.
        os.unlink(temp_path)
        raise
    operating_system.chown(temp_path, "postgres", None,
                           recursive=False, as_root=True)
    operating_system.move(temp_path, config_location, timeout=30,
                          as_root=True)
def save_configuration(self, contents):
    """Write given contents to the base configuration file.
    Remove all existing revisions.

    :param contents        Plain-text contents of the configuration file.
    :type contents         string
    """
    if self._override_strategy:
        # Discard every revision, including the current one.
        self._override_strategy.remove_last(self._current_revision + 1)

    operating_system.write_file(self._base_config_path, contents,
                                as_root=self._requires_root)
    operating_system.chown(self._base_config_path,
                           self._owner, self._group,
                           as_root=self._requires_root)
    operating_system.chmod(self._base_config_path,
                           FileMode.ADD_READ_ALL,
                           as_root=self._requires_root)
def apply_next(self, options):
    """Merge *options* into the base config, backing up the current file
    first so the change can be rolled back.
    """
    next_revision = self.count_revisions() + 1
    backup_path = guestagent_utils.build_file_path(
        self._revision_backup_dir, self._base_config_name,
        str(next_revision), self._BACKUP_EXT)

    # Preserve the pre-change file as a numbered backup.
    operating_system.copy(self._base_config_path, backup_path,
                          force=True, preserve=True,
                          as_root=self._requires_root)

    current = operating_system.read_file(self._base_config_path,
                                         codec=self._codec)
    # update_dict mutates 'current' in place with the new options.
    guestagent_utils.update_dict(options, current)

    operating_system.write_file(self._base_config_path, current,
                                codec=self._codec,
                                as_root=self._requires_root)
    operating_system.chown(self._base_config_path,
                           self._owner, self._group,
                           as_root=self._requires_root)
    operating_system.chmod(self._base_config_path,
                           FileMode.ADD_READ_ALL,
                           as_root=self._requires_root)
def change_permissions(self):
    """
    When CouchDB is installed, a default user 'couchdb' is created.
    Inorder to start/stop/restart CouchDB service as the current OS
    user, add this user to the 'couchdb' group and provide read/
    write access to the 'couchdb' group.
    """
    couchdb_dirs = (COUCHDB_LIB_DIR, COUCHDB_LOG_DIR,
                    COUCHDB_BIN_DIR, COUCHDB_CONFIG_DIR)
    try:
        LOG.debug("Changing permissions.")
        for path in couchdb_dirs:
            operating_system.chown(path, 'couchdb', 'couchdb',
                                   as_root=True)
        for path in couchdb_dirs:
            operating_system.chmod(path, FileMode.ADD_GRP_RW,
                                   as_root=True)
        self.execute_change_permission_commands(
            system.UPDATE_GROUP_MEMBERSHIP)
        LOG.debug("Successfully changed permissions.")
    except exception.ProcessExecutionError:
        LOG.exception(_("Error changing permissions."))
def _create_replication_user(self, service, adm_mgr, pwfile):
    """Create the replication user and password file.

    Unfortunately, to be able to run pg_rewind, we need SUPERUSER,
    not just REPLICATION privilege
    """
    pw = utils.generate_random_password()
    operating_system.write_file(pwfile, pw, as_root=True)
    operating_system.chown(pwfile,
                           user=CONF.database_service_uid,
                           group=CONF.database_service_uid,
                           as_root=True)
    operating_system.chmod(pwfile, FileMode.SET_USR_RWX(),
                           as_root=True)
    LOG.debug(f"File {pwfile} created")

    LOG.debug(f"Creating replication user {REPL_USER}")
    repl_user = models.PostgreSQLUser(name=REPL_USER, password=pw)
    adm_mgr.create_user(repl_user, None,
                        *('REPLICATION', 'SUPERUSER', 'LOGIN'))
    return pw
def post_restore(self):
    """Finish a Redis restore: fix ownership, start, and if AOF was
    temporarily disabled for the restore, rebuild and re-enable it.
    """
    operating_system.chown(self.restore_location,
                           system.REDIS_OWNER, system.REDIS_OWNER,
                           as_root=True)
    self.app.start_db()

    # IF AOF was set, we need to put back the original file
    if self.aof_set:
        wait = self.app.admin.wait_until
        wait('loading', '0', section=self.INFO_PERSISTENCE_SECTION)
        self.app.admin.execute('BGREWRITEAOF')
        wait('aof_rewrite_in_progress', '0',
             section=self.INFO_PERSISTENCE_SECTION)
        self.app.stop_db()
        self.app.configuration_manager.remove_system_override(
            change_id=self.CONF_LABEL_AOF_TEMP_OFF)
        self.app.start_db()
def _create_replication_user(self, service, admin, pwfile):
    """Create the replication user.

    Unfortunately, to be able to run pg_rewind, we need SUPERUSER,
    not just REPLICATION privilege
    """
    pw = utils.generate_random_password()
    operating_system.write_file(pwfile, pw, as_root=True)
    operating_system.chown(pwfile,
                           user=service.pgsql_owner,
                           group=service.pgsql_owner,
                           as_root=True)
    operating_system.chmod(pwfile, FileMode.OCTAL_MODE("0600"),
                           as_root=True)

    repl_user = models.PostgreSQLUser(name=REPL_USER, password=pw)
    admin._create_user(context=None, user=repl_user)
    admin.alter_user(None, repl_user, True, 'REPLICATION', 'LOGIN')
    return pw
def write_recovery_file(self, restore=False):
    """Write a recovery.conf targeting the label from backup metadata.

    :param restore    When True, add a restore_command so archived WAL
                      segments are replayed.
    """
    metadata = self.storage.load_metadata(self.location, self.checksum)
    LOG.info(_("Metadata for backup: %s") % str(metadata))

    recovery_conf = ""
    recovery_conf += ("recovery_target_name = '%s' \n"
                      % metadata['label'])
    recovery_conf += "recovery_target_timeline = '%s' \n" % 1
    if restore:
        recovery_conf += ("restore_command = '" +
                          self.pgsql_restore_cmd + "'\n")

    recovery_file = os.path.join(self.PGSQL_DATA_DIR, 'recovery.conf')
    operating_system.write_file(recovery_file, recovery_conf,
                                codec=stream_codecs.IdentityCodec(),
                                as_root=True)
    operating_system.chown(recovery_file, user=self.PGSQL_OWNER,
                           group=self.PGSQL_OWNER, as_root=True)
def save_configuration(self, contents):
    """Write given contents to the base configuration file.
    Remove all existing revisions.

    :param contents        Plain-text contents of the configuration file.
    :type contents         string
    """
    strategy = self._override_strategy
    if strategy:
        # Remove the current revision and everything before it.
        strategy.remove_last(self._current_revision + 1)

    as_root = self._requires_root
    operating_system.write_file(self._base_config_path, contents,
                                as_root=as_root)
    operating_system.chown(self._base_config_path, self._owner,
                           self._group, as_root=as_root)
    operating_system.chmod(self._base_config_path,
                           FileMode.ADD_READ_ALL, as_root=as_root)
def detach_slave(self, service, for_failover):
    """Touch trigger file in to disable recovery mode"""
    LOG.debug("Detaching slave, use trigger file to disable "
              "recovery mode")
    operating_system.write_file(TRIGGER_FILE, '')
    operating_system.chown(TRIGGER_FILE, user=service.pgsql_owner,
                           group=service.pgsql_owner, as_root=True)

    def _wait_for_failover():
        """Wait until slave has switched out of recovery mode"""
        return not service.pg_is_in_recovery()

    try:
        utils.poll_until(_wait_for_failover, time_out=120)
    except exception.PollTimeOut:
        raise RuntimeError(_("Timeout occurred waiting for slave to exit"
                             "recovery mode"))
def do_prepare(self, context, packages, databases, memory_mb, users,
               device_path=None, mount_point=None, backup_info=None,
               config_contents=None, root_password=None, overrides=None,
               cluster_config=None, snapshot=None):
    """This is called from prepare in the base class."""
    if device_path:
        volume_device = volume.VolumeDevice(device_path)
        # unmount if device is already mounted
        volume_device.unmount_device(device_path)
        volume_device.format()
        volume_device.mount(mount_point)
        operating_system.chown(mount_point, 'redis', 'redis',
                               as_root=True)
        LOG.debug('Mounted the volume.')

    self._app.install_if_needed(packages)
    LOG.info(_('Writing redis configuration.'))

    if cluster_config:
        config_contents = (config_contents + "\n"
                           + "cluster-enabled yes\n"
                           + "cluster-config-file cluster.conf\n")
    self._app.configuration_manager.save_configuration(config_contents)
    self._app.apply_initial_guestagent_configuration()

    if backup_info:
        self._perform_restore(backup_info, context,
                              self._app.get_working_dir(), self._app)
    else:
        # If we're not restoring, we have to force a restart of the
        # server manually so that the configuration stuff takes effect
        self._app.restart()

    if snapshot:
        self.attach_replica(context, snapshot, snapshot['config'])
def do_prepare(self, context, packages, databases, memory_mb, users,
               device_path, mount_point, backup_info, config_contents,
               root_password, overrides, cluster_config, snapshot,
               ds_version=None):
    """Prepare a PostgreSQL guest.

    Creates the data and WAL-archive directories, writes configuration
    and access rules, optionally restores from a backup (creating
    ``recovery.signal`` for a plain restore) or attaches as a replica,
    then starts the server with the managed config file.
    """
    # Data and WAL-archive directories must exist and be owned by the
    # database service user before the server can start.
    operating_system.ensure_directory(self.app.datadir,
                                      user=CONF.database_service_uid,
                                      group=CONF.database_service_uid,
                                      as_root=True)
    operating_system.ensure_directory(service.WAL_ARCHIVE_DIR,
                                      user=CONF.database_service_uid,
                                      group=CONF.database_service_uid,
                                      as_root=True)

    LOG.info('Preparing database config files')
    self.app.configuration_manager.save_configuration(config_contents)
    self.app.set_data_dir(self.app.datadir)
    self.app.update_overrides(overrides)

    # Prepare pg_hba.conf
    self.app.apply_access_rules()
    self.configuration_manager.apply_system_override(
        {'hba_file': service.HBA_CONFIG_FILE})

    # Restore data from backup and reset root password
    if backup_info:
        self.perform_restore(context, self.app.datadir, backup_info)
        if not snapshot:
            # Plain restore (not building a replica): create
            # recovery.signal so the server performs recovery on startup.
            signal_file = f"{self.app.datadir}/recovery.signal"
            operating_system.execute_shell_cmd(
                f"touch {signal_file}", [], shell=True, as_root=True)
            operating_system.chown(signal_file,
                                   CONF.database_service_uid,
                                   CONF.database_service_uid,
                                   force=True, as_root=True)

    if snapshot:
        # This instance is a replica
        self.attach_replica(context, snapshot, snapshot['config'])

    # config_file can only be set on the postgres command line
    command = f"postgres -c config_file={service.CONFIG_FILE}"
    self.app.start_db(ds_version=ds_version, command=command)
def reset_configuration(self, context, configuration):
    """Replace the PgSql configuration file with the supplied text.

    ``configuration`` is a string holding the complete contents of the
    configuration file to install.
    """
    # NOTE(review): a fixed, predictable path under /tmp is race-prone;
    # tempfile would be safer — behavior intentionally preserved here.
    tmp_path = '/tmp/pgsql_config'
    config_location = PGSQL_CONFIG.format(
        version=self._get_psql_version(),
    )
    LOG.debug(
        "{guest_id}: Writing configuration file to /tmp/pgsql_config."
        .format(guest_id=CONF.guest_id)
    )
    # Stage the file in /tmp, fix ownership, then move it into place
    # with elevated privileges.
    with open(tmp_path, 'w+') as config_file:
        config_file.write(configuration)
    operating_system.chown(tmp_path, 'postgres', None,
                           recursive=False, as_root=True)
    operating_system.move(tmp_path, config_location, timeout=30,
                          as_root=True)
def post_upgrade(self, context, upgrade_info):
    """Finish a Cassandra upgrade: remount storage, restore saved state,
    and restart the service.

    :param upgrade_info: dict with the saved pre-upgrade state; may
        contain 'device'/'mount_point' for volume-backed instances, plus
        'home_save' and 'save_etc_dir' backup locations.
    """
    self.app.stop_db()
    if 'device' in upgrade_info:
        # Volume-backed instance: remount the data volume and restore
        # ownership for the Cassandra service user.
        self.mount_volume(context,
                          mount_point=upgrade_info['mount_point'],
                          device_path=upgrade_info['device'],
                          write_to_fstab=True)
        operating_system.chown(path=upgrade_info['mount_point'],
                               user=self.app.cassandra_owner,
                               group=self.app.cassandra_owner,
                               recursive=True, as_root=True)
    # Bring back the home directory and config saved before the upgrade.
    self._restore_home_directory(upgrade_info['home_save'])
    self._restore_directory(upgrade_info['save_etc_dir'],
                            self.app.cassandra_conf_dir)
    self._reset_app()
    self.app.start_db()
    # Rewrite SSTables to the new on-disk format after the upgrade.
    self.app.upgrade_sstables()
    self.app.status.end_restart()
def _apply_access_rules(self): LOG.debug("Applying database access rules.") # Connections to all resources are granted. # # Local access from administrative users is implicitly trusted. # # Remote access from the Trove's account is always rejected as # it is not needed and could be used by malicious users to hijack the # instance. # # Connections from other accounts always require a double-MD5-hashed # password. # # Make the rules readable only by the Postgres service. # # NOTE: The order of entries is important. # The first failure to authenticate stops the lookup. # That is why the 'local' connections validate first. # The OrderedDict is necessary to guarantee the iteration order. access_rules = OrderedDict( [('local', [['all', 'postgres,os_admin', None, 'trust'], ['all', 'all', None, 'md5']]), ('host', [['all', 'postgres,os_admin', '127.0.0.1/32', 'trust'], ['all', 'postgres,os_admin', '::1/128', 'trust'], ['all', 'postgres,os_admin', 'localhost', 'trust'], ['all', 'os_admin', '0.0.0.0/0', 'reject'], ['all', 'os_admin', '::/0', 'reject'], ['all', 'all', '0.0.0.0/0', 'md5'], ['all', 'all', '::/0', 'md5']]) ]) operating_system.write_file(self.pgsql_hba_config, access_rules, PropertiesCodec( string_mappings={'\t': None}), as_root=True) operating_system.chown(self.pgsql_hba_config, self.PGSQL_OWNER, self.PGSQL_OWNER, as_root=True) operating_system.chmod(self.pgsql_hba_config, FileMode.SET_USR_RO, as_root=True)
def post_upgrade(self, context, upgrade_info):
    """Complete a Redis upgrade.

    Stops the old server, remounts the data volume when one exists,
    restores the pre-upgrade home directory and configuration, then
    rebuilds the app handle and restarts the service.
    """
    self._app.stop_db()

    if 'device' in upgrade_info:
        mnt = upgrade_info['mount_point']
        # Volume-backed instance: reattach storage and fix ownership.
        self.mount_volume(context, mount_point=mnt,
                          device_path=upgrade_info['device'],
                          write_to_fstab=True)
        operating_system.chown(path=mnt,
                               user=system.REDIS_OWNER,
                               group=system.REDIS_OWNER,
                               recursive=True, as_root=True)

    # Restore state saved before the upgrade began.
    self._restore_home_directory(upgrade_info['home_save'])
    self._restore_directory(upgrade_info['save_etc_dir'],
                            system.REDIS_CONF_DIR)

    # Recreate the app wrapper against the upgraded installation.
    self._app = service.RedisApp()
    self._app.start_db()
    self._app.status.end_restart()
def __init__(self, status):
    """Initialize the Vertica app wrapper.

    Ensures a placeholder configuration file (FAKE_CFG) exists with the
    right ownership/permissions and builds a ConfigurationManager over it
    using an import-based override strategy.

    :param status: service-status tracker for this guest.
    """
    # Time to wait for a service state change before giving up.
    self.state_change_wait_time = CONF.state_change_wait_time
    self.status = status
    # Overrides live next to the Vertica admin tool on the data mount.
    revision_dir = \
        guestagent_utils.build_file_path(
            os.path.join(MOUNT_POINT,
                         os.path.dirname(system.VERTICA_ADMIN)),
            ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)
    # Vertica has no real base config file to manage, so an empty
    # placeholder is created for the ConfigurationManager to anchor on.
    if not operating_system.exists(FAKE_CFG):
        operating_system.write_file(FAKE_CFG, '', as_root=True)
        operating_system.chown(FAKE_CFG, system.VERTICA_ADMIN,
                               system.VERTICA_ADMIN_GRP, as_root=True)
        operating_system.chmod(FAKE_CFG, FileMode.ADD_GRP_RX_OTH_RX(),
                               as_root=True)
    self.configuration_manager = \
        ConfigurationManager(FAKE_CFG, system.VERTICA_ADMIN,
                             system.VERTICA_ADMIN_GRP,
                             PropertiesCodec(delimiter='='),
                             requires_root=True,
                             override_strategy=ImportOverrideStrategy(
                                 revision_dir, "cnf"))
def _create_tns_file(self, service, dbs):
    """Write the tnsnames file mapping each database's unique name to an
    Oracle connect descriptor.

    :param service: Oracle service wrapper (paths, admin info, owner).
    :param dbs:     iterable of dicts, each with 'db_unique_name' and
                    'host' keys.
    """
    tns_file = service.paths.tns_file
    tns_entries = {}
    for db in dbs:
        # Cleanup: the mapping previously carried an unused 'dbname' key;
        # the format string only references host/port/service_name.
        tns_entry = ('(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)'
                     '(HOST=%(host)s)(PORT=%(port)s))'
                     '(CONNECT_DATA=(SERVICE_NAME=%(service_name)s)))' %
                     {'host': db['host'],
                      'port': CONF.get(MANAGER).listener_port,
                      'service_name': service.admin.database_name})
        tns_entries[db['db_unique_name']] = tns_entry
    operating_system.write_file(tns_file, tns_entries,
                                codec=stream_codecs.KeyValueCodec(),
                                as_root=True)
    operating_system.chown(tns_file,
                           service.instance_owner,
                           service.instance_owner_group,
                           force=True, as_root=True)
def _create_replication_user(self, pwfile):
    """Create the replication user. Unfortunately, to be able to
    run pg_rewind, we need SUPERUSER, not just REPLICATION privilege

    :param pwfile: path where the generated password is stored (owned by
                   the Postgres user, mode 0600).
    :returns: the generated plain-text password.
    """
    pw = utils.generate_random_password()
    # Persist the password for later use (e.g. by replication tooling),
    # readable only by the Postgres service owner.
    operating_system.write_file(pwfile, pw, as_root=True)
    operating_system.chown(pwfile, user=self.PGSQL_OWNER,
                           group=self.PGSQL_OWNER, as_root=True)
    operating_system.chmod(pwfile, FileMode.OCTAL_MODE("0600"),
                           as_root=True)
    # TODO(atomic77) Alter user is swallowing the replication
    # option for some reason -- enable this code when the
    # underlying issue is fixed
    # repl_user = models.PostgreSQLUser(name=REPL_USER,
    #                                   password=REPL_PW)
    # self._create_user(context=None, user=repl_user)
    # self.alter_user(None, repl_user, 'REPLICATION', 'LOGIN')
    # The password is randomly generated above, not user-supplied, so
    # interpolating it into the SQL statement is safe here.
    pgutil.psql("CREATE USER %s SUPERUSER ENCRYPTED "
                "password '%s';" % (REPL_USER, pw))
    return pw
def change_permissions(self):
    """
    When CouchDB is installed, a default user 'couchdb' is
    created. Inorder to start/stop/restart CouchDB service as
    the current OS user, add the current OS user to the 'couchdb'
    group and provide read/write access to the 'couchdb' group.
    """
    try:
        LOG.debug("Changing permissions.")
        # Fix: the loop variable was named 'dir', shadowing the builtin.
        for directory in [
                COUCHDB_LIB_DIR, COUCHDB_LOG_DIR,
                COUCHDB_BIN_DIR, COUCHDB_CONFIG_DIR]:
            operating_system.chown(directory, 'couchdb', 'couchdb',
                                   as_root=True)
            operating_system.chmod(directory, FileMode.ADD_GRP_RW,
                                   as_root=True)

        operating_system.change_user_group(getpass.getuser(), 'couchdb',
                                           as_root=True)
        LOG.debug("Successfully changed permissions.")
    except exception.ProcessExecutionError:
        # Best-effort: log the failure but do not abort the caller.
        LOG.exception(_("Error changing permissions."))
def _run_restore(self):
    """Restore an Oracle database from a stored backup set.

    Reads the database name from the backup metadata, recreates the
    required Oracle directory layout, unpacks the backup set, then runs
    the restore/recover sequence and opens the database.
    """
    metadata = self.storage.load_metadata(self.location, self.checksum)
    self.db_name = metadata['db_name']
    # NOTE(review): the user value '******' appears redacted/masked in
    # this source — confirm the real directory owner (presumably the
    # Oracle instance account) before relying on this code.
    operating_system.create_directory(ORA_FAST_RECOVERY_PATH,
                                      user='******', group='oinstall',
                                      force=True, as_root=True)
    operating_system.create_directory(ORA_AUDIT_PATH %
                                      {'db': self.db_name},
                                      user='******', group='oinstall',
                                      force=True, as_root=True)
    operating_system.create_directory(ORA_FAST_RECOVERY_PATH + '/' +
                                      self.db_name,
                                      user='******', group='oinstall',
                                      force=True, as_root=True)
    operating_system.create_directory(ORA_DATA_PATH + '/' + self.db_name,
                                      user='******', group='oinstall',
                                      force=True, as_root=True)
    # the backup set will restore directly to ORADATA/backupset_files
    self._unpack_backup_files(self.location, self.checksum)
    operating_system.chown(ORA_BACKUP_PATH, 'oracle', 'oinstall',
                           recursive=True, force=True, as_root=True)
    self._perform_restore()
    self._perform_recover()
    self._open_database()
def create_lsnr_file(self):
    """Create the listener.ora file.

    Assembles the SID list, listener address, ADR base and secure
    registration entries, writes the file as root and hands ownership to
    the Oracle instance owner.
    """
    # Build the file as a sequence of entries, then join them.
    parts = []
    parts.append('SID_LIST_LISTENER=(SID_LIST=(SID_DESC='
                 '(GLOBAL_DBNAME=%(db_name)s)'
                 '(ORACLE_HOME=%(ora_home)s)'
                 '(SID_NAME=%(db_name)s)))\n' %
                 {'db_name': self.admin.database_name,
                  'ora_home': self.paths.oracle_home})
    parts.append('LISTENER=(DESCRIPTION_LIST=(DESCRIPTION=(ADDRESS='
                 '(PROTOCOL=TCP)(HOST=%(host)s)(PORT=%(port)s))))\n' %
                 {'host': socket.gethostname(),
                  'port': CONF.get(MANAGER).listener_port})
    parts.append('ADR_BASE_LISTENER=%s\n' % self.paths.oracle_base)
    parts.append('SECURE_REGISTER_LISTENER = (TCP)\n')

    content = ''.join(parts)
    operating_system.write_file(self.paths.lsnr_file, content,
                                as_root=True)
    operating_system.chown(self.paths.lsnr_file,
                           self.instance_owner,
                           self.instance_owner_group,
                           as_root=True)
def _create_lsnr_file(self):
    """Create the listener.ora file"""
    listener_file_name = 'listener.ora'
    listener_path = path.join(ORANET_DIR, listener_file_name)
    # Static SID registration for this database under its ORACLE_HOME.
    content = ('SID_LIST_LISTENER=(SID_LIST=(SID_DESC='
               '(GLOBAL_DBNAME=%(db_name)s)'
               '(ORACLE_HOME=%(ora_home)s)'
               '(SID_NAME=%(db_name)s)))\n' %
               {'db_name': self.ORA_CONF.db_name,
                'ora_home': CONF.get(MANAGER).oracle_home})
    # Listener endpoints: TCP on the configured port plus a local IPC key.
    content += ('LISTENER=(DESCRIPTION_LIST=(DESCRIPTION=(ADDRESS='
                '(PROTOCOL=TCP)(HOST=%(host)s)(PORT=%(port)s))'
                '(ADDRESS=(PROTOCOL=IPC)(KEY=EXTPROC1521))))\n' %
                {'host': socket.gethostname(),
                 'port': CONF.get(MANAGER).listener_port})
    content += ('ADR_BASE_LISTENER=%s\n' % CONF.get(MANAGER).oracle_base)
    operating_system.write_file(listener_path, content, as_root=True)
    # The listener file must belong to the Oracle instance owner.
    operating_system.chown(listener_path,
                           system.ORACLE_INSTANCE_OWNER,
                           system.ORACLE_GROUP_OWNER,
                           as_root=True)
def write_config(self, config_contents,
                 execute_function=utils.execute_with_timeout,
                 mkstemp_function=tempfile.mkstemp,
                 unlink_function=os.unlink):
    """Atomically install a new Cassandra configuration file.

    Writes the contents to a securely created temp file, moves it over
    the live config, then fixes ownership and read permissions. The
    *_function parameters exist to allow injection in tests.

    :param config_contents: bytes to write to the configuration file.
    :raises: re-raises any failure after cleaning up the temp file.
    """
    # first securely create a temp file. mkstemp() will set
    # os.O_EXCL on the open() call, and we get a file with
    # permissions of 600 by default.
    (conf_fd, conf_path) = mkstemp_function()

    LOG.debug('Storing temporary configuration at %s.' % conf_path)

    # write config and close the file, delete it if there is an
    # error. only unlink if there is a problem. In normal course,
    # we move the file.
    try:
        os.write(conf_fd, config_contents)
        operating_system.move(conf_path, system.CASSANDRA_CONF,
                              as_root=True)
        # TODO(denis_makogon): figure out the dynamic way to discover
        # configs owner since it can cause errors if there is
        # no cassandra user in operating system
        operating_system.chown(system.CASSANDRA_CONF,
                               'cassandra', 'cassandra', recursive=False,
                               as_root=True)
        operating_system.chmod(system.CASSANDRA_CONF,
                               FileMode.ADD_READ_ALL, as_root=True)
    except Exception:
        LOG.exception(
            _("Exception generating Cassandra configuration %s.") %
            conf_path)
        unlink_function(conf_path)
        raise
    finally:
        # Close the descriptor in every case; the file itself has either
        # been moved into place or unlinked above.
        os.close(conf_fd)
    LOG.info(_('Wrote new Cassandra configuration.'))
def set_db_to_listen(self, context): """Allow remote connections with encrypted passwords.""" # Using cat to read file due to read permissions issues. out, err = utils.execute_with_timeout( 'sudo', 'cat', PGSQL_HBA_CONFIG.format( version=self._get_psql_version(), ), timeout=30, ) LOG.debug( "{guest_id}: Writing hba file to /tmp/pgsql_hba_config.".format( guest_id=CONF.guest_id, ) ) with open('/tmp/pgsql_hba_config', 'w+') as config_file: config_file.write(out) config_file.write("host all all 0.0.0.0/0 md5\n") operating_system.chown('/tmp/pgsql_hba_config', 'postgres', None, recursive=False, as_root=True) operating_system.move('/tmp/pgsql_hba_config', PGSQL_HBA_CONFIG.format( version=self._get_psql_version(), ), timeout=30, as_root=True)
def post_upgrade(self, context, upgrade_info):
    """Finish a MySQL upgrade: remount storage, restore saved state and
    configuration, then restart the server.

    :param upgrade_info: dict of pre-upgrade state; may include
        'device'/'mount_point' for volume-backed instances, plus
        'home_save', 'save_etc_dir' and 'save_dir' backup locations.
    """
    app = self.mysql_app(self.mysql_app_status.get())
    app.stop_db()
    if 'device' in upgrade_info:
        # Volume-backed instance: reattach the data volume and restore
        # MySQL ownership over it.
        self.mount_volume(context,
                          mount_point=upgrade_info['mount_point'],
                          device_path=upgrade_info['device'],
                          write_to_fstab=True)
        operating_system.chown(path=upgrade_info['mount_point'],
                               user=service.MYSQL_OWNER,
                               group=service.MYSQL_OWNER,
                               recursive=True, as_root=True)

    self._restore_home_directory(upgrade_info['home_save'])

    # The /etc snapshot may be absent; only restore it when saved.
    if operating_system.exists(upgrade_info['save_etc_dir'],
                               is_directory=True, as_root=True):
        self._restore_directory(upgrade_info['save_etc_dir'], "/etc")

    self._restore_directory("%s/." % upgrade_info['save_dir'],
                            "/etc/mysql")

    # Restored config files invalidate the cached configuration state.
    self.configuration_manager.refresh_cache()
    app.start_mysql()
    app.status.end_restart()
def apply_next(self, options):
    """Apply 'options' on top of the base configuration file, backing up
    the current file as a new numbered revision first.

    :param options: dict of settings merged into the current contents.
    """
    revision_num = self.count_revisions() + 1
    old_revision_backup = guestagent_utils.build_file_path(
        self._revision_backup_dir, self._base_config_name,
        str(revision_num), self._BACKUP_EXT)

    # Preserve the current file so this revision can be rolled back.
    operating_system.copy(self._base_config_path, old_revision_backup,
                          force=True, preserve=True,
                          as_root=self._requires_root)

    current = operating_system.read_file(self._base_config_path,
                                         codec=self._codec)
    # Merge the new options into the existing settings (mutates
    # 'current' in place).
    guestagent_utils.update_dict(options, current)
    operating_system.write_file(self._base_config_path, current,
                                codec=self._codec,
                                as_root=self._requires_root)
    operating_system.chown(self._base_config_path, self._owner,
                           self._group, as_root=self._requires_root)
    operating_system.chmod(self._base_config_path, FileMode.ADD_READ_ALL,
                           as_root=self._requires_root)
def change_permissions(self):
    """
    When CouchDB is installed, a default user 'couchdb' is
    created. Inorder to start/stop/restart CouchDB service as
    the current OS user, add this user to the 'couchdb' group
    and provide read/ write access to the 'couchdb' group.
    """
    try:
        LOG.debug("Changing permissions.")
        # Consolidated the four repeated chown/chmod call pairs into one
        # loop over the affected directories (same effect, less
        # duplication; matches the sibling implementation's style).
        for directory in (COUCHDB_LIB_DIR, COUCHDB_LOG_DIR,
                          COUCHDB_BIN_DIR, COUCHDB_CONFIG_DIR):
            operating_system.chown(directory, 'couchdb', 'couchdb',
                                   as_root=True)
            operating_system.chmod(directory, FileMode.ADD_GRP_RW,
                                   as_root=True)

        self.execute_change_permission_commands(
            system.UPDATE_GROUP_MEMBERSHIP)
        LOG.debug("Successfully changed permissions.")
    except exception.ProcessExecutionError:
        # Best-effort: log the failure but do not abort the caller.
        LOG.exception(_("Error changing permissions."))
def prepare(self, context, packages, databases, memory_mb, users,
            device_path=None, mount_point=None, backup_info=None,
            config_contents=None, root_password=None, overrides=None,
            cluster_config=None, snapshot=None):
    """Makes ready DBAAS on a Guest container."""
    LOG.debug("Preparing MongoDB instance.")

    self.status.begin_install()
    self.app.install_if_needed(packages)
    self.app.stop_db()
    self.app.clear_storage()
    mount_point = system.MONGODB_MOUNT_POINT
    if device_path:
        device = volume.VolumeDevice(device_path)
        # unmount if device is already mounted
        device.unmount_device(device_path)
        device.format()
        if os.path.exists(system.MONGODB_MOUNT_POINT):
            # Preserve any existing data by migrating it to the volume.
            device.migrate_data(mount_point)
        device.mount(mount_point)
        operating_system.chown(mount_point,
                               system.MONGO_USER, system.MONGO_USER,
                               as_root=True)
        LOG.debug("Mounted the volume %(path)s as %(mount)s." %
                  {'path': device_path, "mount": mount_point})

    conf_changes = self.get_config_changes(cluster_config, mount_point)
    config_contents = self.app.update_config_contents(
        config_contents, conf_changes)
    if cluster_config is None:
        # Standalone instance: start immediately, then restore if asked.
        self.app.start_db_with_conf_changes(config_contents)
        if backup_info:
            self._perform_restore(backup_info, context,
                                  mount_point, self.app)
    else:
        # Clustered instance: behavior depends on its role.
        if cluster_config['instance_type'] == "query_router":
            self.app.reset_configuration({'config_contents':
                                          config_contents})
            self.app.write_mongos_upstart()
            self.app.status.is_query_router = True
            # don't start mongos until add_config_servers is invoked
        elif cluster_config['instance_type'] == "config_server":
            self.app.status.is_config_server = True
            self.app.start_db_with_conf_changes(config_contents)
        elif cluster_config['instance_type'] == "member":
            self.app.start_db_with_conf_changes(config_contents)
        else:
            LOG.error(_("Bad cluster configuration; instance type "
                        "given as %s.") %
                      cluster_config['instance_type'])
            self.status.set_status(ds_instance.ServiceStatuses.FAILED)
            return
        # Cluster members stay pending until the cluster is assembled.
        self.status.set_status(
            ds_instance.ServiceStatuses.BUILD_PENDING)
    LOG.info(_('Completed setup of MongoDB database instance.'))
def prepare(self, context, packages, databases, memory_mb, users,
            device_path=None, mount_point=None, backup_info=None,
            config_contents=None, root_password=None, overrides=None,
            cluster_config=None, snapshot=None):
    """Makes ready DBAAS on a Guest container."""
    LOG.debug("Preparing MongoDB instance.")

    self.app.status.begin_install()
    self.app.install_if_needed(packages)
    self.app.wait_for_start()
    self.app.stop_db()
    self.app.clear_storage()
    mount_point = system.MONGODB_MOUNT_POINT
    if device_path:
        device = volume.VolumeDevice(device_path)
        # unmount if device is already mounted
        device.unmount_device(device_path)
        device.format()
        if os.path.exists(system.MONGODB_MOUNT_POINT):
            # Preserve any existing data by migrating it to the volume.
            device.migrate_data(mount_point)
        device.mount(mount_point)
        operating_system.chown(mount_point,
                               system.MONGO_USER, system.MONGO_USER,
                               as_root=True)
        LOG.debug("Mounted the volume %(path)s as %(mount)s." %
                  {'path': device_path, "mount": mount_point})

    if config_contents:
        # Save resolved configuration template first.
        self.app.configuration_manager.save_configuration(config_contents)

    # Apply guestagent specific configuration changes.
    self.app.apply_initial_guestagent_configuration(
        cluster_config, mount_point)

    if not cluster_config:
        # Create the Trove admin user.
        self.app.secure()

    # Don't start mongos until add_config_servers is invoked,
    # don't start members as they should already be running.
    if not (self.app.is_query_router or self.app.is_cluster_member):
        self.app.start_db(update_db=True)

    if not cluster_config and backup_info:
        self._perform_restore(backup_info, context, mount_point,
                              self.app)
        if service.MongoDBAdmin().is_root_enabled():
            self.app.status.report_root(context, 'root')

    if not cluster_config and root_password:
        LOG.debug('Root password provided. Enabling root.')
        service.MongoDBAdmin().enable_root(root_password)

    if not cluster_config:
        if databases:
            self.create_database(context, databases)
        if users:
            self.create_user(context, users)

    if cluster_config:
        # Cluster members stay pending until the cluster is assembled.
        self.app.status.set_status(
            ds_instance.ServiceStatuses.BUILD_PENDING)
    else:
        self.app.complete_install_or_restart()

    LOG.info(_('Completed setup of MongoDB database instance.'))
def prepare_slave(self, service, snapshot):
    """Prepare the environment needed for starting the slave Oracle
    processes.

    :param service:  Oracle service wrapper (paths, owner, admin).
    :param snapshot: replication snapshot; snapshot['master'] carries the
        master's db_name and the base64-encoded oradata archive.
    """
    master_info = snapshot['master']
    db_name = master_info['db_name']
    # The standby gets a unique name derived from the master's db_name.
    db_unique_name = ('%(db_name)s_%(replica_label)s' %
                      {'db_name': db_name,
                       'replica_label': utils.generate_random_string(6)})
    service.paths.update_db_name(db_name)

    # Create necessary directories and set necessary permissions
    new_dirs = [service.paths.db_data_dir,
                service.paths.db_fast_recovery_logs_dir,
                service.paths.db_fast_recovery_dir,
                service.paths.audit_dir]
    for directory in new_dirs:
        operating_system.create_directory(directory,
                                          service.instance_owner,
                                          service.instance_owner_group,
                                          as_root=True)

    chown_dirs = [service.paths.fast_recovery_area,
                  service.paths.admin_dir]
    for directory in chown_dirs:
        operating_system.chown(directory,
                               service.instance_owner,
                               service.instance_owner_group,
                               as_root=True)

    # Install on the slave files extracted from the master
    # (e.g. the control, pfile, password, oracle.cnf file ... etc)
    oradata_encoded = master_info['oradata']
    tmp_data_path = path.join(TMP_DIR, 'oradata.tar.gz')
    operating_system.write_file(tmp_data_path, oradata_encoded,
                                codec=stream_codecs.Base64Codec(),
                                encode=False)
    utils.execute_with_timeout('tar', '-Pxzvf', tmp_data_path,
                               run_as_root=True, root_helper='sudo')

    # Put the control file in place
    tmp_ctlfile_path = path.join(TMP_DIR, '%s_stby.ctl' % db_name)
    operating_system.move(tmp_ctlfile_path, service.paths.ctlfile1_file,
                          as_root=True)
    operating_system.copy(service.paths.ctlfile1_file,
                          service.paths.ctlfile2_file,
                          preserve=True, as_root=True)

    # Set the db_name and db_unique_name via the PFILE which will be
    # removed later
    operating_system.write_file(service.paths.pfile,
                                "*.db_unique_name='%s'\n"
                                "*.db_name='%s'\n" %
                                (db_unique_name, db_name),
                                as_root=True)
    operating_system.chown(service.paths.pfile,
                           service.instance_owner,
                           service.instance_owner_group,
                           as_root=True, force=True)

    # Invalidate cached config before recording the new identity.
    service.admin.delete_conf_cache()
    service.admin.ora_config.db_name = db_name
    service.admin.ora_config.db_unique_name = db_unique_name

    # Set proper permissions on the oratab file
    operating_system.chown(service.paths.oratab_file,
                           service.instance_owner,
                           service.instance_owner_group,
                           as_root=True, force=True)

    # Create the listener.ora file and restart
    service.configure_listener()
def prepare(self, context, packages, databases, memory_mb, users,
            device_path=None, mount_point=None, backup_info=None,
            config_contents=None, root_password=None, overrides=None,
            cluster_config=None, snapshot=None):
    """Makes ready DBAAS on a Guest container.

    Installs MySQL, provisions the data volume, optionally restores a
    backup, secures the installation, configures root access, creates
    initial databases/users and attaches as a replica when a snapshot is
    given.
    """
    self.mysql_app_status.get().begin_install()
    # status end_mysql_install set with secure()
    app = self.mysql_app(self.mysql_app_status.get())
    app.install_if_needed(packages)
    if device_path:
        # stop and do not update database
        app.stop_db()
        device = volume.VolumeDevice(device_path)
        # unmount if device is already mounted
        device.unmount_device(device_path)
        device.format()
        if os.path.exists(mount_point):
            # rsync existing data to a "data" sub-directory
            # on the new volume
            device.migrate_data(mount_point, target_subdir="data")
        # mount the volume
        device.mount(mount_point)
        operating_system.chown(mount_point, service_base.MYSQL_OWNER,
                               service_base.MYSQL_OWNER,
                               recursive=False, as_root=True)

        LOG.debug("Mounted the volume at %s." % mount_point)
        # We need to temporarily update the default my.cnf so that
        # mysql will start after the volume is mounted. Later on it
        # will be changed based on the config template
        # (see MySqlApp.secure()) and restart.
        app.set_data_dir(mount_point + '/data')
        app.start_mysql()
    if backup_info:
        self._perform_restore(backup_info, context,
                              mount_point + "/data", app)
    LOG.debug("Securing MySQL now.")
    app.secure(config_contents, overrides)
    enable_root_on_restore = (backup_info and
                              self.mysql_admin().is_root_enabled())
    if root_password and not backup_info:
        # New instance with a requested root password.
        app.secure_root(secure_remote_root=True)
        self.mysql_admin().enable_root(root_password)
    elif enable_root_on_restore:
        # Restored backup already had root enabled; keep and report it.
        app.secure_root(secure_remote_root=False)
        self.mysql_app_status.get().report_root(context, 'root')
    else:
        app.secure_root(secure_remote_root=True)

    app.complete_install_or_restart()

    if databases:
        self.create_database(context, databases)

    if users:
        self.create_user(context, users)

    if snapshot:
        self.attach_replica(context, snapshot, snapshot['config'])

    LOG.info(_('Completed setup of MySQL database instance.'))
def prepare_slave(self, snapshot):
    """Prepare the environment needed for starting the slave Oracle
    processes.

    :param snapshot: replication snapshot; snapshot['master'] carries the
        master's db_name and the base64-encoded oradata archive.
    """
    master_info = snapshot['master']
    db_name = master_info['db_name']
    tmp_dir = '/tmp'
    tmp_data_path = path.join(tmp_dir, 'oradata.tar.gz')
    orabase_path = CONF.get(MANAGER).oracle_base
    orahome_path = CONF.get(MANAGER).oracle_home
    db_data_path = path.join(orabase_path, 'oradata', db_name)
    fast_recovery_path = path.join(orabase_path, 'fast_recovery_area')
    db_fast_recovery_path = path.join(fast_recovery_path, db_name)
    audit_path = path.join(orabase_path, 'admin', db_name, 'adump')
    admin_path = path.join(orabase_path, 'admin')

    # Create necessary directories and set permissions
    directories = [db_data_path, db_fast_recovery_path, audit_path]
    for directory in directories:
        operating_system.create_directory(directory,
                                          system.ORACLE_INSTANCE_OWNER,
                                          system.ORACLE_GROUP_OWNER,
                                          as_root=True)

    operating_system.chown(fast_recovery_path,
                           system.ORACLE_INSTANCE_OWNER,
                           system.ORACLE_GROUP_OWNER, as_root=True)
    operating_system.chown(admin_path,
                           system.ORACLE_INSTANCE_OWNER,
                           system.ORACLE_GROUP_OWNER, as_root=True)

    # Install on the slave files extracted from the master
    # (e.g. the control, pfile, password, oracle.cnf file ... etc)
    oradata = master_info['oradata']
    operating_system.write_file(tmp_data_path, oradata,
                                codec=stream_codecs.Base64Codec())
    utils.execute_with_timeout('tar', '-Pxzvf', tmp_data_path,
                               run_as_root=True, root_helper='sudo')

    # Put the control file in place
    tmp_ctlfile_path = path.join(tmp_dir, '%s_stby.ctl' % db_name)
    ctlfile1_path = path.join(db_data_path, 'control01.ctl')
    ctlfile2_path = path.join(db_fast_recovery_path, 'control02.ctl')
    operating_system.move(tmp_ctlfile_path, ctlfile1_path, as_root=True)
    operating_system.copy(ctlfile1_path, ctlfile2_path,
                          preserve=True, as_root=True)

    # The standby gets a unique name derived from the master's db_name.
    db_unique_name = ('%(db_name)s_%(replica_label)s' %
                      {'db_name': db_name,
                       'replica_label': utils.generate_random_string(6)})

    # Customize the pfile for slave and put it in the right place.
    # The pfile that came from master is owned by the 'oracle' user,
    # so we need to change ownership first before editing it.
    tmp_pfile_path = path.join(tmp_dir, 'init%s_stby.ora' % db_name)
    pfile_path = path.join(orahome_path, 'dbs', 'init%s.ora' % db_name)
    operating_system.chown(tmp_pfile_path, getpass.getuser(), None,
                           as_root=True)
    with open(tmp_pfile_path, 'a') as pfile:
        pfile.write("*.db_unique_name='%s'\n" % db_unique_name)
    # Finished editing pfile, put it in the proper directory and chown
    # back to oracle user and group
    operating_system.move(tmp_pfile_path, pfile_path, force=True,
                          as_root=True)
    operating_system.chown(pfile_path, system.ORACLE_INSTANCE_OWNER,
                           system.ORACLE_GROUP_OWNER, as_root=True)

    self.ORA_CONF.db_name = db_name
    self.ORA_CONF.db_unique_name = db_unique_name

    # Set proper permissions on the oratab file
    operating_system.chown('/etc/oratab', system.ORACLE_INSTANCE_OWNER,
                           system.ORACLE_GROUP_OWNER, as_root=True)

    # Create the listener.ora file
    self._create_lsnr_file()

    # Restart the listener
    utils.execute_with_timeout("sudo", "su", "-", "oracle", "-c",
                               "lsnrctl reload",
                               timeout=CONF.usage_timeout)