def restore_backup(self, context, backup_info, restore_location):
    backup_id = backup_info['id']
    storage_driver = CONF.storage_strategy
    backup_driver = cfg.get_configuration_property('backup_strategy')
    image = cfg.get_configuration_property('backup_docker_image')
    name = 'db_restore'
    volumes = {
        '/var/lib/postgresql/data': {
            'bind': '/var/lib/postgresql/data',
            'mode': 'rw'
        }
    }

    os_cred = (f"--os-token={context.auth_token} "
               f"--os-auth-url={CONF.service_credentials.auth_url} "
               f"--os-tenant-id={context.project_id}")

    command = (
        f'/usr/bin/python3 main.py --nobackup '
        f'--storage-driver={storage_driver} --driver={backup_driver} '
        f'{os_cred} '
        f'--restore-from={backup_info["location"]} '
        f'--restore-checksum={backup_info["checksum"]} '
        f'--pg-wal-archive-dir {WAL_ARCHIVE_DIR}')
    if CONF.backup_aes_cbc_key:
        command = (f"{command} "
                   f"--backup-encryption-key={CONF.backup_aes_cbc_key}")

    LOG.debug('Stop the database and clean up the data before restore '
              'from %s', backup_id)
    self.stop_db()
    for dir in [WAL_ARCHIVE_DIR, self.datadir]:
        operating_system.remove_dir_contents(dir)

    # Start to run restore inside a separate docker container
    LOG.info('Starting to restore backup %s, command: %s', backup_id,
             command)
    output, ret = docker_util.run_container(self.docker_client, image,
                                            name, volumes=volumes,
                                            command=command)
    result = output[-1]
    if not ret:
        msg = f'Failed to run restore container, error: {result}'
        LOG.error(msg)
        raise Exception(msg)

    for dir in [WAL_ARCHIVE_DIR, self.datadir]:
        operating_system.chown(dir, CONF.database_service_uid,
                               CONF.database_service_uid, force=True,
                               as_root=True)
def main():
    log_levels = [
        'docker=WARN',
    ]
    default_log_levels = logging.get_default_log_levels()
    default_log_levels.extend(log_levels)
    logging.set_defaults(default_log_levels=default_log_levels)
    logging.register_options(CONF)

    cfg.parse_args(sys.argv)
    logging.setup(CONF, None)
    debug_utils.setup()

    from trove.guestagent import dbaas
    manager = dbaas.datastore_registry().get(CONF.datastore_manager)
    if not manager:
        msg = (_("Manager class not registered for datastore manager %s") %
               CONF.datastore_manager)
        raise RuntimeError(msg)
    if not CONF.guest_id:
        msg = (_("The guest_id parameter is not set. guest_info.conf "
                 "was not injected into the guest or not read by guestagent"))
        raise RuntimeError(msg)

    # Create user and group for running docker container.
    LOG.info('Creating user and group for database service')
    uid = cfg.get_configuration_property('database_service_uid')
    operating_system.create_user('database', uid)

    # Mount device if needed.
    # When doing rebuild, the device should be already formatted but not
    # mounted.
    device_path = CONF.get(CONF.datastore_manager).device_path
    mount_point = CONF.get(CONF.datastore_manager).mount_point
    device = volume.VolumeDevice(device_path)
    if not device.mount_points(device_path):
        LOG.info('Preparing the storage for %s, mount path %s',
                 device_path, mount_point)

        device.format()
        device.mount(mount_point)
        operating_system.chown(mount_point, CONF.database_service_uid,
                               CONF.database_service_uid,
                               recursive=True, as_root=True)

    # rpc module must be loaded after decision about thread monkeypatching
    # because if thread module is not monkeypatched we can't use eventlet
    # executor from oslo_messaging library.
    from trove import rpc
    rpc.init(CONF)

    from trove.common.rpc import service as rpc_service
    server = rpc_service.RpcService(
        key=CONF.instance_rpc_encr_key,
        topic="guestagent.%s" % CONF.guest_id,
        manager=manager, host=CONF.guest_id,
        rpc_api_version=guest_api.API.API_LATEST_VERSION)

    launcher = openstack_service.launch(CONF, server,
                                        restart_method='mutate')
    launcher.wait()
def get_datastore_log_defs(self):
    owner = cfg.get_configuration_property('database_service_uid')
    datastore_dir = self.app.get_data_dir()
    long_query_time = CONF.get(self.manager).get(
        'guest_log_long_query_time')
    general_log_file = self.build_log_file_name(
        self.GUEST_LOG_DEFS_GENERAL_LABEL, owner,
        datastore_dir=datastore_dir)
    general_log_dir, general_log_filename = os.path.split(general_log_file)
    return {
        self.GUEST_LOG_DEFS_GENERAL_LABEL: {
            self.GUEST_LOG_TYPE_LABEL: guest_log.LogType.USER,
            self.GUEST_LOG_USER_LABEL: owner,
            self.GUEST_LOG_FILE_LABEL: general_log_file,
            self.GUEST_LOG_ENABLE_LABEL: {
                'logging_collector': True,
                'log_destination': 'stderr',
                'log_directory': general_log_dir,
                'log_filename': general_log_filename,
                'log_statement': 'all',
                'debug_print_plan': True,
                'log_min_duration_statement': long_query_time,
            },
            self.GUEST_LOG_DISABLE_LABEL: {
                'logging_collector': False,
            },
            self.GUEST_LOG_RESTART_LABEL: True,
        },
    }
def __init__(self, status, docker_client):
    super(PgSqlApp, self).__init__(status, docker_client)

    # See
    # https://github.com/docker-library/docs/blob/master/postgres/README.md#pgdata
    mount_point = cfg.get_configuration_property('mount_point')
    self.datadir = f"{mount_point}/data/pgdata"
    self.adm = PgSqlAdmin(SUPER_USER_NAME)
def rebalance_cluster(self, added_nodes=None, removed_nodes=None,
                      enabled_services=None):
    enabled_services = (enabled_services or
                        cfg.get_configuration_property('default_services'))
    LOG.info(_("Enabling Couchbase services: %s") % enabled_services)
    self.build_admin().run_rebalance(added_nodes, removed_nodes,
                                     enabled_services)
def clean_wal_archives(self, context):
    """Clean up the wal archives to free up disk space."""
    archive_path = service.WAL_ARCHIVE_DIR
    data_path = cfg.get_configuration_property('mount_point')

    if not operating_system.exists(archive_path, is_directory=True,
                                   as_root=True):
        return

    self._clean_wals(archive_path, data_path)
def restore_backup(self, context, backup_info, restore_location):
    backup_id = backup_info['id']
    storage_driver = CONF.storage_strategy
    backup_driver = cfg.get_configuration_property('backup_strategy')
    user_token = context.auth_token
    auth_url = CONF.service_credentials.auth_url
    user_tenant = context.project_id
    image = CONF.backup_docker_image
    name = 'db_restore'
    volumes = {'/var/lib/mysql': {'bind': '/var/lib/mysql', 'mode': 'rw'}}

    command = (
        f'/usr/bin/python3 main.py --nobackup '
        f'--storage-driver={storage_driver} --driver={backup_driver} '
        f'--os-token={user_token} --os-auth-url={auth_url} '
        f'--os-tenant-id={user_tenant} '
        f'--restore-from={backup_info["location"]} '
        f'--restore-checksum={backup_info["checksum"]}')

    LOG.debug('Stop the database and clean up the data before restore '
              'from %s', backup_id)
    self.stop_db()
    operating_system.chmod(restore_location,
                           operating_system.FileMode.SET_FULL,
                           as_root=True)
    utils.clean_out(restore_location)

    # Start to run restore inside a separate docker container
    LOG.info('Starting to restore backup %s, command: %s', backup_id,
             command)
    output, ret = docker_util.run_container(self.docker_client, image,
                                            name, volumes=volumes,
                                            command=command)
    result = output[-1]
    if not ret:
        msg = f'Failed to run restore container, error: {result}'
        LOG.error(msg)
        raise Exception(msg)

    LOG.debug('Deleting ib_logfile files after restore from backup %s',
              backup_id)
    operating_system.chown(restore_location, CONF.database_service_uid,
                           CONF.database_service_uid, force=True,
                           as_root=True)
    self.wipe_ib_logfiles()
def apply_initial_guestagent_configuration(self):
    """Update guestagent-controlled configuration properties."""
    LOG.debug("Applying initial guestagent configuration.")
    file_locations = {
        'data_directory': self._quote(self.pgsql_data_dir),
        'hba_file': self._quote(self.pgsql_hba_config),
        'ident_file': self._quote(self.pgsql_ident_config),
        'external_pid_file': self._quote(self.pgsql_pid_file),
        'unix_socket_directories': self._quote(self.pgsql_run_dir),
        'listen_addresses': self._quote(','.join(self.LISTEN_ADDRESSES)),
        'port': cfg.get_configuration_property('postgresql_port')}
    self.configuration_manager.apply_system_override(file_locations)
    self._apply_access_rules()
def get_backup_image(self):
    """Get the actual container image based on datastore version.

    For example, this method converts
    openstacktrove/db-backup-mysql:1.0.0 to
    openstacktrove/db-backup-mysql5.7:1.0.0
    """
    image = cfg.get_configuration_property('backup_docker_image')
    name, tag = image.rsplit(':', 1)

    # Get minor version
    cur_ver = semantic_version.Version.coerce(CONF.datastore_version)
    minor_ver = f"{cur_ver.major}.{cur_ver.minor}"

    return f"{name}{minor_ver}:{tag}"
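# Illustrative only: a standalone sketch (not part of the Trove source) of how
# get_backup_image() derives the versioned image name. The image string and
# datastore version below are hypothetical example values.
import semantic_version

example_image = 'openstacktrove/db-backup-mysql:1.0.0'
example_name, example_tag = example_image.rsplit(':', 1)
example_ver = semantic_version.Version.coerce('5.7.29')
example_minor = f"{example_ver.major}.{example_ver.minor}"
print(f"{example_name}{example_minor}:{example_tag}")
# -> openstacktrove/db-backup-mysql5.7:1.0.0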
def get_datastore_log_defs(self):
    owner = cfg.get_configuration_property('database_service_uid')
    datastore_dir = self.app.get_data_dir()
    server_section = configurations.MySQLConfParser.SERVER_CONF_SECTION
    long_query_time = CONF.get(self.manager).get(
        'guest_log_long_query_time') / 1000
    general_log_file = self.build_log_file_name(
        self.GUEST_LOG_DEFS_GENERAL_LABEL, owner,
        datastore_dir=datastore_dir)
    error_log_file = self.validate_log_file('/var/log/mysqld.log', owner)
    slow_query_log_file = self.build_log_file_name(
        self.GUEST_LOG_DEFS_SLOW_QUERY_LABEL, owner,
        datastore_dir=datastore_dir)
    return {
        self.GUEST_LOG_DEFS_GENERAL_LABEL: {
            self.GUEST_LOG_TYPE_LABEL: guest_log.LogType.USER,
            self.GUEST_LOG_USER_LABEL: owner,
            self.GUEST_LOG_FILE_LABEL: general_log_file,
            self.GUEST_LOG_SECTION_LABEL: server_section,
            self.GUEST_LOG_ENABLE_LABEL: {
                'general_log': 'on',
                'general_log_file': general_log_file,
                'log_output': 'file',
            },
            self.GUEST_LOG_DISABLE_LABEL: {
                'general_log': 'off',
            },
        },
        self.GUEST_LOG_DEFS_SLOW_QUERY_LABEL: {
            self.GUEST_LOG_TYPE_LABEL: guest_log.LogType.USER,
            self.GUEST_LOG_USER_LABEL: owner,
            self.GUEST_LOG_FILE_LABEL: slow_query_log_file,
            self.GUEST_LOG_SECTION_LABEL: server_section,
            self.GUEST_LOG_ENABLE_LABEL: {
                'slow_query_log': 'on',
                'slow_query_log_file': slow_query_log_file,
                'long_query_time': long_query_time,
            },
            self.GUEST_LOG_DISABLE_LABEL: {
                'slow_query_log': 'off',
            },
        },
        self.GUEST_LOG_DEFS_ERROR_LABEL: {
            self.GUEST_LOG_TYPE_LABEL: guest_log.LogType.SYS,
            self.GUEST_LOG_USER_LABEL: owner,
            self.GUEST_LOG_FILE_LABEL: error_log_file,
        },
    }
def get_backup_strategy(self):
    """Get backup strategy.

    innobackupex was removed in Percona XtraBackup 8.0, use xtrabackup
    instead.
    """
    strategy = cfg.get_configuration_property('backup_strategy')

    mysql_8 = semantic_version.Version('8.0.0')
    cur_ver = semantic_version.Version.coerce(CONF.datastore_version)
    if cur_ver >= mysql_8:
        strategy = 'xtrabackup'

    return strategy
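# Illustrative only: how the semantic_version comparison above selects the
# backup tool. Using 'innobackupex' as the pre-8.0 value is an assumed example;
# in the real code the default comes from the 'backup_strategy' config option.
import semantic_version

mysql_8_example = semantic_version.Version('8.0.0')
for ver in ('5.7.29', '8.0.20'):
    cur = semantic_version.Version.coerce(ver)
    chosen = 'xtrabackup' if cur >= mysql_8_example else 'innobackupex'
    print(ver, '->', chosen)  # 5.7.29 -> innobackupex, 8.0.20 -> xtrabackup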
def apply_initial_guestagent_configuration(self):
    """Update guestagent-controlled configuration properties."""
    LOG.debug("Applying initial guestagent configuration.")
    file_locations = {
        'data_directory': self._quote(self.pgsql_data_dir),
        'hba_file': self._quote(self.pgsql_hba_config),
        'ident_file': self._quote(self.pgsql_ident_config),
        'external_pid_file': self._quote(self.pgsql_pid_file),
        'unix_socket_directories': self._quote(self.pgsql_run_dir),
        'listen_addresses': self._quote(','.join(self.LISTEN_ADDRESSES)),
        'port': cfg.get_configuration_property('postgresql_port')
    }
    self.configuration_manager.apply_system_override(file_locations)
    self._apply_access_rules()
def _incremental_restore(self, location, checksum):
    """Recursively apply backups from all parents.

    If we are the parent then we restore to the restore_location and
    we apply the logs to the restore_location only.

    Otherwise if we are an incremental we restore to a subfolder to
    prevent stomping on the full restore data. Then we run apply log
    with the '--incremental-dir' flag
    """
    metadata = self.storage.load_metadata(location, checksum)
    incremental_dir = None

    if 'parent_location' in metadata:
        LOG.info(
            _("Restoring parent: %(parent_location)s"
              " checksum: %(parent_checksum)s.") % metadata)

        parent_location = metadata['parent_location']
        parent_checksum = metadata['parent_checksum']
        # Restore parents recursively so backups are applied sequentially
        self._incremental_restore(parent_location, parent_checksum)
        # for *this* backup set the incremental_dir
        # just use the checksum for the incremental path as it is
        # sufficiently unique /var/lib/mysql/<checksum>
        incremental_dir = os.path.join(
            cfg.get_configuration_property('mount_point'), checksum)
        operating_system.create_directory(incremental_dir, as_root=True)
        command = self._incremental_restore_cmd(incremental_dir)
    else:
        # The parent (full backup) uses the same command from the
        # InnobackupEx superclass and does not set an incremental_dir.
        command = self.restore_cmd

    self.content_length += self._unpack(location, checksum, command)
    self._incremental_prepare(incremental_dir)

    # Delete unpacked incremental backup metadata
    if incremental_dir:
        operating_system.remove(incremental_dir, force=True, as_root=True)
def _incremental_restore(self, location, checksum):
    """Recursively apply backups from all parents.

    If we are the parent then we restore to the restore_location and
    we apply the logs to the restore_location only.

    Otherwise if we are an incremental we restore to a subfolder to
    prevent stomping on the full restore data. Then we run apply log
    with the '--incremental-dir' flag
    """
    metadata = self.storage.load_metadata(location, checksum)
    incremental_dir = None

    if 'parent_location' in metadata:
        LOG.info(_("Restoring parent: %(parent_location)s"
                   " checksum: %(parent_checksum)s.") % metadata)

        parent_location = metadata['parent_location']
        parent_checksum = metadata['parent_checksum']
        # Restore parents recursively so backups are applied sequentially
        self._incremental_restore(parent_location, parent_checksum)
        # for *this* backup set the incremental_dir
        # just use the checksum for the incremental path as it is
        # sufficiently unique /var/lib/mysql/<checksum>
        incremental_dir = os.path.join(
            cfg.get_configuration_property('mount_point'), checksum)
        operating_system.create_directory(incremental_dir, as_root=True)
        command = self._incremental_restore_cmd(incremental_dir)
    else:
        # The parent (full backup) uses the same command from the
        # InnobackupEx superclass and does not set an incremental_dir.
        command = self.restore_cmd

    self.content_length += self._unpack(location, checksum, command)
    self._incremental_prepare(incremental_dir)

    # Delete unpacked incremental backup metadata
    if incremental_dir:
        operating_system.remove(incremental_dir, force=True, as_root=True)
def generate_random_password(password_length=None, datastore=None,
                             alpha_first=True):
    """Generate and return a random password string.

    :param password_length: Length of password to create. If value is None,
        the default_password_length set in the configuration will be used.
    :param datastore: Datastore name to generate random password for.
        If value is None, default values set in the configuration will be
        used.
    :param alpha_first: Specify whether the generated password should begin
        with a letter.
    :return: A randomly generated password string
    """
    lower_case = 'abcdefghjkmnpqrstuvwxyz'
    upper_case = 'ABCDEFGHJKMNPQRTUVWXYZ'
    numbers = '2346789'
    min_lower_case = cfg.get_configuration_property('password_min_lower_case',
                                                    datastore)
    min_upper_case = cfg.get_configuration_property('password_min_upper_case',
                                                    datastore)
    min_numbers = cfg.get_configuration_property('password_min_numbers',
                                                 datastore)
    min_special_chars = cfg.get_configuration_property(
        'password_min_special_chars', datastore)
    special_chars = cfg.get_configuration_property('password_special_charset',
                                                   datastore)
    password_length = (password_length or cfg.get_configuration_property(
        'default_password_length', datastore))

    choices = [lower_case, upper_case, numbers, special_chars]
    mins = [min_lower_case, min_upper_case, min_numbers, min_special_chars]
    all_choices = (lower_case + upper_case + numbers + special_chars)
    password = bytearray()

    if password_length < 1:
        raise RuntimeError("Length cannot be less than 1")

    total_min = 0
    for index, value in enumerate(mins):
        total_min += value
        if value:
            password.extend(
                passlib_utils.generate_password(
                    size=value, charset=choices[index]).encode('utf-8'))
            if index == 1:
                random.shuffle(password)
    remainder = password_length - total_min

    if total_min > password_length:
        raise RuntimeError("Length cannot be less than %d" % total_min)

    if remainder > 0:
        password.extend(
            passlib_utils.generate_password(
                size=password_length - total_min,
                charset=all_choices).encode('utf-8'))

    if alpha_first:
        last_part = bytearray(password[1:])
        random.shuffle(last_part)
        password = password[:1]
        password.extend(last_part)
    else:
        random.shuffle(password)

    try:
        return password.decode('utf-8')
    except AttributeError:
        return str(password)
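# Illustrative only: a minimal, self-contained sketch of the same composition
# technique using just the standard library. The real function above pulls the
# minimum counts and charsets from Trove configuration via
# cfg.get_configuration_property; all values and names below are assumed
# example values, not part of the Trove source.
import random
import secrets


def sketch_password(length=12, min_lower=1, min_upper=1, min_digits=1):
    lower = 'abcdefghjkmnpqrstuvwxyz'
    upper = 'ABCDEFGHJKMNPQRTUVWXYZ'
    digits = '2346789'

    # Satisfy the per-class minimums first, mixing the letters so the first
    # character is a random letter rather than always lowercase.
    parts = [secrets.choice(lower) for _ in range(min_lower)]
    parts += [secrets.choice(upper) for _ in range(min_upper)]
    random.shuffle(parts)
    parts += [secrets.choice(digits) for _ in range(min_digits)]

    remainder = length - len(parts)
    if remainder < 0:
        raise RuntimeError("Length cannot be less than %d" % len(parts))
    # Fill the remainder from the combined charset.
    parts += [secrets.choice(lower + upper + digits) for _ in range(remainder)]

    # Keep the first (alphabetic) character in place and shuffle the rest,
    # mirroring the alpha_first behaviour.
    head, tail = parts[0], parts[1:]
    random.shuffle(tail)
    return head + ''.join(tail)


print(sketch_password())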
def backup_strategy(self):
    return cfg.get_configuration_property('backup_strategy')
def __init__(self, user):
    port = cfg.get_configuration_property('postgresql_port')
    self.__connection = PostgresLocalhostConnection(user.name, port=port)
def __init__(self, username):
    port = cfg.get_configuration_property('postgresql_port')
    self.connection = PostgresConnection(username, port=port)
def wal_archive_location(self):
    return cfg.get_configuration_property('wal_archive_location')
def get_master_ref(self, service, snapshot_info):
    master_ref = {"host": netutils.get_my_ipv4(),
                  "port": cfg.get_configuration_property("postgresql_port")}
    return master_ref
def create_backup(self, context, backup_info):
    storage_driver = CONF.storage_strategy
    backup_driver = cfg.get_configuration_property('backup_strategy')
    incremental = ''
    backup_type = 'full'
    if backup_info.get('parent'):
        incremental = (
            f'--incremental '
            f'--parent-location={backup_info["parent"]["location"]} '
            f'--parent-checksum={backup_info["parent"]["checksum"]}')
        backup_type = 'incremental'
    backup_id = backup_info["id"]
    image = CONF.backup_docker_image
    name = 'db_backup'
    volumes = {'/var/lib/mysql': {'bind': '/var/lib/mysql', 'mode': 'rw'}}
    admin_pass = self.get_auth_password()
    user_token = context.auth_token
    auth_url = CONF.service_credentials.auth_url
    user_tenant = context.project_id
    metadata = f'datastore:{backup_info["datastore"]},' \
               f'datastore_version:{backup_info["datastore_version"]}'
    command = (
        f'/usr/bin/python3 main.py --backup --backup-id={backup_id} '
        f'--storage-driver={storage_driver} --driver={backup_driver} '
        f'--db-user=os_admin --db-password={admin_pass} '
        f'--db-host=127.0.0.1 '
        f'--os-token={user_token} --os-auth-url={auth_url} '
        f'--os-tenant-id={user_tenant} '
        f'--swift-extra-metadata={metadata} '
        f'{incremental}')

    # Update backup status in db
    conductor = conductor_api.API(context)
    mount_point = CONF.get(CONF.datastore_manager).mount_point
    stats = guestagent_utils.get_filesystem_volume_stats(mount_point)
    backup_state = {
        'backup_id': backup_id,
        'size': stats.get('used', 0.0),
        'state': BackupState.BUILDING,
        'backup_type': backup_type
    }
    conductor.update_backup(CONF.guest_id,
                            sent=timeutils.utcnow_ts(microsecond=True),
                            **backup_state)
    LOG.debug("Updated state for %s to %s.", backup_id, backup_state)

    # Start to run backup inside a separate docker container
    try:
        LOG.info('Starting to create backup %s, command: %s', backup_id,
                 command)
        output, ret = docker_util.run_container(self.docker_client, image,
                                                name, volumes=volumes,
                                                command=command)
        result = output[-1]
        if not ret:
            msg = f'Failed to run backup container, error: {result}'
            LOG.error(msg)
            raise Exception(msg)

        backup_result = BACKUP_LOG.match(result)
        if backup_result:
            backup_state.update({
                'checksum': backup_result.group('checksum'),
                'location': backup_result.group('location'),
                'success': True,
                'state': BackupState.COMPLETED,
            })
        else:
            backup_state.update({
                'success': False,
                'state': BackupState.FAILED,
            })
    except Exception as err:
        LOG.error("Failed to create backup %s", backup_id)
        backup_state.update({
            'success': False,
            'state': BackupState.FAILED,
        })
        raise exception.TroveError(
            "Failed to create backup %s, error: %s" %
            (backup_id, str(err)))
    finally:
        LOG.info("Completed backup %s.", backup_id)
        conductor.update_backup(CONF.guest_id,
                                sent=timeutils.utcnow_ts(microsecond=True),
                                **backup_state)
        LOG.debug("Updated state for %s to %s.", backup_id, backup_state)
def initialize_cluster(self, enabled_services=None):
    enabled_services = (enabled_services or
                        cfg.get_configuration_property('default_services'))
    LOG.info(_("Enabling Couchbase services: %s") % enabled_services)
    self.build_admin().run_cluster_init(self.ramsize_quota_mb,
                                        enabled_services)
def create_backup(self, context, backup_info, volumes_mapping={},
                  need_dbuser=True, extra_params=''):
    storage_driver = CONF.storage_strategy
    backup_driver = self.get_backup_strategy()
    incremental = ''
    backup_type = 'full'
    if backup_info.get('parent'):
        incremental = (
            f'--incremental '
            f'--parent-location={backup_info["parent"]["location"]} '
            f'--parent-checksum={backup_info["parent"]["checksum"]}')
        backup_type = 'incremental'

    name = 'db_backup'
    backup_id = backup_info["id"]
    image = self.get_backup_image()

    os_cred = (f"--os-token={context.auth_token} "
               f"--os-auth-url={CONF.service_credentials.auth_url} "
               f"--os-tenant-id={context.project_id}")

    db_userinfo = ''
    if need_dbuser:
        admin_pass = self.get_auth_password()
        # Use localhost to avoid host access verification.
        db_userinfo = (f"--db-host=localhost --db-user=os_admin "
                       f"--db-password={admin_pass}")

    swift_metadata = (
        f'datastore:{backup_info["datastore"]},'
        f'datastore_version:{backup_info["datastore_version"]}'
    )
    swift_container = (backup_info.get('swift_container') or
                       CONF.backup_swift_container)
    swift_params = (f'--swift-extra-metadata={swift_metadata} '
                    f'--swift-container={swift_container}')

    command = (
        f'/usr/bin/python3 main.py --backup --backup-id={backup_id} '
        f'--storage-driver={storage_driver} --driver={backup_driver} '
        f'{os_cred} '
        f'{db_userinfo} '
        f'{swift_params} '
        f'{incremental} '
        f'{extra_params}'
    )

    # Update backup status in db
    conductor = conductor_api.API(context)
    mount_point = cfg.get_configuration_property('mount_point')
    stats = guestagent_utils.get_filesystem_volume_stats(mount_point)
    backup_state = {
        'backup_id': backup_id,
        'size': stats.get('used', 0.0),
        'state': BackupState.BUILDING,
        'backup_type': backup_type
    }
    conductor.update_backup(CONF.guest_id,
                            sent=timeutils.utcnow_ts(microsecond=True),
                            **backup_state)
    LOG.debug(f"Updated state for backup {backup_id} to {backup_state}")

    # Start to run backup inside a separate docker container
    try:
        LOG.info(f'Starting to create backup {backup_id}, '
                 f'command: {command}')
        output, ret = docker_util.run_container(
            self.docker_client, image, name,
            volumes=volumes_mapping, command=command)
        result = output[-1]
        if not ret:
            msg = f'Failed to run backup container, error: {result}'
            LOG.error(msg)
            raise Exception(msg)

        backup_result = BACKUP_LOG_RE.match(result)
        if backup_result:
            backup_state.update({
                'checksum': backup_result.group('checksum'),
                'location': backup_result.group('location'),
                'success': True,
                'state': BackupState.COMPLETED,
            })
        else:
            msg = f'Cannot parse backup output: {result}'
            LOG.error(msg)
            backup_state.update({
                'success': False,
                'state': BackupState.FAILED,
            })

            # The exception message is visible to the user
            user_msg = msg
            ex_regex = re.compile(r'.+Exception: (.+)')
            for line in output[-5:-1]:
                m = ex_regex.search(line)
                if m:
                    user_msg = m.group(1)
                    break
            raise Exception(user_msg)
    except Exception as err:
        LOG.error("Failed to create backup %s", backup_id)
        backup_state.update({
            'success': False,
            'state': BackupState.FAILED,
        })
        raise exception.TroveError(
            "Failed to create backup %s, error: %s" %
            (backup_id, str(err))
        )
    finally:
        LOG.info("Completed backup %s.", backup_id)
        conductor.update_backup(
            CONF.guest_id,
            sent=timeutils.utcnow_ts(microsecond=True),
            **backup_state)
        LOG.debug("Updated state for %s to %s.", backup_id, backup_state)
def generate_random_password(password_length=None):
    password_length = (
        password_length or
        cfg.get_configuration_property('default_password_length')
    )
    return passlib_utils.generate_password(size=password_length)
def get_master_ref(self, service, snapshot_info):
    master_ref = {
        'host': netutils.get_my_ipv4(),
        'port': cfg.get_configuration_property('postgresql_port')
    }
    return master_ref
def generate_random_password(password_length=None, datastore=None,
                             alpha_first=True):
    """Generate and return a random password string.

    :param password_length: Length of password to create. If value is None,
        the default_password_length set in the configuration will be used.
    :param datastore: Datastore name to generate random password for.
        If value is None, default values set in the configuration will be
        used.
    :param alpha_first: Specify whether the generated password should begin
        with a letter.
    :return: A randomly generated password string
    """
    lower_case = 'abcdefghjkmnpqrstuvwxyz'
    upper_case = 'ABCDEFGHJKMNPQRTUVWXYZ'
    numbers = '2346789'
    min_lower_case = cfg.get_configuration_property(
        'password_min_lower_case', datastore)
    min_upper_case = cfg.get_configuration_property(
        'password_min_upper_case', datastore)
    min_numbers = cfg.get_configuration_property(
        'password_min_numbers', datastore)
    min_special_chars = cfg.get_configuration_property(
        'password_min_special_chars', datastore)
    special_chars = cfg.get_configuration_property(
        'password_special_charset', datastore)
    password_length = (
        password_length or
        cfg.get_configuration_property('default_password_length', datastore)
    )

    choices = [lower_case, upper_case, numbers, special_chars]
    mins = [min_lower_case, min_upper_case, min_numbers, min_special_chars]
    all_choices = (lower_case + upper_case + numbers + special_chars)
    password = bytearray()

    if password_length < 1:
        raise RuntimeError("Length cannot be less than 1")

    total_min = 0
    for index, value in enumerate(mins):
        total_min += value
        if value:
            password.extend(passlib_utils.generate_password(
                size=value, charset=choices[index]).encode('utf-8'))
            if index == 1:
                random.shuffle(password)
    remainder = password_length - total_min

    if total_min > password_length:
        raise RuntimeError("Length cannot be less than %d" % total_min)

    if remainder > 0:
        password.extend(passlib_utils.generate_password(
            size=password_length - total_min, charset=all_choices)
            .encode('utf-8'))

    if alpha_first:
        last_part = bytearray(password[1:])
        random.shuffle(last_part)
        password = password[:1]
        password.extend(last_part)
    else:
        random.shuffle(password)

    try:
        return password.decode('utf-8')
    except AttributeError:
        return str(password)
def load_controller(cls, manager):
    clazz = cfg.get_configuration_property(cls.NAME, manager)
    LOG.debug("Loading controller class: %s" % clazz)
    controller = import_class(clazz)
    return controller()
def generate_random_password(password_length=None):
    password_length = (
        password_length or
        cfg.get_configuration_property('default_password_length'))
    return passlib_utils.generate_password(size=password_length)
def get_backup_image(self):
    return cfg.get_configuration_property('backup_docker_image')
def start_db(self, update_db=False, ds_version=None, command=None,
             extra_volumes=None):
    """Start and wait for database service."""
    docker_image = CONF.get(CONF.datastore_manager).docker_image
    image = (f'{docker_image}:latest' if not ds_version else
             f'{docker_image}:{ds_version}')
    command = command if command else ''

    try:
        root_pass = self.get_auth_password(file="root.cnf")
    except exception.UnprocessableEntity:
        root_pass = utils.generate_random_password()

    # Get uid and gid
    user = "%s:%s" % (CONF.database_service_uid, CONF.database_service_uid)

    # Create folders for mysql on localhost
    for folder in ['/etc/mysql', '/var/run/mysqld']:
        operating_system.ensure_directory(
            folder, user=CONF.database_service_uid,
            group=CONF.database_service_uid, force=True, as_root=True)

    volumes = {
        "/etc/mysql": {"bind": "/etc/mysql", "mode": "rw"},
        "/var/run/mysqld": {"bind": "/var/run/mysqld", "mode": "rw"},
        "/var/lib/mysql": {"bind": "/var/lib/mysql", "mode": "rw"},
    }
    if extra_volumes:
        volumes.update(extra_volumes)

    # Expose ports
    ports = {}
    tcp_ports = cfg.get_configuration_property('tcp_ports')
    for port_range in tcp_ports:
        for port in port_range:
            ports[f'{port}/tcp'] = port

    try:
        docker_util.start_container(
            self.docker_client, image,
            volumes=volumes,
            network_mode="bridge",
            ports=ports,
            user=user,
            environment={
                "MYSQL_ROOT_PASSWORD": root_pass,
                "MYSQL_INITDB_SKIP_TZINFO": 1,
            },
            command=command)

        # Save root password
        LOG.debug("Saving root credentials to local host.")
        self.save_password('root', root_pass)
    except Exception:
        LOG.exception("Failed to start mysql")
        raise exception.TroveError(_("Failed to start mysql"))

    if not self.status.wait_for_status(
            service_status.ServiceStatuses.HEALTHY,
            CONF.state_change_wait_time, update_db):
        raise exception.TroveError(_("Failed to start mysql"))
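# Illustrative only: how the tcp_ports loop above expands configured port
# ranges into a docker-py style port mapping. The example ranges are
# hypothetical; in the real code they come from the 'tcp_ports' config option.
example_tcp_ports = [range(3306, 3307), range(33060, 33062)]
example_ports = {}
for port_range in example_tcp_ports:
    for port in port_range:
        example_ports[f'{port}/tcp'] = port
print(example_ports)
# {'3306/tcp': 3306, '33060/tcp': 33060, '33061/tcp': 33061}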
def get_backup_strategy(self):
    return cfg.get_configuration_property('backup_strategy')
def generate_random_password(password_length=None):
    password_length = (
        password_length or
        cfg.get_configuration_property('default_password_length'))
    return pwd.genword(length=password_length)