def testBackupRestoreRaid(self):
    """Snapshot a RAID-1 array, rebuild it from the snapshot, and verify data via md5."""
    mpoint = '/tmp/mpoint'
    if not os.path.isdir(mpoint):
        os.makedirs(mpoint)
    self.array = Storage.create(type='raid', disks=self.vols, level=1,
                                vg='dbstorage', snap_pv=self.snap_vol, fstype='ext3')
    self.array.mkfs()
    self.array.mount(mpoint)
    # Create big file
    bigfile_path = os.path.join(mpoint, 'bigfile')
    system('dd if=/dev/random of=%s bs=1M count=5' % bigfile_path)
    md5sum = system('/usr/bin/md5sum %s' % bigfile_path)[0].strip().split(' ')[0]
    # Snapshot, tear the array down (including disks), then restore from the snapshot
    array_snap = self.array.snapshot()
    self.array.destroy(remove_disks=True)
    self.array = Storage.create(snapshot=array_snap)
    new_mpoint = '/tmp/mpoint2'
    if not os.path.isdir(new_mpoint):
        os.makedirs(new_mpoint)
    self.array.mount(new_mpoint)
    # The restored array must contain a byte-identical copy of the file
    bigfile_path2 = os.path.join(new_mpoint, 'bigfile')
    md5sum2 = system('/usr/bin/md5sum %s' % bigfile_path2)[0].strip().split(' ')[0]
    self.assertEqual(md5sum, md5sum2)
    self.array.destroy(remove_disks=True)
def testBackupRestoreRaid(self):
    """Backup/restore round-trip for a RAID-1 array: file written before the
    snapshot must survive destroy-and-recreate with an identical checksum."""
    mpoint = '/tmp/mpoint'
    if not os.path.isdir(mpoint):
        os.makedirs(mpoint)
    self.array = Storage.create(type='raid',
                                disks=self.vols,
                                level=1,
                                vg='dbstorage',
                                snap_pv=self.snap_vol,
                                fstype='ext3')
    self.array.mkfs()
    self.array.mount(mpoint)
    # Create big file
    bigfile_path = os.path.join(mpoint, 'bigfile')
    system('dd if=/dev/random of=%s bs=1M count=5' % bigfile_path)
    checksum_before = system('/usr/bin/md5sum %s' % bigfile_path)[0].strip().split(' ')[0]
    snapshot = self.array.snapshot()
    self.array.destroy(remove_disks=True)
    # Recreate from snapshot and mount at a fresh mountpoint
    self.array = Storage.create(snapshot=snapshot)
    restore_mpoint = '/tmp/mpoint2'
    if not os.path.isdir(restore_mpoint):
        os.makedirs(restore_mpoint)
    self.array.mount(restore_mpoint)
    restored_file = os.path.join(restore_mpoint, 'bigfile')
    checksum_after = system('/usr/bin/md5sum %s' % restored_file)[0].strip().split(' ')[0]
    self.assertEqual(checksum_before, checksum_after)
    self.array.destroy(remove_disks=True)
def _testDetachAttachRaid(self):
    """Detach a mounted RAID array, re-attach it from the returned config,
    and verify the stored file checksum is unchanged."""
    mpoint = '/tmp/mpoint'
    if not os.path.isdir(mpoint):
        os.makedirs(mpoint)
    self.array = Storage.create(type='raid', disks=self.vols, level=1,
                                vg='dbstorage', snap_pv=self.snap_vol, fstype='ext3')
    self.array.mkfs()
    self.array.mount(mpoint)
    bigfile_path = os.path.join(mpoint, 'bigfile')
    system('dd if=/dev/random of=%s bs=1M count=5' % bigfile_path)
    md5sum = system('/usr/bin/md5sum %s' % bigfile_path)[0].strip().split(' ')[0]
    self.assertTrue(os.path.ismount(mpoint))
    # Detach returns the config needed to re-assemble the array later
    config = self.array.detach(force=True)
    self.assertFalse(os.path.ismount(mpoint))
    self.assertEqual(self.array.devname, None)
    self.array = Storage.create(**config)
    self.array.mount(mpoint)
    md5sum2 = system('/usr/bin/md5sum %s' % bigfile_path)[0].strip().split(' ')[0]
    self.assertEqual(md5sum, md5sum2)
def test_create_over_disk(self):
    """A volume's backing disk may be passed as a device string or a config dict."""
    # Disk given as a plain device path
    vol = Storage.create(type='myvol', device='/dev/lvolume', disk='/dev/sdb')
    self.assertEqual(vol.disk.devname, '/dev/sdb')
    # Disk given as a full config dict with extra params
    vol = Storage.create(type='myvol', device='/dev/ldevice2',
                         disk=dict(type='myvol', device='/dev/sdb', param1='value1'))
    self.assertEqual(vol.disk.devname, '/dev/sdb')
    self.assertEqual(vol.disk.param1, 'value1')
def test_create_over_disk(self):
    """Backing disk accepted both as a device string and as a dict config."""
    string_disk_vol = Storage.create(type='myvol',
                                     device='/dev/lvolume',
                                     disk='/dev/sdb')
    self.assertEqual(string_disk_vol.disk.devname, '/dev/sdb')
    dict_disk_vol = Storage.create(type='myvol',
                                   device='/dev/ldevice2',
                                   disk=dict(type='myvol',
                                             device='/dev/sdb',
                                             param1='value1'))
    self.assertEqual(dict_disk_vol.disk.devname, '/dev/sdb')
    self.assertEqual(dict_disk_vol.disk.param1, 'value1')
def test_with_ignores(self):
    """Provider classes can declare _ignores to filter keys out of the
    volume config vs the snapshot config."""
    class VolConfig(VolumeConfig):
        vg = None
        base64_whatever = None
        only_in_volume_config = None
        only_in_snapshot_config = None

    class Vol(VolConfig, Volume):
        # BUG FIX: original was ('only_in_snapshot_config') -- parentheses
        # without a trailing comma make a plain string, not a 1-tuple, so
        # any iteration over _ignores walked the string per-character.
        _ignores = ('only_in_snapshot_config',)

    class Snap(VolConfig, Snapshot):
        _ignores = ('only_in_volume_config',)

    class VolPvd(VolumeProvider):
        type = 'mimimi'
        vol_class = Vol
        snap_class = Snap

    Storage.explore_provider(VolPvd)
    vol = Storage.create(type='mimimi', device='/dev/sdo', vg='vg0',
                         only_in_volume_config='4u', only_in_snapshot_config='4s')
    snap = vol.snapshot()
    snap_cnf = snap.config()
    vol_cnf = vol.config()
    # Each side must drop exactly the keys listed in its _ignores
    self.assertFalse('only_in_volume_config' in snap_cnf)
    self.assertEqual(snap_cnf['vg'], 'vg0')
    self.assertFalse('only_in_snapshot_config' in vol_cnf)
    self.assertTrue(vol_cnf['base64_whatever'] is None)
def _testCreateDestroyRaid(self):
    """Create a RAID-1 array, then destroy it together with its member disks."""
    self._logger.info('>>>>>>> Starting Create-Destroy test for raid.')
    self.array = Storage.create(type='raid', disks=self.vols,
                                level=1, vg='dbstorage')
    self.assertTrue(os.path.exists(self.array.raid_pv))
    # Give the kernel a moment to settle before tearing the array down
    time.sleep(2)
    self.array.destroy(force=True, remove_disks=True)
    self._logger.info('>>>>>>> Create-Destroy test successfully finished.')
def test_create_from_snapshot(self):
    """Volumes built from a snapshot dict inherit its fields; explicit kwargs win."""
    vol = Storage.create(snapshot=dict(type='base', device='/dev/sdb',
                                       mpoint='/mnt/dbstorage', fstype='xfs'))
    self.assertEqual(vol.devname, '/dev/sdb')
    self.assertEqual(vol.mpoint, '/mnt/dbstorage')
    # An explicit device overrides the one recorded in the snapshot
    vol = Storage.create(device='/dev/sdd',
                         snapshot=dict(type='myvol', device='/dev/lvol',
                                       param1='value1', param2='value2'))
    self.assertEqual(vol.devname, '/dev/sdd')
    self.assertEqual(vol.type, 'myvol')
    self.assertEqual(vol.param1, 'value1')
def setUp(self):
    """Provision three 1 GB EBS volumes; pop the last one off as the snapshot PV."""
    self.vols = []
    for i in range(3):
        # Loop-device variant kept for reference:
        #system('dd if=/dev/zero of=/tmp/device%s bs=1M count=10' % i)
        #self.vols.append(Storage.create(type='loop', file='/tmp/device%s' % i))
        self.vols.append(Storage.create(type='ebs', size=1, avail_zone='us-east-1a'))
        self._logger.debug("Volume with id '%s' created." % self.vols[-1].id)
    self.snap_vol = self.vols.pop()
def test_1(self):
    """Storage.volume_table() tracks created volumes and their attach state."""
    v1 = Storage.create(device='/dev/sdo')
    v2 = Storage.create(device='/dev/sdm')
    table = Storage.volume_table()
    self.assertEqual(len(table), 2)
    # Row for v1 is present, fully populated and marked attached
    v1row = firstmatched(lambda row: row['device'] == '/dev/sdo', table)
    self.assertTrue(v1row)
    self.assertEqual(v1row['volume_id'], v1.id)
    self.assertEqual(v1row['device'], v1.device)
    self.assertEqual(v1row['type'], v1.type)
    self.assertEqual(v1row['state'], 'attached')
    # Detaching flips the state without removing the row
    v2.detach()
    table = Storage.volume_table()
    self.assertEqual(len(table), 2)
    v2row = firstmatched(lambda row: row['device'] == '/dev/sdm', table)
    self.assertEqual(v2row['state'], 'detached')
def test_create_vol_container(self):
    """A container volume accepts mixed disk specs (device string + config dict)."""
    vol = Storage.create(type='myvol', device='/dev/gp0',
                         disks=('/dev/sdb', dict(type='myvol', device='/dev/sdd')))
    self.assertEqual(len(vol.disks), 2)
    self.assertEqual(vol.disks[0].devname, '/dev/sdb')
    self.assertEqual(vol.disks[1].devname, '/dev/sdd')
    self.assertEqual(vol.disks[1].type, 'myvol')
def test_create_vol_container(self):
    """Container volume: disks declared as a tuple of heterogeneous specs."""
    disk_specs = ('/dev/sdb', dict(type='myvol', device='/dev/sdd'))
    container = Storage.create(type='myvol', device='/dev/gp0', disks=disk_specs)
    self.assertEqual(len(container.disks), 2)
    self.assertEqual(container.disks[0].devname, '/dev/sdb')
    self.assertEqual(container.disks[1].devname, '/dev/sdd')
    self.assertEqual(container.disks[1].type, 'myvol')
def test_1(self):
    """Create an EBS volume bound to an existing id from an EBS snapshot config."""
    vol = Storage.create(type='ebs',
                         id='vol-12345678',
                         snapshot=dict(id='snap-87654321', type='ebs'))
def _testDetachAttachRaid(self):
    """Round-trip a RAID array through detach()/create(**config); file checksum
    must be identical after re-attach."""
    mpoint = '/tmp/mpoint'
    if not os.path.isdir(mpoint):
        os.makedirs(mpoint)
    self.array = Storage.create(type='raid',
                                disks=self.vols,
                                level=1,
                                vg='dbstorage',
                                snap_pv=self.snap_vol,
                                fstype='ext3')
    self.array.mkfs()
    self.array.mount(mpoint)
    bigfile = os.path.join(mpoint, 'bigfile')
    system('dd if=/dev/random of=%s bs=1M count=5' % bigfile)
    checksum = system('/usr/bin/md5sum %s' % bigfile)[0].strip().split(' ')[0]
    self.assertTrue(os.path.ismount(mpoint))
    detach_config = self.array.detach(force=True)
    # After detach the mountpoint and device must be gone
    self.assertFalse(os.path.ismount(mpoint))
    self.assertEqual(self.array.devname, None)
    self.array = Storage.create(**detach_config)
    self.array.mount(mpoint)
    checksum_after = system('/usr/bin/md5sum %s' % bigfile)[0].strip().split(' ')[0]
    self.assertEqual(checksum, checksum_after)
def test_base_volume(self):
    """A base volume's snapshot config mirrors the volume's type, mpoint and device."""
    device = '/dev/sdo'
    mpoint = '/mnt/media-server-flvs'
    fstype = 'ext4'
    vol = Storage.create(device=device, mpoint=mpoint, fstype=fstype)
    snap = vol.snapshot('snap #00')
    snap_cnf = snap.config()
    vol_cnf = vol.config()
    self.assertEqual(snap_cnf['type'], vol.type)
    self.assertEqual(snap_cnf['mpoint'], vol.mpoint)
    self.assertEqual(snap_cnf['device'], vol.device)
def test_1(self):
    """Eph volume exposes disk/tranzit devices and round-trips through config()."""
    self.vol = vol = Storage.create(type='eph', disk=self.device, vg='dbstorage',
                                    fstype='ext3',
                                    snap_backend='my://secretphase/backups')
    self.assertEqual(vol.type, 'eph')
    self.assertTrue(os.path.exists(vol.devname))
    self.assertEqual(vol.disk.devname, self.device)
    self.assertTrue(os.path.exists(vol.tranzit_vol.devname))
    config = self.vol.config()
    self.assertEqual(config['type'], 'eph')
    self.assertTrue(config['size'] is not None)
    self.assertTrue(config['snap_backend'] is not None)
    self.assertTrue(isinstance(config['disk'], dict))
    # Reinitialize storage from config
    eph = Storage.create(config)
    self.assertEqual(eph.device, self.vol.device)
def test_1(self):
    """Eph volume creation: verify device layout, config contents, and that a
    fresh Storage.create(config) resolves back to the same device."""
    created = Storage.create(type='eph',
                             disk=self.device,
                             vg='dbstorage',
                             fstype='ext3',
                             snap_backend='my://secretphase/backups')
    self.vol = vol = created
    self.assertEqual(vol.type, 'eph')
    self.assertTrue(os.path.exists(vol.devname))
    self.assertEqual(vol.disk.devname, self.device)
    self.assertTrue(os.path.exists(vol.tranzit_vol.devname))
    cnf = self.vol.config()
    self.assertEqual(cnf['type'], 'eph')
    self.assertTrue(cnf['size'] is not None)
    self.assertTrue(cnf['snap_backend'] is not None)
    self.assertTrue(isinstance(cnf['disk'], dict))
    # Reinitialize storage from config
    restored = Storage.create(cnf)
    self.assertEqual(restored.device, self.vol.device)
def test_create_from_snapshot(self):
    """Snapshot-dict creation: fields flow from the snapshot unless overridden."""
    snapshot_cnf = dict(type='base', device='/dev/sdb',
                        mpoint='/mnt/dbstorage', fstype='xfs')
    vol = Storage.create(snapshot=snapshot_cnf)
    self.assertEqual(vol.devname, '/dev/sdb')
    self.assertEqual(vol.mpoint, '/mnt/dbstorage')
    # Passing device= explicitly takes precedence over the snapshot's device
    overridden = Storage.create(device='/dev/sdd',
                                snapshot=dict(type='myvol',
                                              device='/dev/lvol',
                                              param1='value1',
                                              param2='value2'))
    self.assertEqual(overridden.devname, '/dev/sdd')
    self.assertEqual(overridden.type, 'myvol')
    self.assertEqual(overridden.param1, 'value1')
def _plug_storage(self, mpoint, vol): if not isinstance(vol, Volume): vol['tags'] = self.redis_tags vol = Storage.create(vol) try: if not os.path.exists(mpoint): os.makedirs(mpoint) if not vol.mounted(): vol.mount(mpoint) except StorageError, e: if 'you must specify the filesystem type' in str(e): vol.mkfs() vol.mount(mpoint) else: raise
def _plug_storage(self, mpoint, vol): if not isinstance(vol, Volume): vol['tags'] = self.postgres_tags vol = Storage.create(vol) try: if not os.path.exists(mpoint): os.makedirs(mpoint) if not vol.mounted(): vol.mount(mpoint) except StorageError, e: ''' XXX: Crapy. We need to introduce error codes from fstool ''' if 'you must specify the filesystem type' in str(e): vol.mkfs() vol.mount(mpoint) else: raise
def on_init(self):
    """Subscribe lifecycle handlers; if the scalarizr is already RUNNING,
    restore the storage volume and bring the redis processes up."""
    bus.on("host_init_response", self.on_host_init_response)
    bus.on("before_host_up", self.on_before_host_up)
    bus.on("before_reboot_start", self.on_before_reboot_start)
    bus.on("before_reboot_finish", self.on_before_reboot_finish)
    if self._cnf.state == ScalarizrState.RUNNING:
        # Re-plug persistent storage from the saved volume config
        storage_conf = Storage.restore_config(self._volume_config_path)
        storage_conf['tags'] = self.redis_tags
        self.storage_vol = Storage.create(storage_conf)
        if not self.storage_vol.mounted():
            self.storage_vol.mount()
        # Restart redis with the previously configured role/persistence
        self.redis_instances = redis.RedisInstances(self.is_replication_master,
                                                    self.persistence_type)
        self.redis_instances.init_processes(ports=[redis.DEFAULT_PORT, ],
                                            passwords=[self.get_main_password(), ])
        self.redis_instances.start()
        self._init_script = self.redis_instances.get_default_process()
def on_DbMsr_NewMasterUp(self, message):
    """
    Switch replication to a new master server
    @type message: scalarizr.messaging.Message
    @param message: DbMsr_NewMasterUp
    """
    if not message.body.has_key(BEHAVIOUR) or message.db_type != BEHAVIOUR:
        raise HandlerError("DbMsr_NewMasterUp message for PostgreSQL behaviour must have 'postgresql' property and db_type 'postgresql'")
    postgresql_data = message.postgresql.copy()
    # Masters have nothing to switch
    if self.is_replication_master:
        self._logger.debug('Skipping NewMasterUp. My replication role is master')
        return
    host = message.local_ip or message.remote_ip
    self._logger.info("Switching replication to a new postgresql master %s", host)
    bus.fire('before_postgresql_change_master', host=host)
    # Non-eph snapshot in the message: rebuild slave storage from it
    if OPT_SNAPSHOT_CNF in postgresql_data and postgresql_data[OPT_SNAPSHOT_CNF]['type'] != 'eph':
        snap_data = postgresql_data[OPT_SNAPSHOT_CNF]
        self._logger.info('Reinitializing Slave from the new snapshot %s', snap_data['id'])
        self.postgresql.service.stop()
        self._logger.debug('Destroying old storage')
        self.storage_vol.destroy()
        self._logger.debug('Storage destroyed')
        self._logger.debug('Plugging new storage')
        vol = Storage.create(snapshot=snap_data.copy(), tags=self.postgres_tags)
        self._plug_storage(self._storage_path, vol)
        self._logger.debug('Storage plugged')
        # Persist both configs so the node survives restarts
        Storage.backup_config(vol.config(), self._volume_config_path)
        Storage.backup_config(snap_data, self._snapshot_config_path)
        self.storage_vol = vol
    # NOTE(review): init_slave placed after the snapshot branch (runs in both
    # cases) -- confirm against upstream handler; collapsed source is ambiguous.
    self.postgresql.init_slave(self._storage_path, host, POSTGRESQL_DEFAULT_PORT,
                               self.root_password)
    self._logger.debug("Replication switched")
    bus.fire('postgresql_change_master', host=host)
def _cleanup_after_rebundle():
    """Destroy leftover MySQL storage (platforms without volume support) and
    wipe the private configuration directory after a rebundle."""
    cnf = bus.cnf
    pl = bus.platform
    logger = logging.getLogger(__name__)

    if 'volumes' not in pl.features:
        # Destory mysql storages
        if os.path.exists(cnf.private_path('storage/mysql.json')):
            logger.info('Cleanuping old MySQL storage')
            vol = Storage.create(Storage.restore_config(cnf.private_path('storage/mysql.json')))
            vol.destroy(force=True)

    # Reset private configuration
    priv_path = cnf.private_path()
    for file in os.listdir(priv_path):
        if file in ('.user-data', '.update'):
            continue
        path = os.path.join(priv_path, file)
        if os.path.isfile(path) or os.path.islink(path):
            os.remove(path)
        else:
            shutil.rmtree(path)
    system2('sync', shell=True)
def _cleanup_after_rebundle():
    """Post-rebundle cleanup: drop the old MySQL storage on rackspace (no
    volume feature), then reset private configuration and sync disks."""
    cnf = bus.cnf
    pl = bus.platform
    logger = logging.getLogger(__name__)

    if 'volumes' not in pl.features:
        # Destory mysql storages
        mysql_cnf_path = cnf.private_path('storage/mysql.json')
        if os.path.exists(mysql_cnf_path) and pl.name == 'rackspace':
            logger.info('Cleanuping old MySQL storage')
            vol = Storage.create(Storage.restore_config(mysql_cnf_path))
            vol.destroy(force=True)

    # Reset private configuration
    priv_path = cnf.private_path()
    for file in os.listdir(priv_path):
        if file in ('.user-data', '.update'):
            continue
        path = os.path.join(priv_path, file)
        if os.path.isfile(path) or os.path.islink(path):
            os.remove(path)
        else:
            shutil.rmtree(path)
    system2('sync', shell=True)
def test_with_ignores(self):
    """_ignores declared on Vol/Snap classes must hide the listed keys from
    the snapshot config and volume config respectively."""
    class VolConfig(VolumeConfig):
        vg = None
        base64_whatever = None
        only_in_volume_config = None
        only_in_snapshot_config = None

    class Vol(VolConfig, Volume):
        # BUG FIX: was ('only_in_snapshot_config') without the trailing
        # comma -- a str, not a tuple, so _ignores iterated characters.
        _ignores = ('only_in_snapshot_config',)

    class Snap(VolConfig, Snapshot):
        _ignores = ('only_in_volume_config',)

    class VolPvd(VolumeProvider):
        type = 'mimimi'
        vol_class = Vol
        snap_class = Snap

    Storage.explore_provider(VolPvd)
    vol = Storage.create(
        type='mimimi',
        device='/dev/sdo',
        vg='vg0',
        only_in_volume_config='4u',
        only_in_snapshot_config='4s'
    )
    snap = vol.snapshot()
    snap_cnf = snap.config()
    vol_cnf = vol.config()
    self.assertFalse('only_in_volume_config' in snap_cnf)
    self.assertEqual(snap_cnf['vg'], 'vg0')
    self.assertFalse('only_in_snapshot_config' in vol_cnf)
    self.assertTrue(vol_cnf['base64_whatever'] is None)
def test_create_by_string_args(self):
    """A bare device string is a valid positional argument to Storage.create."""
    vol = Storage.create('/dev/sdb')
    self.assertEqual(vol.devname, '/dev/sdb')
def test_1(self): class TransferMock(object): SCHEMA = 'file://' def __init__(self): self._logger = logging.getLogger(__name__) pass def upload(self, files, remote_dst): remote_path = os.path.normpath(remote_dst[len(self.SCHEMA):]) ret = [] for file in files: self._logger.debug('Copy %s -> %s/', file, remote_path) shutil.copy(file, remote_path) ret.append('file://%s/%s' % (remote_path, os.path.basename(file))) print system(('ls', '-la', remote_path))[0] return tuple(ret) def download(self, remote_files, dst, recursive=False): if isinstance(remote_files, basestring): remote_files = (remote_files, ) files = list( os.path.normpath(path[len(self.SCHEMA):]) for path in remote_files) ret = [] for file in files: self._logger.debug('Copy %s -> %s/', file, dst) shutil.copy(file, dst) ret.append(os.path.join(dst, os.path.basename(file))) return ret Storage.lookup_provider('eph')._snap_pvd._transfer = TransferMock() # Create snapshot strage volume (Remote storage emulation) self.vols[1] = Storage.create(device=self.devices[1], mpoint=self.mpoints[1], fstype='ext3') self.vols[1].mkfs() self.vols[1].mount() # Create and mount EPH storage self.vols[0] = Storage.create(type='eph', disk=self.devices[0], vg='casstorage', snap_backend='%s%s' % (TransferMock.SCHEMA, self.mpoints[1]), fstype='ext3', mpoint=self.mpoints[0]) self.vols[0].mkfs() self.vols[0].mount() # Create big file bigfile = os.path.join(self.mpoints[0], 'bigfile') system( ('dd', 'if=/dev/urandom', 'of=%s' % bigfile, 'bs=1M', 'count=15')) bigsize = os.path.getsize(bigfile) self.assertTrue(bigsize > 0) md5sum = system(('/usr/bin/md5sum', bigfile))[0].strip().split(' ')[0] # Snapshot storage snap = self.vols[0].snapshot(description='Bigfile with us forever') self.assertEqual(snap.type, 'eph') self.assertEqual(snap.vg, 'casstorage') self.assertEqual(snap.state, Snapshot.CREATING) wait_until(lambda: snap.state in (Snapshot.COMPLETED, Snapshot.FAILED)) print snap.config() if snap.state == Snapshot.FAILED: raise Exception( 
'Snapshot creation failed. See log for more details') # Destroy original storage self.vols[0].destroy() self.vols[0] = None # Restore snapshot self.vols[2] = Storage.create(disk=self.devices[2], snapshot=snap) self.vols[2].mount(self.mpoints[2]) bigfile2 = os.path.join(self.mpoints[2], 'bigfile') self.assertTrue(os.path.exists(bigfile2)) md5sum2 = system( ('/usr/bin/md5sum', bigfile2))[0].strip().split(' ')[0] self.assertEqual(md5sum, md5sum2)
def test_1(self): class TransferMock(object): SCHEMA = 'file://' def __init__(self): self._logger = logging.getLogger(__name__) pass def upload(self, files, remote_dst): remote_path = os.path.normpath(remote_dst[len(self.SCHEMA):]) ret = [] for file in files: self._logger.debug('Copy %s -> %s/', file, remote_path) shutil.copy(file, remote_path) ret.append('file://%s/%s' % (remote_path, os.path.basename(file))) print system(('ls', '-la', remote_path))[0] return tuple(ret) def download(self, remote_files, dst, recursive=False): if isinstance(remote_files, basestring): remote_files = (remote_files,) files = list(os.path.normpath(path[len(self.SCHEMA):]) for path in remote_files) ret = [] for file in files: self._logger.debug('Copy %s -> %s/', file, dst) shutil.copy(file, dst) ret.append(os.path.join(dst, os.path.basename(file))) return ret Storage.lookup_provider('eph')._snap_pvd._transfer = TransferMock() # Create snapshot strage volume (Remote storage emulation) self.vols[1] = Storage.create( device=self.devices[1], mpoint=self.mpoints[1], fstype='ext3' ) self.vols[1].mkfs() self.vols[1].mount() # Create and mount EPH storage self.vols[0] = Storage.create( type='eph', disk=self.devices[0], vg='casstorage', snap_backend = '%s%s' % (TransferMock.SCHEMA, self.mpoints[1]), fstype = 'ext3', mpoint = self.mpoints[0] ) self.vols[0].mkfs() self.vols[0].mount() # Create big file bigfile = os.path.join(self.mpoints[0], 'bigfile') system(('dd', 'if=/dev/urandom', 'of=%s' % bigfile, 'bs=1M', 'count=15')) bigsize = os.path.getsize(bigfile) self.assertTrue(bigsize > 0) md5sum = system(('/usr/bin/md5sum', bigfile))[0].strip().split(' ')[0] # Snapshot storage snap = self.vols[0].snapshot(description='Bigfile with us forever') self.assertEqual(snap.type, 'eph') self.assertEqual(snap.vg, 'casstorage') self.assertEqual(snap.state, Snapshot.CREATING) wait_until(lambda: snap.state in (Snapshot.COMPLETED, Snapshot.FAILED)) print snap.config() if snap.state == Snapshot.FAILED: raise 
Exception('Snapshot creation failed. See log for more details') # Destroy original storage self.vols[0].destroy() self.vols[0] = None # Restore snapshot self.vols[2] = Storage.create(disk=self.devices[2], snapshot=snap) self.vols[2].mount(self.mpoints[2]) bigfile2 = os.path.join(self.mpoints[2], 'bigfile') self.assertTrue(os.path.exists(bigfile2)) md5sum2 = system(('/usr/bin/md5sum', bigfile2))[0].strip().split(' ')[0] self.assertEqual(md5sum, md5sum2)
def test_1(self):
    """EBS volume creation from an explicit id plus an EBS snapshot dict."""
    snapshot_cnf = dict(id='snap-87654321', type='ebs')
    vol = Storage.create(type='ebs', id='vol-12345678', snapshot=snapshot_cnf)
def on_init(self):
    """Subscribe lifecycle handlers; during bootstrap insert iptables rules
    and load an SELinux module for ssh-keygen on RedHat; when already
    RUNNING, restore storage, start PostgreSQL and verify root credentials."""
    # temporary fix for starting-after-rebundle issue
    if not os.path.exists(PG_SOCKET_DIR):
        os.makedirs(PG_SOCKET_DIR)
        # NOTE(review): collapsed source is ambiguous on whether rchown runs
        # unconditionally; placed inside the branch -- confirm upstream.
        rchown(user='******', path=PG_SOCKET_DIR)

    bus.on("host_init_response", self.on_host_init_response)
    bus.on("before_host_up", self.on_before_host_up)
    bus.on("before_reboot_start", self.on_before_reboot_start)
    bus.on("before_reboot_finish", self.on_before_reboot_finish)

    if self._cnf.state == ScalarizrState.BOOTSTRAPPING:
        self._insert_iptables_rules()
        if disttool.is_redhat_based():
            checkmodule_paths = software.whereis('checkmodule')
            semodule_package_paths = software.whereis('semodule_package')
            semodule_paths = software.whereis('semodule')
            # All three SELinux tools must be available to build/load the policy
            if all((checkmodule_paths, semodule_package_paths, semodule_paths)):
                filetool.write_file('/tmp/sshkeygen.te',
                                    SSH_KEYGEN_SELINUX_MODULE,
                                    logger=self._logger)
                self._logger.debug('Compiling SELinux policy for ssh-keygen')
                system2((checkmodule_paths[0], '-M', '-m', '-o',
                         '/tmp/sshkeygen.mod', '/tmp/sshkeygen.te'),
                        logger=self._logger)
                self._logger.debug('Building SELinux package for ssh-keygen')
                system2((semodule_package_paths[0], '-o', '/tmp/sshkeygen.pp',
                         '-m', '/tmp/sshkeygen.mod'),
                        logger=self._logger)
                self._logger.debug('Loading ssh-keygen SELinux package')
                system2((semodule_paths[0], '-i', '/tmp/sshkeygen.pp'),
                        logger=self._logger)

    if self._cnf.state == ScalarizrState.RUNNING:
        # Re-plug persistent storage from the saved volume config
        storage_conf = Storage.restore_config(self._volume_config_path)
        storage_conf['tags'] = self.postgres_tags
        self.storage_vol = Storage.create(storage_conf)
        if not self.storage_vol.mounted():
            self.storage_vol.mount()

        self.postgresql.service.start()
        self.accept_all_clients()

        self._logger.debug("Checking presence of Scalr's PostgreSQL root user.")
        root_password = self.root_password
        if not self.postgresql.root_user.exists():
            self._logger.debug("Scalr's PostgreSQL root user does not exist. Recreating")
            self.postgresql.root_user = self.postgresql.create_user(ROOT_USER, root_password)
        else:
            try:
                self.postgresql.root_user.check_system_password(root_password)
                self._logger.debug("Scalr's root PgSQL user is present. Password is correct.")
            except ValueError:
                self._logger.warning("Scalr's root PgSQL user was changed. Recreating.")
                self.postgresql.root_user.change_system_password(root_password)

        if self.is_replication_master:
            # ALTER ROLE cannot be executed in a read-only transaction
            self._logger.debug("Checking password for pg_role scalr.")
            if not self.postgresql.root_user.check_role_password(root_password):
                self._logger.warning("Scalr's root PgSQL role was changed. Recreating.")
                self.postgresql.root_user.change_role_password(root_password)