def testBackupRestoreRaid(self):
    mpoint = '/tmp/mpoint'
    if not os.path.isdir(mpoint):
        os.makedirs(mpoint)

    self.array = Storage.create(type='raid', disks=self.vols, level=1,
                                vg='dbstorage', snap_pv=self.snap_vol, fstype='ext3')
    self.array.mkfs()
    self.array.mount(mpoint)

    # Create big file
    bigfile_path = os.path.join(mpoint, 'bigfile')
    system('dd if=/dev/random of=%s bs=1M count=5' % bigfile_path)
    md5sum = system('/usr/bin/md5sum %s' % bigfile_path)[0].strip().split(' ')[0]

    array_snap = self.array.snapshot()
    self.array.destroy(remove_disks=True)

    self.array = Storage.create(snapshot=array_snap)
    new_mpoint = '/tmp/mpoint2'
    if not os.path.isdir(new_mpoint):
        os.makedirs(new_mpoint)
    self.array.mount(new_mpoint)

    bigfile_path2 = os.path.join(new_mpoint, 'bigfile')
    md5sum2 = system('/usr/bin/md5sum %s' % bigfile_path2)[0].strip().split(' ')[0]
    self.assertEqual(md5sum, md5sum2)

    self.array.destroy(remove_disks=True)
def test_with_ignores(self):
    class VolConfig(VolumeConfig):
        vg = None
        base64_whatever = None
        only_in_volume_config = None
        only_in_snapshot_config = None

    class Vol(VolConfig, Volume):
        # Trailing comma makes this a tuple; without it _ignores is a bare string
        _ignores = ('only_in_snapshot_config',)

    class Snap(VolConfig, Snapshot):
        _ignores = ('only_in_volume_config',)

    class VolPvd(VolumeProvider):
        type = 'mimimi'
        vol_class = Vol
        snap_class = Snap

    Storage.explore_provider(VolPvd)

    vol = Storage.create(type='mimimi', device='/dev/sdo', vg='vg0',
                         only_in_volume_config='4u', only_in_snapshot_config='4s')
    snap = vol.snapshot()

    snap_cnf = snap.config()
    vol_cnf = vol.config()

    self.assertFalse('only_in_volume_config' in snap_cnf)
    self.assertEqual(snap_cnf['vg'], 'vg0')
    self.assertFalse('only_in_snapshot_config' in vol_cnf)
    self.assertTrue(vol_cnf['base64_whatever'] is None)
def _testDetachAttachRaid(self):
    mpoint = '/tmp/mpoint'
    if not os.path.isdir(mpoint):
        os.makedirs(mpoint)

    self.array = Storage.create(type='raid', disks=self.vols, level=1,
                                vg='dbstorage', snap_pv=self.snap_vol, fstype='ext3')
    self.array.mkfs()
    self.array.mount(mpoint)

    bigfile_path = os.path.join(mpoint, 'bigfile')
    system('dd if=/dev/random of=%s bs=1M count=5' % bigfile_path)
    md5sum = system('/usr/bin/md5sum %s' % bigfile_path)[0].strip().split(' ')[0]

    self.assertTrue(os.path.ismount(mpoint))
    config = self.array.detach(force=True)
    self.assertFalse(os.path.ismount(mpoint))
    self.assertEqual(self.array.devname, None)

    self.array = Storage.create(**config)
    self.array.mount(mpoint)

    md5sum2 = system('/usr/bin/md5sum %s' % bigfile_path)[0].strip().split(' ')[0]
    self.assertEqual(md5sum, md5sum2)
def _init_slave(self, message):
    """
    Initialize redis slave
    @type message: scalarizr.messaging.Message
    @param message: HostUp message
    """
    LOG.info("Initializing %s slave" % BEHAVIOUR)

    with bus.initialization_op as op:
        with op.step(self._step_create_storage):
            LOG.debug("Initializing slave storage")
            self.storage_vol = self._plug_storage(self._storage_path,
                    dict(snapshot=Storage.restore_config(self._snapshot_config_path)))
            Storage.backup_config(self.storage_vol.config(), self._volume_config_path)

        with op.step(self._step_init_slave):
            # Change replication master
            master_host = self._get_master_host()
            LOG.debug("Master server obtained (local_ip: %s, public_ip: %s)",
                    master_host.internal_ip, master_host.external_ip)

            host = master_host.internal_ip or master_host.external_ip
            instance = self.redis_instances.get_instance(port=redis.DEFAULT_PORT)
            instance.init_slave(self._storage_path, host, redis.DEFAULT_PORT)
            op.progress(50)
            instance.wait_for_sync()

        with op.step(self._step_collect_host_up_data):
            # Update HostUp message
            message.redis = self._compat_storage_data(self.storage_vol)
            message.db_type = BEHAVIOUR
def _init_slave(self, message):
    """
    Initialize postgresql slave
    @type message: scalarizr.messaging.Message
    @param message: HostUp message
    """
    self._logger.info("Initializing postgresql slave")

    with bus.initialization_op as op:
        with op.step(self._step_create_storage):
            self._logger.debug("Initialize slave storage")
            self.storage_vol = self._plug_storage(self._storage_path,
                    dict(snapshot=Storage.restore_config(self._snapshot_config_path)))
            Storage.backup_config(self.storage_vol.config(), self._volume_config_path)

        with op.step(self._step_init_slave):
            # Change replication master
            master_host = self._get_master_host()
            self._logger.debug("Master server obtained (local_ip: %s, public_ip: %s)",
                    master_host.internal_ip, master_host.external_ip)

            host = master_host.internal_ip or master_host.external_ip
            self.postgresql.init_slave(self._storage_path, host, POSTGRESQL_DEFAULT_PORT,
                    self.root_password)

        with op.step(self._step_collect_host_up_data):
            # Update HostUp message
            message.postgresql = self._compat_storage_data(self.storage_vol)
            message.db_type = BEHAVIOUR
def test_explore_default_provider2(self):
    Storage.explore_provider(self.MyPvd, default_for_snap=True)
    self.assertEqual(Storage.default_snap_provider, self.MyPvd.type)
    self.assertFalse(Storage.default_vol_provider)
    self.assertTrue(isinstance(Storage.lookup_provider(self.MyPvd.type), self.MyPvd))
    self.assertTrue(isinstance(Storage.lookup_provider(None, True), self.MyPvd))
def test_create_over_disk(self):
    vol = Storage.create(type='myvol', device='/dev/lvolume', disk='/dev/sdb')
    self.assertEqual(vol.disk.devname, '/dev/sdb')

    vol = Storage.create(type='myvol', device='/dev/ldevice2',
                         disk=dict(type='myvol', device='/dev/sdb', param1='value1'))
    self.assertEqual(vol.disk.devname, '/dev/sdb')
    self.assertEqual(vol.disk.param1, 'value1')
def on_host_init_response(self, message):
    """
    Check postgresql data in host init response
    @type message: scalarizr.messaging.Message
    @param message: HostInitResponse
    """
    with bus.initialization_op as op:
        with op.phase(self._phase_postgresql):
            with op.step(self._step_accept_scalr_conf):

                if not message.body.has_key(BEHAVIOUR) or message.db_type != BEHAVIOUR:
                    raise HandlerError("HostInitResponse message for PostgreSQL behaviour must have 'postgresql' property and db_type 'postgresql'")

                '''
                if message.postgresql[OPT_REPLICATION_MASTER] != '1' and \
                        (not message.body.has_key(OPT_ROOT_SSH_PUBLIC_KEY)
                        or not message.body.has_key(OPT_ROOT_SSH_PRIVATE_KEY)):
                    raise HandlerError("HostInitResponse message for PostgreSQL slave must contain both public and private ssh keys")
                '''

                dir = os.path.dirname(self._volume_config_path)
                if not os.path.exists(dir):
                    os.makedirs(dir)

                postgresql_data = message.postgresql.copy()

                root = PgUser(ROOT_USER, self.pg_keys_dir)
                root.store_keys(postgresql_data[OPT_ROOT_SSH_PUBLIC_KEY],
                        postgresql_data[OPT_ROOT_SSH_PRIVATE_KEY])
                del postgresql_data[OPT_ROOT_SSH_PUBLIC_KEY]
                del postgresql_data[OPT_ROOT_SSH_PRIVATE_KEY]

                for key, file in ((OPT_VOLUME_CNF, self._volume_config_path),
                                  (OPT_SNAPSHOT_CNF, self._snapshot_config_path)):
                    if os.path.exists(file):
                        os.remove(file)
                    if key in postgresql_data:
                        if postgresql_data[key]:
                            Storage.backup_config(postgresql_data[key], file)
                        del postgresql_data[key]

                root_user = postgresql_data[OPT_ROOT_USER] or ROOT_USER
                postgresql_data['%s_password' % root_user] = \
                        postgresql_data.get(OPT_ROOT_PASSWORD) or cryptotool.pwgen(10)
                del postgresql_data[OPT_ROOT_PASSWORD]

                self._logger.debug("Update postgresql config with %s", postgresql_data)
                self._update_config(postgresql_data)
def test_snapshot_factory(self):
    Storage.providers = self._save_pvds
    pvd = Storage.lookup_provider('eph')
    snap = pvd.snapshot_factory('hom')
    self.assertEqual(snap.type, 'eph')
    self.assertEqual(snap.description, 'hom')
    self.assertTrue(isinstance(snap, EphSnapshot))
def _format_image(self):
    LOG.info("Formatting image")

    vol_entry = [v for v in self._mtab if v.device.startswith('/dev')][0]
    if vol_entry.device == '/dev/root' and not os.path.exists(vol_entry.device):
        vol_entry = [v for v in mount.mounts('/etc/mtab')
                     if v.device.startswith('/dev')][0]
    fs = Storage.lookup_filesystem(vol_entry.fstype)

    # create filesystem
    fs.mkfs(self.devname)

    # set EXT3/4 options
    if fs.name.startswith('ext'):
        # max mounts before check (-1 = disable)
        system2(('/sbin/tune2fs', '-c', '1', self.devname))
        # time based (3m = 3 month)
        system2(('/sbin/tune2fs', '-i', '3m', self.devname))

    # set label
    label = fs.get_label(vol_entry.device)
    if label:
        fs.set_label(self.devname, label)

    LOG.debug('Image %s formatted', self.devname)
def test_create_from_snapshot(self):
    vol = Storage.create(snapshot=dict(type='base', device='/dev/sdb',
                                       mpoint='/mnt/dbstorage', fstype='xfs'))
    self.assertEqual(vol.devname, '/dev/sdb')
    self.assertEqual(vol.mpoint, '/mnt/dbstorage')

    vol = Storage.create(device='/dev/sdd',
                         snapshot=dict(type='myvol', device='/dev/lvol',
                                       param1='value1', param2='value2'))
    self.assertEqual(vol.devname, '/dev/sdd')
    self.assertEqual(vol.type, 'myvol')
    self.assertEqual(vol.param1, 'value1')
def get_free_devname(device):
    if device:
        device = ebstool.get_ebs_devname(device)

    used_letters = set(row['device'][-1]
            for row in Storage.volume_table()
            if row['device'] and (
                row['state'] == 'attached' or (
                pl.get_instance_type() == 't1.micro' and row['state'] == 'detached')))

    with self.letters_lock:
        avail_letters = list(set(self.all_letters) - used_letters - self.acquired_letters)

        volumes = conn.get_all_volumes(filters={'attachment.instance-id': pl.get_instance_id()})
        for volume in volumes:
            volume_device = volume.attach_data.device
            volume_device = re.sub('\d+', '', volume_device)
            try:
                avail_letters.remove(volume_device[-1])
            except ValueError:
                pass

        if not device or not (device[-1] in avail_letters) or os.path.exists(device):
            letter = firstmatched(
                    lambda l: not os.path.exists(ebstool.real_devname('/dev/sd%s' % l)),
                    avail_letters)
            if letter:
                device = '/dev/sd%s' % letter
                self.acquired_letters.add(letter)
            else:
                raise StorageError('No free letters for block device name remains')

    return device
def _testCreateDestroyRaid(self):
    self._logger.info('>>>>>>> Starting Create-Destroy test for raid.')
    self.array = Storage.create(type='raid', disks=self.vols, level=1, vg='dbstorage')
    self.assertTrue(os.path.exists(self.array.raid_pv))
    time.sleep(2)
    self.array.destroy(force=True, remove_disks=True)
    self._logger.info('>>>>>>> Create-Destroy test successfully finished.')
def setUp(self):
    self.vols = []
    for i in range(3):
        #system('dd if=/dev/zero of=/tmp/device%s bs=1M count=10' % i)
        #self.vols.append(Storage.create(type='loop', file='/tmp/device%s' % i))
        self.vols.append(Storage.create(type='ebs', size=1, avail_zone='us-east-1a'))
        self._logger.debug("Volume with id '%s' created." % self.vols[-1].id)
    self.snap_vol = self.vols.pop()
def test_1(self):
    v1 = Storage.create(device='/dev/sdo')
    v2 = Storage.create(device='/dev/sdm')

    table = Storage.volume_table()
    self.assertEqual(len(table), 2)

    v1row = firstmatched(lambda row: row['device'] == '/dev/sdo', table)
    self.assertTrue(v1row)
    self.assertEqual(v1row['volume_id'], v1.id)
    self.assertEqual(v1row['device'], v1.device)
    self.assertEqual(v1row['type'], v1.type)
    self.assertEqual(v1row['state'], 'attached')

    v2.detach()
    table = Storage.volume_table()
    self.assertEqual(len(table), 2)
    v2row = firstmatched(lambda row: row['device'] == '/dev/sdm', table)
    self.assertEqual(v2row['state'], 'detached')
def on_host_init_response(self, message):
    """
    Check redis data in host init response
    @type message: scalarizr.messaging.Message
    @param message: HostInitResponse
    """
    with bus.initialization_op as op:
        with op.phase(self._phase_redis):
            with op.step(self._step_accept_scalr_conf):

                if not message.body.has_key(BEHAVIOUR) or message.db_type != BEHAVIOUR:
                    raise HandlerError("HostInitResponse message for %s behaviour must have '%s' property and db_type '%s'"
                            % (BEHAVIOUR, BEHAVIOUR, BEHAVIOUR))

                config_dir = os.path.dirname(self._volume_config_path)
                if not os.path.exists(config_dir):
                    os.makedirs(config_dir)

                redis_data = message.redis.copy()
                LOG.info('Got Redis part of HostInitResponse: %s' % redis_data)

                '''
                XXX: following line enables support for old scalr installations
                use_password should be set by postinstall script for old servers
                '''
                redis_data[OPT_USE_PASSWORD] = redis_data.get(OPT_USE_PASSWORD, '1')

                for key, config_file in ((OPT_VOLUME_CNF, self._volume_config_path),
                                         (OPT_SNAPSHOT_CNF, self._snapshot_config_path)):
                    if os.path.exists(config_file):
                        os.remove(config_file)
                    if key in redis_data:
                        if redis_data[key]:
                            Storage.backup_config(redis_data[key], config_file)
                        del redis_data[key]

                LOG.debug("Update redis config with %s", redis_data)
                self._update_config(redis_data)

                if self.default_service.running:
                    self.default_service.stop('Terminating default redis instance')

                self.redis_instances = redis.RedisInstances(self.is_replication_master,
                        self.persistence_type)
                self.redis_instances.init_processes(ports=[redis.DEFAULT_PORT, ],
                        passwords=[self.get_main_password(), ])
def test_create_vol_container(self):
    vol = Storage.create(type='myvol', device='/dev/gp0',
                         disks=('/dev/sdb', dict(type='myvol', device='/dev/sdd')))
    self.assertEqual(len(vol.disks), 2)
    self.assertEqual(vol.disks[0].devname, '/dev/sdb')
    self.assertEqual(vol.disks[1].devname, '/dev/sdd')
    self.assertEqual(vol.disks[1].type, 'myvol')
def on_init(self):
    bus.on("host_init_response", self.on_host_init_response)
    bus.on("before_host_up", self.on_before_host_up)
    bus.on("before_reboot_start", self.on_before_reboot_start)
    bus.on("before_reboot_finish", self.on_before_reboot_finish)

    if self._cnf.state == ScalarizrState.RUNNING:
        storage_conf = Storage.restore_config(self._volume_config_path)
        storage_conf['tags'] = self.redis_tags
        self.storage_vol = Storage.create(storage_conf)
        if not self.storage_vol.mounted():
            self.storage_vol.mount()

        self.redis_instances = redis.RedisInstances(self.is_replication_master,
                self.persistence_type)
        self.redis_instances.init_processes(ports=[redis.DEFAULT_PORT, ],
                passwords=[self.get_main_password(), ])
        self.redis_instances.start()
        self._init_script = self.redis_instances.get_default_process()
def on_DbMsr_NewMasterUp(self, message):
    """
    Switch replication to a new master server
    @type message: scalarizr.messaging.Message
    @param message: DbMsr_NewMasterUp
    """
    if not message.body.has_key(BEHAVIOUR) or message.db_type != BEHAVIOUR:
        raise HandlerError("DbMsr_NewMasterUp message for PostgreSQL behaviour must have 'postgresql' property and db_type 'postgresql'")

    postgresql_data = message.postgresql.copy()

    if self.is_replication_master:
        self._logger.debug('Skipping NewMasterUp. My replication role is master')
        return

    host = message.local_ip or message.remote_ip
    self._logger.info("Switching replication to a new postgresql master %s", host)
    bus.fire('before_postgresql_change_master', host=host)

    if OPT_SNAPSHOT_CNF in postgresql_data and postgresql_data[OPT_SNAPSHOT_CNF]['type'] != 'eph':
        snap_data = postgresql_data[OPT_SNAPSHOT_CNF]
        self._logger.info('Reinitializing Slave from the new snapshot %s', snap_data['id'])
        self.postgresql.service.stop()

        self._logger.debug('Destroying old storage')
        self.storage_vol.destroy()
        self._logger.debug('Storage destroyed')

        self._logger.debug('Plugging new storage')
        vol = Storage.create(snapshot=snap_data.copy(), tags=self.postgres_tags)
        self._plug_storage(self._storage_path, vol)
        self._logger.debug('Storage plugged')

        Storage.backup_config(vol.config(), self._volume_config_path)
        Storage.backup_config(snap_data, self._snapshot_config_path)
        self.storage_vol = vol

    self.postgresql.init_slave(self._storage_path, host, POSTGRESQL_DEFAULT_PORT, self.root_password)

    self._logger.debug("Replication switched")
    bus.fire('postgresql_change_master', host=host)
def test_1(self):
    vol = Storage.create(type='ebs', id='vol-12345678',
                         snapshot=dict(id='snap-87654321', type='ebs'))
    pass
def _cleanup_after_rebundle():
    cnf = bus.cnf
    pl = bus.platform
    logger = logging.getLogger(__name__)

    if 'volumes' not in pl.features:
        # Destroy mysql storages
        if os.path.exists(cnf.private_path('storage/mysql.json')):
            logger.info('Cleaning up old MySQL storage')
            vol = Storage.create(Storage.restore_config(cnf.private_path('storage/mysql.json')))
            vol.destroy(force=True)

    # Reset private configuration
    priv_path = cnf.private_path()
    for file in os.listdir(priv_path):
        if file in ('.user-data', '.update'):
            continue
        path = os.path.join(priv_path, file)
        os.remove(path) if (os.path.isfile(path) or os.path.islink(path)) else shutil.rmtree(path)

    system2('sync', shell=True)
def test_1(self):
    self.vol = vol = Storage.create(type='eph', disk=self.device, vg='dbstorage',
                                    fstype='ext3', snap_backend='my://secretphase/backups')
    self.assertEqual(vol.type, 'eph')
    self.assertTrue(os.path.exists(vol.devname))
    self.assertEqual(vol.disk.devname, self.device)
    self.assertTrue(os.path.exists(vol.tranzit_vol.devname))

    config = self.vol.config()
    self.assertEqual(config['type'], 'eph')
    self.assertTrue(config['size'] is not None)
    self.assertTrue(config['snap_backend'] is not None)
    self.assertTrue(isinstance(config['disk'], dict))

    # Reinitialize storage from config
    eph = Storage.create(config)
    self.assertEqual(eph.device, self.vol.device)
def test_base_volume(self):
    device = '/dev/sdo'
    mpoint = '/mnt/media-server-flvs'
    fstype = 'ext4'

    vol = Storage.create(device=device, mpoint=mpoint, fstype=fstype)
    snap = vol.snapshot('snap #00')

    snap_cnf = snap.config()
    vol_cnf = vol.config()
    self.assertEqual(snap_cnf['type'], vol.type)
    self.assertEqual(snap_cnf['mpoint'], vol.mpoint)
    self.assertEqual(snap_cnf['device'], vol.device)
def _cleanup_after_rebundle():
    cnf = bus.cnf
    pl = bus.platform
    logger = logging.getLogger(__name__)

    if 'volumes' not in pl.features:
        # Destroy mysql storages
        if os.path.exists(cnf.private_path('storage/mysql.json')) and pl.name == 'rackspace':
            logger.info('Cleaning up old MySQL storage')
            vol = Storage.create(Storage.restore_config(cnf.private_path('storage/mysql.json')))
            vol.destroy(force=True)

    # Reset private configuration
    priv_path = cnf.private_path()
    for file in os.listdir(priv_path):
        if file in ('.user-data', '.update'):
            continue
        path = os.path.join(priv_path, file)
        os.remove(path) if (os.path.isfile(path) or os.path.islink(path)) else shutil.rmtree(path)

    system2('sync', shell=True)
def _plug_storage(self, mpoint, vol):
    if not isinstance(vol, Volume):
        vol['tags'] = self.redis_tags
        vol = Storage.create(vol)

    try:
        if not os.path.exists(mpoint):
            os.makedirs(mpoint)
        if not vol.mounted():
            vol.mount(mpoint)
    except StorageError, e:
        if 'you must specify the filesystem type' in str(e):
            vol.mkfs()
            vol.mount(mpoint)
        else:
            raise
    # Callers assign the plugged volume (e.g. self.storage_vol = self._plug_storage(...))
    return vol
def _plug_storage(self, mpoint, vol):
    if not isinstance(vol, Volume):
        vol['tags'] = self.postgres_tags
        vol = Storage.create(vol)

    try:
        if not os.path.exists(mpoint):
            os.makedirs(mpoint)
        if not vol.mounted():
            vol.mount(mpoint)
    except StorageError, e:
        ''' XXX: Crappy. We need to introduce error codes from fstool '''
        if 'you must specify the filesystem type' in str(e):
            vol.mkfs()
            vol.mount(mpoint)
        else:
            raise
    # Callers assign the plugged volume (e.g. self.storage_vol = self._plug_storage(...))
    return vol
def _init_master(self, message):
    """
    Initialize postgresql master
    @type message: scalarizr.messaging.Message
    @param message: HostUp message
    """
    self._logger.info("Initializing PostgreSQL master")

    with bus.initialization_op as op:
        with op.step(self._step_create_storage):
            # Plug storage
            volume_cnf = Storage.restore_config(self._volume_config_path)
            try:
                snap_cnf = Storage.restore_config(self._snapshot_config_path)
                volume_cnf['snapshot'] = snap_cnf
            except IOError:
                pass
            self.storage_vol = self._plug_storage(mpoint=self._storage_path, vol=volume_cnf)
            Storage.backup_config(self.storage_vol.config(), self._volume_config_path)

        with op.step(self._step_init_master):
            self.postgresql.init_master(mpoint=self._storage_path, password=self.root_password)

            msg_data = dict()
            msg_data.update({
                    OPT_REPLICATION_MASTER: str(int(self.is_replication_master)),
                    OPT_ROOT_USER: self.postgresql.root_user.name,
                    OPT_ROOT_PASSWORD: self.root_password,
                    OPT_CURRENT_XLOG_LOCATION: None})

        with op.step(self._step_create_data_bundle):
            # Create snapshot
            snap = self._create_snapshot()
            Storage.backup_config(snap.config(), self._snapshot_config_path)

        with op.step(self._step_collect_host_up_data):
            # Update HostUp message
            msg_data.update(self._compat_storage_data(self.storage_vol, snap))

            if msg_data:
                message.db_type = BEHAVIOUR
                message.postgresql = msg_data.copy()
                message.postgresql.update({
                        OPT_ROOT_SSH_PRIVATE_KEY: self.postgresql.root_user.private_key,
                        OPT_ROOT_SSH_PUBLIC_KEY: self.postgresql.root_user.public_key})
                try:
                    del msg_data[OPT_SNAPSHOT_CNF], msg_data[OPT_VOLUME_CNF]
                except KeyError:
                    pass
                self._update_config(msg_data)
def _init_master(self, message):
    """
    Initialize redis master
    @type message: scalarizr.messaging.Message
    @param message: HostUp message
    """
    with bus.initialization_op as op:
        with op.step(self._step_create_storage):
            LOG.info("Initializing %s master" % BEHAVIOUR)

            # Plug storage
            volume_cnf = Storage.restore_config(self._volume_config_path)
            try:
                snap_cnf = Storage.restore_config(self._snapshot_config_path)
                volume_cnf['snapshot'] = snap_cnf
            except IOError:
                pass
            self.storage_vol = self._plug_storage(mpoint=self._storage_path, vol=volume_cnf)
            Storage.backup_config(self.storage_vol.config(), self._volume_config_path)

        with op.step(self._step_init_master):
            password = self.get_main_password()
            ri = self.redis_instances.get_instance(port=redis.DEFAULT_PORT)
            ri.init_master(mpoint=self._storage_path)

            msg_data = dict()
            msg_data.update({OPT_REPLICATION_MASTER: '1',
                             OPT_MASTER_PASSWORD: password})

        with op.step(self._step_create_data_bundle):
            # Create snapshot
            snap = self._create_snapshot()
            Storage.backup_config(snap.config(), self._snapshot_config_path)

        with op.step(self._step_collect_host_up_data):
            # Update HostUp message
            msg_data.update(self._compat_storage_data(self.storage_vol, snap))

            if msg_data:
                message.db_type = BEHAVIOUR
                message.redis = msg_data.copy()
                try:
                    del msg_data[OPT_SNAPSHOT_CNF], msg_data[OPT_VOLUME_CNF]
                except KeyError:
                    pass
                self._update_config(msg_data)
def _format_image(self):
    LOG.info("Formatting image")

    vol_entry = list(v for v in self._mtab.find(mpoint=self._volume)
                     if v.devname.startswith('/dev'))[0]
    fs = Storage.lookup_filesystem(vol_entry.fstype)

    # create filesystem
    fs.mkfs(self.devname)

    # set EXT3/4 options
    if fs.name.startswith('ext'):
        # max mounts before check (-1 = disable)
        system2(('/sbin/tune2fs', '-c', '1', self.devname))
        # time based (3m = 3 month)
        system2(('/sbin/tune2fs', '-i', '3m', self.devname))

    # set label
    label = fs.get_label(vol_entry.devname)
    if label:
        fs.set_label(self.devname, label)

    LOG.debug('Image %s formatted', self.devname)
def test_1(self):
    class TransferMock(object):
        SCHEMA = 'file://'

        def __init__(self):
            self._logger = logging.getLogger(__name__)
            pass

        def upload(self, files, remote_dst):
            remote_path = os.path.normpath(remote_dst[len(self.SCHEMA):])
            ret = []
            for file in files:
                self._logger.debug('Copy %s -> %s/', file, remote_path)
                shutil.copy(file, remote_path)
                ret.append('file://%s/%s' % (remote_path, os.path.basename(file)))
            print system(('ls', '-la', remote_path))[0]
            return tuple(ret)

        def download(self, remote_files, dst, recursive=False):
            if isinstance(remote_files, basestring):
                remote_files = (remote_files, )
            files = list(os.path.normpath(path[len(self.SCHEMA):]) for path in remote_files)
            ret = []
            for file in files:
                self._logger.debug('Copy %s -> %s/', file, dst)
                shutil.copy(file, dst)
                ret.append(os.path.join(dst, os.path.basename(file)))
            return ret

    Storage.lookup_provider('eph')._snap_pvd._transfer = TransferMock()

    # Create snapshot storage volume (Remote storage emulation)
    self.vols[1] = Storage.create(device=self.devices[1], mpoint=self.mpoints[1], fstype='ext3')
    self.vols[1].mkfs()
    self.vols[1].mount()

    # Create and mount EPH storage
    self.vols[0] = Storage.create(
            type='eph',
            disk=self.devices[0],
            vg='casstorage',
            snap_backend='%s%s' % (TransferMock.SCHEMA, self.mpoints[1]),
            fstype='ext3',
            mpoint=self.mpoints[0])
    self.vols[0].mkfs()
    self.vols[0].mount()

    # Create big file
    bigfile = os.path.join(self.mpoints[0], 'bigfile')
    system(('dd', 'if=/dev/urandom', 'of=%s' % bigfile, 'bs=1M', 'count=15'))
    bigsize = os.path.getsize(bigfile)
    self.assertTrue(bigsize > 0)
    md5sum = system(('/usr/bin/md5sum', bigfile))[0].strip().split(' ')[0]

    # Snapshot storage
    snap = self.vols[0].snapshot(description='Bigfile with us forever')
    self.assertEqual(snap.type, 'eph')
    self.assertEqual(snap.vg, 'casstorage')
    self.assertEqual(snap.state, Snapshot.CREATING)

    wait_until(lambda: snap.state in (Snapshot.COMPLETED, Snapshot.FAILED))
    print snap.config()
    if snap.state == Snapshot.FAILED:
        raise Exception('Snapshot creation failed. See log for more details')

    # Destroy original storage
    self.vols[0].destroy()
    self.vols[0] = None

    # Restore snapshot
    self.vols[2] = Storage.create(disk=self.devices[2], snapshot=snap)
    self.vols[2].mount(self.mpoints[2])

    bigfile2 = os.path.join(self.mpoints[2], 'bigfile')
    self.assertTrue(os.path.exists(bigfile2))
    md5sum2 = system(('/usr/bin/md5sum', bigfile2))[0].strip().split(' ')[0]
    self.assertEqual(md5sum, md5sum2)
def on_DbMsr_PromoteToMaster(self, message):
    """
    Promote slave to master
    @type message: scalarizr.messaging.Message
    @param message: redis_PromoteToMaster
    """
    if message.db_type != BEHAVIOUR:
        LOG.error('Wrong db_type in DbMsr_PromoteToMaster message: %s' % message.db_type)
        return

    if self.is_replication_master:
        LOG.warning('Cannot promote to master. Already master')
        return

    bus.fire('before_slave_promote_to_master')

    master_storage_conf = message.body.get('volume_config')
    tx_complete = False
    old_conf = None
    new_storage_vol = None

    try:
        msg_data = dict(
                db_type=BEHAVIOUR,
                status="ok",
        )

        if master_storage_conf and master_storage_conf['type'] != 'eph':
            self.redis_instances.stop('Unplugging slave storage and then plugging master one')

            old_conf = self.storage_vol.detach(force=True)  # ??????
            new_storage_vol = self._plug_storage(self._storage_path, master_storage_conf)

            '''
            #This code was removed because redis master storage can be empty yet valid
            for r in self.redis_instances:
                # Continue if master storage is a valid redis storage
                if not r.working_directory.is_initialized(self._storage_path):
                    raise HandlerError("%s is not a valid %s storage" % (self._storage_path, BEHAVIOUR))
            Storage.backup_config(new_storage_vol.config(), self._volume_config_path)
            '''
            Storage.backup_config(new_storage_vol.config(), self._volume_config_path)

            msg_data[BEHAVIOUR] = self._compat_storage_data(vol=new_storage_vol)

        self.redis_instances.init_as_masters(self._storage_path)
        self._update_config({OPT_REPLICATION_MASTER: "1"})

        if not master_storage_conf or master_storage_conf['type'] == 'eph':
            snap = self._create_snapshot()
            Storage.backup_config(snap.config(), self._snapshot_config_path)
            msg_data[BEHAVIOUR] = self._compat_storage_data(self.storage_vol, snap)

        self.send_message(DbMsrMessages.DBMSR_PROMOTE_TO_MASTER_RESULT, msg_data)

        tx_complete = True
        bus.fire('slave_promote_to_master')

    except (Exception, BaseException), e:
        LOG.exception(e)
        if new_storage_vol and not new_storage_vol.detached:
            new_storage_vol.detach()
        # Get back slave storage
        if old_conf:
            self._plug_storage(self._storage_path, old_conf)

        self.send_message(DbMsrMessages.DBMSR_PROMOTE_TO_MASTER_RESULT, dict(
                db_type=BEHAVIOUR,
                status="error",
                last_error=str(e)))

        # Start redis
        self.redis_instances.start()
def blank_config(self, cnf):
    cnf.pop('snapshot_id', None)


def destroy(self, vol, force=False, **kwargs):
    '''
    @type vol: CSVolume
    '''
    super(CSVolumeProvider, self).destroy(vol)
    conn = self._new_conn()
    if conn:
        voltool.detach_volume(conn, vol.id, LOG)
        voltool.delete_volume(conn, vol.id, LOG)
    vol.device = None


def destroy_snapshot(self, snap):
    conn = self._new_conn()
    if conn:
        LOG.debug('Deleting EBS snapshot %s', snap.id)
        conn.deleteSnapshot(id=snap.id)


@devname_not_empty
def detach(self, vol, force=False):
    super(CSVolumeProvider, self).detach(vol)
    conn = self._new_conn()
    if conn:
        voltool.detach_volume(conn, vol.id, LOG)
    vol.device = None


Storage.explore_provider(CSVolumeProvider)
def test_explore_provider(self):
    Storage.explore_provider(self.MyPvd)
    self.assertFalse(Storage.default_snap_provider)
    self.assertFalse(Storage.default_vol_provider)
    self.assertTrue(isinstance(Storage.lookup_provider(self.MyPvd.type), self.MyPvd))
    if tx_complete and master_storage_conf and master_storage_conf['type'] != 'eph':
        # Delete slave EBS
        self.storage_vol.destroy(remove_disks=True)
        self.storage_vol = new_storage_vol
        Storage.backup_config(self.storage_vol.config(), self._volume_config_path)


def on_DbMsr_NewMasterUp(self, message):
    """
    Switch replication to a new master server
    @type message: scalarizr.messaging.Message
    @param message: DbMsr_NewMasterUp
    """
    if not message.body.has_key(BEHAVIOUR) or message.db_type != BEHAVIOUR:
        raise HandlerError("DbMsr_NewMasterUp message for %s behaviour must have '%s' property and db_type '%s'"
                % (BEHAVIOUR, BEHAVIOUR, BEHAVIOUR))

    if self.is_replication_master:
        LOG.debug('Skipping NewMasterUp. My replication role is master')
def setUp(self):
    self.VolPvd.vol_class = self.Vol
    Storage.explore_provider(self.VolPvd)
def detach(self, vol, force=False):
    super(EbsVolumeProvider, self).detach(vol)
    try:
        pl = bus.platform
        conn = pl.new_ec2_conn()
        vol.detached = True
    except AttributeError:
        pass
    else:
        ebstool.detach_volume(conn, vol.id, self._logger)
    finally:
        vol.device = None


Storage.explore_provider(EbsVolumeProvider, default_for_snap=True)


class S3TransferProvider(TransferProvider):
    schema = 's3'
    urlparse.uses_netloc.append(schema)

    acl = None
    _logger = None
    _bucket = None

    def __init__(self, acl='aws-exec-read'):
        self._logger = logging.getLogger(__name__)
        self.acl = acl
def test_create_by_string_args(self):
    vol = Storage.create('/dev/sdb')
    self.assertEqual(vol.devname, '/dev/sdb')