def on_host_init_response(self, hir):
    LOG.info('Configuring block device mountpoints')
    with bus.initialization_op as op:
        with op.phase(self._phase_plug_volume):
            wait_until(self._plug_all_volumes, sleep=10, timeout=600,
                    error_text='Cannot attach and mount disks in a reasonable time')

            volumes = hir.body.get('volumes') or []
            if volumes:
                LOG.debug('HIR volumes: %s', volumes)
                for vol in volumes:
                    template = vol.pop('template', None)
                    from_template_if_missing = vol.pop('from_template_if_missing', None)
                    vol = storage2.volume(**vol)
                    LOG.info('Ensuring %s volume %s', vol.type, dict(vol))
                    try:
                        vol.ensure(mount=bool(vol.mpoint), mkfs=True)
                    except storage2.VolumeNotExistsError, e:
                        if template and from_template_if_missing == '1':
                            vol = storage2.volume(**template)
                            LOG.warn('Volume %s not exists, re-creating %s volume from config: %s',
                                    str(e), vol.type, dict(vol))
                            vol.ensure(mount=bool(vol.mpoint), mkfs=True)
                        else:
                            raise
                    self._volumes.append(dict(vol))

def _ensure(self):
    # snap should be applied after layout: download and extract data.
    # this could be done on already ensured volume.
    # Example: resync slave data

    if not self._lvm_volume:
        if isinstance(self.disk, basestring) and \
                self.disk.startswith('/dev/sd'):
            self.disk = storage2.volume(
                    type='ec2_ephemeral', name='ephemeral0')
        self._lvm_volume = storage2.volume(
                type='lvm',
                pvs=[self.disk],
                size=self.size + 'VG',
                vg=self.vg,
                name='data')
        self._lvm_volume.ensure()

    self.device = self._lvm_volume.device

    if self.snap:
        self.snap = storage2.snapshot(self.snap)
        self.mkfs()
        tmp_mpoint = not self.mpoint
        if tmp_mpoint:
            tmp_mpoint = tempfile.mkdtemp()
            self.mpoint = tmp_mpoint

        transfer = cloudfs.LargeTransfer(self.snap.path, self.mpoint + '/')
        try:
            self.mount()
            if hasattr(self.snap, 'size'):
                df_info = filetool.df()
                df = filter(lambda x: x.mpoint == self.mpoint, df_info)[0]
                if df.free < self.snap.size:
                    raise storage2.StorageError('Not enough free space'
                            ' on device %s to restore snapshot.' % self.device)

            transfer.run()
        except:
            e = sys.exc_info()[1]
            raise storage2.StorageError("Snapshot restore error: %s" % e)
        finally:
            try:
                self.umount()
            finally:
                if tmp_mpoint:
                    self.mpoint = None
                    os.rmdir(tmp_mpoint)

        self.snap = None

def ensure(self, mount=False, mkfs=False, fstab=True, **updates):
    """
    Make sure that volume is attached and ready for use.

    :param mount: if set, volume eventually will be mounted to its mpoint
    :param mkfs: if set, volume will have corresponding fs eventually
    :return:
    """
    if not self.features['restore']:
        self._check_restore_unsupported()
    if self.snap and isinstance(self.snap, Snapshot):
        self.snap = self.snap.config()
    try:
        self._ensure()
    except storage2.VolumeNotExistsError, e:
        LOG.debug('recreate_if_missing: %s', self.recreate_if_missing)
        if self.recreate_if_missing:
            LOG.warning(e)
            LOG.info('Volume %s not exists, re-creating %s from template',
                    self.id, self.type)
            template = dict(self.template or self.clone())
            LOG.debug('Template: %s', template)
            vol = storage2.volume(**template)
            vol.ensure(mount=bool(vol.mpoint), mkfs=True)
            self._config = vol.config()
        else:
            raise

def clone(self):
    config = self._config.copy()
    config.pop('id', None)
    config.pop('fscreated', None)
    config.pop('device', None)
    self._clone(config)
    return storage2.volume(config)

def _run(self):
    self.volume = storage2.volume(self.volume)
    LOG.debug("Volume obj: %s", self.volume)
    LOG.debug("Volume config: %s", dict(self.volume))

    state = {}
    self.fire("freeze", self.volume, state)
    try:
        snap = self.volume.snapshot(self.description, tags=self.tags)
    finally:
        self.fire("unfreeze", self.volume, state)

    try:
        util.wait_until(
                lambda: snap.status() in (snap.COMPLETED, snap.FAILED),
                start_text="Polling snapshot status (%s)" % snap.id,
                logger=LOG)
    except:
        if "Request limit exceeded" in str(sys.exc_info()[1]):
            pass
        else:
            raise

    if snap.status() == snap.FAILED:
        msg = "Backup failed because snapshot %s failed" % snap.id
        raise Error(msg)

    return restore(type=self.type, snapshot=snap, **state)

def _snapshot(self, description, tags, **kwds):
    lvm_snap = self._lvm_volume.lvm_snapshot(size='100%FREE')
    try:
        snap = storage2.snapshot(type='eph')
        snap.path = os.path.join(os.path.join(
                self.cloudfs_dir, snap.id + '.manifest.ini'))

        lvm_snap_vol = storage2.volume(
                device=lvm_snap.device,
                mpoint=tempfile.mkdtemp())
        lvm_snap_vol.ensure(mount=True)

        df_info = filetool.df()
        df = filter(lambda x: x.mpoint == lvm_snap_vol.mpoint, df_info)

        snap.size = df[0].used

        try:
            transfer = cloudfs.LargeTransfer(
                    src=lvm_snap_vol.mpoint + '/',
                    dst=snap.path,
                    tar_it=True,
                    gzip_it=True,
                    tags=tags)
            transfer.run()
        finally:
            lvm_snap_vol.umount()
            os.rmdir(lvm_snap_vol.mpoint)
    finally:
        lvm_snap.destroy()

    return snap

def check_growth(self, **growth):
    if int(self.level) in (0, 10):
        raise storage2.StorageError("Raid%s doesn't support growth" % self.level)

    disk_growth = growth.get('disks')

    change_disks = False
    if disk_growth:
        for disk_cfg_or_obj in self.disks:
            disk = storage2.volume(disk_cfg_or_obj)
            try:
                disk.check_growth(**disk_growth)
                change_disks = True
            except storage2.NoOpError:
                pass

    new_len = growth.get('disks_count')
    current_len = len(self.disks)
    change_size = new_len and int(new_len) != current_len

    if not change_size and not change_disks:
        raise storage2.NoOpError('Configurations are equal. Nothing to do')

    if change_size and int(new_len) < current_len:
        raise storage2.StorageError('Disk count can only be increased.')

    if change_size and int(self.level) in (0, 10):
        raise storage2.StorageError("Can't add disks to raid level %s" % self.level)

def ensure(self, mount=False, mkfs=False, fstab=True, **updates):
    """
    Make sure that volume is attached and ready for use.

    :param mount: if set, volume eventually will be mounted to its mpoint
    :param mkfs: if set, volume will have corresponding fs eventually
    :return:
    """
    if not self.features['restore']:
        self._check_restore_unsupported()
    if self.snap and isinstance(self.snap, Snapshot):
        self.snap = self.snap.config()
    try:
        self._ensure()
    except storage2.VolumeNotExistsError, e:
        LOG.debug("recreate_if_missing: %s" % self.recreate_if_missing)
        if self.recreate_if_missing:
            LOG.warning(e)
            LOG.info('Volume %s not exists, re-creating %s from template',
                    self.id, self.type)
            template = self.clone()
            vol = storage2.volume(**dict(template))
            vol.ensure(mount=bool(vol.mpoint), mkfs=True)
            self._config = vol.config()
        else:
            raise

def _destroy(self, force, **kwds):
    remove_disks = kwds.get('remove_disks')
    if remove_disks:
        for disk in self.disks:
            disk = storage2.volume(disk)
            disk.destroy(force=force)
        self.disks = []

def on_host_init_response(self, message):
    with bus.initialization_op as op:
        with op.phase(self._phase_rabbitmq):
            with op.step(self._step_accept_scalr_conf):
                if not message.body.has_key("rabbitmq"):
                    raise HandlerError("HostInitResponse message for RabbitMQ behaviour must have 'rabbitmq' property")

                rabbitmq_data = message.rabbitmq.copy()

                if not rabbitmq_data['password']:
                    rabbitmq_data['password'] = cryptotool.pwgen(10)

                hostname = RABBIT_HOSTNAME_TPL % int(message.server_index)
                rabbitmq_data['server_index'] = message.server_index
                rabbitmq_data['hostname'] = hostname

                dns.ScalrHosts.set('127.0.0.1', hostname)
                with open('/etc/hostname', 'w') as f:
                    f.write(hostname)
                system2(('hostname', '-F', '/etc/hostname'))

                volume_config = rabbitmq_data.pop('volume_config')
                volume_config['mpoint'] = DEFAULT_STORAGE_PATH
                rabbitmq_data['volume'] = storage2.volume(volume_config)

                __rabbitmq__.update(rabbitmq_data)

def on_before_host_up(self, message):
    """
    Configure redis behaviour
    @type message: scalarizr.messaging.Message
    @param message: HostUp message
    """
    repl = 'master' if self.is_replication_master else 'slave'
    message.redis = {}

    if self.is_replication_master:
        self._init_master(message)
    else:
        self._init_slave(message)

    __redis__['volume'].tags = self.redis_tags
    __redis__['volume'] = storage2.volume(__redis__['volume'])
    self._init_script = self.redis_instances.get_default_process()

    message.redis['ports'] = self.redis_instances.ports
    message.redis['passwords'] = self.redis_instances.passwords
    message.redis['num_processes'] = len(self.redis_instances.instances)
    message.redis['volume_config'] = dict(__redis__['volume'])

    bus.fire('service_configured', service_name=SERVICE_NAME,
            replication=repl, preset=self.initial_preset)

def _plug_volume(self, qe_mpoint):
    try:
        assert len(qe_mpoint.volumes), \
                'Invalid mpoint info %s. Volumes list is empty' % qe_mpoint

        qe_volume = qe_mpoint.volumes[0]
        mpoint = qe_mpoint.dir or None
        assert qe_volume.volume_id, \
                'Invalid volume info %s. volume_id should be non-empty' % qe_volume

        vol = storage2.volume(
                type=self._vol_type,
                id=qe_volume.volume_id,
                name=qe_volume.device,
                mpoint=mpoint)
        LOG.info("Plugging volume with tags: %s" % str(vol.tags))
        if mpoint:
            logger = bus.init_op.logger if bus.init_op else LOG
            logger.info('Ensure %s: take %s, mount to %s',
                    self._vol_type, vol.id, vol.mpoint)
            vol.ensure(mount=True, mkfs=True, fstab=True)
            # vol._create_tags_async(qe_volume.volume_id, build_tags())  # [SCALARIZR-1012] upd [UI-343]
    except:
        LOG.exception("Can't attach volume")

def _plug_new_style_volumes(self, volumes):
    for vol in volumes:
        vol = storage2.volume(**vol)
        # vol.tags.update(build_tags())  # [UI-343]
        self._log_ensure_volume(vol)
        vol.ensure(mount=bool(vol.mpoint), mkfs=True)
        self._volumes.append(dict(vol))

def on_host_init_response(self, message):
    with bus.initialization_op as op:
        with op.phase(self._phase_rabbitmq):
            with op.step(self._step_accept_scalr_conf):
                if not message.body.has_key("rabbitmq"):
                    raise HandlerError("HostInitResponse message for RabbitMQ behaviour must have 'rabbitmq' property")

                rabbitmq_data = message.rabbitmq.copy()

                if not rabbitmq_data['password']:
                    rabbitmq_data['password'] = cryptotool.pwgen(10)

                hostname = RABBIT_HOSTNAME_TPL % int(message.server_index)
                rabbitmq_data['server_index'] = message.server_index
                rabbitmq_data['hostname'] = hostname

                dns.ScalrHosts.set('127.0.0.1', hostname)
                with open('/etc/hostname', 'w') as f:
                    f.write(hostname)
                system2(('hostname', '-F', '/etc/hostname'))

                volume_config = rabbitmq_data.pop('volume_config')
                volume_config['mpoint'] = DEFAULT_STORAGE_PATH
                rabbitmq_data['volume'] = storage2.volume(volume_config)
                rabbitmq_data['volume'].tags = self.rabbitmq_tags

                __rabbitmq__.update(rabbitmq_data)

def _plug_volume(self, qe_mpoint):
    try:
        assert len(qe_mpoint.volumes), \
                'Invalid mpoint info %s. Volumes list is empty' % qe_mpoint

        qe_volume = qe_mpoint.volumes[0]
        mpoint = qe_mpoint.dir or None
        assert qe_volume.volume_id, \
                'Invalid volume info %s. volume_id should be non-empty' % qe_volume

        vol = storage2.volume(
                type=self._vol_type,
                id=qe_volume.volume_id,
                name=qe_volume.device,
                mpoint=mpoint)

        if mpoint:
            def block():
                vol.ensure(mount=True, mkfs=True, fstab=True)
                bus.fire("block_device_mounted",
                        volume_id=vol.id, device=vol.device)

            if bus.initialization_op:
                msg = 'Mount device %s to %s' % (vol.device, vol.mpoint)
                with bus.initialization_op.step(msg):
                    block()
            else:
                block()
    except:
        LOG.exception("Can't attach volume")

def replace_disk(self, index, disk):
    '''
    :param: index RAID disk index. Starts from 0
    :type index: int

    :param: disk Replacement disk.
    :type: disk dict/Volume
    '''
    disk_replace = storage2.volume(disk)
    replace_is_new = not disk_replace.id

    try:
        disk_replace.ensure()

        disk_find = self.disks[index]
        mdadm.mdadm('manage', self.raid_pv, '--fail', disk_find.device)
        mdadm.mdadm('manage', self.raid_pv, '--remove', disk_find.device)
        mdadm.mdadm('manage', self.raid_pv, '--add', disk_replace.device)

        self.disks[index] = disk_replace
    except:
        with util.capture_exception(logger=LOG):
            if replace_is_new:
                disk_replace.destroy(force=True)
            else:
                disk_find.destroy(force=True)

def _plug_volume(self, qe_mpoint):
    try:
        assert len(qe_mpoint.volumes), \
                'Invalid mpoint info %s. Volumes list is empty' % qe_mpoint

        qe_volume = qe_mpoint.volumes[0]
        mpoint = qe_mpoint.dir or None
        assert qe_volume.volume_id, \
                'Invalid volume info %s. volume_id should be non-empty' % qe_volume

        vol = storage2.volume(
                type=self._vol_type,
                id=qe_volume.volume_id,
                name=qe_volume.device,
                mpoint=mpoint)

        if mpoint:
            logger = bus.init_op.logger if bus.init_op else LOG
            logger.info('Ensure %s: take %s, mount to %s',
                    self._vol_type, vol.id, vol.mpoint)
            vol.ensure(mount=True, mkfs=True, fstab=True)

            bus.fire("block_device_mounted",
                    volume_id=vol.id, device=vol.device)
            self.send_message(Messages.BLOCK_DEVICE_MOUNTED, {
                    "device_name": vol.device,
                    "volume_id": vol.id,
                    "mountpoint": vol.mpoint})
    except:
        LOG.exception("Can't attach volume")

def _plug_new_style_volumes(self, volumes):
    for vol in volumes:
        vol = storage2.volume(**vol)
        vol.tags.update(build_tags())
        self._log_ensure_volume(vol)
        vol.ensure(mount=bool(vol.mpoint), mkfs=True)
        self._volumes.append(dict(vol))

def on_before_host_up(self, message):
    LOG.debug("on_before_host_up")
    """
    Configure MySQL __mysql__['behavior']
    @type message: scalarizr.messaging.Message
    @param message: HostUp message
    """
    self.generate_datadir()
    self.mysql.service.stop('Configuring MySQL')

    # On Debian/GCE we've got 'Another MySQL daemon already running with the same unix socket.'
    socket_file = mysql2_svc.my_print_defaults('mysqld').get('socket')
    if socket_file:
        coreutils.remove(socket_file)

    if 'Amazon' == linux.os['name']:
        self.mysql.my_cnf.pid_file = os.path.join(__mysql__['data_dir'], 'mysqld.pid')

    repl = 'master' if int(__mysql__['replication_master']) else 'slave'
    bus.fire('before_mysql_configure', replication=repl)

    if repl == 'master':
        self._init_master(message)
    else:
        self._init_slave(message)

    # Force to resave volume settings
    __mysql__['volume'] = storage2.volume(__mysql__['volume'])

    bus.fire('service_configured', service_name=__mysql__['behavior'],
            replication=repl, preset=self.initial_preset)

def i_have_lvm_layout(step):
    world.tmp_mpoint = tempfile.mkdtemp()
    loop = volume(type='loop', size=0.1)
    world.loops = [loop]
    world.lvm_vol = volume(type='lvm',
                            vg='mysql',
                            size='80%VG',
                            name='data',
                            mpoint=world.tmp_mpoint,
                            pvs=[loop])
    world.lvm_vol.ensure(mount=True, mkfs=True)

    dir_stats = os.statvfs(world.tmp_mpoint)
    world.free_space_before = dir_stats.f_bavail * dir_stats.f_bsize

    world.test_file = os.path.join(world.tmp_mpoint, 'test_file')
    world.test_text = "I don't wanna die"
    with open(world.test_file, 'w') as f:
        f.write(world.test_text)

def do_grow(self, volume, growth):
    vol = storage2.volume(volume)
    self._mysql_init.stop('Growing data volume')
    try:
        growed_vol = vol.grow(**growth)
        return dict(growed_vol)
    finally:
        self._mysql_init.start()

def do_grow(self, volume, growth):
    vol = storage2.volume(volume)
    self.stop_service(reason='Growing data volume')
    try:
        growed_vol = vol.grow(**growth)
        return dict(growed_vol)
    finally:
        self.start_service()

def do_grow(op):
    vol = storage2.volume(volume)
    self.stop_service(reason='Growing data volume')
    try:
        grown_vol = vol.grow(**growth)
        postgresql_svc.__postgresql__['volume'] = dict(grown_vol)
        return dict(grown_vol)
    finally:
        self.start_service()

def _run(self):
    self.snapshot = storage2.snapshot(self.snapshot)
    if self.volume:
        self.volume = storage2.volume(self.volume)
        self.volume.snap = self.snapshot
        self.volume.ensure()
    else:
        self.volume = self.snapshot.restore()
    return self.volume

def _plug_new_style_volumes(self, volumes):
    for vol in volumes:
        template = vol.pop('template', None)
        from_template_if_missing = vol.pop('from_template_if_missing', False)
        vol = storage2.volume(**vol)
        self._log_ensure_volume(vol)
        try:
            vol.ensure(mount=bool(vol.mpoint), mkfs=True)
        except storage2.VolumeNotExistsError, e:
            if template and bool(int(from_template_if_missing)):
                LOG.warn('Volume %s not exists, re-creating %s from template',
                        str(e), vol.type)
                vol = storage2.volume(**template)
                self._log_ensure_volume(vol)
                vol.ensure(mount=bool(vol.mpoint), mkfs=True)
            else:
                raise
        self._volumes.append(dict(vol))

def _plug_new_style_volumes(self, volumes):
    # ec2_ephemerals should be processed before ebses
    key_fun = lambda x: 0 if x.get('type') == "ec2_ephemeral" else 1
    for vol in sorted(volumes, key=key_fun):
        vol = storage2.volume(**vol)
        # vol.tags.update(build_tags())  # [UI-343]
        self._log_ensure_volume(vol)
        vol.ensure(mount=bool(vol.mpoint), mkfs=True)
        self._volumes.append(dict(vol))

def do_grow(op):
    vol = storage2.volume(volume)
    self._mysql_init.stop('Growing data volume')
    try:
        growed_vol = vol.grow(**growth)
        __mysql__['volume'] = dict(growed_vol)
        return dict(growed_vol)
    finally:
        self._mysql_init.start()

def _clone(self, config):
    disks = []
    for disk_cfg_or_obj in self.disks:
        disk = storage2.volume(disk_cfg_or_obj)
        disk_clone = disk.clone()
        disks.append(disk_clone)

    config['disks'] = disks

    for attr in ('pv_uuid', 'lvm_group_cfg', 'raid_pv', 'device'):
        config.pop(attr, None)

def on_ConvertVolume(self, message):
    try:
        if __node__['state'] != 'running':
            raise HandlerError('scalarizr is not in "running" state')

        old_volume = storage2.volume(__mysql__['volume'])
        new_volume = storage2.volume(message.volume)

        if old_volume.type != 'eph' or new_volume.type != 'lvm':
            raise HandlerError('%s to %s convertation unsupported.' %
                    (old_volume.type, new_volume.type))

        new_volume.ensure()
        __mysql__.update({'volume': new_volume})
    except:
        e = sys.exc_info()[1]
        LOG.error('Volume convertation failed: %s' % e)
        self.send_message(MysqlMessages.CONVERT_VOLUME_RESULT,
                dict(status='error', last_error=str(e)))

def test_ensure_existed(self, stat, exists, losetup, losetup_all):
    stat.return_value = mock.Mock(st_size=1073741931)
    exists.return_value = True
    losetup_all.return_value.__getitem__.return_value = '/mnt/loopdev0'

    vol = storage2.volume(
            type='loop',
            device='/dev/loop0',
            file='/mnt/loopdev0')
    vol.ensure()

    losetup_all.assert_called_once_with()

def do_databundle(self, volume):
    LOG.info("Creating PostgreSQL data bundle")
    volume = storage2.volume(volume)
    if volume.type == 'eph':
        volume.ensure()
    backup_obj = backup.backup(type='snap_postgresql',
            volume=volume,
            tags=volume.tags)
    restore = backup_obj.run()
    snap = restore.snapshot
    return dict(snap)

def do_grow(op):
    vol = storage2.volume(volume)
    ports = self.busy_ports
    LOG.debug("Stopping Redis processes on ports %s before growing data volume." % str(ports))
    self.stop_service(ports=ports, reason='Growing data volume')
    LOG.debug("All redis processes stopped. Attempting to grow data volume.")
    try:
        growed_vol = vol.grow(**growth)
        redis_service.__redis__['volume'] = dict(growed_vol)
        return dict(growed_vol)
    finally:
        self.start_service(ports)
        LOG.info("Grow process: Redis service has been started on ports %s." % str(ports))

def on_BeforeHostTerminate(self, message):
    LOG.debug('Handling BeforeHostTerminate message from %s' % message.local_ip)
    #assert message.local_ip
    if message.local_ip == __node__['private_ip']:
        self.mysql.service.stop(reason='Server will be terminated')
        LOG.info('Detaching MySQL storage')
        vol = storage2.volume(__mysql__['volume'])
        vol.detach()
        if not int(__mysql__['replication_master']):
            LOG.info('Destroying volume %s', vol.id)
            vol.destroy(remove_disks=True)
            LOG.info('Volume %s has been destroyed.' % vol.id)

def test_ensure_new(self, dd, losetup, losetup_all):
    losetup_all.return_value.__getitem__.return_value = '/dev/loop0'

    vol = storage2.volume(type='loop', size=1, zerofill=True)
    vol.ensure()

    assert vol.device == '/dev/loop0'
    assert vol.file.startswith('/mnt/loopdev')
    dd.assert_called_once_with(**{
            'if': '/dev/zero',
            'of': vol.file,
            'bs': '1M',
            'count': 1024})
    losetup.assert_called_with(vol.file, find=True)

def on_BeforeHostTerminate(self, message):
    LOG.debug('Handling BeforeHostTerminate message from %s' % message.local_ip)
    if message.local_ip == __node__['private_ip']:
        self.mysql.service.stop(reason='Server will be terminated')
        LOG.info('Detaching MySQL storage')
        vol = storage2.volume(__mysql__['volume'])
        vol.detach()
        if not int(__mysql__['replication_master']):
            LOG.info('Destroying volume %s', vol.id)
            vol.destroy(remove_disks=True)
            LOG.info('Volume %s has been destroyed.' % vol.id)
        else:
            vol.umount()

def on_host_init_response(self, message):
    log = bus.init_op.logger
    log.info('Accept Scalr configuration')
    if not message.body.has_key("rabbitmq"):
        raise HandlerError(
                "HostInitResponse message for RabbitMQ behaviour must have 'rabbitmq' property")

    rabbitmq_data = message.rabbitmq.copy()

    if not rabbitmq_data['password']:
        rabbitmq_data['password'] = cryptotool.pwgen(10)

    self.service.stop()
    self.cleanup_hosts_file('/')

    if os.path.exists(RABBITMQ_ENV_CFG_PATH):
        os.remove(RABBITMQ_ENV_CFG_PATH)

    if not os.path.isdir(DEFAULT_STORAGE_PATH):
        os.makedirs(DEFAULT_STORAGE_PATH)

    rabbitmq_user = pwd.getpwnam("rabbitmq")
    os.chown(DEFAULT_STORAGE_PATH, rabbitmq_user.pw_uid, rabbitmq_user.pw_gid)

    self._logger.info('Performing initial cluster reset')

    hostname = rabbitmq_svc.RABBIT_HOSTNAME_TPL % int(message.server_index)
    __rabbitmq__['hostname'] = hostname
    dns.ScalrHosts.set('127.0.0.1', hostname)
    self._prepare_env_config()

    self.service.start()
    self.rabbitmq.stop_app()
    self.rabbitmq.reset()
    self.service.stop()

    # Use RABBITMQ_NODENAME instead of setting actual hostname
    #with open('/etc/hostname', 'w') as f:
    #    f.write(hostname)
    #system2(('hostname', '-F', '/etc/hostname'))

    volume_config = rabbitmq_data.pop('volume_config')
    volume_config['mpoint'] = DEFAULT_STORAGE_PATH
    rabbitmq_data['volume'] = storage2.volume(volume_config)

    __rabbitmq__.update(rabbitmq_data)

def replace_disk(step, index, kind, raw_cfg):
    # Prepare config
    cfg = parse_config(raw_cfg, dot_notation=True)

    if kind == 'raid':
        raise Exception('Wrong disk type:raid')

    cfg['type'] = kind
    world.initial_cfg = cfg

    vol = world.new_volume = storage2.volume(**cfg)
    vol.ensure()

    world.volume.replace_disk(int(index), world.new_volume)
    mdadm.mdadm('misc', world.volume.raid_pv, '--wait')

def _ensure(self):
    if self.snap:
        config = self.snap \
                if isinstance(self.snap, dict) \
                else self.snap.config()
    else:
        config = self.config()

    disk = storage2.volume(config['disk'])
    if disk.device and disk.device.startswith('/dev/sd'):
        disk = storage2.volume(
                type='ec2_ephemeral', name='ephemeral0')
    disk.ensure()
    self.disk = config['disk'] = disk

    if self.snap:
        if self._eph_vol:
            self._eph_vol.detach(force=True)
        self._eph_vol = self._eph_pvd.create_from_snapshot(**config)
        self.snap = None
    else:
        self._eph_vol = self._eph_pvd.create(**config)

    self.device = self._eph_vol.device

def on_host_init_response(self, message):
    log = bus.init_op.logger
    log.info('Accept Scalr configuration')
    if not message.body.has_key("rabbitmq"):
        raise HandlerError("HostInitResponse message for RabbitMQ behaviour must have 'rabbitmq' property")

    rabbitmq_data = message.rabbitmq.copy()

    if not rabbitmq_data['password']:
        rabbitmq_data['password'] = cryptotool.pwgen(10)

    self.cleanup_hosts_file('/')

    if os.path.exists(RABBITMQ_ENV_CFG_PATH):
        os.remove(RABBITMQ_ENV_CFG_PATH)

    if not os.path.isdir(DEFAULT_STORAGE_PATH):
        os.makedirs(DEFAULT_STORAGE_PATH)

    rabbitmq_user = pwd.getpwnam("rabbitmq")
    os.chown(DEFAULT_STORAGE_PATH, rabbitmq_user.pw_uid, rabbitmq_user.pw_gid)

    self._logger.info('Performing initial cluster reset')

    self.service.stop()

    hostname = rabbitmq_svc.RABBIT_HOSTNAME_TPL % int(message.server_index)
    __rabbitmq__['hostname'] = hostname
    dns.ScalrHosts.set('127.0.0.1', hostname)
    self._set_nodename_in_env()

    self.service.start()
    self.rabbitmq.stop_app()
    self.rabbitmq.reset()
    self.service.stop()

    # Use RABBITMQ_NODENAME instead of setting actual hostname
    #with open('/etc/hostname', 'w') as f:
    #    f.write(hostname)
    #system2(('hostname', '-F', '/etc/hostname'))

    volume_config = rabbitmq_data.pop('volume_config')
    volume_config['mpoint'] = DEFAULT_STORAGE_PATH
    rabbitmq_data['volume'] = storage2.volume(volume_config)
    rabbitmq_data['volume'].tags = self.rabbitmq_tags

    __rabbitmq__.update(rabbitmq_data)

def prepare_volume(step, kind, raw_cfg):
    # Prepare config
    cfg = parse_config(raw_cfg, dot_notation=True)

    if kind == 'raid':
        disk_count = cfg.pop('disks')
        disk_config = cfg.pop('disk')
        cfg['disks'] = [disk_config] * int(disk_count)

    world.tmp_mount_dir = tempfile.mkdtemp()
    cfg['mpoint'] = world.tmp_mount_dir
    cfg['type'] = kind
    world.initial_cfg = cfg

    vol = world.volume = storage2.volume(**cfg)
    vol.ensure(mount=True, mkfs=True)

def on_before_host_up(self, message):
    """
    Configure PostgreSQL behaviour
    @type message: scalarizr.messaging.Message
    @param message: HostUp message
    """
    repl = 'master' if self.is_replication_master else 'slave'
    #bus.fire('before_postgresql_configure', replication=repl)

    if self.is_replication_master:
        self._init_master(message)
    else:
        self._init_slave(message)

    # Force to resave volume settings
    __postgresql__['volume'] = storage2.volume(__postgresql__['volume'])

    bus.fire('service_configured', service_name=SERVICE_NAME,
            replication=repl, preset=self.initial_preset)

def on_init(self):
    bus.on(
        before_host_init=self.on_before_host_init,
        host_init_response=self.on_host_init_response,
        before_host_up=self.on_before_host_up
    )
    try:
        handlers.script_executor.skip_events.add(Messages.INT_BLOCK_DEVICE_UPDATED)
    except AttributeError:
        pass
    if __node__['state'] == 'running':
        volumes = self._queryenv.list_farm_role_params(
                __node__['farm_role_id']).get('params', {}).get('volumes', [])
        volumes = volumes or []  # Cast to list
        for vol in volumes:
            vol = storage2.volume(vol)
            vol.ensure(mount=bool(vol.mpoint))

def on_BeforeHostTerminate(self, message):
    if message.local_ip != __node__['private_ip']:
        return

    if __node__['platform'] == 'cloudstack':
        # Important!
        # After the following code runs, the server will lose network for some time
        # Fixes: SMNG-293
        conn = __node__['cloudstack'].connect_cloudstack()
        vm = conn.listVirtualMachines(id=__node__['cloudstack']['instance_id'])[0]
        result = conn.listPublicIpAddresses(ipAddress=vm.publicip)
        if result:
            try:
                conn.disableStaticNat(result[0].id)
            except:
                self._logger.warn('Failed to disable static NAT: %s',
                        str(sys.exc_info()[1]))

    suspend = message.body.get('suspend')
    suspend = suspend and int(suspend) or False
    if suspend:
        return

    volumes = message.body.get('volumes', [])
    volumes = volumes or []
    for volume in volumes:
        try:
            volume = storage2.volume(volume)
            volume.umount()
            volume.detach()
        except:
            self._logger.warn('Failed to detach volume %s: %s',
                    volume.id, sys.exc_info()[1])

    if __node__['platform'] == 'openstack':
        conn = __node__['openstack'].connect_nova()
        sid = __node__['openstack']['server_id']
        for vol in conn.volumes.get_server_volumes(sid):
            try:
                conn.volumes.delete_server_volume(sid, vol.id)
            except:
                self._logger.warn('Failed to detach volume %s: %s',
                        vol.id, str(sys.exc_info()[1]))