def ensure(self, mount=False, mkfs=False, fstab=False, **updates):
    """
    Make sure that volume is attached and ready for use.

    :param mount: if set, volume eventually will be mounted to it's mpoint
    :param mkfs: if set, volume will have corresponding fs eventually
    :param fstab: if set, a missing fstab entry for the device is added
    :return: volume configuration dict
    """
    if not self.features['restore']:
        self._check_restore_unsupported()
    if self.snap and isinstance(self.snap, Snapshot):
        # Carry snapshots around as plain config dicts
        self.snap = self.snap.config()

    self._ensure()
    self._check_attr('device')
    if not self.id:
        self.id = self._genid('vol-')

    if mount:
        LOG.debug('Mounting: %s', self.id)
        try:
            self.mount()
        except mod_mount.NoFileSystem:
            # Only create a filesystem when the caller asked for one
            if not mkfs:
                raise
            LOG.debug('Creating %s filesystem: %s', self.fstype, self.id)
            self.mkfs()
            self.mount()

    if fstab and self.device not in mod_mount.fstab():
        LOG.debug('Adding to fstab: %s', self.id)
        mod_mount.fstab().add(self.device, self.mpoint, self.fstype)

    return self.config()
def on_IntBlockDeviceUpdated(self, message):
    """React to udev block device attach/detach notifications."""
    devname = message.devname
    if not devname:
        return

    action = message.action
    if action == "add":
        LOG.debug("udev notified me that block device %s was attached", devname)
        payload = {"device_name": self.get_devname(devname)}
        self.send_message(Messages.BLOCK_DEVICE_ATTACHED, payload, broadcast=True)
        bus.fire("block_device_attached", device=devname)
    elif action == "remove":
        LOG.debug("udev notified me that block device %s was detached", devname)
        # Drop any fstab entry so the device isn't re-mounted on boot
        mount.fstab().remove(devname)
        payload = {"device_name": self.get_devname(devname)}
        self.send_message(Messages.BLOCK_DEVICE_DETACHED, payload, broadcast=True)
        bus.fire("block_device_detached", device=devname)
def on_init(self, *args, **kwargs):
    """
    Subscribe lifecycle handlers and prepare the host on agent start.

    Side effects visible here: registers bus/producer listeners, runs a
    best-effort ntpdate sync, optionally sets the hostname from the cloud
    public hostname, re-enables root SSH login in Ubuntu cloud-init
    configs, installs the server SSH public key, and mounts ephemeral
    (instance-store) devices.
    """
    bus.on("before_hello", self.on_before_hello)
    bus.on("before_host_init", self.on_before_host_init)
    bus.on("before_restart", self.on_before_restart)
    bus.on("before_reboot_finish", self.on_before_reboot_finish)
    # Best-effort clock sync; any failure is deliberately swallowed
    try:
        system(('ntpdate', '-u', '0.amazon.pool.ntp.org'))
    except:
        pass
    msg_service = bus.messaging_service
    producer = msg_service.get_producer()
    producer.on("before_send", self.on_before_message_send)

    if not os_dist.windows_family and not __node__.get('hostname'):
        # Set the hostname to this instance's public hostname
        try:
            hostname_as_pubdns = int(__ec2__['hostname_as_pubdns'])
        except:
            # Missing or non-integer setting defaults to enabled
            hostname_as_pubdns = True
        if hostname_as_pubdns:
            pub_hostname = self._platform.get_public_hostname()
            self._logger.debug('Setting hostname to %s' % pub_hostname)
            system2("hostname " + pub_hostname, shell=True)

    if disttool.is_ubuntu():
        # Ubuntu cloud-init scripts may disable root ssh login
        for path in ('/etc/ec2-init/ec2-config.cfg', '/etc/cloud/cloud.cfg'):
            if os.path.exists(path):
                c = None
                with open(path, 'r') as fp:
                    c = fp.read()
                # Rewrite 'disable_root: ...' / 'disable_root= ...' to 0,
                # keeping whichever separator the file already uses
                c = re.sub(re.compile(r'^disable_root[^:=]*([:=]).*', re.M),
                           r'disable_root\1 0', c)
                with open(path, 'w') as fp:
                    fp.write(c)

    if not linux.os.windows_family:
        # Add server ssh public key to authorized_keys
        ssh_key = self._platform.get_ssh_pub_key()
        if ssh_key:
            add_authorized_key(ssh_key)

    # Mount ephemeral devices
    # Seen on eucalyptus:
    #   - fstab contains invalid fstype and `mount -a` fails
    if self._platform.name == 'eucalyptus':
        # Mount instance-store devices one at a time so a single bad
        # fstab entry cannot abort the others
        mtab = mount.mounts()
        fstab = mount.fstab()
        for device in self._platform.instance_store_devices:
            if os.path.exists(device) and device in fstab and device not in mtab:
                entry = fstab[device]
                try:
                    mount.mount(device, entry.mpoint, '-o', entry.options)
                except:
                    self._logger.warn(sys.exc_info()[1])
    else:
        if not os_dist.windows_family:
            system2('mount -a', shell=True, raise_exc=False)
def on_IntBlockDeviceUpdated(self, message):
    """Dispatch udev add/remove block device events to messaging and bus."""
    if not message.devname:
        return

    if message.action == "add":
        LOG.debug("udev notified me that block device %s was attached",
                  message.devname)
        self.send_message(
            Messages.BLOCK_DEVICE_ATTACHED,
            {"device_name": self.get_devname(message.devname)},
            broadcast=True)
        bus.fire("block_device_attached", device=message.devname)
        return

    if message.action == "remove":
        LOG.debug("udev notified me that block device %s was detached",
                  message.devname)
        # Forget the device in fstab before announcing the detach
        fstab = mount.fstab()
        fstab.remove(message.devname)
        self.send_message(
            Messages.BLOCK_DEVICE_DETACHED,
            {"device_name": self.get_devname(message.devname)},
            broadcast=True)
        bus.fire("block_device_detached", device=message.devname)
def _test_fstab(self):
    """Check that the second /etc/fstab entry is parsed field-by-field."""
    entries = mount.fstab("/etc/fstab").list_entries()
    root_entry = entries[1]
    # (attribute, expected value) pairs in the original assertion order
    expectations = [
        ('device', "/dev/sda1"),
        ('mpoint', "/"),
        ('fstype', "ext4"),
        ('options', "errors=remount-ro"),
        ('value', "/dev/sda1 / ext4 errors=remount-ro 0 1"),
    ]
    for attr, expected in expectations:
        self.assertEqual(getattr(root_entry, attr), expected)
def _cleanup_after_rebundle():
    """
    Strip server-specific state after an image rebundle.

    Removes Chef client credentials, deletes Scalr-managed fstab entries
    (and their systemd-generated runtime mount units on RedHat >= 7,
    except Amazon Linux), then wipes the private configuration directory
    while preserving user-data, UpdateClient status and keys.
    """
    cnf = bus.cnf
    pl = bus.platform
    logger = logging.getLogger(__name__)

    if os.path.exists('/etc/chef/client.pem'):
        os.remove('/etc/chef/client.pem')
    if os.path.exists('/etc/chef/client.rb'):
        os.remove('/etc/chef/client.rb')

    # remove storage devices from fstab
    if not linux.os.windows_family:
        def remove_systemd_generated_mount_unit(mpoint):
            # '/mnt/data' -> 'mnt-data.mount' (systemd unit-name encoding
            # for simple paths; leading '/' stripped by the [1:] slice)
            unit_name = '{}.mount'.format(mpoint.replace('/', '-')[1:])
            logger.debug('Removing systemd runtime unit %s', unit_name)
            coreutils.remove(
                '/run/systemd/generator/local-fs.target.wants/{}'.format(
                    unit_name))
            coreutils.remove('/run/systemd/generator/{}'.format(unit_name))

        fstab = mount.fstab()
        should_reload_systemd = False
        for entry in fstab:
            # Scalr marks its own entries with a 'comment=scalr' option
            if 'comment=scalr' in entry.options:
                logger.debug('Removing %s from fstab', entry.device)
                fstab.remove(entry.device)
                if linux.os['family'] == 'RedHat' and \
                        linux.os['name'] != 'Amazon' and \
                        linux.os['release'] >= (7, 0):
                    remove_systemd_generated_mount_unit(entry.mpoint)
                    should_reload_systemd = True
        if should_reload_systemd:
            # Pick up the removed generator units
            linux.system('systemctl daemon-reload', shell=True)

    # Reset private configuration
    priv_path = cnf.private_path()
    for file in os.listdir(priv_path):
        if file in ('.user-data', 'update.status', 'keys'):
            # protect user-data and UpdateClient status
            # keys/default maybe already refreshed by UpdateClient
            continue
        path = os.path.join(priv_path, file)
        # Ensure we have permission to delete the tree
        coreutils.chmod_r(path, 0700)
        try:
            os.remove(path) if (os.path.isfile(path) or os.path.islink(path)) else shutil.rmtree(path)
        except:
            if linux.os.windows and sys.exc_info()[0] == WindowsError:
                # ScalrUpdClient locks db.sqlite
                logger.debug(sys.exc_info()[1])
            else:
                raise
    if not linux.os.windows_family:
        # Flush filesystem buffers before the image is captured
        system2('sync', shell=True)
def detach(self, force=False, **kwds):
    """
    Unmount the volume, drop its fstab entry and detach it.

    No-op when the volume has no device. ``self.device`` is cleared only
    for volume types that support detaching.
    """
    device = self.device
    LOG.debug('Detaching volume %s', self.id)
    if not device:
        LOG.debug('Volume %s has no device, nothing to detach', self.id)
        return
    self.umount()
    system_fstab = mod_mount.fstab()
    if device in system_fstab:
        system_fstab.remove(device)
    self._detach(force, **kwds)
    if self.features['detach']:
        # Forget the device path once the backend detached it
        self.device = None
    LOG.debug('Volume %s detached', self.id)
def fix_fstab(self, volume):
    """
    Remove entries for currently attached EBS devices from the fstab
    inside the mounted image (``<volume.mpoint>/etc/fstab``), so the
    resulting image does not try to mount volumes it won't have.
    """
    ec2_conn = self.platform.new_ec2_conn()
    image_fstab = mount.fstab(os.path.join(volume.mpoint, 'etc/fstab'))
    filters = {'attachment.instance-id': self.platform.get_instance_id()}
    for vol in ec2_conn.get_all_volumes(filters=filters):
        device = vol.attach_data.device
        try:
            image_fstab.remove(device)
        except KeyError:
            # Device had no fstab entry; nothing to clean up
            LOG.warn("Can't remove %s from fstab" % device)
def ensure(self, mount=False, mkfs=False, fstab=False, **updates):
    """
    Attach the volume and optionally mount it and register it in fstab.

    :param mount: mount the volume to its mpoint
    :param mkfs: create a filesystem first if mounting fails for lack of one
    :param fstab: add an fstab entry when the device has none
    :return: volume configuration dict
    """
    if not self.features['restore']:
        self._check_restore_unsupported()
    if self.snap and isinstance(self.snap, Snapshot):
        self.snap = self.snap.config()
    self._ensure()
    self._check_attr('device')
    if not self.id:
        self.id = self._genid('vol-')
    if mount:
        LOG.debug('Mounting: %s', self.id)
        try:
            self.mount()
        except mod_mount.NoFileSystem:
            if not mkfs:
                raise
            LOG.debug('Creating %s filesystem: %s', self.fstype, self.id)
            self.mkfs()
            self.mount()
    if fstab and self.device not in mod_mount.fstab():
        LOG.debug('Adding to fstab: %s', self.id)
        mod_mount.fstab().add(self.device, self.mpoint, self.fstype)
    return self.config()
def _cleanup_after_rebundle():
    """
    Strip server-specific state after an image rebundle.

    Removes Chef client credentials, deletes Scalr-managed fstab entries,
    then wipes the private configuration directory while preserving
    user-data, UpdateClient status and keys.
    """
    cnf = bus.cnf
    pl = bus.platform
    logger = logging.getLogger(__name__)

    if os.path.exists('/etc/chef/client.pem'):
        os.remove('/etc/chef/client.pem')
    if os.path.exists('/etc/chef/client.rb'):
        os.remove('/etc/chef/client.rb')

    # remove storage devices from fstab
    if not linux.os.windows_family:
        fstab = mount.fstab()
        for entry in fstab:
            # Scalr marks its own entries with a 'comment=scalr' option
            if 'comment=scalr' in entry.options:
                fstab.remove(entry.device)

    # Reset private configuration
    priv_path = cnf.private_path()
    for file in os.listdir(priv_path):
        if file in ('.user-data', 'update.status', 'keys'):
            # protect user-data and UpdateClient status
            # keys/default maybe already refreshed by UpdateClient
            continue
        path = os.path.join(priv_path, file)
        # Ensure we have permission to delete the tree
        coreutils.chmod_r(path, 0700)
        try:
            os.remove(path) if (os.path.isfile(path) or os.path.islink(path)) else shutil.rmtree(path)
        except:
            if linux.os.windows and sys.exc_info()[0] == WindowsError:
                # ScalrUpdClient locks db.sqlite
                logger.debug(sys.exc_info()[1])
            else:
                raise
    if not linux.os.windows_family:
        # Flush filesystem buffers before the image is captured
        system2('sync', shell=True)
def _ensure(self):
    """
    Assemble (or create) the RAID array and its LVM layout.

    When restoring from a snapshot, disks are re-created from their disk
    snapshots, the md array is re-assembled and the LVM volume group
    config is restored from the base64 payload in ``lvm_group_cfg``.
    Without a saved group config, a fresh array, PV, VG and a single LV
    spanning 100% of free space are created.

    Fix vs original: the 'No logical volumes found' error message never
    substituted its ``%s`` placeholder — now formatted with ``self.vg``.
    """
    # v1 snapshots stored disks as [{'snapshot': {...}}, ...]
    self._v1_compat = self.snap and len(self.snap['disks']) and \
            isinstance(self.snap['disks'][0], dict) and \
            'snapshot' in self.snap['disks'][0]
    if self.snap:
        disks = []
        snaps = []
        try:
            # @todo: create disks concurrently
            for disk_snap in self.snap['disks']:
                if self._v1_compat:
                    disk_snap = disk_snap['snapshot']
                snap = storage2.snapshot(disk_snap)
                snaps.append(snap)
            if self.disks:
                if len(self.disks) != len(snaps):
                    raise storage2.StorageError(
                        'Volume disks count is not equal to '
                        'snapshot disks count')
                self.disks = map(storage2.volume, self.disks)
            # Mixing snapshots to self.volumes (if exist) or empty volumes
            disks = self.disks or [
                storage2.volume(type=s['type']) for s in snaps]
            for disk, snap in zip(disks, snaps):
                disk.snap = snap
        except:
            # Roll back any disks created so far, logging the original error
            with util.capture_exception(logger=LOG):
                for disk in disks:
                    disk.destroy()
        self.disks = disks

        if self._v1_compat:
            # is some old snapshots /dev/vgname occured
            self.vg = os.path.basename(self.snap['vg'])
        else:
            self.vg = self.snap['vg']
        self.level = int(self.snap['level'])
        self.pv_uuid = self.snap['pv_uuid']
        self.lvm_group_cfg = self.snap['lvm_group_cfg']

        self.snap = None

    self._check_attr('level')
    self._check_attr('vg')
    self._check_attr('disks')

    assert int(self.level) in (0, 1, 5, 10), \
            'Unknown raid level: %s' % self.level

    # Making sure autoassembly is disabled before attaching disks
    self._disable_autoassembly()

    disks = []
    for disk in self.disks:
        disk = storage2.volume(disk)
        disk.ensure()
        disks.append(disk)
    self.disks = disks

    disks_devices = [disk.device for disk in self.disks]

    if self.lvm_group_cfg:
        time.sleep(2)  # Give a time to device manager
        try:
            # Array may already be assembled from these members
            raid_device = mdadm.mdfind(*disks_devices)
        except storage2.StorageError:
            raid_device = mdadm.findname()
            """
            if self.level in (1, 10):
                for disk in disks_devices:
                    mdadm.mdadm('misc', None, disk,
                                zero_superblock=True, force=True)
                try:
                    kwargs = dict(force=True, metadata='default',
                                  level=self.level, assume_clean=True,
                                  raid_devices=len(disks_devices))
                    mdadm.mdadm('create', raid_device, *disks_devices, **kwargs)
                except:
                    if self.level == 10 and self._v1_compat:
                        self._v1_repair_raid10(raid_device)
                    else:
                        raise
            else:
            """
            mdadm.mdadm('assemble', raid_device, *disks_devices)
            mdadm.mdadm('misc', None, raid_device, wait=True, raise_exc=False)

        # Restore vg config
        vg_restore_file = tempfile.mktemp()
        with open(vg_restore_file, 'w') as f:
            f.write(base64.b64decode(self.lvm_group_cfg))

        # Ensure RAID physical volume
        try:
            lvm2.pvs(raid_device)
        except:
            lvm2.pvcreate(raid_device, uuid=self.pv_uuid,
                          restorefile=vg_restore_file)
        finally:
            lvm2.vgcfgrestore(self.vg, file=vg_restore_file)
            os.remove(vg_restore_file)

        # Check that logical volume exists
        lv_infos = lvm2.lvs(self.vg)
        if not lv_infos:
            # BUGFIX: original message left the %s placeholder unformatted
            raise storage2.StorageError(
                'No logical volumes found in %s vol. group' % self.vg)
        lv_name = lv_infos.popitem()[1].lv_name
        self.device = lvm2.lvpath(self.vg, lv_name)

        # Activate volume group
        lvm2.vgchange(self.vg, available='y')

        # Wait for logical volume device file
        util.wait_until(lambda: os.path.exists(self.device),
                        timeout=120, logger=LOG,
                        error_text='Logical volume %s not found' % self.device)

        if self.mpoint:
            # SCALARIZR-1929 raid wasn't auto-mounted after reboot.
            # XXX: needs investigation, code below is a hotfix.
            fstab = mount.fstab()
            if self.mpoint in fstab:
                self.mount()
    else:
        # Fresh volume: create array, PV, VG and one LV over all free space
        raid_device = mdadm.findname()
        kwargs = dict(force=True, level=self.level, assume_clean=True,
                      raid_devices=len(disks_devices), metadata='default')
        mdadm.mdadm('create', raid_device, *disks_devices, **kwargs)
        mdadm.mdadm('misc', None, raid_device, wait=True, raise_exc=False)

        lvm2.pvcreate(raid_device, force=True)
        self.pv_uuid = lvm2.pvs(raid_device)[raid_device].pv_uuid

        lvm2.vgcreate(self.vg, raid_device)
        out, err = lvm2.lvcreate(self.vg, extents='100%FREE')[:2]
        try:
            # Parse the created LV name out of lvcreate's last output line
            clean_out = out.strip().split('\n')[-1].strip()
            vol = re.match(self.lv_re, clean_out).group(1)
            self.device = lvm2.lvpath(self.vg, vol)
        except:
            e = 'Logical volume creation failed: %s\n%s' % (out, err)
            raise Exception(e)

        self.lvm_group_cfg = lvm2.backup_vg_config(self.vg)

    self.raid_pv = raid_device
class Volume(Base):
    """
    Base class for all volume types
    """

    # Maximum size supported by the volume type; None means unbounded here
    MAX_SIZE = None

    def __init__(self, device=None, fstype='ext3', mpoint=None,
                 mount_options=None, snap=None, recreate_if_missing=False,
                 template=None, **kwds):
        # Get rid of fscreated flag
        kwds.pop('fscreated', None)
        #Backwards compatibility with block_device handler
        from_template_if_missing = kwds.pop('from_template_if_missing', False)
        recreate_if_missing = recreate_if_missing or from_template_if_missing
        super(Volume, self).__init__(
            device=device, fstype=fstype, mpoint=mpoint,
            mount_options=mount_options or [], snap=snap,
            recreate_if_missing=recreate_if_missing, template=template,
            **kwds)
        # Capabilities advertised by this volume type
        self.features.update({'restore': True, 'grow': False, 'detach': True})

    def ensure(self, mount=False, mkfs=False, fstab=True, **updates):
        """
        Make sure that volume is attached and ready for use.

        :param mount: if set, volume eventually will be mounted to it's mpoint
        :param mkfs: if set, volume will have corresponding fs eventually
        :param fstab: if set (default), an fstab entry with Scalr options
            is added on non-Windows hosts
        :return: volume configuration dict
        :raises storage2.VolumeNotExistsError: when the volume is gone and
            ``recreate_if_missing`` is not set
        """
        if not self.features['restore']:
            self._check_restore_unsupported()
        if self.snap and isinstance(self.snap, Snapshot):
            self.snap = self.snap.config()
        try:
            self._ensure()
        except storage2.VolumeNotExistsError, e:
            LOG.debug('recreate_if_missing: %s', self.recreate_if_missing)
            if self.recreate_if_missing:
                LOG.warning(e)
                LOG.info('Volume %s not exists, re-creating %s from template',
                         self.id, self.type)
                # Prefer an explicit template; fall back to cloning self
                template = dict(self.template or self.clone())
                LOG.debug('Template: %s', template)
                vol = storage2.volume(**template)
                vol.ensure(mount=bool(vol.mpoint), mkfs=True)
                # Adopt the re-created volume's configuration
                self._config = vol.config()
            else:
                raise
        self._check_attr('device')
        if not self.id:
            self.id = self._genid('vol-')
        if mount:
            if not self.is_fs_created() and mkfs:
                try:
                    self.mkfs()
                except storage2.OperationError as e:
                    # Tolerate 'filesystem already exists' style errors only
                    if 'already' not in str(e):
                        raise
            if not linux.os.windows:
                # Reconcile any pre-existing fstab entry for this device
                fstab_mgr = mod_mount.fstab()
                device_path = os.path.realpath(self.device)
                if device_path in fstab_mgr:
                    fstab_entry = fstab_mgr[device_path]
                    if fstab_entry.mpoint != self.mpoint:
                        LOG.debug(
                            'According to fstab device {} should be mounted to {}. '
                            'Deleting entry...'.format(fstab_entry.device,
                                                       fstab_entry.mpoint))
                        del fstab_mgr[fstab_entry.device]
                    else:
                        # Entry already points at the right mpoint
                        # NOTE(review): presumably the volume is considered
                        # mounted in this case — confirm against callers
                        return self.config()
            self.mount()
        if fstab and not linux.os.windows:
            LOG.debug('Adding to fstab: %s', self.id)
            if self.mount_options:
                fsoptions = list(self.mount_options)
            else:
                fsoptions = ['defaults']
            if linux.os.ubuntu and linux.os['release'] < (16, 4):
                fsoptions.append('nobootwait')
            elif not (linux.os.redhat_family and linux.os['release'] < (6, 0)):
                # centos 5 doesn't support nofail
                fsoptions.append('nofail')
            # Tag the entry so cleanup code can recognize Scalr-managed mounts
            fsoptions.append('comment=scalr')
            mod_mount.fstab().add(self.device, self.mpoint, self.fstype,
                                  ','.join(fsoptions))
        return self.config()
def on_init(self, *args, **kwargs):
    """
    Subscribe lifecycle handlers and prepare the host on agent start.

    Sets the hostname from the cloud public hostname (unless disabled via
    the 'hostname_as_pubdns' setting), re-enables root SSH login in
    Ubuntu cloud-init configs, appends the server SSH public key to
    root's authorized_keys, and mounts ephemeral devices.
    """
    bus.on("before_hello", self.on_before_hello)
    bus.on("before_host_init", self.on_before_host_init)
    bus.on("before_restart", self.on_before_restart)
    msg_service = bus.messaging_service
    producer = msg_service.get_producer()
    producer.on("before_send", self.on_before_message_send)

    # Set the hostname to this instance's public hostname
    try:
        hostname_as_pubdns = int(__ec2__['hostname_as_pubdns'])
    except:
        # Missing or non-integer setting defaults to enabled
        hostname_as_pubdns = True
    if hostname_as_pubdns:
        pub_hostname = self._platform.get_public_hostname()
        self._logger.debug('Setting hostname to %s' % pub_hostname)
        system2("hostname " + pub_hostname, shell=True)

    if disttool.is_ubuntu():
        # Ubuntu cloud-init scripts may disable root ssh login
        for path in ('/etc/ec2-init/ec2-config.cfg', '/etc/cloud/cloud.cfg'):
            if os.path.exists(path):
                c = None
                with open(path, 'r') as fp:
                    c = fp.read()
                # Flip 'disable_root' to 0, preserving ':' or '=' separator
                c = re.sub(
                    re.compile(r'^disable_root[^:=]*([:=]).*', re.M),
                    r'disable_root\1 0', c)
                with open(path, 'w') as fp:
                    fp.write(c)

    # Add server ssh public key to authorized_keys
    authorized_keys_path = "/root/.ssh/authorized_keys"
    if os.path.exists(authorized_keys_path):
        c = None
        with open(authorized_keys_path, 'r') as fp:
            c = fp.read()
        ssh_key = self._platform.get_ssh_pub_key()
        idx = c.find(ssh_key)
        if idx == -1:
            # Key absent: append it on its own line
            if c and c[-1] != '\n':
                c += '\n'
            c += ssh_key + "\n"
            self._logger.debug(
                "Add server ssh public key to authorized_keys")
        elif idx > 0 and c[idx - 1] != '\n':
            # Key present but fused to the previous line: split it off
            c = c[0:idx] + '\n' + c[idx:]
            self._logger.warn(
                'Adding new-line character before server SSH key in authorized_keys file'
            )
        with open(authorized_keys_path, 'w') as fp:
            fp.write(c)

    # Mount ephemeral devices
    # Seen on eucalyptus:
    #   - fstab contains invalid fstype and `mount -a` fails
    if self._platform.name == 'eucalyptus':
        # Mount instance-store devices one at a time so a single bad
        # fstab entry cannot abort the others
        mtab = mount.mounts()
        fstab = mount.fstab()
        for device in self._platform.instance_store_devices:
            if os.path.exists(
                    device) and device in fstab and device not in mtab:
                entry = fstab[device]
                try:
                    mount.mount(device, entry.mpoint, '-o', entry.options)
                except:
                    self._logger.warn(sys.exc_info()[1])
    else:
        system2('mount -a', shell=True, raise_exc=False)
def on_init(self, *args, **kwargs):
    """
    Register lifecycle listeners and run first-boot host setup.

    Visible effects: hostname set from the cloud public hostname (unless
    the 'hostname_as_pubdns' setting disables it), Ubuntu cloud-init root
    SSH login re-enabled, server SSH key appended to authorized_keys,
    ephemeral devices mounted.
    """
    bus.on("before_hello", self.on_before_hello)
    bus.on("before_host_init", self.on_before_host_init)
    bus.on("before_restart", self.on_before_restart)
    msg_service = bus.messaging_service
    producer = msg_service.get_producer()
    producer.on("before_send", self.on_before_message_send)

    # Set the hostname to this instance's public hostname
    try:
        hostname_as_pubdns = int(__ec2__['hostname_as_pubdns'])
    except:
        # Absent/invalid setting means: do set the hostname
        hostname_as_pubdns = True
    if hostname_as_pubdns:
        pub_hostname = self._platform.get_public_hostname()
        self._logger.debug('Setting hostname to %s' % pub_hostname)
        system2("hostname " + pub_hostname, shell=True)

    if disttool.is_ubuntu():
        # Ubuntu cloud-init scripts may disable root ssh login
        for path in ('/etc/ec2-init/ec2-config.cfg', '/etc/cloud/cloud.cfg'):
            if os.path.exists(path):
                c = None
                with open(path, 'r') as fp:
                    c = fp.read()
                # Rewrite 'disable_root' to 0 keeping the ':'/'=' separator
                c = re.sub(re.compile(r'^disable_root[^:=]*([:=]).*', re.M),
                           r'disable_root\1 0', c)
                with open(path, 'w') as fp:
                    fp.write(c)

    # Add server ssh public key to authorized_keys
    authorized_keys_path = "/root/.ssh/authorized_keys"
    if os.path.exists(authorized_keys_path):
        c = None
        with open(authorized_keys_path, 'r') as fp:
            c = fp.read()
        ssh_key = self._platform.get_ssh_pub_key()
        idx = c.find(ssh_key)
        if idx == -1:
            # Key missing entirely: append on a fresh line
            if c and c[-1] != '\n':
                c += '\n'
            c += ssh_key + "\n"
            self._logger.debug("Add server ssh public key to authorized_keys")
        elif idx > 0 and c[idx-1] != '\n':
            # Key present but glued to the previous line: insert a newline
            c = c[0:idx] + '\n' + c[idx:]
            self._logger.warn('Adding new-line character before server SSH key in authorized_keys file')
        with open(authorized_keys_path, 'w') as fp:
            fp.write(c)

    # Mount ephemeral devices
    # Seen on eucalyptus:
    #   - fstab contains invalid fstype and `mount -a` fails
    if self._platform.name == 'eucalyptus':
        # Per-device mounting so one broken entry doesn't abort the rest
        mtab = mount.mounts()
        fstab = mount.fstab()
        for device in self._platform.instance_store_devices:
            if os.path.exists(device) and device in fstab and device not in mtab:
                entry = fstab[device]
                try:
                    mount.mount(device, entry.mpoint, '-o', entry.options)
                except:
                    self._logger.warn(sys.exc_info()[1])
    else:
        system2('mount -a', shell=True, raise_exc=False)
def on_init(self, *args, **kwargs):
    """
    Subscribe lifecycle handlers and prepare the host on agent start.

    Registers bus/producer listeners, runs a best-effort ntpdate sync,
    optionally sets the hostname from the cloud public hostname,
    re-enables root SSH login in Ubuntu cloud-init configs, installs the
    server SSH public key via add_authorized_key, and mounts ephemeral
    devices.
    """
    bus.on("before_hello", self.on_before_hello)
    bus.on("before_host_init", self.on_before_host_init)
    bus.on("before_restart", self.on_before_restart)
    bus.on("before_reboot_finish", self.on_before_reboot_finish)
    # Best-effort clock sync; failure is deliberately ignored
    try:
        system(('ntpdate', '-u', '0.amazon.pool.ntp.org'))
    except:
        pass
    msg_service = bus.messaging_service
    producer = msg_service.get_producer()
    producer.on("before_send", self.on_before_message_send)

    if not os_dist.windows_family and not __node__.get('hostname'):
        # Set the hostname to this instance's public hostname
        try:
            hostname_as_pubdns = int(__ec2__['hostname_as_pubdns'])
        except:
            # Missing or non-integer setting defaults to enabled
            hostname_as_pubdns = True
        if hostname_as_pubdns:
            pub_hostname = self._platform.get_public_hostname()
            self._logger.debug('Setting hostname to %s' % pub_hostname)
            system2("hostname " + pub_hostname, shell=True)

    if disttool.is_ubuntu():
        # Ubuntu cloud-init scripts may disable root ssh login
        for path in ('/etc/ec2-init/ec2-config.cfg', '/etc/cloud/cloud.cfg'):
            if os.path.exists(path):
                c = None
                with open(path, 'r') as fp:
                    c = fp.read()
                # Flip 'disable_root' to 0, keeping the ':'/'=' separator
                c = re.sub(
                    re.compile(r'^disable_root[^:=]*([:=]).*', re.M),
                    r'disable_root\1 0', c)
                with open(path, 'w') as fp:
                    fp.write(c)

    if not linux.os.windows_family:
        # Add server ssh public key to authorized_keys
        ssh_key = self._platform.get_ssh_pub_key()
        if ssh_key:
            add_authorized_key(ssh_key)

    # Mount ephemeral devices
    # Seen on eucalyptus:
    #   - fstab contains invalid fstype and `mount -a` fails
    if self._platform.name == 'eucalyptus':
        # Per-device mounting so one broken fstab entry can't abort the rest
        mtab = mount.mounts()
        fstab = mount.fstab()
        for device in self._platform.instance_store_devices:
            if os.path.exists(
                    device) and device in fstab and device not in mtab:
                entry = fstab[device]
                try:
                    mount.mount(device, entry.mpoint, '-o', entry.options)
                except:
                    self._logger.warn(sys.exc_info()[1])
    else:
        if not os_dist.windows_family:
            system2('mount -a', shell=True, raise_exc=False)
class Volume(Base):
    """
    Base class for all volume types
    """

    # Maximum size supported by the volume type; None means unbounded here
    MAX_SIZE = None

    def __init__(self, device=None, fstype='ext3', mpoint=None, snap=None,
                 **kwds):
        # Get rid of fscreated flag
        kwds.pop('fscreated', None)
        #Backwards compatibility with block_device handler
        from_template_if_missing = kwds.pop('from_template_if_missing', False)
        kwds['recreate_if_missing'] = kwds.get(
            'recreate_if_missing', False) or from_template_if_missing
        super(Volume, self).__init__(device=device, fstype=fstype,
                                     mpoint=mpoint, snap=snap, **kwds)
        # Capabilities advertised by this volume type
        self.features.update({'restore': True, 'grow': False, 'detach': True})

    def ensure(self, mount=False, mkfs=False, fstab=True, **updates):
        """
        Make sure that volume is attached and ready for use.

        :param mount: if set, volume eventually will be mounted to it's mpoint
        :param mkfs: if set, volume will have corresponding fs eventually
        :return: volume configuration dict
        :raises storage2.VolumeNotExistsError: when the volume is gone and
            ``recreate_if_missing`` is not set
        """
        if not self.features['restore']:
            self._check_restore_unsupported()
        if self.snap and isinstance(self.snap, Snapshot):
            self.snap = self.snap.config()
        try:
            self._ensure()
        except storage2.VolumeNotExistsError, e:
            LOG.debug("recreate_if_missing: %s" % self.recreate_if_missing)
            if self.recreate_if_missing:
                LOG.warning(e)
                LOG.info('Volume %s not exists, re-creating %s from template',
                         self.id, self.type)
                # Re-create a fresh volume from a clone of this config
                template = self.clone()
                vol = storage2.volume(**dict(template))
                vol.ensure(mount=bool(vol.mpoint), mkfs=True)
                # Adopt the re-created volume's configuration
                self._config = vol.config()
            else:
                raise
        self._check_attr('device')
        if not self.id:
            self.id = self._genid('vol-')
        if mount:
            if not self.is_fs_created() and mkfs:
                LOG.debug('Creating %s filesystem: %s', self.fstype, self.id)
                self.mkfs()
            # Devices already present in fstab are assumed handled by the OS
            in_fstab = os.path.realpath(self.device) in mod_mount.fstab()
            if not in_fstab:
                self.mount()
            # NOTE(review): this fstab registration is reachable only when
            # mount=True (in_fstab is bound above) — confirm intended scope
            if fstab and not in_fstab:
                LOG.debug('Adding to fstab: %s', self.id)
                mod_mount.fstab().add(self.device, self.mpoint, self.fstype,
                                      'defaults,comment=scalr,nofail')
        return self.config()