def load_conf(self, file):
    """Load a LunrConfig from ``file`` (or the default storage config).

    Falls back to an empty LunrConfig when the file cannot be read,
    printing a warning if a file was explicitly given or verbose is on.
    """
    try:
        return LunrConfig.from_conf(file or LunrConfig.lunr_storage_config)
    except IOError as e:  # 'as' form works on py2.6+ and py3
        if file or self.verbose:
            print('Warning: %s' % e)
        return LunrConfig()
def test_unable_to_connect(self):
    """configure() raises DBError when the sqlite path does not exist."""
    missing_dir = mkdtemp()
    os.rmdir(missing_dir)
    self.assertFalse(os.path.exists(missing_dir))
    url = 'sqlite:///' + missing_dir + '/lunr.db'
    # Both with and without auto_create the connect must fail.
    for db_options in ({'auto_create': True, 'url': url}, {'url': url}):
        self.assertRaises(db.DBError, db.configure,
                          LunrConfig({'db': db_options}))
def test_client_retry(self):
    """GlanceClient retries every configured URL before raising.

    Removes the unused local ``conf = LunrConfig()`` that shadowed
    nothing and was never read.
    """
    glance.glanceclient = MockRetryGlanceClient
    glance_urls = ['url1:9292', 'url2:9292', 'url3:9292']
    urls_string = ', '.join(glance_urls)
    self.conf = LunrConfig({'glance': {'glance_urls': urls_string}})
    # head() should exhaust all three urls before failing...
    glance1 = glance.GlanceClient(self.conf)
    self.assertRaises(glance.GlanceError, glance1.head, 'junk')
    self.assertEquals(glance1._glance_url_index, 3)
    # ...and so should get().
    glance2 = glance.GlanceClient(self.conf)
    self.assertRaises(glance.GlanceError, glance2.get, 'junk')
    self.assertEquals(glance2._glance_url_index, 3)
def parse_args(self, *args, **kwargs):
    """Parse command line options and load the lunr config file.

    Exits with a readable error when the config cannot be read.
    """
    options, args = super(LunrMonkeyParser, self).parse_args(*args, **kwargs)
    try:
        conf = LunrConfig.from_conf(options.config)
    except IOError as e:  # 'as' form works on py2.6+ and py3
        raise SystemExit('Error: %s' % e)
    # NOTE(review): nothing is returned here, so callers receive None;
    # presumably the original continues past this point -- confirm.
def test_audit(self):
    """audit() removes container objects no manifest references."""
    manifest = Manifest.blank(2)
    worker = Worker('foo', LunrConfig({
        'backup': {'client': 'memory'},
        'storage': {'run_dir': self.scratch},
    }), manifest=manifest)
    conn = worker.conn
    conn.put_container('foo')
    backup = manifest.create_backup('bak1')
    backup[0] = worker.empty_block_hash
    conn.put_object('foo', backup[0], 'zeroes')
    backup[1] = 'some_block_hash'
    conn.put_object('foo', backup[1], ' more stuff')
    save_manifest(manifest, conn, worker.id, worker._lock_path())
    # Add some non referenced blocks.
    conn.put_object('foo', 'stuff1', 'unreferenced stuff1')
    conn.put_object('foo', 'stuff2', 'unreferenced stuff2')
    conn.put_object('foo', 'stuff3', 'unreferenced stuff3')
    _headers, before_list = conn.get_container('foo')
    # Manifest, 2 blocks, 3 stuffs.
    self.assertEquals(len(before_list), 6)
    worker.audit()
    _headers, after_list = conn.get_container('foo')
    # Manifest, 2 blocks.
    self.assertEquals(len(after_list), 3)
def clone(self, id=None, src=None, backup=None, size=None):
    """
    This runs a clone job outside of the storage api, which is useful
    for performance testing backup restores
    (Example: storage tools clone volume-clone
        --backup volume-backup --src volume-original)
    """
    # Set basic Logging
    logging.basicConfig()
    # Get the lunr logger
    log = logger.get_logger()
    # Output Debug level info
    log.logger.setLevel(logging.DEBUG)
    # Load the local storage configuration
    conf = LunrConfig.from_storage_conf()
    # Init the volume helper
    volume = VolumeHelper(conf)
    # Attempt to figure out the original volume size
    size = size or str(volume.get(src)['size'] / 1073741824)
    # Size is in gigs. BUG FIX: re.match('G', size) only matched a
    # *leading* 'G', so a user-supplied '10G' grew a second suffix
    # ('10GG'); check the end of the string instead.
    if not size.endswith('G'):
        size = size + 'G'
    # Create a tag to apply to the lvm volume
    tag = encode_tag(source_volume_id=src, backup_id=backup)
    # Create the volume
    execute('lvcreate', volume.volume_group, name=id, size=size, addtag=tag)
    # Get info for the newly created volume
    new = volume.get(id)
    with self.timeit():
        print("Starting Backup")
        # Restore volume from the backup
        volume.clone(new, src, backup)
def setUp(self):
    # Fresh in-memory sqlite database for every test.
    conf = LunrConfig({'db': {'auto_create': True, 'url': 'sqlite://'}})
    self.app = ApiWsgiApp(conf, urlmap)
    self.db = db.Session
    self.account = db.models.Account()
    vtype = db.models.VolumeType('vtype')
    node = db.models.Node('node', 10, volume_type=vtype,
                          hostname='10.127.0.1', port=8080)
    # Simulate a volume that is being restored
    volume = db.models.Volume(0, 'vtype', id='v1', node=node,
                              account=self.account)
    backup = db.models.Backup(volume, status='AVAILABLE')
    self.db.add_all([vtype, self.account, node, volume, backup])
    # First commit so backup.id exists before we reference it below.
    self.db.commit()
    # Assign the backup as the restore of the volume
    volume.restore_of = backup.id
    # Snapshot row state as plain dicts for the tests to compare against.
    self.volume = dict(volume)
    self.backup = dict(backup)
    self.db.commit()
def test_multi_config(self):
    """conf.d files override and extend the base config file."""
    base_conf_body = dedent("""
        [db]
        # url = sqlite://
        echo = False
        pool_size = 5
        """)
    db_conf_body = dedent("""
        [db]
        url = mysql://root:@localhost/lunr
        echo = True
        """)
    scratch = mkdtemp()
    try:
        os.mkdir(os.path.join(scratch, 'api-server.conf.d'))
        base_conf_filename = os.path.join(scratch, 'api-server.conf')
        bodies = [
            (base_conf_filename, base_conf_body),
            (os.path.join(scratch, 'api-server.conf.d/db.conf'),
             db_conf_body),
        ]
        for filename, body in bodies:
            with open(filename, 'w') as f:
                f.write(body)
        conf = LunrConfig.from_conf(base_conf_filename)
        # override commented value
        self.assertEquals(conf.string('db', 'url', ''),
                          'mysql://root:@localhost/lunr')
        # override base value
        self.assertEquals(conf.bool('db', 'echo', None), True)
        # inherit base value
        self.assertEquals(conf.int('db', 'pool_size', 0), 5)
    finally:
        rmtree(scratch)
def test_status(self):
    # /proc/net/iet/volume format: one "tid:" line per export with
    # optional tab-indented lun lines; tid:3 deliberately has no lun.
    VOLUME_DATA = dedent("""
        tid:1 name:iqn.2010-11.com.rackspace:vol1
        \tlun:0 path:/dev/lunr1/vol1
        tid:2 name:iqn.2010-11.com.rackspace:vol2
        \tlun:0 path:/dev/lunr1/vol2
        tid:3 name:iqn.2010-11.com.rackspace:vol3
        """)
    # /proc/net/iet/session format: only tid:1 has a connected initiator.
    SESSION_DATA = dedent("""
        tid:1 name:iqn.2010-11.com.rackspace:vol1
        \tsid:281474997486080 initiator:iqn.2010-11.org:baaa6e50093
        \t\tcid:0 ip:127.0.0.1 state:active hd:none dd:none
        tid:2 name:iqn.2010-11.com.rackspace:vol2
        tid:3 name:iqn.2010-11.com.rackspace:vol3
        """)
    proc_iet_volume = os.path.join(self.scratch, 'volume')
    proc_iet_session = os.path.join(self.scratch, 'session')
    ietd_config = os.path.join(self.scratch, 'ietd.conf')
    with open(proc_iet_volume, 'w') as f:
        f.write(VOLUME_DATA)
    with open(proc_iet_session, 'w') as f:
        f.write(SESSION_DATA)
    conf = LunrConfig({
        'export': {
            'proc_iet_volume': proc_iet_volume,
            'proc_iet_session': proc_iet_session,
            'ietd_config': ietd_config,
        }
    })
    h = export.ExportHelper(conf)
    # 3 exports total, 1 connected initiator, 3 sessions.
    expected = {'exports': 3, 'connected': 1, 'sessions': 3}
    self.assertEquals(h.status(), expected)
def setUp(self):
    # Scratch dir stands in for the real /proc and /etc export files.
    self.scratch = mkdtemp()
    self.initiators_allow = os.path.join(self.scratch, 'initiators')
    self.proc_iet_volume = os.path.join(self.scratch, 'volume')
    self.proc_iet_session = os.path.join(self.scratch, 'session')
    self.iqn_prefix = 'iqn.monkey.corp'
    self.default_allows = '10.127.0.0/24'
    self.conf = LunrConfig({
        'export': {
            'iqn_prefix': self.iqn_prefix,
            'initiators_allow': self.initiators_allow,
            'default_allows': self.default_allows,
            'proc_iet_volume': self.proc_iet_volume,
            'proc_iet_session': self.proc_iet_session,
        }
    })
    # Pre-seed an initiators allow file: two volume-specific entries
    # plus a catch-all ALL line matching default_allows.
    self.allow_data = dedent("""
        # Some random comments we might use in a header
        # just to assert our dominance over this file!
        iqn.monkey.corp:volume-00000001 10.0.0.1
        iqn.monkey.corp:volume-00000002 10.0.0.1, 10.0.0.2
        ALL 10.127.0.0/24
        """)
    with open(self.initiators_allow, 'w') as f:
        f.write(self.allow_data)
def setUp(self):
    # Helper with default config; the lvm scan is stubbed out below.
    self.helper = Helper(LunrConfig())

    def mock_scan_volumes():
        # Canned scan results: a backup snapshot (has backup_id), a
        # clone target (has clone_id), and a plain volume.
        return [
            {'backup_id': '33485eb3-5900-4068-93a1-2b72677fd699',
             'device_number': '252:5',
             'id': '33485eb3-5900-4068-93a1-2b72677fd699',
             'origin': '91e81aec-b6da-40e9-82fb-f04a99a866eb',
             'path': '/dev/lunr-volume/33485eb3-5900-4068-93a1-2b72677fd699',
             'size': 3221225472,
             'timestamp': 1399449283.0},
            {'clone_id': '70bde49f-0ca7-4fe1-bce4-b5a73f850b8b',
             'device_number': '252:9',
             'id': '5304e6b7-986c-4d8b-9659-29a606f7c9e3',
             'origin': 'c270f302-9102-4327-bb0d-c8eaf9df872f',
             'path': '/dev/lunr-volume/5304e6b7-986c-4d8b-9659-29a606f7c9e3',
             'size': 5368709120},
            {'device_number': '252:8',
             'id': '70bde49f-0ca7-4fe1-bce4-b5a73f850b8b',
             'origin': '',
             'path': '/dev/lunr-volume/70bde49f-0ca7-4fe1-bce4-b5a73f850b8b',
             'size': 5368709120,
             'volume': True}]
    self.helper.volumes._scan_volumes = mock_scan_volumes
    # Queue of canned API responses; mock_request consumes them in order.
    self.resp = []

    def mock_request(helper, url):
        return self.resp.pop(0)
    audit.request = mock_request
def main():
    """Entry point for the orbit daemon: parse args and load config."""
    parser = ArgumentParser(description="Orbit, lunr's maintenance droid")
    parser.add_argument('-c', '--config', action='store',
                        help="Provide a config file for orbit to use")
    parser.add_argument('-p', '--pid', action='store',
                        help="Specify the file name of the pid to use")
    parser.add_argument('-u', '--user', action='store',
                        help="Specify the user the daemon will run as")
    parser.add_argument(
        'command', nargs='?', default='foreground',
        help="(start|stop|status|foreground) defaults to foreground")
    options = parser.parse_args()
    try:
        file = options.config or LunrConfig.lunr_orbit_config
        conf = LunrConfig.from_conf(file)
    except Exception as e:  # 'as' form works on py2.6+ and py3
        print("-- Config Failure: %s" % e)
        parser.print_help()
        return 1
def test_writable_cow_multiline_table(self):
    """Exercise get_writable_cow against a dmsetup table that spans lines."""
    # Let's do some silly math
    size = directio.size(self._ramdisk)
    megs = size / 1024 / 1024
    megs = megs - megs % 4
    # 12 megs for a volume, 4 for lvm itself
    alloc = megs - 12 - 4
    vg = self.conf.string('volume', 'volume_group', None)
    # Reserve a 4m hole at the front, and 8m at the end
    execute('lvcreate', vg, size='4m', name='tmpvol')
    execute('lvcreate', vg, size='%sm' % alloc, name='wasted')
    execute('lvremove', '%s/tmpvol' % vg, force=None)
    foo = execute('pvs', self._ramdisk)
    foo = execute('vgs', vg)
    foo = execute('lvs', vg)
    volume_id = str(uuid4())
    # Creating the volume now forces lvm to split it around the hole,
    # which is what produces the multi-line dmsetup table we assert on.
    self.volume.create(volume_id)
    volume = self.volume.get(volume_id)
    execute('lvremove', '%s/wasted' % vg, force=None)
    # device-mapper names escape '-' in vg/lv names by doubling them
    dmname = '%s-%s' % (re.sub('-', '--', vg), re.sub(
        '-', '--', volume_id))
    foo = execute('dmsetup', 'table', dmname)
    self.assert_('\n' in foo)
    backup_id = str(uuid4())
    snapshot = self.volume.create_snapshot(volume_id, backup_id, '123456')
    scrub = Scrub(LunrConfig())
    # Must not blow up on the multi-line table; cleanup removes the cow.
    (cow_name, cow_path) = scrub.get_writable_cow(snapshot, volume)
    execute('dmsetup', 'remove', cow_name)
    self.assertTrue(True)
def test_status(self):
    """VolumeHelper.status parses vgs output into integer byte counts."""
    def fake_vgs(cmd, vg, **kwargs):
        # Emulate `vgs` output: optional heading row, then one row of
        # values for exactly the requested options, joined by separator.
        canned = {
            'vg_size': '20000B',
            'vg_free': '10000B',
            'lv_count': '20',
        }
        options = kwargs['options'].split(',')
        sep = kwargs.get('separator', ' ')
        out = ' ' + sep.join(canned[key] for key in options)
        if 'noheadings' not in kwargs:
            out = '\n'.join([' ' + sep.join(options), out])
        return out
    volume.execute = fake_vgs
    helper = volume.VolumeHelper(LunrConfig())
    expected = {
        'volume_group': 'lunr-volume',
        'vg_size': 20000,
        'vg_free': 10000,
        'lv_count': 20,
    }
    self.assertEquals(helper.status(), expected)
def load_conf(options, args):
    """Load the config file named on the command line, exiting on failure."""
    if options.config:
        try:
            conf = LunrConfig.from_conf(options.config)
        except IOError as e:  # 'as' form works on py2.6+ and py3
            print('Error: %s' % e)
            sys.exit(1)
def setUp(self):
    # Everything lives under a throwaway scratch directory.
    self.scratch = mkdtemp()
    self.storage = MockStorageNode(self.scratch)
    # Swap in a fake subprocess module wired to the mock storage node;
    # the original is saved so tearDown can restore it (presumably).
    self._orig_subprocess = utils.subprocess
    utils.subprocess = MockSubprocess(storage=self.storage)
    self.conf = LunrConfig({
        'export': {
            'ietd_config': os.path.join(self.scratch, 'ietd.conf'),
            'proc_iet_volume': self.storage.proc_iet_volume,
            'proc_iet_session': self.storage.proc_iet_session,
            'device_prefix': self.storage.device_prefix,
            'initiators_allow': os.path.join(self.scratch,
                                             'initiators.allow'),
        },
        'storage': {
            'skip_fork': True,
            'run_dir': self.storage.run_dir
        },
        'volume': {
            'device_prefix': self.storage.device_prefix
        },
        'disk': {
            'path': os.path.join(self.scratch, 'backups')
        },
        'glance': {
            'glance_urls': 'snet1,snet2',
            'glance_mgmt_urls': 'mgmt1, mgmt2',
        }
    })
    self.lockfile = os.path.join(self.scratch, 'lock')
    self.lock = ResourceFile(self.lockfile)
def test_parse_config_file(self):
    """from_conf parses sections, merges duplicates, and casts values."""
    conf_str = "[DEFAULT]\n" \
        "foo = bar\n" \
        "[foo]\n" \
        "foo = baz\n" \
        "[foo]\n" \
        "fog = buz\n" \
        "[fiz]\n" \
        "bang = bazt\n" \
        "[casts]\n" \
        "size = 25\n" \
        "flush = True\n"
    with temp_disk_file(conf_str) as file:
        conf = LunrConfig.from_conf(file)
        self.assertEquals(conf.string('default', '__file__', ''), file)
        # All parsed options default to string
        cases = (
            ('default', 'foo', 'bar'),
            ('foo', 'foo', 'baz'),
            ('foo', 'fog', 'buz'),
            ('fiz', 'bang', 'bazt'),
        )
        for section, key, expected in cases:
            self.assertEquals(conf.string(section, key, ''), expected)
        # Non-string casts
        self.assertEquals(conf.int('casts', 'size', 1), 25)
        self.assertEquals(conf.bool('casts', 'flush', False), True)
def test_auto_create_false(self):
    """configure() warns instead of creating tables when auto_create is off."""
    class FakeLogger(object):
        def __init__(self):
            self.warned = False

        def warn(self, msg):
            self.warned = True
            self.msg = msg

    fake_log = FakeLogger()
    temp = mkdtemp()
    try:
        conf = LunrConfig({
            'default': {'lunr_dir': temp},
            'db': {'auto_create': False},
        })
        # Capture the warning db.configure emits for an uncontrolled db.
        with patch(db, 'logger', fake_log):
            db.configure(conf)
        self.assert_(fake_log.warned)
        self.assert_('not version controlled' in fake_log.msg)
    finally:
        rmtree(temp)
def load_conf(self, file):
    """Load ``file`` into a LunrConfig and wrap it in a Helper.

    Exits the process when the config file cannot be read.
    """
    try:
        conf = LunrConfig.from_conf(file)
        return Helper(conf)
    except IOError as e:  # 'as' form works on py2.6+ and py3
        print('Error: %s' % e)
        sys.exit(1)
def setUp(self):
    # Fresh in-memory database per test.
    self.test_conf = LunrConfig(
        {'db': {
            'auto_create': True, 'url': 'sqlite://'
        }})
    self.app = ApiWsgiApp(self.test_conf, urlmap)
    self.db = db.Session
    # Stub out urlopen so no real node requests go out; the original is
    # kept so tearDown can restore it (presumably).
    self.old_urlopen = base.urlopen
    base.urlopen = MockUrlopen
    self.old_db_close = self.db.close
    # This dirty little hack allows us to query the db after the request
    self.db.close = lambda: 42
    self.vtype = db.models.VolumeType('vtype')
    self.account = db.models.Account()
    self.node = db.models.Node('node', 10, volume_type=self.vtype,
                               hostname='10.127.0.1', port=8080)
    self.volume = db.models.Volume(0, 'vtype', id='v1', node=self.node,
                                   account=self.account)
    self.export = db.models.Export(volume=self.volume)
    self.db.add_all(
        [self.vtype, self.account, self.node, self.volume, self.export])
    self.db.commit()
def test_max_snapshot_bytes_flag(self):
    """_max_snapshot_size is capped by the max_snapshot_bytes option."""
    cap = 4 * 1024 * 1024
    helper = volume.VolumeHelper(
        LunrConfig({'volume': {'max_snapshot_bytes': cap}}))
    # Even a 100G volume must not exceed the configured byte cap.
    self.assertEquals(
        helper._max_snapshot_size(100 * 1024 * 1024 * 1024), cap)
def scrub_cow(self, path=None, display=None):
    """
    Scrub or display details for a cow device

    Example:
        %(prog)s (cmd)s scrub-cow -d /dev/mapper/lunr--volume-my--snap-cow
    """
    config = {}
    # -v raises log verbosity; -vv also surfaces exceptions.
    if self.verbose == 1:
        log.setLevel(logging.INFO)
    if self.verbose > 1:
        log.setLevel(logging.DEBUG)
        config['display-exceptions'] = True
    if display:
        if self.verbose < 2:
            log.setLevel(logging.INFO)
        log.info("Display Only, Not Scrubbing")
        config['display-only'] = True
    try:
        # Init the Scrub object with our command line options
        scrub = Scrub(LunrConfig({'scrub': config}))
        # Scrub the cow
        scrub.scrub_cow(path)
    except ScrubError as e:  # 'as' form works on py2.6+ and py3
        log.error(str(e))
        return 1
def setUp(self):
    """Build the test app against a fresh in-memory database.

    Fix: 'auto_create' was at the top level of the config dict instead
    of inside the 'db' section, unlike every other test setUp in this
    suite, so db.configure never saw it.
    """
    self.test_conf = LunrConfig({
        'db': {'auto_create': True, 'url': 'sqlite://'}
    })
    self.app = ApiWsgiApp(self.test_conf, urlmap)
def __init__(self):
    # Give our sub command a name
    self._name = 'tools'
    # Create a volume helper with our local storage config
    self.volume = VolumeHelper(LunrConfig.from_storage_conf())
    # let the base class setup methods in our class
    SubCommand.__init__(self)
    # Running totals keyed by name; missing keys start at 0.0
    self.total = defaultdict(float)
def test_status(self):
    """BackupHelper.status reports client type and empty store counts."""
    self.conf = LunrConfig({
        'backup': {'client': 'memory'},
        'storage': {'run_dir': self.run_dir, 'skip_fork': True},
    })
    helper = backup.BackupHelper(self.conf)
    self.assertEquals(helper.status(),
                      {'client': 'memory', 'containers': 0, 'objects': 0})
def test_custom(self):
    """Non-base images get the larger scratch multiplier.

    Removes the unused local ``min_disk = 40``; the call below passes
    the string 'min_disk', never the integer.
    """
    conf = LunrConfig()
    h = volume.VolumeHelper(conf)
    image = MockImageHead('imgid', 'size', 'format', 'container',
                          'min_disk', {'image_type': 'notbase'}, 'ACTIVE')
    result = h._get_scratch_multiplier(image)
    self.assertEquals(result, 4)
def test_volume_group_id_to_long(self):
    """check_config rejects an over-long volume_group name."""
    overlong = 'A' * 31
    helper = volume.VolumeHelper(
        LunrConfig({'volume': {'volume_group': overlong}}))
    self.assertRaises(RuntimeError, helper.check_config)
def __init__(self):
    """Stand-in controller backed by a fresh in-memory sqlite db."""
    db_options = {'auto_create': True, 'url': 'sqlite://'}
    self.conf = LunrConfig({'db': db_options})
    self.helper = db.configure(self.conf)
    self.fill_percentage_limit = 0.5
    self.node_timeout = None
    self.backups_per_volume = 10
def __init__(self):
    """Stand-in controller with a quiet in-memory database."""
    db_options = {'auto_create': True, 'url': 'sqlite://', 'echo': False}
    self.conf = LunrConfig({'db': db_options})
    self.helper = db.configure(self.conf)
    self.fill_percentage_limit = 0.5
    self.fill_strategy = 'broad_fill'
    self.node_timeout = None
    self.image_convert_limit = 3
def __init__(self):
    """Minimal stand-in controller with an in-memory database."""
    self.conf = LunrConfig({'db': {'auto_create': True,
                                   'url': 'sqlite://'}})
    self.helper = db.configure(self.conf)
    self.fill_percentage_limit = 0.5
def filter_factory(global_conf, **local_conf):
    # paste.deploy filter factory: logs one summary line per request and
    # threads an X-Request-Id header through request and response.
    section = 'filter:trans-logger'
    conf = LunrConfig({section: local_conf})
    # echo=True additionally dumps the full request/response bodies.
    echo = conf.bool(section, 'echo', False)
    level = conf.option(section, 'level', 'DEBUG', cast=log_level)
    name = conf.string(section, 'name', '')
    global_logger = logger
    if name:
        local_logger = logger.get_logger(name)
    else:
        local_logger = global_logger

    def trans_logger_filter(app):
        @wsgify
        def log_response(req):
            # Reuse the caller's request id if present, else mint one.
            req.headers['X-Request-Id'] = req.headers.get(
                'x-request-id', 'lunr-%s' % uuid4())
            logger.local.request_id = req.headers['x-request-id']
            if echo:
                local_logger.log(level, 'REQUEST:\n%s', req)
            resp = req.get_response(app)
            resp.headers['X-Request-Id'] = req.headers['x-request-id']
            if req.params:
                request_str = '?'.join((req.path, urlencode(req.params)))
            else:
                request_str = req.path
            global_logger.info(' '.join(
                str(x) for x in (
                    # add more fields here
                    req.remote_addr or '-',
                    '"%s %s"' % (req.method, request_str),
                    resp.status_int,
                    resp.content_length,
                )))
            if echo:
                local_logger.log(level, 'RESPONSE:\n%s', resp)
            # Clear the id so it cannot leak into unrelated log lines.
            logger.local.request_id = None
            return resp
        return log_response
    return trans_logger_filter
def app_factory(global_conf, **local_conf):
    """paste.deploy app factory for creating WSGI API server"""
    # paster only hands us our own section, so re-read the whole file
    conf = LunrConfig.from_conf(global_conf['__file__'])
    # ensure global logger is named
    logger.rename(__name__)
    return ApiWsgiApp(conf, urlmap)
def setUpClass(cls):
    # Uses the real api config/database; the original min_size is kept
    # on the class so it can be restored later (presumably tearDownClass).
    conf = LunrConfig.from_api_conf()
    sess = db.configure(conf)
    # Change the min_size to 0, so we can
    # create volumes smaller than a gig
    query = sess.query(VolumeType).filter_by(name='vtype')
    # Save the original value
    cls._min_size = query.one().min_size
    # Set min_size to 0
    query.update({'min_size': 0})
    sess.commit()
def filter_factory(global_conf, **local_conf):
    # paste.deploy filter factory: logs one summary line per request and
    # threads an X-Request-Id header through request and response.
    section = 'filter:trans-logger'
    conf = LunrConfig({section: local_conf})
    # echo=True additionally dumps the full request/response bodies.
    echo = conf.bool(section, 'echo', False)
    level = conf.option(section, 'level', 'DEBUG', cast=log_level)
    name = conf.string(section, 'name', '')
    global_logger = logger
    if name:
        local_logger = logger.get_logger(name)
    else:
        local_logger = global_logger

    def trans_logger_filter(app):
        @wsgify
        def log_response(req):
            # Reuse the caller's request id if present, else mint one.
            req.headers['X-Request-Id'] = req.headers.get(
                'x-request-id', 'lunr-%s' % uuid4())
            logger.local.request_id = req.headers['x-request-id']
            if echo:
                local_logger.log(level, 'REQUEST:\n%s', req)
            resp = req.get_response(app)
            resp.headers['X-Request-Id'] = req.headers['x-request-id']
            if req.params:
                request_str = '?'.join((req.path, urlencode(req.params)))
            else:
                request_str = req.path
            global_logger.info(' '.join(str(x) for x in (
                # add more fields here
                req.remote_addr or '-',
                '"%s %s"' % (req.method, request_str),
                resp.status_int,
                resp.content_length,
            )))
            if echo:
                local_logger.log(level, 'RESPONSE:\n%s', resp)
            # Clear the id so it cannot leak into unrelated log lines.
            logger.local.request_id = None
            return resp
        return log_response
    return trans_logger_filter
def main():
    """Console entry point: parse options and load the storage config."""
    from optparse import OptionParser
    from lunr.common.config import LunrConfig
    from lunr.db.console import DBConsole as ManifestConsole
    from lunr.storage.helper.utils.client import get_conn
    parser = OptionParser('%prog [options] volume_id')
    parser.add_option('-C', '--config',
                      default=LunrConfig.lunr_storage_config,
                      help="override config file")
    options, args = parser.parse_args()
    try:
        conf = LunrConfig.from_conf(options.config)
    except IOError as e:  # 'as' form works on py2.6+ and py3
        return 'ERROR: %s' % e
def test_from_storage_conf(self):
    """from_storage_conf loads the file named by lunr_storage_config."""
    conf_str = dedent(
        """
        [DEFAULT]
        foo = bar
        """
    )
    with temp_disk_file(conf_str) as file:
        with patch(LunrConfig, 'lunr_storage_config', file):
            conf = LunrConfig.from_storage_conf()
            self.assertEquals(conf.lunr_storage_config, file)
            self.assertEquals(conf.string('default', '__file__', ''),
                              conf.lunr_storage_config)
            self.assertEquals(conf.string('default', 'foo', ''), 'bar')
def test_case_sensitive_parser(self):
    """Option names keep their case when parsed."""
    conf_str = dedent(
        """
        [vtype-mapping]
        SATA = STANDARD
        SSD = HIGH
        """
    )
    with temp_disk_file(conf_str) as f:
        conf = LunrConfig.from_conf(f)
        self.assertEquals(conf.section('vtype-mapping'),
                          {'SATA': 'STANDARD', 'SSD': 'HIGH'})
def backup(self, id=None, src=None, timestamp=None):
    """
    This runs a backup job outside of the storage api, which is
    useful for performance testing backups
    """
    # Set basic Logging
    logging.basicConfig()
    # Get the lunr logger
    log = logger.get_logger()
    # Output Debug level info
    log.logger.setLevel(logging.DEBUG)
    # Load the local storage configuration
    conf = LunrConfig.from_storage_conf()
    # If no time provided, use current time
    timestamp = timestamp or time()
    # Init our helpers
    volume = VolumeHelper(conf)
    backup = BackupHelper(conf)
    # Track the snapshot explicitly instead of probing locals() so the
    # cleanup path is obvious even when create_snapshot() raises.
    snapshot = None
    try:
        # Create the snapshot
        snapshot = volume.create_snapshot(src, id, timestamp)
        # pprint() returns None, so printing its result used to emit a
        # stray None; print the label then dump the dict.
        print("Created snap-shot: ")
        pprint(snapshot)
        with self.timeit(snapshot['size']):
            # Backup the snapshot
            print("Starting Backup")
            backup.save(snapshot, id)
    finally:
        # Delete the snapshot if it was created
        if snapshot is not None:
            self._remove_volume(snapshot['path'])
def app_factory(global_conf, **local_conf): """paste.deploy app factory for creating WSGI API server""" # Reload the paster config, since paster only passes us our config conf = LunrConfig.from_conf(global_conf['__file__']) # ensure global logger is named logger.rename(__name__) app = StorageWsgiApp(conf, urlmap) # Check for a valid volume config app.helper.volumes.check_config() try: app.helper.check_registration() except Exception: logger.exception('Registration failed') volumes = app.helper.volumes.list() app.helper.cgroups.load_initial_cgroups(volumes) app.helper.exports.init_initiator_allows() return app
def test_multi_config(self):
    """Values from the conf.d directory win over the base config."""
    base_conf_body = dedent(
        """
        [db]
        # url = sqlite://
        echo = False
        pool_size = 5
        """
    )
    db_conf_body = dedent(
        """
        [db]
        url = mysql://root:@localhost/lunr
        echo = True
        """
    )
    scratch = mkdtemp()
    try:
        confd_dir = os.path.join(scratch, 'api-server.conf.d')
        os.mkdir(confd_dir)
        base_conf_filename = os.path.join(scratch, 'api-server.conf')
        db_conf_filename = os.path.join(confd_dir, 'db.conf')
        with open(base_conf_filename, 'w') as base_file:
            base_file.write(base_conf_body)
        with open(db_conf_filename, 'w') as db_file:
            db_file.write(db_conf_body)
        conf = LunrConfig.from_conf(base_conf_filename)
        # override commented value
        self.assertEquals(conf.string('db', 'url', ''),
                          'mysql://root:@localhost/lunr')
        # override base value
        self.assertEquals(conf.bool('db', 'echo', None), True)
        # inherit base value
        self.assertEquals(conf.int('db', 'pool_size', 0), 5)
    finally:
        rmtree(scratch)
InteractiveConsole.__init__(self, locals=locals) def __call__(self): return self.interact(banner=self.banner) def load_conf(options, args): if options.config: try: conf = LunrConfig.from_conf(options.config) except IOError, e: print 'Error: %s' % e sys.exit(1) else: try: conf = LunrConfig.from_api_conf() except IOError, e: conf = LunrConfig() print 'Warning: %s' % e if options.verbose: conf.set('db', 'echo', options.verbose) if args: conf.set('db', 'url', args[0]) return conf def main(): parser = OptionParser('%prog [options] [DB_URL]') parser.add_option('-v', '--verbose', action='store_true', help='make sqlalchemy noisy')
def test_path(self):
    """path() joins the configured lunr_dir with the given name."""
    conf = LunrConfig({'default': {'lunr_dir': '/tmp'}})
    result = conf.path('me')
    self.assertEquals(result, '/tmp/me')