def test_unable_to_connect(self):
    """db.configure raises DBError for an unreachable sqlite path,
    with and without auto_create."""
    tmp_dir = mkdtemp()
    # Remove the directory so sqlite has nowhere to create the file.
    os.rmdir(tmp_dir)
    self.assertFalse(os.path.exists(tmp_dir))
    url = 'sqlite:///' + tmp_dir + '/lunr.db'
    self.assertRaises(db.DBError, db.configure,
                      LunrConfig({'db': {'auto_create': True, 'url': url}}))
    self.assertRaises(db.DBError, db.configure,
                      LunrConfig({'db': {'url': url}}))
def test_client_retry(self):
    """After failing against every configured glance endpoint the client
    raises GlanceError and its url index has advanced past all three."""
    glance.glanceclient = MockRetryGlanceClient
    glance_urls = ['url1:9292', 'url2:9292', 'url3:9292']
    # NOTE: a stray unused `conf = LunrConfig()` was removed here.
    self.conf = LunrConfig({'glance': {'glance_urls': ', '.join(glance_urls)}})
    glance1 = glance.GlanceClient(self.conf)
    self.assertRaises(glance.GlanceError, glance1.head, 'junk')
    # Index 3 == every one of the 3 urls was tried and exhausted.
    self.assertEquals(glance1._glance_url_index, 3)
    glance2 = glance.GlanceClient(self.conf)
    self.assertRaises(glance.GlanceError, glance2.get, 'junk')
    self.assertEquals(glance2._glance_url_index, 3)
def setUp(self):
    """Build a scratch-dir storage node fixture with a mocked subprocess
    module and a fully-populated LunrConfig."""
    self.scratch = mkdtemp()
    self.storage = MockStorageNode(self.scratch)
    # Swap in the mock subprocess; the original is kept for tearDown.
    self._orig_subprocess = utils.subprocess
    utils.subprocess = MockSubprocess(storage=self.storage)
    self.conf = LunrConfig({
        'export': {
            'ietd_config': os.path.join(self.scratch, 'ietd.conf'),
            'proc_iet_volume': self.storage.proc_iet_volume,
            'proc_iet_session': self.storage.proc_iet_session,
            'device_prefix': self.storage.device_prefix,
            'initiators_allow': os.path.join(self.scratch,
                                             'initiators.allow'),
        },
        'storage': {
            'skip_fork': True,
            'run_dir': self.storage.run_dir,
        },
        'volume': {'device_prefix': self.storage.device_prefix},
        'disk': {'path': os.path.join(self.scratch, 'backups')},
        'glance': {
            'glance_urls': 'snet1,snet2',
            'glance_mgmt_urls': 'mgmt1, mgmt2',
        },
    })
    self.lockfile = os.path.join(self.scratch, 'lock')
    self.lock = ResourceFile(self.lockfile)
def setUp(self):
    """Write a sample initiators.allow file into a scratch dir and build
    the matching export config."""
    self.scratch = mkdtemp()
    self.initiators_allow = os.path.join(self.scratch, 'initiators')
    self.proc_iet_volume = os.path.join(self.scratch, 'volume')
    self.proc_iet_session = os.path.join(self.scratch, 'session')
    self.iqn_prefix = 'iqn.monkey.corp'
    self.default_allows = '10.127.0.0/24'
    self.conf = LunrConfig({
        'export': {
            'iqn_prefix': self.iqn_prefix,
            'initiators_allow': self.initiators_allow,
            'default_allows': self.default_allows,
            'proc_iet_volume': self.proc_iet_volume,
            'proc_iet_session': self.proc_iet_session,
        }
    })
    self.allow_data = dedent("""
        # Some random comments we might use in a header
        # just to assert our dominance over this file!
        iqn.monkey.corp:volume-00000001 10.0.0.1
        iqn.monkey.corp:volume-00000002 10.0.0.1, 10.0.0.2
        ALL 10.127.0.0/24
    """)
    with open(self.initiators_allow, 'w') as f:
        f.write(self.allow_data)
def setUp(self):
    """Stub out volume scanning and HTTP requests for audit tests."""
    self.helper = Helper(LunrConfig())

    def mock_scan_volumes():
        # Three representative volumes: a backup snapshot, a clone
        # source, and a plain volume.
        return [
            {'backup_id': '33485eb3-5900-4068-93a1-2b72677fd699',
             'device_number': '252:5',
             'id': '33485eb3-5900-4068-93a1-2b72677fd699',
             'origin': '91e81aec-b6da-40e9-82fb-f04a99a866eb',
             'path': '/dev/lunr-volume/33485eb3-5900-4068-93a1-2b72677fd699',
             'size': 3221225472,
             'timestamp': 1399449283.0},
            {'clone_id': '70bde49f-0ca7-4fe1-bce4-b5a73f850b8b',
             'device_number': '252:9',
             'id': '5304e6b7-986c-4d8b-9659-29a606f7c9e3',
             'origin': 'c270f302-9102-4327-bb0d-c8eaf9df872f',
             'path': '/dev/lunr-volume/5304e6b7-986c-4d8b-9659-29a606f7c9e3',
             'size': 5368709120},
            {'device_number': '252:8',
             'id': '70bde49f-0ca7-4fe1-bce4-b5a73f850b8b',
             'origin': '',
             'path': '/dev/lunr-volume/70bde49f-0ca7-4fe1-bce4-b5a73f850b8b',
             'size': 5368709120,
             'volume': True},
        ]

    self.helper.volumes._scan_volumes = mock_scan_volumes
    # Tests queue canned responses here; mock_request pops them in order.
    self.resp = []

    def mock_request(helper, url):
        return self.resp.pop(0)

    audit.request = mock_request
def load_conf(self, file):
    """Load a LunrConfig from *file*, or the default storage config.

    Returns an empty LunrConfig when the config file cannot be read,
    printing a warning if an explicit file was requested or verbose
    mode is enabled.
    """
    try:
        return LunrConfig.from_conf(file or LunrConfig.lunr_storage_config)
    except IOError as e:
        # `except X as e` and print() work on Python 2.6+ and 3,
        # unlike the old `except X, e` / print-statement forms.
        if file or self.verbose:
            print('Warning: %s' % e)
        return LunrConfig()
def setUp(self):
    """Seed an in-memory db with a volume that is being restored
    from an AVAILABLE backup."""
    conf = LunrConfig({'db': {'auto_create': True, 'url': 'sqlite://'}})
    self.app = ApiWsgiApp(conf, urlmap)
    self.db = db.Session
    self.account = db.models.Account()
    vtype = db.models.VolumeType('vtype')
    node = db.models.Node('node', 10, volume_type=vtype,
                          hostname='10.127.0.1', port=8080)
    # Simulate a volume that is being restored
    volume = db.models.Volume(0, 'vtype', id='v1', node=node,
                              account=self.account)
    backup = db.models.Backup(volume, status='AVAILABLE')
    self.db.add_all([vtype, self.account, node, volume, backup])
    self.db.commit()
    # Assign the backup as the restore of the volume
    volume.restore_of = backup.id
    self.volume = dict(volume)
    self.backup = dict(backup)
    self.db.commit()
def setUp(self):
    """Stand up an in-memory API app with mocked urlopen and a seeded
    account/node/volume/export graph."""
    self.test_conf = LunrConfig(
        {'db': {'auto_create': True, 'url': 'sqlite://'}})
    self.app = ApiWsgiApp(self.test_conf, urlmap)
    self.db = db.Session
    self.old_urlopen = base.urlopen
    base.urlopen = MockUrlopen
    self.old_db_close = self.db.close
    # This dirty little hack allows us to query the db after the request
    self.db.close = lambda: 42
    self.vtype = db.models.VolumeType('vtype')
    self.account = db.models.Account()
    self.node = db.models.Node('node', 10, volume_type=self.vtype,
                               hostname='10.127.0.1', port=8080)
    self.volume = db.models.Volume(0, 'vtype', id='v1', node=self.node,
                                   account=self.account)
    self.export = db.models.Export(volume=self.volume)
    self.db.add_all([self.vtype, self.account, self.node,
                     self.volume, self.export])
    self.db.commit()
def test_writable_cow_multiline_table(self):
    """A deliberately fragmented volume yields a multi-line dmsetup
    table, and get_writable_cow still produces a removable cow device."""
    # Let's do some silly math
    size = directio.size(self._ramdisk)
    megs = size / 1024 / 1024
    megs = megs - megs % 4
    # 12 megs for a volume, 4 for lvm itself
    alloc = megs - 12 - 4
    vg = self.conf.string('volume', 'volume_group', None)
    # Reserve a 4m hole at the front, and 8m at the end
    execute('lvcreate', vg, size='4m', name='tmpvol')
    execute('lvcreate', vg, size='%sm' % alloc, name='wasted')
    execute('lvremove', '%s/tmpvol' % vg, force=None)
    # Informational invocations only; results were bound to an unused
    # `foo` before — the assignments are dropped.
    execute('pvs', self._ramdisk)
    execute('vgs', vg)
    execute('lvs', vg)
    volume_id = str(uuid4())
    self.volume.create(volume_id)
    volume = self.volume.get(volume_id)
    execute('lvremove', '%s/wasted' % vg, force=None)
    # dm names double every '-' in vg and lv names.
    dmname = '%s-%s' % (re.sub('-', '--', vg),
                        re.sub('-', '--', volume_id))
    table = execute('dmsetup', 'table', dmname)
    # Fragmentation means more than one target line in the table.
    self.assert_('\n' in table)
    backup_id = str(uuid4())
    snapshot = self.volume.create_snapshot(volume_id, backup_id, '123456')
    scrub = Scrub(LunrConfig())
    (cow_name, cow_path) = scrub.get_writable_cow(snapshot, volume)
    execute('dmsetup', 'remove', cow_name)
    self.assertTrue(True)
def scrub_cow(self, path=None, display=None):
    """
    Scrub or display details for a cow device

    Example: %(prog)s (cmd)s scrub-cow -d /dev/mapper/lunr--volume-my--snap-cow
    """
    config = {}
    # Map verbosity flags onto log levels; -vv also surfaces exceptions.
    if self.verbose == 1:
        log.setLevel(logging.INFO)
    if self.verbose > 1:
        log.setLevel(logging.DEBUG)
        config['display-exceptions'] = True
    if display:
        if self.verbose < 2:
            log.setLevel(logging.INFO)
        log.info("Display Only, Not Scrubbing")
        config['display-only'] = True
    try:
        # Init the Scrub object with our command line options
        scrub = Scrub(LunrConfig({'scrub': config}))
        # Scrub the cow
        scrub.scrub_cow(path)
    except ScrubError as e:
        # `except X as e` replaces the Python-2-only `except X, e` form.
        log.error(str(e))
        return 1
def test_max_snapshot_bytes_flag(self):
    """The max_snapshot_bytes config caps _max_snapshot_size exactly."""
    max_size = 4 * 1024 * 1024
    vol_size = 100 * 1024 * 1024 * 1024
    helper = volume.VolumeHelper(
        LunrConfig({'volume': {'max_snapshot_bytes': max_size}}))
    self.assertEquals(helper._max_snapshot_size(vol_size), max_size)
def test_audit(self):
    """Worker.audit deletes container objects that the manifest does
    not reference, keeping the manifest and its two blocks."""
    manifest = Manifest.blank(2)
    worker = Worker('foo', LunrConfig({
        'backup': {'client': 'memory'},
        'storage': {'run_dir': self.scratch},
    }), manifest=manifest)
    conn = worker.conn
    conn.put_container('foo')
    backup = manifest.create_backup('bak1')
    backup[0] = worker.empty_block_hash
    conn.put_object('foo', backup[0], 'zeroes')
    backup[1] = 'some_block_hash'
    conn.put_object('foo', backup[1], ' more stuff')
    save_manifest(manifest, conn, worker.id, worker._lock_path())
    # Add some non referenced blocks.
    for name in ('stuff1', 'stuff2', 'stuff3'):
        conn.put_object('foo', name, 'unreferenced ' + name)
    _headers, original_list = conn.get_container('foo')
    # Manifest, 2 blocks, 3 stuffs.
    self.assertEquals(len(original_list), 6)
    worker.audit()
    _headers, new_list = conn.get_container('foo')
    # Manifest, 2 blocks.
    self.assertEquals(len(new_list), 3)
def test_status(self):
    """VolumeHelper.status parses mocked vgs output into integer stats."""
    def mock_vgs(cmd, vg, **kwargs):
        # Emulate `vgs` output for the requested options, honoring the
        # separator and noheadings flags the helper passes.
        mock_values = {
            'vg_size': '20000B',
            'vg_free': '10000B',
            'lv_count': '20',
        }
        options = kwargs['options'].split(',')
        values = [mock_values[key] for key in options]
        sep = kwargs.get('separator', ' ')
        out = ' ' + sep.join(values)
        if 'noheadings' not in kwargs:
            heading = ' ' + sep.join(options)
            out = '\n'.join([heading, out])
        return out

    volume.execute = mock_vgs
    h = volume.VolumeHelper(LunrConfig())
    expected = {
        'volume_group': 'lunr-volume',
        'vg_size': 20000,
        'vg_free': 10000,
        'lv_count': 20,
    }
    self.assertEquals(h.status(), expected)
def test_status(self):
    """ExportHelper.status counts exports, sessions and connections
    from the iet proc files."""
    VOLUME_DATA = dedent("""
        tid:1 name:iqn.2010-11.com.rackspace:vol1
        \tlun:0 path:/dev/lunr1/vol1
        tid:2 name:iqn.2010-11.com.rackspace:vol2
        \tlun:0 path:/dev/lunr1/vol2
        tid:3 name:iqn.2010-11.com.rackspace:vol3
    """)
    SESSION_DATA = dedent("""
        tid:1 name:iqn.2010-11.com.rackspace:vol1
        \tsid:281474997486080 initiator:iqn.2010-11.org:baaa6e50093
        \t\tcid:0 ip:127.0.0.1 state:active hd:none dd:none
        tid:2 name:iqn.2010-11.com.rackspace:vol2
        tid:3 name:iqn.2010-11.com.rackspace:vol3
    """)
    proc_iet_volume = os.path.join(self.scratch, 'volume')
    proc_iet_session = os.path.join(self.scratch, 'session')
    ietd_config = os.path.join(self.scratch, 'ietd.conf')
    with open(proc_iet_volume, 'w') as f:
        f.write(VOLUME_DATA)
    with open(proc_iet_session, 'w') as f:
        f.write(SESSION_DATA)
    conf = LunrConfig({
        'export': {
            'proc_iet_volume': proc_iet_volume,
            'proc_iet_session': proc_iet_session,
            'ietd_config': ietd_config,
        }
    })
    h = export.ExportHelper(conf)
    # Only vol1 has an active initiator connection.
    expected = {'exports': 3, 'connected': 1, 'sessions': 3}
    self.assertEquals(h.status(), expected)
def test_auto_create_false(self):
    """With auto_create disabled, db.configure warns that the schema is
    not version controlled instead of creating it."""
    class FakeLogger(object):
        def __init__(self):
            self.warned = False

        def warn(self, msg):
            self.warned = True
            self.msg = msg

    logger = FakeLogger()
    temp = mkdtemp()
    try:
        conf = LunrConfig({
            'default': {'lunr_dir': temp},
            'db': {'auto_create': False},
        })
        with patch(db, 'logger', logger):
            db.configure(conf)
        self.assert_(logger.warned)
        self.assert_('not version controlled' in logger.msg)
    finally:
        rmtree(temp)
def test_status(self):
    """A fresh memory-backed BackupHelper reports zero containers/objects."""
    self.conf = LunrConfig({
        'storage': {'run_dir': self.run_dir, 'skip_fork': True},
        'backup': {'client': 'memory'},
    })
    helper = backup.BackupHelper(self.conf)
    expected = {'client': 'memory', 'containers': 0, 'objects': 0}
    self.assertEquals(helper.status(), expected)
def setUp(self):
    """Stand up the API app against a fresh in-memory sqlite db."""
    # FIX: 'auto_create' was previously a top-level section; every other
    # test config in this file places it inside the 'db' section, which
    # is where the db code reads it from.
    self.test_conf = LunrConfig({
        'db': {'auto_create': True, 'url': 'sqlite://'},
    })
    self.app = ApiWsgiApp(self.test_conf, urlmap)
def test_custom(self):
    """Non-base images fall back to the default scratch multiplier of 4."""
    conf = LunrConfig()
    h = volume.VolumeHelper(conf)
    # NOTE: an unused local `min_disk = 40` was removed; the mock image
    # only carries the literal string 'min_disk'.
    image = MockImageHead('imgid', 'size', 'format', 'container',
                          'min_disk', {'image_type': 'notbase'}, 'ACTIVE')
    result = h._get_scratch_multiplier(image)
    self.assertEquals(result, 4)
def test_volume_group_id_to_long(self):
    """A 31-character volume_group name fails check_config."""
    # (Method name typo "to_long" is kept: renaming would change test
    # discovery.)
    over_long_name = 'A' * 31
    conf = LunrConfig({'volume': {'volume_group': over_long_name}})
    helper = volume.VolumeHelper(conf)
    self.assertRaises(RuntimeError, helper.check_config)
def __init__(self):
    """Minimal in-memory-db app stub with scheduler-related knobs."""
    self.conf = LunrConfig(
        {'db': {'auto_create': True, 'url': 'sqlite://'}})
    # self.urlmap = urlmap
    self.helper = db.configure(self.conf)
    self.fill_percentage_limit = 0.5
    self.node_timeout = None
    self.backups_per_volume = 10
def __init__(self):
    """Minimal in-memory-db app stub with only a fill limit."""
    self.conf = LunrConfig(
        {'db': {'auto_create': True, 'url': 'sqlite://'}})
    # self.urlmap = urlmap
    self.helper = db.configure(self.conf)
    self.fill_percentage_limit = 0.5
def __init__(self):
    """In-memory-db app stub with fill strategy and convert limit."""
    self.conf = LunrConfig(
        {'db': {'auto_create': True, 'url': 'sqlite://', 'echo': False}})
    # self.urlmap = urlmap
    self.helper = db.configure(self.conf)
    self.fill_percentage_limit = 0.5
    self.fill_strategy = 'broad_fill'
    self.node_timeout = None
    self.image_convert_limit = 3
def test_echo_false(self):
    """The string 'false' for db echo disables SQL echoing on the engine."""
    conf = LunrConfig({
        'db': {
            'auto_create': True,
            'url': 'sqlite://',
            'echo': 'false',
        }
    })
    db.configure(conf)
    self.assertFalse(db.Session.bind.echo)
def test_max_snapshot_bytes_aligned(self):
    """_max_snapshot_size rounds the configured cap down to a whole
    multiple of the sector size."""
    # Has to be a multiple of sector size or lvm chokes.
    sector_size = 423
    max_size = sector_size * 100 + 1  # 1 too many!
    vol_size = 100 * 1024 * 1024 * 1024
    conf = LunrConfig({'volume': {'max_snapshot_bytes': max_size,
                                  'sector_size': sector_size}})
    helper = volume.VolumeHelper(conf)
    # We round down ourselves
    self.assertEquals(helper._max_snapshot_size(vol_size), max_size - 1)
def test_non_default_poolclass(self):
    """A configured poolclass name is resolved to the sqlalchemy pool."""
    conf = LunrConfig({
        'db': {
            'auto_create': True,
            'url': 'sqlite://',
            'poolclass': 'StaticPool',
        }
    })
    db.configure(conf)
    self.assert_(isinstance(db.Session.bind.pool, pool.StaticPool))
def test_shuffle_glance_url(self):
    """GlanceClient shuffles its url list per instance; within 10 tries
    two clients should disagree on ordering (negligible flake chance)."""
    glance.glanceclient = MockGlanceClient
    glance_urls = ['url1:9292', 'url2:9292', 'url3:9292']
    # NOTE: a stray unused `conf = LunrConfig()` was removed here.
    self.conf = LunrConfig({'glance': {'glance_urls': ', '.join(glance_urls)}})
    glance1 = glance.GlanceClient(self.conf)
    self.assertItemsEqual(glance1.glance_urls, glance_urls)
    shuffled = False
    for i in range(10):
        glance2 = glance.GlanceClient(self.conf)
        # Same urls regardless of order.
        self.assertItemsEqual(glance2.glance_urls, glance_urls)
        if glance1.glance_urls != glance2.glance_urls:
            shuffled = True
            break
    self.assertTrue(shuffled)
def test_salt_empty_blocks(self):
    """Each worker's manifest carries a unique salt, so empty-block
    hashes differ between workers while the raw empty block matches."""
    vol1 = 'vol1'
    vol2 = 'vol2'
    manifest1 = Manifest()
    manifest2 = Manifest()
    conf = LunrConfig({'backup': {'client': 'memory'}})
    worker1 = Worker(vol1, conf, manifest1)
    # FIX: worker2 was built with vol1, leaving vol2 defined but unused;
    # the intent appears to be two workers on distinct volumes.
    worker2 = Worker(vol2, conf, manifest2)
    self.assert_(worker1.manifest.salt != worker2.manifest.salt)
    self.assert_(worker1.empty_block_hash != worker2.empty_block_hash)
    self.assertEquals(worker1.empty_block, worker2.empty_block)
def test_base_conf(self):
    """Base images use the configured base_convert_multiplier."""
    multiplier = 1.5
    conf = LunrConfig({'glance': {'base_convert_multiplier': multiplier}})
    h = volume.VolumeHelper(conf)
    # NOTE: an unused local `min_disk = 40` was removed; the mock image
    # only carries the literal string 'min_disk'.
    image = MockImageHead('imgid', 'size', 'format', 'container',
                          'min_disk', {'image_type': 'base'}, 'ACTIVE')
    result = h._get_scratch_multiplier(image)
    self.assertEquals(result, multiplier)
def setUp(self):
    """Reset the memory client and build a disk-backed backup config
    rooted in a scratch directory."""
    memory.reset()
    self.scratch = mkdtemp()
    self.run_dir = os.path.join(self.scratch, 'run')
    self.backup_dir = os.path.join(self.scratch, 'backups')
    os.mkdir(self.backup_dir)
    self.conf = LunrConfig({
        'storage': {'run_dir': self.run_dir, 'skip_fork': True},
        'backup': {'client': 'disk'},
        'disk': {'path': self.backup_dir},
    })
def test_create_default_file_db(self):
    """Without an explicit url, db.configure builds lunr.db under
    lunr_dir and uses a NullPool."""
    temp = mkdtemp()
    try:
        conf = LunrConfig({
            'default': {'lunr_dir': temp},
            'db': {'auto_create': True},
        })
        db.configure(conf)
        self.assertEquals(str(db.Session.bind.url),
                          'sqlite:///' + temp + '/lunr.db')
        self.assert_(isinstance(db.Session.bind.pool, pool.NullPool))
    finally:
        rmtree(temp)