def testInit(self):
    """Construct an empty or seeded GroupMap."""
    # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
    self.assertEqual(group.GroupMap,
                     type(group.GroupMap()),
                     msg='failed to create an empty GroupMap')
    # Seeding with a list of valid entries should populate the map.
    gmap = group.GroupMap([self._good_entry])
    self.assertEqual(self._good_entry,
                     gmap.PopItem(),
                     msg='failed to seed GroupMap with list')
    # Seeding with non-entry objects must be rejected.
    self.assertRaises(TypeError, group.GroupMap, ['string'])
def testNssDbGroupHandlerWriteData(self):
    """WriteData() emits the three makedb key lines for one group entry."""
    ent = 'foo:x:1000:bar'
    # Mock makedb's stdin and record the exact expected writes:
    # name key, gid key, and sequence-number key.
    makedb_stdin = self.mox.CreateMock(sys.stdin)
    makedb_stdin.write('.foo %s\n' % ent)
    makedb_stdin.write('=1000 %s\n' % ent)
    makedb_stdin.write('00 %s\n' % ent)
    m = group.GroupMap()
    g = group.GroupMapEntry()
    g.name = 'foo'
    g.gid = 1000
    g.passwd = 'x'
    g.members = ['bar']
    # failUnless is a deprecated alias (removed in Python 3.12); use assertTrue.
    self.assertTrue(m.Add(g))
    writer = nssdb.NssDbGroupHandler({
        'makedb': '/bin/false',
        'dir': '/tmp'
    })
    self.mox.ReplayAll()
    writer.WriteData(makedb_stdin, g, 0)
def testVerifyFailure(self):
    """Verify() fails and removes the temp cache when the db was tampered with."""
    # Build a one-entry group map.
    entry = group.GroupMapEntry()
    entry.name = 'foo'
    entry.gid = 2000
    entry_map = group.GroupMap()
    self.assertTrue(entry_map.Add(entry))
    updater = nssdb.NssDbGroupHandler({
        'dir': self.workdir,
        'makedb': '/usr/bin/makedb'
    })
    written = updater.Write(entry_map)
    self.assertTrue(os.path.exists(updater.temp_cache_filename),
                    'updater.Write() did not create a file')
    # Corrupt the freshly written cache by dropping its first key.
    cache_db = btopen(updater.temp_cache_filename)
    del cache_db[cache_db.first()[0]]
    cache_db.sync()
    cache_db.close()
    # Verification must report failure and clean up the temp file.
    result = updater.Verify(written)
    self.assertEqual(False, result)
    self.assertFalse(
        os.path.exists(os.path.join(updater.temp_cache_filename)))
def testGetMap(self):
    """GetMap() parses consul-style key/value JSON into a GroupMap."""
    group_map = group.GroupMap()
    # Values are base64-encoded: 'MTA=' -> '10', 'Zm9vCmJhcg==' -> 'foo\nbar'.
    cache_info = StringIO.StringIO('''[
        {"Key": "org/groups/foo/gid", "Value": "MTA="},
        {"Key": "org/groups/foo/members", "Value": "Zm9vCmJhcg=="}
    ]''')
    self.parser.GetMap(cache_info, group_map)
    # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
    self.assertEqual(self.good_entry, group_map.PopItem())
def testGetMap(self):
    """GetMap() parses nested JSON values, ignoring unknown keys."""
    group_map = group.GroupMap()
    # 'irrelevant_key' must be silently skipped by the parser.
    cache_info = StringIO.StringIO('''[
        {
            "Key": "foo",
            "Value": {
                "gid": 10,
                "members": "foo\\nbar",
                "irrelevant_key": "bacon"
            }
        }
    ]''')
    self.parser.GetMap(cache_info, group_map)
    # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
    self.assertEqual(self.good_entry, group_map.PopItem())
def GetGroupMap():
    """Returns a GroupMap built from nss calls."""
    group_map = group.GroupMap()
    # struct_group is a 4-field sequence: (name, passwd, gid, members).
    for name, gr_passwd, gid, members in grp.getgrall():
        map_entry = group.GroupMapEntry()
        map_entry.name = name
        map_entry.passwd = gr_passwd
        map_entry.gid = gid
        # An empty member list is represented as a single empty string.
        map_entry.members = members if members else ['']
        group_map.Add(map_entry)
    return group_map
def testAdd(self):
    """Add throws an error for objects it can't verify."""
    gmap = group.GroupMap()
    entry = self._good_entry
    # assert_/assertEquals are deprecated aliases (removed in Python 3.12);
    # use assertTrue/assertEqual.
    self.assertTrue(gmap.Add(entry), msg='failed to append new entry.')
    self.assertEqual(1, len(gmap), msg='unexpected size for Map.')
    ret_entry = gmap.PopItem()
    self.assertEqual(ret_entry, entry, msg='failed to pop correct entry.')
    # A PasswdMapEntry is the wrong entry type for a GroupMap.
    pentry = passwd.PasswdMapEntry()
    pentry.name = 'foo'
    pentry.uid = 10
    pentry.gid = 10
    self.assertRaises(TypeError, gmap.Add, pentry)
def testNssDbGroupHandlerWrite(self):
    """Write() feeds the whole map through a (mocked) makedb subprocess."""
    ent = 'foo:x:1000:bar'
    # Expected writes to makedb's stdin: name key, gid key, sequence key.
    makedb_stdin = self.mox.CreateMock(sys.stdin)
    makedb_stdin.write('.foo %s\n' % ent)
    makedb_stdin.write('=1000 %s\n' % ent)
    makedb_stdin.write('00 %s\n' % ent)
    makedb_stdin.close()
    makedb_stdout = self.mox.CreateMock(sys.stdout)
    makedb_stdout.read().AndReturn('')
    makedb_stdout.close()
    m = group.GroupMap()
    g = group.GroupMapEntry()
    g.name = 'foo'
    g.gid = 1000
    g.passwd = 'x'
    g.members = ['bar']
    g.Verify()
    # failUnless/failIf are deprecated aliases (removed in Python 3.12);
    # use assertTrue/assertFalse.
    self.assertTrue(m.Add(g))
    # First select reports stdout readable (drained), second reports done.
    self.mox.StubOutWithMock(select, 'select')
    select.select([makedb_stdout], (), (), 0).AndReturn(([37], [], []))
    select.select([makedb_stdout], (), (), 0).AndReturn(([], [], []))

    def SpawnMakeDb():
        # Stand-in for the real subprocess spawn.
        makedb = MakeDbDummy()
        makedb.stdin = makedb_stdin
        makedb.stdout = makedb_stdout
        return makedb

    writer = nssdb.NssDbGroupHandler({
        'makedb': '/usr/bin/makedb',
        'dir': self.workdir
    })
    writer._SpawnMakeDb = SpawnMakeDb
    self.mox.ReplayAll()
    writer.Write(m)
    tmpgroup = os.path.join(self.workdir, 'group.db')
    self.assertFalse(os.path.exists(tmpgroup))
    # just clean it up, Write() doesn't Commit()
    writer._Rollback()
def testMerge(self):
    """Verify Merge() throws the right exceptions and correctly merges."""
    # Setup some MapEntry objects with distinct Key()s
    pentry1 = self._good_entry
    pentry2 = passwd.PasswdMapEntry()
    pentry2.name = 'john'
    pentry3 = passwd.PasswdMapEntry()
    pentry3.name = 'jane'
    # Setup some Map objects
    pmap_big = passwd.PasswdMap([pentry1, pentry2])
    pmap_small = passwd.PasswdMap([pentry3])
    # Merge small into big
    self.assertTrue(pmap_big.Merge(pmap_small),
                    msg='Merging small into big failed!')
    self.assertTrue(pmap_big.Exists(pentry1),
                    msg='pentry1 not found in Map')
    # BUGFIX: the following two messages previously said 'pentry1',
    # a copy-paste error that would mislabel a failure.
    self.assertTrue(pmap_big.Exists(pentry2),
                    msg='pentry2 not found in Map')
    self.assertTrue(pmap_big.Exists(pentry3),
                    msg='pentry3 not found in Map')
    # A second merge should do nothing
    self.assertFalse(pmap_big.Merge(pmap_small),
                     msg='Re-merging small into big succeeded.')
    # An empty merge should do nothing
    self.assertFalse(pmap_big.Merge(passwd.PasswdMap()),
                     msg='Empty Merge should have done nothing.')
    # Merge a GroupMap should throw TypeError
    gmap = group.GroupMap()
    self.assertRaises(TypeError, pmap_big.Merge, gmap)
    # Merge an older map should throw an InvalidMerge
    old_map = passwd.PasswdMap(modify_time=1)
    new_map = passwd.PasswdMap(modify_time=2)
    self.assertRaises(error.InvalidMerge, new_map.Merge, old_map)
    old_map = passwd.PasswdMap(update_time=1)
    new_map = passwd.PasswdMap(update_time=2)
    self.assertRaises(error.InvalidMerge, new_map.Merge, old_map)
def testVerifyFailure(self):
    """Verify() fails against a tampered cache (real makedb variant)."""
    # Can't test if no makedb
    if not os.path.exists('/usr/bin/makedb'):
        raise TestSkipped('no /usr/bin/makedb')

    # Hide the warning that we expect to get
    class TestFilter(logging.Filter):

        def filter(self, record):
            return not record.msg.startswith(
                'verify failed: %d keys missing')

    fltr = TestFilter()
    logging.getLogger('NssDbGroupHandler').addFilter(fltr)
    # create a map
    m = group.GroupMap()
    e = group.GroupMapEntry()
    e.name = 'foo'
    e.gid = 2000
    # failUnless/failUnlessEqual/failIf are deprecated aliases (removed in
    # Python 3.12); use assertTrue/assertEqual/assertFalse.
    self.assertTrue(m.Add(e))
    updater = nssdb.NssDbGroupHandler({
        'dir': self.workdir,
        'makedb': '/usr/bin/makedb'
    })
    written = updater.Write(m)
    self.assertTrue(os.path.exists(updater.temp_cache_filename),
                    'updater.Write() did not create a file')
    # change the cache
    db = bsddb.btopen(updater.temp_cache_filename)
    del db[db.first()[0]]
    db.sync()
    db.close()
    retval = updater.Verify(written)
    self.assertEqual(False, retval)
    self.assertFalse(os.path.exists(os.path.join(updater.temp_cache_filename)))
    # no longer hide this message
    logging.getLogger('NssDbGroupHandler').removeFilter(fltr)
def testVerify(self):
    """Verify() succeeds against a freshly written, untouched cache."""
    # create a map
    m = group.GroupMap()
    e = group.GroupMapEntry()
    e.name = 'foo'
    e.gid = 2000
    # failUnless/failUnlessEqual are deprecated aliases (removed in
    # Python 3.12); use assertTrue/assertEqual.
    self.assertTrue(m.Add(e))
    updater = nssdb.NssDbGroupHandler({
        'dir': self.workdir,
        'makedb': '/usr/bin/makedb'
    })
    written = updater.Write(m)
    self.assertTrue(os.path.exists(updater.temp_cache_filename),
                    'updater.Write() did not create a file')
    retval = updater.Verify(written)
    self.assertEqual(True, retval)
    os.unlink(updater.temp_cache_filename)
def __init__(self, conf, map_name, automount_mountpoint=None):
    """Initialise the Cache object.

    Args:
      conf: A dictionary of key/value pairs
      map_name: A string representation of the map type
      automount_mountpoint: A string containing the automount mountpoint,
        used only by automount maps.

    Raises:
      UnsupportedMap: for map types we don't know about
    """
    super(Cache, self).__init__()
    # Set up a logger for our children
    self.log = logging.getLogger(self.__class__.__name__)
    # Store config info
    self.conf = conf
    self.output_dir = conf.get('dir', '.')
    self.automount_mountpoint = automount_mountpoint
    self.map_name = map_name
    # Map each supported map type to its constructor, then instantiate
    # the map we may be asked to load our cache into.
    map_factories = {
        config.MAP_PASSWORD: passwd.PasswdMap,
        config.MAP_SSHKEY: sshkey.SshkeyMap,
        config.MAP_GROUP: group.GroupMap,
        config.MAP_SHADOW: shadow.ShadowMap,
        config.MAP_NETGROUP: netgroup.NetgroupMap,
        config.MAP_AUTOMOUNT: automount.AutomountMap,
    }
    factory = map_factories.get(map_name)
    if factory is None:
        raise error.UnsupportedMap('Cache does not support %s' % map_name)
    self.data = factory()
def CreateMap(self):
    """Return a GroupMap instance."""
    new_map = group.GroupMap()
    return new_map
def CreateMap(self):
    """Returns a new GroupMap instance to have GroupMapEntries added to it."""
    empty_map = group.GroupMap()
    return empty_map