def test_readall(self):
    """Scan a directory and verify every configuration in it was loaded."""
    group = ConfigGroup()
    start = time.time()
    group.add_path(self._tmpdir)
    elapsed = time.time() - start
    _log.debug("Time to scan directory = {:.3g}s".format(elapsed))
    # every known config name, and nothing else, should now be a key
    self.assertEqual(set(self._config_names), set(group.keys()))
def test_sandbox(self):
    """Combine expand and prefix to make a sandbox."""
    group = ConfigGroup().add_path(self._tmpdir)
    # (sandbox prefix, db name, collection root, member keys) per case
    cases = [
        ("marvel.spiderman", "marvel", "spiderman",
         ("amazing", "spectacular")),
        ("dc.flash", "dc", "flash",
         ("garrick", "barry", "wally")),
    ]
    for sandbox, db_name, coll_root, members in cases:
        group.expand("{}*".format(sandbox))
        _log.debug("Expanded {} keys: {}".format(sandbox, group.keys()))
        group.set_prefix(sandbox)
        qe, expect_name = None, None
        for member in members:
            qe = group[member]
            expect_name = "{}.{}".format(coll_root, member)
            self.assertEqual(qe.collection.name, expect_name)
            self.assertEqual(qe.db.name, db_name)
        # only entry in db should be hello:<collname>
        _log.debug("Query QE={} collection={}".format(
            qe, qe.collection.name))
        cur = qe.query()
        row = cur[0]
        self.assertEqual(row["hello"], expect_name)
def test_sandbox(self):
    """Combine expand and prefix to make a sandbox."""
    group = ConfigGroup().add_path(self._tmpdir)
    # db name and member collections, keyed by sandbox prefix
    members_by_sandbox = {
        "marvel.spiderman": ("marvel", ("amazing", "spectacular")),
        "dc.flash": ("dc", ("garrick", "barry", "wally")),
    }
    for sandbox in ("marvel.spiderman", "dc.flash"):
        group.expand("{}*".format(sandbox))
        _log.debug("Expanded {} keys: {}".format(sandbox, group.keys()))
        group.set_prefix(sandbox)
        db_name, members = members_by_sandbox[sandbox]
        coll_root = sandbox.split(".", 1)[1]
        qe = expect_name = None
        for member in members:
            qe = group[member]
            expect_name = "{}.{}".format(coll_root, member)
            self.assertEqual(qe.collection.name, expect_name)
            self.assertEqual(qe.db.name, db_name)
        # only entry in db should be hello:<collname>
        _log.debug("Query QE={} collection={}".format(qe, qe.collection.name))
        cur = qe.query()
        row = cur[0]
        self.assertEqual(row["hello"], expect_name)
def test_expand(self):
    """Expand configurations to get full list of collections.

    Verifies that after expanding 'marvel.*' the group contains every
    marvel collection, while 'dc' keeps only its configured entries.
    """
    g = ConfigGroup().add_path(self._tmpdir)
    _log.debug("Base: {}".format(g.keys()))
    t0 = time.time()
    g.expand("marvel.*")
    t1 = time.time()
    _log.debug("Expanded in {:.3g}s: {}".format(t1 - t0, g.keys()))
    # expect expanded marvel, but just configured dc
    # NOTE: list comprehensions, not map(); on Python 3 map() returns an
    # iterator and `marvel + dc` would raise TypeError.
    marvel = ['marvel.' + val for val in self.DB_COLL['marvel']]
    dc = ['dc.' + val for val in self.DB_COLL_CFG['dc']]
    expect = set(marvel + dc)
    got = set(g.keys())
    self.assertEqual(expect, got)
def setUp(self):
    """Build a fresh group backed by the mock query engine, plus test configs."""
    self.g = ConfigGroup(qe_class=MockQueryEngine)
    self.configs = []
    for idx in range(5):
        self.configs.append(Cfg("qe{:d}".format(idx)))
class ConfigGroupTestCase(unittest.TestCase):
    """Unit tests for ConfigGroup: add/lookup, loading from a path,
    cache eviction, and wildcard expansion.
    """

    def setUp(self):
        # Fresh group per test; MockQueryEngine avoids a real DB connection.
        self.g = ConfigGroup(qe_class=MockQueryEngine)
        self.configs = [Cfg("qe{:d}".format(i)) for i in range(5)]

    def test_add(self):
        """ConfigGroup add and lookup
        """
        keys = ["foo", "bar", "foo.a", "foo.b"]
        expect = {}
        for i, k in enumerate(keys):
            self.g.add(k, self.configs[i])
            # expected engine, built directly from the same settings
            expect[k] = MockQueryEngine(**self.configs[i].settings)
        # exact-key lookups return one engine
        self.assertEqual(self.g["foo"], expect["foo"])
        self.assertEqual(self.g["bar"], expect["bar"])
        # wildcard lookups return a dict of matching engines
        self.assertEqual(self.g["bar*"], {"bar": expect["bar"]})
        self.assertEqual(self.g["foo.a"], expect["foo.a"])
        self.assertEqual(self.g["foo.*"], {
            "foo.a": expect["foo.a"],
            "foo.b": expect["foo.b"]
        })

    def test_add_path(self):
        """Add set of query engines from a path.
        """
        import shutil  # local: cleanup only; avoids touching file-level imports
        # directory of pretend configs
        d = tempfile.mkdtemp()
        try:
            # fill with some configs
            c = {}
            for root in ("foo", "bar"):
                for sub in ("a", "b.1", "b.2"):
                    config = {dbconfig.DB_KEY: root, dbconfig.COLL_KEY: sub}
                    filename = "mg_core_{}_{}.json".format(root, sub)
                    with open(_opj(d, filename), "w") as fp:
                        json.dump(config, fp)
                    c["{}.{}".format(root, sub)] = config
            # read them
            self.g.add_path(d)
            # check all were added
            self.assertEqual(sorted(self.g.keys()), sorted(c.keys()))
            # check one
            qe1 = self.g["foo.a"].kw
            c1 = c["foo.a"]
            self.assertTrue(dict_subset(c1, qe1))
            # check with prefix
            self.g.set_prefix("foo.b")
            self.assertTrue(dict_subset(c["foo.b.1"], self.g["1"].kw))
            self.assertRaises(KeyError, self.g.__getitem__, "bla")
            # check list with prefix
            gkeys = sorted(self.g["*"].keys())
            self.assertEqual(gkeys, ["foo.b.1", "foo.b.2"])
            # check list w/o prefix
            self.g.set_prefix()
            gkeys = sorted(self.g["bar.b.*"].keys())
            self.assertEqual(gkeys, ["bar.b.1", "bar.b.2"])
        finally:
            # rm -r $d (robust against partial population, unlike a manual
            # listdir/unlink/rmdir loop)
            shutil.rmtree(d, ignore_errors=True)

    def test_uncache(self):
        """Remove cached query engine(s) from ConfigGroup.
        """
        keys = ("foo.a", "foo", "bar")
        for key, cfg in zip(keys, self.configs):
            self.g.add(key, cfg)
        # force instantiation/caching
        for key in keys:
            self.g[key]
        left_behind = self.g[keys[2]]
        # remove all foo from cache
        self.g.uncache("foo*")
        # check that they are not cached
        for i in range(2):
            self.assertRaises(KeyError, self.g._cached.__getitem__, keys[i])
        # check that un-removed remain
        self.assertEqual(self.g[keys[2]], left_behind)

    def test_expand(self):
        """Add multiple collections at once with 'expand'.
        """
        self.g.add("foo", Cfg("data"), expand=True)
        # check that data.* got added as foo.*
        keys = set(self.g.keys())
        expect = set(["foo"] + [f.replace("data", "foo") for f in mockcoll])
        self.assertEqual(expect, keys)
class ConfigGroupTestCase(unittest.TestCase):
    """Unit tests for ConfigGroup: add/lookup, loading configurations from
    a directory, cache eviction, and wildcard expansion.
    """

    def setUp(self):
        # Fresh group per test; MockQueryEngine stands in for a real engine.
        self.g = ConfigGroup(qe_class=MockQueryEngine)
        self.configs = [Cfg("qe{:d}".format(i)) for i in range(5)]

    def test_add(self):
        """ConfigGroup add and lookup
        """
        keys = ["foo", "bar", "foo.a", "foo.b"]
        expect = {}
        for i, k in enumerate(keys):
            self.g.add(k, self.configs[i])
            # expected engine, built directly from the same settings
            expect[k] = MockQueryEngine(**self.configs[i].settings)
        # exact-key lookups return a single engine
        self.assertEqual(self.g["foo"], expect["foo"])
        self.assertEqual(self.g["bar"], expect["bar"])
        # wildcard lookups return a dict of {key: engine} matches
        self.assertEqual(self.g["bar*"], {"bar": expect["bar"]})
        self.assertEqual(self.g["foo.a"], expect["foo.a"])
        self.assertEqual(self.g["foo.*"], {"foo.a": expect["foo.a"],
                                           "foo.b": expect["foo.b"]})

    def test_add_path(self):
        """Add set of query engines from a path.
        """
        # directory of pretend configs
        d = tempfile.mkdtemp()
        try:
            # fill with some configs
            c = {}
            for root in ("foo", "bar"):
                for sub in ("a", "b.1", "b.2"):
                    config = {
                        dbconfig.DB_KEY: root,
                        dbconfig.COLL_KEY: sub
                    }
                    # filename pattern the group's path scanner recognizes
                    filename = "mg_core_{}_{}.json".format(root, sub)
                    with open(_opj(d, filename), "w") as fp:
                        json.dump(config, fp)
                    c["{}.{}".format(root, sub)] = config
            # read them
            self.g.add_path(d)
            # check all were added
            self.assertEqual(sorted(self.g.keys()), sorted(c.keys()))
            # check one
            qe1 = self.g["foo.a"].kw
            c1 = c["foo.a"]
            self.assertTrue(dict_subset(c1, qe1))
            # check with prefix
            self.g.set_prefix("foo.b")
            self.assertTrue(dict_subset(c["foo.b.1"], self.g["1"].kw))
            self.assertRaises(KeyError, self.g.__getitem__, "bla")
            # check list with prefix
            gkeys = sorted(self.g["*"].keys())
            self.assertEqual(gkeys, ["foo.b.1", "foo.b.2"])
            # check list w/o prefix
            self.g.set_prefix()
            gkeys = sorted(self.g["bar.b.*"].keys())
            self.assertEqual(gkeys, ["bar.b.1", "bar.b.2"])
        finally:
            # rm -r $d
            for f in os.listdir(d):
                os.unlink(os.path.join(d, f))
            os.rmdir(d)

    def test_uncache(self):
        """Remove cached query engine(s) from ConfigGroup.
        """
        keys = ("foo.a", "foo", "bar")
        for i in range(len(keys)):
            self.g.add(keys[i], self.configs[i])
        # force instantiation/caching
        for i in range(len(keys)):
            self.g[keys[i]]
        left_behind = self.g[keys[2]]
        # remove all foo from cache
        self.g.uncache("foo*")
        # check that they are not cached
        for i in range(2):
            self.assertRaises(KeyError, self.g._cached.__getitem__, keys[i])
        # check that un-removed remain
        self.assertEqual(self.g[keys[2]], left_behind)

    def test_expand(self):
        """Add multiple collections at once with 'expand'.
        """
        self.g.add("foo", Cfg("data"), expand=True)
        # check that data.* got added as foo.*
        keys = set(self.g.keys())
        expect = set(["foo"] + [f.replace("data", "foo") for f in mockcoll])
        self.assertEqual(expect, keys)