    def test_get_minibatch_100(self):
        self.opt['minibatch_size'] = 100
        l = LustreGame.Lustre(self.opt)
        minibatch = l.get_minibatch()
        self.assertEqual(common.num_ticks - l.ticks_per_observation,
                         len(minibatch))
        random_index = random.randint(0, len(minibatch) - 1)
        random_ts = minibatch[random_index][4]
        exp_observation = self.db.get_observation(random_ts)
        # Check that we can find identical data for this random_ts.
        # Syntax explanation: https://stackoverflow.com/a/9542768
        act_observation = next(x for x in minibatch if x[4] == random_ts)[0]
        self.assertTrue(np.array_equal(act_observation, exp_observation))

        # Simulate the case where one action is missing.
        c = self.db.conn.cursor()
        c.execute('DELETE FROM actions WHERE ts = 25')
        self.db.conn.commit()
        self.assertEqual(0, self.db.get_action(25))
        minibatch = l.get_minibatch()
        # The minibatch should still contain the same number of samples,
        # with one sample having action == 0.
        self.assertEqual(common.num_ticks - l.ticks_per_observation,
                         len(minibatch))

        c.execute('DELETE FROM actions')
        self.db.conn.commit()
        self.assertIsNone(l.get_minibatch_from_db())
    def test_get_minibatch_32_from_memcache(self):
        l = LustreGame.Lustre(self.opt)
        mb = l.get_minibatch()
        self.assertEqual(l.minibatch_size, len(mb))
        for observ, action, reward, observ_next, ts in mb:
            self.assertTrue(np.array_equal(observ, l.db.get_observation(ts)))
            self.assertTrue(
                np.array_equal(observ_next, l.db.get_observation(ts + 1)))
            self.assertEqual(action, l.db.get_action(ts))
    def test_memcache(self):
        l = LustreGame.Lustre(self.opt)
        for i in range(l.ticks_per_observation - 1):
            with self.assertRaises(NotEnoughDataError):
                l.get_observation_by_cache_idx(i)
        with self.assertRaises(NotEnoughDataError):
            l.get_next_observation_by_cache_idx(common.num_ticks - 1)
        l.get_next_observation_by_cache_idx(common.num_ticks - 2)
    def test_get_minibatch_32(self):
        l = LustreGame.Lustre(self.opt)
        mb = l.get_minibatch_from_db()
        self.assertEqual(l.minibatch_size, len(mb))
        random_index = random.randint(0, len(mb) - 1)
        random_ts = mb[random_index][4]
        exp_observation = self.db.get_observation(random_ts)
        # Check that we can find identical data for this random_ts.
        # Syntax explanation: https://stackoverflow.com/a/9542768
        act_observation = next(x for x in mb if x[4] == random_ts)[0]
        self.assertTrue(np.array_equal(act_observation, exp_observation))
        self.assertEqual(l.minibatch_size, len(mb))
    def test_not_enough_action(self):
        """This test case only applies to get_minibatch_from_db().

        get_minibatch() (the memcache version) always treats missing actions
        as 0.
        """
        common.xstart = 23
        db2 = common.populate_testdb(self.test_db_file)
        self.assertEqual(db2.get_action_row_count(),
                         common.num_ticks - 23 + 1)
        l = LustreGame.Lustre(self.opt)
        minibatch = l.get_minibatch_from_db()
        self.assertEqual(common.num_ticks - common.xstart, len(minibatch))
        random_index = random.randint(0, len(minibatch) - 1)
        random_ts = minibatch[random_index][4]
        exp_observation = db2.get_observation(random_ts)
        # Check that we can find identical data for this random_ts.
        # Syntax explanation: https://stackoverflow.com/a/9542768
        act_observation = next(x for x in minibatch if x[4] == random_ts)[0]
        self.assertTrue(np.array_equal(act_observation, exp_observation))
    def test_reward_functions(self):
        nodeid_map = {
            'ryu.soe.ucsc.edu': 1,
            'sagat.soe.ucsc.edu': 2,
            'zangief.soe.ucsc.edu': 3,
            'guile.soe.ucsc.edu': 4,
            'blanka.soe.ucsc.edu': 5,
            'ken.soe.ucsc.edu': 6,
            'vega.soe.ucsc.edu': 7,
            'abel.soe.ucsc.edu': 8
        }
        opt = {
            'dbfile': os.path.join(
                os.path.dirname(os.path.realpath(__file__)),
                '../datasets/filebench_2016-09-05_18-14-07/ascar_replay_db.sqlite'
            ),
            'nodeid_map': nodeid_map,
            'clients': [
                'ryu.soe.ucsc.edu', 'sagat.soe.ucsc.edu',
                'zangief.soe.ucsc.edu', 'guile.soe.ucsc.edu'
            ],
            'pi_per_client_obd': 8,
            'obd_per_client_ma': 4,
            'tick_data_size': 8 * 4 * 4,  # see conf.py for explanation
            'num_actions': 1,             # unused
            'missing_entry_tolerance': 0,
        }
        l = LustreGame.Lustre(opt)
        self.assertAlmostEqual(
            1.21978880e+07 + 5.42585720e+07 + 1.57655040e+07 + 6.11041280e+07,
            l.cumulative_reward)
        # The reward of a step is the change in total throughput between two
        # consecutive observations.
        self.assertEqual(
            l._calc_total_throughput(l.db.get_observation(1473124606)) -
            l._calc_total_throughput(l.db.get_observation(1473124605)),
            l.collect_reward())
    def test_get_observation(self):
        ts = self.db.get_last_ts()
        self.assertEqual(common.last_ts, ts)
        obs = self.db.get_observation(ts - common.num_ticks + 26)
        # The observation should be sorted by MA ID.
        # blanka: 1
        exp_obs = np.array([
            68140, 70908, 402, 1, 40396, 22020096,
            56112, 57510, 293, 1, 40396, 19922944,
            57133, 55209, 341, 1, 40396, 22020096,
            57047, 61719, 361, 2, 40396, 24117248,
            45642, 47516, 254, 1, 32840, 22020096,
            36120, 39135, 242, 1, 32840, 22020096,
            51568, 52817, 206, 1, 40396, 23068672,
            46591, 46726, 285, 4, 32840, 19922944,
            37058, 34825, 287, 1, 32840, 17825792,
            70221, 72842, 249, 1, 40396, 28311552,
            45684, 45039, 169, 1, 32840, 20971520,
            51986, 50942, 302, 3, 40396, 23068672,
            52184, 49322, 231, 1, 40396, 26214400,
            39298, 41973, 168, 1, 32840, 17825792,
            51535, 49789, 295, 1, 40396, 26214400,
            47524, 51471, 166, 2, 33980, 23068672
        ], dtype=float)
        # dhalsim: 2
        exp_obs = np.append(exp_obs, [
            44672, 45032, 344, 1, 32840, 28311552,
            47702, 47393, 359, 3, 32840, 25165824,
            40484, 42136, 269, 1, 32840, 18874368,
            29907, 32180, 575, 1, 33980, 22020096,
            71347, 68676, 228, 1, 40396, 16777216,
            43211, 42763, 304, 1, 32840, 25165824,
            42580, 42751, 316, 1, 32840, 27262976,
            45113, 45804, 410, 1, 32840, 24117248,
            55170, 52614, 828, 1, 33980, 23068672,
            42609, 41687, 335, 1, 32840, 22020096,
            42459, 42359, 259, 1, 32840, 22020096,
            33002, 38247, 1532, 1, 33980, 26214400,
            34712, 32994, 262, 1, 32840, 17825792,
            43073, 44087, 135, 1, 32840, 24117248,
            44707, 46093, 575, 1, 33980, 25165824,
            74802, 76575, 279, 1, 40396, 25165824,
        ])
        # gouken: 3
        exp_obs = np.append(exp_obs, [
            38596, 37619, 350, 1, 32840, 18874368,
            51683, 51510, 333, 1, 40396, 30408704,
            61910, 58733, 283, 1, 40396, 20971520,
            60315, 56297, 287, 2, 40396, 20971520,
            44296, 44129, 255, 1, 32840, 26214400,
            58944, 55465, 1213, 1, 40396, 19922944,
            39664, 40037, 347, 1, 32840, 20971520,
            39313, 40251, 240, 1, 32840, 22020096,
            52859, 51982, 219, 1, 40396, 24117248,
            45125, 45227, 224, 1, 32840, 14680064,
            48504, 45885, 399, 1, 33980, 22020096,
            53535, 50686, 270, 1, 33980, 20971520,
            78392, 86948, 334, 1, 40396, 13631488,
            39997, 38007, 270, 1, 32840, 28311552,
            41975, 42880, 307, 1, 32840, 24117248,
            41229, 40395, 296, 1, 32840, 25165824
        ])
        # ryu: 4
        exp_obs = np.append(exp_obs, [
            44618, 44803, 286, 4, 32840, 24117248,
            58567, 58088, 231, 1, 40396, 24117248,
            46106, 50170, 255, 1, 33980, 22020096,
            45173, 46716, 262, 1, 32840, 24117248,
            50793, 50951, 251, 3, 40396, 24117248,
            44843, 42653, 377, 1, 32840, 20971520,
            46149, 48234, 336, 1, 32840, 24117248,
            41938, 40991, 185, 1, 32840, 24117248,
            44656, 44092, 164, 1, 32840, 23068672,
            43573, 45397, 166, 1, 32840, 24117248,
            42911, 45031, 240, 1, 32840, 19922944,
            60013, 61752, 243, 1, 40396, 22020096,
            61485, 60927, 362, 1, 40396, 23068672,
            45752, 47223, 146, 1, 32840, 24117248,
            39219, 38875, 276, 1, 32840, 27262976,
            42183, 44488, 191, 1, 32840, 20971520,
        ])
        # seth: 5
        exp_obs = np.append(exp_obs, [
            51745, 51235, 211, 5, 40396, 25165824,
            46701, 47343, 316, 1, 32840, 29360128,
            51581, 55075, 277, 1, 40396, 24117248,
            49948, 50423, 195, 1, 40396, 19922944,
            51418, 54090, 275, 7, 40396, 22020096,
            48995, 47562, 275, 1, 33980, 25165824,
            33803, 35038, 289, 1, 32840, 29360128,
            64600, 65689, 319, 1, 40396, 18874368,
            68445, 70700, 206, 9, 40396, 18874368,
            54304, 51380, 337, 1, 40396, 18874368,
            79158, 75985, 154, 1, 40396, 14680064,
            60275, 60124, 586, 1, 33980, 22020096,
            48790, 48884, 294, 10, 40396, 22020096,
            48906, 49216, 1150, 1, 33980, 22020096,
            63492, 56481, 240, 1, 40396, 18874368,
            53556, 52403, 560, 1, 40396, 22020096
        ])
        self.assertTrue(np.array_equal(exp_obs, obs))

        # Test fetching from memcache.
        l = LustreGame.Lustre(self.opt)
        self.assertTrue(
            np.array_equal(exp_obs, l.get_observation_by_cache_idx(25)))
        last_ts_data = self.db.get_last_n_observation()[0]
        self.assertTrue(np.array_equal(last_ts_data, l.observe()))
        del l

        # Introduce a missing entry.
        c = self.db.conn.cursor()
        c.execute('DELETE FROM pis WHERE ts = ? AND ma_id=3',
                  (ts - common.num_ticks + 25, ))
        self.db.conn.commit()
        ticks_per_observation = 4
        # Calculate the location of the hole.
        hole_start = 2 * common.num_obd * common.pi_per_obd * ticks_per_observation +\
            2 * common.num_obd * common.pi_per_obd
        hole_end = hole_start + common.num_obd * common.pi_per_obd
        exp_obs[hole_start:hole_end] = 0
        obs = self.db.get_observation(ts - common.num_ticks + 26)
        self.assertTrue(np.array_equal(exp_obs, obs))

        # Test fetching the observation with the hole from memcache.
        l = LustreGame.Lustre(self.opt)
        self.assertTrue(
            np.array_equal(exp_obs, l.get_observation_by_cache_idx(25)))
        del l

        # Remove one MA's data from the last ts and check that get_last_ts()
        # returns ts-1.
        c.execute('DELETE FROM pis WHERE ts = ? AND ma_id=3',
                  (common.last_ts, ))
        self.db.conn.commit()
        self.assertEqual(common.last_ts - 1, self.db.get_last_ts())

        # observe() should return the ts-1 data because ts has a hole larger
        # than the missing entry tolerance.
        self.opt['missing_entry_tolerance'] = 0
        l = LustreGame.Lustre(self.opt)
        second_last_ts_data = l.db.get_last_n_observation()[0]
        self.assertFalse(np.array_equal(last_ts_data, second_last_ts_data))
        self.assertTrue(np.array_equal(second_last_ts_data, l.observe()))
        del l

        # It should still return the last ts data if we raise the missing
        # entry tolerance.
        self.opt['missing_entry_tolerance'] = 1
        l = LustreGame.Lustre(self.opt)
        hole_start = 2 * common.num_obd * common.pi_per_obd * ticks_per_observation +\
            3 * common.num_obd * common.pi_per_obd
        hole_end = hole_start + common.num_obd * common.pi_per_obd
        last_ts_data[hole_start:hole_end] = 0
        self.assertTrue(np.array_equal(last_ts_data, l.observe()))
        self.assertTrue(
            np.array_equal(last_ts_data, l.db.get_last_n_observation()[0]))
        del l

        # Introduce a big hole.
        c.execute(
            'INSERT INTO pis SELECT ma_id, ts+100 AS ts, pi_data FROM pis')
        self.db.conn.commit()
        l = LustreGame.Lustre(self.opt)
        for i in range(common.num_ticks, common.num_ticks + 3):
            with self.assertRaises(NotEnoughDataError):
                l.get_observation_by_cache_idx(i)
        self.assertTrue(
            np.array_equal(
                exp_obs,
                l.get_observation_by_cache_idx(25 + common.num_ticks)))
        del l
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OF THE UNIVERSITY OF CALIFORNIA BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from ascar import LustreGame
from ascar.DQLDaemon import DQLDaemon
from tests import common

__author__ = 'Yan Li'
__copyright__ = 'Copyright (c) 2016, 2017 The Regents of the University of California. All rights reserved.'

test_db_file = '/tmp/ascar-benchmark-dql.sqlite'
db = common.populate_testdb(test_db_file)
opt = common.dbopt
opt['ticks_per_observe'] = 4
game = LustreGame.Lustre(opt)
daemon = DQLDaemon(opt, game)
for _ in range(1000):
    daemon._do_training_step()
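
# The block below is a minimal timing sketch and an assumption, not part of the
# original benchmark: since the script above only runs the training steps, this
# reuses the `daemon` created above to report rough wall-clock cost per step.
import time

extra_steps = 1000
start = time.monotonic()
for _ in range(extra_steps):
    daemon._do_training_step()
elapsed = time.monotonic() - start
print('{} extra training steps took {:.2f} s ({:.2f} ms/step)'.format(
    extra_steps, elapsed, elapsed * 1000 / extra_steps))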