def test_add_new_element(self):
    """A single insert populates M, seq_ids and the graph; repeated
    inserts stack row-wise in insertion order."""
    memory = ag.ObjectsMemory()
    first = torch.arange(10, 20)[None, ...]
    memory.add_new_element(first, 1)
    npt.assert_equal(utils.t2a(memory.M), np.arange(10, 20).reshape((1, 10)))
    npt.assert_equal(memory.seq_ids, np.arange(1, 2))
    self.assertEqual(set(memory.G.nodes), {1})

    # Fresh memory: three consecutive inserts with sequence ids 1..3.
    memory = ag.ObjectsMemory()
    for sid, lo in enumerate((10, 20, 30), start=1):
        memory.add_new_element(torch.arange(lo, lo + 10)[None, ...], sid)
    npt.assert_equal(utils.t2a(memory.M), np.arange(10, 40).reshape((3, 10)))
    npt.assert_equal(memory.seq_ids, np.arange(1, 4))
    self.assertEqual(set(memory.G.nodes), {1, 2, 3})
def get_data(session_ds, inc_eval_ds, ms_band, db_eps):
    """Cluster the joint (session + incremental-eval) embeddings and score
    the predictions on the eval slice.

    Args:
        session_ds: iterable of (inputs, label) pairs; inputs[0] holds the
            embedding tensor (converted to a numpy array via utils.t2a).
        inc_eval_ds: iterable with the same structure, used for evaluation.
        ms_band: iterable of bandwidths, one MeanShift run per value.
        db_eps: iterable of eps values, one DBSCAN run per value.

    Returns:
        Tuple (aris, amis, inc_pred, inc_eval_lab): per-clusterer adjusted
        Rand and adjusted mutual-information scores, the predicted labels on
        the eval slice (rows ordered: MeanShift runs, DBSCAN runs, OPTICS),
        and the ground-truth eval labels.
    """
    session_data = list(session_ds)
    inc_eval_data = list(inc_eval_ds)
    session_emb = np.squeeze([utils.t2a(d[0][0]) for d in session_data])
    session_lab = np.squeeze([d[1] for d in session_data])
    inc_eval_emb = np.squeeze([utils.t2a(d[0][0]) for d in inc_eval_data])
    inc_eval_lab = np.squeeze([d[1] for d in inc_eval_data])
    # Every clusterer sees both splits at once; only the eval slice is scored.
    X = np.concatenate((session_emb, inc_eval_emb))
    meanshifts = [cl.MeanShift(bandwidth=b).fit_predict(X) for b in ms_band]
    optics = cl.OPTICS(min_samples=1).fit_predict(X)
    dbscans = [cl.DBSCAN(eps=e, min_samples=1).fit_predict(X) for e in db_eps]
    res = np.array(meanshifts + dbscans + [optics])
    # The eval samples were appended after the session samples in X, so
    # their predictions are the columns past session_lab.size.
    inc_pred = res[:, session_lab.size:]
    aris = [adjusted_rand_score(p, inc_eval_lab) for p in inc_pred]
    amis = [
        adjusted_mutual_info_score(p, inc_eval_lab, average_method='max')
        for p in inc_pred
    ]
    return np.array(aris), np.array(amis), inc_pred, inc_eval_lab
def test_get_something(self):
    """get_sid / get_embed retrieve the sequence id and the stored
    embedding for a given memory index."""
    memory = ag.ObjectsMemory()
    embeds = [torch.arange(lo, lo + 10) for lo in (10, 20, 30)]
    for sid, emb in enumerate(embeds, start=1):
        memory.add_new_element(emb, sid)
    npt.assert_equal(memory.get_sid(0), 1)
    npt.assert_equal(utils.t2a(memory.get_embed(0)), utils.t2a(embeds[0]))
def test_globalmean(self):
    """GlobalMean reduces each (T, D) tensor in the batch to a single
    D-vector; all-ones stays ones, all-zeros stays zeros."""
    inputs = [torch.ones(5, 3), torch.zeros(6, 3)]
    pooler = models.GlobalMean()
    pooled = pooler.forward(inputs)
    pooled = np.array([utils.t2a(p.squeeze(dim=0)) for p in pooled])
    npt.assert_equal(pooled, np.array([np.ones(3), np.zeros(3)]))
    # A single-element batch is handled the same way.
    pooled = pooler.forward(inputs[:1])
    npt.assert_equal(utils.t2a(pooled[0].squeeze(dim=0)), np.ones(3))
def test_recursivereduction(self):
    """RecursiveReduction maps each variable-length (T, 3) tensor to a
    fixed-size vector, one row per input tensor."""
    inputs = [torch.ones(50, 3), torch.zeros(60, 3)]
    reducer = models.RecursiveReduction(3)
    outputs = reducer.forward(inputs)
    stacked = np.concatenate([utils.t2a(o) for o in outputs])
    self.assertEqual(stacked.shape, (2, 3))
def test_get_knn(self):
    """get_knn returns (distances, indices); the indices point at memory
    rows ordered nearest-first, for single and batched queries."""
    memory = ag.ObjectsMemory()
    t1 = torch.arange(10, 20).float()
    t2 = torch.arange(20, 30).float()
    t3 = torch.arange(40, 50).float()
    for sid, emb in enumerate((t1, t2, t3), start=1):
        memory.add_new_element(emb, sid)
    # Single query: its own row first, then the closest other row.
    npt.assert_equal(utils.t2a(memory.get_knn(t1, k=1)[1]), 0)
    npt.assert_equal(utils.t2a(memory.get_knn(t1, k=2)[1]), np.array([[0, 1]]))
    # Batched query of two embeddings: one result row per query.
    batch = torch.stack([t1, t2])
    npt.assert_equal(
        utils.t2a(memory.get_knn(batch, k=1)[1]), np.array([[0], [1]]))
    npt.assert_equal(
        utils.t2a(memory.get_knn(batch, k=2)[1]), np.array([[0, 1], [1, 0]]))
def test_add_neighbors(self):
    """add_neighbors links a sequence id to the nodes at the given memory
    indices without disturbing the stored embeddings."""
    memory = ag.ObjectsMemory()
    for sid, lo in enumerate((10, 20, 30), start=1):
        memory.add_new_element(torch.arange(lo, lo + 10)[None, ...], sid)
    memory.add_neighbors(3, [0])
    # Memory contents and ids are untouched by edge insertion.
    npt.assert_equal(utils.t2a(memory.M), np.arange(10, 40).reshape((3, 10)))
    npt.assert_equal(memory.seq_ids, np.arange(1, 4))
    self.assertEqual(set(memory.G.nodes), {1, 2, 3})
    # Normalize edge direction before comparing.
    edges = {tuple(sorted(e)) for e in memory.G.edges}
    self.assertEqual(edges, {(1, 3)})
def t_assert_equal(a, b):
    """Assert that tensors *a* and *b* are equal once converted to arrays."""
    left, right = utils.t2a(a), utils.t2a(b)
    npt.assert_equal(left, right)