def test_HawkesExpKern_solver_step(self):
    """...Test HawkesExpKern setting of step parameter of solver
    """
    for solver in solvers:
        if solver in ['bfgs']:
            msg = '^Solver "%s" has no settable step$' % solver
            with self.assertWarnsRegex(RuntimeWarning, msg):
                learner = HawkesExpKern(
                    self.decays, solver=solver, step=1,
                    **Test.specific_solver_kwargs(solver))
                self.assertIsNone(learner.step)
        else:
            learner = HawkesExpKern(
                self.decays, solver=solver, step=self.float_1,
                **Test.specific_solver_kwargs(solver))
            self.assertEqual(learner.step, self.float_1)
            self.assertEqual(learner._solver_obj.step, self.float_1)

            learner.step = self.float_2
            self.assertEqual(learner.step, self.float_2)
            self.assertEqual(learner._solver_obj.step, self.float_2)

        if solver in ['sgd']:
            msg = '^SGD step needs to be tuned manually$'
            with self.assertWarnsRegex(RuntimeWarning, msg):
                learner = HawkesExpKern(self.decays, solver='sgd',
                                        max_iter=1)
                learner.fit(self.events, 0.3)
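# A minimal sketch (not part of the original tests): since the SGD step must
# be tuned manually, a naive grid search over candidate steps could look like
# this. The step grid, `events`, and higher-is-better scoring are assumptions.
def tune_sgd_step(decays, events, steps=(1e-5, 1e-3, 1e-1)):
    best = None
    for step in steps:
        learner = HawkesExpKern(decays, solver='sgd', step=step, max_iter=100)
        learner.fit(events)
        score = learner.score()  # goodness of fit on the training events
        if best is None or score > best[0]:
            best = (score, step)
    return best[1]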
def test_corresponding_simu(self):
    """...Test that the corresponding simulation object is correctly built
    """
    learner = HawkesExpKern(self.decays, max_iter=10)
    learner.fit(self.events)

    corresponding_simu = learner._corresponding_simu()
    self.assertEqual(corresponding_simu.decays, learner.decays)
    np.testing.assert_array_equal(corresponding_simu.baseline,
                                  learner.baseline)
    np.testing.assert_array_equal(corresponding_simu.adjacency,
                                  learner.adjacency)
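# A minimal sketch (not part of the original tests): fitted parameters can be
# fed back into a forward simulation, assuming tick's public
# SimuHawkesExpKernels API; `end_time` is an arbitrary choice.
from tick.hawkes import SimuHawkesExpKernels

def simulate_from_fit(learner, end_time=1000.0):
    simu = SimuHawkesExpKernels(adjacency=learner.adjacency,
                                decays=learner.decays,
                                baseline=learner.baseline,
                                end_time=end_time, verbose=False)
    simu.simulate()
    return simu.timestamps  # one array of event timestamps per node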
def inference(dataset, decays):
    adj, events, n = load(dataset)
    tick_events = events_to_tick_events(events, n)

    # Fit an exponential-kernel Hawkes learner with an l1 penalty
    learner = HawkesExpKern(decays=decays, penalty="l1", solver="agd",
                            C=1000, verbose=True)
    learner.fit(tick_events)

    influence_matrix = learner.adjacency
    baseline = learner.baseline
    print("score = {}".format(learner.score()))

    # Persist the fitted model parameters for later use
    with open("./model/" + dataset + ".pickle", "wb") as f:
        pickle.dump([influence_matrix, baseline, decays, n], f)
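# A minimal sketch (not part of the original script): reloading the model
# pickled by `inference` above, assuming the same ./model/<dataset>.pickle
# path and list ordering.
import pickle

def load_model(dataset):
    with open("./model/" + dataset + ".pickle", "rb") as f:
        # The pickle stores [influence_matrix, baseline, decays, n]
        influence_matrix, baseline, decays, n = pickle.load(f)
    return influence_matrix, baseline, decays, n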
def test_HawkesExpKern_score(self):
    """...Test HawkesExpKern score method
    """
    n_nodes = 2
    n_realizations = 3

    train_events = [[
        np.cumsum(np.random.rand(4 + i)) for i in range(n_nodes)
    ] for _ in range(n_realizations)]

    test_events = [[
        np.cumsum(np.random.rand(4 + i)) for i in range(n_nodes)
    ] for _ in range(n_realizations)]

    learner = HawkesExpKern(self.decays)

    msg = '^You must either call `fit` before `score` or provide events$'
    with self.assertRaisesRegex(ValueError, msg):
        learner.score()

    given_baseline = np.random.rand(n_nodes)
    given_adjacency = np.random.rand(n_nodes, n_nodes)

    learner.fit(train_events)

    train_score_current_coeffs = learner.score()
    self.assertAlmostEqual(train_score_current_coeffs, 2.0855840)

    train_score_given_coeffs = learner.score(
        baseline=given_baseline, adjacency=given_adjacency)
    self.assertAlmostEqual(train_score_given_coeffs, 0.59502417)

    test_score_current_coeffs = learner.score(test_events)
    self.assertAlmostEqual(test_score_current_coeffs, 1.6001762)

    test_score_given_coeffs = learner.score(
        test_events, baseline=given_baseline, adjacency=given_adjacency)
    self.assertAlmostEqual(test_score_given_coeffs, 0.89322199)
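# A minimal sketch (not part of the original tests): since `score` measures
# goodness of fit on held-out events, it can drive a simple model selection
# over candidate decays. The grid and higher-is-better scoring are
# assumptions.
def select_decay(train_events, test_events, candidate_decays):
    best = None
    for decays in candidate_decays:
        learner = HawkesExpKern(decays)
        learner.fit(train_events)
        score = learner.score(test_events)
        if best is None or score > best[0]:
            best = (score, decays)
    return best[1]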
def test_HawkesExpKern_fit_start(self):
    """...Test HawkesExpKern starting point of fit method
    """
    n_nodes = len(self.events)
    n_coefs = n_nodes + n_nodes * n_nodes

    # Do not step
    learner = HawkesExpKern(self.decays, max_iter=-1)

    learner.fit(self.events)
    np.testing.assert_array_equal(learner.coeffs, np.ones(n_coefs))

    learner.fit(self.events, start=self.float_1)
    np.testing.assert_array_equal(learner.coeffs,
                                  np.ones(n_coefs) * self.float_1)

    learner.fit(self.events, start=self.int_1)
    np.testing.assert_array_equal(learner.coeffs,
                                  np.ones(n_coefs) * self.int_1)

    random_coeffs = np.random.rand(n_coefs)
    learner.fit(self.events, start=random_coeffs)
    np.testing.assert_array_equal(learner.coeffs, random_coeffs)
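# A minimal sketch (not part of the original tests): the coefficient layout
# implied by n_coefs = n_nodes + n_nodes * n_nodes above, i.e. baseline
# entries first, then the flattened adjacency. Row-major ordering is an
# assumption.
def unpack_coeffs(coeffs, n_nodes):
    baseline = coeffs[:n_nodes]
    adjacency = coeffs[n_nodes:].reshape(n_nodes, n_nodes)
    return baseline, adjacency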
def test_HawkesExpKern_fit(self):
    """...Test HawkesExpKern fit with different solvers and penalties
    """
    sto_seed = 179312
    n_nodes = 2
    events, baseline, adjacency = Test.get_train_data(
        n_nodes=n_nodes, betas=self.decays)

    start = 0.3
    initial_adjacency_error = Test.estimation_error(
        start * np.ones((n_nodes, n_nodes)), adjacency)

    for gofit in gofits:
        for penalty in penalties:
            for solver in solvers:
                solver_kwargs = {
                    'penalty': penalty,
                    'tol': 1e-10,
                    'solver': solver,
                    'verbose': False,
                    'max_iter': 10,
                    'gofit': gofit
                }

                if penalty != 'none':
                    solver_kwargs['C'] = 50

                if solver in ['sgd', 'svrg']:
                    solver_kwargs['random_state'] = sto_seed

                # Manually set step for stochastic solvers
                if solver == 'sgd' and gofit == 'likelihood':
                    solver_kwargs['step'] = 3e-1
                elif solver == 'sgd' and gofit == 'least-squares':
                    solver_kwargs['step'] = 1e-5
                elif solver == 'svrg' and gofit == 'likelihood':
                    solver_kwargs['step'] = 1e-3
                elif solver == 'svrg' and gofit == 'least-squares':
                    continue

                if solver == 'bfgs':
                    # BFGS only accepts ProxZero and ProxL2sq for now
                    if penalty != 'l2':
                        continue

                if penalty == 'nuclear':
                    # Nuclear penalty is only compatible with batch solvers
                    if solver in HawkesExpKern._solvers_stochastic:
                        continue

                learner = HawkesExpKern(self.decays, **solver_kwargs)
                learner.fit(events, start=start)
                adjacency_error = Test.estimation_error(
                    learner.adjacency, adjacency)
                self.assertLess(
                    adjacency_error, initial_adjacency_error * 0.8,
                    "solver %s with penalty %s and gofit %s reached "
                    "too high adjacency error" % (solver, penalty, gofit))
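# A minimal sketch (not part of the original tests): a plausible form of the
# Test.estimation_error helper used above, as a relative squared Frobenius
# error; the actual helper may differ.
import numpy as np

def estimation_error(estimated, truth):
    return np.linalg.norm(estimated - truth) ** 2 / np.linalg.norm(truth) ** 2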