Example #1
    def test_corresponding_simu(self):
        """...Test that the corresponding simulation object is correctly
        built
        """
        learner = HawkesSumExpKern(self.decays, max_iter=10)
        learner.fit(self.events)

        corresponding_simu = learner._corresponding_simu()
        np.testing.assert_array_equal(corresponding_simu.decays,
                                      learner.decays)
        np.testing.assert_array_equal(corresponding_simu.baseline,
                                      learner.baseline)
        np.testing.assert_array_equal(corresponding_simu.adjacency,
                                      learner.adjacency)

        learner = HawkesSumExpKern(self.decays,
                                   n_baselines=3,
                                   period_length=1,
                                   max_iter=10)
        learner.fit(self.events)

        corresponding_simu = learner._corresponding_simu()
        np.testing.assert_array_equal(corresponding_simu.decays,
                                      learner.decays)
        np.testing.assert_array_equal(corresponding_simu.baseline,
                                      learner.baseline)
        np.testing.assert_array_equal(corresponding_simu.adjacency,
                                      learner.adjacency)
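For context, the learner's private `_corresponding_simu` helper returns a `SimuHawkesSumExpKernels` object built from the fitted parameters, so its `decays`, `baseline` and `adjacency` must match the learner's. A minimal self-contained sketch of that round trip (parameter values below are illustrative, not the test fixture's):

import numpy as np
from tick.hawkes import HawkesSumExpKern, SimuHawkesSumExpKernels

decays = np.array([0.5, 2.0])          # n_decays = 2
baseline = np.array([0.3, 0.2])        # n_nodes = 2
adjacency = 0.1 * np.ones((2, 2, 2))   # shape (n_nodes, n_nodes, n_decays)

# Simulate events from the same sum-of-exponentials kernel family
simu = SimuHawkesSumExpKernels(adjacency, decays, baseline=baseline,
                               end_time=1000, verbose=False, seed=1309)
simu.simulate()

learner = HawkesSumExpKern(decays, max_iter=100)
learner.fit(simu.timestamps)

# The returned simulation object carries the fitted parameters
corresponding_simu = learner._corresponding_simu()
print(corresponding_simu.baseline, learner.baseline)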
Example #2
    def test_HawkesSumExpKern_solver_step(self):
        """...Test HawkesSumExpKern setting of step parameter
        of solver
        """
        for solver in solvers:
            if solver in ['bfgs']:
                msg = '^Solver "%s" has no settable step$' % solver
                with self.assertWarnsRegex(RuntimeWarning, msg):
                    learner = HawkesSumExpKern(
                        self.decays,
                        solver=solver,
                        step=1,
                        **Test.specific_solver_kwargs(solver))
                    self.assertIsNone(learner.step)
            else:
                learner = HawkesSumExpKern(
                    self.decays,
                    solver=solver,
                    step=self.float_1,
                    **Test.specific_solver_kwargs(solver))
                self.assertEqual(learner.step, self.float_1)
                self.assertEqual(learner._solver_obj.step, self.float_1)
                learner.step = self.float_2
                self.assertEqual(learner.step, self.float_2)
                self.assertEqual(learner._solver_obj.step, self.float_2)

            if solver in ['sgd']:
                msg = '^SGD step needs to be tuned manually$'
                with self.assertWarnsRegex(RuntimeWarning, msg):
                    learner = HawkesSumExpKern(self.decays,
                                               solver='sgd',
                                               max_iter=1)
                    learner.fit(self.events, 0.3)
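The behavior under test: `bfgs` computes its own step, so a user-supplied `step` is dropped with a `RuntimeWarning`, while step-based solvers forward the value to their underlying solver object, and `sgd` additionally warns that its step must be tuned by hand. A short sketch of the same API surface, outside the test harness:

import warnings
from tick.hawkes import HawkesSumExpKern

decays = [0.5, 2.0]

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    learner = HawkesSumExpKern(decays, solver='bfgs', step=1.0)
print(learner.step)        # None: bfgs ignores the user step
print(caught[-1].message)  # Solver "bfgs" has no settable step

learner = HawkesSumExpKern(decays, solver='svrg', step=0.1)
learner.step = 0.05        # also updates learner._solver_obj.step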
Example #3
    def test_HawkesSumExpKern_fit(self):
        """...Test HawkesSumExpKern fit with different solvers
        and penalties
        """
        sto_seed = 179312
        n_nodes = 2
        events, baseline, adjacency = Test.get_train_data(
            self.decays, n_nodes=n_nodes, n_decays=self.n_decays)
        start = 0.01
        initial_adjacency_error = \
            Test.estimation_error(start * np.ones((n_nodes, n_nodes)),
                                  adjacency)

        for penalty in penalties:
            for solver in solvers:

                solver_kwargs = {
                    'penalty': penalty,
                    'tol': 1e-10,
                    'solver': solver,
                    'verbose': False,
                    'max_iter': 1000
                }

                if penalty != 'none':
                    solver_kwargs['C'] = 50

                if solver in ['sgd', 'svrg']:
                    solver_kwargs['random_state'] = sto_seed

                # sgd needs a hand-tuned step; svrg is skipped here
                if solver == 'sgd':
                    solver_kwargs['step'] = 1e-5
                elif solver == 'svrg':
                    continue

                if solver == 'bfgs':
                    # BFGS only accepts ProxZero and ProxL2sq for now
                    if penalty != 'l2':
                        continue

                if penalty == 'nuclear':
                    # Nuclear penalty only compatible with batch solvers
                    if solver in \
                            HawkesSumExpKern._solvers_stochastic:
                        continue

                learner = HawkesSumExpKern(self.decays, **solver_kwargs)
                learner.fit(events, start=start)
                adjacency_error = Test.estimation_error(
                    learner.adjacency, adjacency)
                self.assertLess(
                    adjacency_error, initial_adjacency_error * 0.8,
                    "solver %s with penalty %s "
                    "reached too high adjacency error" % (solver, penalty))
Example #4
    def test_sparse(self):
        """...Test that original coeffs are correctly retrieved when some
        realizations are empty
        """
        baseline, adjacency, events = self.simulate_sparse_realization()

        learner = HawkesSumExpKern(self.decays, verbose=False)
        learner.fit(events)

        np.testing.assert_array_almost_equal(learner.baseline,
                                             baseline,
                                             decimal=1)
        np.testing.assert_array_almost_equal(learner.adjacency,
                                             adjacency,
                                             decimal=1)
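An "empty realization" here means a node that produced no events in a given realization. The `events` argument stays a list of realizations, each a list of per-node timestamp arrays, with an empty array standing in for silent nodes; a hypothetical shape sketch:

import numpy as np

# Two realizations over two nodes; node 1 is silent in the second
events = [
    [np.array([1.0, 2.5, 7.8]), np.array([0.3, 4.2])],
    [np.array([2.1, 3.3]), np.array([])],
]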
Example #5
    def test_HawkesSumExpKern_score(self):
        """...Test HawkesSumExpKern score method
        """
        n_nodes = 2
        n_realizations = 3

        train_events = [[
            np.cumsum(np.random.rand(4 + i)) for i in range(n_nodes)
        ] for _ in range(n_realizations)]

        test_events = [[
            np.cumsum(np.random.rand(4 + i)) for i in range(n_nodes)
        ] for _ in range(n_realizations)]

        learner = HawkesSumExpKern(self.decays)

        msg = '^You must either call `fit` before `score` or provide events$'
        with self.assertRaisesRegex(ValueError, msg):
            learner.score()

        given_baseline = np.random.rand(n_nodes)
        given_adjacency = np.random.rand(n_nodes, n_nodes, self.n_decays)

        learner.fit(train_events)

        train_score_current_coeffs = learner.score()
        self.assertAlmostEqual(train_score_current_coeffs, 1.684827141)

        train_score_given_coeffs = learner.score(baseline=given_baseline,
                                                 adjacency=given_adjacency)
        self.assertAlmostEqual(train_score_given_coeffs, 1.16247892)

        test_score_current_coeffs = learner.score(test_events)
        self.assertAlmostEqual(test_score_current_coeffs, 1.66494295)

        test_score_given_coeffs = learner.score(test_events,
                                                baseline=given_baseline,
                                                adjacency=given_adjacency)
        self.assertAlmostEqual(test_score_given_coeffs, 1.1081362)
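`score` evaluates the log-likelihood of events under the current coefficients, or under explicitly supplied `baseline` and `adjacency`; the hard-coded expected values above only hold for the test class's seeded RNG. A usage sketch with freshly generated data (seed and values are ours):

import numpy as np
from tick.hawkes import HawkesSumExpKern

np.random.seed(320982)
decays = [0.5, 2.0]                    # n_decays = 2
train = [np.cumsum(np.random.rand(10)) for _ in range(2)]
test = [np.cumsum(np.random.rand(10)) for _ in range(2)]

learner = HawkesSumExpKern(decays, max_iter=100)
learner.fit(train)

print(learner.score())       # goodness of fit on the training events
print(learner.score(test))   # likelihood of held-out events
print(learner.score(test, baseline=np.ones(2),
                    adjacency=0.1 * np.ones((2, 2, 2))))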
Example #6
    def test_HawkesSumExpKern_fit_start(self):
        """...Test HawkesSumExpKern starting point of fit method
        """
        n_nodes = len(self.events)
        n_coefs = n_nodes + n_nodes * n_nodes * self.n_decays
        # max_iter=-1: the solver takes no steps, so fit returns
        # the starting point unchanged
        learner = HawkesSumExpKern(self.decays, max_iter=-1)

        learner.fit(self.events)
        np.testing.assert_array_equal(learner.coeffs, np.ones(n_coefs))

        learner.fit(self.events, start=self.float_1)
        np.testing.assert_array_equal(learner.coeffs,
                                      np.ones(n_coefs) * self.float_1)

        learner.fit(self.events, start=self.int_1)
        np.testing.assert_array_equal(learner.coeffs,
                                      np.ones(n_coefs) * self.int_1)

        random_coeffs = np.random.rand(n_coefs)
        learner.fit(self.events, start=random_coeffs)
        np.testing.assert_array_equal(learner.coeffs, random_coeffs)
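The coefficient vector stacks the `n_nodes` baselines first, followed by the flattened `n_nodes * n_nodes * n_decays` adjacency, which is where `n_coefs = n_nodes + n_nodes * n_nodes * n_decays` comes from; with `max_iter=-1` the solver never moves, so `fit` simply echoes the start point into `learner.coeffs`. A sketch of that layout (assuming the baseline block comes first, as elsewhere in tick's Hawkes learners):

import numpy as np

n_nodes, n_decays = 2, 3
n_coefs = n_nodes + n_nodes * n_nodes * n_decays

coeffs = np.arange(n_coefs, dtype=float)   # a recognizable start vector
baseline = coeffs[:n_nodes]
adjacency = coeffs[n_nodes:].reshape(n_nodes, n_nodes, n_decays)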