Example #1
    def __init__(self,
                 kernel_support=None,
                 kernel_size=10,
                 kernel_discretization=None,
                 tol=1e-5,
                 max_iter=100,
                 print_every=10,
                 record_every=10,
                 verbose=False,
                 n_threads=1):

        LearnerHawkesNoParam.__init__(self,
                                      n_threads=n_threads,
                                      verbose=verbose,
                                      tol=tol,
                                      max_iter=max_iter,
                                      print_every=print_every,
                                      record_every=record_every)

        if kernel_discretization is not None:
            self._learner = _HawkesEM(kernel_discretization, n_threads)
        elif kernel_support is not None:
            self._learner = _HawkesEM(kernel_support, kernel_size, n_threads)
        else:
            raise ValueError('Either kernel support or kernel discretization '
                             'must be provided')

        self.baseline = None
        self.kernel = None

        self.history.print_order = ["n_iter", "rel_baseline", "rel_kernel"]
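The constructor above requires either `kernel_support` (discretized into `kernel_size` bins) or an explicit `kernel_discretization` grid. Below is a minimal usage sketch, not part of the original snippet, assuming the class is exposed as `tick.hawkes.HawkesEM` (as in recent versions of the tick library) and using synthetic timestamps only to exercise the API.

import numpy as np
from tick.hawkes import HawkesEM

# Two-node toy data: one array of sorted timestamps per component.
rng = np.random.RandomState(0)
events = [np.cumsum(rng.exponential(1., size=200)) for _ in range(2)]

# Either give a kernel support that is discretized into kernel_size bins...
em = HawkesEM(kernel_support=4., kernel_size=20, max_iter=50)
em.fit(events)

# ...or pass an explicit discretization grid instead of a support.
em_grid = HawkesEM(kernel_discretization=np.linspace(0., 4., 21))
em_grid.fit(events)

print(em.baseline)  # one exogenous intensity per node
print(em.kernel)    # kernel values on the discretized support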
Example #2
    def __init__(self, max_mean_gaussian, n_gaussians=5, step_size=1e-7,
                 C=1e3, lasso_grouplasso_ratio=0.5, max_iter=50,
                 tol=1e-5, n_threads=1, verbose=False, print_every=10,
                 record_every=10, approx=0, em_max_iter=30,
                 em_tol=None):

        LearnerHawkesNoParam.__init__(self, verbose=verbose, max_iter=max_iter,
                                      print_every=print_every, tol=tol,
                                      n_threads=n_threads,
                                      record_every=record_every)
        self.baseline = None
        self.amplitudes = None

        self.n_gaussians = n_gaussians
        self.max_mean_gaussian = max_mean_gaussian
        self.step_size = step_size

        strength_lasso = lasso_grouplasso_ratio / C
        strength_grouplasso = (1. - lasso_grouplasso_ratio) / C

        self.em_max_iter = em_max_iter
        self.em_tol = em_tol

        self._learner = _HawkesSumGaussians(
            n_gaussians, max_mean_gaussian, step_size, strength_lasso,
            strength_grouplasso, em_max_iter, n_threads, approx)

        self.verbose = verbose

        self.history.print_order += ["rel_baseline", "rel_amplitudes"]
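A hedged usage sketch for the constructor above, assuming the class is exposed as `tick.hawkes.HawkesSumGaussians`; the event arrays are synthetic and only meant to show how `C` and `lasso_grouplasso_ratio` feed the two penalty strengths computed in the constructor.

import numpy as np
from tick.hawkes import HawkesSumGaussians

rng = np.random.RandomState(1)
events = [np.cumsum(rng.exponential(1., size=300)) for _ in range(2)]

# C and lasso_grouplasso_ratio are split into the two penalty strengths
# exactly as above: strength_lasso = ratio / C, strength_grouplasso = (1 - ratio) / C.
learner = HawkesSumGaussians(max_mean_gaussian=5., n_gaussians=5,
                             C=1e3, lasso_grouplasso_ratio=0.5, max_iter=20)
learner.fit(events)

print(learner.baseline)    # estimated exogenous intensities
print(learner.amplitudes)  # amplitudes of the Gaussian basis kernels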
Example #3
    def fit(self,
            events,
            end_times=None,
            baseline_start=None,
            kernel_start=None):
        """Fit the model according to the given training data.

        Parameters
        ----------
        events : `list` of `list` of `np.ndarray`
            List of Hawkes process realizations. Each realization is a list
            of length n_nodes, one entry per component of the Hawkes process:
            `events[i][j]` is a one-dimensional `numpy.ndarray` containing the
            timestamps of the events of component j in realization i.
            If only one realization is given, it will be wrapped into a list.

        end_times : `np.ndarray` or `float`, default=None
            End times of all the Hawkes processes that will be given to the
            model. If None, it will be set to each realization's latest time.
            If only one realization is provided, a float can be given.

        baseline_start : `None` or `np.ndarray`, shape=(n_nodes,), default=None
            Used to force start values for the baseline parameter.
            If `None`, the baseline starts from uniform values of 1.

        kernel_start : `None` or `np.ndarray`, shape=(n_nodes, n_nodes, kernel_size), default=None
            Used to force start values for the kernel parameter.
            If `None`, the kernel starts from random values.
        """
        LearnerHawkesNoParam.fit(self, events, end_times=end_times)
        self.solve(baseline_start=baseline_start, kernel_start=kernel_start)
        return self
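A sketch of the nested `events` layout and of the optional warm starts described in the docstring above, assuming the `HawkesEM` learner from Example #1 and `tick.hawkes` as the import path; the data and shapes are illustrative only.

import numpy as np
from tick.hawkes import HawkesEM

rng = np.random.RandomState(2)

def toy_realization(n_events):
    # One realization = one array of sorted timestamps per node.
    return [np.cumsum(rng.exponential(1., size=n_events)) for _ in range(2)]

# events[i][j] holds the timestamps of node j in realization i.
events = [toy_realization(150), toy_realization(200)]
end_times = np.array([max(r[0][-1], r[1][-1]) for r in events])

em = HawkesEM(kernel_support=3., kernel_size=10, max_iter=30)
em.fit(events,
       end_times=end_times,
       baseline_start=np.ones(2),               # uniform 1 start, as the default
       kernel_start=0.1 * np.ones((2, 2, 10)))  # (n_nodes, n_nodes, kernel_size)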
Example #4
    def fit(self, events, end_times=None, adjacency_start=None, R_start=None):
        """Fit the model according to the given training data.

        Parameters
        ----------
        events : `list` of `list` of `np.ndarray`
            List of Hawkes process realizations. Each realization is a list
            of length n_nodes, one entry per component of the Hawkes process:
            `events[i][j]` is a one-dimensional `numpy.ndarray` containing the
            timestamps of the events of component j in realization i.
            If only one realization is given, it will be wrapped into a list.

        end_times : `np.ndarray` or `float`, default=None
            End times of all the Hawkes processes that will be given to the
            model. If None, it will be set to each realization's latest time.
            If only one realization is provided, a float can be given.

        adjacency_start : `str` or `np.ndarray`, shape=(n_nodes + n_nodes * n_nodes,), default=None
            Initial guess for the adjacency matrix, used as the starting
            point of the optimization.
            If `None` and `R_start` is also `None`, a default starting point
            is estimated from the estimated cumulants.
            If `"random"`, the starting point is estimated from the estimated
            cumulants with a bit of randomness.

        R_start : `np.ndarray`, shape=(n_nodes, n_nodes), default=None
            R variable at which the optimization starts. Superseded by
            `adjacency_start` if `adjacency_start` is not `None`.
        """
        LearnerHawkesNoParam.fit(self, events, end_times=end_times)
        self.solve(adjacency_start=adjacency_start, R_start=R_start)
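A hedged usage sketch of the warm-start options above, assuming the class is exposed as `tick.hawkes.HawkesCumulantMatching`, that its constructor takes `integration_support` first (as in the next example), and that a TensorFlow >= 1.4.0 install is available as that constructor requires; the data is synthetic.

import numpy as np
from tick.hawkes import HawkesCumulantMatching

rng = np.random.RandomState(3)
events = [np.cumsum(rng.exponential(1., size=500)) for _ in range(2)]

learner = HawkesCumulantMatching(integration_support=3., max_iter=300)

# Default start: estimated from the cumulants themselves.
# Passing adjacency_start='random' perturbs that estimate; an explicit
# R_start matrix is used only when adjacency_start is None.
learner.fit(events, adjacency_start='random')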
Example #5
    def __init__(self, integration_support, C=1e3, penalty='none',
                 solver='adam', step=1e-2, tol=1e-8, max_iter=1000,
                 verbose=False, print_every=100, record_every=10,
                 solver_kwargs=None, cs_ratio=None, elastic_net_ratio=0.95):
        try:
            import tensorflow as tf
        except ImportError:
            raise ImportError('`tensorflow` >= 1.4.0 must be available to use '
                              'HawkesCumulantMatching')

        self._tf_graph = tf.Graph()

        LearnerHawkesNoParam.__init__(
            self, tol=tol, verbose=verbose, max_iter=max_iter,
            print_every=print_every, record_every=record_every)

        self._elastic_net_ratio = None
        self.C = C
        self.penalty = penalty
        self.elastic_net_ratio = elastic_net_ratio
        self.step = step
        self.cs_ratio = cs_ratio
        self.solver_kwargs = solver_kwargs
        if self.solver_kwargs is None:
            self.solver_kwargs = {}

        self._cumulant_computer = _HawkesCumulantComputer(
            integration_support=integration_support)
        self._learner = self._cumulant_computer._learner
        self._solver = solver
        self._tf_feed_dict = None
        self._events_of_cumulants = None

        self.history.print_order = ["n_iter", "objective", "rel_obj"]
Example #6
    def __init__(self, decay, C=1e3, lasso_nuclear_ratio=0.5, max_iter=50,
                 tol=1e-5, n_threads=1, verbose=False, print_every=10,
                 record_every=10, rho=.1, approx=0, em_max_iter=30,
                 em_tol=None):

        LearnerHawkesNoParam.__init__(
            self, verbose=verbose, max_iter=max_iter, print_every=print_every,
            tol=tol, n_threads=n_threads, record_every=record_every)
        self.baseline = None
        self.adjacency = None
        self._C = 0
        self._lasso_nuclear_ratio = 0

        self.decay = decay
        self.rho = rho

        self._prox_l1 = ProxL1(1.)
        self._prox_nuclear = ProxNuclear(1.)

        self.C = C
        self.lasso_nuclear_ratio = lasso_nuclear_ratio
        self.verbose = verbose

        self.em_max_iter = em_max_iter
        self.em_tol = em_tol

        self._learner = _HawkesADM4(decay, rho, n_threads, approx)

        # TODO add approx to model
        self._model = ModelHawkesExpKernLogLik(self.decay,
                                               n_threads=self.n_threads)

        self.history.print_order += ["rel_baseline", "rel_adjacency"]
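A usage sketch for the constructor above, assuming the class is exposed as `tick.hawkes.HawkesADM4`; `decay` is the exponential decay shared by the kernels, and `C` together with `lasso_nuclear_ratio` splits the penalty between the L1 and nuclear-norm terms, mirroring the `ProxL1` and `ProxNuclear` objects above. The toy data is synthetic.

import numpy as np
from tick.hawkes import HawkesADM4

rng = np.random.RandomState(5)
events = [np.cumsum(rng.exponential(1., size=400)) for _ in range(2)]

learner = HawkesADM4(decay=2., C=1e3, lasso_nuclear_ratio=0.5,
                     max_iter=50, rho=0.1)
learner.fit(events)

print(learner.baseline)   # exogenous intensity per node
print(learner.adjacency)  # estimated (n_nodes, n_nodes) adjacency matrix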
Example #7
    def _set_data(self, events: list):
        """Set the corresponding realization(s) of the process.

        Parameters
        ----------
        events : `list` of `list` of `np.ndarray`
            List of Hawkes process realizations. Each realization is a list
            of length n_nodes, one entry per component of the Hawkes process:
            `events[i][j]` is a one-dimensional `numpy.ndarray` containing the
            timestamps of the events of component j in realization i.
            If only one realization is given, it will be wrapped into a list.
        """
        LearnerHawkesNoParam._set_data(self, events)

        events, end_times = self._clean_events_and_endtimes(events)

        self._model.fit(events, end_times=end_times)
        self._prox_nuclear.n_rows = self.n_nodes

    def __init__(self, kernel_support, n_basis=None, kernel_size=10, tol=1e-5,
                 C=1e-1, max_iter=100, verbose=False, print_every=10,
                 record_every=10, n_threads=1, ode_max_iter=100, ode_tol=1e-5):

        LearnerHawkesNoParam.__init__(self, max_iter=max_iter, verbose=verbose,
                                      tol=tol, print_every=print_every,
                                      record_every=record_every,
                                      n_threads=n_threads)

        self.ode_max_iter = ode_max_iter
        self.ode_tol = ode_tol

        alpha = 1. / C
        if n_basis is None:
            n_basis = 0

        self._learner = _HawkesBasisKernels(kernel_support, kernel_size,
                                            n_basis, alpha, n_threads)
        self._amplitudes_2d = None

        self.history.print_order = [
            "n_iter", "rel_baseline", "rel_amplitudes", "rel_basis_kernels"
        ]
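A usage sketch for the constructor above, assuming the class is exposed as `tick.hawkes.HawkesBasisKernels`; note that `C` only enters the solver through `alpha = 1 / C`, and that `n_basis=None` is mapped to 0 before being passed to the underlying learner, as in the constructor. The event data is synthetic.

import numpy as np
from tick.hawkes import HawkesBasisKernels

rng = np.random.RandomState(4)
events = [np.cumsum(rng.exponential(1., size=400)) for _ in range(2)]

# kernel_support / kernel_size control the discretization of the basis
# kernels; C enters only through alpha = 1 / C.
learner = HawkesBasisKernels(kernel_support=4., kernel_size=10,
                             n_basis=2, C=1e-1, max_iter=50)
learner.fit(events)

print(learner.baseline)  # estimated exogenous intensities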